From 9877f81fe0b0f6d76c73d0faa98054ec698baeaa Mon Sep 17 00:00:00 2001
From: Vladimir Mandic
Date: Wed, 25 Nov 2020 08:30:53 -0500
Subject: [PATCH] update for node v15

---
 build/package.json                 |    9 +-
 dist/face-api.esm.js               | 2587 ++++++++++++++--------------
 dist/face-api.esm.js.map           |    6 +-
 dist/face-api.esm.nobundle.js      |  715 ++++----
 dist/face-api.esm.nobundle.js.map  |    6 +-
 dist/face-api.js                   | 2587 ++++++++++++++--------------
 dist/face-api.js.map               |    6 +-
 dist/face-api.node.js              | 2583 +++++++++++++--------------
 dist/face-api.node.js.map          |    6 +-
 dist/face-api.node.nobundle.js     |  827 ++++-----
 dist/face-api.node.nobundle.js.map |    6 +-
 package-lock.json                  |   84 +-
 package.json                       |    6 +-
 13 files changed, 4752 insertions(+), 4676 deletions(-)

diff --git a/build/package.json b/build/package.json
index 04b807c..7a79361 100644
--- a/build/package.json
+++ b/build/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@vladmandic/face-api",
-  "version": "0.8.6",
+  "version": "0.8.8",
   "description": "JavaScript module for Face Detection and Face Recognition Using Tensorflow/JS",
   "main": "dist/face-api.node.js",
   "module": "dist/face-api.esm.js",
@@ -10,6 +10,7 @@
     "node": ">=12.0.0"
   },
   "scripts": {
+    "start": "node --trace-warnings example/node.js",
     "build-esm": "esbuild --bundle --format=esm --target=es2018 --platform=browser --minify --sourcemap --outfile=./dist/face-api.esm.js --log-level=error --tsconfig=./tsconfig.json --external:util --external:string_decoder --external:fs src/index.ts",
     "build-esm-nobundle": "esbuild --bundle --format=esm --target=es2018 --platform=browser --sourcemap --outfile=./dist/face-api.esm.nobundle.js --log-level=error --tsconfig=./tsconfig.json --external:@tensorflow --external:util --external:string_decoder --external:fs --global-name=faceapi src/index.ts",
     "build-iife": "esbuild --bundle --format=iife --target=es2018 --platform=browser --minify --sourcemap --outfile=./dist/face-api.js --log-level=error --tsconfig=./tsconfig.json --external:util --external:string_decoder --external:fs --global-name=faceapi src/index.ts",
@@ -43,12 +44,12 @@
   "devDependencies": {
     "@tensorflow/tfjs": "^2.7.0",
     "@tensorflow/tfjs-node": "^2.7.0",
-    "@types/node": "^14.11.8",
-    "esbuild": "^0.6.34",
+    "@types/node": "^14.14.10",
+    "esbuild": "^0.8.15",
     "rimraf": "^3.0.2",
     "ts-node": "^9.0.0",
     "tslib": "^2.0.3",
-    "typescript": "^4.1.0-dev.20201013"
+    "typescript": "^4.1.2"
   },
   "dependencies": {}
 }
diff --git a/dist/face-api.esm.js b/dist/face-api.esm.js
index 1a82f1c..c08252e 100644
--- a/dist/face-api.esm.js
+++ b/dist/face-api.esm.js
@@ -1,75 +1,75 @@
-var Mm=Object.defineProperty,LJ=Object.prototype.hasOwnProperty,ES=(r,l)=>()=>(l||(l={exports:{}},r(l.exports,l)),l.exports),XC=r=>Mm(r,"__esModule",{value:!0}),Pm=(r,l)=>{XC(r);for(var u in l)Mm(r,u,{get:l[u],enumerable:!0})},SJ=(r,l)=>{if(XC(r),typeof l=="object"||typeof l=="function")for(let u in l)!LJ.call(r,u)&&u!=="default"&&Mm(r,u,{get:()=>l[u],enumerable:!0});return r},Je=r=>r&&r.__esModule?r:SJ(Mm({},"default",{value:r,enumerable:!0}),r);var ZC=ES((Ac,JC)=>{"use strict";var IJ=function(){if(typeof self!="undefined")return self;if(typeof window!="undefined")return window;if(typeof mr!="undefined")return mr;throw new Error("unable to locate global object")},mr=IJ();JC.exports=Ac=mr.fetch;mr.fetch&&(Ac.default=mr.fetch.bind(mr));Ac.Headers=mr.Headers;Ac.Request=mr.Request;Ac.Response=mr.Response});var Ze=ES((zm,QC)=>{(function(r,l){typeof zm=="object"&&typeof QC!="undefined"?l(zm):typeof 
define=="function"&&define.amd?define(["exports"],l):(r=r||self,l(r.tf=r.tf||{}))})(zm,function(r){"use strict";const l=1e-7,u=1e-4;class p{constructor(e,t){this.backend=e,this.dataMover=t,this.data=new WeakMap,this.dataIdsCount=0}get(e){return this.data.has(e)||this.dataMover.moveData(this.backend,e),this.data.get(e)}set(e,t){this.dataIdsCount++,this.data.set(e,t)}has(e){return this.data.has(e)}delete(e){return this.dataIdsCount--,this.data.delete(e)}numDataIds(){return this.dataIdsCount}}class y{time(e){return g("time")}read(e){return g("read")}readSync(e){return g("readSync")}numDataIds(){return g("numDataIds")}disposeData(e){return g("disposeData")}write(e,t,n){return g("write")}move(e,t,n,s){return g("move")}memory(){return g("memory")}floatPrecision(){return g("floatPrecision")}epsilon(){return this.floatPrecision()===32?l:u}batchMatMul(e,t,n,s){return g("batchMatMul")}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){return g("fusedBatchMatMul")}slice(e,t,n){return g("slice")}stridedSlice(e,t,n,s){return g("stridedSlice")}unstack(e,t){return g("unstack")}reverse(e,t){return g("reverse")}concat(e,t){return g("concat")}neg(e){return g("neg")}add(e,t){return g("add")}addN(e){return g("addN")}subtract(e,t){return g("subtract")}multiply(e,t){return g("multiply")}realDivide(e,t){return g("realDivide")}floorDiv(e,t){return g("floorDiv")}sum(e,t){return g("sum")}prod(e,t){return g("prod")}unsortedSegmentSum(e,t,n){return g("unsortedSegmentSum")}argMin(e,t){return g("argMin")}argMax(e,t){return g("argMax")}equal(e,t){return g("equal")}notEqual(e,t){return g("notEqual")}less(e,t){return g("less")}lessEqual(e,t){return g("lessEqual")}greater(e,t){return g("greater")}greaterEqual(e,t){return g("greaterEqual")}logicalNot(e){return g("logicalNot")}logicalAnd(e,t){return g("logicalAnd")}logicalOr(e,t){return g("logicalOr")}where(e){return g("where")}select(e,t,n){return g("select")}topk(e,t,n){return g("topk")}min(e,t){return g("min")}minimum(e,t){return g("minimum")}mod(e,t){return g("mod")}max(e,t){return g("max")}maximum(e,t){return g("maximum")}all(e,t){return g("all")}any(e,t){return g("any")}squaredDifference(e,t){return g("squaredDifference")}ceil(e){return g("ceil")}floor(e){return g("floor")}round(e){return g("round")}sign(e){return g("sign")}isNaN(e){return g("isNaN")}isInf(e){return g("isInf")}isFinite(e){return g("isFinite")}pow(e,t){return g("pow")}exp(e){return g("exp")}expm1(e){return g("expm1")}softmax(e,t){return g("softmax")}log(e){return g("log")}log1p(e){return g("log1p")}sqrt(e){return g("sqrt")}rsqrt(e){return g("rsqrt")}square(e){return g("square")}reciprocal(e){return g("reciprocal")}relu(e){return g("relu")}relu6(e){return g("relu6")}prelu(e,t){return g("prelu")}elu(e){return g("elu")}eluDer(e,t){return g("eluDer")}selu(e){return g("selu")}int(e){return g("int")}clip(e,t,n){return g("clip")}abs(e){return g("abs")}complexAbs(e){return g("complexAbs")}sigmoid(e){return g("sigmoid")}softplus(e){return g("softplus")}sin(e){return g("sin")}cos(e){return g("cos")}tan(e){return g("tan")}asin(e){return g("asin")}acos(e){return g("acos")}atan(e){return g("atan")}atan2(e,t){return g("atan2")}sinh(e){return g("sinh")}cosh(e){return g("cosh")}tanh(e){return g("tanh")}asinh(e){return g("asinh")}acosh(e){return g("acosh")}atanh(e){return g("atanh")}erf(e){return g("erf")}step(e,t){return g("step")}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedConv2d")}conv2d(e,t,n){return 
g("conv2d")}conv2dDerInput(e,t,n){return g("conv2dDerInput")}conv2dDerFilter(e,t,n){return g("conv2dDerFilter")}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){return g("fusedDepthwiseConv2D")}depthwiseConv2D(e,t,n){return g("depthwiseConv2D")}depthwiseConv2DDerInput(e,t,n){return g("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(e,t,n){return g("depthwiseConv2DDerFilter")}conv3d(e,t,n){return g("conv3d")}conv3dDerInput(e,t,n){return g("conv3dDerInput")}conv3dDerFilter(e,t,n){return g("conv3dDerFilter")}maxPool(e,t){return g("maxPool")}maxPoolBackprop(e,t,n,s){return g("maxPoolBackprop")}avgPool(e,t){return g("avgPool")}avgPoolBackprop(e,t,n){return g("avgPoolBackprop")}avgPool3d(e,t){return g("avgPool3d")}avgPool3dBackprop(e,t,n){return g("avgPool3dBackprop")}maxPool3d(e,t){return g("maxPool3d")}maxPool3dBackprop(e,t,n,s){return g("maxPool3dBackprop")}reshape(e,t){return g("reshape")}cast(e,t){return g("cast")}tile(e,t){return g("tile")}pad(e,t,n){return g("pad")}transpose(e,t){return g("transpose")}gather(e,t,n){return g("gather")}gatherND(e,t){return g("gatherND")}scatterND(e,t,n){return g("scatterND")}batchToSpaceND(e,t,n){return g("batchToSpaceND")}spaceToBatchND(e,t,n){return g("spaceToBatchND")}resizeBilinear(e,t,n,s){return g("resizeBilinear")}resizeBilinearBackprop(e,t,n){return g("resizeBilinearBackprop")}resizeNearestNeighbor(e,t,n,s){return g("resizeNearestNeighbor")}resizeNearestNeighborBackprop(e,t,n){return g("resizeNearestNeighborBackprop")}batchNorm(e,t,n,s,i,o){return g("batchNorm")}localResponseNormalization4D(e,t,n,s,i){return g("localResponseNormalization4D")}LRNGrad(e,t,n,s,i,o,a){return g("LRNGrad")}multinomial(e,t,n,s){return g("multinomial")}oneHot(e,t,n,s){return g("oneHot")}cumsum(e,t,n,s){return g("cumsum")}nonMaxSuppression(e,t,n,s,i){return g("nonMaxSuppression")}fft(e){return g("fft")}ifft(e){return g("ifft")}complex(e,t){return g("complex")}real(e){return g("real")}imag(e){return g("imag")}cropAndResize(e,t,n,s,i,o){return g("cropAndResize")}depthToSpace(e,t,n){return g("depthToSpace")}split(e,t,n){return g("split")}sparseToDense(e,t,n,s){return g("sparseToDense")}diag(e){return g("diag")}fill(e,t,n){return g("fill")}onesLike(e){return g("onesLike")}zerosLike(e){return g("zerosLike")}linspace(e,t,n){return g("linspace")}dispose(){return g("dispose")}}function g(e){throw new Error(`'${e}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function I(e){let t=e.length,n=0,s=0;for(;t>0;)s=Math.random()*t|0,t--,n=e[t],e[t]=e[s],e[s]=n}function S(e,t,n){return Math.max(e,Math.min(t,n))}function T(e){return e%2===0?e:e+1}function C(e){let t=0;for(let n=0;nn+` Shapes ${e} and ${t} must match`)}function ne(e){A(e!=null,()=>"The input to the tensor constructor must be a non-null value.")}function te(e,t=[],n=!1){if(t==null&&(t=[]),Array.isArray(e)||hn(e)&&!n)for(let s=0;s0,n){return new Promise((s,i)=>{let o=0;const a=()=>{if(e()){s();return}o++;const c=t(o);if(n!=null&&o>=n){i();return}setTimeout(a,c)};a()})}function Vt(e,t){let n=1,s=-1;for(let o=0;o=0)n*=e[o];else if(e[o]===-1){if(s!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${s} and dim ${o}`);s=o}else if(e[o]<0)throw Error(`Shapes can not be < 0. 
Found ${e[o]} at dim ${o}`);if(s===-1){if(t>0&&t!==n)throw Error(`Size(${t}) must match the product of shape ${e}`);return e}if(n===0)throw Error(`Cannot infer the missing size in [${e}] when there are 0 elements`);if(t%n!==0)throw Error(`The implicit shape can't be a fractional number. Got ${t} / ${n}`);const i=e.slice();return i[s]=t/n,i}function qe(e,t){const n=t.length;return e=e==null?t.map((s,i)=>i):[].concat(e),A(e.every(s=>s>=-n&&s`All values in axis param must be in range [-${n}, ${n}) but got axis ${e}`),A(e.every(s=>Le(s)),()=>`All values in axis param must be integers but got axis ${e}`),e.map(s=>s<0?n+s:s)}function ln(e,t){const n=[],s=[],i=t!=null&&Array.isArray(t)&&t.length===0,o=t==null||i?null:qe(t,e).sort();let a=0;for(let c=0;cc)&&e[c]===1&&(n.push(e[c]),s.push(c)),o[a]<=c&&a++}e[c]!==1&&(n.push(e[c]),s.push(c))}return{newShape:n,keptDims:s}}function bt(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else throw new Error(`Unknown data type ${e}`);return n}function ws(e,t){let n=null;if(e==null||e==="float32")n=new Float32Array(t);else if(e==="int32")n=new Int32Array(t);else if(e==="bool")n=new Uint8Array(t);else if(e==="string")n=new Array(t);else throw new Error(`Unknown data type ${e}`);return n}function Nr(e,t){for(let n=0;nt+=n.length),t}function Yi(e){return typeof e=="string"||e instanceof String}function xx(e){return typeof e=="boolean"}function Qu(e){return typeof e=="number"}function wa(e){return Array.isArray(e)?wa(e[0]):e instanceof Float32Array?"float32":e instanceof Int32Array||e instanceof Uint8Array?"int32":Qu(e)?"float32":Yi(e)?"string":xx(e)?"bool":"float32"}function Rr(e){return!!(e&&e.constructor&&e.call&&e.apply)}function ed(e,t){for(let n=t;n=0;--s)n[s]=n[s+1]*e[s+1];return n}function Tx(e,t,n){const s=new Array;if(t.length===1){const i=t[0];for(let o=0;oc*h);for(let c=0;cs*i);if(n===0)return[];if(n!==t.length)throw new Error(`[${e}] does not match the input size ${t.length}.`);return Tx(0,e,t)}function Mg(e,t){const n=La(e,t);for(let s=0;ss*i,1);if(t==null||t==="float32")return Ls(e,new Float32Array(n));if(t==="int32")return Ls(e,new Int32Array(n));if(t==="bool")return Ls(e,new Uint8Array(n));throw new Error(`Unknown data type ${t}`)}function zg(e){e.forEach(t=>{A(Number.isInteger(t)&&t>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${e}].`)})}function _s(e,t,n){if(t===0)return 0;if(t===1)return e[0];let s=e[e.length-1];for(let i=0;i{const[s,i]=n.split(":");this.urlFlags[s]=GD(s,i)})}}}function zD(e){const t={};return e.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(n,...s)=>(VD(t,s[0],s[1]),s.join("="))),t}function VD(e,t,n){e[decodeURIComponent(t)]=decodeURIComponent(n||"")}function GD(e,t){if(t=t.toLowerCase(),t==="true"||t==="false")return t==="true";if(`${+t}`===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${e}.`)}function oe(){return r.ENV}r.ENV=null;function YD(e){r.ENV=e}let Vg;function Nx(){if(Vg==null){let e;if(typeof window!="undefined")e=window;else if(typeof global!="undefined")e=global;else if(typeof process!="undefined")e=process;else if(typeof self!="undefined")e=self;else throw new Error("Could not find a global object");Vg=e}return Vg}function HD(){const e=Nx();return e._tfGlobals==null&&(e._tfGlobals=new Map),e._tfGlobals}function Cx(e,t){const n=HD();if(n.has(e))return n.get(e);{const s=t();return n.set(e,s),n.get(e)}}const 
td="Abs",ol="Acos",al="Acosh",wo="Add",Gg="AddN",Rx="All",Ox="Any",Yg="ArgMax",Hg="ArgMin",cl="Asin",ll="Asinh",hl="Atan",ul="Atanh",nd="Atan2",dl="AvgPool",sd="AvgPoolBackprop",qg="AvgPool3D",Ex="AvgPool3DBackprop",id="BatchMatMul",jg="BatchToSpaceND",Kg="BroadcastTo",Sa="Cast",pl="Ceil",ml="ClipByValue",rd="Complex",fl="Concat",od="Conv2D",Xg="Conv2DBackpropFilter",ad="Conv2DBackpropInput",cd="Conv3D",Jg="Conv3DBackpropFilterV2",Zg="Conv3DBackpropInputV2",Ia="Cos",gl="Cosh",Qg="Cumsum",Dx="CropAndResize",kx="DepthToSpace",ld="DepthwiseConv2dNative",ey="DepthwiseConv2dNativeBackpropFilter",ty="DepthwiseConv2dNativeBackpropInput",Fx="Diag",hd="Dilation2D",ud="Dilation2DBackpropInput",dd="Dilation2DBackpropFilter",xa="Div",yl="Elu",_x="EluGrad",bl="Erf",Wx="Equal",wl="Exp",Ll="Expm1",pd="FFT",ny="Fill",md="FlipLeftRight",Sl="Floor",sy="FloorDiv",Il="FusedBatchNorm",iy="GatherV2",$x="GatherNd",Ux="Greater",ry="GreaterEqual",xl="Identity",fd="IFFT",gd="Imag",Tl="IsFinite",Al="IsInf",vl="IsNan",Bx="Less",Mx="LessEqual",Px="LinSpace",Nl="Log",Cl="Log1p",zx="LogicalAnd",yd="LogicalNot",Vx="LogicalOr",oy="LogSoftmax",ay="LRN",Gx="LRNBackprop",Rl="Max",cy="Maximum",Ol="MaxPool",bd="MaxPoolBackprop",ly="MaxPool3D",Yx="MaxPool3DBackprop",wd="MaxPoolWithArgmax",hy="Mean",uy="Min",dy="Minimum",El="MirrorPad",py="Mod",Ta="Multiply",my="Negate",Dl="NotEqual",fy="NonMaxSuppressionV3",Ld="NonMaxSuppressionV4",Sd="NonMaxSuppressionV5",gy="OnesLike",yy="OneHot",Id="PadV2",qD="Pool",by="Pow",xd="Prelu",Hx="Prod",qx="Range",Td="Real",kl="Reciprocal",Fl="Relu",_l="Reshape",wy="ResizeNearestNeighbor",jx="ResizeNearestNeighborGrad",Ly="ResizeBilinear",Kx="ResizeBilinearGrad",Wl="Relu6",Sy="Reverse",$l="Round",Ul="Rsqrt",Xx="ScatterNd",Iy="SelectV2",Bl="Selu",Ad="Slice",Aa="Sin",Ml="Sinh",Pl="Sign",zl="Sigmoid",Vl="Softplus",Gl="Sqrt",xy="Sum",vd="SpaceToBatchND",Ty="SplitV",Ay="Softmax",va="SquaredDifference",Nd="Square",Na="Sub",Jx="SparseToDense",Zx="StridedSlice",Ca="Tan",Yl="Tanh",vy="Tile",Qx="TopK",Hl="Transpose",Cd="Unique",Ny="Unpack",Cy="UnsortedSegmentSum",Ry="ZerosLike",ql="Step",Rd="FromPixels",Od="RotateWithOffset",Ed="_FusedMatMul",Dd="FusedConv2D",kd="FusedDepthwiseConv2D";const Ra=Cx("kernelRegistry",()=>new Map),jl=Cx("gradRegistry",()=>new Map);function Oy(e,t){const n=Dy(e,t);return Ra.get(n)}function Ey(e){return jl.get(e)}function Fd(e){const t=Ra.entries(),n=[];for(;;){const{done:s,value:i}=t.next();if(s)break;const[o,a]=i,[c]=o.split("_");c===e&&n.push(a)}return n}function _d(e){const{kernelName:t,backendName:n}=e,s=Dy(t,n);Ra.has(s)&&console.warn(`The kernel '${t}' for backend '${n}' is already registered`),Ra.set(s,e)}function eT(e){const{kernelName:t}=e;jl.has(t)&&(oe().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`)),jl.set(t,e)}function jD(e,t){const n=Dy(e,t);if(!Ra.has(n))throw new Error(`The kernel '${e}' for backend '${t}' is not registered`);Ra.delete(n)}function KD(e){if(!jl.has(e))throw new Error(`The gradient '${e}' for backend is not registered`);jl.delete(e)}function XD(e,t){const n=Fd(e);n.forEach(s=>{const i=Object.assign({},s,{backendName:t});_d(i)})}function Dy(e,t){return`${t}_${e}`}function tT(e,t){return t==="string"?Wd(e):Or([e],t)}function JD(e,t){return e instanceof Float32Array&&t==="float32"||e instanceof Int32Array&&t==="int32"||e instanceof Uint8Array&&t==="bool"}function Or(e,t){if(t==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(e)&&(e=te(e)),oe().getBool("DEBUG")&&Nr(e,t),JD(e,t))return 
e;if(t==null||t==="float32"||t==="complex64")return new Float32Array(e);if(t==="int32")return new Int32Array(e);if(t==="bool"){const n=new Uint8Array(e.length);for(let s=0;s{s=n()},o=this.backendTimer.time(i);for(let c=0;c{ek(d,h.dtype,e)})}const a={kernelName:e,outputs:s,inputs:t,timeMs:o.then(c=>c.kernelMs),extraInfo:o.then(c=>c.getExtraProfileInfo!=null?c.getExtraProfileInfo():"")};return a}logKernelProfile(e){const{kernelName:t,outputs:n,timeMs:s,inputs:i,extraInfo:o}=e;n.forEach(a=>{Promise.all([a.data(),s,o]).then(c=>{this.logger.logKernelProfile(t,a,c[0],c[1],i,c[2])})})}}function ek(e,t,n){if(t!=="float32")return!1;for(let s=0;s0?L:""} `}}console.log(`%c${c} %c${a} %c${h}D ${m} %c${d} %c${f} %c${o}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function nk(e,t,n){const s={},i={};for(let h=0;hs[x.id]=!0),w=!0,i[d.id]=!0;break}if(w)break}}const o={};o[n.id]=!0;const a={};for(let h=e.length-1;h>=0;h--){const d=e[h],m=d.inputs;for(let f=0;f=0;i--){const o=t[i],a=[];if(o.outputs.forEach(h=>{const d=e[h.id];d!=null?a.push(d):a.push(null)}),o.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${o.kernelName}.`);const c=o.gradient(a);for(const h in o.inputs){if(!(h in c))throw new Error(`Cannot backprop through input ${h}. Available gradients found: ${Object.keys(c)}.`);const d=n(()=>c[h]());if(d.dtype!=="float32")throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input ${h} must have 'float32' dtype, but has '${d.dtype}'`);const m=o.inputs[h];if(!ae(d.shape,m.shape))throw new Error(`Error in gradient for op ${o.kernelName}. The gradient of input '${h}' has shape '${d.shape}', which does not match the shape of the input '${m.shape}'`);if(e[m.id]==null)e[m.id]=d;else{const f=e[m.id];e[m.id]=s(f,d),f.dispose()}}}}const sT=20,Xl=3,ky=7;function ik(e,t,n,s){const i=je(t),o=rk(e,t,n,i),a=t.length,c=$d(e,t,n,i,o),h=["Tensor"];return s&&(h.push(` dtype: ${n}`),h.push(` rank: ${a}`),h.push(` shape: [${t}]`),h.push(" values:")),h.push(c.map(d=>" "+d).join(` -`)),h.join(` -`)}function rk(e,t,n,s){const i=P(t),o=s[s.length-1],a=new Array(o).fill(0),c=t.length,h=n==="complex64"?Zl(e):e;if(c>1)for(let d=0;dsT){const v=Xl*a;let N=Array.from(e.slice(0,v)),O=Array.from(e.slice((c-Xl)*a,c*a));return n==="complex64"&&(N=Zl(N),O=Zl(O)),["["+N.map((E,k)=>Jl(E,i[k],n)).join(", ")+", ..., "+O.map((E,k)=>Jl(E,i[c-Xl+k],n)).join(", ")+"]"]}const x=n==="complex64"?Zl(e):Array.from(e);return["["+x.map((v,N)=>Jl(v,i[N],n)).join(", ")+"]"]}const d=t.slice(1),m=s.slice(1),f=s[0]*a,b=[];if(c>sT){for(let x=0;x`Length of values '${s}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||ws(t,this.size),this.strides=je(e)}set(e,...t){t.length===0&&(t=[0]),A(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const s of e){if(s<0||s>=this.shape[t]){const i=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(i)}t++}let n=e[e.length-1];for(let s=0;sKl(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=Si().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>Kl(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await Si().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;Si().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return Oa.print(this,e)}clone(){return this.throwIfDisposed(),Oa.clone(this)}toString(e=!1){const t=this.dataSync();return ik(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),Oa.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),Si().makeVariable(this,e,t,n)}}Object.defineProperty(ee,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class Ql extends ee{constructor(e,t,n,s){super(e.shape,e.dtype,e.dataId,s);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!ae(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);Si().disposeTensor(this),this.dataId=e.dataId,Si().incRef(this,null)}dispose(){Si().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Ql,Symbol.hasInstance,{value:e=>e instanceof ee&&e.assign!=null&&e.assign instanceof Function});(function(e){e.R0="R0",e.R1="R1",e.R2="R2",e.R3="R3",e.R4="R4",e.R5="R5",e.R6="R6"})(r.Rank||(r.Rank={}));var Fy;(function(e){e.float32="float32",e.int32="int32",e.bool="int32",e.complex64="complex64"})(Fy||(Fy={}));var _y;(function(e){e.float32="float32",e.int32="int32",e.bool="bool",e.complex64="complex64"})(_y||(_y={}));var Wy;(function(e){e.float32="float32",e.int32="float32",e.bool="float32",e.complex64="complex64"})(Wy||(Wy={}));var $y;(function(e){e.float32="complex64",e.int32="complex64",e.bool="complex64",e.complex64="complex64"})($y||($y={}));const lk={float32:Wy,int32:Fy,bool:_y,complex64:$y};function $n(e,t){if(e==="string"||t==="string"){if(e==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${e} with ${t}`)}return lk[e][t]}function Ud(e){return $n(e,"int32")}function Gt(e,t){if(e.dtype===t.dtype)return[e,t];const n=$n(e.dtype,t.dtype);return[e.cast(n),t.cast(n)]}function oT(e,t){A(e.dtype===t.dtype,()=>`The dtypes of the first(${e.dtype}) and second(${t.dtype}) input must match`)}function Bd(e,t){return t.some(n=>n.id===e.id)}function Hi(e){const t=[],n=new Set;return aT(e,t,n),t}function aT(e,t,n){if(e==null)return;if(e instanceof ee){t.push(e);return}if(!hk(e))return;const s=e;for(const i in s){const o=s[i];n.has(o)||(n.add(o),aT(o,t,n))}}function hk(e){return Array.isArray(e)||typeof e=="object"}var uk=Object.freeze({__proto__:null,makeTypesMatch:Gt,assertTypesMatch:oT,isTensorInList:Bd,getTensorsInContainer:Hi});class cT{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new 
WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const e in this.registeredVariables)this.registeredVariables[e].dispose()}}class eh{constructor(e){this.ENV=e,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new cT}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const e=this.getSortedBackends();for(let t=0;t{t.setupFunc!=null&&t.setupFunc(this.backendInstance)})}disposeRegisteredKernels(e){const t=Fd(e);t.forEach(n=>{n.disposeFunc!=null&&n.disposeFunc(this.registry[e])})}initializeBackend(e){const t=this.registryFactory[e];if(t==null)throw new Error(`Cannot initialize backend ${e}, no registration found.`);try{const n=t.factory();if(n&&!(n instanceof y)&&typeof n.then=="function"){const s=++this.pendingBackendInitId,i=n.then(o=>s(sthis.registryFactory[t].priority-this.registryFactory[e].priority)}initializeBackendsAndReturnBest(){const e=this.getSortedBackends();for(let t=0;tthis.startScope(n),()=>this.endScope(s),()=>(s=t(),s instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),s))}scopedRun(e,t,n){e();try{const s=n();return t(),s}catch(s){throw t(),s}}nextTensorId(){return eh.nextTensorId++}nextVariableId(){return eh.nextVariableId++}clone(e){const t=this.makeTensorFromDataId(e.dataId,e.shape,e.dtype),n={x:e},s=o=>({x:()=>{const a="float32",c={x:o},h={dtype:a};return G.runKernelFunc(d=>d.cast(o,a),c,null,Sa,h)}}),i=[];return this.addTapeNode(this.state.activeScope.name,n,[t],s,i,{}),t}runKernel(e,t,n,s,i){const o=null,a=null;return this.runKernelFunc(o,t,a,e,n,s,i)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(e,t,n){const s=this.backend.numDataIds();let i=0;n.forEach(c=>{i+=c.dtype==="complex64"?3:1});const o=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],a=s-t-i-o;if(a>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${a} data ids) after running '${e}'`)}runKernelFunc(e,t,n,s,i,o,a){let c,h=[];const d=this.isTapeOn();s==null&&(s=this.state.activeScope!=null?this.state.activeScope.name:"");const m=this.state.numBytes,f=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let b;const w=Oy(s,this.backendName);let L;if(w!=null)b=()=>{const v=this.backend.numDataIds();L=w.kernelFunc({inputs:t,attrs:i,backend:this.backend});const N=Array.isArray(L)?L:[L];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,v,N);const O=N.map(({dataId:E,shape:k,dtype:F})=>this.makeTensorFromDataId(E,k,F));if(d){let E=this.getTensorsForGradient(s,t,O);if(E==null){a==null&&(a=[]);const k=O.filter((F,U)=>a[U]);E=(o||[]).slice().concat(k)}h=this.saveTensorsForBackwardMode(E)}return O};else{const v=N=>{if(!d)return;h=N.map(O=>this.keep(this.clone(O)))};b=()=>{const N=this.backend.numDataIds();L=this.tidy(()=>e(this.backend,v));const O=Array.isArray(L)?L:[L];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(s,N,O),O}}let x;return 
this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?c=b():(x=this.profiler.profileKernel(s,t,()=>b()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(x),c=x.outputs)}),d&&this.addTapeNode(s,t,c,n,h,i),this.state.profiling&&this.state.activeProfile.kernels.push({name:s,bytesAdded:this.state.numBytes-m,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-f,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(t).map(v=>t[v]!=null?t[v].shape:null),outputShapes:c.map(v=>v.shape),kernelTimeMs:x.timeMs,extraInfo:x.extraInfo}),Array.isArray(L)?c:c[0]}saveTensorsForBackwardMode(e){const t=e.map(n=>this.keep(this.clone(n)));return t}getTensorsForGradient(e,t,n){const s=Ey(e);if(s!=null){const i=s.inputsToSave||[],o=s.outputsToSave||[];let a;s.saveAllInputs?(A(Array.isArray(t),()=>"saveAllInputs is true, expected inputs to be an array."),a=Object.keys(t).map(h=>t[h])):a=i.map(h=>t[h]);const c=n.filter((h,d)=>o[d]);return a.concat(c)}return null}makeTensor(e,t,n,s){if(e==null)throw new Error("Values passed to engine.makeTensor() are null");n=n||"float32",s=s||this.backend;let i=e;n==="string"&&Yi(e[0])&&(i=e.map(c=>Wd(c)));const o=s.write(i,t,n),a=new ee(t,n,o,this.nextTensorId());if(this.incRef(a,s),n==="string"){const c=this.state.tensorInfo.get(o),h=Ix(i);this.state.numBytes+=h-c.bytes,c.bytes=h}return a}makeTensorFromDataId(e,t,n,s){n=n||"float32";const i=new ee(t,n,e,this.nextTensorId());return this.incRef(i,s),i}makeVariable(e,t=!0,n,s){n=n||this.nextVariableId().toString(),s!=null&&s!==e.dtype&&(e=e.cast(s));const i=new Ql(e,t,n,this.nextTensorId());if(this.state.registeredVariables[i.name]!=null)throw new Error(`Variable with name ${i.name} was already registered`);return this.state.registeredVariables[i.name]=i,this.incRef(i,this.backend),i}incRef(e,t){const n=this.state.tensorInfo.has(e.dataId)?this.state.tensorInfo.get(e.dataId).refCount:0;if(this.state.numTensors++,e.dtype==="string"&&this.state.numStringTensors++,n===0){this.state.numDataBuffers++;let s=0;e.dtype!=="complex64"&&e.dtype!=="string"&&(s=e.size*Bg(e.dtype)),this.state.tensorInfo.set(e.dataId,{backend:t||this.backend,dtype:e.dtype,shape:e.shape,bytes:s,refCount:0}),this.state.numBytes+=s}this.state.tensorInfo.get(e.dataId).refCount++,e instanceof Ql||this.track(e)}disposeTensor(e){if(!this.state.tensorInfo.has(e.dataId))return;this.state.numTensors--,e.dtype==="string"&&this.state.numStringTensors--;const t=this.state.tensorInfo.get(e.dataId),n=t.refCount;n<=1?(e.dtype!=="complex64"&&(this.state.numBytes-=t.bytes),this.state.numDataBuffers--,t.backend.disposeData(e.dataId),this.state.tensorInfo.delete(e.dataId)):this.state.tensorInfo.get(e.dataId).refCount--}disposeVariables(){for(const e in this.state.registeredVariables){const t=this.state.registeredVariables[e];this.disposeVariable(t)}}disposeVariable(e){this.disposeTensor(e),this.state.registeredVariables[e.name]!=null&&delete this.state.registeredVariables[e.name]}memory(){const e=this.backend.memory();return e.numTensors=this.state.numTensors,e.numDataBuffers=this.state.numDataBuffers,e.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(e.unreliable=!0,e.reasons==null&&(e.reasons=[]),e.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),e}async profile(e){this.state.profiling=!0;const t=this.state.numBytes,n=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await 
e(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(s=>s.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-t,this.state.activeProfile.newTensors=this.state.numTensors-n;for(const s of this.state.activeProfile.kernels)s.kernelTimeMs=await s.kernelTimeMs,s.extraInfo=await s.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(e,t,n,s,i,o){const a={id:this.state.nextTapeNodeId++,kernelName:e,inputs:t,outputs:n,saved:i},c=Ey(e);c!=null&&(s=c.gradFunc),s!=null&&(a.gradient=h=>(h=h.map((d,m)=>{if(d==null){const f=n[m],b=La(f.size,f.dtype);return this.makeTensor(b,f.shape,f.dtype)}return d}),s(h.length>1?h:h[0],i,o))),this.state.activeTape.push(a)}keep(e){return e.kept=!0,e}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(e){const t={track:[],name:"unnamed scope",id:this.state.nextScopeId++};e&&(t.name=e),this.state.scopeStack.push(t),this.state.activeScope=t}endScope(e){const t=Hi(e),n=new Set(t.map(i=>i.id));for(let i=0;i{!i.kept&&i.scopeId===s.id&&this.track(i)})}gradients(e,t,n,s=!1){if(A(t.length>0,()=>"gradients() received an empty list of xs."),n!=null&&n.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${n.dtype}'`);const i=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",e));A(i instanceof ee,()=>"The result y returned by f() must be a tensor.");const o=nk(this.state.activeTape,t,i);if(!s&&o.length===0&&t.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const a={};a[i.id]=n==null?dk(i.shape):n,sk(a,o,h=>this.tidy(h),pk);const c=t.map(h=>a[h.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(h=>{for(const d of h.saved)d.dispose()}),this.state.activeTape=null),{value:i,grads:c}})}customGrad(e){return A(Rr(e),()=>"The f passed in customGrad(f) must be a function."),(...t)=>{A(t.every(i=>i instanceof ee),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let n;const s={};return t.forEach((i,o)=>{s[o]=i}),this.runKernelFunc((i,o)=>(n=e(...t,o),A(n.value instanceof ee,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),A(Rr(n.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),n.value),s,(i,o)=>{const a=n.gradFunc(i,o),c=Array.isArray(a)?a:[a];A(c.length===t.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),A(c.every(d=>d instanceof ee),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const h={};return c.forEach((d,m)=>{h[m]=()=>d}),h})}}readSync(e){const t=this.state.tensorInfo.get(e);return t.backend.readSync(e)}read(e){const t=this.state.tensorInfo.get(e);return t.backend.read(e)}async time(e){const t=jn(),n=await this.backend.time(e);return n.wallMs=jn()-t,n}track(e){return this.state.activeScope!=null&&(e.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(e)),e}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new cT;for(const e in this.registry)this.disposeRegisteredKernels(e),this.registry[e].dispose(),delete this.registry[e];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}eh.nextTensorId=0,eh.nextVariableId=0;function dk(e){const t=Mg(P(e),"float32");return G.makeTensor(t,e,"float32")}function lT(){const e=Nx();if(e._tfengine==null){const t=new vx(e);e._tfengine=new eh(t)}return YD(e._tfengine.ENV),ok(()=>e._tfengine),e._tfengine}const G=lT();function pk(e,t){const n={a:e,b:t};return G.runKernelFunc((s,i)=>{const o=s.add(e,t);return i([e,t]),o},n,null,wo)}function mk(){return typeof navigator!="undefined"&&navigator!=null}function hT(){if(mk()){const e=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(e)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
)|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(e.substr(0,4))}return!1}function Uy(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var fk=Object.freeze({__proto__:null,isMobile:hT,isBrowser:Uy});const qi=oe();qi.registerFlag("DEBUG",()=>!1,e=>{e&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")}),qi.registerFlag("IS_BROWSER",()=>Uy()),qi.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined"),qi.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)),qi.registerFlag("PROD",()=>!1),qi.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>qi.getBool("DEBUG")),qi.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0),qi.registerFlag("IS_TEST",()=>!1);function Ii(e,t){let n=e;if(hn(e))return t==="string"?[]:[e.length];if(!Array.isArray(e))return[];const s=[];for(;Array.isArray(n)||hn(n)&&t!=="string";)s.push(n.length),n=n[0];return Array.isArray(e)&&oe().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&uT(e,s,[]),s}function uT(e,t,n){if(n=n||[],!Array.isArray(e)&&!hn(e)){A(t.length===0,()=>`Element arr[${n.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`);return}A(t.length>0,()=>`Element arr[${n.join("][")}] should be a primitive, but is an array of ${e.length} elements`),A(e.length===t[0],()=>`Element arr[${n.join("][")}] should have ${t[0]} elements, but has ${e.length} elements`);const s=t.slice(1);for(let i=0;i=0&&(i=s),dT(s,i,t,n),e==null||!hn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string"){const h=e==null?"null":e.constructor.name;throw new Error(`Argument '${t}' passed to '${n}' must be a Tensor or TensorLike, but got '${h}'`)}const o=Ii(e,i);!hn(e)&&!Array.isArray(e)&&(e=[e]);const a=!0,c=i!=="string"?Or(e,i):te(e,[],a);return G.makeTensor(c,o,i)}function th(e,t,n,s="numeric"){if(!Array.isArray(e))throw new Error(`Argument ${t} passed to ${n} must be a \`Tensor[]\` or \`TensorLike[]\``);const i=e;return i.map((o,a)=>W(o,`${t}[${a}]`,n),s)}const pT="__op";function z(e){const t=Object.keys(e);if(t.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let n=t[0];const s=e[n];n.endsWith("_")&&(n=n.substring(0,n.length-1)),n=n+pT;const i=(...o)=>{G.startScope(n);try{const a=s(...o);return bo(a)&&console.error("Cannot return a Promise inside of tidy."),G.endScope(a),a}catch(a){throw G.endScope(null),a}};return Object.defineProperty(i,"name",{value:n,configurable:!0}),i}function gk(e,t){const n=W(e,"real","complex"),s=W(t,"imag","complex");B(n.shape,s.shape,`real and imag shapes, ${n.shape} and ${s.shape}, must match in call to tf.complex().`);const i=a=>a.complex(n,s),o={real:n,imag:s};return G.runKernelFunc(i,o,null,rd)}const ji=z({complex_:gk});function Er(e,t,n,s){if(s==null&&(s=wa(e)),s==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!hn(e)&&!Array.isArray(e)&&typeof e!="number"&&typeof e!="boolean"&&typeof e!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(t!=null){zg(t);const i=P(t),o=P(n);A(i===o,()=>`Based on the provided shape, [${t}], the tensor should have ${i} values but has ${o}`);for(let a=0;a`Error creating a new Tensor. Inferred shape (${n}) does not match the provided shape (${t}). `)}}return!hn(e)&&!Array.isArray(e)&&(e=[e]),t=t||n,e=s!=="string"?Or(e,s):te(e,[],!0),G.makeTensor(e,t,s)}function sn(e,t,n){const s=Ii(e,n);return Er(e,t,s,n)}const By={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};const Md=4;async function My(e,t){const n=[],s=[],i=Array.isArray(e)?e.map(a=>a.name):Object.keys(e);for(let a=0;a{const b=await h.bytes(),w=b.reduce((v,N)=>v+N.length,0)+Md*b.length,L=new Uint8Array(w);let x=0;for(let v=0;v{if(t+=o.byteLength,n.push(o.byteLength===o.buffer.byteLength?o:new o.constructor(o)),!(o instanceof Float32Array||o instanceof Int32Array||o instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${o.constructor.name}`)});const s=new Uint8Array(t);let i=0;return n.forEach(o=>{s.set(new Uint8Array(o.buffer),i),i+=o.byteLength}),s.buffer}const Py=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function mT(e){return Py?Buffer.byteLength(e):new Blob([e]).size}function bk(e){if(Py)return Buffer.from(e).toString("base64");const t=new Uint8Array(e);let n="";for(let s=0,i=t.length;s{t+=i.byteLength});const n=new Uint8Array(t);let s=0;return e.forEach(i=>{n.set(new Uint8Array(i),s),s+=i.byteLength}),n.buffer}function fT(e){const t="/";for(e=e.trim();e.endsWith(t);)e=e.slice(0,e.length-1);const n=e.split(t);return n[n.length-1]}function nh(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:e.modelTopology==null?0:mT(JSON.stringify(e.modelTopology)),weightSpecsBytes:e.weightSpecs==null?0:mT(JSON.stringify(e.weightSpecs)),weightDataBytes:e.weightData==null?0:e.weightData.byteLength}}function Lk(){const e=n=>{let s=n<<13,i=0;for(;(s&8388608)===0;)i-=8388608,s<<=1;return s&=~8388608,i+=947912704,s|i},t=new Uint32Array(2048);t[0]=0;for(let n=1;n<1024;n++)t[n]=e(n);for(let n=1024;n<2048;n++)t[n]=939524096+(n-1024<<13);return t}function Sk(){const e=new Uint32Array(64);e[0]=0,e[31]=1199570944,e[32]=2147483648,e[63]=3347054592;for(let t=1;t<31;t++)e[t]=t<<23;for(let t=33;t<63;t++)e[t]=2147483648+(t-32<<23);return e}function Ik(){const e=new Uint32Array(64);for(let t=0;t<64;t++)e[t]=1024;return 
e[0]=e[32]=0,e}function xk(){const e=Lk(),t=Sk(),n=Ik();return s=>{const i=new ArrayBuffer(4*s.length),o=new Uint32Array(i);for(let a=0;a>10]+(c&1023)]+t[c>>10];o[a]=h}return new Float32Array(i)}}class en{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return en.instance==null&&(en.instance=new en),en.instance}static registerSaveRouter(e){en.getInstance().saveRouters.push(e)}static registerLoadRouter(e){en.getInstance().loadRouters.push(e)}static getSaveHandlers(e){return en.getHandlers(e,"save")}static getLoadHandlers(e,t){return en.getHandlers(e,"load",t)}static getHandlers(e,t,n){const s=[],i=t==="load"?en.getInstance().loadRouters:en.getInstance().saveRouters;return i.forEach(o=>{const a=o(e,n);a!==null&&s.push(a)}),s}}const Tk=e=>en.registerSaveRouter(e),Ak=e=>en.registerLoadRouter(e),zy=e=>en.getSaveHandlers(e),Vy=(e,t)=>en.getLoadHandlers(e,t);const Vd="tensorflowjs",Gy=1,Lo="models_store",Dr="model_info_store";async function bee(){const e=Yy();return new Promise((t,n)=>{const s=e.deleteDatabase(Vd);s.onsuccess=()=>t(),s.onerror=i=>n(i)})}function Yy(){if(!oe().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");const e=typeof window=="undefined"?self:window,t=e.indexedDB||e.mozIndexedDB||e.webkitIndexedDB||e.msIndexedDB||e.shimIndexedDB;if(t==null)throw new Error("The current browser does not appear to support IndexedDB.");return t}function Hy(e){const t=e.result;t.createObjectStore(Lo,{keyPath:"modelPath"}),t.createObjectStore(Dr,{keyPath:"modelPath"})}class So{constructor(e){if(this.indexedDB=Yy(),e==null||!e)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=e}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,e)}async load(){return this.databaseAction(this.modelPath)}databaseAction(e,t){return new Promise((n,s)=>{const i=this.indexedDB.open(Vd,Gy);i.onupgradeneeded=()=>Hy(i),i.onsuccess=()=>{const o=i.result;if(t==null){const a=o.transaction(Lo,"readonly"),c=a.objectStore(Lo),h=c.get(this.modelPath);h.onsuccess=()=>{if(h.result==null)return o.close(),s(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));n(h.result.modelArtifacts)},h.onerror=d=>(o.close(),s(h.error)),a.oncomplete=()=>o.close()}else{const a=nh(t),c=o.transaction(Dr,"readwrite");let h=c.objectStore(Dr);const d=h.put({modelPath:this.modelPath,modelArtifactsInfo:a});let m;d.onsuccess=()=>{m=o.transaction(Lo,"readwrite");const f=m.objectStore(Lo),b=f.put({modelPath:this.modelPath,modelArtifacts:t,modelArtifactsInfo:a});b.onsuccess=()=>n({modelArtifactsInfo:a}),b.onerror=w=>{h=c.objectStore(Dr);const L=h.delete(this.modelPath);L.onsuccess=()=>(o.close(),s(b.error)),L.onerror=x=>(o.close(),s(b.error))}},d.onerror=f=>(o.close(),s(d.error)),c.oncomplete=()=>{m==null?o.close():m.oncomplete=()=>o.close()}}},i.onerror=o=>s(i.error)})}}So.URL_SCHEME="indexeddb://";const gT=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(So.URL_SCHEME))?vk(e.slice(So.URL_SCHEME.length)):null;en.registerSaveRouter(gT),en.registerLoadRouter(gT);function vk(e){return new So(e)}function Nk(e){return e.startsWith(So.URL_SCHEME)?e.slice(So.URL_SCHEME.length):e}class Ck{constructor(){this.indexedDB=Yy()}async listModels(){return new Promise((e,t)=>{const 
n=this.indexedDB.open(Vd,Gy);n.onupgradeneeded=()=>Hy(n),n.onsuccess=()=>{const s=n.result,i=s.transaction(Dr,"readonly"),o=i.objectStore(Dr),a=o.getAll();a.onsuccess=()=>{const c={};for(const h of a.result)c[h.modelPath]=h.modelArtifactsInfo;e(c)},a.onerror=c=>(s.close(),t(a.error)),i.oncomplete=()=>s.close()},n.onerror=s=>t(n.error)})}async removeModel(e){return e=Nk(e),new Promise((t,n)=>{const s=this.indexedDB.open(Vd,Gy);s.onupgradeneeded=()=>Hy(s),s.onsuccess=()=>{const i=s.result,o=i.transaction(Dr,"readwrite"),a=o.objectStore(Dr),c=a.get(e);let h;c.onsuccess=()=>{if(c.result==null)return i.close(),n(new Error(`Cannot find model with path '${e}' in IndexedDB.`));{const d=a.delete(e),m=()=>{h=i.transaction(Lo,"readwrite");const f=h.objectStore(Lo),b=f.delete(e);b.onsuccess=()=>t(c.result.modelArtifactsInfo),b.onerror=w=>n(c.error)};d.onsuccess=m,d.onerror=f=>(m(),i.close(),n(c.error))}},c.onerror=d=>(i.close(),n(c.error)),o.oncomplete=()=>{h==null?i.close():h.oncomplete=()=>i.close()}},s.onerror=i=>n(s.error)})}}const xi="/",Io="tensorflowjs_models",yT="info",Rk="model_topology",Ok="weight_specs",Ek="weight_data",Dk="model_metadata";function wee(){if(!oe().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("purgeLocalStorageModels() cannot proceed because local storage is unavailable in the current environment.");const e=window.localStorage,t=[];for(let n=0;ni.length){e.removeItem(s);const o=wT(s);t.indexOf(o)===-1&&t.push(o)}}return t}function bT(e){return{info:[Io,e,yT].join(xi),topology:[Io,e,Rk].join(xi),weightSpecs:[Io,e,Ok].join(xi),weightData:[Io,e,Ek].join(xi),modelMetadata:[Io,e,Dk].join(xi)}}function wT(e){const t=e.split(xi);if(t.length<3)throw new Error(`Invalid key format: ${e}`);return t.slice(1,t.length-1).join(xi)}function kk(e){return e.startsWith(xo.URL_SCHEME)?e.slice(xo.URL_SCHEME.length):e}class xo{constructor(e){if(!oe().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,e==null||!e)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=e,this.keys=bT(this.modelPath)}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{const t=JSON.stringify(e.modelTopology),n=JSON.stringify(e.weightSpecs),s=nh(e);try{return this.LS.setItem(this.keys.info,JSON.stringify(s)),this.LS.setItem(this.keys.topology,t),this.LS.setItem(this.keys.weightSpecs,n),this.LS.setItem(this.keys.weightData,bk(e.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata})),{modelArtifactsInfo:s}}catch(i){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${s.modelTopologyBytes}, weightSpecsBytes=${s.weightSpecsBytes}, weightDataBytes=${s.weightDataBytes}.`)}}}async load(){const e=JSON.parse(this.LS.getItem(this.keys.info));if(e==null)throw new Error(`In local storage, there is no model with name 
'${this.modelPath}'`);if(e.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");const t={},n=JSON.parse(this.LS.getItem(this.keys.topology));if(n==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);t.modelTopology=n;const s=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(s==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);t.weightSpecs=s;const i=this.LS.getItem(this.keys.modelMetadata);if(i!=null){const a=JSON.parse(i);t.format=a.format,t.generatedBy=a.generatedBy,t.convertedBy=a.convertedBy,t.userDefinedMetadata=a.userDefinedMetadata}const o=this.LS.getItem(this.keys.weightData);if(o==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return t.weightData=wk(o),t}}xo.URL_SCHEME="localstorage://";const LT=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(xo.URL_SCHEME))?Fk(e.slice(xo.URL_SCHEME.length)):null;en.registerSaveRouter(LT),en.registerLoadRouter(LT);function Fk(e){return new xo(e)}class _k{constructor(){A(oe().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),A(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){const e={},t=Io+xi,n=xi+yT;for(let s=0;s"scheme must not be undefined or null."),e.endsWith(Ea)&&(e=e.slice(0,e.indexOf(Ea))),A(e.length>0,()=>"scheme must not be an empty string.");const n=Ss.getInstance();A(n.managers[e]==null,()=>`A model store manager is already registered for scheme '${e}'.`),n.managers[e]=t}static getManager(e){const t=this.getInstance().managers[e];if(t==null)throw new Error(`Cannot find model manager for scheme '${e}'`);return t}static getSchemes(){return Object.keys(this.getInstance().managers)}}function Gd(e){if(e.indexOf(Ea)===-1)throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${Ss.getSchemes().join(",")}`);return{scheme:e.split(Ea)[0],path:e.split(Ea)[1]}}async function ST(e,t,n=!1){A(e!==t,()=>`Old path and new path are the same: '${e}'`);const s=en.getLoadHandlers(e);A(s.length>0,()=>`Copying failed because no load handler is found for source URL ${e}.`),A(s.length<2,()=>`Copying failed because more than one (${s.length}) load handlers for source URL ${e}.`);const i=s[0],o=en.getSaveHandlers(t);A(o.length>0,()=>`Copying failed because no save handler is found for destination URL ${t}.`),A(o.length<2,()=>`Copying failed because more than one (${s.length}) save handlers for destination URL ${t}.`);const a=o[0],c=Gd(e).scheme,h=Gd(e).path,d=c===Gd(e).scheme,m=await i.load();n&&d&&await Ss.getManager(c).removeModel(h);const f=await a.save(m);return n&&!d&&await Ss.getManager(c).removeModel(h),f.modelArtifactsInfo}async function Wk(){const e=Ss.getSchemes(),t={};for(const n of e){const s=await Ss.getManager(n).listModels();for(const i in s){const o=n+Ea+i;t[o]=s[i]}}return t}async function $k(e){const t=Gd(e),n=Ss.getManager(t.scheme);return n.removeModel(t.path)}async function Uk(e,t){const n=!1;return ST(e,t,n)}async function Bk(e,t){const n=!0;return ST(e,t,n)}class Mk{fetch(e,t){return fetch(e,t)}now(){return performance.now()}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${t}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(e)}decode(e,t){return new TextDecoder(t).decode(e)}}if(oe().get("IS_BROWSER")){oe().setPlatform("browser",new Mk);try{Ss.registerManager(xo.URL_SCHEME,new _k)}catch(e){}try{Ss.registerManager(So.URL_SCHEME,new Ck)}catch(e){}}const Pk={importFetch:()=>ZC()};let Da;function Lee(){Da=null}function See(e){Da=e}function Iee(){return Da}class zk{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(e,t){return oe().global.fetch!=null?oe().global.fetch(e,t):(Da==null&&(Da=Pk.importFetch()),Da(e,t))}now(){const e=process.hrtime();return e[0]*1e3+e[1]/1e6}encode(e,t){if(t!=="utf-8"&&t!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${t}`);return this.textEncoder.encode(e)}decode(e,t){return e.length===0?"":new this.util.TextDecoder(t).decode(e)}}oe().get("IS_NODE")&&oe().setPlatform("node",new zk);function wt(e,t="float32",n){return t=t||"float32",zg(e),new an(e,t,n)}function Vk(e,t){const n=W(e,"x","cast");if(!Cr(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if(t==="string"&&n.dtype!=="string"||t!=="string"&&n.dtype==="string")throw new Error("Only strings can be casted to strings");const s={x:n},i={dtype:t};return G.runKernelFunc(o=>o.cast(n,t),s,null,Sa,i)}const Ae=z({cast_:Vk});function Gk(e){const t=W(e,"x","clone",null),n=()=>G.makeTensorFromDataId(t.dataId,t.shape,t.dtype),s={x:t};return G.runKernelFunc(n,s,null,xl)}const kr=z({clone_:Gk});function IT(e,t=!1){console.log(e.toString(t))}lT();const Yk={buffer:wt,cast:Ae,clone:kr,print:IT};ak(Yk);const Hk="model",qk=".json",jk=".weights.bin";function xT(e){return new Promise(t=>setTimeout(t)).then(e)}class ka{constructor(e){if(!oe().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");e.startsWith(ka.URL_SCHEME)&&(e=e.slice(ka.URL_SCHEME.length)),(e==null||e.length===0)&&(e=Hk),this.modelTopologyFileName=e+qk,this.weightDataFileName=e+jk}async save(e){if(typeof document=="undefined")throw new Error("Browser downloads are not 
supported in this environment since `document` is not present");const t=window.URL.createObjectURL(new Blob([e.weightData],{type:"application/octet-stream"}));if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{const n=[{paths:["./"+this.weightDataFileName],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,weightsManifest:n},i=window.URL.createObjectURL(new Blob([JSON.stringify(s)],{type:"application/json"})),o=this.jsonAnchor==null?document.createElement("a"):this.jsonAnchor;if(o.download=this.modelTopologyFileName,o.href=i,await xT(()=>o.dispatchEvent(new MouseEvent("click"))),e.weightData!=null){const a=this.weightDataAnchor==null?document.createElement("a"):this.weightDataAnchor;a.download=this.weightDataFileName,a.href=t,await xT(()=>a.dispatchEvent(new MouseEvent("click")))}return{modelArtifactsInfo:nh(e)}}}}ka.URL_SCHEME="downloads://";class Kk{constructor(e){if(e==null||e.length<1)throw new Error(`When calling browserFiles, at least 1 file is required, but received ${e}`);this.files=e}async load(){const e=this.files[0],t=this.files.slice(1);return new Promise((n,s)=>{const i=new FileReader;i.onload=o=>{const a=JSON.parse(o.target.result),c=a.modelTopology;if(c==null){s(new Error(`modelTopology field is missing from file ${e.name}`));return}t.length===0&&n({modelTopology:c});const h=a.weightsManifest;if(h==null){s(new Error(`weightManifest field is missing from file ${e.name}`));return}let d;try{d=this.checkManifestAndWeightFiles(h,t)}catch(w){s(w);return}const m=[],f=[],b=[];h.forEach(w=>{w.paths.forEach(L=>{f.push(L),b.push(null)}),m.push(...w.weights)}),h.forEach(w=>{w.paths.forEach(L=>{const x=new FileReader;x.onload=v=>{const N=v.target.result,O=f.indexOf(L);b[O]=N,b.indexOf(null)===-1&&n({modelTopology:c,weightSpecs:m,weightData:zd(b),format:a.format,generatedBy:a.generatedBy,convertedBy:a.convertedBy,userDefinedMetadata:a.userDefinedMetadata})},x.onerror=v=>s(`Failed to weights data from file of path '${L}'.`),x.readAsArrayBuffer(d[L])})})},i.onerror=o=>s(`Failed to read model topology and weights manifest JSON from file '${e.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`),i.readAsText(e)})}checkManifestAndWeightFiles(e,t){const n=[],s=t.map(o=>fT(o.name)),i={};for(const o of e)o.paths.forEach(a=>{const c=fT(a);if(n.indexOf(c)!==-1)throw new Error(`Duplicate file basename found in weights manifest: '${c}'`);if(n.push(c),s.indexOf(c)===-1)throw new Error(`Weight file with basename '${c}' is not provided.`);i[a]=t[s.indexOf(c)]});if(n.length!==t.length)throw new Error(`Mismatch in the number of files in weights manifest (${n.length}) and the number of weight files provided (${t.length}).`);return i}}const Xk=e=>oe().getBool("IS_BROWSER")&&(!Array.isArray(e)&&e.startsWith(ka.URL_SCHEME))?Jk(e.slice(ka.URL_SCHEME.length)):null;en.registerSaveRouter(Xk);function Jk(e="model"){return new ka(e)}function Zk(e){return new Kk(e)}function TT(e,t,n,s){a(e),n=n==null?0:n,s=s==null?1:s,c(n,s);let i=0;const o=h=>(h.then(d=>{const m=n+ ++i/e.length*(s-n);return t(m),d}),h);function a(h){A(h!=null&&Array.isArray(h)&&h.length>0,()=>"promises must be a none empty array")}function c(h,d){A(h>=0&&h<=1,()=>`Progress fraction must be in range [0, 1], but got startFraction ${h}`),A(d>=0&&d<=1,()=>`Progress fraction must be in range [0, 1], but got endFraction ${d}`),A(d>=h,()=>`startFraction must be no more than endFraction, but got startFraction ${h} and endFraction ${d}`)}return Promise.all(e.map(o))}async function AT(e,t){t==null&&(t={});const n=t.fetchFunc==null?oe().platform.fetch:t.fetchFunc,s=e.map(f=>n(f,t.requestInit,{isBinary:!0})),i=0,o=.5,a=t.onProgress==null?await Promise.all(s):await TT(s,t.onProgress,i,o),c=a.map(f=>f.arrayBuffer()),h=.5,d=1,m=t.onProgress==null?await Promise.all(c):await TT(c,t.onProgress,h,d);return m}async function vT(e,t="",n,s){const i=a=>AT(a,{requestInit:s}),o=NT(i);return o(e,t,n)}function NT(e){return async(t,n="",s)=>{const i=t.map(()=>!1),o={},a=s!=null?s.map(()=>!1):[],c=[];if(t.forEach((w,L)=>{let x=0;w.weights.forEach(v=>{const N="quantization"in v?v.quantization.dtype:v.dtype,O=By[N]*P(v.shape),E=()=>{i[L]=!0,o[L]==null&&(o[L]=[]),o[L].push({manifestEntry:v,groupOffset:x,sizeBytes:O})};s!=null?s.forEach((k,F)=>{k===v.name&&(E(),a[F]=!0)}):E(),c.push(v.name),x+=O})}),!a.every(w=>w)){const w=s.filter((L,x)=>!a[x]);throw new Error(`Could not find weights in manifest with names: ${w.join(", ")}. 
-Manifest JSON has weights with names: ${c.join(", ")}.`)}const h=i.reduce((w,L,x)=>(L&&w.push(x),w),[]),d=[];h.forEach(w=>{t[w].paths.forEach(L=>{const x=n+(n.endsWith("/")?"":"/")+L;d.push(x)})});const m=await e(d),f={};let b=0;return h.forEach(w=>{const L=t[w].paths.length;let x=0;for(let k=0;k{const F=v.slice(k.groupOffset,k.groupOffset+k.sizeBytes),U=Pd(F,[k.manifestEntry]);for(const $ in U)f[$]=U[$]}),b+=L}),f}}const Qk="application/octet-stream",eF="application/json";class qy{constructor(e,t){if(this.DEFAULT_METHOD="POST",t==null&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,t.fetchFunc!=null?(A(typeof t.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=t.fetchFunc):this.fetch=oe().platform.fetch,A(e!=null&&e.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(e)&&A(e.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`),this.path=e,t.requestInit!=null&&t.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],s={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata,weightsManifest:n};t.body.append("model.json",new Blob([JSON.stringify(s)],{type:eF}),"model.json"),e.weightData!=null&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:Qk}),"model.weights.bin");const i=await this.fetch(this.path,t);if(i.ok)return{modelArtifactsInfo:nh(e),responses:[i]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${i.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(b){let w=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?w+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":w+=" Please make sure the server is serving valid JSON for this request.",new Error(w)}const n=t.modelTopology,s=t.weightsManifest,i=t.generatedBy,o=t.convertedBy,a=t.format,c=t.userDefinedMetadata;if(n==null&&s==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let h,d;if(s!=null){const b=await this.loadWeights(s);[h,d]=b}const m={modelTopology:n,weightSpecs:h,weightData:d,userDefinedMetadata:c,generatedBy:i,convertedBy:o,format:a},f=t.modelInitializer;return f&&(m.modelInitializer=f),m}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,s]=tF(t),i=this.weightPathPrefix||n,o=[];for(const d of e)o.push(...d.weights);const a=[],c=[];for(const d of e)for(const m of d.paths)this.weightUrlConverter!=null?c.push(this.weightUrlConverter(m)):a.push(i+m+s);this.weightUrlConverter&&a.push(...await Promise.all(c));const h=await AT(a,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[o,zd(h)]}}qy.URL_SCHEME_REGEX=/^https?:\/\//;function tF(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?"),s=e.substring(0,t),i=n>t?e.substring(n):"";return[s+"/",i]}function jy(e){return e.match(qy.URL_SCHEME_REGEX)!=null}const CT=(e,t)=>{if(typeof fetch=="undefined"&&(t==null||t.fetchFunc==null))return null;{let n=!0;if(Array.isArray(e)?n=e.every(s=>jy(s)):n=jy(e),n)return Ky(e,t)}return null};en.registerSaveRouter(CT),en.registerLoadRouter(CT);function Ky(e,t){return new qy(e,t)}function Yd(e,t){return Ky(e,t)}class Xy{constructor(e){this.modelArtifacts=e}async load(){return this.modelArtifacts}}class nF{constructor(e){this.saveHandler=e}async save(e){return this.saveHandler(e)}}function sF(e,t,n,s){if(arguments.length===1){const i=e.modelTopology!=null||e.weightSpecs!=null;return i?new Xy(e):(console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Xy({modelTopology:e}))}else return console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Xy({modelTopology:e,weightSpecs:t,weightData:n,trainingConfig:s})}function iF(e){return new nF(e)}var rF=Object.freeze({__proto__:null,browserFiles:Zk,browserHTTPRequest:Yd,concatenateArrayBuffers:zd,decodeWeights:Pd,encodeWeights:My,fromMemory:sF,getLoadHandlers:Vy,getModelArtifactsInfoForJSON:nh,getSaveHandlers:zy,http:Ky,isHTTPScheme:jy,loadWeights:vT,registerLoadRouter:Ak,registerSaveRouter:Tk,weightsLoaderFactory:NT,withSaveHandler:iF,copyModel:Uk,listModels:Wk,moveModel:Bk,removeModel:$k});function oF(e,t){const n=W(e,"x","reshape",null),s={x:n},i={shape:t},o=(a,c)=>(t=Vt(t,n.size),A(n.size===P(t),()=>"new shape and old shape must have the same number of elements."),c([n]),a.reshape(n,t));return G.runKernelFunc(o,s,null,_l,i)}const K=z({reshape_:oF});function aF(e,t,n=!1,s=!1){let i=W(e,"a","matMul"),o=W(t,"b","matMul");[i,o]=Gt(i,o);const a=(d,m)=>{m([i,o]);const f=n?i.shape[i.rank-2]:i.shape[i.rank-1],b=s?o.shape[o.rank-1]:o.shape[o.rank-2],w=n?i.shape[i.rank-1]:i.shape[i.rank-2],L=s?o.shape[o.rank-2]:o.shape[o.rank-1],x=i.shape.slice(0,-2),v=o.shape.slice(0,-2),N=P(x),O=P(v),E=N===O||N===1||O===1;A(i.rank>=2&&o.rank>=2&&E,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${x}) and (${v}).`),A(f===b,()=>`Error in matMul: inner shapes (${f}) and (${b}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${n} and transposeB=${s} must match.`);const k=N>O?x:v,F=k.concat([w,L]),U=n?K(i,[N,f,w]):K(i,[N,w,f]),$=s?K(o,[O,L,b]):K(o,[O,b,L]),Y=d.batchMatMul(U,$,n,s);return K(Y,F)},c={a:i,b:o},h={transposeA:n,transposeB:s};return G.runKernelFunc(a,c,null,id,h)}const ct=z({matMul_:aF});function cF(e,t,n=1,s=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);const i=W(e,"indices","oneHot","int32"),o=[...i.shape,t],a=(d,m)=>(m([i]),K(d.oneHot(K(i,[i.size]),t,n,s),o)),c={indices:i},h={depth:t,onValue:n,offValue:s};return G.runKernelFunc(a,c,null,yy,h)}const To=z({oneHot_:cF});function lF(e,t){const n=W(e,"x","transpose");if(t==null&&(t=n.shape.map((o,a)=>a).reverse()),A(n.rank===t.length,()=>`Error in transpose: rank of input ${n.rank} must match length of perm ${t}.`),t.forEach(o=>{A(o>=0&&o`All entries in 'perm' must be between 0 and ${n.rank-1} but got ${t}`)}),n.rank<=1)return n.clone();const s={x:n},i={perm:t};return G.runKernelFunc(o=>o.transpose(n,t),s,null,Hl,i)}const Ye=z({transpose_:lF});function hF(e,t,n){const s=W(e,"labels","confusionMatrix"),i=W(t,"predictions","confusionMatrix");A(n==null||n>0&&Number.isInteger(n),()=>`If provided, numClasses must be a positive integer, but got ${n}`),A(s.rank===1,()=>`Expected the rank of labels to be 1, but got ${s.rank}`),A(i.rank===1,()=>`Expected the rank of predictions to be 1, but got ${i.rank}`),A(s.shape[0]===i.shape[0],()=>`Mismatch in the number of examples: ${s.shape[0]} vs. ${i.shape[0]}. 
Labels and predictions should have the same number of elements.`),A(n>0&&Number.isInteger(n),()=>`numClasses is required to be a positive integer, but got ${n}`);const o=To(Ae(s,"int32"),n),a=To(Ae(i,"int32"),n),c=Ye(o),h=ct(c,a);return Ae(h,"int32")}const uF=z({confusionMatrix_:hF});var dF=Object.freeze({__proto__:null,confusionMatrix:uF});function RT(e,t,n){if(ne(e),t!=null&&t.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const s=Ii(e,n);if(s.length!==3&&s.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}let Fa;function pF(e,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(e==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let n=!1,s=!1,i=!1,o=!1,a=!1;if(e.data instanceof Uint8Array)n=!0;else if(typeof ImageData!="undefined"&&e instanceof ImageData)s=!0;else if(typeof HTMLVideoElement!="undefined"&&e instanceof HTMLVideoElement)i=!0;else if(typeof HTMLImageElement!="undefined"&&e instanceof HTMLImageElement)o=!0;else if(e.getContext!=null)a=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${e.constructor.name}`);if(i){const w=2;if(i&&e.readyState element.")}const c=Oy(Rd,G.backendName);if(c!=null){const w={pixels:e},L={numChannels:t};return G.runKernel(Rd,w,L)}const[h,d]=i?[e.videoWidth,e.videoHeight]:[e.width,e.height];let m;a?m=e.getContext("2d").getImageData(0,0,h,d).data:s||n?m=e.data:(o||i)&&(Fa==null&&(Fa=document.createElement("canvas").getContext("2d")),Fa.canvas.width=h,Fa.canvas.height=d,Fa.drawImage(e,0,0,h,d),m=Fa.getImageData(0,0,h,d).data);let f;if(t===4)f=new Int32Array(m);else{const w=h*d;f=new Int32Array(w*t);for(let L=0;L4||o===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${o}`);if(n.dtype!=="float32"&&n.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${n.dtype}. Please use float32 or int32 tensors.`);const a=await n.data(),c=n.dtype==="float32"?255:1,h=new Uint8ClampedArray(i*s*4);for(let d=0;d1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${w}.`)}else if(n.dtype==="int32"&&(w<0||w>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${w}.`);o===1?(m[0]=w*c,m[1]=w*c,m[2]=w*c):m[b]=w*c}const f=d*4;h[f+0]=Math.round(m[0]),h[f+1]=Math.round(m[1]),h[f+2]=Math.round(m[2]),h[f+3]=Math.round(m[3])}if(t!=null){t.width=i,t.height=s;const d=t.getContext("2d"),m=new ImageData(h,i,s);d.putImageData(m,0,0)}return n!==e&&n.dispose(),h}const OT=z({fromPixels_:pF});var fF=Object.freeze({__proto__:null,toPixels:mF,fromPixels:OT});function Hd(e,t){if(e.rank<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${e.rank}.`);if(t.rank<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(t.dtype!=="int32")throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[t.rank-1]>e.rank)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[t.rank-1]} vs. 
${e.rank}`);if(e.size===0)throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${e.shape}.`);const n=t.shape,s=n[n.length-1];let i=1;for(let d=0;dd/c),1].slice(0,s);return[a,i,c,h]}var gF=Object.freeze({__proto__:null,prepareAndValidate:Hd});function Jy(e,t,n){const s=t.rank>1?t.shape[t.rank-1]:1,i=t.rank>1?t.rank-1:1,o=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${n.shape}, indices.shape: ${t.shape}, shape: ${e}, sliceDim: ${s}, and batchDim: ${i}.`;if(n.rank1?t.shape[s-1]:1,o=n.length;let a=1;for(let f=i;f`Error in slice${s}D: Length of begin ${t} must match the rank of the array (${s}).`),A(s===n.length,()=>`Error in slice${s}D: Length of size ${n} must match the rank of the array (${s}).`);for(let i=0;i`Error in slice${s}D: begin[${i}] + size[${i}] (${t[i]+n[i]}) would overflow input.shape[${i}] (${e.shape[i]})`)}function qd(e){const t=[];let n=0;for(;e>0;)e&1&&t.push(n),e/=2,n++;return t}function jd(e,t,n){const s=[];for(let i=0;i0){const w=t[0],L=n+1;m=_T(a,w,L,s,e),f=WT(c,w,L,i,e),b=ET(o,w,L,e)}else for(let w=0;w-1)o[c]=0;else{const h=DT(t,n,c);let d=s[h];e&1<-1)o[c]=Number.MAX_SAFE_INTEGER;else{const h=DT(t,n,c);let d=s[h];e&1<0?a=Number.MIN_SAFE_INTEGER:a=Number.MAX_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),a=S(0,a,h-1),a}function BT(e,t,n,s,i,o){let a=t[i];const c=n[i]||1;(e&1<0?a=Number.MAX_SAFE_INTEGER:a=Number.MIN_SAFE_INTEGER);const h=s[i];return a<0&&(a+=h),c>0?a=S(0,a,h):a=S(-1,a,h-1),a}function eb(e,t,n){let s=n.length;for(let i=0;i1){s=i;break}for(let i=s+1;i0||n[i]!==e[i])return!1;return!0}function tb(e,t){let n=e.length>0?e[e.length-1]:1;for(let s=0;s{A(a!==-1,()=>"slice() does not support negative begin indexing.")});let o;return n==null?o=new Array(i).fill(-1):typeof n=="number"?o=[n,...new Array(i-1).fill(-1)]:n.lengtha>=0?a:(A(a===-1,()=>`Negative size values should be exactly -1 but got ${a} for the slice() size at index ${c}.`),e.shape[c]-s[c])),[s,o]}var MT=Object.freeze({__proto__:null,assertParamsValid:Qy,maskToAxes:qd,computeOutShape:jd,stridesWithElidedDims:ET,getNormalizedAxes:FT,startIndicesWithElidedDims:_T,stopIndicesWithElidedDims:WT,stridesForAxis:$T,startForAxis:UT,stopForAxis:BT,isSliceContinous:eb,computeFlatOffset:tb,parseSliceParams:Kd});class Ao{getClassName(){return this.constructor.className}static fromConfig(e,t){return new e(t)}}class Ws{constructor(){this.classNameMap={}}static getMap(){return Ws.instance==null&&(Ws.instance=new Ws),Ws.instance}static register(e){Ws.getMap().classNameMap[e.className]=[e,e.fromConfig]}}function fe(e){A(e.className!=null,()=>"Class being registered does not have the static className property defined."),A(typeof e.className=="string",()=>"className is required to be a string, but got type "+typeof e.className),A(e.className.length>0,()=>"Class being registered has an empty-string as its className, which is disallowed."),Ws.register(e)}var bF=Object.freeze({__proto__:null,Serializable:Ao,SerializationMap:Ws,registerClass:fe});const wF=.001,PT=.1;function LF(e,t,n){return n==null&&(n=Xd()),nb(e,t,(s,i)=>ib(s,i,n))}function Xd(){return G.backend.floatPrecision()===32?wF:PT}function nb(e,t,n){let s=!0;if((hn(e)||hn(t))&&(s=!1),hn(e)&&hn(t)&&(s=!0),s){const a=e.constructor.name,c=t.constructor.name;if(a!==c)throw new Error(`Arrays are of different type. Actual: ${a}. Expected: ${c}`)}if(Array.isArray(e)&&Array.isArray(t)){const a=Ii(e),c=Ii(t);if(!ae(a,c))throw new Error(`Arrays have different shapes. Actual: [${a}]. 
Expected: [${c}]`)}const i=hn(e)?e:te(e),o=hn(t)?t:te(t);if(i.length!==o.length)throw new Error(`Arrays have different lengths actual: ${i.length} vs expected: ${o.length}. -Actual: ${i}. -Expected: ${o}.`);for(let a=0;at.fail(),()=>t())}function IF(e,t){const n=typeof t=="string"||typeof t=="number"||typeof t=="boolean"?[t]:t;return Yi(e)||Yi(e[0])||Yi(t)||Yi(t[0])?nb(e,n,(s,i)=>s==i):nb(e,t,(s,i)=>ib(s,i,0))}function sb(e,t,n){if(n==null&&(n=Xd()),!ib(e,t,n))throw new Error(`Numbers differ: actual === ${e}, expected === ${t}`)}function ib(e,t,n){return!isFinite(e)&&!isFinite(t)?!0:!(isNaN(e)||isNaN(t)||Math.abs(e-t)>n)}function xF(e,t,n){for(let s=0;sn)throw new Error(`Value out of range:${e[s]} low: ${t}, high: ${n}`)}function TF(e,t){expect(new Float32Array(e)).toEqual(new Float32Array(t))}var AF=Object.freeze({__proto__:null,TEST_EPSILON_FLOAT16:PT,expectArraysClose:LF,testEpsilon:Xd,expectPromiseToFail:SF,expectArraysEqual:IF,expectNumbersClose:sb,expectValuesInRange:xF,expectArrayBuffersEqual:TF});const zT="2.7.0";function vF(){oe().set("PROD",!0)}function NF(){oe().set("DEBUG",!0)}function CF(){oe().set("DEPRECATION_WARNINGS_ENABLED",!1),console.warn("TensorFlow.js deprecation warnings have been disabled.")}function un(e){oe().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(e+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}ck(un);function RF(){G.disposeVariables()}function Ki(){return G}function Jd(){return G.memory()}function OF(e){return G.profile(e)}function Q(e,t){return G.tidy(e,t)}function He(e){const t=Hi(e);t.forEach(n=>n.dispose())}function bn(e){return G.keep(e)}function EF(e){return G.time(e)}function VT(e){return G.setBackend(e)}function DF(){return G.ready()}function kF(){return G.backendName}function FF(e){G.removeBackend(e)}function _F(e){return G.findBackend(e)}function WF(e){return G.findBackendFactory(e)}function rb(e,t,n=1){return G.registerBackend(e,t,n)}function GT(){return G.backend}function $F(e,t){oe().setPlatform(e,t)}function UF(e,t){let n=W(e,"a","add"),s=W(t,"b","add");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.add(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,wo)}const be=z({add_:UF});function BF(e,t){let n=W(e,"a","floorDiv"),s=W(t,"b","floorDiv");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.floorDiv(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,sy)}const Zd=z({floorDiv_:BF});function MF(e,t){let n=W(e,"a","div"),s=W(t,"b","div");if([n,s]=Gt(n,s),n.dtype==="int32"&&s.dtype==="int32")return Zd(n,s);const i=(c,h)=>{const d=c.realDivide(n,s);return h([n,s]),d},o={a:n,b:s},a={};return G.runKernelFunc(i,o,null,xa,a)}const We=z({div_:MF});function PF(e,t){let n=W(e,"a","mul"),s=W(t,"b","mul");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.multiply(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Ta)}const X=z({mul_:PF});function zF(e){const t=W(e,"x","abs"),n={x:t};return G.runKernelFunc((s,i)=>(i([t]),t.dtype==="complex64"?s.complexAbs(t):s.abs(t)),n,null,td)}const dn=z({abs_:zF});function VF(e){const t=W(e,"x","acos"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.acos(t);return i([t]),o},n,null,ol)}const ob=z({acos_:VF});function GF(e){const t=W(e,"x","acosh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.acosh(t);return i([t]),o},n,null,al)}const ab=z({acosh_:GF});function YF(e){A(Array.isArray(e),()=>"The argument passed to tf.addN() must be a list of tensors"),A(e.length>=1,()=>`Must pass at least one tensor to tf.addN(), but got ${e.length}`);const 
t=e.map((o,a)=>W(o,`tensors${a}`,"addN")),n=t[0];t.forEach(o=>{if(o.dtype!==n.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")}),t.forEach(o=>{if(!ae(o.shape,n.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")});const s=(o,a)=>{const c=o.addN(t);return a(t),c},i=t;return G.runKernelFunc(s,i,null,Gg)}const YT=z({addN_:YF});function cb(e,t){for(let n=0;ne[o]);return[n,i]}function vn(e,t){const n=t.map(s=>1);return HT(e,n,t)}function Kn(e,t,n){A(cb(t,n),()=>`${e} supports only inner-most axes for now. Got axes ${t} and rank-${n} input.`)}function Xn(e,t){if(cb(e,t))return null;const n=[];for(let s=0;sn.push(s)),n}function sh(e){return e.map((t,n)=>[n,t]).sort((t,n)=>t[1]-n[1]).map(t=>t[0])}function as(e,t){const n=[];for(let s=t-e;s{const h=qe(t,s.shape);let d=h;const m=Xn(d,s.rank);m!=null&&(s=Ye(s,m),d=as(d.length,s.rank));const f=c.all(s,d);if(n){const b=vn(f.shape,h);return K(f,b)}return f},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Rx,a)}const Qd=z({all_:HF});function qF(e,t=null,n=!1){let s=W(e,"x","any","bool");const i=c=>{const h=qe(t,s.shape);let d=h;const m=Xn(d,s.rank);m!=null&&(s=Ye(s,m),d=as(d.length,s.rank));const f=c.any(s,d);if(n){const b=vn(f.shape,h);return K(f,b)}return f},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Ox,a)}const ih=z({any_:qF});function jF(e,t=0){let n=W(e,"x","argMax");const s=(a,c)=>{c([n]);let h=qe(t,n.shape);const d=Xn(h,n.rank);return d!=null&&(n=Ye(n,d),h=as(h.length,n.rank)),a.argMax(n,h[0])},i={x:n},o={axis:t};return G.runKernelFunc(s,i,null,Yg,o)}const rh=z({argMax_:jF});function KF(e,t=0){let n=W(e,"x","argMin");const s=(a,c)=>{c([n]),t==null&&(t=0);let h=qe(t,n.shape);const d=Xn(h,n.rank);return d!=null&&(n=Ye(n,d),h=as(h.length,n.rank)),a.argMin(n,h[0])},i={x:n},o={axis:t};return G.runKernelFunc(s,i,null,Hg,o)}const lb=z({argMin_:KF});function XF(e){const t=W(e,"x","asin"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.asin(t);return i([t]),o},n,null,cl)}const hb=z({asin_:XF});function JF(e){const t=W(e,"x","asinh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.asinh(t);return i([t]),o},n,null,ll)}const ub=z({asinh_:JF});function ZF(e){const t=W(e,"x","atan"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.atan(t);return i([t]),o},n,null,hl)}const db=z({atan_:ZF});function QF(e,t){let n=W(e,"a","atan2"),s=W(t,"b","atan2");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.atan2(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,nd)}const pb=z({atan2_:QF});function e_(e){const t=W(e,"x","atanh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.atanh(t);return i([t]),o},n,null,ul)}const mb=z({atanh_:e_});function ep(e,t,n,s,i="NHWC",o){const a=e[3],c=[...t,a],h=Wr(i);return kn(e,c,n,o,s,null,null,h)}function Un(e,t,n,s,i,o,a="channelsLast"){const[c,h]=tp(t);let d;if(a==="channelsLast")d=[c,h,e[3],e[3]];else if(a==="channelsFirst")d=[c,h,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return kn(e,d,n,s,i,o,!1,a)}function oh(e,t,n,s,i,o,a="NDHWC"){const[c,h,d]=gb(t);let m,f;if(a==="NDHWC")f="channelsLast",m=[c,h,d,e[4],e[4]];else if(a==="NCDHW")f="channelsFirst",m=[c,h,d,e[1],e[1]];else throw new Error(`Unknown dataFormat ${a}`);return Fr(e,m,n,s,i,!1,f,o)}function kn(e,t,n,s,i,o,a=!1,c="channelsLast"){let[h,d,m,f]=[-1,-1,-1,-1];if(c==="channelsLast")[h,d,m,f]=e;else if(c==="channelsFirst")[h,f,d,m]=e;else throw new Error(`Unknown dataFormat 
${c}`);const[b,w,,L]=t,[x,v]=tp(n),[N,O]=tp(s),E=Wa(b,N),k=Wa(w,O),{padInfo:F,outHeight:U,outWidth:$}=s_(i,d,m,x,v,E,k,o,c),Y=a?L*f:L;let j;return c==="channelsFirst"?j=[h,Y,U,$]:c==="channelsLast"&&(j=[h,U,$,Y]),{batchSize:h,dataFormat:c,inHeight:d,inWidth:m,inChannels:f,outHeight:U,outWidth:$,outChannels:Y,padInfo:F,strideHeight:x,strideWidth:v,filterHeight:b,filterWidth:w,effectiveFilterHeight:E,effectiveFilterWidth:k,dilationHeight:N,dilationWidth:O,inShape:e,outShape:j,filterShape:t}}function Fr(e,t,n,s,i,o=!1,a="channelsLast",c){let[h,d,m,f,b]=[-1,-1,-1,-1,-1];if(a==="channelsLast")[h,d,m,f,b]=e;else if(a==="channelsFirst")[h,b,d,m,f]=e;else throw new Error(`Unknown dataFormat ${a}`);const[w,L,x,,v]=t,[N,O,E]=gb(n),[k,F,U]=gb(s),$=Wa(w,k),Y=Wa(L,F),j=Wa(x,U),{padInfo:Z,outDepth:ie,outHeight:de,outWidth:he}=i_(i,d,m,f,N,O,E,$,Y,j,c),ue=o?v*b:v;let me;return a==="channelsFirst"?me=[h,ue,ie,de,he]:a==="channelsLast"&&(me=[h,ie,de,he,ue]),{batchSize:h,dataFormat:a,inDepth:d,inHeight:m,inWidth:f,inChannels:b,outDepth:ie,outHeight:de,outWidth:he,outChannels:ue,padInfo:Z,strideDepth:N,strideHeight:O,strideWidth:E,filterDepth:w,filterHeight:L,filterWidth:x,effectiveFilterDepth:$,effectiveFilterHeight:Y,effectiveFilterWidth:j,dilationDepth:k,dilationHeight:F,dilationWidth:U,inShape:e,outShape:me,filterShape:t}}function t_(e,t,n,s,i){s==null&&(s=fb(e,t,n));const o=e[0],a=e[1],c=vo((o-t+2*s)/n+1,i);A(Le(c),()=>`The output # of rows (${c}) must be an integer. Change the stride and/or zero pad parameters`);const h=vo((a-t+2*s)/n+1,i);return A(Le(h),()=>`The output # of columns (${h}) must be an integer. Change the stride and/or zero pad parameters`),[c,h]}function n_(e,t,n,s,i,o){i==null&&(i=fb(e,t,s));const a=e[0],c=e[1],h=e[2],d=vo((a-t+2*i)/s+1,o);A(Le(d),()=>`The output # of depths (${d}) must be an integer. Change the stride and/or zero pad parameters`);const m=vo((c-t+2*i)/s+1,o);A(Le(m),()=>`The output # of rows (${m}) must be an integer. Change the stride and/or zero pad parameters`);const f=vo((h-t+2*i)/s+1,o);return A(Le(f),()=>`The output # of columns (${f}) must be an integer. 
Change the stride and/or zero pad parameters`),[d,m,f,n]}function fb(e,t,n,s=1){const i=Wa(t,s);return Math.floor((e[0]*(n-1)-n+i)/2)}function tp(e){return typeof e=="number"?[e,e,e]:e.length===2?[e[0],e[1],1]:e}function gb(e){return typeof e=="number"?[e,e,e]:e}function Wa(e,t){return t<=1?e:e+(e-1)*(t-1)}function s_(e,t,n,s,i,o,a,c,h){let d,m,f;if(typeof e=="number"){const b=e===0?"VALID":"NUMBER";d={top:e,bottom:e,left:e,right:e,type:b};const w=t_([t,n],o,s,e,c);m=w[0],f=w[1]}else if(e==="same"){m=Math.ceil(t/s),f=Math.ceil(n/i);const b=Math.max(0,(m-1)*s+o-t),w=Math.max(0,(f-1)*i+a-n),L=Math.floor(b/2),x=b-L,v=Math.floor(w/2),N=w-v;d={top:L,bottom:x,left:v,right:N,type:"SAME"}}else if(e==="valid")d={top:0,bottom:0,left:0,right:0,type:"VALID"},m=Math.ceil((t-o+1)/s),f=Math.ceil((n-a+1)/i);else if(typeof e=="object"){const b=h==="channelsLast"?e[1][0]:e[2][0],w=h==="channelsLast"?e[1][1]:e[2][1],L=h==="channelsLast"?e[2][0]:e[3][0],x=h==="channelsLast"?e[2][1]:e[3][1],v=b===0&&w===0&&L===0&&x===0?"VALID":"EXPLICIT";d={top:b,bottom:w,left:L,right:x,type:v},m=vo((t-o+b+w)/s+1,c),f=vo((n-a+L+x)/i+1,c)}else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:d,outHeight:m,outWidth:f}}function i_(e,t,n,s,i,o,a,c,h,d,m){let f,b,w,L;if(typeof e=="number"){const x=e===0?"VALID":"NUMBER";f={top:e,bottom:e,left:e,right:e,front:e,back:e,type:x};const v=n_([t,n,s,1],c,1,i,e,m);b=v[0],w=v[1],L=v[2]}else if(e==="same"){b=Math.ceil(t/i),w=Math.ceil(n/o),L=Math.ceil(s/a);const x=(b-1)*i+c-t,v=(w-1)*o+h-n,N=(L-1)*a+d-s,O=Math.floor(x/2),E=x-O,k=Math.floor(v/2),F=v-k,U=Math.floor(N/2),$=N-U;f={top:k,bottom:F,left:U,right:$,front:O,back:E,type:"SAME"}}else if(e==="valid")f={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},b=Math.ceil((t-c+1)/i),w=Math.ceil((n-h+1)/o),L=Math.ceil((s-d+1)/a);else throw Error(`Unknown padding parameter: ${e}`);return{padInfo:f,outDepth:b,outHeight:w,outWidth:L}}function vo(e,t){if(!t)return e;switch(t){case"round":return Math.round(e);case"ceil":return Math.ceil(e);case"floor":return Math.floor(e);default:throw new Error(`Unknown roundingMode ${t}`)}}function _r(e){const[t,n,s]=tp(e);return t===1&&n===1&&s===1}function cn(e,t){return _r(e)||_r(t)}function Wr(e){if(e==="NHWC")return"channelsLast";if(e==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${e}`)}function r_(e,t,n,s,i){const o=W(e,"x","avgPool","float32"),a=1;A(cn(n,a),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`);let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(c.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${c.rank}.`),i!=null&&A(Le(s),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const x=Un(c.shape,t,n,1,s,i);return L([c]),x.filterWidth===1&&x.filterHeight===1&&ae(x.inShape,x.outShape)?c.clone():w.avgPool(c,x)},m={x:c},f={filterSize:t,strides:n,pad:s,dimRoundingMode:i};let b=G.runKernelFunc(d,m,null,dl,f);return b=Ae(b,o.dtype),h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const ah=z({avgPool_:r_});function o_(e,t,n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:un("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","avgPool3d","float32");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),A(h.rank===5,()=>`Error in avgPool3d: x must be rank 5 but got rank ${h.rank}.`),A(o==="NDHWC",()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),A(cn(n,a),()=>`Error in avgPool3d: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,x)=>{a==null&&(a=[1,1,1]);const v=oh(h.shape,t,n,a,s,i,o);return x([h]),L.avgPool3d(h,v)},f={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a};let w=G.runKernelFunc(m,f,null,qg,b);return w=Ae(w,h.dtype),d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const yb=z({avgPool3d_:o_});function np(e,t){const n=e[0].length;e.forEach((i,o)=>{A(i.length===n,()=>`Error in concat${n}D: rank of tensors[${o}] must be the same as the rank of the rest (${n})`)}),A(t>=0&&t`Error in concat${n}D: axis must be between 0 and ${n-1}.`);const s=e[0];e.forEach((i,o)=>{for(let a=0;a`Error in concat${n}D: Shape of tensors[${o}] (${i}) does not match the shape of the rest (${s}) along the non-concatenated axis ${o}.`)})}function Xi(e,t){const n=e[0].slice();for(let s=1;s=1,()=>"Pass at least one tensor to concat");let n=th(e,"tensors","concat");n[0].dtype==="complex64"&&n.forEach(a=>{if(a.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor - with dtype ${a.dtype}. 
`)});const s=(a,c)=>{const h=qe(t,n[0].shape)[0],d=Xi(n.map(b=>b.shape),h);if(P(d)===0)return sn([],d);if(n=n.filter(b=>b.size>0),n.length===1)return n[0];const m=n.map(b=>b.shape);np(m,h);const f=a.concat(n,h);return c(n),f},i=n,o={axis:t};return G.runKernelFunc(s,i,null,fl,o)}const Yt=z({concat_:a_});function c_(e){const t=W(e,"x","sigmoid"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sigmoid(t);return i([o]),o},n,null,zl)}const Ti=z({sigmoid_:c_});function l_(e,t,n){const s=W(e,"x","slice");if(s.rank===0)throw new Error("Slicing scalar is not possible");const i=(c,h)=>{const[d,m]=Kd(s,t,n);return Qy(s,d,m),h([s]),c.slice(s,d,m)},o={x:s},a={begin:t,size:n};return G.runKernelFunc(i,o,null,Ad,a)}const tt=z({slice_:l_});function h_(e){const t=W(e,"x","tanh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.tanh(t);return i([o]),o},n,null,Yl)}const $a=z({tanh_:h_});function u_(e,t,n,s,i,o){const a=W(e,"forgetBias","basicLSTMCell"),c=W(t,"lstmKernel","basicLSTMCell"),h=W(n,"lstmBias","basicLSTMCell"),d=W(s,"data","basicLSTMCell"),m=W(i,"c","basicLSTMCell"),f=W(o,"h","basicLSTMCell"),b=Yt([d,f],1),w=ct(b,c),L=be(w,h),x=L.shape[0],v=L.shape[1]/4,N=[x,v],O=tt(L,[0,0],N),E=tt(L,[0,v],N),k=tt(L,[0,v*2],N),F=tt(L,[0,v*3],N),U=be(X(Ti(O),$a(E)),X(m,Ti(be(a,k)))),$=X($a(U),Ti(F));return[U,$]}const d_=z({basicLSTMCell_:u_});function p_(e,t,n){const s=W(e,"x","batchToSpaceND"),i=t.reduce((h,d)=>h*d);A(s.rank>=1+t.length,()=>`input rank is ${s.rank} but should be > than blockShape.length ${t.length}`),A(n.length===t.length,()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`),A(s.shape[0]%i===0,()=>`input tensor batch is ${s.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${i}`);const o=h=>h.batchToSpaceND(s,t,n),a={x:s},c={blockShape:t,crops:n};return G.runKernelFunc(o,a,null,jg,c)}const ch=z({batchToSpaceND_:p_});function m_(e){let t;return e.rank===0||e.rank===1?t=K(e,[1,1,1,e.size]):e.rank===2?t=K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?t=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):t=e,t}function f_(e,t,n,s,i,o){o==null&&(o=.001);const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;s!=null&&(m=W(s,"offset","batchNorm")),A(c.rank===h.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(m==null||c.rank===m.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(d==null||c.rank===d.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");const f=m_(a),b=(v,N)=>(N([f,c,h,d]),v.batchNorm(f,sp(c),sp(h),sp(m),sp(d),o)),w={x:f,scale:d,offset:m,mean:c,variance:h},L={varianceEpsilon:o},x=G.runKernelFunc(b,w,null,Il,L);return K(x,a.shape)}function sp(e){return e==null?null:e.rank===0?K(e,[e.size]):e.rank===1?e:e.rank===2?K(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?K(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e}const No=z({batchNorm_:f_});function g_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`),A(c.rank===2||c.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${c.rank}.`),A(h.rank===2||h.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank 
${h.rank}.`),d!=null&&A(d.rank===2||d.rank===1,()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===2||m.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const qT=z({batchNorm2d_:g_});function y_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===3,()=>`Error in batchNorm3D: x must be rank 3 but got rank ${a.rank}.`),A(c.rank===3||c.rank===1,()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${c.rank}.`),A(h.rank===3||h.rank===1,()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${h.rank}.`),d!=null&&A(d.rank===3||d.rank===1,()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===3||m.rank===1,()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const jT=z({batchNorm3d_:y_});function b_(e,t,n,s,i,o){const a=W(e,"x","batchNorm"),c=W(t,"mean","batchNorm"),h=W(n,"variance","batchNorm");let d;i!=null&&(d=W(i,"scale","batchNorm"));let m;return s!=null&&(m=W(s,"offset","batchNorm")),A(a.rank===4,()=>`Error in batchNorm4D: x must be rank 4 but got rank ${a.rank}.`),A(c.rank===4||c.rank===1,()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${c.rank}.`),A(h.rank===4||h.rank===1,()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${h.rank}.`),d!=null&&A(d.rank===4||d.rank===1,()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${d.rank}.`),m!=null&&A(m.rank===4||m.rank===1,()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${m.rank}.`),No(a,c,h,m,d,o)}const KT=z({batchNorm4d_:b_});function w_(e,t){let n=W(e,"broadcastTo","x");const s=n.shape;if(t.some(m=>!(m>0)||m%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.lengthn.rank){const m=n.shape.slice();for(;m.length=0;m--)if(i[m]===t[m])o[m]=1;else if(n.shape[m]!==1)throw new Error(`broadcastTo(): [${s}] cannot be broadcast to [${t}].`);const a=o.map((m,f)=>m>1?f:-1).filter(m=>m>=0);if(a.length===0)return kr(n);const c=m=>m.tile(n,o),h={x:n},d={shape:t,inputShape:i};return G.runKernelFunc(c,h,null,Kg,d)}const lh=z({broadcastTo_:w_});function L_(e){const t=W(e,"x","ceil"),n={x:t};return G.runKernelFunc(s=>s.ceil(t),n,null,pl)}const bb=z({ceil_:L_});function S_(e,t,n){const s=W(e,"x","clipByValue");A(t<=n,()=>`Error in clip: min (${t}) must be less than or equal to max (${n}).`);const i={x:s},o={clipValueMin:t,clipValueMax:n};return G.runKernelFunc((a,c)=>{const h=a.clip(s,t,n);return c([s]),h},i,null,ml,o)}const Jn=z({clipByValue_:S_});function I_(e){return Yt(e,0)}const XT=z({concat1d_:I_});function x_(e,t){return Yt(e,t)}const JT=z({concat2d_:x_});function T_(e,t){return Yt(e,t)}const ZT=z({concat3d_:T_});function A_(e,t){return Yt(e,t)}const QT=z({concat4d_:A_});function v_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","conv2d"),h=W(t,"filter","conv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),A(d.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${d.rank}.`),A(h.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${h.rank}.`),a!=null&&A(Le(s),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`);const 
f=i==="NHWC"?d.shape[3]:d.shape[1];A(f===h.shape[2],()=>`Error in conv2d: depth of input (${f}) must match input depth for filter ${h.shape[2]}.`),A(cn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const b=(v,N)=>{const O=Wr(i),E=kn(d.shape,h.shape,n,o,s,a,!1,O),k=v.conv2d(d,h,E);return N([d,h]),k},w={x:d,filter:h},L={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},x=G.runKernelFunc(b,w,null,od,L);return m?K(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const Ji=z({conv2d_:v_});function N_(e,t,n,s,i="NWC",o=1,a){const c=W(e,"x","conv1d"),h=W(t,"filter","conv1d");let d=c,m=!1;c.rank===2&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1]])),A(d.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${d.rank}.`),A(h.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${h.rank}.`),a!=null&&A(Le(s),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),A(d.shape[2]===h.shape[1],()=>`Error in conv1d: depth of input (${d.shape[2]}) must match input depth for filter ${h.shape[1]}.`),A(cn(n,o),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${n} and dilation '${o}'`),A(i==="NWC",()=>`Error in conv1d: got dataFormat of ${i} but only NWC is currently supported.`);const f=K(h,[1,h.shape[0],h.shape[1],h.shape[2]]),b=K(d,[d.shape[0],1,d.shape[1],d.shape[2]]),w=[1,n],L=[1,o],x="NHWC",v=Ji(b,f,w,s,x,L,a);return m?K(v,[v.shape[2],v.shape[3]]):K(v,[v.shape[0],v.shape[2],v.shape[3]])}const ip=z({conv1d_:N_});function C_(e,t,n,s,i,o="NHWC",a){A(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let c=e,h=t,d=!1;t.rank===3&&(d=!0,h=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]),c=[1,e[0],e[1],e[2]]),A(c.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${c.length}.`),A(h.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${h.rank}`),A(n.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${n.rank}`);const m=o==="NHWC"?c[3]:c[1],f=o==="NHWC"?h.shape[3]:h.shape[1];A(m===n.shape[2],()=>`Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${n.shape[2]}.`),A(f===n.shape[3],()=>`Error in conv2dDerInput: depth of output (${f}) must match output depth for filter ${n.shape[3]}.`),a!=null&&A(Le(i),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const b=(v,N)=>{const O=1,E=Wr(o),k=kn(c,n.shape,s,O,i,a,!1,E),F=v.conv2dDerInput(h,n,k);return N([h,n]),F},w={dy:h,filter:n},L={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,inputShape:c},x=G.runKernelFunc(b,w,null,ad,L);return d?K(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const wb=z({conv2DBackpropInput_:C_});function R_(e,t,n,s,i,o){const a=W(e,"x","conv2dTranspose"),c=W(t,"filter","conv2dTranspose");return wb(n,a,c,s,i,"NHWC",o)}const rp=z({conv2dTranspose_:R_});function O_(e,t,n,s,i="NDHWC",o=[1,1,1]){const a=W(e,"x","conv3d"),c=W(t,"filter","conv3d");let h=a,d=!1;a.rank===4&&(d=!0,h=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]])),A(h.rank===5,()=>`Error in conv3d: input must be rank 5, but got rank ${h.rank}.`),A(c.rank===5,()=>`Error in conv3d: filter must be rank 5, but got rank ${c.rank}.`),A(h.shape[4]===c.shape[3],()=>`Error in conv3d: depth of input (${h.shape[4]}) must match input depth for filter ${c.shape[3]}.`),A(cn(n,o),()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${o}'`),A(i==="NDHWC",()=>`Error in conv3d: got dataFormat of ${i} but only NDHWC is currently supported.`);const m=(L,x)=>{const v=Fr(h.shape,c.shape,n,o,s),N=L.conv3d(h,c,v);return x([h,c]),N},f={x:h,filter:c},b={strides:n,pad:s,dataFormat:i,dilations:o},w=G.runKernelFunc(m,f,null,cd,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Lb=z({conv3d_:O_});function E_(e,t,n,s,i){A(e.length===t.rank,()=>`Length of inShape (${e.length}) and rank of dy (${t.rank}) must match`);let o=e,a=t,c=!1;t.rank===4&&(c=!0,a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),o=[1,e[0],e[1],e[2],e[3]]);const h=o[4],d=a.shape[4];A(o.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${o.length}.`),A(a.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${a.rank}`),A(n.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${n.rank}`),A(h===n.shape[3],()=>`Error in conv3dDerInput: depth of input (${h}) must match input depth for filter ${n.shape[3]}.`),A(d===n.shape[4],()=>`Error in conv3dDerInput: depth of output (${d}) must match output depth for filter ${n.shape[4]}.`);const m=L=>{const x=1,v=Fr(o,n.shape,s,x,i);return L.conv3dDerInput(a,n,v)},f={dy:a,filter:n},b={pad:i,strides:s,inputShape:o},w=G.runKernelFunc(m,f,null,Zg,b);return c?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const eA=z({conv3DBackpropInput_:E_});function D_(e,t,n,s,i){const o=W(e,"x","conv3dTranspose"),a=W(t,"filter","conv3dTranspose");return eA(n,o,a,s,i)}const k_=z({conv3dTranspose_:D_});function F_(e){const t=W(e,"x","cos"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.cos(t);return i([t]),o},n,null,Ia)}const hh=z({cos_:F_});function __(e){const t=W(e,"x","cosh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.cosh(t);return i([t]),o},n,null,gl)}const op=z({cosh_:__});function W_(e,t=0,n=!1,s=!1){const i=W(e,"x","cumsum"),o=(h,d)=>{const m=Xn([t],i.rank);let f=i;m!=null&&(f=Ye(i,m));const b=as(1,i.rank)[0];let w=h.cumsum(f,b,n,s);if(d([i]),m!=null){const L=sh(m);w=Ye(w,L)}return w},a={x:i},c={axis:t,exclusive:n,reverse:s};return G.runKernelFunc(o,a,null,Qg,c)}const ap=z({cumsum_:W_});function $_(e,t,n="NHWC"){const s=W(e,"x","depthToSpace"),i=n==="NHWC"?s.shape[1]:s.shape[2],o=n==="NHWC"?s.shape[2]:s.shape[3],a=n==="NHWC"?s.shape[3]:s.shape[1];A(i*t>=0,()=>`Negative dimension size caused by overflow when multiplying - ${i} and ${t} for depthToSpace with input shape - ${s.shape}`),A(o*t>=0,()=>`Negative dimension size caused by overflow when multiplying - ${o} and ${t} for depthToSpace with input shape - ${s.shape}`),A(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${s.shape}`);const c=m=>m.depthToSpace(s,t,n),h={x:s},d={blockSize:t,dataFormat:n};return G.runKernelFunc(c,h,null,kx,d)}const Sb=z({depthToSpace_:$_});function U_(e,t,n,s,i="NHWC",o=[1,1],a){const c=W(e,"x","depthwiseConv2d"),h=W(t,"filter","depthwiseConv2d");let d=c,m=!1;c.rank===3&&(m=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),A(d.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${d.rank}.`),A(h.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${h.rank}.`),A(d.shape[3]===h.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${d.shape[3]}) must match the inChannels dimension in filter ${h.shape[2]}.`),a!=null&&A(Le(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode 
${a} but got pad ${s}.`);const f=(x,v)=>{o==null&&(o=[1,1]),A(cn(n,o),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`);const N=kn(d.shape,h.shape,n,o,s,a,!0),O=x.depthwiseConv2D(d,h,N);return v([d,h]),O},b={x:d,filter:h},w={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a},L=G.runKernelFunc(f,b,null,ld,w);return m?K(L,[L.shape[1],L.shape[2],L.shape[3]]):L}const Co=z({depthwiseConv2d_:U_});function B_(e){const t=W(e,"x","diag"),n=i=>{const o=K(t,[t.size]),a=i.diag(o),c=[...e.shape,...e.shape];return K(a,c)},s={x:t};return G.runKernelFunc(n,s,null,Fx)}const M_=z({diag_:B_});function P_(e,t,n,s,i=[1,1],o="NHWC"){const a=W(e,"x","dilation2d"),c=W(t,"filter","dilation2d");A(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),A(c.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${c.rank}.`),A(o==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${o}`);let h=a,d=!1;a.rank===3&&(h=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),d=!0);const m={x:h,filter:c},f={strides:n,pad:s,dilations:i},b=G.runKernel(hd,m,f);return d?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const Ib=z({dilation2d_:P_});function Ro(e,t){const n=e.length,s=[];for(let i=0;i1&&a===1&&s.unshift(o)}return s}function pn(e,t){const n=[];for(let s=0;s1)&&n.unshift(o)}return n}function nt(e,t){const n=[],s=Math.max(e.length,t.length);for(let i=0;ia.equal(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Wx)}const Xs=z({equal_:z_});function V_(e,t,n){const s=W(t,"a","where"),i=W(n,"b","where"),o=W(e,"condition","where","bool"),a=nt(s.shape,i.shape),c=lh(s,a),h=lh(i,a);o.rank===1&&A(o.shape[0]===s.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),o.rank!==1&&B(o.shape,h.shape,"Error in where: ");const d=(f,b)=>{const w=f.select(o,c,h);return b([o]),w},m={condition:o,t:c,e:h};return G.runKernelFunc(d,m,null,Iy)}const Bn=z({where_:V_});function G_(e){const t=W(e,"x","zerosLike"),n={x:t};return G.runKernelFunc(s=>s.zerosLike(t),n,null,Ry)}const et=z({zerosLike_:G_});function Y_(e,t){let n=W(e,"a","div"),s=W(t,"b","div");[n,s]=Gt(n,s);const i=We(n,s),o=et(i),a=Xs(s,o);return Bn(a,o,i)}const xb=z({divNoNan_:Y_});function H_(e,t){const n=W(e,"t1","dot"),s=W(t,"t2","dot");A((n.rank===1||n.rank===2)&&(s.rank===1||s.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${s.rank}.`);const i=n.rank===1?n.size:n.shape[1],o=s.rank===1?s.size:s.shape[0];if(A(i===o,()=>`Error in dot: inner dimensions of inputs must match, but got ${i} and ${o}.`),n.rank===1&&s.rank===1){const a=K(n,[1,-1]),c=K(s,[-1,1]),h=ct(a,c);return K(h,[])}else if(n.rank===1&&s.rank===2){const a=K(n,[1,-1]),c=K(s,[s.shape[0],s.shape[1]]),h=ct(a,c);return K(h,[h.size])}else if(n.rank===2&&s.rank===1){const a=K(s,[-1,1]),c=ct(n,a);return K(c,[c.size])}else{const a=K(s,[s.shape[0],s.shape[1]]),c=ct(n,a);return c}}const tA=z({dot_:H_});function q_(e){const t=W(e,"x","elu"),n=(i,o)=>{const a=i.elu(t);return o([a]),a},s={x:t};return G.runKernelFunc(n,s,null,yl)}const Ua=z({elu_:q_});function j_(e){let t=W(e,"x","erf");A(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=Ae(t,"float32"));const n={x:t};return G.runKernelFunc((s,i)=>{const o=s.erf(t);return i([t]),o},n,null,bl)}const Tb=z({erf_:j_});function K_(e){const t=W(e,"x","exp"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.exp(t);return 
i([o]),o},n,null,wl)}const Is=z({exp_:K_});function X_(e,t=0){const n=null,s=W(e,"x","expandDims",n);A(t<=s.rank,()=>"Axis must be <= rank of the tensor");const i=s.shape.slice();return t<0&&(A(-(s.rank+1)<=t,()=>`Axis must be in the interval [${-(s.rank+1)}, ${s.rank}]`),t=s.rank+t+1),i.splice(t,0,1),K(s,i)}const Zn=z({expandDims_:X_});function J_(e){const t=W(e,"x","expm1"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.expm1(t);return i([t]),o},n,null,Ll)}const Ab=z({expm1_:J_});function Z_(e,t){const n=null,s=W(e,"x","tile",n);A(s.rank===t.length,()=>`Error in transpose: rank of input ${s.rank} must match length of reps ${t}.`);const i=(h,d)=>{const m=h.tile(s,t);return d([s]),m},o=[s],a={x:s},c={reps:t};return G.runKernelFunc(i,a,null,vy,c,o)}const $r=z({tile_:Z_});function Q_(e,t,n,s="float32"){t==null&&(t=e);const i=wt([e,t],s),o=e<=t?e:t;for(let c=0;ci.fill(e,t,n),{},null,ny,s)}function eW(e){const t=W(e,"x","floor"),n={x:t};return G.runKernelFunc(s=>s.floor(t),n,null,Sl)}const Ma=z({floor_:eW});const vb=30;function uh(e){return e<=vb?e:ed(e,Math.floor(Math.sqrt(e)))}function tW(e,t){let n=!1,s;for(e<=vb?(s=e,n=!0):s=ed(e,Math.floor(Math.sqrt(e)));!n;)s>t||s===e?n=!0:s=ed(e,s+1);return s}function nW(e,t,n){const s=[],i=e.length;for(let o=0;o{const m=qe(n,s.shape)[0],f=nA(s,i,m),b=h.gather(s,K(i,[i.size]),m);return d([s,i]),K(b,f.outputShape)};return G.runKernelFunc(c,o,null,iy,a)}const Pa=z({gather_:iW});function rW(e,t){let n=W(e,"a","greater"),s=W(t,"b","greater");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.greater(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Ux)}const xs=z({greater_:rW});function oW(e,t){let n=W(e,"a","greaterEqual"),s=W(t,"b","greaterEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.greaterEqual(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,ry)}const Zi=z({greaterEqual_:oW});function aW(e){const t=W(e,"input","imag"),n=i=>i.imag(t),s={input:t};return G.runKernelFunc(n,s,null,gd)}const dh=z({imag_:aW});function cW(e){const t=W(e,"x","isFinite"),n={x:t};return G.runKernelFunc(s=>s.isFinite(t),n,null,Tl)}const sA=z({isFinite_:cW});function lW(e){const t=W(e,"x","isInf"),n={x:t};return G.runKernelFunc(s=>s.isInf(t),n,null,Al)}const iA=z({isInf_:lW});function hW(e){const t=W(e,"x","isNaN"),n={x:t};return G.runKernelFunc(s=>s.isNaN(t),n,null,vl)}const rA=z({isNaN_:hW});function uW(e,t){let n=W(e,"a","maximum"),s=W(t,"b","maximum");[n,s]=Gt(n,s),n.dtype==="bool"&&(n=Ae(n,"int32"),s=Ae(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.maximum(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,cy)}const $s=z({maximum_:uW});function Ce(e,t){if((hn(e)&&t!=="string"||Array.isArray(e))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&hn(e)&&!(e instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const n=[],s=[];return Er(e,n,s,t)}function dW(e,t=.2){const n=W(e,"x","leakyRelu");return $s(X(Ce(t),n),n)}const lp=z({leakyRelu_:dW});function pW(e,t){let n=W(e,"a","less"),s=W(t,"b","less");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.less(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Bx)}const ph=z({less_:pW});function mW(e,t){let n=W(e,"a","lessEqual"),s=W(t,"b","lessEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.lessEqual(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Mx)}const Ur=z({lessEqual_:mW});function 
oA(e,t,n){if(n<=0)throw new Error("The number of values should be positive.");const s={start:e,stop:t,num:n};return G.runKernelFunc(i=>i.linspace(e,t,n),{},null,Px,s)}function fW(e,t=5,n=1,s=1,i=.5){const o=W(e,"x","localResponseNormalization");A(o.rank===4||o.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got - rank ${o.rank}.`),A(Le(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let a=o,c=!1;o.rank===3&&(c=!0,a=K(o,[1,o.shape[0],o.shape[1],o.shape[2]]));const h=(b,w)=>{const L=b.localResponseNormalization4D(a,t,n,s,i);return w([a,L]),L},d={x:a},m={depthRadius:t,bias:n,alpha:s,beta:i},f=G.runKernelFunc(h,d,null,ay,m);return c?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const Nb=z({localResponseNormalization_:fW});function gW(e){const t=W(e,"x","log"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.log(t);return i([t]),o},n,null,Nl)}const cs=z({log_:gW});function yW(e){const t=W(e,"x","log1p"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.log1p(t);return i([t]),o},n,null,Cl)}const hp=z({log1p_:yW});function bW(e){return A(Rr(e),()=>"The f passed in grad(f) must be a function"),(t,n)=>{const s=W(t,"x","tf.grad",null),i=n!=null?W(n,"dy","tf.grad"):null;return G.tidy(()=>{const{value:o,grads:a}=G.gradients(()=>e(s),[s],i);return i!=null&&B(o.shape,i.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),up(a),a[0]})}}function wW(e){return A(Rr(e),()=>"The f passed in grads(f) must be a function"),(t,n)=>{A(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");const s=th(t,"args","tf.grads",null),i=n!=null?W(n,"dy","tf.grads"):null;return G.tidy(()=>{const{value:o,grads:a}=G.gradients(()=>e(...s),s,i);return i!=null&&B(o.shape,i.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),up(a),a})}}function LW(e){return A(Rr(e),()=>"The f passed in valueAndGrad(f) must be a function"),(t,n)=>{A(t instanceof ee,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),A(n==null||n instanceof ee,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");const{grads:s,value:i}=G.gradients(()=>e(t),[t],n);return up(s),{grad:s[0],value:i}}}function SW(e){return A(Rr(e),()=>"The f passed in valueAndGrads(f) must be a function"),(t,n)=>{A(Array.isArray(t)&&t.every(i=>i instanceof ee),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),A(n==null||n instanceof ee,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");const s=G.gradients(()=>e(...t),t,n);return n!=null&&B(s.value.shape,n.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),up(s.grads),s}}function Cb(e,t){A(Rr(e),()=>"The f passed in variableGrads(f) must be a function"),A(t==null||Array.isArray(t)&&t.every(d=>d instanceof Ql),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");const n=t!=null;if(!n){t=[];for(const d in G.registeredVariables)t.push(G.registeredVariables[d])}const s=n?t.filter(d=>!d.trainable):null,i=t.length;t=t.filter(d=>d.trainable),A(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${i} variables is trainable.`);const o=!0,{value:a,grads:c}=G.gradients(e,t,null,o);A(c.some(d=>d!=null),()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). 
Please make sure the operations that use variables are inside the function f passed to minimize()."),A(a.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);const h={};return t.forEach((d,m)=>{c[m]!=null&&(h[d.name]=c[m])}),s!=null&&s.forEach(d=>h[d.name]=null),{value:a,grads:h}}function Ai(e){return G.customGrad(e)}function up(e){const t=e.filter(n=>n==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that - the f you passed encloses all operations that lead from x to y.`)}function IW(e){const t=W(e,"x","neg"),n={x:t};return G.runKernelFunc(s=>s.neg(t),n,null,my)}const Ht=z({neg_:IW});function xW(e){const t=W(e,"x","softplus"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.softplus(t);return i([t]),o},n,null,Vl)}const za=z({softplus_:xW});function TW(e){const t=W(e,"x","logSigmoid"),n=Ai(s=>{const i=Ht(za(Ht(s))),o=a=>{const c=X(a,Ti(Ht(s)));return c};return{value:i,gradFunc:o}});return n(t)}const aA=z({logSigmoid_:TW});function AW(e,t=null,n=!1){const s=W(e,"x","max"),i=(c,h)=>{const d=qe(t,s.shape);let m=d;const f=Xn(m,s.rank);let b=s;f!=null&&(b=Ye(s,f),m=as(m.length,b.rank));const w=c.max(b,m);f!=null&&b.dispose();let L=w;if(n){const x=vn(L.shape,qe(t,s.shape));L=K(L,x),w.dispose()}return h([s,L]),L},o={x:s},a={reductionIndices:t,keepDims:n};return G.runKernelFunc(i,o,null,Rl,a)}const Qn=z({max_:AW});function vW(e,t){let n=W(e,"a","sub"),s=W(t,"b","sub");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.subtract(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,Na)}const Re=z({sub_:vW});function NW(e,t=null,n=!1){let s=W(e,"x","sum");s.dtype==="bool"&&(s=Ae(s,"int32"));const i=(c,h)=>{h([s]);const d=qe(t,s.shape),m=Xn(d,s.rank);let f=d,b=s;m!=null&&(b=Ye(s,m),f=as(f.length,s.rank));let w=c.sum(b,f);if(n){const L=vn(w.shape,d);w=K(w,L)}return w},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,xy,a)}const $e=z({sum_:NW});function CW(e,t=-1){const n=W(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and axis was ${t}`);const s=(a,c)=>{const h=!0,d=Qn(e,t,!0),m=Re(e,d),f=Re(Ae(m,"float32"),cs($e(Is(m),t,h)));return c([f]),f},i={logits:n},o={axis:t};return G.runKernelFunc(s,i,null,oy,o)}const dp=z({logSoftmax_:CW});function RW(e,t=null,n=!1){const s=W(e,"x","logSumExp"),i=qe(t,s.shape),o=Qn(s,i,!0),a=Re(s,o),c=Is(a),h=$e(c,i),d=cs(h),m=be(K(o,d.shape),d);if(n){const f=vn(m.shape,i);return K(m,f)}return m}const Rb=z({logSumExp_:RW});function OW(e,t){const n=W(e,"a","logicalAnd","bool"),s=W(t,"b","logicalAnd","bool");nt(n.shape,s.shape);const i={a:n,b:s};return G.runKernelFunc(o=>o.logicalAnd(n,s),i,null,zx)}const Us=z({logicalAnd_:OW});function EW(e){const t=W(e,"x","logicalNot","bool"),n={x:t};return G.runKernelFunc(s=>s.logicalNot(t),n,null,yd)}const mh=z({logicalNot_:EW});function DW(e,t){const n=W(e,"a","logicalOr","bool"),s=W(t,"b","logicalOr","bool");nt(n.shape,s.shape);const i={a:n,b:s};return G.runKernelFunc(o=>o.logicalOr(n,s),i,null,Vx)}const pp=z({logicalOr_:DW});function kW(e,t){const n=W(e,"a","logicalXor","bool"),s=W(t,"b","logicalXor","bool");return nt(n.shape,s.shape),Us(pp(e,t),mh(Us(e,t)))}const cA=z({logicalXor_:kW});function FW(e,t,n,s,i){const o=W(e,"x","maxPool"),a=1;let c=o,h=!1;o.rank===3&&(h=!0,c=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(c.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${c.rank}.`),A(cn(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const d=(w,L)=>{const x=Un(c.shape,t,n,1,s,i);let v;return x.filterWidth===1&&x.filterHeight===1&&ae(x.inShape,x.outShape)?v=c.clone():v=w.maxPool(c,x),L([c,v]),v},m={x:c},f={filterSize:t,strides:n,pad:s,dimRoundingMode:i},b=G.runKernelFunc(d,m,null,Ol,f);return h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const fh=z({maxPool_:FW});function _W(e,t=[1,1,1],n,s,i,o="NDHWC",a){a==null?a=[1,1,1]:un("dilations is deprecated, this field will be gone in v3.0.0.");const c=W(e,"x","maxPool3d");let h=c,d=!1;c.rank===4&&(d=!0,h=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),A(h.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${h.rank}.`),A(o==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${o}`),A(cn(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),i!=null&&A(Le(s),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${i} but got pad ${s}.`);const m=(L,x)=>{a==null&&(a=[1,1,1]);const v=oh(h.shape,t,n,a,s,i,o),N=L.maxPool3d(h,v);return x([h,N]),N},f={x:h},b={filterSize:t,strides:n,pad:s,dimRoundingMode:i,dataFormat:o,dilations:a},w=G.runKernelFunc(m,f,null,ly,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3],w.shape[4]]):w}const Ob=z({maxPool3d_:_W});function WW(e,t,n,s,i=!1){const o=W(e,"x","maxPoolWithArgmax"),a={x:o},c={filterSize:t,strides:n,pad:s,includeBatchInIndex:i},h=G.runKernel(wd,a,c);return{result:h[0],indexes:h[1]}}const lA=z({maxPoolWithArgmax_:WW});function dt(e,t="float32"){if(t==="complex64"){const s=dt(e,"float32"),i=dt(e,"float32");return ji(s,i)}const n=La(P(e),t);return G.makeTensor(n,e,t)}function Js(e,t="float32"){if(t==="complex64"){const s=Js(e,"float32"),i=dt(e,"float32");return ji(s,i)}const n=Mg(P(e),t);return G.makeTensor(n,e,t)}function $W(e,t=null,n=!1){const s=W(e,"x","mean"),i=qe(t,s.shape),o=An(s.shape,i),a=o[1],c=P(a),h={x:s},d={axis:t,keepDims:n},m=()=>{const b=Ce(c),w=b.dtype===s.dtype?s:Ae(s,b.dtype),L=We(w,b);return $e(L,t,n)},f=Ai(b=>{const w=G.runKernelFunc(m,h,null,hy,d),L=x=>{const v=b.shape.slice();i.forEach(E=>{v[E]=1});const N=K(x,v),O=We(X(N,Js(b.shape,"float32")),c);return O};return{value:w,gradFunc:L}});return f(s)}const qt=z({mean_:$W});function UW(e,t=null,n=!1){const s=W(e,"x","min"),i=(c,h)=>{const d=qe(t,s.shape);let m=d;const f=Xn(m,s.rank);let b=s;f!=null&&(b=Ye(s,f),m=as(m.length,s.rank));const w=c.min(b,m);f!=null&&b.dispose();let L=w;if(n){const x=vn(L.shape,d);L=K(w,x),w.dispose()}return h([s,L]),L},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,uy,a)}const Va=z({min_:UW});function BW(e,t){let n=W(e,"a","minimum"),s=W(t,"b","minimum");[n,s]=Gt(n,s),n.dtype==="bool"&&(n=Ae(n,"int32"),s=Ae(s,"int32")),nt(n.shape,s.shape);const i=(a,c)=>{const h=a.minimum(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,dy)}const Oo=z({minimum_:BW});function MW(e,t,n){A(n==="reflect"||n==="symmetric",()=>`Invalid mode. Mode must be either reflect or symmetric. Got ${n}.`);const s=W(e,"x","mirrorPad");if(s.rank===0)throw new Error("mirrorPad(scalar) is not defined. Pass non-scalar to mirrorPad");A(t.length===s.rank,()=>`Padding doesn't match input. Must be ${s.rank}. Got ${t.length}.`);const i=n==="reflect"?1:0;for(let c=0;c"Invalid number of paddings. 
Must be length of 2 each."),A(t[c][0]>=0&&t[c][0]<=s.shape[c]-i&&t[c][1]>=0&&t[c][1]<=s.shape[c]-i,()=>`Padding in dimension ${c} cannot be greater than or equal to ${s.shape[c]-i} or less than 0 for input of shape ${s.shape}`);const o={paddings:t,mode:n},a={x:s};return G.runKernel(El,a,o)}const Eb=z({mirrorPad_:MW});function PW(e,t){let n=W(e,"a","mod"),s=W(t,"b","mod");[n,s]=Gt(n,s);const i=(a,c)=>{const h=a.mod(n,s);return c([n,s]),h},o={a:n,b:s};return G.runKernelFunc(i,o,null,py)}const mp=z({mod_:PW});function zW(e){const t=W(e,"x","square"),n={},s=[t],i=[];return G.runKernelFunc((o,a)=>(a([t]),o.square(t)),{x:t},null,"Square",n,s,i)}const At=z({square_:zW});function VW(e,t=null,n=!1){e=W(e,"x","moments");const s=qe(t,e.shape),i=qt(e,s,n);let o=i.shape;n||(o=vn(i.shape,s));const a=At(Re(Ae(e,"float32"),K(i,o))),c=qt(a,s,n);return{mean:i,variance:c}}const fp=z({moments_:VW});function GW(e,t,n,s){const i=W(t,"data","multiRNNCell"),o=th(n,"c","multiRNNCell"),a=th(s,"h","multiRNNCell");let c=i;const h=[];for(let f=0;f2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);n=n||Math.random();const c=a===1?K(i,[1,-1]):i,h=G.runKernelFunc(d=>d.multinomial(c,s,t,n),{logits2D:c});return a===1?K(h,[h.size]):h}const hA=z({multinomial_:HW});function qW(e,t){let n=W(e,"a","notEqual"),s=W(t,"b","notEqual");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=a=>a.notEqual(n,s),o={a:n,b:s};return G.runKernelFunc(i,o,null,Dl)}const Br=z({notEqual_:qW});function jW(e){const t=W(e,"input","real"),n=i=>i.real(t),s={input:t};return G.runKernelFunc(n,s,null,Td)}const Ga=z({real_:jW});function KW(e){const t=W(e,"x","onesLike"),n=(i,o)=>{if(t.dtype==="complex64"){const a=Fn(Ga(t)),c=et(dh(t));return ji(a,c)}return i.onesLike(t)},s={x:t};return G.runKernelFunc(n,s,null,gy)}const Fn=z({onesLike_:KW});function XW(e,t){const n=W(e,"v1","outerProduct"),s=W(t,"v2","outerProduct");A(n.rank===1&&s.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${n.rank} and ${s.rank}.`);const i=K(n,[-1,1]),o=K(s,[1,-1]);return ct(i,o)}const JW=z({outerProduct_:XW});function ZW(e,t,n=0){const s=W(e,"x","pad");if(s.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const i=(c,h)=>(h([s]),c.pad(s,t,n)),o={paddings:t,constantValue:n},a={x:s};return G.runKernelFunc(i,a,null,Id,o)}const vi=z({pad_:ZW});function QW(e,t,n=0){return A(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),vi(e,[t],n)}const e$=z({pad1d_:QW});function t$(e,t,n=0){return A(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),vi(e,t,n)}const n$=z({pad2d_:t$});function s$(e,t,n=0){return A(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),vi(e,t,n)}const i$=z({pad3d_:s$});function r$(e,t,n=0){return A(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. 
Must be length of 2 each."),vi(e,t,n)}const o$=z({pad4d_:r$});function a$(e,t,n){const s=W(e,"x","spaceToBatchND");A(s.rank>=1+t.length,()=>`input rank ${s.rank} should be > than [blockShape] ${t.length}`),A(n.length===t.length,()=>`paddings.shape[0] ${n.length} must be equal to [blockShape] ${t.length}`),A(s.shape.reduce((c,h,d)=>d>0&&d<=t.length?c&&(h+n[d-1][0]+n[d-1][1])%t[d-1]===0:c,!0),()=>`input spatial dimensions ${s.shape.slice(1)} with paddings ${n.toString()} must be divisible by blockShapes ${t.toString()}`);const i=c=>c.spaceToBatchND(s,t,n),o={x:s},a={blockShape:t,paddings:n};return G.runKernelFunc(i,o,null,vd,a)}const gh=z({spaceToBatchND_:a$});function c$(e,t,n,s,i,o){i==null&&(i=[1,1]),o==null&&(o=1),s===0&&(s="valid");const a=W(e,"x","maxPool");let c=a,h=!1;a.rank===3&&(h=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]])),A(cn(o,i),()=>`Error in pool: Either strides or dilations must be 1. Got strides ${o} and dilations '${i}'`);const d=Un(c.shape,t,o,i,s),m=[d.dilationHeight,d.dilationWidth];let f;s==="same"?f=h$([d.filterHeight,d.filterWidth],m):f=[[0,0],[0,0]];const b=m[0]===1&&m[1]===1,[w,L]=l$([d.inHeight,d.inWidth],m,f),x=b?s:"valid",v=b?c:gh(c,m,w),N=n==="avg"?()=>ah(v,t,o,x):()=>fh(v,t,o,x),O=N(),E=b?O:ch(O,m,L);return h?K(E,[E.shape[1],E.shape[2],E.shape[3]]):E}function l$(e,t,n){const s=n.map(m=>m[0]),i=n.map(m=>m[1]),o=e.concat(s,i),a=t.map((m,f)=>(m-o[f]%m)%m),c=i.map((m,f)=>m+a[f]),h=t.map((m,f)=>[s[f],c[f]]),d=t.map((m,f)=>[0,a[f]]);return[h,d]}function h$(e,t){const n=e.map((a,c)=>a+(a-1)*(t[c]-1)),s=n.map(a=>a-1),i=s.map(a=>Math.floor(a/2)),o=s.map((a,c)=>a-i[c]);return s.map((a,c)=>[i[c],o[c]])}const uA=z({pool_:c$});function u$(e,t){let n=W(e,"base","pow"),s=W(t,"exp","pow");[n,s]=Gt(n,s);const i={a:n,b:s},o=(a,c)=>{const h=a.pow(n,s);return c([n,s,h]),h};return G.runKernelFunc(o,i,null,by)}const Zs=z({pow_:u$});function d$(e,t){const n=W(e,"x","prelu"),s=W(t,"alpha","prelu"),i=(a,c)=>{const h=a.prelu(n,s);return c([n,s]),h},o={x:n,alpha:s};return G.runKernelFunc(i,o,null,xd)}const yh=z({prelu_:d$});function p$(e,t=null,n=!1){let s=W(e,"x","prod");s.dtype==="bool"&&(s=Ae(s,"int32"));const i=c=>{const h=qe(t,s.shape),d=Xn(h,s.rank);let m=h,f=s;d!=null&&(f=Ye(s,d),m=as(m.length,s.rank));let b=c.prod(f,m);if(n){const w=vn(b.shape,h);b=K(b,w)}return b},o={x:s},a={axis:t,keepDims:n};return G.runKernelFunc(i,o,null,Hx,a)}const gp=z({prod_:p$});function m$(e,t,n){const s=P(e);let i=null;if(n==null||n==="float32")i=new Float32Array(s);else if(n==="int32")i=new Int32Array(s);else if(n==="bool")i=new Uint8Array(s);else throw new Error(`Unknown data type ${n}`);for(let o=0;o>>0,b-=h,b*=h,h=b>>>0,b-=h,h+=b*4294967296}return(h>>>0)*23283064365386963e-26};return d}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.alea=a})(Ya,e,!1)}),y$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.x=0,h.y=0,h.z=0,h.w=0,h.next=function(){var f=h.x^h.x<<11;return h.x=h.y,h.y=h.z,h.z=h.w,h.w^=h.w>>>19^f^f>>>8},c===(c|0)?h.x=c:d+=c;for(var m=0;m>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor128=a})(Ya,e,!1)}),b$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var f=h.x^h.x>>>2;return 
h.x=h.y,h.y=h.z,h.z=h.w,h.w=h.v,(h.d=h.d+362437|0)+(h.v=h.v^h.v<<4^(f^f<<1))|0},h.x=0,h.y=0,h.z=0,h.w=0,h.v=0,c===(c|0)?h.x=c:d+=c;for(var m=0;m>>4),h.next()}function o(c,h){return h.x=c.x,h.y=c.y,h.z=c.z,h.w=c.w,h.v=c.v,h.d=c.d,h}function a(c,h){var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorwow=a})(Ya,e,!1)}),w$=Eo(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.x,f=h.i,b,w,L;return b=m[f],b^=b>>>7,w=b^b<<24,b=m[f+1&7],w^=b^b>>>10,b=m[f+3&7],w^=b^b>>>3,b=m[f+4&7],w^=b^b<<7,b=m[f+7&7],b=b^b<<13,w^=b^b<<9,m[f]=w,h.i=f+1&7,w};function d(m,f){var b,w,L=[];if(f===(f|0))w=L[0]=f;else for(f=""+f,b=0;b0;--b)m.next()}d(h,c)}function o(c,h){return h.x=c.x.slice(),h.i=c.i,h}function a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(m.x&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xorshift7=a})(Ya,e,!1)}),L$=Eo(function(e){(function(t,n,s){function i(c){var h=this;h.next=function(){var m=h.w,f=h.X,b=h.i,w,L;return h.w=m=m+1640531527|0,L=f[b+34&127],w=f[b=b+1&127],L^=L<<13,w^=w<<17,L^=L>>>15,w^=w>>>12,L=f[b]=L^w,h.i=b,L+(m^m>>>16)|0};function d(m,f){var b,w,L,x,v,N=[],O=128;for(f===(f|0)?(w=f,f=null):(f=f+"\0",w=0,O=Math.max(O,f.length)),L=0,x=-32;x>>15,w^=w<<4,w^=w>>>13,x>=0&&(v=v+1640531527|0,b=N[x&127]^=w+v,L=b==0?L+1:0);for(L>=128&&(N[(f&&f.length||0)&127]=-1),L=127,x=4*128;x>0;--x)w=N[L+34&127],b=N[L=L+1&127],w^=w<<13,b^=b<<17,w^=w>>>15,b^=b>>>12,N[L]=w^b;m.w=v,m.X=N,m.i=L}d(h,c)}function o(c,h){return h.i=c.i,h.w=c.w,h.X=c.X.slice(),h}function a(c,h){c==null&&(c=+new Date);var d=new i(c),m=h&&h.state,f=function(){return(d.next()>>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(m.X&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.xor4096=a})(Ya,e,!1)}),S$=Eo(function(e){(function(t,n,s){function i(c){var h=this,d="";h.next=function(){var f=h.b,b=h.c,w=h.d,L=h.a;return f=f<<25^f>>>7^b,b=b-w|0,w=w<<24^w>>>8^L,L=L-f|0,h.b=f=f<<20^f>>>12^b,h.c=b=b-w|0,h.d=w<<16^b>>>16^L,h.a=L-f|0},h.a=0,h.b=0,h.c=2654435769|0,h.d=1367130551,c===Math.floor(c)?(h.a=c/4294967296|0,h.b=c|0):d+=c;for(var m=0;m>>0)/4294967296};return f.double=function(){do var b=d.next()>>>11,w=(d.next()>>>0)/4294967296,L=(b+w)/(1<<21);while(L===0);return L},f.int32=d.next,f.quick=f,m&&(typeof m=="object"&&o(m,d),f.state=function(){return o(d,{})}),f}n&&n.exports?n.exports=a:s&&s.amd?s(function(){return a}):this.tychei=a})(Ya,e,!1)}),Do=Eo(function(e){(function(t,n){var s=this,i=256,o=6,a=52,c="random",h=n.pow(i,o),d=n.pow(2,a),m=d*2,f=i-1,b;function w(k,F,U){var $=[];F=F==!0?{entropy:!0}:F||{};var Y=N(v(F.entropy?[k,E(t)]:k==null?O():k,3),$),j=new L($),Z=function(){for(var ie=j.g(o),de=h,he=0;ie=m;)ie/=2,de/=2,he>>>=1;return(ie+he)/de};return Z.int32=function(){return j.g(4)|0},Z.quick=function(){return 
j.g(4)/4294967296},Z.double=Z,N(E(j.S),t),(F.pass||U||function(ie,de,he,ue){return ue&&(ue.S&&x(ue,j),ie.state=function(){return x(j,{})}),he?(n[c]=ie,de):ie})(Z,Y,"global"in F?F.global:this==n,F.state)}n["seed"+c]=w;function L(k){var F,U=k.length,$=this,Y=0,j=$.i=$.j=0,Z=$.S=[];for(U||(k=[U++]);Y=1||o===0);const a=Math.sqrt(-2*Math.log(o)/o);e=this.mean+this.stdDev*s*a,t=this.mean+this.stdDev*i*a,(!this.truncated||this.isValidTruncated(e))&&(n=!0)}return(!this.truncated||this.isValidTruncated(t))&&(this.nextVal=this.convertValue(t)),this.convertValue(e)}convertValue(e){return this.dtype==null||this.dtype==="float32"?e:Math.round(e)}isValidTruncated(e){return e<=this.upper&&e>=this.lower}}class x${constructor(e,t,n,s){this.alpha=e,this.beta=1/t,this.dtype=n;const i=s||Math.random();this.randu=Ha(i.toString()),this.randn=new Db(0,1,n,!1,this.randu()),e<1?this.d=e+2/3:this.d=e-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let e,t,n,s,i,o;for(;;){do s=this.randn.nextValue(),o=1+this.c*s;while(o<=0);if(o*=o*o,e=s*s,t=1-.331*e*e,n=.5*e+this.d*(1-o+Math.log(o)),i=this.randu(),ithis.dtype==null||this.dtype==="float32",this.min=e,this.range=t-e,this.dtype=n,s==null&&(s=Math.random()),typeof s=="number"&&(s=s.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${e} - ${t} <= 1 and dtype is not float`);this.random=Ha(s)}convertValue(e){return this.canReturnFloat()?e:Math.round(e)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function vee(e){const t=e.length,n=N$(e),s=v$(e),i=t/6*(Math.pow(n,2)+.25*Math.pow(s-3,2)),o=5.991;if(i>o)throw new Error(`Invalid p-value for JB: ${i}`)}function Nee(e,t,n,s){s==null&&(s=Xd());const i=kb(e);sb(i,t,s),sb(A$(e,i),n,s)}function kb(e){let t=0;for(let n=0;n{const a=e===t,c=e1;if(a||c||h)return dt([0],s);const d=Math.abs(Math.ceil((t-e)/n)),m=La(d,s);t{const o=s.reciprocal(t);return i([t]),o},n,null,kl)}const _b=z({reciprocal_:D$});function k$(e){const t=W(e,"x","relu"),n=(i,o)=>(o([t]),t.dtype==="bool"?Ae(t,"int32"):i.relu(t)),s={x:t};return G.runKernelFunc(n,s,null,Fl)}const Ni=z({relu_:k$});function F$(e){const t=W(e,"x","relu6"),n=(i,o)=>(o([t]),t.dtype==="bool"?Ae(t,"int32"):i.relu6(t)),s={x:t};return G.runKernelFunc(n,s,null,Wl)}const Wb=z({relu6_:F$});function _$(e,t){const n=W(e,"x","reverse"),s=a=>{const c=qe(t,n.shape);if(n.rank===0)return kr(n);const h=a.reverse(n,c);return K(h,n.shape)},i={x:n},o={dims:t};return G.runKernelFunc(s,i,null,Sy,o)}const Ts=z({reverse_:_$});function W$(e){const t=W(e,"x","reverse");return A(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),Ts(t,0)}const $$=z({reverse1d_:W$});function U$(e,t){const n=W(e,"x","reverse");return A(n.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${n.rank}.`),Ts(n,t)}const B$=z({reverse2d_:U$});function M$(e,t){const n=W(e,"x","reverse");return A(n.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${n.rank}.`),Ts(n,t)}const P$=z({reverse3d_:M$});function z$(e,t){const n=W(e,"x","reverse");return A(n.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${n.rank}.`),Ts(n,t)}const V$=z({reverse4d_:z$});function G$(e){const t=W(e,"x","round"),n={x:t};return G.runKernelFunc(s=>s.round(t),n,null,$l)}const $b=z({round_:G$});function Y$(e){const t=W(e,"x","rsqrt"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.rsqrt(t);return i([t]),o},n,null,Ul)}const yp=z({rsqrt_:Y$});function H$(e){const t=W(e,"x","selu"),n=(i,o)=>{const a=i.selu(t);return o([t]),a},s={x:t};return 
G.runKernelFunc(n,s,null,Bl)}const bp=z({selu_:H$});function q$(e,t,n,s,i,o=[1,1],a="NHWC"){const c=W(e,"x","separableConv2d"),h=W(t,"depthwiseFilter","separableConv2d"),d=W(n,"pointwiseFilter","separableConv2d");let m=c,f=!1;if(c.rank===3&&(f=!0,m=K(c,[1,c.shape[0],c.shape[1],c.shape[2]])),a==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");A(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),A(h.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${h.rank}.`),A(d.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${h.rank}.`),A(d.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${d.shape[0]}.`),A(d.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${d.shape[1]}.`);const b=h.shape[2],w=h.shape[3];A(d.shape[2]===b*w,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*w}, but got ${d.shape[2]}.`);const L=Co(m,h,s,i,a,o),x=1,v=Ji(L,d,x,"valid",a);return f?K(v,[v.shape[1],v.shape[2],v.shape[3]]):v}const Ub=z({separableConv2d_:q$});async function j$(e,t){const n=W(e,"x","setdiff1d"),s=W(t,"y","setdiff1d");A(n.dtype===s.dtype,()=>`x and y should have the same dtype, but got x (${n.dtype}) and y (${s.dtype}).`),A(n.rank===1,()=>`x should be 1D tensor, but got x (${n.shape}).`),A(s.rank===1,()=>`y should be 1D tensor, but got y (${s.shape}).`);const i=await n.data(),o=await s.data(),a=new Set(o);let c=0;for(let m=0;ms.sign(t),n,null,Pl)}const Bb=z({sign_:K$});function X$(e){const t=W(e,"x","sin"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sin(t);return i([t]),o},n,null,Aa)}const wp=z({sin_:X$});function J$(e){const t=W(e,"x","sinh"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sinh(t);return i([t]),o},n,null,Ml)}const Lp=z({sinh_:J$});function Z$(e,t,n){const s=W(e,"x","slice1d");return A(s.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${s.rank} tensor`),tt(s,[t],[n])}const Sp=z({slice1d_:Z$});function Q$(e,t,n){const s=W(e,"x","slice2d");return A(s.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const Mb=z({slice2d_:Q$});function eU(e,t,n){const s=W(e,"x","slice3d");return A(s.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const Ip=z({slice3d_:eU});function tU(e,t,n){const s=W(e,"x","slice4d");return A(s.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${s.rank} tensor`),tt(s,t,n)}const wh=z({slice4d_:tU});function nU(e,t=-1){const n=W(e,"logits","softmax","float32");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${n.rank} and dim was ${t}`);const s={logits:n},i={dim:t};return G.runKernelFunc((o,a)=>{const c=o.softmax(n,t);return a([c]),c},s,null,Ay,i)}const Fo=z({softmax_:nU});function sU(e){A(e.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${e.dtype}.`);const t={input:e};return G.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=e.as2D(i,s),a=n.fft(o);return a.reshape(e.shape)},t,null,pd)}const Lh=z({fft_:sU});function iU(e){A(e.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${e.dtype}.`);const t={input:e};return G.runKernelFunc(n=>{const s=e.shape[e.shape.length-1],i=e.size/s,o=K(e,[i,s]),a=n.ifft(o);return K(a,e.shape)},t,null,fd)}const qa=z({ifft_:iU});function rU(e){const t=e.shape[e.shape.length-1],n=e.size/t;let s;if(t<=2){const i=K(e,[n,t]);s=qa(i)}else{const i=[n,2*(t-1)],o=K(Ga(e),[n,t]),a=K(dh(e),[n,t]),c=Ts(tt(o,[0,1],[n,t-2]),1),h=X(Ts(tt(a,[0,1],[n,t-2]),1),Ce(-1)),d=Yt([o,c],1),m=Yt([a,h],1),f=K(ji(d,m),[i[0],i[1]]);s=qa(f)}if(s=Ga(s),e.rank===3&&e.shape[0]!==0){const i=s,o=e.shape[0];s=K(s,[o,s.shape[0]/o,s.shape[1]]),i.dispose()}return s}const xp=z({irfft_:rU});function pA(e,t,n=0){let s=[];if(typeof t=="number")A(e.shape[n]%t===0,()=>"Number of splits must evenly divide the axis."),s=new Array(t).fill(e.shape[n]/t);else{const i=t.reduce((a,c)=>(c===-1&&(a+=1),a),0);A(i<=1,()=>"There should be only one negative value in split array.");const o=t.indexOf(-1);if(o!==-1){const a=t.reduce((c,h)=>h>0?c+h:c);t[o]=e.shape[n]-a}A(e.shape[n]===t.reduce((a,c)=>a+c),()=>"The sum of sizes must match the size of the axis dimension."),s=t}return s}function oU(e,t,n=0){const s=W(e,"x","split"),i=(c,h)=>{const d=qe(n,s.shape)[0],m=pA(s,t,d);return c.split(s,m,d)},o={x:s},a={numOrSizeSplits:t,axis:n};return G.runKernelFunc(i,o,null,Ty,a)}const hs=z({split_:oU});function aU(e,t){A(e.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${e.dtype}`);let n=e.shape[e.shape.length-1];const s=e.size/n;let i;if(t!=null&&t0),x=e.shape.map(v=>v);x[e.shape.length-1]=t,i=tt(e,L,x),n=t}else if(t!=null&&t>n){const L=e.shape.map(x=>x);L[e.shape.length-1]=t-n,i=Yt([e,dt(L)],e.shape.length-1),n=t}else i=e;const o=et(i),a=K(ji(i,o),[s,n]),c=Lh(a),h=Math.floor(n/2)+1,d=Ga(c),m=dh(c),f=hs(d,[h,n-h],d.shape.length-1),b=hs(m,[h,n-h],m.shape.length-1),w=i.shape.slice();return w[i.shape.length-1]=h,K(ji(f[0],b[0]),w)}const Sh=z({rfft_:aU});function cU(e){const t=W(e,"x","sqrt"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.sqrt(t);return i([t]),o},n,null,Gl)}const Nn=z({sqrt_:cU});function lU(e,t){let n=W(e,"a","squaredDifference"),s=W(t,"b","squaredDifference");[n,s]=Gt(n,s),nt(n.shape,s.shape);const i=(c,h)=>{const d=c.squaredDifference(n,s);return h([n,s]),d},o={a:n,b:s},a={};return G.runKernelFunc(i,o,null,va,a)}const Ih=z({squaredDifference_:lU});function hU(e,t){const n=W(e,"x","squeeze");return K(n,ln(n.shape,t).newShape)}const Mr=z({squeeze_:hU});function uU(e,t=0){const n=th(e,"tensors","stack");if(A(n.length>=1,()=>"Pass at least one tensor to tf.stack"),n.length===1)return Zn(n[0],t);const s=n[0].rank,i=n[0].shape,o=n[0].dtype;A(t<=s,()=>"Axis must be <= rank of the tensor"),n.forEach(c=>{B(i,c.shape,"All tensors passed to stack must have matching shapes"),A(o===c.dtype,()=>"All tensors passed to stack must have matching dtypes")});const a=n.map(c=>Zn(c,t));return Yt(a,t)}const es=z({stack_:uU});function dU(e,t=0){const n=W(e,"x","step"),s={x:n},i={alpha:t};return 
G.runKernelFunc(o=>o.step(n,t),s,null,ql,i)}const ja=z({step_:dU});function pU(e,t,n,s,i=0,o=0,a=0,c=0,h=0){let d=W(e,"x","stridedSlice");const m=w=>{s==null&&(s=new Array(t.length));const L=qd(a);if(L.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(a!==0&&c!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(a!==0&&h!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");const x=d.rank-t.length,v=qd(c),N=d.shape.slice();v.forEach(Z=>{t[Z]=0,n[Z]=1,N.splice(Z,0,1)}),d=K(d,N);const{begin:O,end:E,strides:k}=FT(d.shape,L,x,t,n,s,i,o,a);t=O,n=E,s=k;const F=qd(h);F.forEach(Z=>{n[Z]=t[Z]+1,s[Z]=1});const U=jd(t,n,s),$=U.filter((Z,ie)=>F.indexOf(ie)===-1),Y=s.every(Z=>Z===1);if(Y)return K(tt(d,t,U),$);const j=w.stridedSlice(d,t,n,s);return K(j,$)},f={x:d},b={begin:t,end:n,strides:s,beginMask:i,endMask:o,ellipsisMask:a,newAxisMask:c,shrinkAxisMask:h};return G.runKernelFunc(m,f,null,Zx,b)}const Pb=z({stridedSlice_:pU});function mU(e){const t=W(e,"x","tan"),n={x:t};return G.runKernelFunc((s,i)=>{const o=s.tan(t);return i([t]),o},n,null,Ca)}const zb=z({tan_:mU});function Pr(e,t,n){if(ne(e),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const s=Ii(e,n);if(s.length!==2&&s.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Er(e,t,s,n)}function Ka(e,t,n){if(ne(e),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");const s=Ii(e,n);if(s.length!==4&&s.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}function fU(e,t,n){if(ne(e),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");const s=Ii(e,n);if(s.length!==5&&s.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return Er(e,t,s,n)}function gU(e,t,n){if(ne(e),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");const s=Ii(e,n);if(s.length!==6&&s.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(s.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||s,Er(e,t,s,n)}function yU(e,t=1,n=!0){const s=W(e,"x","topk");if(s.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");const i=s.shape[s.shape.length-1];if(t>i)throw new Error(`'k' passed to topk() must be <= the last dimension (${i}) but got ${t}`);const o={x:s},a={k:t,sorted:n},[c,h]=G.runKernelFunc(d=>d.topk(s,t,n),o,null,Qx,a);return{values:c,indices:h}}const Vb=z({topk_:yU});function bU(e,t=0,n=1,s,i){if(s!=null&&s==="bool")throw new Error("Unsupported data type $ { dtype }");const o=new Db(t,n,s,!0,i),a=wt(e,s);for(let c=0;c0,()=>"The input tensor must be at least 1D");const s={x:n},i={axis:t},[o,a]=G.runKernel(Cd,s,i);return{values:o,indices:a}}const Tp=z({unique_:wU});function LU(e,t,n){const s=W(e,"x","unsortedSegmentSum"),i=W(t,"segmentIds","unsortedSegmentSum","int32");A(Le(n),()=>"numSegments must be of dtype 
int");const o={x:s,segmentIds:i},a={numSegments:n},c=(h,d)=>{const m=h.unsortedSegmentSum(s,i,n);return d([i]),m};return G.runKernelFunc(c,o,null,Cy,a)}const Gb=z({unsortedSegmentSum_:LU});function SU(e,t=0){const n=W(e,"x","unstack");A(t>=-n.shape.length&&t`Axis = ${t} is not in [-${n.shape.length}, ${n.shape.length})`),t<0&&(t+=n.shape.length);const s={value:n},i={axis:t},o=a=>a.unstack(n,t);return G.runKernelFunc(o,s,null,Ny,i)}const Qs=z({unstack_:SU});function mA(e,t=!0,n,s){return G.makeVariable(e,t,n,s)}function Ap(e,t){const n=[];for(let o=0;o0,()=>"mask cannot be scalar"),B(c.slice(o,o+a),i.shape,"mask's shape must match the first K dimensions of tensor's shape,");let h=1;for(let x=o;x"Shape mismatch in v and x");const h=Ce(1),d=Re(h,c);let m=X(Re(a,o),d);if(i){A(s!=null,()=>"When using zeroDebias: true, step is required.");const f=W(s,"step","movingAverage");m=We(m,Re(h,Zs(c,f)))}return be(o,m)}const zU=z({movingAverage_:PU});function VU(e,t,n){const s=W(e,"indices","scatterND","int32"),i=W(t,"updates","scatterND");Zy(i,s,n);const o=h=>h.scatterND(s,i,n),a={indices:s,updates:i},c={shape:n};return G.runKernelFunc(o,a,null,Xx,c)}const EA=z({scatterND_:VU});function GU(e,t,n,s){if(e.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${e.dtype}.`);if(e.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${e.shape}.`);const i=e.rank>0?e.shape[0]:1,o=e.rank>1?e.shape[1]:1;if(n.length!==o)throw new Error(`outputShape has incorrect number of elements:, ${n.length}, should be: ${o}.`);const a=t.size;if(!(t.rank===0||t.rank===1&&a===i))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${i}]`);if(t.dtype!==s.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function YU(e,t,n,s=0){const i=W(e,"sparseIndices","sparseToDense","int32"),o=W(t,"sparseValues","sparseToDense"),a=W(s,"defaultValue","sparseToDense",o.dtype);GU(i,o,n,a);const c={sparseIndices:i,sparseValues:o,defaultValue:a},h={outputShape:n};return G.runKernelFunc(d=>d.sparseToDense(i,o,n,a),c,null,Jx,h)}const Hb=z({sparseToDense_:YU});function HU(e,t){const n=W(t,"indices","gatherND","int32"),s=W(e,"x","gatherND"),i=a=>a.gatherND(s,n),o={params:s,indices:n};return G.runKernelFunc(i,o,null,$x)}const DA=z({gatherND_:HU});function qU(e,t){if(t==null)return e.shape.slice();if(ae(e.shape,t))return t;if(e.shape.length===t.length){const n=[];for(let s=0;s`x has to be a floating point tensor since it's going to be scaled, but got a ${i.dtype} tensor instead.`),A(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return e instanceof ee?i.clone():i;const o=qU(i,n),a=1-t,c=We(Ma(be(ko(o,0,1,"float32",s),a)),a);return X(i,c)}const kA=z({dropout_:jU});function FA(e){return Math.floor(Math.pow(2,Math.ceil(Math.log(e)/Math.log(2))))}function qb(e,t,n){const s=1-e%2,i=new Float32Array(e);for(let o=0;o1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${s.rank}`),A(s.rank-1===i.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${s.rank} and targets rank ${i.rank}`),B(s.shape.slice(0,s.shape.length-1),i.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");const o=s.shape[s.shape.length-1];A(n>0&&n<=o,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${o}), but got ${n}`);const a=await s.data(),c=await 
i.data(),[h,d]=[a.length/o,o],m=bt("bool",h);for(let f=0;fv.value-x.value),m[f]=0;for(let x=0;x`Error in conv2dDerFilter: input must be rank 4, but got shape ${c.shape}.`),A(h.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${h.shape}.`),A(n.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${n}.`);const d=o==="NHWC"?c.shape[3]:c.shape[1],m=o==="NHWC"?h.shape[3]:h.shape[1];A(d===n[2],()=>`Error in conv2dDerFilter: depth of input ${d}) must match input depth in filter (${n[2]}.`),A(m===n[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${n[3]}).`),a!=null&&A(Le(i),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${a} but got pad ${i}.`);const f=L=>{const x=1,v=Wr(o),N=kn(c.shape,n,s,x,i,a,!1,v);return L.conv2dDerFilter(c,h,N)},b={x:c,dy:h},w={strides:s,pad:i,dataFormat:o,dimRoundingMode:a,filterShape:n};return G.runKernelFunc(f,b,null,Xg,w)}const jb=z({conv2DBackpropFilter_:JU});function Np(e,t,n){if(n==null||n==="linear")return e;if(n==="relu")return X(e,ja(t));throw new Error(`Cannot compute gradient for fused activation ${n}.`)}function Cp(e,t){let n=t;const s=pn(e.shape,t.shape);return s.length>0&&(n=$e(n,s)),K(n,e.shape)}function Rp(e,t,n){if(t==="linear")return e;if(t==="relu")return Ni(e);if(t==="elu")return Ua(e);if(t==="relu6")return Wb(e);if(t==="prelu")return yh(e,n);throw new Error(`Unknown fused activation ${t}.`)}const Op=(e,t)=>{const n=e>0;return!n||t==="linear"};function ZU({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(h=h||"linear",Op(G.state.gradientDepth,h)===!1){let F=Ji(e,t,n,s,i,o,a);return c!=null&&(F=be(F,c)),Rp(F,h,d)}const m=W(e,"x","conv2d"),f=W(t,"filter","conv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),A(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),A(f.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${f.rank}.`),a!=null&&A(Le(s),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${s}.`),A(b.shape[3]===f.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${f.shape[2]}.`),A(cn(n,o),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),A(i==="NHWC",()=>`Error in conv2d: got dataFormat of ${i} but only NHWC is currently supported.`);const L=kn(b.shape,f.shape,n,o,s,a);let x;c!=null&&(x=W(c,"bias","fused conv2d"),[x]=Gt(x,m),nt(L.outShape,x.shape));let v;d!=null&&(v=W(d,"prelu weights","fused conv2d"));const N=(F,U)=>{const[$,Y,j,Z]=U,ie=Np(F,j,h);A(_r(o),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${o}'`);const de=wb(Y.shape,ie,$,n,s),he=jb(Y,ie,$.shape,n,s),ue=[de,he];if(Z!=null){const me=Cp(Z,ie);ue.push(me)}return ue},O=F=>{const U=F.fusedConv2d({input:b,filter:f,convInfo:L,bias:x,activation:h,preluActivationWeights:v});return U},E={x:b,filter:f,bias:x,preluActivationWeights:v},k={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const F=Ai((U,$,Y)=>{let j=G.runKernelFunc(O,E,null,Dd,k);return Y([$,U,j]),w&&(j=K(j,[j.shape[1],j.shape[2],j.shape[3]])),{value:j,gradFunc:N}});return F(b,f)}else{const F=Ai((U,$,Y,j)=>{let Z=G.runKernelFunc(O,E,null,Dd,k);return j([$,U,Z,Y]),w&&(Z=K(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:N}});return F(b,f,x)}}const Kb=z({fusedConv2d_:ZU});function QU(e,t,n,s,i,o=[1,1],a){let c=e;e.rank===3&&(c=K(e,[1,e.shape[0],e.shape[1],e.shape[2]]));let h=t;h.rank===3&&(h=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const d=b=>{const w=kn(e.shape,n,s,o,i,a,!0);return b.depthwiseConv2DDerFilter(c,h,w)},m={x:c,dy:h},f={strides:s,pad:i,dimRoundingMode:a,dilations:o,filterShape:n};return G.runKernelFunc(d,m,null,ey,f)}const _A=z({depthwiseConv2dNativeBackpropFilter_:QU});function eB(e,t,n,s,i,o=[1,1],a){let c=t,h=!1;t.rank===3&&(h=!0,c=K(t,[1,t.shape[0],t.shape[1],t.shape[2]]));const d=w=>{const L=kn(e,n.shape,s,o,i,a,!0);return w.depthwiseConv2DDerInput(c,n,L)},m={dy:c,filter:n},f={strides:s,pad:i,dimRoundingMode:a,dilations:o,inputShape:e},b=G.runKernelFunc(d,m,null,ty,f);return h?K(b,[b.shape[1],b.shape[2],b.shape[3]]):b}const WA=z({depthwiseConv2dNativeBackpropInput_:eB});function tB({x:e,filter:t,strides:n,pad:s,dataFormat:i="NHWC",dilations:o=[1,1],dimRoundingMode:a,bias:c,activation:h="linear",preluActivationWeights:d}){if(Op(G.state.gradientDepth,h)===!1){let F=Co(e,t,n,s,i,o,a);return c!=null&&(F=be(F,c)),Rp(F,h,d)}const m=W(e,"x","depthwiseConv2d"),f=W(t,"filter","depthwiseConv2d");let b=m,w=!1;m.rank===3&&(w=!0,b=K(m,[1,m.shape[0],m.shape[1],m.shape[2]])),A(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),A(f.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${f.rank}.`),A(b.shape[3]===f.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${f.shape[2]}.`),o==null&&(o=[1,1]),A(cn(n,o),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${o}'`),a!=null&&A(Le(s),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${a} but got pad ${s}.`);const L=kn(b.shape,f.shape,n,o,s,a,!0);let x;c!=null&&(x=W(c,"bias","fused conv2d"),[x]=Gt(x,m),nt(L.outShape,x.shape));let v;d!=null&&(v=W(d,"prelu weights","fused depthwiseConv2d"));const N=(F,U)=>{A(_r(o),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${o}'`);const[$,Y,j,Z]=U,ie=Np(F,j,h),de=WA(Y.shape,ie,$,n,s,o,a),he=_A(Y,ie,$.shape,n,s,o,a);if(Z!=null){const ue=Cp(x,ie);return[de,he,ue]}return[de,he]},O=F=>{const U=F.fusedDepthwiseConv2D({input:b,filter:f,convInfo:L,bias:x,activation:h,preluActivationWeights:v});return U},E={x:b,filter:f,bias:x,preluActivationWeights:v},k={strides:n,pad:s,dataFormat:i,dilations:o,dimRoundingMode:a,activation:h};if(c==null){const F=Ai((U,$,Y)=>{let j=G.runKernelFunc(O,E,null,kd,k);return Y([$,U,j]),w&&(j=K(j,[j.shape[1],j.shape[2],j.shape[3]])),{value:j,gradFunc:N}});return F(b,f)}else{const F=Ai((U,$,Y,j)=>{let Z=G.runKernelFunc(O,E,null,kd,k);return j([$,U,Z,Y]),w&&(Z=K(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:N}});return F(b,f,x)}}const $A=z({fusedDepthwiseConv2d_:tB});function nB({a:e,b:t,transposeA:n=!1,transposeB:s=!1,bias:i,activation:o="linear",preluActivationWeights:a}){if(Op(G.state.gradientDepth,o)===!1){let Z=ct(e,t,n,s);return i!=null&&(Z=be(Z,i)),Rp(Z,o,a)}let c=W(e,"a","fused matMul"),h=W(t,"b","fused matMul");[c,h]=Gt(c,h);const d=n?c.shape[c.rank-2]:c.shape[c.rank-1],m=s?h.shape[h.rank-1]:h.shape[h.rank-2],f=n?c.shape[c.rank-1]:c.shape[c.rank-2],b=s?h.shape[h.rank-2]:h.shape[h.rank-1],w=c.shape.slice(0,-2),L=h.shape.slice(0,-2),x=P(w),v=P(L);A(c.rank>=2&&h.rank>=2&&c.rank===h.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${c.rank} and ${h.rank}.`),A(ae(w,L),()=>`Error in fused matMul: outer dimensions (${w}) and (${L}) of Tensors with shapes ${c.shape} and ${h.shape} must match.`),A(d===m,()=>`Error in fused matMul: inner shapes (${d}) and (${m}) of Tensors with shapes ${c.shape} and ${h.shape} and transposeA=${n} and transposeB=${s} must match.`);const N=c.shape.slice(0,-2).concat([f,b]),O=n?K(c,[x,d,f]):K(c,[x,f,d]),E=s?K(h,[v,b,m]):K(h,[v,m,b]);let k;i!=null&&(k=W(i,"bias","fused matMul"),[k]=Gt(k,c),nt(N,k.shape));let F;a!=null&&(F=W(a,"prelu weights","fused matMul"));const U=(Z,ie)=>{const[de,he,ue,me]=ie,ce=Np(K(Z,ue.shape),ue,o);let ye,pe;if(!n&&!s?(ye=ct(ce,he,!1,!0),pe=ct(de,ce,!0,!1)):!n&&s?(ye=ct(ce,he,!1,!1),pe=ct(ce,de,!0,!1)):n&&!s?(ye=ct(he,ce,!1,!0),pe=ct(de,ce,!1,!1)):(ye=ct(he,ce,!0,!0),pe=ct(ce,de,!0,!0)),i!=null){const we=Cp(me,ce);return[ye,pe,we]}else return[ye,pe]},$=Z=>{const ie=Z.fusedBatchMatMul({a:O,b:E,transposeA:n,transposeB:s,bias:k,activation:o,preluActivationWeights:F});return ie},Y={a:O,b:E,bias:k,preluActivationWeights:F},j={transposeA:n,transposeB:s,activation:o};if(i==null){const Z=Ai((ie,de,he)=>{const ue=G.runKernelFunc($,Y,null,Ed,j);return he([ie,de,ue]),{value:K(ue,N),gradFunc:U}});return Z(O,E)}else{const Z=Ai((ie,de,he,ue)=>{const me=G.runKernelFunc($,Y,null,Ed,j);return ue([ie,de,me,he]),{value:K(me,N),gradFunc:U}});return Z(O,E,k)}}const Ep=z({fusedMatMul_:nB});var sB=Object.freeze({__proto__:null,conv2d:Kb,depthwiseConv2d:$A,matMul:Ep});function iB(e){return qb(e,.54,.46)}const rB=z({hammingWindow_:iB});function oB(e){return qb(e,.5,.5)}const UA=z({hannWindow_:oB});function aB(e,t,n,s=!1,i=0){let o=0;const a=[];for(;o+t<=e.size;)a.push(tt(e,o,t)),o+=n;if(s)for(;o`Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`),A(c.rank===2&&c.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${d},4] but had shape ${c.shape}.`),A(h.rank===1&&h.shape[0]===d,()=>`Error in cropAndResize: boxInd must be have size [${d}] but had shape ${c.shape}.`),A(s.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length 
${s.length}.`),A(s[0]>=1&&s[1]>=1,()=>`cropSize must be atleast [1,1], but was ${s}`),A(i==="bilinear"||i==="nearest",()=>`method must be bilinear or nearest, but was ${i}`);const m=L=>L.cropAndResize(a,c,h,s,i,o),f={image:a,boxes:c,boxInd:h},b={method:i,extrapolationValue:o,cropSize:s},w=G.runKernelFunc(m,f,null,Dx,b);return w}const uB=z({cropAndResize_:hB});function dB(e){const t=W(e,"image","flipLeftRight","float32");A(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);const n={image:t},s=G.runKernel(md,n,{});return s}const pB=z({flipLeftRight_:dB});function mB(e,t,n=0,s=.5){const i=W(e,"image","rotateWithOffset","float32");A(i.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${i.rank}.`);const o={image:i},a={radians:t,fillValue:n,center:s},c=G.runKernel(Od,o,a);return c}const fB=z({rotateWithOffset_:mB});function Xa(e,t,n,s,i,o){s==null&&(s=.5),i==null&&(i=Number.NEGATIVE_INFINITY),o==null&&(o=0);const a=e.shape[0];return n=Math.min(n,a),A(0<=s&&s<=1,()=>`iouThreshold must be in [0, 1], but was '${s}'`),A(e.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${e.rank}'`),A(e.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${e.shape[1]}`),A(t.rank===1,()=>"scores must be a 1D tensor"),A(t.shape[0]===a,()=>`scores has incompatible shape with boxes. Expected ${a}, but was ${t.shape[0]}`),A(0<=o&&o<=1,()=>`softNmsSigma must be in [0, 1], but was '${o}'`),{maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o}}function gB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const o=W(e,"boxes","nonMaxSuppression"),a=W(t,"scores","nonMaxSuppression"),c=Xa(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h={maxOutputSize:n,iouThreshold:s,scoreThreshold:i};return G.runKernelFunc(d=>d.nonMaxSuppression(o,a,n,s,i),{boxes:o,scores:a},null,fy,h)}const yB=z({nonMaxSuppression_:gB});function bB(e,t,n){const s=wB(e,t,n),i=s<0?-(s+1):s;e.splice(i,0,t)}function wB(e,t,n){return SB(e,t,n||LB)}function LB(e,t){return e>t?1:e>>1);const c=n(t,e[o]);c>0?s=o+1:(i=o,a=!c)}return a?s:-s-1}function Dp(e,t,n,s,i){return Xb(e,t,n,s,i,0).selectedIndices}function kp(e,t,n,s,i,o){return Xb(e,t,n,s,i,0,!1,o,!0)}function Fp(e,t,n,s,i,o){return Xb(e,t,n,s,i,o,!0)}function Xb(e,t,n,s,i,o,a=!1,c=!1,h=!1){const d=[];for(let v=0;vi&&d.push({score:t[v],boxIndex:v,suppressBeginIndex:0});d.sort(MA);const m=o>0?-.5/o:0,f=[],b=[];for(;f.length0;){const v=d.pop(),{score:N,boxIndex:O,suppressBeginIndex:E}=v;if(N=E;--F){const U=IB(e,O,f[F]);if(U>=s){k=!0;break}if(v.score=v.score*xB(s,m,U),v.score<=i)break}v.suppressBeginIndex=f.length,k||(v.score===N?(f.push(O),b.push(v.score)):v.score>i&&bB(d,v,MA))}const w=f.length,L=n-w;c&&L>0&&(f.push(...new Array(L).fill(0)),b.push(...new Array(L).fill(0)));const x={selectedIndices:ls(f,"int32")};return a&&(x.selectedScores=ls(b,"float32")),h&&(x.validOutputs=Ce(w,"int32")),x}function IB(e,t,n){const s=e.subarray(t*4,t*4+4),i=e.subarray(n*4,n*4+4),o=Math.min(s[0],s[2]),a=Math.min(s[1],s[3]),c=Math.max(s[0],s[2]),h=Math.max(s[1],s[3]),d=Math.min(i[0],i[2]),m=Math.min(i[1],i[3]),f=Math.max(i[0],i[2]),b=Math.max(i[1],i[3]),w=(c-o)*(h-a),L=(f-d)*(b-m);if(w<=0||L<=0)return 0;const x=Math.max(o,d),v=Math.max(a,m),N=Math.min(c,f),O=Math.min(h,b),E=Math.max(N-x,0)*Math.max(O-v,0);return E/(w+L-E)}function xB(e,t,n){const s=Math.exp(t*n*n);return n<=e?s:0}function MA(e,t){return e.score-t.score||e.score===t.score&&t.boxIndex-e.boxIndex}async function TB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY){const 
o=W(e,"boxes","nonMaxSuppressionAsync"),a=W(t,"scores","nonMaxSuppressionAsync"),c=Xa(o,a,n,s,i);n=c.maxOutputSize,s=c.iouThreshold,i=c.scoreThreshold;const h=await Promise.all([o.data(),a.data()]),d=h[0],m=h[1],f=Dp(d,m,n,s,i);return o!==e&&o.dispose(),a!==t&&a.dispose(),f}const AB=TB;function vB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Xa(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d={boxes:a,scores:c},m={maxOutputSize:n,iouThreshold:s,scoreThreshold:i,softNmsSigma:o},f=G.runKernel(Sd,d,m);return{selectedIndices:f[0],selectedScores:f[1]}}const NB=z({nonMaxSuppressionWithScore_:vB});async function CB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=0){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Xa(a,c,n,s,i,o);n=h.maxOutputSize,s=h.iouThreshold,i=h.scoreThreshold,o=h.softNmsSigma;const d=await Promise.all([a.data(),c.data()]),m=d[0],f=d[1],b=Fp(m,f,n,s,i,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),b}const RB=CB;function OB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppression"),c=W(t,"scores","nonMaxSuppression"),h=Xa(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,f=h.scoreThreshold,b={boxes:a,scores:c},w={maxOutputSize:d,iouThreshold:m,scoreThreshold:f,padToMaxOutputSize:o},L=G.runKernel(Ld,b,w);return{selectedIndices:L[0],validOutputs:L[1]}}const EB=z({nonMaxSuppressionPadded_:OB});async function DB(e,t,n,s=.5,i=Number.NEGATIVE_INFINITY,o=!1){const a=W(e,"boxes","nonMaxSuppressionAsync"),c=W(t,"scores","nonMaxSuppressionAsync"),h=Xa(a,c,n,s,i,null),d=h.maxOutputSize,m=h.iouThreshold,f=h.scoreThreshold,[b,w]=await Promise.all([a.data(),c.data()]),L=kp(b,w,d,m,f,o);return a!==e&&a.dispose(),c!==t&&c.dispose(),L}const kB=DB;function FB(e,t,n=!1){const s=W(e,"images","resizeBilinear");A(s.rank===3||s.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${s.rank}.`),A(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h=(b,w)=>(w([i]),b.resizeBilinear(i,a,c,n)),d={images:i},m={alignCorners:n,size:t},f=G.runKernelFunc(h,d,null,Ly,m);return o?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const PA=z({resizeBilinear_:FB});function _B(e,t,n=!1){const s=W(e,"images","resizeNearestNeighbor");A(s.rank===3||s.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${s.rank}.`),A(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),A(s.dtype==="float32"||s.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let i=s,o=!1;s.rank===3&&(o=!0,i=K(s,[1,s.shape[0],s.shape[1],s.shape[2]]));const[a,c]=t,h={images:i},d={alignCorners:n,size:t},m=(b,w)=>(w([i]),b.resizeNearestNeighbor(i,a,c,n)),f=G.runKernelFunc(m,h,null,wy,d);return o?K(f,[f.shape[1],f.shape[2],f.shape[3]]):f}const zA=z({resizeNearestNeighbor_:_B});function WB(e,t,n){A(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),A(n%1===0,()=>`bandPart(): numUpper must be an integer, got ${n}.`);const s=W(e,"a","bandPart");A(s.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${s.rank}.`);const i=s.shape,[o,a]=s.shape.slice(-2);if(!(t<=o))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${o}).`);if(!(n<=a))throw new Error(`bandPart(): numUpper (${n}) must not be greater than the 
number of columns (${a}).`);t<0&&(t=o),n<0&&(n=a);const c=K(bh(0,o,1,"int32"),[-1,1]),h=bh(0,a,1,"int32"),d=Re(c,h),m=Us(Ur(d,Ce(+t,"int32")),Zi(d,Ce(-n,"int32"))),f=dt([o,a],s.dtype);return K(es(Qs(K(s,[-1,o,a])).map(b=>Bn(m,b,f))),i)}const $B=z({bandPart_:WB});function UB(e){let t;if(Array.isArray(e)){t=!1,A(e!=null&&e.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const i=e[0].shape[0];for(let o=1;o`Gram-Schmidt: Non-unique lengths found in the input vectors: (${e[o].shape[0]} vs. ${i})`)}else t=!0,e=hs(e,e.shape[0],0).map(i=>Mr(i,[0]));A(e.length<=e[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${e.length}) exceeds number of dimensions (${e[0].shape[0]}).`);const n=[],s=e;for(let i=0;i{let o=s[i];if(i>0)for(let a=0;a=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${e.rank}`),e.rank===2)return VA(e,t);{const n=e.shape.slice(0,e.shape.length-2).reduce((h,d)=>h*d),s=Qs(K(e,[n,e.shape[e.shape.length-2],e.shape[e.shape.length-1]]),0),i=[],o=[];s.forEach(h=>{const[d,m]=VA(h,t);i.push(d),o.push(m)});const a=K(es(i,0),e.shape),c=K(es(o,0),e.shape);return[a,c]}}function VA(e,t=!1){return G.tidy(()=>{A(e.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${e.shape.length}D Tensor.`);const n=e.shape[0],s=e.shape[1];let i=cp(n),o=kr(e);const a=Pr([[1]],[1,1]);let c=kr(a);const h=n>=s?s:n;for(let d=0;d{const w=tt(o,[d,d],[n-d,1]),L=vp(w),x=tt(o,[d,d],[1,1]),v=Bn(xs(x,0),Pr([[-1]]),Pr([[1]])),N=Re(x,X(v,L)),O=We(w,N);O.shape[0]===1?c=kr(a):c=Yt([a,tt(O,[1,0],[O.shape[0]-1,O.shape[1]])],0);const E=Ht(We(ct(v,N),L)),k=tt(o,[d,0],[n-d,s]),F=X(E,c),U=Ye(c);if(d===0)o=Re(k,ct(F,ct(U,k)));else{const j=Re(k,ct(F,ct(U,k)));o=Yt([tt(o,[0,0],[d,s]),j],0)}const $=Ye(F),Y=tt(i,[0,d],[n,i.shape[1]-d]);if(d===0)i=Re(Y,ct(ct(Y,c),$));else{const j=Re(Y,ct(ct(Y,c),$));i=Yt([tt(i,[0,0],[n,d]),j],1)}return[c,o,i]}),He([m,f,b])}return!t&&n>s&&(i=tt(i,[0,0],[n,s]),o=tt(o,[0,0],[s,s])),[i,o]})}const PB=z({qr_:MB});(function(e){e[e.NONE=0]="NONE",e[e.MEAN=1]="MEAN",e[e.SUM=2]="SUM",e[e.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(r.Reduction||(r.Reduction={}));function zB(e,t,n=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const s=W(e,"losses","computeWeightedLoss");let i=null;t!=null&&(i=W(t,"weights","computeWeightedLoss"));const o=i==null?s:X(s,i);if(n===r.Reduction.NONE)return o;if(n===r.Reduction.SUM)return $e(o);if(n===r.Reduction.MEAN){if(i==null)return qt(o);{const a=s.size/i.size,c=We($e(o),$e(i));return a>1?We(c,Ce(a)):c}}if(n===r.Reduction.SUM_BY_NONZERO_WEIGHTS){if(i==null)return We($e(o),Ce(s.size));{const a=X(i,Js(s.shape)),c=Ae($e(Br(a,Ce(0))),"float32");return We($e(o),c)}}throw Error(`Unknown reduction: ${n}`)}const Qi=z({computeWeightedLoss_:zB});function VB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","absoluteDifference"),o=W(t,"predictions","absoluteDifference");let a=null;n!=null&&(a=W(n,"weights","absoluteDifference")),B(i.shape,o.shape,"Error in absoluteDifference: ");const c=dn(Re(i,o));return Qi(c,a,s)}const GB=z({absoluteDifference_:VB});function YB(e,t,n,s,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","cosineDistance"),a=W(t,"predictions","cosineDistance");let c=null;s!=null&&(c=W(s,"weights","cosineDistance")),B(o.shape,a.shape,"Error in cosineDistance: ");const h=Ce(1),d=Re(h,$e(X(o,a),n,!0));return Qi(d,c,i)}const HB=z({cosineDistance_:YB});function qB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let i=W(e,"labels","hingeLoss");const o=W(t,"predictions","hingeLoss");let 
a=null;n!=null&&(a=W(n,"weights","hingeLoss")),B(i.shape,o.shape,"Error in hingeLoss: ");const c=Ce(1);i=Re(X(Ce(2),i),c);const h=Ni(Re(c,X(i,o)));return Qi(h,a,s)}const jB=z({hingeLoss_:qB});function KB(e,t,n,s=1,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","huberLoss"),a=W(t,"predictions","huberLoss");let c=null;n!=null&&(c=W(n,"weights","huberLoss")),B(o.shape,a.shape,"Error in huberLoss: ");const h=Ce(s),d=dn(Re(a,o)),m=Oo(d,h),f=Re(d,m),b=be(X(Ce(.5),At(m)),X(h,f));return Qi(b,c,i)}const XB=z({huberLoss_:KB});function JB(e,t,n,s=1e-7,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const o=W(e,"labels","logLoss"),a=W(t,"predictions","logLoss");let c=null;n!=null&&(c=W(n,"weights","logLoss")),B(o.shape,a.shape,"Error in logLoss: ");const h=Ce(1),d=Ce(s),m=Ht(X(o,cs(be(a,d)))),f=X(Re(h,o),cs(be(Re(h,a),d))),b=Re(m,f);return Qi(b,c,i)}const ZB=z({logLoss_:JB});function QB(e,t,n,s=r.Reduction.SUM_BY_NONZERO_WEIGHTS){const i=W(e,"labels","meanSquaredError"),o=W(t,"predictions","meanSquaredError");let a=null;n!=null&&(a=W(n,"weights","meanSquaredError")),B(i.shape,o.shape,"Error in meanSquaredError: ");const c=Ih(i,o);return Qi(c,a,s)}const eM=z({meanSquaredError_:QB});function tM(e,t){const n=W(e,"labels","sigmoidCrossEntropyWithLogits"),s=W(t,"logits","sigmoidCrossEntropyWithLogits");B(n.shape,s.shape,"Error in sigmoidCrossEntropyWithLogits: ");const i=Ni(s),o=X(s,n),a=hp(Is(Ht(dn(s))));return be(Re(i,o),a)}function nM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"multiClassLabels","sigmoidCrossEntropy");const a=W(t,"logits","sigmoidCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","sigmoidCrossEntropy")),B(o.shape,a.shape,"Error in sigmoidCrossEntropy: "),s>0){const d=Ce(s),m=Ce(1),f=Ce(.5);o=be(X(o,Re(m,d)),X(f,d))}const h=tM(o,a);return Qi(h,c,i)}const sM=z({sigmoidCrossEntropy_:nM});function iM(e,t,n=-1){if(n===-1&&(n=t.rank-1),n!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${n}`);const s=Ai((i,o,a)=>{const c=!0,h=Rb(o,[n],c),d=Re(Ae(o,"float32"),h);a([i,d]);const m=Ht(X(d,i)),f=$e(m,[n]),b=(w,L)=>{const[x,v]=L,N=vn(w.shape,[n]);return[X(K(w,N),Re(Ae(x,"float32"),Is(v))),X(K(w,N),Re(Is(v),Ae(x,"float32")))]};return{value:f,gradFunc:b}});return s(e,t)}function rM(e,t,n,s=0,i=r.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=W(e,"onehotLabels","softmaxCrossEntropy");const a=W(t,"logits","softmaxCrossEntropy");let c=null;if(n!=null&&(c=W(n,"weights","softmaxCrossEntropy")),B(o.shape,a.shape,"Error in softmaxCrossEntropy: "),s>0){const d=Ce(s),m=Ce(1),f=Ce(o.shape[1]);o=be(X(o,Re(m,d)),We(d,f))}const h=iM(o,a);return Qi(h,c,i)}const oM=z({softmaxCrossEntropy_:rM});const aM={fft:Lh,ifft:qa,rfft:Sh,irfft:xp},cM={hammingWindow:rB,hannWindow:UA,frame:BA,stft:lB},zr={flipLeftRight:pB,resizeNearestNeighbor:zA,resizeBilinear:PA,rotateWithOffset:fB,cropAndResize:uB,nonMaxSuppression:yB,nonMaxSuppressionAsync:AB,nonMaxSuppressionWithScore:NB,nonMaxSuppressionWithScoreAsync:RB,nonMaxSuppressionPadded:EB,nonMaxSuppressionPaddedAsync:kB},GA={bandPart:$B,gramSchmidt:BB,qr:PB},lM={absoluteDifference:GB,computeWeightedLoss:Qi,cosineDistance:HB,hingeLoss:jB,huberLoss:XB,logLoss:ZB,meanSquaredError:eM,sigmoidCrossEntropy:sM,softmaxCrossEntropy:oM};class er extends Ao{minimize(e,t=!1,n){const{value:s,grads:i}=this.computeGradients(e,n);if(n!=null){const o=n.map(a=>({name:a.name,tensor:i[a.name]}));this.applyGradients(o)}else this.applyGradients(i);return He(i),t?s:(s.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(e,t){return Cb(e,t)}dispose(){this.iterations_!=null&&He(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Ce(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(e){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(e){return this.iterations_=(await e[0].tensor.data())[0],e.slice(1)}}Object.defineProperty(er,Symbol.hasInstance,{value:e=>e.minimize!=null&&e.computeGradients!=null&&e.applyGradients!=null});class Th extends er{constructor(e,t,n=null){super();this.learningRate=e,this.rho=t,this.epsilon=n,this.accumulatedGrads=[],this.accumulatedUpdates=[],n==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n],o=!1;this.accumulatedGrads[s]==null&&(this.accumulatedGrads[s]={originalName:`${n}/accum_grad`,variable:Q(()=>et(i).variable(o))}),this.accumulatedUpdates[s]==null&&(this.accumulatedUpdates[s]={originalName:`${n}/accum_var`,variable:Q(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedGrads[s].variable,h=this.accumulatedUpdates[s].variable;Q(()=>{const d=be(X(c,this.rho),X(At(a),1-this.rho)),m=X(We(Nn(be(h,this.epsilon)),Nn(be(c,this.epsilon))),a),f=be(X(h,this.rho),X(At(m),1-this.rho));c.assign(d),h.assign(f);const b=be(X(m,-this.learningRate),i);i.assign(b)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(He(this.accumulatedGrads.map(e=>e.variable)),He(this.accumulatedUpdates.map(e=>e.variable)))}async getWeights(){const e=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=e.length/2,n=!1;this.accumulatedGrads=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedUpdates=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.rho,t.epsilon)}}Th.className="Adadelta",fe(Th);class Ah extends er{constructor(e,t=.1){super();this.learningRate=e,this.initialAccumulatorValue=t,this.accumulatedGrads=[]}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n];if(this.accumulatedGrads[s]==null){const c=!1;this.accumulatedGrads[s]={originalName:`${n}/accumulator`,variable:Q(()=>Ba(i.shape,this.initialAccumulatorValue).variable(c))}}const o=Array.isArray(e)?e[s].tensor:e[n];if(o==null)return;const a=this.accumulatedGrads[s].variable;Q(()=>{const c=be(a,At(o));a.assign(c);const h=be(X(We(o,Nn(be(c,G.backend.epsilon()))),-this.learningRate),i);i.assign(h)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&He(this.accumulatedGrads.map(e=>e.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulatedGrads=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(e,t){return new e(t.learningRate,t.initialAccumulatorValue)}}Ah.className="Adagrad",fe(Ah);class vh extends er{constructor(e,t,n,s=null){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],Q(()=>{this.accBeta1=Ce(t).variable(),this.accBeta2=Ce(n).variable()}),s==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);Q(()=>{const n=Re(1,this.accBeta1),s=Re(1,this.accBeta2);t.forEach((i,o)=>{const a=G.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:Q(()=>et(a).variable(c))}),this.accumulatedSecondMoment[o]==null&&(this.accumulatedSecondMoment[o]={originalName:`${i}/v`,variable:Q(()=>et(a).variable(c))});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedSecondMoment[o].variable,f=be(X(d,this.beta1),X(h,1-this.beta1)),b=be(X(m,this.beta2),X(At(h),1-this.beta2)),w=We(f,n),L=We(b,s);d.assign(f),m.assign(b);const x=be(X(We(w,be(Nn(L),this.epsilon)),-this.learningRate),a);a.assign(x)}),this.accBeta1.assign(X(this.accBeta1,this.beta1)),this.accBeta2.assign(X(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedSecondMoment!=null&&He(this.accumulatedSecondMoment.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await 
this.extractIterations(e),Q(()=>{this.accBeta1.assign(Zs(this.beta1,this.iterations_+1)),this.accBeta2.assign(Zs(this.beta2,this.iterations_+1))});const t=e.length/2,n=!1;this.accumulatedFirstMoment=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedSecondMoment=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon)}}vh.className="Adam",fe(vh);class Nh extends er{constructor(e,t,n,s=null,i=0){super();this.learningRate=e,this.beta1=t,this.beta2=n,this.epsilon=s,this.decay=i,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],Q(()=>{this.iteration=Ce(0).variable(),this.accBeta1=Ce(t).variable()}),s==null&&(this.epsilon=G.backend.epsilon())}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);Q(()=>{const n=Re(1,this.accBeta1),s=We(-this.learningRate,be(X(this.iteration,this.decay),1));t.forEach((i,o)=>{const a=G.registeredVariables[i],c=!1;this.accumulatedFirstMoment[o]==null&&(this.accumulatedFirstMoment[o]={originalName:`${i}/m`,variable:et(a).variable(c)}),this.accumulatedWeightedInfNorm[o]==null&&(this.accumulatedWeightedInfNorm[o]={originalName:`${i}/v`,variable:et(a).variable(c)});const h=Array.isArray(e)?e[o].tensor:e[i];if(h==null)return;const d=this.accumulatedFirstMoment[o].variable,m=this.accumulatedWeightedInfNorm[o].variable,f=be(X(d,this.beta1),X(h,1-this.beta1)),b=X(m,this.beta2),w=dn(h),L=$s(b,w);d.assign(f),m.assign(L);const x=be(X(We(s,n),We(f,be(L,this.epsilon))),a);a.assign(x)}),this.iteration.assign(be(this.iteration,1)),this.accBeta1.assign(X(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&He(this.accumulatedFirstMoment.map(e=>e.variable)),this.accumulatedWeightedInfNorm!=null&&He(this.accumulatedWeightedInfNorm.map(e=>e.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(e){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(e,t){return new e(t.learningRate,t.beta1,t.beta2,t.epsilon,t.decay)}}Nh.className="Adamax",fe(Nh);class Ja extends er{constructor(e){super();this.learningRate=e,this.setLearningRate(e)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=Array.isArray(e)?e[s].tensor:e[n];if(i==null)return;const o=G.registeredVariables[n];Q(()=>{const a=be(X(this.c,i),o);o.assign(a)})}),this.incrementIterations()}setLearningRate(e){this.learningRate=e,this.c!=null&&this.c.dispose(),this.c=bn(Ce(-e))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(e){if(e=await this.extractIterations(e),e.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(e,t){return new e(t.learningRate)}}Ja.className="SGD",fe(Ja);class Ch extends Ja{constructor(e,t,n=!1){super(e);this.learningRate=e,this.momentum=t,this.useNesterov=n,this.accumulations=[],this.m=Ce(this.momentum)}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const 
i=G.registeredVariables[n];if(this.accumulations[s]==null){const c=!1;this.accumulations[s]={originalName:`${n}/momentum`,variable:Q(()=>et(i).variable(c))}}const o=this.accumulations[s].variable,a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;Q(()=>{let c;const h=be(X(this.m,o),a);this.useNesterov?c=be(X(this.c,be(a,X(h,this.m))),i):c=be(X(this.c,h),i),o.assign(h),i.assign(c)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&He(this.accumulations.map(e=>e.variable))}setMomentum(e){this.momentum=e}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(e){e=await this.extractIterations(e);const t=!1;this.accumulations=e.map(n=>({originalName:n.name,variable:n.tensor.variable(t)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(e,t){return new e(t.learningRate,t.momentum,t.useNesterov)}}Ch.className="Momentum",fe(Ch);class Rh extends er{constructor(e,t=.9,n=0,s=null,i=!1){super();if(this.learningRate=e,this.decay=t,this.momentum=n,this.epsilon=s,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=i,s==null&&(this.epsilon=G.backend.epsilon()),e==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(e){const t=Array.isArray(e)?e.map(n=>n.name):Object.keys(e);t.forEach((n,s)=>{const i=G.registeredVariables[n],o=!1;this.accumulatedMeanSquares[s]==null&&(this.accumulatedMeanSquares[s]={originalName:`${n}/rms`,variable:Q(()=>et(i).variable(o))}),this.accumulatedMoments[s]==null&&(this.accumulatedMoments[s]={originalName:`${n}/momentum`,variable:Q(()=>et(i).variable(o))}),this.accumulatedMeanGrads[s]==null&&this.centered&&(this.accumulatedMeanGrads[s]={originalName:`${n}/mg`,variable:Q(()=>et(i).variable(o))});const a=Array.isArray(e)?e[s].tensor:e[n];if(a==null)return;const c=this.accumulatedMeanSquares[s].variable,h=this.accumulatedMoments[s].variable;Q(()=>{const d=be(X(c,this.decay),X(At(a),1-this.decay));if(this.centered){const m=this.accumulatedMeanGrads[s].variable,f=be(X(m,this.decay),X(a,1-this.decay)),b=We(X(a,this.learningRate),Nn(Re(d,be(At(f),this.epsilon)))),w=be(X(h,this.momentum),b);c.assign(d),m.assign(f),h.assign(w);const L=Re(i,w);i.assign(L)}else{const m=be(X(c,this.decay),X(At(a),1-this.decay)),f=be(X(h,this.momentum),We(X(a,this.learningRate),Nn(be(m,this.epsilon))));c.assign(m),h.assign(f);const b=Re(i,f);i.assign(b)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&He(this.accumulatedMeanSquares.map(e=>e.variable)),this.accumulatedMeanGrads!=null&&this.centered&&He(this.accumulatedMeanGrads.map(e=>e.variable)),this.accumulatedMoments!=null&&He(this.accumulatedMoments.map(e=>e.variable))}async getWeights(){const e=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&e.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(e.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(e){e=await this.extractIterations(e);const 
t=this.centered?e.length/3:e.length/2,n=!1;this.accumulatedMeanSquares=e.slice(0,t).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.accumulatedMoments=e.slice(t,t*2).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})),this.centered&&(this.accumulatedMeanGrads=e.slice(t*2,t*3).map(s=>({originalName:s.name,variable:s.tensor.variable(n)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(e,t){return new e(t.learningRate,t.decay,t.momentum,t.epsilon,t.centered)}}Rh.className="RMSProp",fe(Rh);class _o{static sgd(e){return new Ja(e)}static momentum(e,t,n=!1){return new Ch(e,t,n)}static rmsprop(e,t=.9,n=0,s=null,i=!1){return new Rh(e,t,n,s,i)}static adam(e=.001,t=.9,n=.999,s=null){return new vh(e,t,n,s)}static adadelta(e=.001,t=.95,n=null){return new Th(e,t,n)}static adamax(e=.002,t=.9,n=.999,s=null,i=0){return new Nh(e,t,n,s,i)}static adagrad(e,t=.1){return new Ah(e,t)}}const Wo={sgd:_o.sgd,momentum:_o.momentum,adadelta:_o.adadelta,adagrad:_o.adagrad,rmsprop:_o.rmsprop,adamax:_o.adamax,adam:_o.adam};const hM=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:e=>e())();function _p(){return new Promise(e=>hM(()=>e()))}function Jb(e,t,n){const s=n*(typeof e=="number"?e:e[0]),i=t*(typeof e=="number"?e:e[1]);return[s,i]}function Oh(e,t,n,s=!0){let i=[];if(s)i=i.concat(t.slice(0)),i.push(e[0]/n),i=i.concat(e.slice(1));else{i=i.concat(e[0]);const o=t.length;for(let a=0;a=t*2+1||a%2===1?o.push(a):i.push(a);s.push(...i),s.push(0),s.push(...o)}return s}function Dh(e,t,n,s=!0){const i=[];s?i.push(e[0]/n):i.push(e[0]*n);for(let o=1;o{const a=[...i];a[n]=o;const c=tt(e,s,a);return s[n]+=o,c})}function hw(e,t){const n=new Array(e.rank);for(let i=0;iE.value-O.value);const x=f*s,v=h.subarray(x,x+s),N=d.subarray(x,x+s);for(let O=0;O{const[n]=t;return{x:()=>X(e,ja(Ae(n,"float32"),-1))}}};const fM={kernelName:ol,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=At(Ae(n,"float32")),i=Nn(Re(Ce(1),s));return Ht(We(e,i))}}}};const gM={kernelName:al,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Nn(Re(At(Ae(n,"float32")),1));return We(e,s)}}}};const yM={kernelName:wo,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const h=pn(n.shape,i);return h.length>0&&(c=$e(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=pn(s.shape,i);return h.length>0&&(c=$e(c,h)),K(c,s.shape)};return{a:o,b:a}}};const bM={kernelName:Gg,saveAllInputs:!0,gradFunc:(e,t)=>{const n={};return t.forEach((s,i)=>{n[i]=()=>e.clone()}),n}};const wM={kernelName:Yg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const LM={kernelName:Hg,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>et(n)}}};const SM={kernelName:cl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Nn(Re(Ce(1),At(Ae(n,"float32")))))}}};const IM={kernelName:ll,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=Nn(be(Ce(1),At(Ae(n,"float32"))));return We(e,s)}}}};const xM={kernelName:nd,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=be(At(n),At(s));let h=X(e,We(s,c));const d=pn(n.shape,i);return d.length>0&&(h=$e(h,d)),K(h,n.shape)},a=()=>{const c=be(At(n),At(s));let h=Ht(X(e,We(n,c)));const d=pn(s.shape,i);return d.length>0&&(h=$e(h,d)),K(h,s.shape)};return{a:o,b:a}}};const 
TM={kernelName:hl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,be(At(Ae(n,"float32")),1))}}};const AM={kernelName:ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Re(Ce(1),At(Ae(n,"float32"))))}}};function vM(e,t,n,s,i=[1,1,1],o,a){const c=W(e,"dy","avgPool3dBackprop"),h=W(t,"input","avgPool3dBackprop");let d=c,m=h,f=!1;h.rank===4&&(f=!0,d=K(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]]),m=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]])),A(d.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${d.rank}.`),A(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),A(cn(s,i),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${s} and dilations '${i}'`),a!=null&&A(Le(o),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const b=v=>{const N=oh(m.shape,n,s,i,o,a);return v.avgPool3dBackprop(d,m,N)},w={dy:d,input:m},L={filterSize:n,strides:s,dilations:i,pad:o,dimRoundingMode:a},x=G.runKernelFunc(b,w,null,Ex,L);return f?K(x,[x.shape[1],x.shape[2],x.shape[3],x.shape[4]]):x}const NM=z({avgPool3dBackprop_:vM});const CM={kernelName:qg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,dilations:a,pad:c,dimRoundingMode:h}=n,d=a==null?[1,1,1]:a;return{x:()=>NM(e,s,i,o,d,c,h)}}};function RM(e,t,n,s,i){const o=W(e,"dy","avgPoolBackprop"),a=W(t,"input","avgPoolBackprop");A(a.rank===o.rank,()=>`Rank of input (${a.rank}) does not match rank of dy (${o.rank})`);let c=a,h=o,d=!1;a.rank===3&&(d=!0,c=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),h=K(o,[1,o.shape[0],o.shape[1],o.shape[2]])),A(h.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${h.rank}.`),A(c.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${c.rank}.`);const m=L=>{const x=Un(c.shape,n,s,1,i);return L.avgPoolBackprop(h,c,x)},f={dy:h,input:c},b={filterSize:n,strides:s,pad:i},w=G.runKernelFunc(m,f,null,sd,b);return d?K(w,[w.shape[1],w.shape[2],w.shape[3]]):w}const OM=z({avgPoolBackprop_:RM});const EM={kernelName:dl,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{filterSize:i,strides:o,pad:a}=n;return{x:()=>OM(e,s,i,o,a)}}};const DM={kernelName:id,inputsToSave:["a","b"],gradFunc:(e,t,n)=>{const[s,i]=t,{transposeA:o,transposeB:a}=n;return!o&&!a?{a:()=>ct(e,i,!1,!0),b:()=>ct(s,e,!0,!1)}:!o&&a?{a:()=>ct(e,i,!1,!1),b:()=>ct(e,s,!0,!1)}:o&&!a?{a:()=>ct(i,e,!1,!0),b:()=>ct(s,e,!1,!1)}:{a:()=>ct(i,e,!0,!0),b:()=>ct(e,s,!0,!0)}}};const kM={kernelName:jg,gradFunc:(e,t,n)=>{const{blockShape:s,crops:i}=n;return{x:()=>gh(e,s,i)}}};const FM={kernelName:Kg,gradFunc:(e,t,n)=>{const s=n,i=s.inputShape,o=s.shape,a=Array.from(o);for(let h=i.length-1;h>=0;h--)if(i[h]===o[h])a[h]=1;else if(i[h]!==1)throw new Error(`broadcastTo(): [${i}] cannot be broadcast to [${o}].`);const c=[];for(let h=0;h1&&c.push(h);return{x:()=>$e(e,c,!0)}}};const _M={kernelName:Sa,gradFunc:e=>({x:()=>e.clone()})};const WM={kernelName:pl,gradFunc:e=>({x:()=>et(e)})};const $M={kernelName:ml,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{clipValueMin:i,clipValueMax:o}=n;return{x:()=>Bn(Us(Zi(s,i),Ur(s,o)),e,et(e))}}};const UM={kernelName:fl,saveAllInputs:!0,gradFunc:(e,t,n)=>{const s=t.map(h=>h.shape),{axis:i}=n,o=qe(i,t[0].shape)[0],a=s.map(h=>h[o]),c=hs(e,a,o);return c.map(h=>()=>h)}};const BM={kernelName:od,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{dilations:o,strides:a,pad:c,dataFormat:h}=n;return A(_r(o),()=>`Error 
in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${o}'`),{x:()=>wb(s.shape,e,i,a,c,h),filter:()=>jb(s,e,i.shape,a,c,h)}}};const MM={kernelName:ad,inputsToSave:["dy","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,{strides:o,pad:a,dataFormat:c,dimRoundingMode:h}=n;return{dy:()=>Ji(e,i,o,a,c,1,h),filter:()=>jb(e,s,i.shape,o,a,c,h)}}};function PM(e,t,n,s,i){let o=e;e.rank===4&&(o=K(e,[1,e.shape[0],e.shape[1],e.shape[2],e.shape[3]]));let a=t;a.rank===4&&(a=K(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),A(o.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${o.shape}.`),A(a.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${a.shape}.`),A(n.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${n}.`),A(o.shape[4]===n[3],()=>`Error in conv3dDerFilter: depth of input ${o.shape[4]}) must match input depth in filter (${n[3]}.`),A(a.shape[4]===n[4],()=>`Error in conv3dDerFilter: depth of dy (${a.shape[4]}) must match output depth for filter (${n[4]}).`);const c=m=>{const f=1,b=Fr(o.shape,n,s,f,i);return m.conv3dDerFilter(o,a,b)},h={x:o,dy:a},d={strides:s,pad:i,filterShape:n};return G.runKernelFunc(c,h,null,Jg,d)}const zM=z({conv3DBackpropFilter_:PM});const VM={kernelName:cd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o}=n;A(_r(s),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);const[a,c]=t;return{x:()=>eA(a.shape,e,c,i,o),filter:()=>zM(a,e,c.shape,i,o)}}};const GM={kernelName:Ia,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Ht(wp(Ae(n,"float32"))),e)}}};const YM={kernelName:gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Lp(Ae(n,"float32")),e)}}};const HM={kernelName:Qg,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{axis:i,exclusive:o,reverse:a}=n;return{x:()=>{const c=Xn([i],s.rank);let h=ap(e,i,o,!a);return c!=null&&(h=Ye(h,c)),h}}}};const qM={kernelName:ld,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const{dilations:s,strides:i,pad:o,dimRoundingMode:a}=n,c=s==null?[1,1]:s;A(_r(c),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${c}'`);const[h,d]=t;return A(h.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${h.rank}.`),A(d.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${d.rank}.`),A(h.shape[3]===d.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${h.shape[3]}) must match the inChannels dimension in filter ${d.shape[2]}.`),A(cn(i,c),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${i} and dilations '${c}'.`),a!=null&&A(Le(o),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`),{x:()=>WA(h.shape,e,d,i,o,s,a),filter:()=>_A(h,e,d.shape,i,o,s,a)}}};const jM={kernelName:hd,inputsToSave:["x","filter"],gradFunc:(e,t,n)=>{const[s,i]=t,o={x:s,filter:i,dy:e},a={x:s,filter:i,dy:e};return{x:()=>G.runKernel(ud,o,n),filter:()=>G.runKernel(dd,a,n)}}};const KM={kernelName:xa,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=We(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{let c=X(e,Ae(n,"float32"));const h=pn(s.shape,i);h.length>0&&(c=K($e(c,h),s.shape));const d=At(s);return Ht(We(c,Ae(d,"float32")))};return{a:o,b:a}}};const XM={kernelName:yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t,s=o=>o.eluDer(e,n),i={dy:e,y:n};return{x:()=>G.runKernelFunc(s,i,null,_x)}}};const JM={kernelName:bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Is(Ht(At(n))),2/Math.sqrt(Math.PI));return{x:()=>X(e,s)}}};const ZM={kernelName:wl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,n)}}};const QM={kernelName:Ll,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Is(n))}}};const eP={kernelName:Sl,gradFunc:e=>({x:()=>et(e)})};const tP={kernelName:sy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=We(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{let c=X(e,Ae(n,"float32"));const h=pn(s.shape,i);h.length>0&&(c=K($e(c,h),s.shape));const d=At(s);return Ht(We(c,Ae(d,"float32")))};return{a:o,b:a}}};const nP={kernelName:Il,inputsToSave:["x","mean","variance","scale"],gradFunc:(e,t,n)=>{const{varianceEpsilon:s}=n,[i,o,a,c]=t,h=c==null?Ce(1):c,d=pn(o.shape,i.shape),m=[];if(o.rank===1){for(let k=0;ko.rank===1?K(X(X(e,$r(K(w,[1,1,1,o.shape[0]]),m)),h),i.shape):K(X(X(e,w),h),i.shape),v=()=>{let k=X(X(w,Ce(-1)),b);return o.rank===1&&(k=$e(k,d)),K(k,o.shape)},N=()=>{let k=X(X(L,f),b);return o.rank===1&&(k=$e(k,d)),K(k,o.shape)},O=()=>{const k=X(f,w);let F=X(e,k);return o.rank===1&&(F=$e(F,d)),K(F,o.shape)},E=()=>{let k=e;return o.rank===1&&(k=$e(k,d)),K(k,o.shape)};return{x,mean:v,variance:N,scale:O,offset:E}}};const sP={kernelName:iy,inputsToSave:["x","indices"],gradFunc:(e,t,n)=>{const[s,i]=t,{axis:o}=n,a=qe(o,s.shape)[0],c=()=>{const h=s.shape,d=i.size,m=h.slice(0,a),f=m.length,b=h.slice(o,h.length).slice(1),w=b.length,L=ZA(0,f),x=ZA(f+1,f+1+w),v=QA([m,[d],b]),N=K(e,v),O=K(i,[d]),E=QA([[f],L,x]),k=Ye(N,E);let F=Gb(k,O,s.shape[a]);const U=sh(E);return F=Ye(F,U),F};return{x:c,indices:()=>i}}};function ZA(e,t){const n=[];for(let s=e;s{const[n,s]=t;return{a:()=>et(n),b:()=>et(s)}}};const rP={kernelName:xl,gradFunc:e=>({x:()=>Ae(e,"float32")})};const oP={kernelName:Tl,gradFunc:e=>({x:()=>et(e)})};const aP={kernelName:Al,gradFunc:e=>({x:()=>et(e)})};const cP={kernelName:vl,gradFunc:e=>({x:()=>et(e)})};const lP={kernelName:Cl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,be(n,1))}}};const hP={kernelName:Nl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Ae(n,"float32"))}}};const uP={kernelName:oy,inputsToSave:[],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{axis:i}=n;return{logits:()=>{const o=!0,a=Is(s);return Re(e,X($e(e,i,o),a))}}}};function dP(e,t,n,s=5,i=1,o=1,a=.5){const c=m=>m.LRNGrad(n,e,t,s,i,o,a),h={x:e,y:t,dy:n},d={depthRadius:s,bias:i,alpha:o,beta:a};return G.runKernelFunc(c,h,null,Gx,d)}const 
pP=z({localResponseNormalizationBackprop_:dP});const mP={kernelName:ay,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{depthRadius:o,bias:a,alpha:c,beta:h}=n;return{x:()=>pP(s,i,e,o,a,c,h)}}};function ev(e,t,n,s){return t.rank{const i=X(e,Ae(Xs(n,t),e.dtype));return i}}}const tv={kernelName:Rl,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{reductionIndices:i}=s,o=t[0],a=t[1],c=qe(i,o.shape),h=ev(e,a,o,c);return{x:()=>h.x()}}};const fP={kernelName:cy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,Ae(Zi(n,s),"float32")),o=()=>X(e,Ae(ph(n,s),"float32"));return{a:i,b:o}}};function gP(e,t,n,s,i,o=[1,1,1],a,c){const h=W(e,"dy","maxPool3dBackprop"),d=W(t,"input","maxPool3dBackprop"),m=W(n,"output","maxPool3dBackprop");let f=h,b=d,w=m,L=!1;d.rank===4&&(L=!0,f=K(h,[1,h.shape[0],h.shape[1],h.shape[2],h.shape[3]]),b=K(d,[1,d.shape[0],d.shape[1],d.shape[2],d.shape[3]]),w=K(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),A(f.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${f.rank}.`),A(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),A(w.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${w.rank}.`),A(cn(i,o),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${i} and dilations '${o}'`),c!=null&&A(Le(a),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${a}.`);const x=E=>{const k=oh(b.shape,s,i,o,a,c);return E.maxPool3dBackprop(f,b,w,k)},v={dy:f,input:b,output:w},N={filterSize:s,strides:i,dilations:o,pad:a,dimRoundingMode:c},O=G.runKernelFunc(x,v,null,Yx,N);return L?K(O,[O.shape[1],O.shape[2],O.shape[3],O.shape[4]]):O}const yP=z({maxPool3dBackprop_:gP});const bP={kernelName:ly,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,dilations:c,pad:h,dimRoundingMode:d}=n,m=c==null?[1,1,1]:c;return{x:()=>yP(e,s,i,o,a,m,h,d)}}};function wP(e,t,n,s,i,o,a){const c=W(e,"dy","maxPoolBackprop"),h=W(t,"input","maxPoolBackprop"),d=W(n,"output","maxPoolBackprop");A(h.rank===c.rank,()=>`Rank of input (${h.rank}) does not match rank of dy (${c.rank})`),A(c.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${c.rank}.`),A(h.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${h.rank}.`),a!=null&&A(Le(o),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${a} but got pad ${o}.`);const m=w=>{const L=Un(h.shape,s,i,1,o,a);return w.maxPoolBackprop(c,h,d,L)},f={dy:c,input:h,output:d},b={filterSize:s,strides:i,pad:o,dimRoundingMode:a};return G.runKernelFunc(m,f,null,bd,b)}const LP=z({maxPoolBackprop_:wP});const SP={kernelName:Ol,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s,i]=t,{filterSize:o,strides:a,pad:c}=n;return{x:()=>LP(e,s,i,o,a,c)}}};const IP={kernelName:uy,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(e,t,n)=>{const s=n,{axis:i}=s,[o,a]=t,c=qe(i,o.shape),h=ev(e,a,o,c);return{x:()=>h.x()}}};const xP={kernelName:dy,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=()=>X(e,Ae(Ur(n,s),"float32")),o=()=>X(e,Ae(xs(n,s),"float32"));return{a:i,b:o}}};const TP={kernelName:El,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>tt(e,o,s.shape)}}};const AP={kernelName:py,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=pn(n.shape,i);return 
c.length>0?K($e(e,c),n.shape):e},a=()=>{const c=X(e,Ht(Ma(We(n,s)))),h=pn(s.shape,i);return h.length>0?K($e(c,h),s.shape):c};return{a:o,b:a}}};const vP={kernelName:Ta,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{const c=X(e,Ae(s,"float32")),h=pn(n.shape,i);return h.length>0?K($e(c,h),n.shape):c},a=()=>{const c=X(e,Ae(n,"float32")),h=pn(s.shape,i);return h.length>0?K($e(c,h),s.shape):c};return{a:o,b:a}}};const NP={kernelName:my,gradFunc:e=>({x:()=>Ht(e)})};const CP={kernelName:yy,inputsToSave:["indices"],gradFunc:(e,t)=>{const n=t[0];return{indices:()=>dt(n.shape,"float32")}}};const RP={kernelName:gy,gradFunc:e=>({x:()=>et(e)})};const nv={kernelName:Id,inputsToSave:["x"],gradFunc:(e,t,n)=>{const s=t[0],{paddings:i}=n,o=i.map(a=>a[0]);return{x:()=>tt(e,o,s.shape)}}};const OP={kernelName:by,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(e,t)=>{const[n,s,i]=t,o=n,a=s,c=nt(o.shape,a.shape),h=()=>{const m=Ae(a,"float32");let f=X(e,X(m,Zs(o,Re(m,Ce(1)))));const b=pn(o.shape,c);return b.length>0&&(f=$e(f,b)),K(f,o.shape)},d=()=>{const m=xs(o,0),f=Bn(m,cs(o),et(o));let b=X(e,X(i,f));const w=pn(a.shape,c);return w.length>0&&(b=$e(b,w)),K(b,a.shape)};return{a:h,b:d}}};const EP={kernelName:xd,inputsToSave:["x","alpha"],gradFunc:(e,t)=>{const[n,s]=t,i=xs(n,0);return{x:()=>Bn(i,e,X(e,s)),alpha:()=>{let o=Bn(i,et(e),X(e,n));const a=pn(s.shape,e.shape);return a.length>0&&(o=$e(o,a)),K(o,s.shape)}}}};const DP={kernelName:kl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,Ht(At(n)))}}};const kP={kernelName:Wl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t,s=X(Ur(n,6),ja(n));return{x:()=>X(e,Ae(s,"float32"))}}};const FP={kernelName:Fl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ae(ja(n),"float32"))}}};const _P={kernelName:_l,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>K(e,n.shape)}}};const WP={kernelName:Ly,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeBilinearBackprop(e,s,h)},o={images:s},a=()=>G.runKernelFunc(i,o,null,Kx,n);return{images:a}}};const $P={kernelName:wy,inputsToSave:["images"],gradFunc:(e,t,n)=>{const[s]=t,i=c=>{const{alignCorners:h}=n;return c.resizeNearestNeighborBackprop(e,s,h)},o={images:s},a=()=>G.runKernelFunc(i,o,null,jx,n);return{images:a}}};const UP={kernelName:Sy,gradFunc:(e,t,n)=>{const{dims:s}=n,i=qe(s,e.shape);return{x:()=>Ts(e,i)}}};const BP={kernelName:$l,gradFunc:e=>({x:()=>et(e)})};const MP={kernelName:Ul,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>Ht(We(e,X(Zs(n,1.5),2)))}}};const PP={kernelName:Iy,inputsToSave:["condition"],gradFunc:(e,t)=>{const[n]=t;return{condition:()=>Ae(et(n),"float32"),t:()=>X(e,Ae(n,e.dtype)),e:()=>X(e,Ae(mh(n),e.dtype))}}};const zP={kernelName:Bl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>{const s=xs(n,Ce(0)),i=Ce(Wp),o=Ce($p),a=X(e,o),c=X(X(e,i),Is(Ae(n,"float32")));return Bn(s,a,c)}}}};const VP={kernelName:zl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(n,Re(Ce(1),n)))}}};const GP={kernelName:Pl,gradFunc:e=>({x:()=>et(e)})};const YP={kernelName:Aa,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(hh(Ae(n,"float32")),e)}}};const HP={kernelName:Ml,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(op(Ae(n,"float32")),e)}}};const qP={kernelName:Ad,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{begin:i,size:o}=n,a=s.shape,[c,h]=Kd(s,i,o),d=[];for(let m=0;mvi(e,d)}}};const 
jP={kernelName:Ay,outputsToSave:[!0],gradFunc:(e,t,n)=>{const[s]=t,{dim:i}=n,o=!0,a=X(e,s);return{logits:()=>Re(a,X($e(a,[i],o),s))}}};const KP={kernelName:Vl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,Ti(n))}}};const sv={kernelName:vd,gradFunc:(e,t,n)=>{const{blockShape:s,paddings:i}=n;return{x:()=>ch(e,s,i)}}};const iv={kernelName:Ty,gradFunc:(e,t,n)=>{const{axis:s}=n;return{x:()=>Yt(e,s)}}};const XP={kernelName:Gl,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,X(Nn(Ae(n,"float32")),2))}}};const JP={kernelName:Nd,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(e,X(Ae(n,"float32"),2))}}};const ZP={kernelName:va,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=Ce(2),o=()=>X(e,X(i,Re(n,s))),a=()=>X(e,X(i,Re(s,n)));return{a:o,b:a}}};const QP={kernelName:ql,gradFunc:e=>({x:()=>et(e)})};const ez={kernelName:Na,inputsToSave:["a","b"],gradFunc:(e,t)=>{const[n,s]=t,i=nt(n.shape,s.shape),o=()=>{let c=e;const h=pn(n.shape,i);return h.length>0&&(c=$e(c,h)),K(c,n.shape)},a=()=>{let c=e;const h=pn(s.shape,i);return h.length>0&&(c=$e(c,h)),K(Ht(c),s.shape)};return{a:o,b:a}}};const tz={kernelName:xy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,i=s.shape.slice(),{axis:o}=n,a=qe(o,s.shape);a.forEach(d=>{i[d]=1});const c=K(e,i),h=X(c,Js(s.shape,"float32"));return{x:()=>h}}};const nz={kernelName:Ca,inputsToSave:["x"],gradFunc:(e,t)=>{const[n]=t;return{x:()=>We(e,At(hh(n)))}}};const sz={kernelName:Yl,outputsToSave:[!0],gradFunc:(e,t)=>{const[n]=t;return{x:()=>X(Re(Ce(1),At(n)),e)}}};const iz={kernelName:vy,inputsToSave:["x"],gradFunc:(e,t,n)=>{const[s]=t,{reps:i}=n,o=()=>{let a=et(s);if(s.rank===1)for(let c=0;c{const s=n,{perm:i}=s,o=sh(i);return{x:()=>Ye(e,o)}}};const oz={kernelName:Ny,gradFunc:(e,t,n)=>{const s=n,{axis:i}=s;return{value:()=>es(e,i)}}};const az={kernelName:Cy,inputsToSave:["segmentIds"],gradFunc:(e,t)=>{const[n]=t,s=()=>cz(e,n);return{x:s}}};function cz(e,t){const n=$s(t,et(t)),s=Pa(e,n);let i=Zi(t,Ce(0,"int32"));const o=s.rank-i.rank;for(let c=0;c({x:()=>et(e)})};const hz=[mM,fM,gM,yM,bM,wM,LM,SM,IM,xM,TM,AM,CM,EM,DM,kM,FM,_M,WM,$M,UM,MM,BM,VM,GM,YM,HM,qM,jM,KM,XM,JM,ZM,QM,tP,eP,nP,sP,iP,rP,oP,aP,cP,lP,hP,uP,mP,tv,tv,fP,bP,SP,IP,xP,TP,AP,vP,NP,CP,RP,nv,nv,OP,EP,DP,kP,FP,_P,WP,$P,UP,BP,MP,PP,zP,VP,GP,YP,HP,qP,jP,KP,sv,sv,iv,iv,XP,ZP,JP,QP,ez,tz,nz,sz,iz,rz,oz,az,lz];for(const e of hz)eT(e);ee.prototype.abs=function(){return this.throwIfDisposed(),dn(this)};ee.prototype.acos=function(){return this.throwIfDisposed(),ob(this)};ee.prototype.acosh=function(){return this.throwIfDisposed(),ab(this)};ee.prototype.addStrict=function(e){return this.throwIfDisposed(),SA(this,e)};ee.prototype.add=function(e){return this.throwIfDisposed(),be(this,e)};ee.prototype.all=function(e,t){return this.throwIfDisposed(),Qd(this,e,t)};ee.prototype.any=function(e,t){return this.throwIfDisposed(),ih(this,e,t)};ee.prototype.argMax=function(e){return this.throwIfDisposed(),rh(this,e)};ee.prototype.argMin=function(e){return this.throwIfDisposed(),lb(this,e)};ee.prototype.asScalar=function(){return this.throwIfDisposed(),A(this.size===1,()=>"The array must have only 1 element."),K(this,[])};ee.prototype.asType=function(e){return this.throwIfDisposed(),Ae(this,e)};ee.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};ee.prototype.as2D=function(e,t){return this.throwIfDisposed(),K(this,[e,t])};ee.prototype.as3D=function(e,t,n){return this.throwIfDisposed(),K(this,[e,t,n])};ee.prototype.as4D=function(e,t,n,s){return 
this.throwIfDisposed(),K(this,[e,t,n,s])};ee.prototype.as5D=function(e,t,n,s,i){return this.throwIfDisposed(),K(this,[e,t,n,s,i])};ee.prototype.asin=function(){return this.throwIfDisposed(),hb(this)};ee.prototype.asinh=function(){return this.throwIfDisposed(),ub(this)};ee.prototype.atan=function(){return this.throwIfDisposed(),db(this)};ee.prototype.atan2=function(e){return this.throwIfDisposed(),pb(this,e)};ee.prototype.atanh=function(){return this.throwIfDisposed(),mb(this)};ee.prototype.avgPool=function(e,t,n,s){return this.throwIfDisposed(),ah(this,e,t,n,s)};ee.prototype.batchToSpaceND=function(e,t){return this.throwIfDisposed(),ch(this,e,t)};ee.prototype.batchNorm=function(e,t,n,s,i){return this.throwIfDisposed(),No(this,e,t,n,s,i)};ee.prototype.broadcastTo=function(e){return this.throwIfDisposed(),lh(this,e)};ee.prototype.cast=function(e){return this.throwIfDisposed(),Ae(this,e)};ee.prototype.ceil=function(){return this.throwIfDisposed(),bb(this)};ee.prototype.clipByValue=function(e,t){return this.throwIfDisposed(),Jn(this,e,t)};ee.prototype.concat=function(e,t){return this.throwIfDisposed(),e instanceof ee&&(e=[e]),Yt([this,...e],t)};ee.prototype.conv1d=function(e,t,n,s,i,o){return this.throwIfDisposed(),ip(this,e,t,n,s,i,o)};ee.prototype.conv2dTranspose=function(e,t,n,s,i){return this.throwIfDisposed(),rp(this,e,t,n,s,i)};ee.prototype.conv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Ji(this,e,t,n,s,i,o)};ee.prototype.cos=function(){return this.throwIfDisposed(),hh(this)};ee.prototype.cosh=function(){return this.throwIfDisposed(),op(this)};ee.prototype.cumsum=function(e,t,n){return this.throwIfDisposed(),ap(this,e,t,n)};ee.prototype.depthToSpace=function(e,t){return this.throwIfDisposed(),Sb(this,e,t)};ee.prototype.depthwiseConv2D=function(e,t,n,s,i,o){return un("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};ee.prototype.depthwiseConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Co(this,e,t,n,s,i,o)};ee.prototype.dilation2d=function(e,t,n,s,i){return this.throwIfDisposed(),Ib(this,e,t,n,s,i)};ee.prototype.divNoNan=function(e){return this.throwIfDisposed(),xb(this,e)};ee.prototype.divStrict=function(e){return this.throwIfDisposed(),IA(this,e)};ee.prototype.div=function(e){return this.throwIfDisposed(),We(this,e)};ee.prototype.dot=function(e){return this.throwIfDisposed(),tA(this,e)};ee.prototype.elu=function(){return this.throwIfDisposed(),Ua(this)};ee.prototype.equalStrict=function(e){return this.throwIfDisposed(),fA(this,e)};ee.prototype.equal=function(e){return this.throwIfDisposed(),Xs(this,e)};ee.prototype.erf=function(){return this.throwIfDisposed(),Tb(this)};ee.prototype.exp=function(){return this.throwIfDisposed(),Is(this)};ee.prototype.expandDims=function(e){return this.throwIfDisposed(),Zn(this,e)};ee.prototype.expm1=function(){return this.throwIfDisposed(),Ab(this)};ee.prototype.fft=function(){return this.throwIfDisposed(),Lh(this)};ee.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};ee.prototype.floor=function(){return this.throwIfDisposed(),Ma(this)};ee.prototype.floorDiv=function(e){return this.throwIfDisposed(),Zd(this,e)};ee.prototype.gather=function(e,t){return this.throwIfDisposed(),Pa(this,e,t)};ee.prototype.greaterEqualStrict=function(e){return this.throwIfDisposed(),gA(this,e)};ee.prototype.greaterEqual=function(e){return this.throwIfDisposed(),Zi(this,e)};ee.prototype.greaterStrict=function(e){return 
this.throwIfDisposed(),yA(this,e)};ee.prototype.greater=function(e){return this.throwIfDisposed(),xs(this,e)};ee.prototype.ifft=function(){return this.throwIfDisposed(),qa(this)};ee.prototype.irfft=function(){return this.throwIfDisposed(),xp(this)};ee.prototype.isFinite=function(){return this.throwIfDisposed(),sA(this)};ee.prototype.isInf=function(){return this.throwIfDisposed(),iA(this)};ee.prototype.isNaN=function(){return this.throwIfDisposed(),rA(this)};ee.prototype.leakyRelu=function(e){return this.throwIfDisposed(),lp(this,e)};ee.prototype.lessEqualStrict=function(e){return this.throwIfDisposed(),bA(this,e)};ee.prototype.lessEqual=function(e){return this.throwIfDisposed(),Ur(this,e)};ee.prototype.lessStrict=function(e){return this.throwIfDisposed(),wA(this,e)};ee.prototype.less=function(e){return this.throwIfDisposed(),ph(this,e)};ee.prototype.localResponseNormalization=function(e,t,n,s){return this.throwIfDisposed(),Nb(this,e,t,n,s)};ee.prototype.logSigmoid=function(){return this.throwIfDisposed(),aA(this)};ee.prototype.logSoftmax=function(e){return this.throwIfDisposed(),dp(this,e)};ee.prototype.logSumExp=function(e,t){return this.throwIfDisposed(),Rb(this,e,t)};ee.prototype.log=function(){return this.throwIfDisposed(),cs(this)};ee.prototype.log1p=function(){return this.throwIfDisposed(),hp(this)};ee.prototype.logicalAnd=function(e){return this.throwIfDisposed(),Us(this,e)};ee.prototype.logicalNot=function(){return this.throwIfDisposed(),mh(this)};ee.prototype.logicalOr=function(e){return this.throwIfDisposed(),pp(this,e)};ee.prototype.logicalXor=function(e){return this.throwIfDisposed(),cA(this,e)};ee.prototype.matMul=function(e,t,n){return this.throwIfDisposed(),ct(this,e,t,n)};ee.prototype.maxPool=function(e,t,n,s){return this.throwIfDisposed(),fh(this,e,t,n,s)};ee.prototype.max=function(e,t){return this.throwIfDisposed(),Qn(this,e,t)};ee.prototype.maximumStrict=function(e){return this.throwIfDisposed(),xA(this,e)};ee.prototype.maximum=function(e){return this.throwIfDisposed(),$s(this,e)};ee.prototype.mean=function(e,t){return this.throwIfDisposed(),qt(this,e,t)};ee.prototype.min=function(e,t){return this.throwIfDisposed(),Va(this,e,t)};ee.prototype.minimumStrict=function(e){return this.throwIfDisposed(),TA(this,e)};ee.prototype.minimum=function(e){return this.throwIfDisposed(),Oo(this,e)};ee.prototype.mirrorPad=function(e,t){return this.throwIfDisposed(),Eb(this,e,t)};ee.prototype.modStrict=function(e){return this.throwIfDisposed(),AA(this,e)};ee.prototype.mod=function(e){return this.throwIfDisposed(),mp(this,e)};ee.prototype.mulStrict=function(e){return this.throwIfDisposed(),vA(this,e)};ee.prototype.mul=function(e){return this.throwIfDisposed(),X(this,e)};ee.prototype.neg=function(){return this.throwIfDisposed(),Ht(this)};ee.prototype.norm=function(e,t,n){return this.throwIfDisposed(),vp(this,e,t,n)};ee.prototype.notEqualStrict=function(e){return this.throwIfDisposed(),LA(this,e)};ee.prototype.notEqual=function(e){return this.throwIfDisposed(),Br(this,e)};ee.prototype.oneHot=function(e,t=1,n=0){return this.throwIfDisposed(),To(this,e,t,n)};ee.prototype.onesLike=function(){return this.throwIfDisposed(),Fn(this)};ee.prototype.pad=function(e,t){return this.throwIfDisposed(),vi(this,e,t)};ee.prototype.pool=function(e,t,n,s,i){return this.throwIfDisposed(),uA(this,e,t,n,s,i)};ee.prototype.powStrict=function(e){return this.throwIfDisposed(),NA(this,e)};ee.prototype.pow=function(e){return this.throwIfDisposed(),Zs(this,e)};ee.prototype.prelu=function(e){return 
this.throwIfDisposed(),yh(this,e)};ee.prototype.prod=function(e,t){return this.throwIfDisposed(),gp(this,e,t)};ee.prototype.reciprocal=function(){return this.throwIfDisposed(),_b(this)};ee.prototype.relu=function(){return this.throwIfDisposed(),Ni(this)};ee.prototype.relu6=function(){return this.throwIfDisposed(),Wb(this)};ee.prototype.reshapeAs=function(e){return this.throwIfDisposed(),K(this,e.shape)};ee.prototype.reshape=function(e){return this.throwIfDisposed(),K(this,e)};ee.prototype.resizeBilinear=function(e,t){return this.throwIfDisposed(),PA(this,e,t)};ee.prototype.resizeNearestNeighbor=function(e,t){return this.throwIfDisposed(),zA(this,e,t)};ee.prototype.reverse=function(e){return this.throwIfDisposed(),Ts(this,e)};ee.prototype.rfft=function(){return this.throwIfDisposed(),Sh(this)};ee.prototype.round=function(){return this.throwIfDisposed(),$b(this)};ee.prototype.rsqrt=function(){return this.throwIfDisposed(),yp(this)};ee.prototype.selu=function(){return this.throwIfDisposed(),bp(this)};ee.prototype.separableConv2d=function(e,t,n,s,i,o){return this.throwIfDisposed(),Ub(this,e,t,n,s,i,o)};ee.prototype.sigmoid=function(){return this.throwIfDisposed(),Ti(this)};ee.prototype.sign=function(){return this.throwIfDisposed(),Bb(this)};ee.prototype.sin=function(){return this.throwIfDisposed(),wp(this)};ee.prototype.sinh=function(){return this.throwIfDisposed(),Lp(this)};ee.prototype.slice=function(e,t){return this.throwIfDisposed(),tt(this,e,t)};ee.prototype.softmax=function(e){return this.throwIfDisposed(),Fo(this,e)};ee.prototype.softplus=function(){return this.throwIfDisposed(),za(this)};ee.prototype.spaceToBatchND=function(e,t){return this.throwIfDisposed(),gh(this,e,t)};ee.prototype.split=function(e,t){return this.throwIfDisposed(),hs(this,e,t)};ee.prototype.sqrt=function(){return this.throwIfDisposed(),Nn(this)};ee.prototype.square=function(){return this.throwIfDisposed(),At(this)};ee.prototype.squaredDifference=function(e){return this.throwIfDisposed(),Ih(this,e)};ee.prototype.squaredDifferenceStrict=function(e){return this.throwIfDisposed(),CA(this,e)};ee.prototype.squeeze=function(e){return this.throwIfDisposed(),Mr(this,e)};ee.prototype.stack=function(e,t){this.throwIfDisposed();const n=e instanceof ee?[this,e]:[this,...e];return es(n,t)};ee.prototype.step=function(e){return this.throwIfDisposed(),ja(this,e)};ee.prototype.stridedSlice=function(e,t,n,s,i,o,a,c){return this.throwIfDisposed(),Pb(this,e,t,n,s,i,o,a,c)};ee.prototype.subStrict=function(e){return this.throwIfDisposed(),RA(this,e)};ee.prototype.sub=function(e){return this.throwIfDisposed(),Re(this,e)};ee.prototype.sum=function(e,t){return this.throwIfDisposed(),$e(this,e,t)};ee.prototype.tan=function(){return this.throwIfDisposed(),zb(this)};ee.prototype.tanh=function(){return this.throwIfDisposed(),$a(this)};ee.prototype.tile=function(e){return this.throwIfDisposed(),$r(this,e)};ee.prototype.toBool=function(){return this.throwIfDisposed(),Ae(this,"bool")};ee.prototype.toFloat=function(){return this.throwIfDisposed(),Ae(this,"float32")};ee.prototype.toInt=function(){return this.throwIfDisposed(),Ae(this,"int32")};ee.prototype.topk=function(e,t){return this.throwIfDisposed(),Vb(this,e,t)};ee.prototype.transpose=function(e){return this.throwIfDisposed(),Ye(this,e)};ee.prototype.unique=function(e){return this.throwIfDisposed(),Tp(this,e)};ee.prototype.unsortedSegmentSum=function(e,t){return this.throwIfDisposed(),Gb(this,e,t)};ee.prototype.unstack=function(e){return 
this.throwIfDisposed(),Qs(this,e)};ee.prototype.where=function(e,t){return this.throwIfDisposed(),Bn(e,this,t)};ee.prototype.zerosLike=function(){return this.throwIfDisposed(),et(this)};let Up;function mn(){return Up==null&&(Up=GT().epsilon()),Up}function Cee(e){Up=e}function ei(){return"channelsLast"}class nr extends Error{constructor(e){super(e);Object.setPrototypeOf(this,nr.prototype)}}class ti extends Error{constructor(e){super(e);Object.setPrototypeOf(this,ti.prototype)}}class q extends Error{constructor(e){super(e);Object.setPrototypeOf(this,q.prototype)}}class Pe extends Error{constructor(e){super(e);Object.setPrototypeOf(this,Pe.prototype)}}class rv extends Error{constructor(e){super(e);Object.setPrototypeOf(this,rv.prototype)}}class uz extends Error{constructor(e){super(e);Object.setPrototypeOf(this,uz.prototype)}}function $o(e,t){if(Array.isArray(e)){let n=[];for(let s=0;sn.toUpperCase())}let Bs={};function dw(e){if(e==null)return null;const t={};return t.className=e.getClassName(),t.config=e.getConfig(),t}function pw(e){if(e==null||typeof e!="object")return;if(Array.isArray(e))e.forEach(t=>pw(t));else{const t=Object.keys(e);for(const n of t){const s=e[n];s!=null&&typeof s=="object"&&(!Array.isArray(s)&&s.type==="ndarray"&&typeof s.value=="number"?e[n]=s.value:pw(s))}}}function kh(e,t={},n={},s="object",i=!1){if(typeof e=="string"){const o=e;let a;if(o in n)a=n[o];else if(o in Bs)a=Bs[o];else if(a=t[o],a==null)throw new q(`Unknown ${s}: ${e}. This may be due to one of the following reasons: -1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. -2. The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return a}else{const o=e;if(o.className==null||o.config==null)throw new q(`${s}: Improper config format: ${JSON.stringify(o)}. -'className' and 'config' must set.`);const a=o.className;let c,h;if(a in n?[c,h]=n[a]:a in Bs?[c,h]=Bs.className:a in t&&([c,h]=t[a]),c==null)throw new q(`Unknown ${s}: ${a}. This may be due to one of the following reasons: -1. The ${s} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. -2. 
The custom ${s} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(h!=null){const d={};for(const w of Object.keys(Bs))d[w]=Bs[w];for(const w of Object.keys(n))d[w]=n[w];const m=o.config;m.customObjects=d;const f=Object.assign({},Bs);for(const w of Object.keys(n))Bs[w]=n[w];pw(o.config);const b=h(c,o.config,n,i);return Bs=Object.assign({},f),b}else{const d=Object.assign({},Bs);for(const f of Object.keys(n))Bs[f]=n[f];const m=new c(o.config);return Bs=Object.assign({},d),m}}}function dz(e,t){return et?1:0}function Bp(e,t){return-1*dz(e,t)}function Oee(e){switch(e){case"float32":return"float32";default:throw new q(`Invalid dtype: ${e}`)}}function Eee(e,t){if(e==null||t==null)return e===t;if(e.length!==t.length)return!1;for(let n=0;n=0),As(s>=n),Array.isArray(e)&&e.length>=n&&e.length<=s&&e.every(i=>typeof i===t)}function wn(e,t){Array.isArray(e)?(A(e.length>0,()=>`${t} is unexpectedly an empty array.`),e.forEach((n,s)=>wn(n,`element ${s+1} of ${t}`))):A(Number.isInteger(e)&&e>0,()=>`Expected ${t} to be a positive integer, but got ${av(e)}.`)}function av(e){return e===null?"null":Array.isArray(e)?"["+e.map(t=>av(t)).join(",")+"]":typeof e=="string"?`"${e}"`:`${e}`}function mz(e,t){let n=jn(),s;const i=(...o)=>{const a=jn();return a-n0,"arrayOfValues is empty");for(const t of e)As(Array.isArray(t),"one of the values is not an array"),As(t.length>0,"one of the values is empty");return e.reduce((t,n)=>t.length===0?n.map(s=>[s]):n.map(s=>t.map(i=>[...i,s])).reduce((s,i)=>s.concat(i),[]),[])}function fw(e,t){return Q(()=>Nn($e(X(e,e),t,!0)))}class Fh extends Ao{getConfig(){return{}}}class gw extends Fh{constructor(e){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>{const t=fw(e,this.axis),n=Jn(t,0,this.maxValue);return X(e,We(n,be(mn(),t)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}gw.className="MaxNorm",fe(gw);class yw extends Fh{constructor(e){super();this.defaultAxis=0,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>We(e,be(mn(),fw(e,this.axis))))}getConfig(){return{axis:this.axis}}}yw.className="UnitNorm",fe(yw);class bw extends Fh{apply(e){return Ni(e)}}bw.className="NonNeg",fe(bw);class ww extends Fh{constructor(e){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=e.minValue!=null?e.minValue:this.defaultMinValue,this.maxValue=e.maxValue!=null?e.maxValue:this.defaultMaxValue,this.rate=e.rate!=null?e.rate:this.defaultRate,this.axis=e.axis!=null?e.axis:this.defaultAxis}apply(e){return Q(()=>{const t=fw(e,this.axis),n=be(X(this.rate,Jn(t,this.minValue,this.maxValue)),X(1-this.rate,t));return X(e,We(n,be(mn(),t)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}ww.className="MinMaxNorm",fe(ww);const lv={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function fn(e){return dw(e)}function hv(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"constraint")}function gn(e){if(e==null)return null;if(typeof e=="string"){const t=e in lv?lv[e]:e,n={className:t,config:{}};return hv(n)}else return e instanceof Fh?e:hv(e)}function fz(e){return new gw(e)}function gz(e){return new yw(e)}function yz(){return new bw}function bz(e){return new ww(e)}var wz=Object.freeze({__proto__:null,maxNorm:fz,unitNorm:gz,nonNeg:yz,minMaxNorm:bz});const 
Lz=["channelsFirst","channelsLast"],Sz=["valid","same","causal"],Iz=["max","avg"],xz=["sum","mul","concat","ave"],kee=["temporal"];const ec=new Map;function jt(e){Qa(Lz,"DataFormat",e)}function vs(e){Qa(Sz,"PaddingMode",e)}function uv(e){Qa(Iz,"PoolMode",e)}const _h=[],dv="/";function Bo(e,t){_h.push(e);try{const n=t();return _h.pop(),n}catch(n){throw _h.pop(),n}}function Tz(){return _h.length===0?"":_h.join(dv)+dv}function pv(e){if(!fv(e))throw new Error("Not a valid tensor name: '"+e+"'");return Tz()+e}function mv(e){if(!fv(e))throw new Error("Not a valid tensor name: '"+e+"'");ec.has(e)||ec.set(e,0);const t=ec.get(e);if(ec.set(e,ec.get(e)+1),t>0){const n=`${e}_${t}`;return ec.set(n,1),n}else return e}const Az=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function fv(e){return!!e.match(Az)}function vz(e){return e===parseInt(e.toString(),10)}function Gr(e,t,n){t==null&&(t=0),n==null&&(n=e.length);let s=1;for(let i=t;ii-o),n=Math.floor((t.length-1)/2),s=Math.ceil((t.length-1)/2);return n===s?t[n]:(t[n]+t[s])/2}function ni(e,t){if(t0?t.reduce((n,s)=>n*s):1}function Wh(e,t){return e.asType(t)}function $h(e,t=-1){const n=e.shape.slice();return t<0&&(t=n.length+t+1),n.splice(t,0,1),e.reshape(n)}function Rz(e,t){return Q(()=>{if(e.shape.length!==2)throw new q(`repeat() expects a rank-2 tensor, but received a rank-${e.shape.length} tensor.`);const n=$h(e,1);return Iw(n,[1,t,1])})}function Oz(e){const t=[Gr(e.shape)];return e.reshape(t)}function Ez(e){if(e.rank<=1)throw new q(`batchFlatten requires a minimum rank of 2. Got rank: ${e.rank}.`);const t=[e.shape[0],Gr(e.shape,1)];return e.reshape(t)}function Mo(e,t,n){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:return Mb(e,[t,0],[n,e.shape[1]]);case 3:return Ip(e,[t,0,0],[n,e.shape[1],e.shape[2]]);case 4:return wh(e,[t,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3]]);case 5:return tt(e,[t,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4]]);case 6:return tt(e,[t,0,0,0,0,0],[n,e.shape[1],e.shape[2],e.shape[3],e.shape[4],e.shape[5]]);default:throw new q(`sliceAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Lw(e,t,n){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:return Mb(e,[0,t],[e.shape[0],n]);case 3:return Ip(e,[0,0,t],[e.shape[0],e.shape[1],n]);case 4:return wh(e,[0,0,0,t],[e.shape[0],e.shape[1],e.shape[2],n]);default:throw new q(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Pp(e,t,n,s){return Q(()=>{switch(e.rank){case 1:return Sp(e,t,n);case 2:switch(s){case 1:return Mo(e,t,n);case 2:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}case 3:switch(s){case 1:return Mo(e,t,n);case 2:return Ip(e,[0,t,0],[e.shape[0],n,e.shape[2]]);case 3:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}case 4:switch(s){case 1:return Mo(e,t,n);case 2:return wh(e,[0,t,0,0],[e.shape[0],n,e.shape[2],e.shape[3]]);case 3:return wh(e,[0,0,t,0],[e.shape[0],e.shape[1],n,e.shape[3]]);case 4:return Lw(e,t,n);default:throw new q(`The axis is not within the rank of the tensor ${s}`)}default:throw new q(`sliceAlongLastAxis() received an unsupported tensor rank: ${e.rank}`)}})}function Sw(e,t=-1){let n;return t<0&&(n=e[0].rank,n!==0?t=n:t=0),t===e[0].rank&&(t=-1),Yt(e,t)}function yv(e,t){switch(e.rank){case 1:return XT([e,t]);case 2:return JT([e,t],0);case 3:return ZT([e,t],0);case 4:return QT([e,t],0);default:throw new q(`concatAlongFirstAxis() received an unsupported tensor rank: ${e.rank}`)}}function 
Iw(e,t){if(Array.isArray(t)||(t=[t]),e.rank!==t.length)throw new q(`The length of input n (${t.length}) does not match the number of dimensions in input x (${e.rank})`);return $r(e,t)}function zp(e,t=0,n=1,s,i){return Fb(e,t,n,s,i)}function Ci(e,t,n,s){if(e.rank<2||t.rank<2)throw new Pe(`dot requires both inputs to be rank >= 2 but got x shape = ${e.shape} and y shape = ${t.shape}`);if(t.rank>=3){const i=e.shape.slice(-1)[0],o=t.shape.slice(-2)[0];if(i!==o)throw new Pe(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${e.shape} and y shape = ${t.shape}`)}if(e.rank===2&&t.rank===2){const i=!1,o=!1;return Ep({a:e,b:t,transposeA:i,transposeB:o,bias:s?xw(e.rank,s,ei()):null,activation:n})}else{const i=e.shape.slice(),o=i.pop();e=e.reshape([-1,o]);const a=t.shape.slice(),c=a.pop(),h=a.pop(),d=[...a,c],m=Array.from({length:t.rank},(L,x)=>x===0?t.rank-2:x<=t.rank-2?x-1:x);t=t.transpose(m).reshape([h,-1]);const f=[...i,...d],b=!1,w=!1;return Ep({a:e,b:t,transposeA:b,transposeB:w,bias:s?xw(e.rank,s,ei()):null,activation:n}).reshape(f)}}function Mee(e){return Q(()=>{const t=et(e),n=Fn(e);return Bn(Xs(e,t),t,Bn(xs(e,et(e)),n,X(-1,n)))})}function Pee(e,t){return Q(()=>{if(e.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return e=e.toInt(),To(e,t).toFloat()})}function bv(e,t,n){return Q(()=>(Array.isArray(t)?t=ls(t,"int32"):t=t.toInt(),Pa(e,t,n)))}function Uh(e){return X(e,e)}function zee(e,t){return Q(()=>{if(typeof t=="number"&&(t=Ce(Math.round(t),"int32")),t.dtype!=="int32")throw new Pe(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return Zs(e,t)})}function xw(e,t,n){const s=t.shape;if(t.rank!==1&&t.rank!==e)throw new q(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${e}`);if(e===5){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1,1]):t.reshape([1,s[3],s[0],s[1],s[2]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===4){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1,1]):t.reshape([1,s[2],s[0],s[1]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,1,s[0]]):t.reshape([1].concat(s))}else if(e===3){if(n==="channelsFirst")return s.length===1?t.reshape([1,s[0],1]):t.reshape([1,s[1],s[0]]);if(n==="channelsLast")return s.length===1?t.reshape([1,1,s[0]]):t.reshape([1].concat(s))}else if(e<3)return t;throw new q(`Unsupported input rank by biasAdd: ${t.rank}`)}function Ri(e,t,n){return Q(()=>(n==null&&(n=ei()),jt(n),e.add(xw(e.rank,t,n))))}function Dz(e,t=1){if(t!==1)throw new Pe(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return Ua(e)}function kz(e){return Q(()=>We(e,dn(e).add(1)))}function wv(e,t,n,s){return Q(()=>kA(e,t,n,s))}function Fz(e){return Q(()=>{const t=be(.5,X(.2,e));return Jn(t,0,1)})}function Bh(e,t,n=!1){return n?e():t()}const _z=["fanIn","fanOut","fanAvg"],Wz=["normal","uniform","truncatedNormal"],Vee=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function $z(e){Qa(_z,"FanMode",e)}function Uz(e){Qa(Wz,"Distribution",e)}class Ms extends Ao{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class Tw extends Ms{apply(e,t){return dt(e,t)}}Tw.className="Zeros",fe(Tw);class Vp extends Ms{apply(e,t){return Js(e,t)}}Vp.className="Ones",fe(Vp);class Aw extends Ms{constructor(e){super();if(typeof e!="object")throw new q(`Expected argument of type ConstantConfig but got 
${e}`);if(e.value===void 0)throw new q(`config must have value set but got ${e}`);this.value=e.value}apply(e,t){return Q(()=>X(Ce(this.value),Js(e,t)))}getConfig(){return{value:this.value}}}Aw.className="Constant",fe(Aw);class vw extends Ms{constructor(e){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=e.minval||this.DEFAULT_MINVAL,this.maxval=e.maxval||this.DEFAULT_MAXVAL,this.seed=e.seed}apply(e,t){return ko(e,this.minval,this.maxval,t)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}vw.className="RandomUniform",fe(vw);class Nw extends Ms{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`randomNormal does not support dType ${t}.`);return zp(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Nw.className="RandomNormal",fe(Nw);class Cw extends Ms{constructor(e){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=e.mean||this.DEFAULT_MEAN,this.stddev=e.stddev||this.DEFAULT_STDDEV,this.seed=e.seed}apply(e,t){if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`truncatedNormal does not support dType ${t}.`);return xh(e,this.mean,this.stddev,t,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Cw.className="TruncatedNormal",fe(Cw);class Rw extends Ms{constructor(e){super();this.gain=e.gain!=null?e.gain:1}apply(e,t){return Q(()=>{if(e.length!==2||e[0]!==e[1])throw new q("Identity matrix initializer can only be used for 2D square matrices.");return X(this.gain,cp(e[0]))})}getConfig(){return{gain:this.gain}}}Rw.className="Identity",fe(Rw);function Bz(e,t="channelsLast"){let n,s;if(jt(t),e.length===2)n=e[0],s=e[1];else if([3,4,5].indexOf(e.length)!==-1){if(t==="channelsFirst"){const i=Gr(e,2);n=e[1]*i,s=e[0]*i}else if(t==="channelsLast"){const i=Gr(e,0,e.length-2);n=e[e.length-2]*i,s=e[e.length-1]*i}}else{const i=Gr(e);n=Math.sqrt(i),s=Math.sqrt(i)}return[n,s]}class ns extends Ms{constructor(e){super();if(e.scale<0)throw new q(`scale must be a positive float. 
Got: ${e.scale}`);this.scale=e.scale==null?1:e.scale,this.mode=e.mode==null?"fanIn":e.mode,$z(this.mode),this.distribution=e.distribution==null?"normal":e.distribution,Uz(this.distribution),this.seed=e.seed}apply(e,t){const n=Bz(e),s=n[0],i=n[1];let o=this.scale;if(this.mode==="fanIn"?o/=Math.max(1,s):this.mode==="fanOut"?o/=Math.max(1,i):o/=Math.max(1,(s+i)/2),this.distribution==="normal"){const a=Math.sqrt(o);if(t=t||"float32",t!=="float32"&&t!=="int32")throw new Pe(`${this.getClassName()} does not support dType ${t}.`);return xh(e,0,a,t,this.seed)}else{const a=Math.sqrt(3*o);return ko(e,-a,a,t)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}ns.className="VarianceScaling",fe(ns);class Gp extends ns{constructor(e){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Gp.className="GlorotUniform",fe(Gp);class Yp extends ns{constructor(e){super({scale:1,mode:"fanAvg",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Yp.className="GlorotNormal",fe(Yp);class Hp extends ns{constructor(e){super({scale:2,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Hp.className="HeNormal",fe(Hp);class qp extends ns{constructor(e){super({scale:2,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}qp.className="HeUniform",fe(qp);class jp extends ns{constructor(e){super({scale:1,mode:"fanIn",distribution:"normal",seed:e==null?null:e.seed})}getClassName(){return ns.className}}jp.className="LeCunNormal",fe(jp);class Kp extends ns{constructor(e){super({scale:1,mode:"fanIn",distribution:"uniform",seed:e==null?null:e.seed})}getClassName(){return ns.className}}Kp.className="LeCunNormal",fe(Kp);class Ow extends Ms{constructor(e){super();if(this.DEFAULT_GAIN=1,this.gain=e.gain==null?this.DEFAULT_GAIN:e.gain,this.seed=e.seed,this.seed!=null)throw new Pe("Random seed is not implemented for Orthogonal Initializer yet.")}apply(e,t){return Q(()=>{if(e.length<2)throw new Pe("Shape must be at least 2D.");e[0]*e[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${e[0]*e[1]}) elements: Slowness may result.`);const n=e[0]>e[1]?[e[1],e[0]]:e,s=zp(n,0,1,"float32");let i=GA.gramSchmidt(s);return e[0]>e[1]&&(i=i.transpose()),X(this.gain,i)})}getConfig(){return{gain:this.gain,seed:this.seed}}}Ow.className="Orthogonal",fe(Ow);const Lv={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function Sv(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"initializer")}function Kt(e){return dw(e)}function Pt(e){if(typeof e=="string"){const t=e in Lv?Lv[e]:e;if(t==="GlorotNormal")return new Yp;if(t==="GlorotUniform")return new Gp;if(t==="HeNormal")return new Hp;if(t==="HeUniform")return new qp;if(t==="LeCunNormal")return new jp;if(t==="LeCunUniform")return new Kp;{const n={};return n.className=t,n.config={},Sv(n)}}else return e instanceof Ms?e:Sv(e)}function Mz(){return new Tw}function Pz(){return new Vp}function zz(e){return new Aw(e)}function Vz(e){return new vw(e)}function Gz(e){return new Nw(e)}function Yz(e){return new Cw(e)}function 
Hz(e){return new Rw(e)}function qz(e){return new ns(e)}function jz(e){return new Gp(e)}function Kz(e){return new Yp(e)}function Xz(e){return new Hp(e)}function Jz(e){return new qp(e)}function Zz(e){return new jp(e)}function Qz(e){return new Kp(e)}function e3(e){return new Ow(e)}var t3=Object.freeze({__proto__:null,zeros:Mz,ones:Pz,constant:zz,randomUniform:Vz,randomNormal:Gz,truncatedNormal:Yz,identity:Hz,varianceScaling:qz,glorotUniform:jz,glorotNormal:Kz,heNormal:Xz,heUniform:Jz,leCunNormal:Zz,leCunUniform:Qz,orthogonal:e3});let n3=0;function Iv(){return n3++}const Xp={};function Jp(e=""){return e in Xp||(Xp[e]=0),Xp[e]+=1,e+Xp[e].toString()}function Ew(e){return Array.isArray(e)&&Array.isArray(e[0])}function Zp(e){return e.length===0?[]:Array.isArray(e[0])?e:[e]}function Xe(e){let t;if(Array.isArray(e)){if(e.length!==1)throw new q(`Expected Tensor length to be 1; got ${e.length}`);t=e[0]}else t=e;return t}function Nt(e){if(Array.isArray(e)&&Array.isArray(e[0])){if(e.length===1)return e=e,e[0];throw new q(`Expected exactly 1 Shape; got ${e.length}`)}else return e}function Qp(e){let t=0;for(const n of e)n.shape.length===0?t+=1:t+=n.shape.reduce((s,i)=>s*i);return t}const xv="Variable";class si{constructor(e,t="float32",n=xv,s=!0,i=null){this.dtype=t==null?"float32":t,this.shape=e.shape,this.id=Iv(),n=n==null?xv:n,this.originalName=pv(n),this.name=mv(this.originalName),this.trainable_=s,this.constraint=i,this.val=mA(e,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(e){return this.assertNotDisposed(),s3(this.val,e),this.val.id!==e.id&&(this.val.assign(e),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(e){this.trainable_=e,this.val.trainable=e}}function s3(e,t){if(e.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(e.shape)+" vs. 
"+JSON.stringify(t.shape))}function Gee(e,t,n,s){return new si(e,t,n,!0,s)}function Yee(e,t,n){return new si(dt(e),t,n)}function Hee(e,t,n){return new si(et(e),t,n)}function qee(e,t,n){const s=Js(e);return new si(s,t,n)}function jee(e,t,n){const s=Fn(e);return new si(s,t,n)}function Kee(e,t,n){return new si(cp(e),t,n)}function Xee(e,t,n,s,i,o="randomUniform"){return new si(ko(e,t,n,s),s,o)}function Jee(e,t=0,n=1,s,i,o="truncatedNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Pe(`randomNormal does not support dType ${s}.`);return new si(xh(e,t,n,s,i),s,o)}function Zee(e,t=0,n=1,s,i,o="randomNormal"){if(s=s||"float32",s!=="float32"&&s!=="int32")throw new Pe(`randomNormalVariable does not support dType ${s}.`);return new si(Fb(e,t,n,s,i),s,o)}function Qee(e,t){return e.write(t)}function ete(e,t){return e.write(be(e.read(),t))}function tte(e,t){return e.write(Re(e.read(),t))}function Dw(e){return e.map(t=>t.read())}function kw(e){e.forEach(t=>{const n=t[0];n.write(t[1])})}function nte(e,t){const n=t.map(i=>i.read()),s=Cb(e,n);return t.map(i=>s.grads[i.name])}class Ln{constructor(e){this.dtype=e.dtype,this.shape=e.shape,e.shape!=null?this.ndim=e.shape.length:this.ndim=e.ndim,this.maxNDim=e.maxNDim,this.minNDim=e.minNDim,this.axes=e.axes||{}}}class ii{constructor(e,t,n,s,i,o,a){this.dtype=e,this.shape=t,this.sourceLayer=n,this.inputs=s,this.callArgs=i,this.outputTensorIndex=a,this.id=Iv(),o!=null&&(this.originalName=pv(o),this.name=mv(this.originalName)),this.rank=t.length}}let i3=0;class em{constructor(e,t){this.callArgs=t,this.id=i3++,this.outboundLayer=e.outboundLayer,this.inboundLayers=e.inboundLayers,this.nodeIndices=e.nodeIndices,this.tensorIndices=e.tensorIndices,this.inputTensors=e.inputTensors,this.outputTensors=e.outputTensors,this.inputMasks=e.inputMasks,this.outputMasks=e.outputMasks,this.inputShapes=e.inputShapes,this.outputShapes=e.outputShapes;for(const n of e.inboundLayers)n!=null&&n.outboundNodes.push(this);e.outboundLayer.inboundNodes.push(this)}getConfig(){const e=[];for(const t of this.inboundLayers)t!=null?e.push(t.name):e.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:e,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let r3=0;class lt extends Ao{constructor(e={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=r3++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let t=e.name;if(!t){const n=this.getClassName();t=sr(n)+"_"+Jp(n)}if(this.name=t,this.trainable_=e.trainable==null?!0:e.trainable,e.inputShape!=null||e.batchInputShape!=null){let n;if(e.batchInputShape!=null)n=e.batchInputShape;else if(e.inputShape!=null){let i=null;e.batchSize!=null&&(i=e.batchSize),n=[i].concat(e.inputShape)}this.batchInputShape=n;let s=e.dtype;s==null&&(s=e.inputDType),s==null&&(s="float32"),this.dtype=s}e.weights!=null?this.initialWeights=e.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(e,t){return e.name+"_ib-"+t.toString()}getNodeAtIndex(e,t){if(this.inboundNodes.length===0)throw new ti(`The layer has never been called and thus has no defined ${t}.`);if(this.inboundNodes.length<=e)throw new q(`Asked to get ${t} at node ${e}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[e]}getInputAt(e){return 
ts(this.getNodeAtIndex(e,"input").inputTensors)}getOutputAt(e){return ts(this.getNodeAtIndex(e,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new nr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new nr(`Layer ${this.name} is not connected, no input to return.`);return ts(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new nr(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new nr(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return ts(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(e=>e())}get updates(){return this._updates}get built(){return this._built}set built(e){this._built=e}get trainable(){return this.trainable_}set trainable(e){this._trainableWeights.forEach(t=>t.trainable=e),this.trainable_=e}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(e=>e.trainable):[]}set trainableWeights(e){this._trainableWeights=e}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(e=>!e.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(e){this._nonTrainableWeights=e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(e){if(e=Et(e),this.inputSpec==null||this.inputSpec.length===0)return;const t=Et(this.inputSpec);if(e.length!==t.length)throw new q(`Layer ${this.name} expects ${t.length} inputs, but it received ${e.length} input tensors. 
Input received: ${e}`);for(let n=0;ni.maxNDim)throw new q(`Input ${n} is incompatible with layer ${this.name}: expected max_ndim=${i.maxNDim}, found ndim=${o}`);if(i.minNDim!=null&&o=0?a[h]:a[a.length+h];if(d!=null&&[d,null].indexOf(m)===-1)throw new q(`Input ${n} is incompatible with layer ${this.name}: expected axis ${h} of input shape to have value ${d} but got shape ${a}.`)}}if(i.shape!=null)for(let a=0;a{if(!this.built){this.assertInputCompatibility(e);const o=[];for(const a of Et(e))o.push(a.shape);this.build(ts(o)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&i&&(this._refCount=1)}if(this.assertInputCompatibility(e),i){let o=this.call(e,t);const a=Et(o),c=[];for(let h of a)n.indexOf(h)!==-1&&(h=h.clone()),c.push(h);if(o=ts(c),this.activityRegularizer!=null)throw new Pe("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return o}else{const o=o3(e),a=this.computeOutputShape(o);let c;const h=a3(e);if(this.warnOnIncompatibleInputShape(Array.isArray(e)?o[0]:o),a!=null&&a.length>0&&Array.isArray(a[0])?c=a.map((d,m)=>new ii(h,d,this,Et(e),t,this.name,m)):c=new ii(h,a,this,Et(e),t,this.name),this.addInboundNode(e,c,null,null,o,a,t),this._refCount++,this.activityRegularizer!=null)throw new Pe("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}})}warnOnIncompatibleInputShape(e){if(this.batchInputShape==null)return;if(e.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(e)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let t=!1;this.batchInputShape.forEach((n,s)=>{n!=null&&e[s]!=null&&e[s]!==n&&(t=!0)}),t&&console.warn(`The shape of the input tensor (${JSON.stringify(e)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new nr(`The layer ${this.name} has never been called and thus has no defined output shape.`);const e=[];for(const t of this.inboundNodes){const n=JSON.stringify(t.outputShapes);e.indexOf(n)===-1&&e.push(n)}if(e.length===1){const t=this.inboundNodes[0].outputShapes;return Array.isArray(t)&&Array.isArray(t[0])&&t.length===1?t[0]:t}else throw new nr(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new ti(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return Qp(this.weights)}build(e){this.built=!0}getWeights(e=!1){return Dw(e?this.trainableWeights:this.weights)}setWeights(e){Q(()=>{const t=this.weights;if(t.length!==e.length)throw new q(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${e.length}, but the layer was expecting ${t.length} weights. 
Provided weights: ${e}...`);if(t.length===0)return;const n=[],s=Dw(t);for(let i=0;ii.apply(h.read())),o==null&&(o=!0),o?this._trainableWeights.push(h):this._nonTrainableWeights.push(h),h}setFastWeightInitDuringBuild(e){this.fastWeightInitDuringBuild=e}addLoss(e){if(e==null||Array.isArray(e)&&e.length===0)return;e=Et(e),this._losses!==void 0&&this._losses!==null&&this.losses.push(...e)}computeOutputShape(e){return e}computeMask(e,t){if(!this.supportsMasking){if(t!=null)if(Array.isArray(t))t.forEach(n=>{if(n!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return t}addInboundNode(e,t,n,s,i,o,a=null){const c=Et(e);t=Et(t),n=Et(n),s=Et(s),i=Zp(i),o=Zp(o);const h=[],d=[],m=[];for(const f of c)h.push(f.sourceLayer),d.push(f.nodeIndex),m.push(f.tensorIndex);new em({outboundLayer:this,inboundLayers:h,nodeIndices:d,tensorIndices:m,inputTensors:c,outputTensors:t,inputMasks:n,outputMasks:s,inputShapes:i,outputShapes:o},a);for(let f=0;fe.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let e=0;return--this._refCount===0&&(e=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:e}}}function o3(e){e=Et(e);const t=[];for(const n of e)t.push(n.shape);return ts(t)}function a3(e){return"float32"}function Tv(e,t,n){if((t==null||n!=null&&n>0)&&(t=e.sourceLayer,n=e.nodeIndex),t.inboundNodes.length===0)return[e];{const s=t.inboundNodes[n];if(s.inboundLayers.length===0)return s.inputTensors;{const i=[];for(let o=0;o0){const i=await Promise.all(t);for(let o=0;obe(this.totals[s],X(i,n)));this.totals[s]=a,o!=null&&o.dispose()}}}async onEpochEnd(e,t){if(t!=null)for(const n of this.params.metrics){if(this.totals[n]==null)continue;typeof this.totals[n]=="number"?t[n]=this.totals[n]/this.seen:Q(()=>{const s=X(We(1,this.seen),this.totals[n]);t[n]=s,this.totals[n].dispose(),bn(t[n])})}}}class Rv extends sc{async onTrainBegin(e){this.epoch=[],this.history={}}async onEpochEnd(e,t){t==null&&(t={}),this.epoch.push(e);for(const n in t)this.history[n]==null&&(this.history[n]=[]),this.history[n].push(t[n])}async syncData(){const e=[],t=[],n=[];for(const i in this.history){const o=this.history[i];for(let a=0;anew Ov(s,t))}class Ps{constructor(){}static registerCallbackConstructor(e,t){A(e>=0&&Number.isInteger(e),()=>`Verbosity level is expected to be an integer >= 0, but got ${e}`),Ps.checkForDuplicate(t),Ps.constructors[e]==null&&(Ps.constructors[e]=[]),Ps.constructors[e].push(t)}static checkForDuplicate(e){for(const t in Ps.constructors){const n=Ps.constructors[+t];n.forEach(s=>{if(s===e)throw new q("Duplicate callback constructor.")})}}static clear(){Ps.constructors={}}static createCallbacks(e){const t=[];for(const n in Ps.constructors){const s=+n;e>=s&&t.push(...Ps.constructors[s])}return t.map(n=>new n)}}Ps.constructors={};function Dv(e,t,n,s,i,o,a,c,h){const d=new Rv,m=[new l3,...Ps.createCallbacks(t)];e!=null&&m.push(...e),m.push(d);const f=new Cv(m);return f.setParams({epochs:n,initialEpoch:s,samples:i,steps:o,batchSize:a,verbose:t,doValidation:c,metrics:h}),{callbackList:f,history:d}}function 
ri(e,t={},n=!1){return kh(e,Ws.getMap().classNameMap,t,"layer",n)}function tm(e,t){return Q(()=>{e.dtype!=="float32"&&(e=e.asType("float32"));const n=$e(Uh(e),t,!0),s=Ba(n.shape,mn()),i=Nn($s(n,s));return We(e,i)})}function ir(e,t){return Q(()=>qt(Uh(Re(t,e)),-1))}function ic(e,t){return Q(()=>qt(dn(Re(t,e)),-1))}function qr(e,t){return Q(()=>{const n=Re(e,t),s=Jn(dn(e),mn(),Number.MAX_VALUE),i=dn(We(n,s));return X(100,qt(i,-1))})}function Fw(e,t){return Q(()=>{const n=Jn(t,mn(),Number.MAX_VALUE),s=cs(be(1,n)),i=Jn(e,mn(),Number.MAX_VALUE),o=cs(be(1,i));return qt(Uh(Re(s,o)),-1)})}function h3(e,t){return Q(()=>{const n=$s(0,Re(1,X(e,t)));return qt(Uh(n),-1)})}function u3(e,t){return Q(()=>{const n=$s(0,Re(1,X(e,t)));return qt(n,-1)})}function d3(e,t){return Q(()=>{const n=$e(X(e,t),-1),s=Qn(X(Re(1,e),t),-1);return $s(0,be(1,Re(s,n)))})}function p3(e,t){return Q(()=>{const n=Math.log(2),s=Re(t,e),i=Re(be(s,za(X(-2,s))),n);return qt(i,-1)})}function Mh(e,t,n=!1){return Q(()=>{if(n)t=Fo(t);else{const s=$e(t,t.shape.length-1,!0);t=We(t,s)}return t=Jn(t,mn(),1-mn()),Ht($e(X(e.toFloat(),cs(t)),t.shape.length-1))})}function nm(e,t,n=!1){return Q(()=>{const s=Ma(Oz(e)).toInt();t=Jn(t,mn(),1-mn());const i=t.shape,o=To(s,i[i.length-1]).reshape(i);return Mh(o,t,n)})}function m3(e,t){if(!ae(e.shape,t.shape))throw new q(`logits and labels must have the same shape, but got shapes ${JSON.stringify(e.shape)} and ${JSON.stringify(t.shape)}`);return Q(()=>{const n=t.relu(),s=t.abs().neg();return n.sub(t.mul(e)).add(s.exp().log1p())})}function sm(e,t){return Q(()=>{let n;return n=Jn(t,mn(),1-mn()),n=cs(We(n,Re(1,n))),qt(m3(e,n),-1)})}function _w(e,t){return Q(()=>{const n=Jn(e,mn(),1),s=Jn(t,mn(),1);return $e(X(e,cs(We(n,s))),-1)})}function f3(e,t){return Q(()=>{const n=cs(be(mn(),t));return qt(Re(t,X(e,n)),-1)})}function im(e,t){return Q(()=>{const n=tm(e,-1),s=tm(t,-1),i=X(n,s);return Ht($e(i,-1))})}const ste=ir,ite=ir,rte=ic,ote=ic,ate=qr,cte=qr,lte=Fw,hte=Fw,ute=_w,dte=_w,pte=im,rm={meanSquaredError:ir,meanAbsoluteError:ic,meanAbsolutePercentageError:qr,meanSquaredLogarithmicError:Fw,squaredHinge:h3,hinge:u3,categoricalHinge:d3,logcosh:p3,categoricalCrossentropy:Mh,sparseCategoricalCrossentropy:nm,binaryCrossentropy:sm,kullbackLeiblerDivergence:_w,poisson:f3,cosineProximity:im};function Ww(e){if(typeof e=="string"){if(e in rm)return rm[e];let t=`Unknown loss ${e}`;throw e.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${e}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new q(t)}else return e}function $w(e,t){return Q(()=>{const n=X(.5,Fn(t)),s=Wh(xs(t,n),e.dtype);return qt(Xs(e,s),-1)})}function Uw(e,t){return Q(()=>Wh(Xs(rh(e,-1),rh(t,-1)),"float32"))}function kv(e,t){return Q(()=>Us(e.equal(1),t.equal(1)).sum().cast("float32"))}function g3(e,t){return Q(()=>Us(e.equal(1),t.equal(0)).sum().cast("float32"))}function y3(e,t){return Q(()=>Us(e.equal(0),t.equal(1)).sum().cast("float32"))}function Fv(e,t){return Q(()=>{const n=kv(e,t),s=y3(e,t),i=n.add(s);return Bn(xs(i,0),n.div(i),0).cast("float32")})}function b3(e,t){return Q(()=>{const n=kv(e,t),s=g3(e,t),i=n.add(s);return Bn(xs(i,0),n.div(i),0).cast("float32")})}function _v(e,t){return sm(e,t)}function Wv(e,t){return e.rank===t.rank&&(e=e.squeeze([e.rank-1])),t=t.argMax(-1),t.dtype!==e.dtype&&(t=t.asType(e.dtype)),Xs(e,t).asType("float32")}function mte(e,t){throw new Pe}function fte(e,t){throw new Pe}const w3=ir,L3=ir,S3=ic,I3=ic,x3=qr,T3=qr,Bw=Mh,A3=im,$v=nm,om={binaryAccuracy:$w,categoricalAccuracy:Uw,precision:Fv,categoricalCrossentropy:Bw,sparseCategoricalCrossentropy:$v,mse:w3,MSE:L3,mae:S3,MAE:I3,mape:x3,MAPE:T3,cosine:A3};function v3(e){if(typeof e=="string"&&e in om)return om[e];if(typeof e!="string"&&e!=null)return e;throw new q(`Unknown metric ${e}`)}function am(e){if(As(e!==null,`Unknown LossOrMetricFn ${e}`),typeof e=="string")return e;{let t;for(const n of Object.keys(rm))if(rm[n]===e){t=n;break}if(t!==void 0)return t;for(const n of Object.keys(om))if(om[n]===e){t=n;break}return t!==void 0?t:e.name}}function N3(e){const t={Adagrad:()=>Wo.adagrad(.01),Adadelta:()=>Wo.adadelta(1,.95,mn()),Adam:()=>Wo.adam(.001,.9,.999,mn()),Adamax:()=>Wo.adamax(.002,.9,.999,mn(),0),RMSProp:()=>Wo.rmsprop(.001,.9,0,mn()),SGD:()=>Wo.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,e in t)return t[e]();throw new q(`Unknown Optimizer ${e}`)}const Uv=1*1024*1024;function Bv(e,t,n=!1){if(e==null||typeof e!="object"||Object.getPrototypeOf(e)!==Object.prototype||!Mw(e))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(n){const s=JSON.stringify(e);s.length>Uv&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${s.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${Uv}.`)}}function Mw(e){if(e===null)return!0;if(typeof e=="object")if(Object.getPrototypeOf(e)===Object.prototype){const t=Object.keys(e);for(const n of t){if(typeof n!="string")return!1;if(!Mw(e[n]))return!1}return!0}else if(Array.isArray(e)){for(const t of e)if(!Mw(t))return!1;return!0}else return!1;else{const t=typeof e;return t==="string"||t==="number"||t==="boolean"}}function C3(e,t,n,s=console.log){const i=O3(e),o=["Layer (type)","Output shape","Param #"];i?(t=t||65,n=n||[.45,.85,1]):(t=t||98,n=n||[.33,.55,.67,1]),n[n.length-1]<=1&&(n=n.map(m=>Math.floor(t*m)));let a;if(!i){o.push("Receives inputs"),a=[];for(const m in e.nodesByDepth)a.push(...e.nodesByDepth[m])}s("_".repeat(t)),cm(o,n,s),s("=".repeat(t));const c=e.layers;for(let m=0;m1||i.length===1&&i[0].inboundLayers.length>1){t=!1;break}s.push(...i)}if(t)for(const i of e.layers){let o=!1;for(const a of i.inboundNodes)if(s.indexOf(a)!==-1)if(o){t=!1;break}else o=!0;if(!t)break}return t}function cm(e,t,n=console.log){let s="";for(let i=0;i0&&(s=s.slice(0,s.length-1)+" "),s+=e[i],s=s.slice(0,t[i]),s+=" ".repeat(t[i]-s.length);n(s)}function E3(e,t,n){let s;try{s=JSON.stringify(e.outputShape)}catch(c){s="multiple"}const i=e.name,o=e.getClassName(),a=[`${i} (${o})`,s,e.countParams().toString()];cm(a,t,n)}function D3(e,t,n,s){let i;try{i=JSON.stringify(e.outputShape)}catch(m){i="multiple"}const o=[];for(const m of e.inboundNodes){if(n!=null&&n.length>0&&n.indexOf(m)===-1)continue;for(let f=0;fL.name),h=[],d=t.names();for(const L of c)d.indexOf(L)!==-1?h.push(t.getValue(L)):h.push(null);s!=null&&(s.maxNumTensors=-Infinity,s.minNumTensors=Infinity);const m=c.join(",")+"|"+t.names().join(",");let f,b;if(zw[m]==null){const L=F3(a,t);f=L.sorted,b=L.recipientCounts,zw[m]=f,Pv[m]=b}f=zw[m],b={},i||Object.assign(b,Pv[m]);const w=new Po(t);for(let L=0;Ls.maxNumTensors&&(s.maxNumTensors=j),j0,()=>"Expected at least one fetch, got none");let n=[],s={};if(e.length===1){const i=zv(e[0],t);n=i.sorted,s=i.recipientMap}else{const i=new Set;for(const o of e){const{sorted:a,recipientMap:c}=zv(o,t);for(const h of a)i.has(h.name)||(n.push(h),i.add(h.name));for(const h in c)s[h]==null&&(s[h]=new Set),c[h].forEach(d=>s[h].add(d))}}return{sorted:n,recipientCounts:_3(s)}}function _3(e){const t={};for(const n in e)t[n]=e[n].size;return t}function zv(e,t){const n=new Set,s=[],i={};for(const c of t.names())n.add(c);const o=[],a=[];for(o.push(e);o.length>0;){const c=o[o.length-1];if(n.has(c.name)){o.pop();continue}const h=a[a.length-1]===o.length-1;if(c.inputs.length===0||h)o.pop(),s.push(c),n.add(c.name),h&&a.pop();else{a.push(o.length-1);for(const d of c.inputs){if(i[d.name]==null&&(i[d.name]=new Set),i[d.name].add(c.name),n.has(d.name))continue;o.push(d)}}}return{sorted:s,recipientMap:i}}function W3(e){let t;if(e.sourceLayer.inboundNodes.length===1)t=e.sourceLayer.output;else{let n=null;for(let s=0;sN.name)}`);Vr(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(N=>N.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(const N of this.outputs){const O=N.sourceLayer,E=N.nodeIndex,k=N.tensorIndex;this.outputLayers.push(O),this.outputLayersNodeIndices.push(E),this.outputLayersTensorIndices.push(k)}for(const N of this.inputs){const O=N.sourceLayer,E=N.nodeIndex,k=N.tensorIndex;As(E===0,"input layer has >1 nodes"),As(k===0,"input layer has >1 tensors"),this.inputLayers.push(O),this.inputLayersNodeIndices.push(E),this.inputLayersTensorIndices.push(k)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let N=0;NN.shape),this.internalOutputShapes=this.outputs.map(N=>N.shape);const t={},n={},s={},i={},o={},a=[],c=(N,O,E,k,F,U)=>{(k==null||F==null||U==null)&&(k=N.sourceLayer,F=N.nodeIndex,U=N.tensorIndex);const $=k.inboundNodes[F];if(E.indexOf($)!==-1)throw new ti(`The tensor ${N.name} at layer "${k.name}" is part of a cycle.`);if(O.indexOf($)!==-1)return;this.containerNodes.add(Oi.nodeKey(k,F)),k.id in o||(o[k.id]=Object.keys(o).length),E.indexOf($)===-1&&E.push($);const Y=$.inboundLayers.length;for(let j=0;j=0;)E.splice(E.indexOf($),1);a.push($)},h=[],d=[];for(const N of this.outputs)c(N,h,d);const m=a.slice().reverse();for(const N of m){n[N.id]=N,N.id in t||(t[N.id]=0);let O=t[N.id];const E=s[N.outboundLayer.id]==null?0:s[N.outboundLayer.id];O=Math.max(O,E),s[N.outboundLayer.id]=O,i[N.outboundLayer.id]=N.outboundLayer,t[N.id]=O;for(let k=0;kparseInt(N,10)).sort(Bp);this.layers=[];for(const N of w){const O=b[N];O.sort((E,k)=>{const F=o[E.id],U=o[k.id];return FU?1:0});for(const E of O)E instanceof Oi&&this.internalContainerRefs.push(E),this.layers.push(E)}this.layersByDepth=b,w=Object.keys(f).map(N=>parseInt(N,10)).sort(Bp);const L=this.inputs.slice(),x=[];for(const N of w)for(const O of f[N]){const E=O.outboundLayer;if(E!=null){for(const k of O.inputTensors)if(L.indexOf(k)===-1)throw new ti(`Graph disconnected: cannot obtain value for tensor ${k} at layer "${E.name}". The following previous layers were accessed without issue: ${x}`);for(const k of O.outputTensors)L.push(k);x.push(E.name)}}this.nodesByDepth=f;const v=this.layers.map(N=>N.name);for(const N of v){const O=v.filter(E=>E===N).length;if(O!==1)throw new ti(`The name "${N}" is used ${O} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(v))}this.outboundNodes=[],this.inboundNodes=[],new em({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(N=>null),outputMasks:this.outputs.map(N=>null),inputShapes:this.inputs.map(N=>N.shape),outputShapes:this.outputs.map(N=>N.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();const e={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(const t of this.layers)e.numDisposedVariables+=t.dispose().numDisposedVariables;for(const t of this.internalContainerRefs)e.numDisposedVariables+=t.dispose().numDisposedVariables}return e.refCountAfterDispose=this._refCount,e}get trainable(){return this.trainable_}set trainable(e){this.layers.forEach(t=>{t._trainableWeights.forEach(n=>n.trainable=e)}),this.trainable_=e}get trainableWeights(){if(this._trainableWeights.length>0)throw new q("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let e=[];for(const t of this.layers)e=e.concat(t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.layers)e.push(...t.nonTrainableWeights);if(!this.trainable){const t=[];for(const n of this.layers)t.push(...n.trainableWeights);return t.concat(e)}return e}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(e,t=!0){const n={};let s=0;for(const o of this.layers)for(const a of o.weights){if(n[a.originalName]!=null)throw new q(`Duplicate weight name: ${a.originalName}`);n[a.originalName]=a,s++}const i=[];for(const o in e){let a=o;if(n[o]==null){const c=o.split("/"),h=c.slice(0,-2).concat([c[c.length-1]]);a=h.join("/")}if(n[a]!=null)i.push([n[a],e[o]]);else if(t)throw new q(`Provided weight data has no target variable: ${o}`);delete n[a]}if(t){const o=[];for(const a in n)o.push(a);if(o.length>0)throw new q(`${o.length} of ${s} weights are not set: ${o}`)}kw(i)}updatedConfig(){const e=this.getConfig(),t={};return t.className=this.getClassName(),t.config=e,t.kerasVersion=`tfjs-layers ${lm}`,t.backend="TensorFlow.js",t}toJSON(e,t=!0){const n=Pw(this.updatedConfig());return t?JSON.stringify(n):n}call(e,t){return Q(()=>{e=Et(e);const n=new Po;for(let s=0;s{e=Et(e);let n;return t==null?n=$o(null,e.length):n=Et(t),this.runInternalGraph(e,n)[1]})}computeOutputShape(e){const t=Zp(e);if(t.length!==this.inputLayers.length)throw new q(`Invalid inputShape argument ${e}: model has ${this.inputLayers.length} tensor inputs.`);const n={};for(let a=0;aparseInt(a,10)).sort(Bp);if(s.length>1)for(const a of s){const c=this.nodesByDepth[a];for(const h of c){const d=h.outboundLayer;if(this.inputLayers.map(L=>L.id).indexOf(d.id)!==-1)continue;const m=[];for(let L=0;LparseInt(c,10)).sort(Bp);for(const c of s){const h=this.nodesByDepth[c];for(const d of h){const m=d.outboundLayer,f=d.inputTensors,b=d.outputTensors,w=new Array;for(const L of f)L.id in n&&w.push(n[L.id]);if(w.length===f.length){let L={},x,v,N,O;if(d.callArgs!=null&&(L=d.callArgs),w.length===1){const[E,k]=w[0];L.mask==null&&(L.mask=k),N=Et(m.call(E,L)),O=Et(m.computeMask(E,k)),x=[E],v=[k]}else 
x=w.map(E=>E[0]),v=w.map(E=>E[1]),L.mask==null&&(L.mask=v),N=Et(m.call(x,L)),O=Et(m.computeMask(x,v));if(m.activityRegularizer)throw new Pe("LayersModel invocation with concrete Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let E=0;E{const e=[];for(const t of this.layers)for(let n=0;n0){const L=[];for(let x=0;x0&&x.apply(ts(N),O)}function h(x){const v=x.name,N=ri(x,t.customObjects!=null?t.customObjects:{});N.setFastWeightInitDuringBuild(s),i[v]=N;const O=x.inboundNodes;O.forEach(E=>{if(!(E instanceof Array))throw new q(`Corrupted configuration, expected array for nodeData: ${E}`);a(N,E)})}const d=t.name,m=t.layers;for(const x of m)h(x);for(;!pz(o);)for(const x of m){const v=i[x.name];if(v.name in o){const N=o[v.name];delete o[v.name];for(const O of N)c(v,O)}}const f=[],b=[],w=t.inputLayers;for(const x of w){const v=x[0],N=x[1],O=x[2];As(v in i);const E=i[v],k=E.inboundNodes[N].outputTensors;f.push(k[O])}const L=t.outputLayers;for(const x of L){const v=x[0],N=x[1],O=x[2];As(v in i);const E=i[v],k=E.inboundNodes[N].outputTensors;b.push(k[O])}return new e({inputs:f,outputs:b,name:d})}get stateful(){if(this._stateful)throw new q("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(const e of this.layers)if(e.stateful)return!0;return!1}resetStates(){Q(()=>{this.layers.forEach(e=>{e.stateful&&e.resetStates()})})}}function Vv(e,t,n){const s=t.length;if(e==null||Array.isArray(e)&&e.length===0)return t.map(i=>null);if(s===1)return Array.isArray(e)&&e.length===1?e:typeof e=="object"&&t[0]in e?[e[t[0]]]:[e];if(Array.isArray(e)){if(e.length!==s)throw new Error(`Provided ${n} is an array of ${e.length} element(s), but the model has ${s} outputs. Make sure a set of weights is provided for each model output.`);return e}else if(typeof e=="object"&&Object.keys(e).length>0&&typeof e[Object.keys(e)[0]]=="object"){const i=[];return t.forEach(o=>{o in e?i.push(e[o]):i.push(null)}),i}else throw new Error(`The model has multiple (${s}) outputs, so ${n} must be either an array with ${s} elements or an object with ${t} keys. Provided ${n} not understood: ${JSON.stringify(e)}`)}function Gv(e,t){return Vv(e,t,"classWeight")}function gte(e,t){return Vv(e,t,"sampleWeight")}async function Yv(e,t,n,s){if(t!=null||s!=null)throw new Error("Support sampleWeight is not implemented yet");if(n!=null){const i=Q(()=>{if(e.shape.length===1)return e.clone();if(e.shape.length===2)if(e.shape[1]>1){const c=1;return e.argMax(c)}else{if(e.shape[1]===1)return e.reshape([e.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${e.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${e.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),o=Array.from(await i.data());He(i);const a=[];return o.forEach(c=>{if(n[c]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${c} exists in the data but not in classWeight`);a.push(n[c])}),ls(a,"float32")}else return null}function $3(e,t){return X(e,t)}const U3=32;function Hv(e,t){let n,s;const i=t;n=i.xs,s=i.ys,A(n!=null&&s!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. 
The provided Dataset instead generates ${t}`);const o=qv("input",e.inputNames,n),a=qv("output",e.outputNames,s),c=o[0].shape[0];A(o.length===e.inputs.length,()=>`LayersModel has ${e.inputs.length} inputs, but the dataset provides ${o.length} inputs. (Expected input keys: ${JSON.stringify(e.inputNames)})`),A(a.length===e.outputs.length,()=>`LayersModel has ${e.outputs.length} outputs, but the dataset provides ${a.length} outputs. (Expected output keys: ${JSON.stringify(e.outputNames)})`);for(let h=0;h`Batch size mismatch: input ${e.inputNames[h]} has ${o[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);for(let h=0;h`Batch size mismatch: output ${e.outputNames[h]} has ${a[h].shape[0]}; expected ${c} based on input ${e.inputNames[0]}.`);return{xs:o,ys:a}}function qv(e,t,n){if(n instanceof ee)return[n];if(Array.isArray(n))return A(n.length===t.length,()=>`Received an array of ${n.length} Tensors, but expected ${t.length} to match the ${e} keys ${t}.`),n;{const s=[];for(const i of t){if(n[i]==null)throw new q(`The feature data generated by the dataset lacks the required ${e} key '${i}'.`);s.push(n[i])}return s}}function B3(e){if(e.length===3)throw new Pe("Validation with sample weights is not implemented yet.");return{xs:e[0],ys:e[1]}}async function M3(e,t,n){const s=n.batchesPerEpoch!=null;if(A(e.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),A(n!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),A(n.epochs!=null&&n.epochs>0&&Number.isInteger(n.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${n.epochs}`),A(!s||n.batchesPerEpoch>0&&Number.isInteger(n.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${n.batchesPerEpoch}`),A(n.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. 
Use validationData instead."),e.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");e.isTraining=!0;try{const i=n.validationData!=null;let o,a;if(i)if(jv(n.validationData))A(n.validationBatches==null||n.validationBatches>0&&Number.isInteger(n.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${n.validationBatches}`);else{const v=B3(n.validationData);o=v.xs,a=v.ys}const c=e.makeTrainFunction(),h=e.getDedupedMetricsNames();let d;i?d=h.slice().concat(h.map(v=>"val_"+v)):d=h.slice();const m=Ev(n.callbacks,n.yieldEvery),f=n.verbose==null?1:n.verbose,{callbackList:b,history:w}=Dv(m,f,n.epochs,null,null,P3(t,n),null,i,d);b.setModel(e),e.history=w,await b.onTrainBegin(),e.stopTraining_=!1;let L=n.initialEpoch==null?0:n.initialEpoch,x=await t.iterator();for(;L=n.batchesPerEpoch:E.done){if(i){let k;jv(n.validationData)?k=Et(await e.evaluateDataset(n.validationData,{batches:n.validationBatches})):k=Et(e.evaluate(o,a,{batchSize:n.validationBatchSize==null?U3:n.validationBatchSize,verbose:0}));for(let F=0;F0)throw new Pe("Verbose mode is not implemented yet.");A(!s||n.batches>0&&Number.isInteger(n.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(n.batches)}`);const a=z3(t)?t:await t.iterator();let c=0,h=0;for(;s?h{if(d.value){const{xs:m,ys:f}=Hv(e,d.value),b=m.concat(f),w=Q(()=>i(b));if(He(b),h===0)for(let x=0;xbe(o[x],X(L,v))),h>0&&He(N)}He(w),c+=L,++h}return o}),d.done){s&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${n.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let d=0;d0&&Number.isInteger(e),()=>`batchSize is required to be a positive integer, but got ${e}`)}function Vh(e,t,n){return e==null?[null]:Array.isArray(e)?e.map(s=>Mo(s,t,n-t)):Mo(e,t,n-t)}function Gw(e,t){return Q(()=>e==null?null:Array.isArray(e)?e.map(n=>Gw(n,t)):bv(e,t.dtype==="int32"?t:t.toInt()))}function Yw(e,t){const n=[];let s=0,i=null;for(;s=e&&(i=e),n.push([s,i]),s=i;return n}async function G3(e,t,n,s,i,o,a,c,h,d,m,f,b,w,L){i==null&&(i=32),o==null&&(o=1),m==null&&(m=!0),b==null&&(b=0);let x=!1;if(h!=null&&d!=null&&(x=!0),L!=null&&(x=!0,w==null))throw new q("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");const v=e.checkNumSamples(n,i,w,"steps_per_epoch");let N;v!=null&&(N=ni(0,v)),a==null&&(a=1);const{callbackList:O,history:E}=Dv(c,a,o,b,v,w,i,x,f);O.setModel(e),e.history=E,await O.onTrainBegin(),e.stopTraining_=!1;for(let k=b;k{const Z=$[Y][0],ie=$[Y][1],de=Mo(U,Z,ie-Z);j.batch=Y,j.size=ie-Z;const he=Gw(n,de),ue=t(he);for(let me=0;me0){if(L=!0,s.validationData.length===2)a=s.validationData[0],c=s.validationData[1];else throw s.validationData.length===3?new Pe("validationData including sample weights is not supported yet."):new q(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${s.validationData} is invalid.`);const $=!0,Y=await e.standardizeUserData(a,c,null,null,$,f);h=Y[0],d=Y[1],x=h.concat(d)}else if(s.validationSplit!=null&&s.validationSplit>0&&s.validationSplit<1){L=!0;const $=Math.floor(i[0].shape[0]*(1-s.validationSplit)),Y=i[0].shape[0];h=Vh(i,$,Y),i=Vh(i,0,$),d=Vh(o,$,Y),o=Vh(o,0,$),x=h.concat(d)}else s.validationSteps!=null&&(L=!0);const v=i.concat(o).concat(m);e.checkTrainableWeightsConsistency();const N=e.makeTrainFunction(),O=e.getDedupedMetricsNames();let E,k;L?(e.makeTestFunction(),E=e.testFunction,k=O.slice().concat(O.map($=>"val_"+$))):(E=null,x=[],k=O.slice());const F=Ev(s.callbacks,s.yieldEvery),U=await G3(e,N,v,O,f,s.epochs,s.verbose,F,E,x,s.shuffle,k,s.initialEpoch,null,null);return U}finally{e.isTraining=!1,zo(i,t),zo(o,n),zo(h,a),zo(d,c),m!=null&&He(m)}}function Kv(e){const t=[];e instanceof ee&&(e=[e]);for(let n=0;nn.push(i.id));else if(t!=null)for(const i in t){const o=t[i];n.push(o.id)}const s=[];if(e instanceof ee)n.indexOf(e.id)===-1&&s.push(e);else if(Array.isArray(e))e.forEach(i=>{n.indexOf(i.id)===-1&&s.push(i)});else if(e!=null)for(const i in e){const o=e[i];n.indexOf(o.id)===-1&&s.push(o)}s.forEach(i=>{i.isDisposed||i.dispose()})}function H3(e){return e instanceof ee}function Hw(e){return Array.isArray(e)}function Xv(e){return!H3(e)&&!Hw(e)}function Jv(e,t,n,s=!0,i=""){if(t==null||t.length===0){if(e!=null){let a=!1;if(Hw(e)&&e.length>0)a=!0;else if(Xv(e)){for(const c in e)if(e.hasOwnProperty(c)){a=!0;break}}else a=!0;if(a)throw new q(`Error when checking model ${i} expected no data, but got ${e}`)}return[]}if(e==null)return t.map(a=>null);let o;if(Xv(e)){e=e,o=[];for(const a of t){if(e[a]==null)throw new q(`No data provided for "${a}". Need data for each key in: ${t}`);o.push(e[a])}}else if(Hw(e)){if(e=e,e.length!==t.length)throw new q(`Error when checking model ${i}: the Array of Tensors that you are passing to your model is not the size the model expected. 
Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${e}`);o=e}else{if(e=e,t.length>1)throw new q(`The model ${i} expects ${t.length} Tensor(s), but only received one Tensor. Found: Tensor with shape ${e.shape}`);o=[e]}if(o=Kv(o),n!=null)for(let a=0;a=0&&d!==m)throw new q(`Error when checking ${i}: expected ${t[a]} to have shape [${n[a]}], but got array with shape [${c.shape}].`)}}return o}function q3(e,t,n){const s=Vr(e.map(o=>o.shape[0]));s.sort();const i=Vr(t.map(o=>o.shape[0]));if(i.sort(),s.length>1)throw new q(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(e.map(o=>o.shape))}`);if(i.length>1)throw new q(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(o=>o.shape))}`);if(s.length>0&&i.length>0&&!ae(s,i))throw new q(`Input Tensors should have the same number of samples as target Tensors. Found ${s[0]} input sample(s) and ${i[0]} target sample(s).`)}function j3(e,t,n){const s=[ir,sm,Mh];for(let i=0;i1)throw new q(`The model expects ${t.length} ${i} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(e.shape)}.`);o=[e]}if(n!=null)for(let a=0;a[]);let n;if(typeof e=="string"||typeof e=="function")n=[e];else if(Array.isArray(e)||typeof e=="object")n=e;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${e}`);if(Array.isArray(n))return t.map(s=>n);{const s=[];for(const i of t){let o=n.hasOwnProperty(i)?n[i]:[];Array.isArray(o)||(o=[o]),s.push(o)}return s}}const X3="layers-model";class rr extends Oi{constructor(e){super(e);this.isTraining=!1}summary(e,t,n=console.log){if(!this.built)throw new q("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");C3(this,e,t,n)}compile(e){if(e.loss==null&&(e.loss=[]),this.loss=e.loss,typeof e.optimizer=="string")this.optimizer_=N3(e.optimizer),this.isOptimizerOwned=!0;else{if(!(e.optimizer instanceof er))throw new q("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=e.optimizer,this.isOptimizerOwned=!1}let t=[];if(!Array.isArray(e.loss)&&typeof e.loss!="string"&&typeof e.loss!="function"){e.loss=e.loss;for(const o in e.loss)if(this.outputNames.indexOf(o)===-1)throw new q(`Unknown entry in loss dictionary: "${o}". Only expected the following keys: ${this.outputNames}`);for(const o of this.outputNames)e.loss[o]==null&&console.warn(`Output "${o}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${o} during training`),t.push(Ww(e.loss[o]))}else if(Array.isArray(e.loss)){if(e.loss.length!==this.outputs.length)throw new q(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${e.loss}.`);const o=e.loss;t=o.map(a=>Ww(a))}else{const o=Ww(e.loss);this.outputs.forEach(a=>{t.push(o)})}this.lossFunctions=t,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let o=0;o{for(let o=0;o1&&(this.metricsTensors.push([a,o]),this.metricsNames.push(this.outputNames[o]+"_loss"))}});const s=K3(e.metrics,this.outputNames),i=(o,a,c)=>{this.outputNames.length>1&&(a=this.outputNames[o]+"_"+a),this.metricsNames.push(a),this.metricsTensors.push([c,o])};Bo("metric",()=>{for(let o=0;o{const d="";let m,f,b;for(const w of h){if(typeof w=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(w)!==-1){const x=this.internalOutputShapes[o];x[x.length-1]===1||this.lossFunctions[o]===sm?["accuracy","acc"].indexOf(w)!==-1?f=$w:["crossentropy","ce"].indexOf(w)!==-1&&(f=_v):this.lossFunctions[o]===nm?["accuracy","acc"].indexOf(w)!==-1?f=Wv:["crossentropy","ce"].indexOf(w)!==-1&&(f=$v):["accuracy","acc"].indexOf(w)!==-1?f=Uw:["crossentropy","ce"].indexOf(w)!==-1&&(f=Bw);let v;["accuracy","acc"].indexOf(w)!==-1?v="acc":["crossentropy","ce"].indexOf(w)!==-1&&(v="ce"),b=f,m=d+v}else{const x=v3(w);b=x,m=d+am(w)}let L;Bo(m,()=>{L=b}),i(o,m,L)}};c(a)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(e,t,n={}){const s=n.batchSize==null?32:n.batchSize;Vw(s);const i=!0,o=this.standardizeUserDataXY(e,t,i,s);try{const a=o[0].concat(o[1]);this.makeTestFunction();const c=this.testFunction,h=this.testLoop(c,a,s,n.verbose,n.steps);return ts(h)}finally{zo(o[0],e),zo(o[1],t)}}async evaluateDataset(e,t){return this.makeTestFunction(),V3(this,e,t)}checkNumSamples(e,t,n,s="steps"){let i;if(n!=null){if(i=null,t!=null)throw new q(`If ${s} is set, batchSize must be null or undefined.Got batchSize = ${t}`)}else if(e!=null)Array.isArray(e)?i=e[0].shape[0]:i=e.shape[0];else throw new q(`Either the input data should have a defined shape, or ${s} shoud be specified.`);return i}execute(e,t){if(Array.isArray(t)&&t.length===0)throw new q("`outputs` is an empty Array, which is not allowed.");const n=Array.isArray(t),s=n?t:[t],i=this.retrieveSymbolicTensors(s),o=new Po;if(e instanceof ee&&(e=[e]),Array.isArray(e)){if(e.length!==this.inputs.length)throw new q(`The number of inputs provided (${e.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let c=0;ca.name);for(let a=0;a0){const s=[];throw t.forEach((i,o)=>{i==null&&s.push(e[o])}),new q(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(s)}`)}return t}predictLoop(e,t=32,n=!1){return Q(()=>{const s=this.checkNumSamples(e);if(n)throw new Pe("Verbose predictLoop() is not implemented yet.");const i=Yw(s,t),o=this.outputs.map(a=>[]);for(let a=0;a{const h=i[a][0],d=i[a][1],m=Vh(e,h,d),f=[];if(Array.isArray(m))for(let w=0;wo[d].push(h))}return ts(o.map(a=>Yt(a,0)))})}predict(e,t={}){const n=Kv(e);Zv(n,this.inputNames,this.feedInputShapes,!1);try{const s=t.batchSize==null?32:t.batchSize;return Vw(s),this.predictLoop(n,s)}finally{zo(n,e)}}predictOnBatch(e){Zv(e,this.inputNames,this.feedInputShapes,!0);const t=(Array.isArray(e)?e[0]:e).shape[0];return 
this.predictLoop(e,t)}standardizeUserDataXY(e,t,n=!0,s){if(this.optimizer_==null)throw new ti("You must compile a model before training/testing. Use LayersModel.compile(modelCompileArgs).");const i=[];for(let o=0;o0&&e[0].shape[0]%s!==0)throw new q(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${s}. Found: ${e[0].shape[0]} sample(s).`);return[e,t]}async standardizeUserData(e,t,n,s,i=!0,o){const[a,c]=this.standardizeUserDataXY(e,t,i,o);if(n!=null)throw new Error("sample weight is not supported yet.");let h=null;if(s!=null){const d=Gv(s,this.outputNames);h=[];for(let m=0;m{const o=this.checkNumSamples(t,n,i,"steps"),a=[];if(s>0)throw new Pe("Verbose mode is not implemented yet.");if(i!=null)throw new Pe("steps mode in testLoop() is not implemented yet");{const c=Yw(o,n),h=ls(ni(0,o));for(let d=0;d1){const o=ov(e.slice(0,n),s);i+=`_${o}`}t.push(i)}return t}makeTrainFunction(){return e=>{const t=[],n=e.slice(0,this.inputs.length),s=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),i=e.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),o=[],a=()=>{const m=[];for(let L=0;L1&&L{w=be(w,L)}),w},c=this.collectedTrainableWeights.map(m=>m.read()),h=!0,d=this.optimizer_.minimize(a,h,c);return[d].concat(o)}}makeTestFunction(){this.testFunction=e=>Q(()=>{const t=[];let n;const s=e.slice(0,this.inputs.length),i=e.slice(this.inputs.length,this.inputs.length+this.outputs.length),o=[];for(let h=0;hsr(t))}else{const t=Object.keys(this.loss);e={};const n=this.loss;for(const s of t)if(typeof n[s]=="string")e[s]=sr(n[s]);else throw new Error("Serialization of non-string loss is not supported.")}return e}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[sr(am(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(e=>sr(am(e)));{const e={};for(const t in this.metrics)e[t]=sr(am(this.metrics[t]));return e}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(e){if(e.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(e.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(e.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");const t=Ph(e.optimizer_config),n=ri(t);let s;if(typeof e.loss=="string")s=Uo(e.loss);else if(Array.isArray(e.loss))s=e.loss.map(o=>Uo(o));else if(e.loss!=null){s={};for(const o in e.loss)s[o]=Uo(e.loss[o])}let i;if(Array.isArray(e.metrics))i=e.metrics.map(o=>Uo(o));else if(e.metrics!=null){i={};for(const o in e.metrics)i[o]=Uo(e.metrics[o])}this.compile({loss:s,metrics:i,optimizer:n})}async save(e,t){if(typeof e=="string"){const h=zy(e);if(h.length===0)throw new q(`Cannot find any save handlers for URL '${e}'`);if(h.length>1)throw new q(`Found more than one (${h.length}) save handlers for URL '${e}'`);e=h[0]}if(e.save==null)throw new q("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");const n=await My(this.getNamedWeights(t)),s=!1,i=null,o=this.toJSON(i,s),a={modelTopology:o,format:X3,generatedBy:`TensorFlow.js tfjs-layers v${lm}`,convertedBy:null},c=t==null?!1:t.includeOptimizer;if(c&&this.optimizer!=null){a.trainingConfig=this.getTrainingConfig();const h="optimizer",{data:d,specs:m}=await 
My(await this.optimizer.getWeights(),h);n.specs.push(...m),n.data=zd([n.data,d])}if(this.userDefinedMetadata!=null){const h=!0;Bv(this.userDefinedMetadata,this.name,h),a.userDefinedMetadata=this.userDefinedMetadata}return a.weightData=n.data,a.weightSpecs=n.specs,e.save(a)}setUserDefinedMetadata(e){Bv(e,this.name),this.userDefinedMetadata=e}getUserDefinedMetadata(){return this.userDefinedMetadata}}rr.className="Model",fe(rr);class Qv extends rr{}Qv.className="Functional",fe(Qv);async function J3(e,t){"modelTopology"in e||(e={modelTopology:e}),e=e;let n=e.modelTopology;n.model_config!=null&&(n=n.model_config);const s=Ph(n),i=ri(s,t);if(e.weightsManifest!=null){const o=await vT(e.weightsManifest,e.pathPrefix,i.weights.map(c=>c.originalName)),a={};for(const c of i.weights)a[c.originalName]=o[c.originalName];i.loadWeights(a),He(o)}return i}async function Z3(e,t){if(t==null&&(t={}),typeof e=="string"){const n=Vy(e,t);if(n.length===0)n.push(Yd(e,t));else if(n.length>1)throw new q(`Found more than one (${n.length}) load handlers for URL '${e}'`);e=n[0]}return Q3(e,void 0,t)}async function Q3(e,t,n){if(n==null&&(n={}),e.load==null)throw new q("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const s=await e.load();let i=s.modelTopology;i.model_config!=null&&(i=i.model_config);const o=n.strict==null?!0:n.strict,a=s.weightData!=null&&s.weightSpecs!=null&&o,c=ri(Ph(i),t,a),h=s.trainingConfig;if(h!=null&&c.loadTrainingConfig(h),s.userDefinedMetadata!=null&&c.setUserDefinedMetadata(s.userDefinedMetadata),s.weightData!=null){if(s.weightSpecs==null)throw new q("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");const{modelWeights:d,optimizerWeights:m}=eV(s.weightData,s.weightSpecs);c.loadWeights(d,o),c.optimizer!=null&&m.length>0&&await c.optimizer.setWeights(m),He(d),He(m.map(f=>f.tensor))}return c}function eV(e,t){const n=Pd(e,t),s={},i=[];return t.forEach(o=>{o.group==="optimizer"?i.push({name:o.name,tensor:n[o.name]}):s[o.name]=n[o.name]}),{modelWeights:s,optimizerWeights:i}}class rc extends rr{constructor(e){super({inputs:[],outputs:[]});if(e=e||{},this.trainable=!0,this.built=!1,this.name=e.name!=null?e.name:Jp("sequential_"),e.layers!=null)for(const t of e.layers)this.add(t)}checkShape(e){const t=e.inboundNodes[0].outputTensors[0].shape;if(t.some(n=>n<0))throw new q(`Negative dimension size caused by adding layer ${e.name} with input shape [${e.inboundNodes[0].inputTensors[0].shape}]`)}add(e){const t=e instanceof rc||e instanceof rr;let n;if(t){if(n=e,n.outputs.length!==1)throw new q("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(n.inputs.length!==1)throw new q("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(e.inboundNodes.length===0){if(e.batchInputShape==null)throw new q("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");const s=Av({batchShape:e.batchInputShape,dtype:e.dtype,name:e.name+"_input"});e.apply(s)}if(t)this.outputs=n.outputs,this.inputs=n.inputs;else{if(e.inboundNodes.length!==1)throw new q(`A layer added to a Sequential model must not already be connected somewhere else. 
LayersModel received layer ${e.name} which has ${e.inboundNodes.length} pre-existing inbound connections.`);if(e.inboundNodes[0].outputTensors.length!==1)throw new q("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[e.inboundNodes[0].outputTensors[0]],this.inputs=Tv(this.outputs[0])}this.inboundNodes=[],new em({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:$o(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(s=>s.shape),outputShapes:this.outputs[0].shape})}else{const s=e.apply(this.outputs[0]);if(Array.isArray(s))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(e),this.outputs=[s],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(e),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{const e=this.layers.length-1;this.layers[e].outboundNodes=[],this.outputs=[this.layers[e].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(e,t){return this.model==null&&this.build(),this.model.call(e,t)}build(e){if(Nt(e),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new rr({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(e,t,n=console.log){this.built||this.build(),super.summary(e,t,n)}setWeights(e){this.model==null&&this.build(),this.model.setWeights(e)}evaluate(e,t,n={}){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.evaluate(e,t,n)}async evaluateDataset(e,t){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.evaluateDataset(e,t)}predict(e,t={}){return this.model==null&&this.build(),this.model.predict(e,t)}predictOnBatch(e){return this.model==null&&this.build(),this.model.predictOnBatch(e)}compile(e){this.build(),this.model.compile(e),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(e){this.model.optimizer=e}async fit(e,t,n={}){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.fit(e,t,n)}async 
fitDataset(e,t){if(!this.built)throw new ti("The model needs to be compiled before being used.");return this.model.fitDataset(e,t)}async trainOnBatch(e,t){return this.model.trainOnBatch(e,t)}static fromConfig(e,t,n={},s=!1){let i,o={};if(t instanceof Array){if(!(t[0].className!=null)||t[0].className==="Merge")throw new q("Legacy serialization format not supported yet.");i=t}else A(t.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),i=t.layers,delete t.layers,o=t;const a=new e(o);if(!(a instanceof rc))throw new Pe(`Sequential.fromConfig called on non-Sequential input: ${a}`);for(const c of i){const h=void 0,d=ri(c,h,s);s&&d.setFastWeightInitDuringBuild(!0),a.add(d)}return a}set stopTraining(e){if(this.model==null)throw new q("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=e}get stopTraining(){if(this.model==null)throw new q("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){const e=[];for(const t of this.layers){const n={};n.className=t.getClassName(),n.config=t.getConfig(),e.push(n)}return{name:this.name,layers:e}}}rc.className="Sequential",fe(rc);function tV(e){return new rr(e)}function nV(e){return new rc(e)}function sV(e,t){return t==null&&(t={}),Z3(e,t)}function eN(e){return Av(e)}function iV(e,t){Ps.registerCallbackConstructor(e,t)}class us extends Ao{getConfig(){return{}}}class tN extends us{apply(e,t=1){return Dz(e,t)}}tN.className="elu",fe(tN);class nN extends us{apply(e){return bp(e)}}nN.className="selu",fe(nN);class sN extends us{apply(e){return Ni(e)}}sN.className="relu",fe(sN);class iN extends us{apply(e){return Q(()=>Oo(6,Ni(e)))}}iN.className="relu6",fe(iN);class rN extends us{apply(e){return e}}rN.className="linear",fe(rN);class oN extends us{apply(e){return Ti(e)}}oN.className="sigmoid",fe(oN);class aN extends us{apply(e){return Fz(e)}}aN.className="hardSigmoid",fe(aN);class cN extends us{apply(e){return za(e)}}cN.className="softplus",fe(cN);class lN extends us{apply(e){return kz(e)}}lN.className="softsign",fe(lN);class hN extends us{apply(e){return $a(e)}}hN.className="tanh",fe(hN);class qw extends us{apply(e,t=-1){return Fo(e,t)}}qw.className="softmax",fe(qw);class uN extends us{apply(e,t=-1){return dp(e,t)}}uN.className="logSoftmax",fe(uN);class dN extends us{apply(e,t=1){return Q(()=>Ti(e.mul(t)).mul(e))}}dN.className="swish",fe(dN);function jr(e){return e.getClassName()}function jw(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"activation")}function Kr(e){if(e==null){const t={};return t.className="linear",t.config={},jw(t)}if(typeof e=="string"){const t={};return t.className=e,t.config={},jw(t)}else return e instanceof us?e:jw(e)}function Kw(e){if(e!=null&&typeof e!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${e}`)}class pN extends Ao{}class Gh extends pN{constructor(e){super();Kw(e),this.l1=e==null||e.l1==null?.01:e.l1,this.l2=e==null||e.l2==null?.01:e.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(e){return Q(()=>{let t=dt([1]);return this.hasL1&&(t=be(t,$e(X(this.l1,dn(e))))),this.hasL2&&(t=be(t,$e(X(this.l2,Uh(e))))),t.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(e,t){return new e({l1:t.l1,l2:t.l2})}}Gh.className="L1L2",fe(Gh);function rV(e){return Kw(e),new Gh({l1:e!=null?e.l1:null,l2:0})}function oV(e){return Kw(e),new 
Gh({l2:e!=null?e.l2:null,l1:0})}const mN={l1l2:"L1L2"};function Ct(e){return dw(e)}function fN(e,t={}){return kh(e,Ws.getMap().classNameMap,t,"regularizer")}function zt(e){if(e==null)return null;if(typeof e=="string"){const t=e in mN?mN[e]:e,n={className:t,config:{}};return fN(n)}else return e instanceof pN?e:fN(e)}class Xw extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null&&(this.maxValue=e.maxValue)}call(e,t){e=Xe(e);let n=Ni(e);return this.maxValue!=null&&(n=Jn(n,0,this.maxValue)),n}computeOutputShape(e){return e}getConfig(){const e={maxValue:this.maxValue},t=super.getConfig();return Object.assign(e,t),e}}Xw.className="ReLU",fe(Xw);class Jw extends lt{constructor(e){super(e==null?{}:e);this.DEFAULT_ALPHA=.3,e==null&&(e={}),this.alpha=e.alpha==null?this.DEFAULT_ALPHA:e.alpha}call(e,t){const n=Xe(e);return lp(n,this.alpha)}computeOutputShape(e){return e}getConfig(){const e={alpha:this.alpha},t=super.getConfig();return Object.assign(e,t),e}}Jw.className="LeakyReLU",fe(Jw);class Zw extends lt{constructor(e){super(e==null?{}:e);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",e==null&&(e={}),this.supportsMasking=!0,this.alphaInitializer=Pt(e.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=zt(e.alphaRegularizer),this.alphaConstraint=gn(e.alphaConstraint),e.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(e.sharedAxes))this.sharedAxes=e.sharedAxes;else if(typeof e.sharedAxes=="number")this.sharedAxes=[e.sharedAxes];else throw new q(`Expected sharedAxes to be a number or an array of numbers, but got ${e.sharedAxes}`)}build(e){e=Nt(e);const t=e.slice(1);if(this.sharedAxes!=null)for(const s of this.sharedAxes)t[s-1]=1;this.alpha=this.addWeight("alpha",t,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);const n={};if(this.sharedAxes!=null)for(let s=1;s(jt(t),t==="channelsFirst"?Ye(e,[0,2,3,1]):e))}function gN(e,t){return Q(()=>(jt(t),t==="channelsFirst"?Ye(e,[0,2,3,4,1]):e))}function yN(e,t,n,s=1,i="valid",o,a=1){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.shape.length!==3)throw new q(`The input of a conv1dWithBias operation should be 3, but is ${e.shape.length} instead.`);if(t.shape.length!==3)throw new q(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(n!=null&&n.shape.length!==1)throw new q(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(o==="channelsFirst"&&(e=Ye(e,[0,2,1])),i==="causal")throw new Pe("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let c=ip(e,t,s,i==="same"?"same":"valid","NWC",a);return n!=null&&(c=Ri(c,n)),c})}function yte(e,t,n=1,s="valid",i,o=1){return Q(()=>(jt(i),yN(e,t,null,n,s,i,o)))}function bte(e,t,n=[1,1],s="valid",i,o){return Q(()=>(jt(i),sL(e,t,null,n,s,i,o)))}function sL(e,t,n,s=[1,1],i="valid",o,a,c=null){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.rank!==3&&e.rank!==4)throw new q(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${e.rank}.`);if(t.rank!==3&&t.rank!==4)throw new q(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${e.rank}.`);let h=nL(e,o);if(i==="causal")throw new Pe("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return h=Kb({x:h,filter:t,strides:s,pad:i==="same"?"same":"valid",dilations:a,dataFormat:"NHWC",bias:n,activation:c}),o==="channelsFirst"&&(h=Ye(h,[0,3,1,2])),h})}function wte(e,t,n=[1,1,1],s="valid",i,o){return 
Q(()=>(jt(i),bN(e,t,null,n,s,i,o)))}function bN(e,t,n,s=[1,1,1],i="valid",o,a){return Q(()=>{if(o==null&&(o=ei()),jt(o),e.rank!==4&&e.rank!==5)throw new q(`conv3dWithBias expects input to be of rank 4 or 5, but received ${e.rank}.`);if(t.rank!==4&&t.rank!==5)throw new q(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ${e.rank}.`);let c=gN(e,o);if(i==="causal")throw new Pe("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return c=Lb(c,t,s,i==="same"?"same":"valid","NDHWC",a),n!=null&&(c=Ri(c,n)),o==="channelsFirst"&&(c=Ye(c,[0,4,1,2,3])),c})}class iL extends lt{constructor(e,t){super(t);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",iL.verifyArgs(t),this.rank=e,wn(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new Pe(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=oc(t.kernelSize,e,"kernelSize"),this.strides=oc(t.strides==null?1:t.strides,e,"strides"),this.padding=t.padding==null?"valid":t.padding,vs(this.padding),this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,jt(this.dataFormat),this.activation=Kr(t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.biasInitializer=Pt(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=gn(t.biasConstraint),this.biasRegularizer=zt(t.biasRegularizer),this.activityRegularizer=zt(t.activityRegularizer),this.dilationRate=oc(t.dilationRate==null?1:t.dilationRate,e,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new q(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new q(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new q(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(e){if(As("kernelSize"in e,"required key 'kernelSize' not in config"),typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,3))throw new q(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(e.kernelSize)}.`)}getConfig(){const e={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:jr(this.activation),useBias:this.useBias,biasInitializer:Kt(this.biasInitializer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),biasConstraint:fn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}class Yh extends iL{constructor(e,t){super(e,t);this.kernel=null,Yh.verifyArgs(t),this.filters=t.filters,wn(this.filters,"filters"),this.kernelInitializer=Pt(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=gn(t.kernelConstraint),this.kernelRegularizer=zt(t.kernelRegularizer)}build(e){e=Nt(e);const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new q(`The channel dimension of the input should be 
defined. Found ${e[t]}`);const n=e[t],s=this.kernelSize.concat([n,this.filters]);this.kernel=this.addWeight("kernel",s,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[t]:n}}],this.built=!0}call(e,t){return Q(()=>{e=Xe(e);let n;const s=this.bias==null?null:this.bias.read(),i=cv(this.activation.getClassName());if(i!=null&&this.rank===2)n=sL(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate,i);else{if(this.rank===1)n=yN(e,this.kernel.read(),s,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)n=sL(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)n=bN(e,this.kernel.read(),s,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new Pe("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(n=this.activation.apply(n))}return n})}computeOutputShape(e){e=Nt(e);const t=[],n=this.dataFormat==="channelsLast"?e.slice(1,e.length-1):e.slice(2);for(let i=0;i 0 but got ${JSON.stringify(e.filters)}`)}}class Hh extends Yh{constructor(e){super(2,e);Hh.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,2))throw new q(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(e.kernelSize)}.`)}}Hh.className="Conv2D",fe(Hh);class um extends Yh{constructor(e){super(3,e);um.verifyArgs(e)}getConfig(){const e=super.getConfig();return delete e.rank,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!(Array.isArray(e.kernelSize)&&(e.kernelSize.length===1||e.kernelSize.length===3)))throw new q(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(e.kernelSize)}.`)}}um.className="Conv3D",fe(um);class rL extends Hh{constructor(e){super(e);if(this.inputSpec=[new Ln({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new q(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(e){if(e=Nt(e),e.length!==4)throw new q("Input should have rank 4; Received input shape: "+JSON.stringify(e));const t=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[t]==null)throw new q("The channel dimension of the inputs should be defined. 
Found `None`.");const n=e[t],s=this.kernelSize.concat([this.filters,n]);this.kernel=this.addWeight("kernel",s,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new Ln({ndim:4,axes:{[t]:n}})],this.built=!0}call(e,t){return Q(()=>{let n=Xe(e);if(n.shape.length!==4)throw new q(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${n.shape.length}`);const s=n.shape,i=s[0];let o,a;this.dataFormat==="channelsFirst"?(o=2,a=3):(o=1,a=2);const c=s[o],h=s[a],d=this.kernelSize[0],m=this.kernelSize[1],f=this.strides[0],b=this.strides[1],w=hm(c,f,d,this.padding),L=hm(h,b,m,this.padding),x=[i,w,L,this.filters];this.dataFormat!=="channelsLast"&&(n=Ye(n,[0,2,3,1]));let v=rp(n,this.kernel.read(),x,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(v=Ye(v,[0,3,1,2])),this.bias!=null&&(v=Ri(v,this.bias.read(),this.dataFormat)),this.activation!=null&&(v=this.activation.apply(v)),v})}computeOutputShape(e){e=Nt(e);const t=e.slice();let n,s,i;this.dataFormat==="channelsFirst"?(n=1,s=2,i=3):(n=3,s=1,i=2);const o=this.kernelSize[0],a=this.kernelSize[1],c=this.strides[0],h=this.strides[1];return t[n]=this.filters,t[s]=hm(t[s],c,o,this.padding),t[i]=hm(t[i],h,a,this.padding),t}getConfig(){const e=super.getConfig();return delete e.dilationRate,e}}rL.className="Conv2DTranspose",fe(rL);class wN extends Yh{constructor(e,t){super(e,t);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,t.filters==null)throw new q("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(t.kernelInitializer!=null||t.kernelRegularizer!=null||t.kernelConstraint!=null)throw new q("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(t.padding!=null&&t.padding!=="same"&&t.padding!=="valid")throw new q(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(t.padding)}`);this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=Pt(t.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=zt(t.depthwiseRegularizer),this.depthwiseConstraint=gn(t.depthwiseConstraint),this.pointwiseInitializer=Pt(t.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=zt(t.pointwiseRegularizer),this.pointwiseConstraint=gn(t.pointwiseConstraint)}build(e){if(e=Nt(e),e.length{e=Xe(e);let n;if(this.rank===1)throw new Pe("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(e=Ye(e,[0,2,3,1])),n=Ub(e,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(n=Ri(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),this.dataFormat==="channelsFirst"&&(n=Ye(n,[0,3,1,2])),n})}getConfig(){const e=super.getConfig();return delete e.rank,delete e.kernelInitializer,delete e.kernelRegularizer,delete e.kernelConstraint,e.depthwiseInitializer=Kt(this.depthwiseInitializer),e.pointwiseInitializer=Kt(this.pointwiseInitializer),e.depthwiseRegularizer=Ct(this.depthwiseRegularizer),e.pointwiseRegularizer=Ct(this.pointwiseRegularizer),e.depthwiseConstraint=fn(this.depthwiseConstraint),e.pointwiseConstraint=fn(this.pointwiseConstraint),e}}wN.className="SeparableConv";class oL extends wN{constructor(e){super(2,e)}}oL.className="SeparableConv2D",fe(oL);class dm extends Yh{constructor(e){super(1,e);dm.verifyArgs(e),this.inputSpec=[{ndim:3}]}getConfig(){const e=super.getConfig();return delete e.rank,delete e.dataFormat,e}static verifyArgs(e){if(typeof e.kernelSize!="number"&&!mw(e.kernelSize,"number",1,1))throw new q(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(e.kernelSize)}.`)}}dm.className="Conv1D",fe(dm);class aL extends lt{constructor(e){super(e);typeof e.cropping=="number"?this.cropping=[[e.cropping,e.cropping],[e.cropping,e.cropping]]:typeof e.cropping[0]=="number"?this.cropping=[[e.cropping[0],e.cropping[0]],[e.cropping[1],e.cropping[1]]]:this.cropping=e.cropping,this.dataFormat=e.dataFormat===void 0?"channelsLast":e.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(e){return this.dataFormat==="channelsFirst"?[e[0],e[1],e[2]-this.cropping[0][0]-this.cropping[0][1],e[3]-this.cropping[1][0]-this.cropping[1][1]]:[e[0],e[1]-this.cropping[0][0]-this.cropping[0][1],e[2]-this.cropping[1][0]-this.cropping[1][1],e[3]]}call(e,t){return Q(()=>{if(e=Xe(e),this.dataFormat==="channelsLast"){const n=Pp(e,this.cropping[0][0],e.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return Pp(n,this.cropping[1][0],e.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{const n=Pp(e,this.cropping[0][0],e.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return Pp(n,this.cropping[1][0],e.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){const e={cropping:this.cropping,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}aL.className="Cropping2D",fe(aL);class cL extends 
lt{constructor(e){super(e);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=e.size==null?this.DEFAULT_SIZE:e.size,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat}computeOutputShape(e){if(this.dataFormat==="channelsFirst"){const t=e[2]==null?null:this.size[0]*e[2],n=e[3]==null?null:this.size[1]*e[3];return[e[0],e[1],t,n]}else{const t=e[1]==null?null:this.size[0]*e[1],n=e[2]==null?null:this.size[1]*e[2];return[e[0],t,n,e[3]]}}call(e,t){return Q(()=>{let n=Xe(e);const s=n.shape;if(this.dataFormat==="channelsFirst"){n=Ye(n,[0,2,3,1]);const i=this.size[0]*s[2],o=this.size[1]*s[3],a=n.resizeNearestNeighbor([i,o]);return Ye(a,[0,3,1,2])}else{const i=this.size[0]*s[1],o=this.size[1]*s[2];return n.resizeNearestNeighbor([i,o])}})}getConfig(){const e={size:this.size,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}cL.className="UpSampling2D",fe(cL);function aV(e,t,n=[1,1],s="valid",i,o){return Q(()=>{i==null&&(i=ei()),jt(i);let a=nL(e,i);if(e.rank!==4)throw new q(`Input for depthwiseConv2d is required to be 4-D, but is instead ${e.rank}-D`);if(t.rank!==4)throw new q(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return a=Co(a,t,n,s==="same"?"same":"valid","NHWC",o),i==="channelsFirst"&&(a=Ye(a,[0,3,1,2])),a})}class lL extends iL{constructor(e){super(2,e);this.depthwiseKernel=null,this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=Pt(e.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=gn(e.depthwiseConstraint),this.depthwiseRegularizer=zt(e.depthwiseRegularizer)}build(e){if(e=Nt(e),e.length<4)throw new q(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(e)}.`);const t=this.dataFormat==="channelsFirst"?1:3;if(e[t]==null||e[t]<0)throw new q(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${e[t]}).`);const n=e[t],s=[this.kernelSize[0],this.kernelSize[1],n,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",s,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[n*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{e=Xe(e);let n=aV(e,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(n=Ri(n,this.bias.read(),this.dataFormat)),this.activation!=null&&(n=this.activation.apply(n)),n})}computeOutputShape(e){e=Nt(e);const t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[1]*this.depthMultiplier:e[3]*this.depthMultiplier,i=oi(t,this.kernelSize[0],this.padding,this.strides[0]),o=oi(n,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[e[0],s,i,o]:[e[0],i,o,s]}getConfig(){const e=super.getConfig();return e.depthMultiplier=this.depthMultiplier,e.depthwiseInitializer=Kt(this.depthwiseInitializer),e.depthwiseRegularizer=Ct(this.depthwiseRegularizer),e.depthwiseConstraint=fn(this.depthwiseRegularizer),e}}lL.className="DepthwiseConv2D",fe(lL);function LN(e,t,n,s){if(Array.isArray(e)){if(t!=null||n!=null)throw new q("When inputs is an array, neither initialState or constants should be provided");s!=null&&(n=e.slice(e.length-s,e.length),e=e.slice(0,e.length-s)),e.length>1&&(t=e.slice(1,e.length)),e=e[0]}function i(o){return 
o==null||Array.isArray(o)?o:[o]}return t=i(t),n=i(n),{inputs:e,initialState:t,constants:n}}function SN(e,t,n,s=!1,i,o,a=!1,c=!1){return Q(()=>{const h=t.shape.length;if(h<3)throw new q(`Input should be at least 3D, but is ${h}D.`);const d=[1,0].concat(ni(2,h));if(t=Ye(t,d),o!=null)throw new Pe("The rnn() functoin of the deeplearn.js backend does not support constants yet.");a&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),i!=null&&(i=i.asType("bool").asType("float32"),i.rank===h-1&&(i=Zn(i,-1)),i=Ye(i,d)),s&&(t=Ts(t,0),i!=null&&(i=Ts(i,0)));const m=[];let f,b=n;const w=t.shape[0],L=Qs(t);let x;i!=null&&(x=Qs(i));for(let N=0;Ne(O,b));if(i==null)f=E[0],b=E[1];else{const k=Q(()=>{const F=x[N],U=Fn(F).sub(F),$=E[0].mul(F).add(b[0].mul(U)),Y=b.map((j,Z)=>E[1][Z].mul(F).add(j.mul(U)));return{output:$,newStates:Y}});f=k.output,b=k.newStates}c&&m.push(f)}let v;if(c){const N=1;v=es(m,N)}return[f,v,b]})}class Ei extends lt{constructor(e){super(e);let t;if(e.cell==null)throw new q("cell property is missing for the constructor of RNN.");if(Array.isArray(e.cell)?t=new fm({cells:e.cell}):t=e.cell,t.stateSize==null)throw new q("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=t,this.returnSequences=e.returnSequences==null?!1:e.returnSequences,this.returnState=e.returnState==null?!1:e.returnState,this.goBackwards=e.goBackwards==null?!1:e.goBackwards,this._stateful=e.stateful==null?!1:e.stateful,this.unroll=e.unroll==null?!1:e.unroll,this.supportsMasking=!0,this.inputSpec=[new Ln({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return ni(0,e).map(t=>null)}else return this.states_}setStates(e){this.states_=e}computeOutputShape(e){Ew(e)&&(e=e[0]),e=e;let t=this.cell.stateSize;Array.isArray(t)||(t=[t]);const n=t[0];let s;if(this.returnSequences?s=[e[0],e[1],n]:s=[e[0],n],this.returnState){const i=[];for(const o of t)i.push([e[0],o]);return[s].concat(i)}else return s}computeMask(e,t){return Q(()=>{Array.isArray(t)&&(t=t[0]);const n=this.returnSequences?t:null;if(this.returnState){const s=this.states.map(i=>null);return[n].concat(s)}else return n})}get states(){if(this.states_==null){const e=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,t=[];for(let n=0;na.shape[a.shape.length-1]),o))throw new q(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=o.map(a=>new Ln({shape:[null,a]}));this.stateful&&this.resetStates()}resetStates(e,t=!1){Q(()=>{if(!this.stateful)throw new nr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape[0];if(n==null)throw new q("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>dt([n,s])):this.states_=[dt([n,this.cell.stateSize])];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(s=>dt([n,s])):this.states_[0]=dt([n,this.cell.stateSize]);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new q(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). Input received: ${e}`);t===!0?this.keptStates.push(this.states_.slice()):He(this.states_);for(let s=0;sbn(s.clone()))})}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=LN(e,n,s,this.numConstants);e=i.inputs,n=i.initialState,s=i.constants;let o=[],a=[];if(n!=null){t.initialState=n,o=o.concat(n),this.stateSpec=[];for(const h of n)this.stateSpec.push(new Ln({shape:h.shape}));a=a.concat(this.stateSpec)}s!=null&&(t.constants=s,o=o.concat(s),this.numConstants=s.length);const c=o[0]instanceof ii;if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const f=super.apply(h,t);return this.inputSpec=m,f}else return super.apply(e,t)}call(e,t){return Q(()=>{const n=t==null?null:t.mask,s=t==null?null:t.training;let i=t==null?null:t.initialState;e=Xe(e),i==null&&(this.stateful?i=this.states_:i=this.getInitialState(e));const o=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(i.length!==o)throw new q(`RNN Layer has ${o} state(s) but was passed ${i.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");const a={training:s},c=(w,L)=>{const x=this.cell.call([w].concat(L),a);return[x[0],x.slice(1)]},h=SN(c,e,i,this.goBackwards,n,null,this.unroll,this.returnSequences),d=h[0],m=h[1],f=h[2];this.stateful&&this.resetStates(f,s);const b=this.returnSequences?m:d;return this.returnState?[b].concat(f):b})}getInitialState(e){return Q(()=>{let t=dt(e.shape);return t=$e(t,[1,2]),t=$h(t),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(n=>n>1?Iw(t,[1,n]):t):this.cell.stateSize>1?[Iw(t,[1,this.cell.stateSize])]:[t]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(e)}getConfig(){const e=super.getConfig(),t={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(t.numConstants=this.numConstants);const n=this.cell.getConfig();return this.getClassName()===Ei.className&&(t.cell={className:this.cell.getClassName(),config:n}),Object.assign({},n,e,t)}static fromConfig(e,t,n={}){const s=t.cell,i=ri(s,n);return new e(Object.assign(t,{cell:i}))}}Ei.className="RNN",fe(Ei);class ac extends lt{}class pm extends 
ac{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation==null?this.DEFAULT_ACTIVATION:e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=Nt(e),this.kernel=this.addWeight("kernel",[e[e.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{if(e=e,e.length!==2)throw new q(`SimpleRNNCell expects 2 input Tensors, got ${e.length}.`);let n=e[1];e=e[0];const s=t.training==null?!1:t.training;0Fn(e),rate:this.dropout,training:s})),0Fn(n),rate:this.recurrentDropout,training:s}));let i;const o=this.dropoutMask,a=this.recurrentDropoutMask;o!=null?i=Ci(X(e,o),this.kernel.read()):i=Ci(e,this.kernel.read()),this.bias!=null&&(i=Ri(i,this.bias.read())),a!=null&&(n=X(n,a));let c=be(i,Ci(n,this.recurrentKernel.read()));return this.activation!=null&&(c=this.activation.apply(c)),[c,c]})}getConfig(){const e=super.getConfig(),t={units:this.units,activation:jr(this.activation),useBias:this.useBias,kernelInitializer:Kt(this.kernelInitializer),recurrentInitializer:Kt(this.recurrentInitializer),biasInitializer:Kt(this.biasInitializer),kernelRegularizer:Ct(this.kernelRegularizer),recurrentRegularizer:Ct(this.recurrentRegularizer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),kernelConstraint:fn(this.kernelConstraint),recurrentConstraint:fn(this.recurrentConstraint),biasConstraint:fn(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},e,t)}}pm.className="SimpleRNNCell",fe(pm);class hL extends Ei{constructor(e){e.cell=new pm(e),super(e)}call(e,t){return Q(()=>{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return new e(t)}}hL.className="SimpleRNN",fe(hL);class mm extends 
ac{constructor(e){super(e);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.resetAfter)throw new q("GRUCell does not support reset_after parameter set to true.");this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Kr(e.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){e=Nt(e);const t=e[e.length-1];this.kernel=this.addWeight("kernel",[t,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(e,t){return Q(()=>{if(e=e,e.length!==2)throw new q(`GRUCell expects 2 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training==null?!1:t.training;let s=e[1];e=e[0],0Fn(e),rate:this.dropout,training:n,count:3})),0Fn(s),rate:this.recurrentDropout,training:n,count:3}));const i=this.dropoutMask,o=this.recurrentDropoutMask;let a,c,h;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}uL.className="GRU",fe(uL);class qh extends ac{constructor(e){super(e);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation===void 0?this.DEFAULT_ACTIVATION:e.activation),this.recurrentActivation=Kr(e.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:e.recurrentActivation),this.useBias=e.useBias==null?!0:e.useBias,this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=Pt(e.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=e.unitForgetBias,this.kernelRegularizer=zt(e.kernelRegularizer),this.recurrentRegularizer=zt(e.recurrentRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.kernelConstraint=gn(e.kernelConstraint),this.recurrentConstraint=gn(e.recurrentConstraint),this.biasConstraint=gn(e.biasConstraint),this.dropout=tc([1,Yr([0,e.dropout==null?0:e.dropout])]),this.recurrentDropout=tc([1,Yr([0,e.recurrentDropout==null?0:e.recurrentDropout])]),this.implementation=e.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(e){var t;e=Nt(e);const n=e[e.length-1];this.kernel=this.addWeight("kernel",[n,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let s;if(this.useBias){if(this.unitForgetBias){const i=this.biasInitializer,o=this.units;s=new(t=class extends Ms{apply(c,h){const d=i.apply([o]),m=new Vp().apply([o]),f=i.apply([o*2]);return yv(yv(d,m),f)}},t.className="CustomInit",t)}else s=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,s,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(e,t){return Q(()=>{const n=t.training==null?!1:t.training;if(e=e,e.length!==3)throw new q(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);let s=e[1];const i=e[2];e=e[0],0Fn(e),rate:this.dropout,training:n,count:4})),0Fn(s),rate:this.recurrentDropout,training:n,count:4}));const o=this.dropoutMask,a=this.recurrentDropoutMask;let c,h,d,m;0{this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}static fromConfig(e,t){return t.implmentation===0&&(t.implementation=1),new e(t)}}dL.className="LSTM",fe(dL);class fm extends ac{constructor(e){super(e);this.cells=e.cells}get stateSize(){const e=[];for(const t of this.cells.slice().reverse())Array.isArray(t.stateSize)?e.push(...t.stateSize):e.push(t.stateSize);return e}call(e,t){return Q(()=>{e=e;let n=e.slice(1);const s=[];for(const a of this.cells.slice().reverse())Array.isArray(a.stateSize)?s.push(n.splice(0,a.stateSize.length)):s.push(n.splice(0,1));s.reverse();const i=[];let o;for(let a=0;a{Bo(`RNNCell_${s}`,()=>{n.build(e),Array.isArray(n.stateSize)?t=n.stateSize[0]:t=n.stateSize,e=[e[0],t]})}),this.built=!0}getConfig(){const e=super.getConfig(),t=i=>({className:i.getClassName(),config:i.getConfig()}),n=this.cells.map(t),s={cells:n};return Object.assign({},e,s)}static fromConfig(e,t,n={}){const s=[];for(const i of t.cells)s.push(ri(i,n));return new e({cells:s})}get trainableWeights(){if(!this.trainable)return[];const e=[];for(const t of this.cells)e.push(...t.trainableWeights);return e}get nonTrainableWeights(){const e=[];for(const t of this.cells)e.push(...t.nonTrainableWeights);if(!this.trainable){const 
t=[];for(const n of this.cells)t.push(...n.trainableWeights);return t.concat(e)}return e}getWeights(){const e=[];for(const t of this.cells)e.push(...t.weights);return Dw(e)}setWeights(e){const t=[];for(const n of this.cells){const s=n.weights.length,i=e.splice(s);for(let o=0;owv(t(),n),a=()=>Bh(o,t,s);if(!i||i<=1)return bn(a().clone());const c=Array(i).fill(void 0).map(a);return c.map(h=>bn(h.clone()))}var cV=function(e,t){var n={};for(var s in e)Object.prototype.hasOwnProperty.call(e,s)&&t.indexOf(s)<0&&(n[s]=e[s]);if(e!=null&&typeof Object.getOwnPropertySymbols=="function")for(var i=0,s=Object.getOwnPropertySymbols(e);i{if(this.cell.dropoutMask!=null&&(He(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(He(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),t&&t.constants)throw new q("ConvRNN2D cell does not support constants");const n=t==null?null:t.mask,s=t==null?null:t.training,i=t==null?null:t.initialState;return super.call(e,{mask:n,training:s,initialState:i})})}computeOutputShape(e){let t=this.computeSingleOutputShape(e);return this.returnSequences||(t=[t[0],...t.slice(2)]),this.returnState&&(t=[t,...Array(2).fill([e[0],...t.slice(-3)])]),t}getInitialState(e){return Q(()=>{const{stateSize:t}=this.cell,n=e.shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=dt(i);return Array.isArray(t)?Array(t.length).fill(o):[o]})}resetStates(e,t=!1){Q(()=>{if(!this.stateful)throw new nr("Cannot call resetStates() on an RNN Layer that is not stateful.");const n=this.inputSpec[0].shape,s=this.computeSingleOutputShape(n),i=[s[0],...s.slice(2)],o=n[0];if(o==null)throw new q("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>dt(i)):this.states_=[dt(i)];else if(e==null)He(this.states_),this.keptStates!=null&&(He(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>dt(i)):this.states_[0]=dt(i);else{if(Array.isArray(e)||(e=[e]),e.length!==this.states_.length)throw new q(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${e.length} state value(s). 
Input received: ${e}`);t?this.keptStates.push(this.states_.slice()):He(this.states_);for(let a=0;abn(a.clone()))})}computeSingleOutputShape(e){const{dataFormat:t,filters:n,kernelSize:s,padding:i,strides:o,dilationRate:a}=this.cell,c=t==="channelsFirst",h=e[c?3:2],d=e[c?4:3],m=oi(h,s[0],i,o[0],a[0]),f=oi(d,s[1],i,o[1],a[1]),b=[...e.slice(0,2),...c?[n,m,f]:[m,f,n]];return b}}IN.className="ConvRNN2D";class gm extends qh{constructor(e){const{filters:t,kernelSize:n,strides:s,padding:i,dataFormat:o,dilationRate:a}=e;super(Object.assign({},e,{units:t}));this.filters=t,wn(this.filters,"filters"),this.kernelSize=oc(n,2,"kernelSize"),this.kernelSize.forEach(c=>wn(c,"kernelSize")),this.strides=oc(s||1,2,"strides"),this.strides.forEach(c=>wn(c,"strides")),this.padding=i||"valid",vs(this.padding),this.dataFormat=o||"channelsLast",jt(this.dataFormat),this.dilationRate=oc(a||1,2,"dilationRate"),this.dilationRate.forEach(c=>wn(c,"dilationRate"))}build(e){var t;e=Nt(e);const n=this.dataFormat==="channelsFirst"?1:e.length-1;if(e[n]==null)throw new q(`The channel dimension of the input should be defined. Found ${e[n]}`);const s=e[n],i=4,o=this.kernelSize.concat([s,this.filters*i]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);const a=this.kernelSize.concat([this.filters,this.filters*i]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",a,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let c;if(this.unitForgetBias){const h=this.biasInitializer,d=this.filters;c=new(t=class extends Ms{apply(f,b){const w=h.apply([d]),L=Js([d]),x=h.apply([d*2]);return Sw([w,L,x])}},t.className="CustomInit",t)}else c=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*i],null,c,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(e,t){return Q(()=>{if(e.length!==3)throw new q(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${e.length}.`);const n=t.training||!1,s=e[0],i=e[1],o=e[2],a=4;0Fn(s),rate:this.dropout,training:n,count:a}));const c=this.dropoutMask,h=(we,Se,xe)=>!Se||!Se[xe]?we:X(Se[xe],we);let d=h(s,c,0),m=h(s,c,1),f=h(s,c,2),b=h(s,c,3);0Fn(i),rate:this.recurrentDropout,training:n,count:a}));const w=this.recurrentDropoutMask;let L=h(i,w,0),x=h(i,w,1),v=h(i,w,2),N=h(i,w,3);const O=3,[E,k,F,U]=hs(this.kernel.read(),a,O),[$,Y,j,Z]=this.useBias?hs(this.bias.read(),a):[null,null,null,null];d=this.inputConv(d,E,$,this.padding),m=this.inputConv(m,k,Y,this.padding),f=this.inputConv(f,F,j,this.padding),b=this.inputConv(b,U,Z,this.padding);const[ie,de,he,ue]=hs(this.recurrentKernel.read(),a,O);L=this.recurrentConv(L,ie),x=this.recurrentConv(x,de),v=this.recurrentConv(v,he),N=this.recurrentConv(N,ue);const me=this.recurrentActivation.apply(be(d,L)),ce=this.recurrentActivation.apply(be(m,x)),ye=be(X(ce,o),X(me,this.activation.apply(be(f,v)))),pe=X(this.recurrentActivation.apply(be(b,N)),this.activation.apply(ye));return[pe,pe,ye]})}getConfig(){const e=super.getConfig(),{units:t}=e,n=cV(e,["units"]),s={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},n,s)}inputConv(e,t,n,s){const i=Ji(e,t,this.strides,s||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return n?Ri(i,n,this.dataFormat):i}recurrentConv(e,t){const n=1;return 
Ji(e,t,n,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}gm.className="ConvLSTM2DCell",fe(gm);class pL extends IN{constructor(e){const t=new gm(e);super(Object.assign({},e,{cell:t}))}static fromConfig(e,t){return new e(t)}}pL.className="ConvLSTM2D",fe(pL);class ym extends lt{constructor(e){super(e);this.rate=Math.max(Math.min(e.rate,1),0),this.noiseShape=e.noiseShape,this.seed=e.seed,this.supportsMasking=!0}getNoiseShape(e){if(this.noiseShape==null)return this.noiseShape;const t=e.shape,n=[];for(let s=0;s{this.invokeCallHook(e,t);const n=Xe(e);if(0wv(n,this.rate,i,this.seed),()=>n,s);return o}return e})}getConfig(){const e={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},t=super.getConfig();return Object.assign(e,t),e}dispose(){return super.dispose()}}ym.className="Dropout",fe(ym);class mL extends ym{constructor(e){super(e);this.inputSpec=[{ndim:3}]}getNoiseShape(e){const t=e.shape;return[t[0],1,t[2]]}}mL.className="SpatialDropout1D",fe(mL);class fL extends lt{constructor(e){super(e);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",e.batchInputShape==null&&e.inputShape==null&&e.inputDim!=null){let t=null;e.batchSize!=null&&(t=e.batchSize),this.batchInputShape=[t,e.inputDim]}this.units=e.units,wn(this.units,"units"),this.activation=Kr(e.activation),e.useBias!=null&&(this.useBias=e.useBias),this.kernelInitializer=Pt(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=Pt(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=gn(e.kernelConstraint),this.biasConstraint=gn(e.biasConstraint),this.kernelRegularizer=zt(e.kernelRegularizer),this.biasRegularizer=zt(e.biasRegularizer),this.activityRegularizer=zt(e.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(e){e=Nt(e);const t=e[e.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[t,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:t}}],this.built=!0}computeOutputShape(e){e=Nt(e);const t=e.slice();return t[t.length-1]=this.units,t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=cv(this.activation.getClassName());let i;return s!=null?i=Ci(n,this.kernel.read(),s,this.bias?this.bias.read():null):(i=Ci(n,this.kernel.read()),this.bias!=null&&(i=Ri(i,this.bias.read())),this.activation!=null&&(i=this.activation.apply(i))),i})}getConfig(){const e={units:this.units,activation:jr(this.activation),useBias:this.useBias,kernelInitializer:Kt(this.kernelInitializer),biasInitializer:Kt(this.biasInitializer),kernelRegularizer:Ct(this.kernelRegularizer),biasRegularizer:Ct(this.biasRegularizer),activityRegularizer:Ct(this.activityRegularizer),kernelConstraint:fn(this.kernelConstraint),biasConstraint:fn(this.biasConstraint)},t=super.getConfig();return Object.assign(e,t),e}}fL.className="Dense",fe(fL);class gL extends lt{constructor(e){e=e||{},super(e),this.inputSpec=[{minNDim:3}],this.dataFormat=e.dataFormat}computeOutputShape(e){e=Nt(e);for(const t of e.slice(1))if(t==null)throw new q(`The shape of the input to "Flatten" is not fully defined (got ${e.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[e[0],Gr(e,1)]}call(e,t){return Q(()=>{this.invokeCallHook(e,t);let n=Xe(e);if(this.dataFormat==="channelsFirst"&&n.rank>1){const s=[0];for(let i=2;i{this.invokeCallHook(e,t);const n=Xe(e);return this.activation.apply(n)})}getConfig(){const e={activation:jr(this.activation)},t=super.getConfig();return Object.assign(e,t),e}}yL.className="Activation",fe(yL);class bL extends lt{constructor(e){super(e);this.n=e.n,this.inputSpec=[{ndim:2}]}computeOutputShape(e){return[e[0],this.n,e[1]]}call(e,t){return Q(()=>(e=Xe(e),Rz(e,this.n)))}getConfig(){const e={n:this.n},t=super.getConfig();return Object.assign(e,t),e}}bL.className="RepeatVector",fe(bL);class wL extends lt{constructor(e){super(e);this.targetShape=e.targetShape;for(let t=0;t{this.invokeCallHook(e,t);const n=Xe(e),s=n.shape,i=s.slice(0,1).concat(this.fixUnknownDimension(s.slice(1),this.targetShape));return n.reshape(i)})}getConfig(){const e={targetShape:this.targetShape},t=super.getConfig();return Object.assign(e,t),e}}wL.className="Reshape",fe(wL);class LL extends lt{constructor(e){super(e);if(e.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(e.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${e.dims} instead.`);const t=ni(1,e.dims.length+1);if(!ae(e.dims.slice().sort(),t))throw new Error("Invalid permutation `dims`: "+JSON.stringify(e.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=e.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new Ln({ndim:this.dims.length+1})]}computeOutputShape(e){e=Nt(e);const t=e.slice();return this.dims.forEach((n,s)=>{t[s+1]=e[n]}),t}call(e,t){return Ye(Xe(e),this.dimsIncludingBatch)}getConfig(){const e={dims:this.dims},t=super.getConfig();return Object.assign(e,t),e}}LL.className="Permute",fe(LL);class SL extends lt{constructor(e){super(e==null?{}:e);this.supportsMasking=!0,e!=null?this.maskValue=e.maskValue==null?0:e.maskValue:this.maskValue=0}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={maskValue:this.maskValue};return Object.assign(t,e),t}computeMask(e,t){const n=Xe(e),s=-1;return ih(Br(n,this.maskValue),s)}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=-1,i=!0,o=ih(Br(n,this.maskValue),s,i),a=n.mul(o.asType(n.dtype));return a})}}SL.className="Masking",fe(SL);class IL extends lt{constructor(e){super(e);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",e.batchInputShape==null&&e.inputShape==null){let t=null;e.batchSize!=null&&(t=e.batchSize),e.inputLength==null?this.batchInputShape=[t,null]:this.batchInputShape=[t].concat(Et(e.inputLength))}this.inputDim=e.inputDim,wn(this.inputDim,"inputDim"),this.outputDim=e.outputDim,wn(this.outputDim,"outputDim"),this.embeddingsInitializer=Pt(e.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=zt(e.embeddingsRegularizer),this.activityRegularizer=zt(e.activityRegularizer),this.embeddingsConstraint=gn(e.embeddingsConstraint),this.maskZero=e.maskZero,this.supportsMasking=e.maskZero,this.inputLength=e.inputLength}build(e){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(e){}computeMask(e,t){return 
Q(()=>this.maskZero?(e=Xe(e),Br(e,et(e))):null)}computeOutputShape(e){if(e=Nt(e),this.inputLength==null)return[...e,this.outputDim];const t=Et(this.inputLength);if(t.length!==e.length-1)throw new q(`"inputLength" is ${this.inputLength}, but received input shape has shape ${e}`);{let n=0;for(let s=0;s{this.invokeCallHook(e,t);let n=Xe(e);n.dtype!=="int32"&&(n=Wh(n,"int32"));const s=bv(this.embeddings.read(),n.as1D());return s.reshape(Nt(this.computeOutputShape(n.shape)))})}getConfig(){const e={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:Kt(this.embeddingsInitializer),embeddingsRegularizer:Ct(this.embeddingsRegularizer),activityRegularizer:Ct(this.activityRegularizer),embeddingsConstraint:fn(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},t=super.getConfig();return Object.assign(e,t),e}}IL.className="Embedding",fe(IL);class Vo extends lt{constructor(e){super(e||{});this.supportsMasking=!0}mergeFunction(e){throw new Pe}computeElementwiseOpOutputShape(e,t){if(e==null||t==null)return null;if(e.length1)throw new q(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(e)}.`);let n=e[0]==null?null:e[0].slice(1);for(let i=1;ii.length);e.indexOf(null)===-1&&Vr(s).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(e,t){return Q(()=>{if(e=e,this.reshapeRequired){const n=[],s=e.map(i=>i.rank);if(s.indexOf(null)===-1){const i=Yr(s);for(let o of e){const a=o.rank;for(let c=0;c1){const d=ni(1,h).concat([0]);n.push(Ye(c,d)),i=!0}else n.push(c)}let o=this.mergeFunction(n);const a=o.rank;if(i){if(a==null){const c=o.shape,h=c.length,d=c[h-1],m=[d].concat(c.slice(0,c.length-1));o=Ye(o.reshape([-1,d]),[1,0]).reshape(m)}else if(a>1){const c=[a-1].concat(ni(0,a-1));o=Ye(o,c)}}return o}}else return this.mergeFunction(e)})}computeOutputShape(e){e=e;let t;e[0]==null?t=null:t=e[0].slice(1);for(let s=1;s{if(t==null)return null;if(!Array.isArray(t))throw new q("`mask` should be an Array");if(!Array.isArray(e))throw new q("`inputs` should be an Array");if(t.length!==e.length)throw new q(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${e.length} vs ${t.length})`);if(t.every(s=>s==null))return null;t=t.map(s=>s==null?s:Zn(s,0));let n=t[0];for(let s=1;s{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0].clone();for(let n=1;n{let t=e[0];for(let n=1;n{let t=e[0];for(let n=1;n1)throw new q("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(e))}mergeFunction(e){return Q(()=>Sw(e,this.axis))}computeOutputShape(e){if(!(Array.isArray(e)&&Array.isArray(e[0])))throw new q("A `Concatenate` layer should be called on a list of inputs.");const t=e,n=t[0].slice(),s=this.axis<0?n.length+this.axis:this.axis;for(const i of t.slice(1)){if(n[s]==null||i[s]==null){n[s]=null;break}n[s]+=i[s]}return n}computeMask(e,t){if(t==null)return null;if(!Array.isArray(t))throw new q("`mask` should be an array for Concatenate");if(!Array.isArray(e))throw new q("`inputs` should be an array for Concatenate");if(t.length!==e.length)throw new q(`Mismatch in the length of mask (${t.length}) and the legnth of inputs (${e.length})`);return Q(()=>{let n=!0;if(t.forEach(o=>{if(o!=null){n=!1;return}}),n)return null;const s=[];for(let o=0;o3||t.shape.length>3)throw new Pe("batchDot is not implemented for tensors of 4D or higher rank yet");if(A(e.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${e.shape.length}`),A(e.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof n=="number"&&(n=[n,n]),e.dtype==="complex64"||t.dtype==="complex64")throw new Pe("batchDot is not implemented for complex64-type Tensors yet.");const s=e.shape.length,i=t.shape.length;n==null&&(n=[s-1,i-2]);const o=n;return Q(()=>{let a;if(s>i){a=s-i;const h=[];for(let d=0;ds){a=i-s;const h=[];for(let d=0;d0){let h;s>i?h=s+i-3:h=s-1;const d=[];for(let m=h;m"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0],n=e[1];if(t.length>3||n.length>3)throw new Pe("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);if(t[s[0]]!==n[s[1]])throw new q(`Dimension incompatibility: ${t[s[0]]} !== ${n[s[1]]}`)}mergeFunction(e){if(e.length!==2)throw new q(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${e.length} input(s).`);let t=e[0],n=e[1],s;return Array.isArray(this.axes)?s=this.axes.map((i,o)=>eu(i,e[o].shape.length)):s=[eu(this.axes,t.shape.length),eu(this.axes,n.shape.length)],this.normalize&&(t=tm(t,s[0]),n=tm(n,s[1])),lV(t,n,s)}interpretAxes(e,t){let n;return Array.isArray(this.axes)?n=this.axes:n=[eu(this.axes,e.length),eu(this.axes,t.length)],n}computeOutputShape(e){A(Array.isArray(e)&&e.length===2&&Array.isArray(e[0])&&Array.isArray(e[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");const t=e[0].slice(),n=e[1].slice();if(t.length>3||n.length>3)throw new Pe("Dot layer does not support tensors of 4D or higher rank yet.");const s=this.interpretAxes(t,n);t.splice(s[0],1),n.splice(s[1],1),n.splice(0,1);const i=t.concat(n);return i.length===1&&i.push(1),i}computeMask(e,t){return null}getConfig(){const e={axes:this.axes,normalize:this.normalize},t=super.getConfig();return Object.assign(e,t),e}}xL.className="Dot",fe(xL);class TL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.stddev=e.stddev}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={stddev:this.stddev};return Object.assign(t,e),t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e),s=()=>zp(n.shape,0,this.stddev).add(n),i=Bh(s,()=>n,t.training||!1);return i})}}TL.className="GaussianNoise",fe(TL);class AL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Q(()=>{this.invokeCallHook(e,t);const n=Xe(e);if(this.rate>0&&this.rate<1){const 
s=()=>{const i=Math.sqrt(this.rate/(1-this.rate));return n.mul(zp(n.shape,1,i))};return Bh(s,()=>n,t.training||!1)}return n})}}AL.className="GaussianDropout",fe(AL);class vL extends lt{constructor(e){super(e);this.supportsMasking=!0,this.rate=e.rate,this.noiseShape=e.noiseShape}_getNoiseShape(e){return this.noiseShape||Xe(e).shape}computeOutputShape(e){return e}getConfig(){const e=super.getConfig(),t={rate:this.rate};return Object.assign(t,e),t}call(e,t){return Q(()=>{if(this.rate<1&&this.rate>0){const n=this._getNoiseShape(e),s=()=>{const i=Xe(e),o=1.6732632423543772,a=1.0507009873554805,c=-o*a;let h=Zi(ko(n),this.rate);h=Wh(h,"float32");const d=((1-this.rate)*(1+this.rate*c**2))**-.5,m=-d*c*this.rate,f=i.mul(h).add(h.add(-1).mul(c));return f.mul(d).add(m)};return Bh(s,()=>Xe(e),t.training||!1)}return e})}}vL.className="AlphaDropout",fe(vL);function tu(e,t,n,s,i,o=.001){let a;if(e.rank===2)a=qT(e,t,n,s,i,o);else if(e.rank===3)a=jT(e,t,n,s,i,o);else if(e.rank===4)a=KT(e,t,n,s,i,o);else throw new Pe(`batchNormalization is not implemented for array of rank ${e.rank} yet`);return a}function hV(e,t,n,s,i=.001){return Q(()=>{const o=fp(e,s),a=o.mean,c=o.variance,h=tu(e,a,c,n,t,i);return[h,a,c]})}function uV(e,t,n,s,i=.001){return Q(()=>{const o=fp(e,s),a=o.mean,c=o.variance,h=[];for(const L of ni(0,e.rank))s.indexOf(L)!==-1?h.push(1):h.push(e.shape[L]);const d=a.reshape(h),m=c.reshape(h),f=t==null?null:t.reshape(h),b=n==null?null:n.reshape(h),w=tu(e,d,m,b,f,i);return[w,a,c]})}function dV(e,t,n,s,i=.001){return ae(s.slice().sort(),ni(0,e.rank-1))?hV(e,t,n,s,i):uV(e,t,n,s,i)}class NL extends lt{constructor(e){e==null&&(e={}),super(e),this.supportsMasking=!0,this.axis=e.axis==null?-1:e.axis,this.momentum=e.momentum==null?.99:e.momentum,this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Pt(e.betaInitializer||"zeros"),this.gammaInitializer=Pt(e.gammaInitializer||"ones"),this.movingMeanInitializer=Pt(e.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=Pt(e.movingVarianceInitializer||"ones"),this.betaConstraint=gn(e.betaConstraint),this.gammaConstraint=gn(e.gammaConstraint),this.betaRegularizer=zt(e.betaRegularizer),this.gammaRegularizer=zt(e.gammaRegularizer)}build(e){e=Nt(e);const t=this.axis>=0?this.axis:this.axis+e.length,n=e[t];if(n==null)throw new q(`Axis ${t} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(e)}.`);this.inputSpec=[new Ln({ndim:e.length,axes:{[t]:n}})];const s=[n];this.scale&&(this.gamma=this.addWeight("gamma",s,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",s,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",s,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",s,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(e,t){return Q(()=>{const n=t.training==null?!1:t.training,s=Xe(e),i=s.shape,o=i.length,a=ni(0,o),c=this.axis>=0?this.axis:this.axis+o;a.splice(c,1);const h=$o(1,o);h[c]=i[c];const d=a.slice();d.sort();const m=!ae(d,ni(0,o).slice(0,o-1)),f=()=>{if(m){const N=this.movingMean.read().reshape(h),O=this.movingVariance.read().reshape(h),E=this.center?this.beta.read().reshape(h):null,k=this.scale?this.gamma.read().reshape(h):null;return tu(s,N,O,E,k,this.epsilon)}else return 
tu(s,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!n)return f();const[b,w,L]=dV(s,this.gamma.read(),this.beta.read(),a,this.epsilon),x=(N,O,E)=>{Q(()=>{const k=1-E,F=N.read(),U=F.sub(O).mul(k);N.write(F.sub(U))})},v=()=>{x(this.movingMean,w,this.momentum),x(this.movingVariance,L,this.momentum)};return v(),b})}getConfig(){const e={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:Kt(this.betaInitializer),gammaInitializer:Kt(this.gammaInitializer),movingMeanInitializer:Kt(this.movingMeanInitializer),movingVarianceInitializer:Kt(this.movingVarianceInitializer),betaRegularizer:Ct(this.betaRegularizer),gammaRegularizer:Ct(this.gammaRegularizer),betaConstraint:fn(this.betaConstraint),gammaConstraint:fn(this.gammaConstraint)},t=super.getConfig();return Object.assign(e,t),e}}NL.className="BatchNormalization",fe(NL);class CL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.axis=e.axis==null?-1:e.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(const t of this.axis)if(!Number.isInteger(t))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=e.epsilon==null?.001:e.epsilon,this.center=e.center==null?!0:e.center,this.scale=e.scale==null?!0:e.scale,this.betaInitializer=Pt(e.betaInitializer||"zeros"),this.gammaInitializer=Pt(e.gammaInitializer||"ones"),this.betaRegularizer=zt(e.betaRegularizer),this.gammaRegularizer=zt(e.gammaRegularizer),this.supportsMasking=!0}build(e){e=Nt(e);const t=e.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let i=0;i=t)throw new Error(`Invalid axis: ${i}`);if(this.axis.length!==Vr(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);const n=this.axis.map(i=>e[i]),s=!0;this.scale?this.gamma=this.addWeight("gamma",n,"float32",this.gammaInitializer,this.gammaRegularizer,s):this.gamma=null,this.center?this.beta=this.addWeight("beta",n,"float32",this.betaInitializer,this.betaRegularizer,s):this.beta=null,this.built=!0}call(e,t){const n=Xe(e),s=n.shape,i=s.length;return Q(()=>{const o=!0;let{mean:a,variance:c}=fp(n,this.axis,o);const h=$o(1,i);for(const L of this.axis)h[L]=s[L];const d=L=>L!=null&&L.shape.length!==i&&this.axis!==[i-1]?L.reshape(h):L;let m=d(this.gamma.read()),f=d(this.beta.read());const b=[],w=[];for(let L=0;L{if(e.rank!==3)throw new q(`temporalPadding expects input tensor to be 3-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new q(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);const n=[[0,0],t,[0,0]];return vi(e,n)})}function pV(e,t,n){return Q(()=>{if(e.rank!==4)throw new q(`temporalPadding expects input tensor to be 4-D, but received a ${e.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new q("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(n==null&&(n=ei()),n!=="channelsLast"&&n!=="channelsFirst")throw new q(`Unknown data format: ${n}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let s;return n==="channelsFirst"?s=[[0,0],[0,0],t[0],t[1]]:s=[[0,0],t[0],t[1],[0,0]],vi(e,s)})}class RL extends lt{constructor(e){if(e==null&&(e={}),super(e),this.dataFormat=e.dataFormat==null?ei():e.dataFormat,e.padding==null)this.padding=[[1,1],[1,1]];else if(typeof e.padding=="number")this.padding=[[e.padding,e.padding],[e.padding,e.padding]];else{if(e.padding=e.padding,e.padding.length!==2)throw new q(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${e.padding.length} array.`);let t,n;if(typeof e.padding[0]=="number")t=[e.padding[0],e.padding[0]],n=[e.padding[1],e.padding[1]];else{if(e.padding=e.padding,e.padding[0].length!==2)throw new q(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${e.padding[0].length} array.`);if(t=e.padding[0],e.padding[1].length!==2)throw new q(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${e.padding[1].length} array.`);n=e.padding[1]}this.padding=[t,n]}this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){e=Nt(e);let t,n;return this.dataFormat==="channelsFirst"?(e[2]!=null&&e[2]>=0?t=e[2]+this.padding[0][0]+this.padding[0][1]:t=null,e[3]!=null&&e[3]>=0?n=e[3]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],e[1],t,n]):(e[1]!=null&&e[1]>=0?t=e[1]+this.padding[0][0]+this.padding[0][1]:t=null,e[2]!=null&&e[2]>=0?n=e[2]+this.padding[1][0]+this.padding[1][1]:n=null,[e[0],t,n,e[3]])}call(e,t){return Q(()=>pV(Xe(e),this.padding,this.dataFormat))}getConfig(){const e={padding:this.padding,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}RL.className="ZeroPadding2D",fe(RL);function bm(e,t,n,s,i,o){return Q(()=>{jt(i),uv(o),vs(s),n==null&&(n=[1,1]),s==null&&(s="valid"),i==null&&(i=ei()),o==null&&(o="max"),e=nL(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=fh(e,t,n,c):a=ah(e,t,n,c),i==="channelsFirst"&&(a=Ye(a,[0,3,1,2])),a})}function xN(e,t,n,s,i,o){return Q(()=>{jt(i),uv(o),vs(s),n==null&&(n=[1,1,1]),s==null&&(s="valid"),i==null&&(i=ei()),o==null&&(o="max"),e=gN(e,i);let a;const c=s==="same"?"same":"valid";return o==="max"?a=Ob(e,t,n,c):a=yb(e,t,n,c),i==="channelsFirst"&&(a=Ye(a,[0,4,1,2,3])),a})}class TN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=2),super(e),typeof e.poolSize=="number")this.poolSize=[e.poolSize];else if(Array.isArray(e.poolSize)&&e.poolSize.length===1&&typeof e.poolSize[0]=="number")this.poolSize=e.poolSize;else throw new q(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.poolSize)}`);if(wn(this.poolSize,"poolSize"),e.strides==null)this.strides=this.poolSize;else if(typeof e.strides=="number")this.strides=[e.strides];else if(Array.isArray(e.strides)&&e.strides.length===1&&typeof e.strides[0]=="number")this.strides=e.strides;else throw new q(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(e.strides)}`);wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,vs(this.padding),this.inputSpec=[new Ln({ndim:3})]}computeOutputShape(e){e=Nt(e);const t=oi(e[1],this.poolSize[0],this.padding,this.strides[0]);return[e[0],t,e[2]]}call(e,t){return Q(()=>{this.invokeCallHook(e,t),e=$h(Xe(e),2);const n=this.poolingFunction(Xe(e),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return Mr(n,[2])})}getConfig(){const 
e={poolSize:this.poolSize,padding:this.padding,strides:this.strides},t=super.getConfig();return Object.assign(e,t),e}}class OL extends TN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"max")}}OL.className="MaxPooling1D",fe(OL);class EL extends TN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"avg")}}EL.className="AveragePooling1D",fe(EL);class AN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==2)throw new q(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides];wn(this.poolSize,"poolSize"),wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),vs(this.padding),this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){e=Nt(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2];return t=oi(t,this.poolSize[0],this.padding,this.strides[0]),n=oi(n,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n]:[e[0],t,n,e[3]]}call(e,t){return Q(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class DL extends AN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"max")}}DL.className="MaxPooling2D",fe(DL);class kL extends AN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),bm(e,t,n,s,i,"avg")}}kL.className="AveragePooling2D",fe(kL);class vN extends lt{constructor(e){if(e.poolSize==null&&(e.poolSize=[2,2,2]),super(e),this.poolSize=Array.isArray(e.poolSize)?e.poolSize:[e.poolSize,e.poolSize,e.poolSize],e.strides==null)this.strides=this.poolSize;else if(Array.isArray(e.strides)){if(e.strides.length!==3)throw new q(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${e.strides.length}.`);this.strides=e.strides}else this.strides=[e.strides,e.strides,e.strides];wn(this.poolSize,"poolSize"),wn(this.strides,"strides"),this.padding=e.padding==null?"valid":e.padding,this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),vs(this.padding),this.inputSpec=[new Ln({ndim:5})]}computeOutputShape(e){e=Nt(e);let t=this.dataFormat==="channelsFirst"?e[2]:e[1],n=this.dataFormat==="channelsFirst"?e[3]:e[2],s=this.dataFormat==="channelsFirst"?e[4]:e[3];return t=oi(t,this.poolSize[0],this.padding,this.strides[0]),n=oi(n,this.poolSize[1],this.padding,this.strides[1]),s=oi(s,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[e[0],e[1],t,n,s]:[e[0],t,n,s,e[4]]}call(e,t){return Q(()=>(this.invokeCallHook(e,t),this.poolingFunction(Xe(e),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){const e={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class FL extends vN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return 
jt(i),vs(s),xN(e,t,n,s,i,"max")}}FL.className="MaxPooling3D",fe(FL);class _L extends vN{constructor(e){super(e)}poolingFunction(e,t,n,s,i){return jt(i),vs(s),xN(e,t,n,s,i,"avg")}}_L.className="AveragePooling3D",fe(_L);class NN extends lt{constructor(e){super(e);this.inputSpec=[new Ln({ndim:3})]}computeOutputShape(e){return[e[0],e[2]]}call(e,t){throw new Pe}}class WL extends NN{constructor(e){super(e||{})}call(e,t){return Q(()=>{const n=Xe(e);return qt(n,1)})}}WL.className="GlobalAveragePooling1D",fe(WL);class $L extends NN{constructor(e){super(e||{})}call(e,t){return Q(()=>{const n=Xe(e);return Qn(n,1)})}}$L.className="GlobalMaxPooling1D",fe($L);class CN extends lt{constructor(e){super(e);this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,jt(this.dataFormat),this.inputSpec=[new Ln({ndim:4})]}computeOutputShape(e){return e=e,this.dataFormat==="channelsLast"?[e[0],e[3]]:[e[0],e[1]]}call(e,t){throw new Pe}getConfig(){const e={dataFormat:this.dataFormat},t=super.getConfig();return Object.assign(e,t),e}}class UL extends CN{call(e,t){return Q(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?qt(n,[1,2]):qt(n,[2,3])})}}UL.className="GlobalAveragePooling2D",fe(UL);class BL extends CN{call(e,t){return Q(()=>{const n=Xe(e);return this.dataFormat==="channelsLast"?Qn(n,[1,2]):Qn(n,[2,3])})}}BL.className="GlobalMaxPooling2D",fe(BL);class RN extends lt{constructor(e){super(e);this.layer=e.layer}build(e){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(e){this.layer!=null&&(this.layer.trainable=e)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(e){this.layer.setWeights(e)}getConfig(){const e={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},t=super.getConfig();return Object.assign(e,t),e}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(e)}static fromConfig(e,t,n={}){const s=t.layer,i=ri(s,n);delete t.layer;const o={layer:i};return Object.assign(o,t),new e(o)}}class ML extends RN{constructor(e){super(e);this.supportsMasking=!0}build(e){if(e=Nt(e),e.length<3)throw new q(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(e)}`);this.inputSpec=[{shape:e}];const t=[e[0]].concat(e.slice(2));this.layer.built||(this.layer.build(t),this.layer.built=!0),super.build(e)}computeOutputShape(e){e=Nt(e);const t=[e[0]].concat(e.slice(2)),n=this.layer.computeOutputShape(t),s=e[1];return[n[0],s].concat(n.slice(1))}call(e,t){return Q(()=>{e=Xe(e);const n=(o,a)=>{const c=Xe(this.layer.call(o,t));return[c,[]]},s=SN(n,e,[],!1,null,null,!1,!0),i=s[1];return i})}}ML.className="TimeDistributed",fe(ML);function mV(e){Qa(xz,"BidirectionalMergeMode",e)}const fV="concat";class PL extends RN{constructor(e){super(e);const t=e.layer.getConfig(),n={};n.className=e.layer.getClassName(),n.config=t,this.forwardLayer=ri(n),t.goBackwards=!(t.goBackwards===!0);const s={};if(s.className=e.layer.getClassName(),s.config=t,this.backwardLayer=ri(s),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=e.mergeMode===void 0?fV:e.mergeMode,mV(this.mergeMode),e.weights)throw new Pe("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=e.layer.stateful,this.returnSequences=e.layer.returnSequences,this.returnState=e.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=e.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(e){this._trainable=e,this.forwardLayer!=null&&(this.forwardLayer.trainable=e),this.backwardLayer!=null&&(this.backwardLayer.trainable=e)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(e){const t=e.length,n=Math.floor(t/2);this.forwardLayer.setWeights(e.slice(0,n)),this.backwardLayer.setWeights(e.slice(n))}computeOutputShape(e){let t=this.forwardLayer.computeOutputShape(e);Array.isArray(t)&&Array.isArray(t[0])||(t=[t]),t=t;let n,s,i;return this.returnState&&(i=t.slice(1)),n=t[0],n=n,this.mergeMode==="concat"?(n[n.length-1]*=2,s=[n]):this.mergeMode==null?s=[n,n.slice()]:s=[n],this.returnState?this.mergeMode==null?s.concat(i).concat(i.slice()):[n].concat(i).concat(i.slice()):ts(s)}apply(e,t){let n=t==null?null:t.initialState,s=t==null?null:t.constants;t==null&&(t={});const i=LN(e,n,s,this.numConstants);if(e=i.inputs,n=i.initialState,s=i.constants,Array.isArray(e)&&(n=e.slice(1),e=e[0]),(n==null||n.length===0)&&s==null)return super.apply(e,t);const o=[],a=[];if(n!=null){const h=n.length;if(h%2>0)throw new q("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");t.initialState=n,o.push(...n);const d=n.map(m=>new Ln({shape:m.shape}));this.forwardLayer.stateSpec=d.slice(0,h/2),this.backwardLayer.stateSpec=d.slice(h/2),a.push(...d)}if(s!=null)throw new Pe("Support for constants in Bidirectional layers is not implemented yet.");const c=o[0]instanceof ii;for(const h of o)if(h instanceof ii!==c)throw new q("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(c){const h=[e].concat(o),d=this.inputSpec.concat(a),m=this.inputSpec;this.inputSpec=d;const f=super.apply(h,t);return this.inputSpec=m,f}else return super.apply(e,t)}call(e,t){return Q(()=>{const n=t.initialState;let s,i;if(n==null)s=this.forwardLayer.call(e,t),i=this.backwardLayer.call(e,t);else{const c=n.slice(0,n.length/2),h=n.slice(n.length/2);s=this.forwardLayer.call(e,Object.assign(t,{initialState:c})),i=this.backwardLayer.call(e,Object.assign(t,{initialState:h}))}let o;this.returnState&&(Array.isArray(s)&&(o=s.slice(1).concat(i.slice(1))),s=s[0],i=i[0]),this.returnSequences&&(i=Ts(i,1));let a;return this.mergeMode==="concat"?a=Sw([s,i]):this.mergeMode==="sum"?a=be(s,i):this.mergeMode==="ave"?a=X(.5,be(s,i)):this.mergeMode==="mul"?a=X(s,i):this.mergeMode==null&&(a=[s,i]),this.returnState?this.mergeMode==null?a.concat(o):[a].concat(o):a})}resetStates(e){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(e){Bo(this.forwardLayer.name,()=>{this.forwardLayer.build(e)}),Bo(this.backwardLayer.name,()=>{this.backwardLayer.build(e)}),this.built=!0}computeMask(e,t){Array.isArray(t)&&(t=t[0]);let n;if(this.returnSequences?this.mergeMode==null?n=[t,t]:n=t:this.mergeMode==null?n=[null,null]:n=null,this.returnState){const s=this.forwardLayer.states,i=s.map(o=>null);return Array.isArray(n)?n.concat(i).concat(i):[n].concat(i).concat(i)}else return n}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(e){super.setFastWeightInitDuringBuild(e),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(e),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(e)}getConfig(){const e={mergeMode:this.mergeMode},t=super.getConfig();return Object.assign(e,t),e}static fromConfig(e,t){const n=ri(t.layer);if(delete t.layer,t.numConstants!=null)throw new Pe("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");const s=t;return s.layer=n,new e(s)}}PL.className="Bidirectional",fe(PL);function gV(e){return new nc(e)}function yV(e){return new Qw(e)}function bV(e){return new Xw(e)}function wV(e){return new Jw(e)}function LV(e){return new Zw(e)}function SV(e){return new tL(e)}function IV(e){return new eL(e)}function xV(e){return new dm(e)}function TV(e){return new Hh(e)}function AV(e){return new rL(e)}function vV(e){return new um(e)}function NV(e){return new oL(e)}function CV(e){return new aL(e)}function RV(e){return new cL(e)}function OV(e){return new lL(e)}function EV(e){return new yL(e)}function DV(e){return new fL(e)}function kV(e){return new ym(e)}function FV(e){return new mL(e)}function _V(e){return new gL(e)}function WV(e){return new bL(e)}function $V(e){return new wL(e)}function UV(e){return new LL(e)}function BV(e){return new IL(e)}function MV(e){return new jh(e)}function PV(e){return new Xh(e)}function zV(e){return new Qh(e)}function VV(e){return new Jh(e)}function GV(e){return new Zh(e)}function YV(e){return new Kh(e)}function HV(e){return new xL(e)}function qV(e){return new NL(e)}function jV(e){return new CL(e)}function KV(e){return new RL(e)}function zL(e){return new EL(e)}function XV(e){return zL(e)}function JV(e){return zL(e)}function VL(e){return new kL(e)}function ZV(e){return VL(e)}function QV(e){return VL(e)}function GL(e){return new _L(e)}function eG(e){return GL(e)}function tG(e){return GL(e)}function nG(e){return new WL(e)}function sG(e){return new UL(e)}function ON(e){return new $L(e)}function EN(e){return new BL(e)}function DN(e){return new OL(e)}function kN(e){return new DL(e)}function iG(e){return new FL(e)}function rG(e){return new uL(e)}function oG(e){return new mm(e)}function aG(e){return new dL(e)}function cG(e){return new qh(e)}function lG(e){return new hL(e)}function hG(e){return new pm(e)}function uG(e){return new pL(e)}function dG(e){return new gm(e)}function pG(e){return new Ei(e)}function mG(e){return new fm(e)}function fG(e){return new PL(e)}function gG(e){return new ML(e)}const yG=ON,bG=EN,wG=DN,LG=kN;function SG(e){return new TL(e)}function IG(e){return new AL(e)}function xG(e){return new vL(e)}function TG(e){return new SL(e)}var 
AG=Object.freeze({__proto__:null,inputLayer:gV,elu:yV,reLU:bV,leakyReLU:wV,prelu:LV,softmax:SV,thresholdedReLU:IV,conv1d:xV,conv2d:TV,conv2dTranspose:AV,conv3d:vV,separableConv2d:NV,cropping2D:CV,upSampling2d:RV,depthwiseConv2d:OV,activation:EV,dense:DV,dropout:kV,spatialDropout1d:FV,flatten:_V,repeatVector:WV,reshape:$V,permute:UV,embedding:BV,add:MV,average:PV,concatenate:zV,maximum:VV,minimum:GV,multiply:YV,dot:HV,batchNormalization:qV,layerNormalization:jV,zeroPadding2d:KV,averagePooling1d:zL,avgPool1d:XV,avgPooling1d:JV,averagePooling2d:VL,avgPool2d:ZV,avgPooling2d:QV,averagePooling3d:GL,avgPool3d:eG,avgPooling3d:tG,globalAveragePooling1d:nG,globalAveragePooling2d:sG,globalMaxPooling1d:ON,globalMaxPooling2d:EN,maxPooling1d:DN,maxPooling2d:kN,maxPooling3d:iG,gru:rG,gruCell:oG,lstm:aG,lstmCell:cG,simpleRNN:lG,simpleRNNCell:hG,convLstm2d:uG,convLstm2dCell:dG,rnn:pG,stackedRNNCells:mG,bidirectional:fG,timeDistributed:gG,globalMaxPool1d:yG,globalMaxPool2d:bG,maxPool1d:wG,maxPool2d:LG,Layer:lt,RNN:Ei,RNNCell:ac,input:eN,gaussianNoise:SG,gaussianDropout:IG,alphaDropout:xG,masking:TG});function vG(e,t){return $w(e,t)}function NG(e,t){return _v(e,t)}function CG(e,t){return Wv(e,t)}function RG(e,t){return Uw(e,t)}function OG(e,t){return Bw(e,t)}function EG(e,t){return Fv(e,t)}function DG(e,t){return b3(e,t)}function kG(e,t){return im(e,t)}function FG(e,t){return ic(e,t)}function _G(e,t){return qr(e,t)}function WG(e,t){return qr(e,t)}function $G(e,t){return qr(e,t)}function UG(e,t){return ir(e,t)}function BG(e,t){return ir(e,t)}function MG(e,t){return ir(e,t)}var PG=Object.freeze({__proto__:null,binaryAccuracy:vG,binaryCrossentropy:NG,sparseCategoricalAccuracy:CG,categoricalAccuracy:RG,categoricalCrossentropy:OG,precision:EG,recall:DG,cosineProximity:kG,meanAbsoluteError:FG,meanAbsolutePercentageError:_G,MAPE:WG,mape:$G,meanSquaredError:UG,MSE:BG,mse:MG});var zG=Object.freeze({__proto__:null,modelFromJSON:J3});function VG(e){return new Gh(e)}function GG(e){return rV(e)}function YG(e){return oV(e)}var HG=Object.freeze({__proto__:null,l1l2:VG,l1:GG,l2:YG});class FN extends sc{constructor(){super(...arguments);this.model=null}setModel(e){if(!(e instanceof rr))throw new Error("model must be a LayersModel, not some other Container");this.model=e}}function wm(e,t){return et}class WN extends FN{constructor(e){super();if(e==null&&(e={}),e.restoreBestWeights)throw new Pe("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=e.monitor||"val_loss",this.minDelta=Math.abs(e.minDelta||0),this.patience=e.patience||0,this.verbose=e.verbose||0,this.mode=e.mode||"auto",this.baseline=e.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=wm:this.mode==="max"?this.monitorFunc=_N:this.monitor.indexOf("acc")!==-1?this.monitorFunc=_N:this.monitorFunc=wm,this.monitorFunc===wm&&(this.minDelta*=-1)}async onTrainBegin(e){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===wm?Infinity:-Infinity}async onEpochEnd(e,t){await Hr(t);const n=this.getMonitorValue(t);if(n==null)return;this.monitorFunc(n-this.minDelta,this.best)?(this.best=n,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=e,this.model.stopTraining=!0))}async onTrainEnd(e){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(e){e==null&&(e={});const t=e[this.monitor];return t==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(e)}`),t}}function qG(e){return new WN(e)}const jG={earlyStopping:qG};var ai;(function(e){e[e.DT_INVALID=0]="DT_INVALID",e[e.DT_FLOAT=1]="DT_FLOAT",e[e.DT_DOUBLE=2]="DT_DOUBLE",e[e.DT_INT32=3]="DT_INT32",e[e.DT_UINT8=4]="DT_UINT8",e[e.DT_INT16=5]="DT_INT16",e[e.DT_INT8=6]="DT_INT8",e[e.DT_STRING=7]="DT_STRING",e[e.DT_COMPLEX64=8]="DT_COMPLEX64",e[e.DT_INT64=9]="DT_INT64",e[e.DT_BOOL=10]="DT_BOOL",e[e.DT_QINT8=11]="DT_QINT8",e[e.DT_QUINT8=12]="DT_QUINT8",e[e.DT_QINT32=13]="DT_QINT32",e[e.DT_BFLOAT16=14]="DT_BFLOAT16",e[e.DT_FLOAT_REF=101]="DT_FLOAT_REF",e[e.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",e[e.DT_INT32_REF=103]="DT_INT32_REF",e[e.DT_UINT8_REF=104]="DT_UINT8_REF",e[e.DT_INT16_REF=105]="DT_INT16_REF",e[e.DT_INT8_REF=106]="DT_INT8_REF",e[e.DT_STRING_REF=107]="DT_STRING_REF",e[e.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",e[e.DT_INT64_REF=109]="DT_INT64_REF",e[e.DT_BOOL_REF=110]="DT_BOOL_REF",e[e.DT_QINT8_REF=111]="DT_QINT8_REF",e[e.DT_QUINT8_REF=112]="DT_QUINT8_REF",e[e.DT_QINT32_REF=113]="DT_QINT32_REF",e[e.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(ai||(ai={}));var $N;(function(e){let t;(function(n){n[n.LEGACY=0]="LEGACY",n[n.V1=1]="V1",n[n.V2=2]="V2"})(t=e.CheckpointFormatVersion||(e.CheckpointFormatVersion={}))})($N||($N={}));const YL={};function KG(e,t){const n={tfOpName:e,category:"custom",inputs:[],attrs:[],customExecutor:t};YL[e]=n}function UN(e){return YL[e]}function XG(e){delete YL[e]}function R(e,t,n,s,i){const o=t.inputParams[e];if(o&&o.inputIndexStart!==void 0){const c=o.inputIndexStart,h=o.inputIndexEnd===0?void 0:o.inputIndexEnd===void 0?c+1:o.inputIndexEnd;if(o.type==="tensor")return ss(t.inputNames[o.inputIndexStart],n,s,i);if(o.type==="tensors"){const f=t.inputNames.slice(c,h);return f.map(b=>ss(b,n,s,i))}const d=ss(t.inputNames.slice(c)[0],n,s,i),m=d.dataSync();return o.type==="number"?m[0]:Ls(d.shape,m)}const a=t.attrParams[e];return a&&a.value}function ss(e,t,n,s){const[i,o]=ds(e);if(s!=null){const c=s.getHashTableHandleByName(i);if(c!=null)return c}const a=n.currentContextIds.find(c=>!!t[Lm(i,c)]);return a!==void 0?t[Lm(i,a)][o]:void 0}function JG(e,t,n){return t[Lm(e,n.currentContextId)]}function or(e,t){const[n,s]=ds(e);return[Lm(n,t&&t.currentContextId),s]}function Lm(e,t){return t?`${e}-${t}`:e}function ds(e){const t=e.split(":");if(t.length===1)return[e,0];const n=t[0];return[n,Number(t[t.length-1])]}function Cte(e,t){const n=[];for(let s=0;sn.json));this.opMappers=t.reduce((n,s)=>(n[s.tfOpName]=s,n),{})}transformGraph(e,t={}){const 
n=e.node,s=[],i=[],o=[],a=n.reduce((L,x)=>(L[x.name]=this.mapNode(x),x.op.startsWith("Placeholder")?s.push(L[x.name]):x.op==="Const"?i.push(L[x.name]):(x.input==null||x.input.length===0)&&o.push(L[x.name]),L),{});let c=[];const h=[];let d={},m={};t!=null&&(d=this.mapSignatureEntries(t.inputs),m=this.mapSignatureEntries(t.outputs));const f=Object.keys(a);f.forEach(L=>{const x=a[L];x.inputNames.forEach(v=>{const[N]=or(v);x.inputs.push(a[N]),a[N].children.push(x)})}),Object.keys(m).length===0?f.forEach(L=>{const x=a[L];x.children.length===0&&h.push(x)}):Object.keys(m).forEach(L=>{const[x]=or(L),v=a[x];v!=null&&(v.signatureKey=m[L],h.push(v))}),Object.keys(d).length>0?Object.keys(d).forEach(L=>{const[x]=or(L),v=a[x];v&&(v.signatureKey=d[L],c.push(v))}):c=s;let b={};e.library!=null&&e.library.function!=null&&(b=e.library.function.reduce((L,x)=>(L[x.signature.name]=this.mapFunction(x),L),{}));const w={nodes:a,inputs:c,outputs:h,weights:i,placeholders:s,signature:t,functions:b};return o.length>0&&(w.initNodes=o),w}mapSignatureEntries(e){return Object.keys(e||{}).reduce((t,n)=>(t[e[n].name]=n,t),{})}mapNode(e){const t=UN(e.op)||this.opMappers[e.op]||{};e.attr==null&&(e.attr={});const n={name:e.name,op:e.op,category:t.category,inputNames:(e.input||[]).map(s=>s.startsWith("^")?s.substr(1):s),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:e.attr};return t.inputs!=null&&(n.inputParams=t.inputs.reduce((s,i)=>(s[i.name]={type:i.type,inputIndexStart:i.start,inputIndexEnd:i.end},s),{})),t.attrs!=null&&(n.attrParams=t.attrs.reduce((s,i)=>{const o=i.type;let a;switch(i.type){case"string":a=HL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=HL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"string[]":a=eS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=eS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number":a=jL(e.attr,i.tfName,i.defaultValue||0),a===void 0&&!!i.tfDeprecatedName&&(a=jL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"number[]":a=QL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=QL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool":a=qL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=qL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"bool[]":a=nS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=nS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape":a=ZL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=ZL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"shape[]":a=tS(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=tS(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype":a=XL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=XL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"dtype[]":a=JL(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=JL(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"func":a=PN(e.attr,i.tfName,i.defaultValue),a===void 0&&!!i.tfDeprecatedName&&(a=PN(e.attr,i.tfDeprecatedName,i.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${i.type} for op: ${e.op}`)}return s[i.name]={value:a,type:o},s},{})),n}mapFunction(e){const t=e.nodeDef,n=[],s=[];let i={};t!=null&&(i=t.reduce((m,f)=>(m[f.name]=this.mapNode(f),f.op==="Const"&&s.push(m[f.name]),m),{}));const 
o=[],a=[];e.signature.inputArg.forEach(m=>{const[f]=or(m.name),b={name:f,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:KL(m.type),type:"dtype"}},children:[]};b.signatureKey=m.name,o.push(b),i[f]=b});const c=Object.keys(i);c.forEach(m=>{const f=i[m];f.inputNames.forEach(b=>{const[w]=or(b);f.inputs.push(i[w]),i[w].children.push(f)})});const h=e.ret;e.signature.outputArg.forEach(m=>{const[f,b]=or(h[m.name]),w=i[f];w!=null&&(w.defaultOutput=b,a.push(w))});const d=this.mapArgsToSignature(e);return{nodes:i,inputs:o,outputs:a,weights:s,placeholders:n,signature:d}}mapArgsToSignature(e){return{methodName:e.signature.name,inputs:e.signature.inputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n),t),{}),outputs:e.signature.outputArg.reduce((t,n)=>(t[n.name]=this.mapArgToTensorInfo(n,e.ret),t),{})}}mapArgToTensorInfo(e,t){let n=e.name;return t!=null&&(n=t[n]),{name:n,dtype:e.type}}}function DY(e){const t=oe().global;if(typeof t.atob!="undefined")return t.atob(e);if(typeof Buffer!="undefined")return new Buffer(e,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function MN(e,t){const n=Array.isArray(e)?String.fromCharCode.apply(null,e):DY(e);return t?n:n.toLowerCase()}function HL(e,t,n,s=!1){const i=e[t];return i!=null?MN(i.s,s):n}function qL(e,t,n){const s=e[t];return s?s.b:n}function jL(e,t,n){const s=e[t]||{},i=s.i!=null?s.i:s.f!=null?s.f:n;return typeof i=="number"?i:parseInt(i,10)}function KL(e){typeof e=="string"&&(e=ai[e]);switch(e){case ai.DT_FLOAT:return"float32";case ai.DT_INT32:case ai.DT_INT64:case ai.DT_INT8:case ai.DT_UINT8:return"int32";case ai.DT_BOOL:return"bool";case ai.DT_DOUBLE:return"float32";case ai.DT_STRING:return"string";default:return null}}function PN(e,t,n){const s=e[t];return s&&s.func?s.func.name:n}function XL(e,t,n){const s=e[t];return s&&s.type?KL(s.type):n}function JL(e,t,n){const s=e[t];return s&&s.list&&s.list.type?s.list.type.map(i=>KL(i)):n}function zN(e){return e.unknownRank?void 0:e.dim!=null?e.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function ZL(e,t,n){const s=e[t];return s&&s.shape?zN(s.shape):n}function QL(e,t,n){const s=e[t];return s?((s.list.f&&s.list.f.length?s.list.f:s.list.i)||[]).map(i=>typeof i=="number"?i:parseInt(i,10)):n}function eS(e,t,n,s=!1){const i=e[t];return i&&i.list&&i.list.s?i.list.s.map(o=>MN(o,s)):n}function tS(e,t,n){const s=e[t];return s&&s.list&&s.list.shape?s.list.shape.map(i=>zN(i)):n}function nS(e,t,n){const s=e[t];return s&&s.list&&s.list.b?s.list.b:n}class kY{constructor(e,t,n){this.node=e,this.tensorMap=t,this.context=n,this.inputs=[],this.attrs={},this.inputs=e.inputNames.map(s=>this.getInput(s)),e.rawAttrs!=null&&(this.attrs=Object.keys(e.rawAttrs).reduce((s,i)=>(s[i]=this.getAttr(i),s),{}))}getInput(e){return ss(e,this.tensorMap,this.context)}getAttr(e,t){const n=this.node.rawAttrs[e];if(n.tensor!=null)return ss(e,this.tensorMap,this.context);if(n.i!=null||n.f!=null)return jL(this.node.rawAttrs,e,t);if(n.s!=null)return HL(this.node.rawAttrs,e,t);if(n.b!=null)return qL(this.node.rawAttrs,e,t);if(n.shape!=null)return ZL(this.node.rawAttrs,e,t);if(n.type!=null)return XL(this.node.rawAttrs,e,t);if(n.list!=null){if(n.list.i!=null||n.list.f!=null)return QL(this.node.rawAttrs,e,t);if(n.list.s!=null)return eS(this.node.rawAttrs,e,t);if(n.list.shape!=null)return tS(this.node.rawAttrs,e,t);if(n.list.b!=null)return nS(this.node.rawAttrs,e,t);if(n.list.type!=null)return 
JL(this.node.rawAttrs,e,t)}return t}}const FY=(e,t,n)=>{switch(e.op){case"BiasAdd":case"AddV2":case"Add":return[be(R("a",e,t,n),R("b",e,t,n))];case"AddN":return[YT(R("tensors",e,t,n))];case"FloorMod":case"Mod":return[mp(R("a",e,t,n),R("b",e,t,n))];case"Mul":return[X(R("a",e,t,n),R("b",e,t,n))];case"RealDiv":case"Div":return[We(R("a",e,t,n),R("b",e,t,n))];case"DivNoNan":return[xb(R("a",e,t,n),R("b",e,t,n))];case"FloorDiv":return[Zd(R("a",e,t,n),R("b",e,t,n))];case"Sub":return[Re(R("a",e,t,n),R("b",e,t,n))];case"Minimum":return[Oo(R("a",e,t,n),R("b",e,t,n))];case"Maximum":return[$s(R("a",e,t,n),R("b",e,t,n))];case"Pow":return[Zs(R("a",e,t,n),R("b",e,t,n))];case"SquaredDifference":return[Ih(R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Rte="arithmetic";const _Y=(e,t,n)=>{switch(e.op){case"Abs":case"ComplexAbs":return[dn(R("x",e,t,n))];case"Acos":return[ob(R("x",e,t,n))];case"Acosh":return[ab(R("x",e,t,n))];case"Asin":return[hb(R("x",e,t,n))];case"Asinh":return[ub(R("x",e,t,n))];case"Atan":return[db(R("x",e,t,n))];case"Atan2":return[pb(R("x",e,t,n),R("y",e,t,n))];case"Atanh":return[mb(R("x",e,t,n))];case"Ceil":return[bb(R("x",e,t,n))];case"Complex":return[ji(R("real",e,t,n),R("imag",e,t,n))];case"Cos":return[hh(R("x",e,t,n))];case"Cosh":return[op(R("x",e,t,n))];case"Elu":return[Ua(R("x",e,t,n))];case"Erf":return[Tb(R("x",e,t,n))];case"Exp":return[Is(R("x",e,t,n))];case"Expm1":return[Ab(R("x",e,t,n))];case"Floor":return[Ma(R("x",e,t,n))];case"Log":return[cs(R("x",e,t,n))];case"Log1p":return[hp(R("x",e,t,n))];case"Imag":return[dh(R("x",e,t,n))];case"Neg":return[Ht(R("x",e,t,n))];case"Reciprocal":return[_b(R("x",e,t,n))];case"Real":return[Ga(R("x",e,t,n))];case"Relu":return[Ni(R("x",e,t,n))];case"Round":return[$b(R("x",e,t,n))];case"Selu":return[bp(R("x",e,t,n))];case"Sigmoid":return[Ti(R("x",e,t,n))];case"Sin":return[wp(R("x",e,t,n))];case"Sign":return[Bb(R("x",e,t,n))];case"Sinh":return[Lp(R("x",e,t,n))];case"Softplus":return[za(R("x",e,t,n))];case"Sqrt":return[Nn(R("x",e,t,n))];case"Square":return[At(R("x",e,t,n))];case"Tanh":return[$a(R("x",e,t,n))];case"Tan":return[zb(R("x",e,t,n))];case"Relu6":case"ClipByValue":return[Jn(R("x",e,t,n),R("clipValueMin",e,t,n),R("clipValueMax",e,t,n))];case"Rsqrt":return[yp(ss(e.inputNames[0],t,n))];case"Prod":return[gp(R("x",e,t,n),R("axes",e,t,n))];case"LeakyRelu":return[lp(R("x",e,t,n),R("alpha",e,t,n))];case"Prelu":return[yh(R("x",e,t,n),R("alpha",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ote="basic_math";function zs(e,t,n=""){A(WY(e,t),()=>n+` Shapes ${e} and ${t} must match`)}function WY(e,t){if(e.length!==t.length)return!1;for(let n=0;n{(e==null||!e.has(t.tensor.id))&&t.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||e>=this.size())throw new Error(`Tried to read from index ${e}, but array size is: ${this.size()}`);const t=this.tensors[e];if(t.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${e} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(t.cleared=!0),t.read=!0,t.tensor}readMany(e){return e.map(t=>this.read(t))}write(e,t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(e<0||!this.dynamicSize&&e>=this.maxSize)throw new Error(`Tried to write to index ${e}, but array is 
not resizeable and size is: ${this.maxSize}`);const n=this.tensors[e]||{};if(t.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, - because the value dtype is ${t.dtype}, but TensorArray dtype is ${this.dtype}.`);if(this.size()===0&&(this.elementShape==null||this.elementShape.length===0)&&(this.elementShape=t.shape),zs(this.elementShape,t.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${e}.`),n.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been read.`);if(n.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${e}, because it has already been written.`);n.tensor=t,bn(t),n.written=!0,this.tensors[e]=n}writeMany(e,t){if(e.length!==t.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${e.length} is not the same as tensors size: ${t.length}.`);e.forEach((n,s)=>this.write(n,t[s]))}gather(e,t){if(!!t&&t!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${t}`);if(e)e=e.slice(0,this.size());else{e=[];for(let s=0;s=this.maxSize)throw new Error(`Max index must be < array size (${n} vs. ${this.maxSize})`);this.writeMany(e,Qs(t,0))}split(e,t){if(t.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${t.dtype}`);let n=0;const s=e.map(c=>(n+=c,n));if(n!==t.shape[0])throw new Error(`Expected sum of lengths to be equal to +var F9=Object.create,qm=Object.defineProperty,R9=Object.getPrototypeOf,P9=Object.prototype.hasOwnProperty,O9=Object.getOwnPropertyNames,L9=Object.getOwnPropertyDescriptor,xE=i=>qm(i,"__esModule",{value:!0}),Hm=(i,a)=>()=>(a||(a={exports:{}},i(a.exports,a)),a.exports),jm=(i,a)=>{xE(i);for(var u in a)qm(i,u,{get:a[u],enumerable:!0})},M9=(i,a,u)=>{if(xE(i),a&&typeof a=="object"||typeof a=="function")for(let h of O9(a))!P9.call(i,h)&&h!=="default"&&qm(i,h,{get:()=>a[h],enumerable:!(u=L9(a,h))||u.enumerable});return i},te=i=>i&&i.__esModule?i:M9(qm(i!=null?F9(R9(i)):{},"default",{value:i,enumerable:!0}),i),vE=Hm((dl,wE)=>{"use strict";var B9=function(){if(typeof self!="undefined")return self;if(typeof window!="undefined")return window;if(typeof Is!="undefined")return Is;throw new Error("unable to locate global object")},Is=B9();wE.exports=dl=Is.fetch;Is.fetch&&(dl.default=Is.fetch.bind(Is));dl.Headers=Is.Headers;dl.Request=Is.Request;dl.Response=Is.Response}),ee=Hm((Km,TE)=>{(function(i,a){typeof Km=="object"&&typeof TE!="undefined"?a(Km):typeof define=="function"&&define.amd?define(["exports"],a):(i=i||self,a(i.tf=i.tf||{}))})(Km,function(i){"use strict";let a=1e-7,u=1e-4;class h{constructor(t,e){this.backend=t,this.dataMover=e,this.data=new WeakMap,this.dataIdsCount=0}get(t){return this.data.has(t)||this.dataMover.moveData(this.backend,t),this.data.get(t)}set(t,e){this.dataIdsCount++,this.data.set(t,e)}has(t){return this.data.has(t)}delete(t){return this.dataIdsCount--,this.data.delete(t)}numDataIds(){return this.dataIdsCount}}class d{time(t){return g("time")}read(t){return g("read")}readSync(t){return g("readSync")}numDataIds(){return g("numDataIds")}disposeData(t){return g("disposeData")}write(t,e,r){return g("write")}move(t,e,r,o){return g("move")}memory(){return g("memory")}floatPrecision(){return g("floatPrecision")}epsilon(){return this.floatPrecision()===32?a:u}batchMatMul(t,e,r,o){return 
g("batchMatMul")}fusedBatchMatMul({a:t,b:e,transposeA:r,transposeB:o,bias:s,activation:c,preluActivationWeights:l}){return g("fusedBatchMatMul")}slice(t,e,r){return g("slice")}stridedSlice(t,e,r,o){return g("stridedSlice")}unstack(t,e){return g("unstack")}reverse(t,e){return g("reverse")}concat(t,e){return g("concat")}neg(t){return g("neg")}add(t,e){return g("add")}addN(t){return g("addN")}subtract(t,e){return g("subtract")}multiply(t,e){return g("multiply")}realDivide(t,e){return g("realDivide")}floorDiv(t,e){return g("floorDiv")}sum(t,e){return g("sum")}prod(t,e){return g("prod")}unsortedSegmentSum(t,e,r){return g("unsortedSegmentSum")}argMin(t,e){return g("argMin")}argMax(t,e){return g("argMax")}equal(t,e){return g("equal")}notEqual(t,e){return g("notEqual")}less(t,e){return g("less")}lessEqual(t,e){return g("lessEqual")}greater(t,e){return g("greater")}greaterEqual(t,e){return g("greaterEqual")}logicalNot(t){return g("logicalNot")}logicalAnd(t,e){return g("logicalAnd")}logicalOr(t,e){return g("logicalOr")}where(t){return g("where")}select(t,e,r){return g("select")}topk(t,e,r){return g("topk")}min(t,e){return g("min")}minimum(t,e){return g("minimum")}mod(t,e){return g("mod")}max(t,e){return g("max")}maximum(t,e){return g("maximum")}all(t,e){return g("all")}any(t,e){return g("any")}squaredDifference(t,e){return g("squaredDifference")}ceil(t){return g("ceil")}floor(t){return g("floor")}round(t){return g("round")}sign(t){return g("sign")}isNaN(t){return g("isNaN")}isInf(t){return g("isInf")}isFinite(t){return g("isFinite")}pow(t,e){return g("pow")}exp(t){return g("exp")}expm1(t){return g("expm1")}softmax(t,e){return g("softmax")}log(t){return g("log")}log1p(t){return g("log1p")}sqrt(t){return g("sqrt")}rsqrt(t){return g("rsqrt")}square(t){return g("square")}reciprocal(t){return g("reciprocal")}relu(t){return g("relu")}relu6(t){return g("relu6")}prelu(t,e){return g("prelu")}elu(t){return g("elu")}eluDer(t,e){return g("eluDer")}selu(t){return g("selu")}int(t){return g("int")}clip(t,e,r){return g("clip")}abs(t){return g("abs")}complexAbs(t){return g("complexAbs")}sigmoid(t){return g("sigmoid")}softplus(t){return g("softplus")}sin(t){return g("sin")}cos(t){return g("cos")}tan(t){return g("tan")}asin(t){return g("asin")}acos(t){return g("acos")}atan(t){return g("atan")}atan2(t,e){return g("atan2")}sinh(t){return g("sinh")}cosh(t){return g("cosh")}tanh(t){return g("tanh")}asinh(t){return g("asinh")}acosh(t){return g("acosh")}atanh(t){return g("atanh")}erf(t){return g("erf")}step(t,e){return g("step")}fusedConv2d({input:t,filter:e,convInfo:r,bias:o,activation:s,preluActivationWeights:c}){return g("fusedConv2d")}conv2d(t,e,r){return g("conv2d")}conv2dDerInput(t,e,r){return g("conv2dDerInput")}conv2dDerFilter(t,e,r){return g("conv2dDerFilter")}fusedDepthwiseConv2D({input:t,filter:e,convInfo:r,bias:o,activation:s,preluActivationWeights:c}){return g("fusedDepthwiseConv2D")}depthwiseConv2D(t,e,r){return g("depthwiseConv2D")}depthwiseConv2DDerInput(t,e,r){return g("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(t,e,r){return g("depthwiseConv2DDerFilter")}conv3d(t,e,r){return g("conv3d")}conv3dDerInput(t,e,r){return g("conv3dDerInput")}conv3dDerFilter(t,e,r){return g("conv3dDerFilter")}maxPool(t,e){return g("maxPool")}maxPoolBackprop(t,e,r,o){return g("maxPoolBackprop")}avgPool(t,e){return g("avgPool")}avgPoolBackprop(t,e,r){return g("avgPoolBackprop")}avgPool3d(t,e){return g("avgPool3d")}avgPool3dBackprop(t,e,r){return g("avgPool3dBackprop")}maxPool3d(t,e){return 
g("maxPool3d")}maxPool3dBackprop(t,e,r,o){return g("maxPool3dBackprop")}reshape(t,e){return g("reshape")}cast(t,e){return g("cast")}tile(t,e){return g("tile")}pad(t,e,r){return g("pad")}transpose(t,e){return g("transpose")}gather(t,e,r){return g("gather")}gatherND(t,e){return g("gatherND")}scatterND(t,e,r){return g("scatterND")}batchToSpaceND(t,e,r){return g("batchToSpaceND")}spaceToBatchND(t,e,r){return g("spaceToBatchND")}resizeBilinear(t,e,r,o){return g("resizeBilinear")}resizeBilinearBackprop(t,e,r){return g("resizeBilinearBackprop")}resizeNearestNeighbor(t,e,r,o){return g("resizeNearestNeighbor")}resizeNearestNeighborBackprop(t,e,r){return g("resizeNearestNeighborBackprop")}batchNorm(t,e,r,o,s,c){return g("batchNorm")}localResponseNormalization4D(t,e,r,o,s){return g("localResponseNormalization4D")}LRNGrad(t,e,r,o,s,c,l){return g("LRNGrad")}multinomial(t,e,r,o){return g("multinomial")}oneHot(t,e,r,o){return g("oneHot")}cumsum(t,e,r,o){return g("cumsum")}nonMaxSuppression(t,e,r,o,s){return g("nonMaxSuppression")}fft(t){return g("fft")}ifft(t){return g("ifft")}complex(t,e){return g("complex")}real(t){return g("real")}imag(t){return g("imag")}cropAndResize(t,e,r,o,s,c){return g("cropAndResize")}depthToSpace(t,e,r){return g("depthToSpace")}split(t,e,r){return g("split")}sparseToDense(t,e,r,o){return g("sparseToDense")}diag(t){return g("diag")}fill(t,e,r){return g("fill")}onesLike(t){return g("onesLike")}zerosLike(t){return g("zerosLike")}linspace(t,e,r){return g("linspace")}dispose(){return g("dispose")}}function g(n){throw new Error(`'${n}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function x(n){let t=n.length,e=0,r=0;for(;t>0;)r=Math.random()*t|0,t--,e=n[t],n[t]=n[r],n[r]=e}function w(n,t,e){return Math.max(n,Math.min(t,e))}function k(n){return n%2===0?n:n+1}function C(n){let t=0;for(let e=0;ee+` Shapes ${n} and ${t} must match`)}function et(n){_(n!=null,()=>"The input to the tensor constructor must be a non-null value.")}function tt(n,t=[],e=!1){if(t==null&&(t=[]),Array.isArray(n)||gn(n)&&!e)for(let r=0;r0,e){return new Promise((r,o)=>{let s=0,c=()=>{if(n()){r();return}s++;let l=t(s);if(e!=null&&s>=e){o();return}setTimeout(c,l)};c()})}function Ge(n,t){let e=1,r=-1;for(let s=0;s=0)e*=n[s];else if(n[s]===-1){if(r!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${r} and dim ${s}`);r=s}else if(n[s]<0)throw Error(`Shapes can not be < 0. Found ${n[s]} at dim ${s}`);if(r===-1){if(t>0&&t!==e)throw Error(`Size(${t}) must match the product of shape ${n}`);return n}if(e===0)throw Error(`Cannot infer the missing size in [${n}] when there are 0 elements`);if(t%e!==0)throw Error(`The implicit shape can't be a fractional number. 
Got ${t} / ${e}`);let o=n.slice();return o[r]=t/e,o}function Vt(n,t){let e=t.length;return n=n==null?t.map((r,o)=>o):[].concat(n),_(n.every(r=>r>=-e&&r`All values in axis param must be in range [-${e}, ${e}) but got axis ${n}`),_(n.every(r=>gt(r)),()=>`All values in axis param must be integers but got axis ${n}`),n.map(r=>r<0?e+r:r)}function ln(n,t){let e=[],r=[],o=t!=null&&Array.isArray(t)&&t.length===0,s=t==null||o?null:Vt(t,n).sort(),c=0;for(let l=0;ll)&&n[l]===1&&(e.push(n[l]),r.push(l)),s[c]<=l&&c++}n[l]!==1&&(e.push(n[l]),r.push(l))}return{newShape:e,keptDims:r}}function Ce(n,t){let e=null;if(n==null||n==="float32")e=new Float32Array(t);else if(n==="int32")e=new Int32Array(t);else if(n==="bool")e=new Uint8Array(t);else throw new Error(`Unknown data type ${n}`);return e}function rr(n,t){let e=null;if(n==null||n==="float32")e=new Float32Array(t);else if(n==="int32")e=new Int32Array(t);else if(n==="bool")e=new Uint8Array(t);else if(n==="string")e=new Array(t);else throw new Error(`Unknown data type ${n}`);return e}function Ys(n,t){for(let e=0;et+=e.length),t}function as(n){return typeof n=="string"||n instanceof String}function Kk(n){return typeof n=="boolean"}function nf(n){return typeof n=="number"}function ic(n){return Array.isArray(n)?ic(n[0]):n instanceof Float32Array?"float32":n instanceof Int32Array||n instanceof Uint8Array?"int32":nf(n)?"float32":as(n)?"string":Kk(n)?"bool":"float32"}function Zs(n){return!!(n&&n.constructor&&n.call&&n.apply)}function rf(n,t){for(let e=t;e=0;--r)e[r]=e[r+1]*n[r+1];return e}function Xk(n,t,e){let r=new Array;if(t.length===1){let o=t[0];for(let s=0;sl*p);for(let l=0;lr*o);if(e===0)return[];if(e!==t.length)throw new Error(`[${n}] does not match the input size ${t.length}.`);return Xk(0,n,t)}function Ob(n,t){let e=ac(n,t);for(let r=0;rr*o,1);if(t==null||t==="float32")return Er(n,new Float32Array(e));if(t==="int32")return Er(n,new Int32Array(e));if(t==="bool")return Er(n,new Uint8Array(e));throw new Error(`Unknown data type ${t}`)}function Mb(n){n.forEach(t=>{_(Number.isInteger(t)&&t>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${n}].`)})}function Kr(n,t,e){if(t===0)return 0;if(t===1)return n[0];let r=n[n.length-1];for(let o=0;o{let[o,s]=r.split(":");this.urlFlags[o]=oP(o,s)})}}}function nP(n){let t={};return n.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(e,...r)=>(rP(t,r[0],r[1]),r.join("="))),t}function rP(n,t,e){n[decodeURIComponent(t)]=decodeURIComponent(e||"")}function oP(n,t){if(t=t.toLowerCase(),t==="true"||t==="false")return t==="true";if(`${+t}`===t)return+t;throw new Error(`Could not parse value flag value ${t} for flag ${n}.`)}function ct(){return i.ENV}i.ENV=null;function sP(n){i.ENV=n}let Bb;function Zk(){if(Bb==null){let n;if(typeof window!="undefined")n=window;else if(typeof global!="undefined")n=global;else if(typeof process!="undefined")n=process;else if(typeof self!="undefined")n=self;else throw new Error("Could not find a global object");Bb=n}return Bb}function iP(){let n=Zk();return n._tfGlobals==null&&(n._tfGlobals=new Map),n._tfGlobals}function Qk(n,t){let e=iP();if(e.has(n))return e.get(n);{let r=t();return e.set(n,r),e.get(n)}}let 
of="Abs",eu="Acos",nu="Acosh",Hi="Add",zb="AddN",tN="All",eN="Any",Wb="ArgMax",Vb="ArgMin",ru="Asin",ou="Asinh",su="Atan",iu="Atanh",sf="Atan2",au="AvgPool",af="AvgPoolBackprop",Gb="AvgPool3D",nN="AvgPool3DBackprop",cf="BatchMatMul",Ub="BatchToSpaceND",qb="BroadcastTo",cc="Cast",cu="Ceil",lu="ClipByValue",lf="Complex",uu="Concat",uf="Conv2D",Hb="Conv2DBackpropFilter",pf="Conv2DBackpropInput",hf="Conv3D",jb="Conv3DBackpropFilterV2",Kb="Conv3DBackpropInputV2",lc="Cos",pu="Cosh",Xb="Cumsum",rN="CropAndResize",oN="DepthToSpace",ff="DepthwiseConv2dNative",Yb="DepthwiseConv2dNativeBackpropFilter",Jb="DepthwiseConv2dNativeBackpropInput",sN="Diag",df="Dilation2D",mf="Dilation2DBackpropInput",gf="Dilation2DBackpropFilter",uc="Div",hu="Elu",iN="EluGrad",fu="Erf",aN="Equal",du="Exp",mu="Expm1",yf="FFT",Zb="Fill",bf="FlipLeftRight",gu="Floor",Qb="FloorDiv",yu="FusedBatchNorm",tx="GatherV2",cN="GatherNd",lN="Greater",ex="GreaterEqual",bu="Identity",xf="IFFT",wf="Imag",xu="IsFinite",wu="IsInf",vu="IsNan",uN="Less",pN="LessEqual",hN="LinSpace",Tu="Log",ku="Log1p",fN="LogicalAnd",vf="LogicalNot",dN="LogicalOr",nx="LogSoftmax",rx="LRN",mN="LRNBackprop",Nu="Max",ox="Maximum",_u="MaxPool",Tf="MaxPoolBackprop",sx="MaxPool3D",gN="MaxPool3DBackprop",kf="MaxPoolWithArgmax",ix="Mean",ax="Min",cx="Minimum",Cu="MirrorPad",lx="Mod",pc="Multiply",ux="Negate",Su="NotEqual",px="NonMaxSuppressionV3",Nf="NonMaxSuppressionV4",_f="NonMaxSuppressionV5",hx="OnesLike",fx="OneHot",Cf="PadV2",aP="Pool",dx="Pow",Sf="Prelu",yN="Prod",bN="Range",$f="Real",$u="Reciprocal",Iu="Relu",Eu="Reshape",mx="ResizeNearestNeighbor",xN="ResizeNearestNeighborGrad",gx="ResizeBilinear",wN="ResizeBilinearGrad",Du="Relu6",yx="Reverse",Au="Round",Fu="Rsqrt",vN="ScatterNd",bx="SelectV2",Ru="Selu",If="Slice",hc="Sin",Pu="Sinh",Ou="Sign",Lu="Sigmoid",Mu="Softplus",Bu="Sqrt",xx="Sum",Ef="SpaceToBatchND",wx="SplitV",vx="Softmax",fc="SquaredDifference",Df="Square",dc="Sub",TN="SparseToDense",kN="StridedSlice",mc="Tan",zu="Tanh",Tx="Tile",NN="TopK",Wu="Transpose",Af="Unique",kx="Unpack",Nx="UnsortedSegmentSum",_x="ZerosLike",Vu="Step",Ff="FromPixels",Rf="RotateWithOffset",Pf="_FusedMatMul",Of="FusedConv2D",Lf="FusedDepthwiseConv2D";let gc=Qk("kernelRegistry",()=>new Map),Gu=Qk("gradRegistry",()=>new Map);function Cx(n,t){let e=$x(n,t);return gc.get(e)}function Sx(n){return Gu.get(n)}function Mf(n){let t=gc.entries(),e=[];for(;;){let{done:r,value:o}=t.next();if(r)break;let[s,c]=o,[l]=s.split("_");l===n&&e.push(c)}return e}function Bf(n){let{kernelName:t,backendName:e}=n,r=$x(t,e);gc.has(r)&&console.warn(`The kernel '${t}' for backend '${e}' is already registered`),gc.set(r,n)}function _N(n){let{kernelName:t}=n;Gu.has(t)&&(ct().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${t}'`)),Gu.set(t,n)}function cP(n,t){let e=$x(n,t);if(!gc.has(e))throw new Error(`The kernel '${n}' for backend '${t}' is not registered`);gc.delete(e)}function lP(n){if(!Gu.has(n))throw new Error(`The gradient '${n}' for backend is not registered`);Gu.delete(n)}function uP(n,t){let e=Mf(n);e.forEach(r=>{let o=Object.assign({},r,{backendName:t});Bf(o)})}function $x(n,t){return`${t}_${n}`}function CN(n,t){return t==="string"?zf(n):Qs([n],t)}function pP(n,t){return n instanceof Float32Array&&t==="float32"||n instanceof Int32Array&&t==="int32"||n instanceof Uint8Array&&t==="bool"}function Qs(n,t){if(t==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(n)&&(n=tt(n)),ct().getBool("DEBUG")&&Ys(n,t),pP(n,t))return 
n;if(t==null||t==="float32"||t==="complex64")return new Float32Array(n);if(t==="int32")return new Int32Array(n);if(t==="bool"){let e=new Uint8Array(n.length);for(let r=0;r{o=r()},c=this.backendTimer.time(s);for(let p=0;p{dP(m,f.dtype,t)})}let l={kernelName:t,outputs:o,inputs:e,timeMs:c.then(p=>p.kernelMs),extraInfo:c.then(p=>p.getExtraProfileInfo!=null?p.getExtraProfileInfo():"")};return l}logKernelProfile(t){let{kernelName:e,outputs:r,timeMs:o,inputs:s,extraInfo:c}=t;r.forEach(l=>{Promise.all([l.data(),o,c]).then(p=>{this.logger.logKernelProfile(e,l,p[0],p[1],s,p[2])})})}}function dP(n,t,e){if(t!=="float32")return!1;for(let r=0;r0?N:""} `}}console.log(`%c${p} %c${l} %c${f}D ${y} %c${m} %c${b} %c${c}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function gP(n,t,e){let r={},o={};for(let p=0;pr[N.id]=!0),v=!0,o[f.id]=!0;break}if(v)break}}let s={};s[e.id]=!0;let c={};for(let p=n.length-1;p>=0;p--){let f=n[p],m=f.inputs;for(let y=0;y=0;o--){let s=t[o],c=[];if(s.outputs.forEach(p=>{let f=n[p.id];f!=null?c.push(f):c.push(null)}),s.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${s.kernelName}.`);let l=s.gradient(c);for(let p in s.inputs){if(!(p in l))throw new Error(`Cannot backprop through input ${p}. Available gradients found: ${Object.keys(l)}.`);let f=e(()=>l[p]());if(f.dtype!=="float32")throw new Error(`Error in gradient for op ${s.kernelName}. The gradient of input ${p} must have 'float32' dtype, but has '${f.dtype}'`);let m=s.inputs[p];if(!lt(f.shape,m.shape))throw new Error(`Error in gradient for op ${s.kernelName}. The gradient of input '${p}' has shape '${f.shape}', which does not match the shape of the input '${m.shape}'`);if(n[m.id]==null)n[m.id]=f;else{let y=n[m.id];n[m.id]=r(y,f),y.dispose()}}}}let $N=20,qu=3,Ix=7;function bP(n,t,e,r){let o=Yt(t),s=xP(n,t,e,o),c=t.length,l=Wf(n,t,e,o,s),p=["Tensor"];return r&&(p.push(` dtype: ${e}`),p.push(` rank: ${c}`),p.push(` shape: [${t}]`),p.push(" values:")),p.push(l.map(f=>" "+f).join(` +`)),p.join(` +`)}function xP(n,t,e,r){let o=G(t),s=r[r.length-1],c=new Array(s).fill(0),l=t.length,p=e==="complex64"?ju(n):n;if(l>1)for(let f=0;f$N){let S=qu*c,D=Array.from(n.slice(0,S)),I=Array.from(n.slice((l-qu)*c,l*c));return e==="complex64"&&(D=ju(D),I=ju(I)),["["+D.map((P,E)=>Hu(P,o[E],e)).join(", ")+", ..., "+I.map((P,E)=>Hu(P,o[l-qu+E],e)).join(", ")+"]"]}let N=e==="complex64"?ju(n):Array.from(n);return["["+N.map((S,D)=>Hu(S,o[D],e)).join(", ")+"]"]}let f=t.slice(1),m=r.slice(1),y=r[0]*c,b=[];if(l>$N){for(let N=0;N`Length of values '${o}' does not match the size inferred by the shape '${this.size}'.`)}if(e==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=r||rr(e,this.size),this.strides=Yt(t)}set(t,...e){e.length===0&&(e=[0]),_(e.length===this.rank,()=>`The number of provided coordinates (${e.length}) must match the rank (${this.rank})`);let r=this.locToIndex(e);this.values[r]=t}get(...t){t.length===0&&(t=[0]);let e=0;for(let o of t){if(o<0||o>=this.shape[e]){let s=`Requested out of range element at ${t}. Buffer shape=${this.shape}`;throw new Error(s)}e++}let r=t[t.length-1];for(let o=0;oUu(r))}catch(r){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}}return t}dataSync(){this.throwIfDisposed();let t=Oo().readSync(this.dataId);if(this.dtype==="string")try{return t.map(e=>Uu(e))}catch(e){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}return t}async bytes(){this.throwIfDisposed();let t=await Oo().read(this.dataId);return this.dtype==="string"?t:new Uint8Array(t.buffer)}dispose(){if(this.isDisposed)return;Oo().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(t=!1){return yc.print(this,t)}clone(){return this.throwIfDisposed(),yc.clone(this)}toString(t=!1){let e=this.dataSync();return bP(e,this.shape,this.dtype,t)}cast(t){return this.throwIfDisposed(),yc.cast(this,t)}variable(t=!0,e,r){return this.throwIfDisposed(),Oo().makeVariable(this,t,e,r)}}Object.defineProperty(ot,Symbol.hasInstance,{value:n=>!!n&&n.data!=null&&n.dataSync!=null&&n.throwIfDisposed!=null});class Ku extends ot{constructor(t,e,r,o){super(t.shape,t.dtype,t.dataId,o);this.trainable=e,this.name=r}assign(t){if(t.dtype!==this.dtype)throw new Error(`dtype of the new value (${t.dtype}) and previous value (${this.dtype}) must match`);if(!lt(t.shape,this.shape))throw new Error(`shape of the new value (${t.shape}) and previous value (${this.shape}) must match`);Oo().disposeTensor(this),this.dataId=t.dataId,Oo().incRef(this,null)}dispose(){Oo().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(Ku,Symbol.hasInstance,{value:n=>n instanceof ot&&n.assign!=null&&n.assign instanceof Function});(function(n){n.R0="R0",n.R1="R1",n.R2="R2",n.R3="R3",n.R4="R4",n.R5="R5",n.R6="R6"})(i.Rank||(i.Rank={}));var Ex;(function(n){n.float32="float32",n.int32="int32",n.bool="int32",n.complex64="complex64"})(Ex||(Ex={}));var Dx;(function(n){n.float32="float32",n.int32="int32",n.bool="bool",n.complex64="complex64"})(Dx||(Dx={}));var Ax;(function(n){n.float32="float32",n.int32="float32",n.bool="float32",n.complex64="complex64"})(Ax||(Ax={}));var Fx;(function(n){n.float32="complex64",n.int32="complex64",n.bool="complex64",n.complex64="complex64"})(Fx||(Fx={}));let kP={float32:Ax,int32:Ex,bool:Dx,complex64:Fx};function jn(n,t){if(n==="string"||t==="string"){if(n==="string"&&t==="string")return"string";throw new Error(`Can not upcast ${n} with ${t}`)}return kP[n][t]}function Vf(n){return jn(n,"int32")}function Je(n,t){if(n.dtype===t.dtype)return[n,t];let e=jn(n.dtype,t.dtype);return[n.cast(e),t.cast(e)]}function DN(n,t){_(n.dtype===t.dtype,()=>`The dtypes of the first(${n.dtype}) and second(${t.dtype}) input must match`)}function Gf(n,t){return t.some(e=>e.id===n.id)}function cs(n){let t=[],e=new Set;return AN(n,t,e),t}function AN(n,t,e){if(n==null)return;if(n instanceof ot){t.push(n);return}if(!NP(n))return;let r=n;for(let o in r){let s=r[o];e.has(s)||(e.add(s),AN(s,t,e))}}function NP(n){return Array.isArray(n)||typeof n=="object"}var _P=Object.freeze({__proto__:null,makeTypesMatch:Je,assertTypesMatch:DN,isTensorInList:Gf,getTensorsInContainer:cs});class FN{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(let t in 
this.registeredVariables)this.registeredVariables[t].dispose()}}class bc{constructor(t){this.ENV=t,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new FN}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;let t=this.getSortedBackends();for(let e=0;e{e.setupFunc!=null&&e.setupFunc(this.backendInstance)})}disposeRegisteredKernels(t){let e=Mf(t);e.forEach(r=>{r.disposeFunc!=null&&r.disposeFunc(this.registry[t])})}initializeBackend(t){let e=this.registryFactory[t];if(e==null)throw new Error(`Cannot initialize backend ${t}, no registration found.`);try{let r=e.factory();if(r&&!(r instanceof d)&&typeof r.then=="function"){let o=++this.pendingBackendInitId,s=r.then(c=>o(othis.registryFactory[e].priority-this.registryFactory[t].priority)}initializeBackendsAndReturnBest(){let t=this.getSortedBackends();for(let e=0;ethis.startScope(r),()=>this.endScope(o),()=>(o=e(),o instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),o))}scopedRun(t,e,r){t();try{let o=r();return e(),o}catch(o){throw e(),o}}nextTensorId(){return bc.nextTensorId++}nextVariableId(){return bc.nextVariableId++}clone(t){let e=this.makeTensorFromDataId(t.dataId,t.shape,t.dtype),r={x:t},o=c=>({x:()=>{let l="float32",p={x:c},f={dtype:l};return X.runKernelFunc(m=>m.cast(c,l),p,null,cc,f)}}),s=[];return this.addTapeNode(this.state.activeScope.name,r,[e],o,s,{}),e}runKernel(t,e,r,o,s){let c=null,l=null;return this.runKernelFunc(c,e,l,t,r,o,s)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(t,e,r){let o=this.backend.numDataIds(),s=0;r.forEach(p=>{s+=p.dtype==="complex64"?3:1});let c=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],l=o-e-s-c;if(l>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${l} data ids) after running '${t}'`)}runKernelFunc(t,e,r,o,s,c,l){let p,f=[],m=this.isTapeOn();o==null&&(o=this.state.activeScope!=null?this.state.activeScope.name:"");let y=this.state.numBytes,b=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let v,T=Cx(o,this.backendName),N;if(T!=null)v=()=>{let D=this.backend.numDataIds();N=T.kernelFunc({inputs:e,attrs:s,backend:this.backend});let I=Array.isArray(N)?N:[N];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(o,D,I);let P=I.map(({dataId:E,shape:L,dtype:B})=>this.makeTensorFromDataId(E,L,B));if(m){let E=this.getTensorsForGradient(o,e,P);if(E==null){l==null&&(l=[]);let L=P.filter((B,q)=>l[q]);E=(c||[]).slice().concat(L)}f=this.saveTensorsForBackwardMode(E)}return P};else{let D=I=>{if(!m)return;f=I.map(P=>this.keep(this.clone(P)))};v=()=>{let I=this.backend.numDataIds();N=this.tidy(()=>t(this.backend,D));let P=Array.isArray(N)?N:[N];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(o,I,P),P}}let S;return 
this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?p=v():(S=this.profiler.profileKernel(o,e,()=>v()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(S),p=S.outputs)}),m&&this.addTapeNode(o,e,p,r,f,s),this.state.profiling&&this.state.activeProfile.kernels.push({name:o,bytesAdded:this.state.numBytes-y,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-b,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(e).map(D=>e[D]!=null?e[D].shape:null),outputShapes:p.map(D=>D.shape),kernelTimeMs:S.timeMs,extraInfo:S.extraInfo}),Array.isArray(N)?p:p[0]}saveTensorsForBackwardMode(t){let e=t.map(r=>this.keep(this.clone(r)));return e}getTensorsForGradient(t,e,r){let o=Sx(t);if(o!=null){let s=o.inputsToSave||[],c=o.outputsToSave||[],l;o.saveAllInputs?(_(Array.isArray(e),()=>"saveAllInputs is true, expected inputs to be an array."),l=Object.keys(e).map(f=>e[f])):l=s.map(f=>e[f]);let p=r.filter((f,m)=>c[m]);return l.concat(p)}return null}makeTensor(t,e,r,o){if(t==null)throw new Error("Values passed to engine.makeTensor() are null");r=r||"float32",o=o||this.backend;let s=t;r==="string"&&as(t[0])&&(s=t.map(p=>zf(p)));let c=o.write(s,e,r),l=new ot(e,r,c,this.nextTensorId());if(this.incRef(l,o),r==="string"){let p=this.state.tensorInfo.get(c),f=jk(s);this.state.numBytes+=f-p.bytes,p.bytes=f}return l}makeTensorFromDataId(t,e,r,o){r=r||"float32";let s=new ot(e,r,t,this.nextTensorId());return this.incRef(s,o),s}makeVariable(t,e=!0,r,o){r=r||this.nextVariableId().toString(),o!=null&&o!==t.dtype&&(t=t.cast(o));let s=new Ku(t,e,r,this.nextTensorId());if(this.state.registeredVariables[s.name]!=null)throw new Error(`Variable with name ${s.name} was already registered`);return this.state.registeredVariables[s.name]=s,this.incRef(s,this.backend),s}incRef(t,e){let r=this.state.tensorInfo.has(t.dataId)?this.state.tensorInfo.get(t.dataId).refCount:0;if(this.state.numTensors++,t.dtype==="string"&&this.state.numStringTensors++,r===0){this.state.numDataBuffers++;let o=0;t.dtype!=="complex64"&&t.dtype!=="string"&&(o=t.size*Pb(t.dtype)),this.state.tensorInfo.set(t.dataId,{backend:e||this.backend,dtype:t.dtype,shape:t.shape,bytes:o,refCount:0}),this.state.numBytes+=o}this.state.tensorInfo.get(t.dataId).refCount++,t instanceof Ku||this.track(t)}disposeTensor(t){if(!this.state.tensorInfo.has(t.dataId))return;this.state.numTensors--,t.dtype==="string"&&this.state.numStringTensors--;let e=this.state.tensorInfo.get(t.dataId),r=e.refCount;r<=1?(t.dtype!=="complex64"&&(this.state.numBytes-=e.bytes),this.state.numDataBuffers--,e.backend.disposeData(t.dataId),this.state.tensorInfo.delete(t.dataId)):this.state.tensorInfo.get(t.dataId).refCount--}disposeVariables(){for(let t in this.state.registeredVariables){let e=this.state.registeredVariables[t];this.disposeVariable(e)}}disposeVariable(t){this.disposeTensor(t),this.state.registeredVariables[t.name]!=null&&delete this.state.registeredVariables[t.name]}memory(){let t=this.backend.memory();return t.numTensors=this.state.numTensors,t.numDataBuffers=this.state.numDataBuffers,t.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(t.unreliable=!0,t.reasons==null&&(t.reasons=[]),t.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),t}async profile(t){this.state.profiling=!0;let e=this.state.numBytes,r=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await 
t(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(o=>o.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-e,this.state.activeProfile.newTensors=this.state.numTensors-r;for(let o of this.state.activeProfile.kernels)o.kernelTimeMs=await o.kernelTimeMs,o.extraInfo=await o.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(t,e,r,o,s,c){let l={id:this.state.nextTapeNodeId++,kernelName:t,inputs:e,outputs:r,saved:s},p=Sx(t);p!=null&&(o=p.gradFunc),o!=null&&(l.gradient=f=>(f=f.map((m,y)=>{if(m==null){let b=r[y],v=ac(b.size,b.dtype);return this.makeTensor(v,b.shape,b.dtype)}return m}),o(f.length>1?f:f[0],s,c))),this.state.activeTape.push(l)}keep(t){return t.kept=!0,t}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(t){let e={track:[],name:"unnamed scope",id:this.state.nextScopeId++};t&&(e.name=t),this.state.scopeStack.push(e),this.state.activeScope=e}endScope(t){let e=cs(t),r=new Set(e.map(s=>s.id));for(let s=0;s{!s.kept&&s.scopeId===o.id&&this.track(s)})}gradients(t,e,r,o=!1){if(_(e.length>0,()=>"gradients() received an empty list of xs."),r!=null&&r.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${r.dtype}'`);let s=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",t));_(s instanceof ot,()=>"The result y returned by f() must be a tensor.");let c=gP(this.state.activeTape,e,s);if(!o&&c.length===0&&e.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{let l={};l[s.id]=r==null?CP(s.shape):r,yP(l,c,f=>this.tidy(f),SP);let p=e.map(f=>l[f.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(f=>{for(let m of f.saved)m.dispose()}),this.state.activeTape=null),{value:s,grads:p}})}customGrad(t){return _(Zs(t),()=>"The f passed in customGrad(f) must be a function."),(...e)=>{_(e.every(s=>s instanceof ot),()=>"The args passed in customGrad(f)(x1, x2,...) 
must all be tensors");let r,o={};return e.forEach((s,c)=>{o[c]=s}),this.runKernelFunc((s,c)=>(r=t(...e,c),_(r.value instanceof ot,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),_(Zs(r.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),r.value),o,(s,c)=>{let l=r.gradFunc(s,c),p=Array.isArray(l)?l:[l];_(p.length===e.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),_(p.every(m=>m instanceof ot),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");let f={};return p.forEach((m,y)=>{f[y]=()=>m}),f})}}readSync(t){let e=this.state.tensorInfo.get(t);return e.backend.readSync(t)}read(t){let e=this.state.tensorInfo.get(t);return e.backend.read(t)}async time(t){let e=or(),r=await this.backend.time(t);return r.wallMs=or()-e,r}track(t){return this.state.activeScope!=null&&(t.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(t)),t}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new FN;for(let t in this.registry)this.disposeRegisteredKernels(t),this.registry[t].dispose(),delete this.registry[t];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}bc.nextTensorId=0,bc.nextVariableId=0;function CP(n){let t=Ob(G(n),"float32");return X.makeTensor(t,n,"float32")}function RN(){let n=Zk();if(n._tfengine==null){let t=new Jk(n);n._tfengine=new bc(t)}return sP(n._tfengine.ENV),wP(()=>n._tfengine),n._tfengine}let X=RN();function SP(n,t){let e={a:n,b:t};return X.runKernelFunc((r,o)=>{let s=r.add(n,t);return o([n,t]),s},e,null,Hi)}function $P(){return typeof navigator!="undefined"&&navigator!=null}function PN(){if($P()){let n=navigator.userAgent||navigator.vendor||window.opera;return/(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(n)||/1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v 
)|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(n.substr(0,4))}return!1}function Rx(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var IP=Object.freeze({__proto__:null,isMobile:PN,isBrowser:Rx});let ls=ct();ls.registerFlag("DEBUG",()=>!1,n=>{n&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.")}),ls.registerFlag("IS_BROWSER",()=>Rx()),ls.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined"),ls.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)),ls.registerFlag("PROD",()=>!1),ls.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>ls.getBool("DEBUG")),ls.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0),ls.registerFlag("IS_TEST",()=>!1);function Lo(n,t){let e=n;if(gn(n))return t==="string"?[]:[n.length];if(!Array.isArray(n))return[];let r=[];for(;Array.isArray(e)||gn(e)&&t!=="string";)r.push(e.length),e=e[0];return Array.isArray(n)&&ct().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&ON(n,r,[]),r}function ON(n,t,e){if(e=e||[],!Array.isArray(n)&&!gn(n)){_(t.length===0,()=>`Element arr[${e.join("][")}] is a primitive, but should be an array/TypedArray of ${t[0]} elements`);return}_(t.length>0,()=>`Element arr[${e.join("][")}] should be a primitive, but is an array of ${n.length} elements`),_(n.length===t[0],()=>`Element arr[${e.join("][")}] should have ${t[0]} elements, but has ${n.length} elements`);let r=t.slice(1);for(let o=0;o=0&&(o=r),LN(r,o,t,e),n==null||!gn(n)&&!Array.isArray(n)&&typeof n!="number"&&typeof n!="boolean"&&typeof n!="string"){let p=n==null?"null":n.constructor.name;throw new Error(`Argument '${t}' passed to '${e}' must be a Tensor or TensorLike, but got '${p}'`)}let s=Lo(n,o);!gn(n)&&!Array.isArray(n)&&(n=[n]);let c=!0,l=o!=="string"?Qs(n,o):tt(n,[],c);return X.makeTensor(l,s,o)}function Xu(n,t,e,r="numeric"){if(!Array.isArray(n))throw new Error(`Argument ${t} passed to ${e} must be a \`Tensor[]\` or \`TensorLike[]\``);let o=n;return o.map((s,c)=>M(s,`${t}[${c}]`,e),r)}let MN="__op";function j(n){let t=Object.keys(n);if(t.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. 
Got an object with ${t.length} keys.`);let e=t[0],r=n[e];e.endsWith("_")&&(e=e.substring(0,e.length-1)),e=e+MN;let o=(...s)=>{X.startScope(e);try{let c=r(...s);return qi(c)&&console.error("Cannot return a Promise inside of tidy."),X.endScope(c),c}catch(c){throw X.endScope(null),c}};return Object.defineProperty(o,"name",{value:e,configurable:!0}),o}function EP(n,t){let e=M(n,"real","complex"),r=M(t,"imag","complex");W(e.shape,r.shape,`real and imag shapes, ${e.shape} and ${r.shape}, must match in call to tf.complex().`);let o=c=>c.complex(e,r),s={real:e,imag:r};return X.runKernelFunc(o,s,null,lf)}let us=j({complex_:EP});function ti(n,t,e,r){if(r==null&&(r=ic(n)),r==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!gn(n)&&!Array.isArray(n)&&typeof n!="number"&&typeof n!="boolean"&&typeof n!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(t!=null){Mb(t);let o=G(t),s=G(e);_(o===s,()=>`Based on the provided shape, [${t}], the tensor should have ${o} values but has ${s}`);for(let c=0;c`Error creating a new Tensor. Inferred shape (${e}) does not match the provided shape (${t}). `)}}return!gn(n)&&!Array.isArray(n)&&(n=[n]),t=t||e,n=r!=="string"?Qs(n,r):tt(n,[],!0),X.makeTensor(n,t,r)}function un(n,t,e){let r=Lo(n,e);return ti(n,t,r,e)}let Px={float32:4,float16:2,int32:4,uint16:2,uint8:1,bool:1,complex64:8};let Uf=4;async function Ox(n,t){let e=[],r=[],o=Array.isArray(n)?n.map(c=>c.name):Object.keys(n);for(let c=0;c{let b=await p.bytes(),v=b.reduce((S,D)=>S+D.length,0)+Uf*b.length,T=new Uint8Array(v),N=0;for(let S=0;S{if(t+=s.byteLength,e.push(s.byteLength===s.buffer.byteLength?s:new s.constructor(s)),!(s instanceof Float32Array||s instanceof Int32Array||s instanceof Uint8Array))throw new Error(`Unsupported TypedArray subtype: ${s.constructor.name}`)});let r=new Uint8Array(t),o=0;return e.forEach(s=>{r.set(new Uint8Array(s.buffer),o),o+=s.byteLength}),r.buffer}let Lx=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function BN(n){return Lx?Buffer.byteLength(n):new Blob([n]).size}function AP(n){if(Lx)return Buffer.from(n).toString("base64");let t=new Uint8Array(n),e="";for(let r=0,o=t.length;r{t+=o.byteLength});let e=new Uint8Array(t),r=0;return n.forEach(o=>{e.set(new Uint8Array(o),r),r+=o.byteLength}),e.buffer}function zN(n){let t="/";for(n=n.trim();n.endsWith(t);)n=n.slice(0,n.length-1);let e=n.split(t);return e[e.length-1]}function Yu(n){if(n.modelTopology instanceof ArrayBuffer)throw new Error("Expected JSON model topology, received ArrayBuffer.");return{dateSaved:new Date,modelTopologyType:"JSON",modelTopologyBytes:n.modelTopology==null?0:BN(JSON.stringify(n.modelTopology)),weightSpecsBytes:n.weightSpecs==null?0:BN(JSON.stringify(n.weightSpecs)),weightDataBytes:n.weightData==null?0:n.weightData.byteLength}}function RP(){let n=e=>{let r=e<<13,o=0;for(;(r&8388608)===0;)o-=8388608,r<<=1;return r&=~8388608,o+=947912704,r|o},t=new Uint32Array(2048);t[0]=0;for(let e=1;e<1024;e++)t[e]=n(e);for(let e=1024;e<2048;e++)t[e]=939524096+(e-1024<<13);return t}function PP(){let n=new Uint32Array(64);n[0]=0,n[31]=1199570944,n[32]=2147483648,n[63]=3347054592;for(let t=1;t<31;t++)n[t]=t<<23;for(let t=33;t<63;t++)n[t]=2147483648+(t-32<<23);return n}function OP(){let n=new Uint32Array(64);for(let t=0;t<64;t++)n[t]=1024;return n[0]=n[32]=0,n}function LP(){let n=RP(),t=PP(),e=OP();return 
r=>{let o=new ArrayBuffer(4*r.length),s=new Uint32Array(o);for(let c=0;c>10]+(l&1023)]+t[l>>10];s[c]=p}return new Float32Array(o)}}class Ze{constructor(){this.saveRouters=[],this.loadRouters=[]}static getInstance(){return Ze.instance==null&&(Ze.instance=new Ze),Ze.instance}static registerSaveRouter(t){Ze.getInstance().saveRouters.push(t)}static registerLoadRouter(t){Ze.getInstance().loadRouters.push(t)}static getSaveHandlers(t){return Ze.getHandlers(t,"save")}static getLoadHandlers(t,e){return Ze.getHandlers(t,"load",e)}static getHandlers(t,e,r){let o=[],s=e==="load"?Ze.getInstance().loadRouters:Ze.getInstance().saveRouters;return s.forEach(c=>{let l=c(t,r);l!==null&&o.push(l)}),o}}let MP=n=>Ze.registerSaveRouter(n),BP=n=>Ze.registerLoadRouter(n),Mx=n=>Ze.getSaveHandlers(n),Bx=(n,t)=>Ze.getLoadHandlers(n,t);let jf="tensorflowjs",zx=1,ji="models_store",ei="model_info_store";async function pot(){let n=Wx();return new Promise((t,e)=>{let r=n.deleteDatabase(jf);r.onsuccess=()=>t(),r.onerror=o=>e(o)})}function Wx(){if(!ct().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");let n=typeof window=="undefined"?self:window,t=n.indexedDB||n.mozIndexedDB||n.webkitIndexedDB||n.msIndexedDB||n.shimIndexedDB;if(t==null)throw new Error("The current browser does not appear to support IndexedDB.");return t}function Vx(n){let t=n.result;t.createObjectStore(ji,{keyPath:"modelPath"}),t.createObjectStore(ei,{keyPath:"modelPath"})}class Ki{constructor(t){if(this.indexedDB=Wx(),t==null||!t)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=t}async save(t){if(t.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,t)}async load(){return this.databaseAction(this.modelPath)}databaseAction(t,e){return new Promise((r,o)=>{let s=this.indexedDB.open(jf,zx);s.onupgradeneeded=()=>Vx(s),s.onsuccess=()=>{let c=s.result;if(e==null){let l=c.transaction(ji,"readonly"),p=l.objectStore(ji),f=p.get(this.modelPath);f.onsuccess=()=>{if(f.result==null)return c.close(),o(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));r(f.result.modelArtifacts)},f.onerror=m=>(c.close(),o(f.error)),l.oncomplete=()=>c.close()}else{let l=Yu(e),p=c.transaction(ei,"readwrite"),f=p.objectStore(ei),m=f.put({modelPath:this.modelPath,modelArtifactsInfo:l}),y;m.onsuccess=()=>{y=c.transaction(ji,"readwrite");let b=y.objectStore(ji),v=b.put({modelPath:this.modelPath,modelArtifacts:e,modelArtifactsInfo:l});v.onsuccess=()=>r({modelArtifactsInfo:l}),v.onerror=T=>{f=p.objectStore(ei);let N=f.delete(this.modelPath);N.onsuccess=()=>(c.close(),o(v.error)),N.onerror=S=>(c.close(),o(v.error))}},m.onerror=b=>(c.close(),o(m.error)),p.oncomplete=()=>{y==null?c.close():y.oncomplete=()=>c.close()}}},s.onerror=c=>o(s.error)})}}Ki.URL_SCHEME="indexeddb://";let WN=n=>ct().getBool("IS_BROWSER")&&(!Array.isArray(n)&&n.startsWith(Ki.URL_SCHEME))?zP(n.slice(Ki.URL_SCHEME.length)):null;Ze.registerSaveRouter(WN),Ze.registerLoadRouter(WN);function zP(n){return new Ki(n)}function WP(n){return n.startsWith(Ki.URL_SCHEME)?n.slice(Ki.URL_SCHEME.length):n}class VP{constructor(){this.indexedDB=Wx()}async listModels(){return new Promise((t,e)=>{let r=this.indexedDB.open(jf,zx);r.onupgradeneeded=()=>Vx(r),r.onsuccess=()=>{let 
o=r.result,s=o.transaction(ei,"readonly"),c=s.objectStore(ei),l=c.getAll();l.onsuccess=()=>{let p={};for(let f of l.result)p[f.modelPath]=f.modelArtifactsInfo;t(p)},l.onerror=p=>(o.close(),e(l.error)),s.oncomplete=()=>o.close()},r.onerror=o=>e(r.error)})}async removeModel(t){return t=WP(t),new Promise((e,r)=>{let o=this.indexedDB.open(jf,zx);o.onupgradeneeded=()=>Vx(o),o.onsuccess=()=>{let s=o.result,c=s.transaction(ei,"readwrite"),l=c.objectStore(ei),p=l.get(t),f;p.onsuccess=()=>{if(p.result==null)return s.close(),r(new Error(`Cannot find model with path '${t}' in IndexedDB.`));{let m=l.delete(t),y=()=>{f=s.transaction(ji,"readwrite");let b=f.objectStore(ji),v=b.delete(t);v.onsuccess=()=>e(p.result.modelArtifactsInfo),v.onerror=T=>r(p.error)};m.onsuccess=y,m.onerror=b=>(y(),s.close(),r(p.error))}},p.onerror=m=>(s.close(),r(p.error)),c.oncomplete=()=>{f==null?s.close():f.oncomplete=()=>s.close()}},o.onerror=s=>r(o.error)})}}let Mo="/",Xi="tensorflowjs_models",VN="info",GP="model_topology",UP="weight_specs",qP="weight_data",HP="model_metadata";function hot(){if(!ct().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("purgeLocalStorageModels() cannot proceed because local storage is unavailable in the current environment.");let n=window.localStorage,t=[];for(let e=0;eo.length){n.removeItem(r);let s=UN(r);t.indexOf(s)===-1&&t.push(s)}}return t}function GN(n){return{info:[Xi,n,VN].join(Mo),topology:[Xi,n,GP].join(Mo),weightSpecs:[Xi,n,UP].join(Mo),weightData:[Xi,n,qP].join(Mo),modelMetadata:[Xi,n,HP].join(Mo)}}function UN(n){let t=n.split(Mo);if(t.length<3)throw new Error(`Invalid key format: ${n}`);return t.slice(1,t.length-1).join(Mo)}function jP(n){return n.startsWith(Yi.URL_SCHEME)?n.slice(Yi.URL_SCHEME.length):n}class Yi{constructor(t){if(!ct().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,t==null||!t)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=t,this.keys=GN(this.modelPath)}async save(t){if(t.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{let e=JSON.stringify(t.modelTopology),r=JSON.stringify(t.weightSpecs),o=Yu(t);try{return this.LS.setItem(this.keys.info,JSON.stringify(o)),this.LS.setItem(this.keys.topology,e),this.LS.setItem(this.keys.weightSpecs,r),this.LS.setItem(this.keys.weightData,AP(t.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:t.format,generatedBy:t.generatedBy,convertedBy:t.convertedBy,userDefinedMetadata:t.userDefinedMetadata})),{modelArtifactsInfo:o}}catch(s){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${o.modelTopologyBytes}, weightSpecsBytes=${o.weightSpecsBytes}, weightDataBytes=${o.weightDataBytes}.`)}}}async load(){let t=JSON.parse(this.LS.getItem(this.keys.info));if(t==null)throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);if(t.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model 
topology yet.");let e={},r=JSON.parse(this.LS.getItem(this.keys.topology));if(r==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);e.modelTopology=r;let o=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(o==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);e.weightSpecs=o;let s=this.LS.getItem(this.keys.modelMetadata);if(s!=null){let l=JSON.parse(s);e.format=l.format,e.generatedBy=l.generatedBy,e.convertedBy=l.convertedBy,e.userDefinedMetadata=l.userDefinedMetadata}let c=this.LS.getItem(this.keys.weightData);if(c==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return e.weightData=FP(c),e}}Yi.URL_SCHEME="localstorage://";let qN=n=>ct().getBool("IS_BROWSER")&&(!Array.isArray(n)&&n.startsWith(Yi.URL_SCHEME))?KP(n.slice(Yi.URL_SCHEME.length)):null;Ze.registerSaveRouter(qN),Ze.registerLoadRouter(qN);function KP(n){return new Yi(n)}class XP{constructor(){_(ct().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),_(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){let t={},e=Xi+Mo,r=Mo+VN;for(let o=0;o"scheme must not be undefined or null."),t.endsWith(xc)&&(t=t.slice(0,t.indexOf(xc))),_(t.length>0,()=>"scheme must not be an empty string.");let r=br.getInstance();_(r.managers[t]==null,()=>`A model store manager is already registered for scheme '${t}'.`),r.managers[t]=e}static getManager(t){let e=this.getInstance().managers[t];if(e==null)throw new Error(`Cannot find model manager for scheme '${t}'`);return e}static getSchemes(){return Object.keys(this.getInstance().managers)}}function Kf(n){if(n.indexOf(xc)===-1)throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${br.getSchemes().join(",")}`);return{scheme:n.split(xc)[0],path:n.split(xc)[1]}}async function HN(n,t,e=!1){_(n!==t,()=>`Old path and new path are the same: '${n}'`);let r=Ze.getLoadHandlers(n);_(r.length>0,()=>`Copying failed because no load handler is found for source URL ${n}.`),_(r.length<2,()=>`Copying failed because more than one (${r.length}) load handlers for source URL ${n}.`);let o=r[0],s=Ze.getSaveHandlers(t);_(s.length>0,()=>`Copying failed because no save handler is found for destination URL ${t}.`),_(s.length<2,()=>`Copying failed because more than one (${r.length}) save handlers for destination URL ${t}.`);let c=s[0],l=Kf(n).scheme,p=Kf(n).path,f=l===Kf(n).scheme,m=await o.load();e&&f&&await br.getManager(l).removeModel(p);let y=await c.save(m);return e&&!f&&await br.getManager(l).removeModel(p),y.modelArtifactsInfo}async function YP(){let n=br.getSchemes(),t={};for(let e of n){let r=await br.getManager(e).listModels();for(let o in r){let s=e+xc+o;t[s]=r[o]}}return t}async function JP(n){let t=Kf(n),e=br.getManager(t.scheme);return e.removeModel(t.path)}async function ZP(n,t){let e=!1;return HN(n,t,e)}async function QP(n,t){let e=!0;return HN(n,t,e)}class tO{fetch(t,e){return fetch(t,e)}now(){return performance.now()}encode(t,e){if(e!=="utf-8"&&e!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${e}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(t)}decode(t,e){return new TextDecoder(e).decode(t)}}if(ct().get("IS_BROWSER")){ct().setPlatform("browser",new tO);try{br.registerManager(Yi.URL_SCHEME,new XP)}catch(n){}try{br.registerManager(Ki.URL_SCHEME,new VP)}catch(n){}}let eO={importFetch:()=>vE()},wc;function fot(){wc=null}function dot(n){wc=n}function mot(){return wc}class nO{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(t,e){return ct().global.fetch!=null?ct().global.fetch(t,e):(wc==null&&(wc=eO.importFetch()),wc(t,e))}now(){let t=process.hrtime();return t[0]*1e3+t[1]/1e6}encode(t,e){if(e!=="utf-8"&&e!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${e}`);return this.textEncoder.encode(t)}decode(t,e){return t.length===0?"":new this.util.TextDecoder(e).decode(t)}}ct().get("IS_NODE")&&ct().setPlatform("node",new nO);function Se(n,t="float32",e){return t=t||"float32",Mb(n),new hn(n,t,e)}function rO(n,t){let e=M(n,"x","cast");if(!Js(t))throw new Error(`Failed to cast to unknown dtype ${t}`);if(t==="string"&&e.dtype!=="string"||t!=="string"&&e.dtype==="string")throw new Error("Only strings can be casted to strings");let r={x:e},o={dtype:t};return X.runKernelFunc(s=>s.cast(e,t),r,null,cc,o)}let $t=j({cast_:rO});function oO(n){let t=M(n,"x","clone",null),e=()=>X.makeTensorFromDataId(t.dataId,t.shape,t.dtype),r={x:t};return X.runKernelFunc(e,r,null,bu)}let ni=j({clone_:oO});function jN(n,t=!1){console.log(n.toString(t))}RN();let sO={buffer:Se,cast:$t,clone:ni,print:jN};vP(sO);let iO="model",aO=".json",cO=".weights.bin";function KN(n){return new Promise(t=>setTimeout(t)).then(n)}class Ji{constructor(t){if(!ct().getBool("IS_BROWSER"))throw new Error("browserDownloads() cannot proceed because the current environment is not a browser.");t.startsWith(Ji.URL_SCHEME)&&(t=t.slice(Ji.URL_SCHEME.length)),(t==null||t.length===0)&&(t=iO),this.modelTopologyFileName=t+aO,this.weightDataFileName=t+cO}async save(t){if(typeof document=="undefined")throw new Error("Browser downloads are not supported in this environment since `document` 
is not present");let e=window.URL.createObjectURL(new Blob([t.weightData],{type:"application/octet-stream"}));if(t.modelTopology instanceof ArrayBuffer)throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet.");{let r=[{paths:["./"+this.weightDataFileName],weights:t.weightSpecs}],o={modelTopology:t.modelTopology,format:t.format,generatedBy:t.generatedBy,convertedBy:t.convertedBy,weightsManifest:r},s=window.URL.createObjectURL(new Blob([JSON.stringify(o)],{type:"application/json"})),c=this.jsonAnchor==null?document.createElement("a"):this.jsonAnchor;if(c.download=this.modelTopologyFileName,c.href=s,await KN(()=>c.dispatchEvent(new MouseEvent("click"))),t.weightData!=null){let l=this.weightDataAnchor==null?document.createElement("a"):this.weightDataAnchor;l.download=this.weightDataFileName,l.href=e,await KN(()=>l.dispatchEvent(new MouseEvent("click")))}return{modelArtifactsInfo:Yu(t)}}}}Ji.URL_SCHEME="downloads://";class lO{constructor(t){if(t==null||t.length<1)throw new Error(`When calling browserFiles, at least 1 file is required, but received ${t}`);this.files=t}async load(){let t=this.files[0],e=this.files.slice(1);return new Promise((r,o)=>{let s=new FileReader;s.onload=c=>{let l=JSON.parse(c.target.result),p=l.modelTopology;if(p==null){o(new Error(`modelTopology field is missing from file ${t.name}`));return}e.length===0&&r({modelTopology:p});let f=l.weightsManifest;if(f==null){o(new Error(`weightManifest field is missing from file ${t.name}`));return}let m;try{m=this.checkManifestAndWeightFiles(f,e)}catch(T){o(T);return}let y=[],b=[],v=[];f.forEach(T=>{T.paths.forEach(N=>{b.push(N),v.push(null)}),y.push(...T.weights)}),f.forEach(T=>{T.paths.forEach(N=>{let S=new FileReader;S.onload=D=>{let I=D.target.result,P=b.indexOf(N);v[P]=I,v.indexOf(null)===-1&&r({modelTopology:p,weightSpecs:y,weightData:Hf(v),format:l.format,generatedBy:l.generatedBy,convertedBy:l.convertedBy,userDefinedMetadata:l.userDefinedMetadata})},S.onerror=D=>o(`Failed to weights data from file of path '${N}'.`),S.readAsArrayBuffer(m[N])})})},s.onerror=c=>o(`Failed to read model topology and weights manifest JSON from file '${t.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`),s.readAsText(t)})}checkManifestAndWeightFiles(t,e){let r=[],o=e.map(c=>zN(c.name)),s={};for(let c of t)c.paths.forEach(l=>{let p=zN(l);if(r.indexOf(p)!==-1)throw new Error(`Duplicate file basename found in weights manifest: '${p}'`);if(r.push(p),o.indexOf(p)===-1)throw new Error(`Weight file with basename '${p}' is not provided.`);s[l]=e[o.indexOf(p)]});if(r.length!==e.length)throw new Error(`Mismatch in the number of files in weights manifest (${r.length}) and the number of weight files provided (${e.length}).`);return s}}let uO=n=>ct().getBool("IS_BROWSER")&&(!Array.isArray(n)&&n.startsWith(Ji.URL_SCHEME))?pO(n.slice(Ji.URL_SCHEME.length)):null;Ze.registerSaveRouter(uO);function pO(n="model"){return new Ji(n)}function hO(n){return new lO(n)}function XN(n,t,e,r){c(n),e=e==null?0:e,r=r==null?1:r,l(e,r);let o=0,s=p=>(p.then(f=>{let m=e+ ++o/n.length*(r-e);return t(m),f}),p);function c(p){_(p!=null&&Array.isArray(p)&&p.length>0,()=>"promises must be a none empty array")}function l(p,f){_(p>=0&&p<=1,()=>`Progress fraction must be in range [0, 1], but got startFraction ${p}`),_(f>=0&&f<=1,()=>`Progress fraction must be in range [0, 1], but got endFraction ${f}`),_(f>=p,()=>`startFraction must be no more than endFraction, but got startFraction ${p} and endFraction ${f}`)}return Promise.all(n.map(s))}async function YN(n,t){t==null&&(t={});let e=t.fetchFunc==null?ct().platform.fetch:t.fetchFunc,r=n.map(y=>e(y,t.requestInit,{isBinary:!0})),o=0,s=.5,c=t.onProgress==null?await Promise.all(r):await XN(r,t.onProgress,o,s),l=c.map(y=>y.arrayBuffer()),p=.5,f=1,m=t.onProgress==null?await Promise.all(l):await XN(l,t.onProgress,p,f);return m}async function JN(n,t="",e,r){let o=c=>YN(c,{requestInit:r}),s=ZN(o);return s(n,t,e)}function ZN(n){return async(t,e="",r)=>{let o=t.map(()=>!1),s={},c=r!=null?r.map(()=>!1):[],l=[];if(t.forEach((v,T)=>{let N=0;v.weights.forEach(S=>{let D="quantization"in S?S.quantization.dtype:S.dtype,I=Px[D]*G(S.shape),P=()=>{o[T]=!0,s[T]==null&&(s[T]=[]),s[T].push({manifestEntry:S,groupOffset:N,sizeBytes:I})};r!=null?r.forEach((E,L)=>{E===S.name&&(P(),c[L]=!0)}):P(),l.push(S.name),N+=I})}),!c.every(v=>v)){let v=r.filter((T,N)=>!c[N]);throw new Error(`Could not find weights in manifest with names: ${v.join(", ")}. 
+Manifest JSON has weights with names: ${l.join(", ")}.`)}let p=o.reduce((v,T,N)=>(T&&v.push(N),v),[]),f=[];p.forEach(v=>{t[v].paths.forEach(T=>{let N=e+(e.endsWith("/")?"":"/")+T;f.push(N)})});let m=await n(f),y={},b=0;return p.forEach(v=>{let T=t[v].paths.length,N=0;for(let E=0;E{let L=S.slice(E.groupOffset,E.groupOffset+E.sizeBytes),B=qf(L,[E.manifestEntry]);for(let q in B)y[q]=B[q]}),b+=T}),y}}let fO="application/octet-stream",dO="application/json";class Gx{constructor(t,e){if(this.DEFAULT_METHOD="POST",e==null&&(e={}),this.weightPathPrefix=e.weightPathPrefix,this.onProgress=e.onProgress,this.weightUrlConverter=e.weightUrlConverter,e.fetchFunc!=null?(_(typeof e.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=e.fetchFunc):this.fetch=ct().platform.fetch,_(t!=null&&t.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(t)&&_(t.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${t.length}).`),this.path=t,e.requestInit!=null&&e.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=e.requestInit||{}}async save(t){if(t.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");let e=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);e.body=new FormData;let r=[{paths:["./model.weights.bin"],weights:t.weightSpecs}],o={modelTopology:t.modelTopology,format:t.format,generatedBy:t.generatedBy,convertedBy:t.convertedBy,userDefinedMetadata:t.userDefinedMetadata,weightsManifest:r};e.body.append("model.json",new Blob([JSON.stringify(o)],{type:dO}),"model.json"),t.weightData!=null&&e.body.append("model.weights.bin",new Blob([t.weightData],{type:fO}),"model.weights.bin");let s=await this.fetch(this.path,e);if(s.ok)return{modelArtifactsInfo:Yu(t),responses:[s]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${s.status}.`)}async load(){let t=await this.fetch(this.path,this.requestInit);if(!t.ok)throw new Error(`Request to ${this.path} failed with status code ${t.status}. Please verify this URL points to the model JSON of the model to load.`);let e;try{e=await t.json()}catch(v){let T=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?T+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":T+=" Please make sure the server is serving valid JSON for this request.",new Error(T)}let r=e.modelTopology,o=e.weightsManifest,s=e.generatedBy,c=e.convertedBy,l=e.format,p=e.userDefinedMetadata;if(r==null&&o==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let f,m;if(o!=null){let v=await this.loadWeights(o);[f,m]=v}let y={modelTopology:r,weightSpecs:f,weightData:m,userDefinedMetadata:p,generatedBy:s,convertedBy:c,format:l},b=e.modelInitializer;return b&&(y.modelInitializer=b),y}async loadWeights(t){let e=Array.isArray(this.path)?this.path[1]:this.path,[r,o]=mO(e),s=this.weightPathPrefix||r,c=[];for(let m of t)c.push(...m.weights);let l=[],p=[];for(let m of t)for(let y of m.paths)this.weightUrlConverter!=null?p.push(this.weightUrlConverter(y)):l.push(s+y+o);this.weightUrlConverter&&l.push(...await Promise.all(p));let f=await YN(l,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[c,Hf(f)]}}Gx.URL_SCHEME_REGEX=/^https?:\/\//;function mO(n){let t=n.lastIndexOf("/"),e=n.lastIndexOf("?"),r=n.substring(0,t),o=e>t?n.substring(e):"";return[r+"/",o]}function Ux(n){return n.match(Gx.URL_SCHEME_REGEX)!=null}let QN=(n,t)=>{if(typeof fetch=="undefined"&&(t==null||t.fetchFunc==null))return null;{let e=!0;if(Array.isArray(n)?e=n.every(r=>Ux(r)):e=Ux(n),e)return qx(n,t)}return null};Ze.registerSaveRouter(QN),Ze.registerLoadRouter(QN);function qx(n,t){return new Gx(n,t)}function Xf(n,t){return qx(n,t)}class Hx{constructor(t){this.modelArtifacts=t}async load(){return this.modelArtifacts}}class gO{constructor(t){this.saveHandler=t}async save(t){return this.saveHandler(t)}}function yO(n,t,e,r){if(arguments.length===1){let o=n.modelTopology!=null||n.weightSpecs!=null;return o?new Hx(n):(console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Hx({modelTopology:n}))}else return console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. 
The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."),new Hx({modelTopology:n,weightSpecs:t,weightData:e,trainingConfig:r})}function bO(n){return new gO(n)}var xO=Object.freeze({__proto__:null,browserFiles:hO,browserHTTPRequest:Xf,concatenateArrayBuffers:Hf,decodeWeights:qf,encodeWeights:Ox,fromMemory:yO,getLoadHandlers:Bx,getModelArtifactsInfoForJSON:Yu,getSaveHandlers:Mx,http:qx,isHTTPScheme:Ux,loadWeights:JN,registerLoadRouter:BP,registerSaveRouter:MP,weightsLoaderFactory:ZN,withSaveHandler:bO,copyModel:ZP,listModels:YP,moveModel:QP,removeModel:JP});function wO(n,t){let e=M(n,"x","reshape",null),r={x:e},o={shape:t},s=(c,l)=>(t=Ge(t,e.size),_(e.size===G(t),()=>"new shape and old shape must have the same number of elements."),l([e]),c.reshape(e,t));return X.runKernelFunc(s,r,null,Eu,o)}let Q=j({reshape_:wO});function vO(n,t,e=!1,r=!1){let o=M(n,"a","matMul"),s=M(t,"b","matMul");[o,s]=Je(o,s);let c=(f,m)=>{m([o,s]);let y=e?o.shape[o.rank-2]:o.shape[o.rank-1],b=r?s.shape[s.rank-1]:s.shape[s.rank-2],v=e?o.shape[o.rank-1]:o.shape[o.rank-2],T=r?s.shape[s.rank-2]:s.shape[s.rank-1],N=o.shape.slice(0,-2),S=s.shape.slice(0,-2),D=G(N),I=G(S),P=D===I||D===1||I===1;_(o.rank>=2&&s.rank>=2&&P,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${N}) and (${S}).`),_(y===b,()=>`Error in matMul: inner shapes (${y}) and (${b}) of Tensors with shapes ${o.shape} and ${s.shape} and transposeA=${e} and transposeB=${r} must match.`);let E=D>I?N:S,L=E.concat([v,T]),B=e?Q(o,[D,y,v]):Q(o,[D,v,y]),q=r?Q(s,[I,T,b]):Q(s,[I,b,T]),H=f.batchMatMul(B,q,e,r);return Q(H,L)},l={a:o,b:s},p={transposeA:e,transposeB:r};return X.runKernelFunc(c,l,null,cf,p)}let ge=j({matMul_:vO});function TO(n,t,e=1,r=0){if(t<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${t}`);let o=M(n,"indices","oneHot","int32"),s=[...o.shape,t],c=(f,m)=>(m([o]),Q(f.oneHot(Q(o,[o.size]),t,e,r),s)),l={indices:o},p={depth:t,onValue:e,offValue:r};return X.runKernelFunc(c,l,null,fx,p)}let Zi=j({oneHot_:TO});function kO(n,t){let e=M(n,"x","transpose");if(t==null&&(t=e.shape.map((s,c)=>c).reverse()),_(e.rank===t.length,()=>`Error in transpose: rank of input ${e.rank} must match length of perm ${t}.`),t.forEach(s=>{_(s>=0&&s`All entries in 'perm' must be between 0 and ${e.rank-1} but got ${t}`)}),e.rank<=1)return e.clone();let r={x:e},o={perm:t};return X.runKernelFunc(s=>s.transpose(e,t),r,null,Wu,o)}let Kt=j({transpose_:kO});function NO(n,t,e){let r=M(n,"labels","confusionMatrix"),o=M(t,"predictions","confusionMatrix");_(e==null||e>0&&Number.isInteger(e),()=>`If provided, numClasses must be a positive integer, but got ${e}`),_(r.rank===1,()=>`Expected the rank of labels to be 1, but got ${r.rank}`),_(o.rank===1,()=>`Expected the rank of predictions to be 1, but got ${o.rank}`),_(r.shape[0]===o.shape[0],()=>`Mismatch in the number of examples: ${r.shape[0]} vs. ${o.shape[0]}. 
Labels and predictions should have the same number of elements.`),_(e>0&&Number.isInteger(e),()=>`numClasses is required to be a positive integer, but got ${e}`);let s=Zi($t(r,"int32"),e),c=Zi($t(o,"int32"),e),l=Kt(s),p=ge(l,c);return $t(p,"int32")}let _O=j({confusionMatrix_:NO});var CO=Object.freeze({__proto__:null,confusionMatrix:_O});function t_(n,t,e){if(et(n),t!=null&&t.length!==3)throw new Error("tensor3d() requires shape to have three numbers");let r=Lo(n,e);if(r.length!==3&&r.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(r.length===1&&t==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return ti(n,t,r,e)}let vc;function SO(n,t=3){if(t>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(n==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let e=!1,r=!1,o=!1,s=!1,c=!1;if(n.data instanceof Uint8Array)e=!0;else if(typeof ImageData!="undefined"&&n instanceof ImageData)r=!0;else if(typeof HTMLVideoElement!="undefined"&&n instanceof HTMLVideoElement)o=!0;else if(typeof HTMLImageElement!="undefined"&&n instanceof HTMLImageElement)s=!0;else if(n.getContext!=null)c=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${n.constructor.name}`);if(o){let v=2;if(o&&n.readyState element.")}let l=Cx(Ff,X.backendName);if(l!=null){let v={pixels:n},T={numChannels:t};return X.runKernel(Ff,v,T)}let[p,f]=o?[n.videoWidth,n.videoHeight]:[n.width,n.height],m;c?m=n.getContext("2d").getImageData(0,0,p,f).data:r||e?m=n.data:(s||o)&&(vc==null&&(vc=document.createElement("canvas").getContext("2d")),vc.canvas.width=p,vc.canvas.height=f,vc.drawImage(n,0,0,p,f),m=vc.getImageData(0,0,p,f).data);let y;if(t===4)y=new Int32Array(m);else{let v=p*f;y=new Int32Array(v*t);for(let T=0;T4||s===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${s}`);if(e.dtype!=="float32"&&e.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${e.dtype}. Please use float32 or int32 tensors.`);let c=await e.data(),l=e.dtype==="float32"?255:1,p=new Uint8ClampedArray(o*r*4);for(let f=0;f1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${v}.`)}else if(e.dtype==="int32"&&(v<0||v>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${v}.`);s===1?(m[0]=v*l,m[1]=v*l,m[2]=v*l):m[b]=v*l}let y=f*4;p[y+0]=Math.round(m[0]),p[y+1]=Math.round(m[1]),p[y+2]=Math.round(m[2]),p[y+3]=Math.round(m[3])}if(t!=null){t.width=o,t.height=r;let f=t.getContext("2d"),m=new ImageData(p,o,r);f.putImageData(m,0,0)}return e!==n&&e.dispose(),p}let e_=j({fromPixels_:SO});var IO=Object.freeze({__proto__:null,toPixels:$O,fromPixels:e_});function Yf(n,t){if(n.rank<1)throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${n.rank}.`);if(t.rank<1)throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${t.rank}.`);if(t.dtype!=="int32")throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t.dtype}.`);if(t.shape[t.rank-1]>n.rank)throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t.shape[t.rank-1]} vs. 
${n.rank}`);if(n.size===0)throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${n.shape}.`);let e=t.shape,r=e[e.length-1],o=1;for(let f=0;ff/l),1].slice(0,r);return[c,o,l,p]}var EO=Object.freeze({__proto__:null,prepareAndValidate:Yf});function jx(n,t,e){let r=t.rank>1?t.shape[t.rank-1]:1,o=t.rank>1?t.rank-1:1,s=`Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${e.shape}, indices.shape: ${t.shape}, shape: ${n}, sliceDim: ${r}, and batchDim: ${o}.`;if(e.rank1?t.shape[r-1]:1,s=e.length,c=1;for(let y=o;y`Error in slice${r}D: Length of begin ${t} must match the rank of the array (${r}).`),_(r===e.length,()=>`Error in slice${r}D: Length of size ${e} must match the rank of the array (${r}).`);for(let o=0;o`Error in slice${r}D: begin[${o}] + size[${o}] (${t[o]+e[o]}) would overflow input.shape[${o}] (${n.shape[o]})`)}function Jf(n){let t=[],e=0;for(;n>0;)n&1&&t.push(e),n/=2,e++;return t}function Zf(n,t,e){let r=[];for(let o=0;o0){let v=t[0],T=e+1;m=i_(c,v,T,r,n),y=a_(l,v,T,o,n),b=n_(s,v,T,n)}else for(let v=0;v-1)s[l]=0;else{let p=r_(t,e,l),f=r[p];n&1<-1)s[l]=Number.MAX_SAFE_INTEGER;else{let p=r_(t,e,l),f=r[p];n&1<0?c=Number.MIN_SAFE_INTEGER:c=Number.MAX_SAFE_INTEGER);let p=r[o];return c<0&&(c+=p),c=w(0,c,p-1),c}function u_(n,t,e,r,o,s){let c=t[o],l=e[o]||1;(n&1<0?c=Number.MAX_SAFE_INTEGER:c=Number.MIN_SAFE_INTEGER);let p=r[o];return c<0&&(c+=p),l>0?c=w(0,c,p):c=w(-1,c,p-1),c}function Yx(n,t,e){let r=e.length;for(let o=0;o1){r=o;break}for(let o=r+1;o0||e[o]!==n[o])return!1;return!0}function Jx(n,t){let e=n.length>0?n[n.length-1]:1;for(let r=0;r{_(c!==-1,()=>"slice() does not support negative begin indexing.")});let s;return e==null?s=new Array(o).fill(-1):typeof e=="number"?s=[e,...new Array(o-1).fill(-1)]:e.lengthc>=0?c:(_(c===-1,()=>`Negative size values should be exactly -1 but got ${c} for the slice() size at index ${l}.`),n.shape[l]-r[l])),[r,s]}var p_=Object.freeze({__proto__:null,assertParamsValid:Xx,maskToAxes:Jf,computeOutShape:Zf,stridesWithElidedDims:n_,getNormalizedAxes:s_,startIndicesWithElidedDims:i_,stopIndicesWithElidedDims:a_,stridesForAxis:c_,startForAxis:l_,stopForAxis:u_,isSliceContinous:Yx,computeFlatOffset:Jx,parseSliceParams:Qf});class Qi{getClassName(){return this.constructor.className}static fromConfig(t,e){return new t(e)}}class Dr{constructor(){this.classNameMap={}}static getMap(){return Dr.instance==null&&(Dr.instance=new Dr),Dr.instance}static register(t){Dr.getMap().classNameMap[t.className]=[t,t.fromConfig]}}function vt(n){_(n.className!=null,()=>"Class being registered does not have the static className property defined."),_(typeof n.className=="string",()=>"className is required to be a string, but got type "+typeof n.className),_(n.className.length>0,()=>"Class being registered has an empty-string as its className, which is disallowed."),Dr.register(n)}var AO=Object.freeze({__proto__:null,Serializable:Qi,SerializationMap:Dr,registerClass:vt});let FO=.001,h_=.1;function RO(n,t,e){return e==null&&(e=td()),Zx(n,t,(r,o)=>tw(r,o,e))}function td(){return X.backend.floatPrecision()===32?FO:h_}function Zx(n,t,e){let r=!0;if((gn(n)||gn(t))&&(r=!1),gn(n)&&gn(t)&&(r=!0),r){let c=n.constructor.name,l=t.constructor.name;if(c!==l)throw new Error(`Arrays are of different type. Actual: ${c}. Expected: ${l}`)}if(Array.isArray(n)&&Array.isArray(t)){let c=Lo(n),l=Lo(t);if(!lt(c,l))throw new Error(`Arrays have different shapes. Actual: [${c}]. 
Expected: [${l}]`)}let o=gn(n)?n:tt(n),s=gn(t)?t:tt(t);if(o.length!==s.length)throw new Error(`Arrays have different lengths actual: ${o.length} vs expected: ${s.length}. +Actual: ${o}. +Expected: ${s}.`);for(let c=0;ct.fail(),()=>t())}function OO(n,t){let e=typeof t=="string"||typeof t=="number"||typeof t=="boolean"?[t]:t;return as(n)||as(n[0])||as(t)||as(t[0])?Zx(n,e,(r,o)=>r==o):Zx(n,t,(r,o)=>tw(r,o,0))}function Qx(n,t,e){if(e==null&&(e=td()),!tw(n,t,e))throw new Error(`Numbers differ: actual === ${n}, expected === ${t}`)}function tw(n,t,e){return!isFinite(n)&&!isFinite(t)?!0:!(isNaN(n)||isNaN(t)||Math.abs(n-t)>e)}function LO(n,t,e){for(let r=0;re)throw new Error(`Value out of range:${n[r]} low: ${t}, high: ${e}`)}function MO(n,t){expect(new Float32Array(n)).toEqual(new Float32Array(t))}var BO=Object.freeze({__proto__:null,TEST_EPSILON_FLOAT16:h_,expectArraysClose:RO,testEpsilon:td,expectPromiseToFail:PO,expectArraysEqual:OO,expectNumbersClose:Qx,expectValuesInRange:LO,expectArrayBuffersEqual:MO});let f_="2.7.0";function zO(){ct().set("PROD",!0)}function WO(){ct().set("DEBUG",!0)}function VO(){ct().set("DEPRECATION_WARNINGS_ENABLED",!1),console.warn("TensorFlow.js deprecation warnings have been disabled.")}function yn(n){ct().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(n+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}TP(yn);function GO(){X.disposeVariables()}function ps(){return X}function ed(){return X.memory()}function UO(n){return X.profile(n)}function rt(n,t){return X.tidy(n,t)}function Xt(n){let t=cs(n);t.forEach(e=>e.dispose())}function Sn(n){return X.keep(n)}function qO(n){return X.time(n)}function d_(n){return X.setBackend(n)}function HO(){return X.ready()}function jO(){return X.backendName}function KO(n){X.removeBackend(n)}function XO(n){return X.findBackend(n)}function YO(n){return X.findBackendFactory(n)}function ew(n,t,e=1){return X.registerBackend(n,t,e)}function m_(){return X.backend}function JO(n,t){ct().setPlatform(n,t)}function ZO(n,t){let e=M(n,"a","add"),r=M(t,"b","add");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.add(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,Hi)}let Tt=j({add_:ZO});function QO(n,t){let e=M(n,"a","floorDiv"),r=M(t,"b","floorDiv");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.floorDiv(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,Qb)}let nd=j({floorDiv_:QO});function tL(n,t){let e=M(n,"a","div"),r=M(t,"b","div");if([e,r]=Je(e,r),e.dtype==="int32"&&r.dtype==="int32")return nd(e,r);let o=(l,p)=>{let f=l.realDivide(e,r);return p([e,r]),f},s={a:e,b:r},c={};return X.runKernelFunc(o,s,null,uc,c)}let Bt=j({div_:tL});function eL(n,t){let e=M(n,"a","mul"),r=M(t,"b","mul");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.multiply(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,pc)}let nt=j({mul_:eL});function nL(n){let t=M(n,"x","abs"),e={x:t};return X.runKernelFunc((r,o)=>(o([t]),t.dtype==="complex64"?r.complexAbs(t):r.abs(t)),e,null,of)}let bn=j({abs_:nL});function rL(n){let t=M(n,"x","acos"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.acos(t);return o([t]),s},e,null,eu)}let nw=j({acos_:rL});function oL(n){let t=M(n,"x","acosh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.acosh(t);return o([t]),s},e,null,nu)}let rw=j({acosh_:oL});function sL(n){_(Array.isArray(n),()=>"The argument passed to tf.addN() must be a list of tensors"),_(n.length>=1,()=>`Must pass at least one tensor to tf.addN(), but got ${n.length}`);let 
t=n.map((s,c)=>M(s,`tensors${c}`,"addN")),e=t[0];t.forEach(s=>{if(s.dtype!==e.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")}),t.forEach(s=>{if(!lt(s.shape,e.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")});let r=(s,c)=>{let l=s.addN(t);return c(t),l},o=t;return X.runKernelFunc(r,o,null,zb)}let g_=j({addN_:sL});function ow(n,t){for(let e=0;en[s]);return[e,o]}function Rn(n,t){let e=t.map(r=>1);return y_(n,e,t)}function sr(n,t,e){_(ow(t,e),()=>`${n} supports only inner-most axes for now. Got axes ${t} and rank-${e} input.`)}function ir(n,t){if(ow(n,t))return null;let e=[];for(let r=0;re.push(r)),e}function Ju(n){return n.map((t,e)=>[e,t]).sort((t,e)=>t[1]-e[1]).map(t=>t[0])}function xr(n,t){let e=[];for(let r=t-n;r{let p=Vt(t,r.shape),f=p,m=ir(f,r.rank);m!=null&&(r=Kt(r,m),f=xr(f.length,r.rank));let y=l.all(r,f);if(e){let b=Rn(y.shape,p);return Q(y,b)}return y},s={x:r},c={axis:t,keepDims:e};return X.runKernelFunc(o,s,null,tN,c)}let rd=j({all_:iL});function aL(n,t=null,e=!1){let r=M(n,"x","any","bool"),o=l=>{let p=Vt(t,r.shape),f=p,m=ir(f,r.rank);m!=null&&(r=Kt(r,m),f=xr(f.length,r.rank));let y=l.any(r,f);if(e){let b=Rn(y.shape,p);return Q(y,b)}return y},s={x:r},c={axis:t,keepDims:e};return X.runKernelFunc(o,s,null,eN,c)}let Zu=j({any_:aL});function cL(n,t=0){let e=M(n,"x","argMax"),r=(c,l)=>{l([e]);let p=Vt(t,e.shape),f=ir(p,e.rank);return f!=null&&(e=Kt(e,f),p=xr(p.length,e.rank)),c.argMax(e,p[0])},o={x:e},s={axis:t};return X.runKernelFunc(r,o,null,Wb,s)}let Qu=j({argMax_:cL});function lL(n,t=0){let e=M(n,"x","argMin"),r=(c,l)=>{l([e]),t==null&&(t=0);let p=Vt(t,e.shape),f=ir(p,e.rank);return f!=null&&(e=Kt(e,f),p=xr(p.length,e.rank)),c.argMin(e,p[0])},o={x:e},s={axis:t};return X.runKernelFunc(r,o,null,Vb,s)}let sw=j({argMin_:lL});function uL(n){let t=M(n,"x","asin"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.asin(t);return o([t]),s},e,null,ru)}let iw=j({asin_:uL});function pL(n){let t=M(n,"x","asinh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.asinh(t);return o([t]),s},e,null,ou)}let aw=j({asinh_:pL});function hL(n){let t=M(n,"x","atan"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.atan(t);return o([t]),s},e,null,su)}let cw=j({atan_:hL});function fL(n,t){let e=M(n,"a","atan2"),r=M(t,"b","atan2");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.atan2(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,sf)}let lw=j({atan2_:fL});function dL(n){let t=M(n,"x","atanh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.atanh(t);return o([t]),s},e,null,iu)}let uw=j({atanh_:dL});function od(n,t,e,r,o="NHWC",s){let c=n[3],l=[...t,c],p=si(o);return Un(n,l,e,s,r,null,null,p)}function Kn(n,t,e,r,o,s,c="channelsLast"){let[l,p]=sd(t),f;if(c==="channelsLast")f=[l,p,n[3],n[3]];else if(c==="channelsFirst")f=[l,p,n[1],n[1]];else throw new Error(`Unknown dataFormat ${c}`);return Un(n,f,e,r,o,s,!1,c)}function tp(n,t,e,r,o,s,c="NDHWC"){let[l,p,f]=hw(t),m,y;if(c==="NDHWC")y="channelsLast",m=[l,p,f,n[4],n[4]];else if(c==="NCDHW")y="channelsFirst",m=[l,p,f,n[1],n[1]];else throw new Error(`Unknown dataFormat ${c}`);return ri(n,m,e,r,o,!1,y,s)}function Un(n,t,e,r,o,s,c=!1,l="channelsLast"){let[p,f,m,y]=[-1,-1,-1,-1];if(l==="channelsLast")[p,f,m,y]=n;else if(l==="channelsFirst")[p,y,f,m]=n;else throw new Error(`Unknown dataFormat ${l}`);let[b,v,,T]=t,[N,S]=sd(e),[D,I]=sd(r),P=kc(b,D),E=kc(v,I),{padInfo:L,outHeight:B,outWidth:q}=yL(o,f,m,N,S,P,E,s,l),H=c?T*y:T,Z;return 
l==="channelsFirst"?Z=[p,H,B,q]:l==="channelsLast"&&(Z=[p,B,q,H]),{batchSize:p,dataFormat:l,inHeight:f,inWidth:m,inChannels:y,outHeight:B,outWidth:q,outChannels:H,padInfo:L,strideHeight:N,strideWidth:S,filterHeight:b,filterWidth:v,effectiveFilterHeight:P,effectiveFilterWidth:E,dilationHeight:D,dilationWidth:I,inShape:n,outShape:Z,filterShape:t}}function ri(n,t,e,r,o,s=!1,c="channelsLast",l){let[p,f,m,y,b]=[-1,-1,-1,-1,-1];if(c==="channelsLast")[p,f,m,y,b]=n;else if(c==="channelsFirst")[p,b,f,m,y]=n;else throw new Error(`Unknown dataFormat ${c}`);let[v,T,N,,S]=t,[D,I,P]=hw(e),[E,L,B]=hw(r),q=kc(v,E),H=kc(T,L),Z=kc(N,B),{padInfo:J,outDepth:it,outHeight:pt,outWidth:ht}=bL(o,f,m,y,D,I,P,q,H,Z,l),dt=s?S*b:S,ft;return c==="channelsFirst"?ft=[p,dt,it,pt,ht]:c==="channelsLast"&&(ft=[p,it,pt,ht,dt]),{batchSize:p,dataFormat:c,inDepth:f,inHeight:m,inWidth:y,inChannels:b,outDepth:it,outHeight:pt,outWidth:ht,outChannels:dt,padInfo:J,strideDepth:D,strideHeight:I,strideWidth:P,filterDepth:v,filterHeight:T,filterWidth:N,effectiveFilterDepth:q,effectiveFilterHeight:H,effectiveFilterWidth:Z,dilationDepth:E,dilationHeight:L,dilationWidth:B,inShape:n,outShape:ft,filterShape:t}}function mL(n,t,e,r,o){r==null&&(r=pw(n,t,e));let s=n[0],c=n[1],l=ta((s-t+2*r)/e+1,o);_(gt(l),()=>`The output # of rows (${l}) must be an integer. Change the stride and/or zero pad parameters`);let p=ta((c-t+2*r)/e+1,o);return _(gt(p),()=>`The output # of columns (${p}) must be an integer. Change the stride and/or zero pad parameters`),[l,p]}function gL(n,t,e,r,o,s){o==null&&(o=pw(n,t,r));let c=n[0],l=n[1],p=n[2],f=ta((c-t+2*o)/r+1,s);_(gt(f),()=>`The output # of depths (${f}) must be an integer. Change the stride and/or zero pad parameters`);let m=ta((l-t+2*o)/r+1,s);_(gt(m),()=>`The output # of rows (${m}) must be an integer. Change the stride and/or zero pad parameters`);let y=ta((p-t+2*o)/r+1,s);return _(gt(y),()=>`The output # of columns (${y}) must be an integer. 
Change the stride and/or zero pad parameters`),[f,m,y,e]}function pw(n,t,e,r=1){let o=kc(t,r);return Math.floor((n[0]*(e-1)-e+o)/2)}function sd(n){return typeof n=="number"?[n,n,n]:n.length===2?[n[0],n[1],1]:n}function hw(n){return typeof n=="number"?[n,n,n]:n}function kc(n,t){return t<=1?n:n+(n-1)*(t-1)}function yL(n,t,e,r,o,s,c,l,p){let f,m,y;if(typeof n=="number"){let b=n===0?"VALID":"NUMBER";f={top:n,bottom:n,left:n,right:n,type:b};let v=mL([t,e],s,r,n,l);m=v[0],y=v[1]}else if(n==="same"){m=Math.ceil(t/r),y=Math.ceil(e/o);let b=Math.max(0,(m-1)*r+s-t),v=Math.max(0,(y-1)*o+c-e),T=Math.floor(b/2),N=b-T,S=Math.floor(v/2),D=v-S;f={top:T,bottom:N,left:S,right:D,type:"SAME"}}else if(n==="valid")f={top:0,bottom:0,left:0,right:0,type:"VALID"},m=Math.ceil((t-s+1)/r),y=Math.ceil((e-c+1)/o);else if(typeof n=="object"){let b=p==="channelsLast"?n[1][0]:n[2][0],v=p==="channelsLast"?n[1][1]:n[2][1],T=p==="channelsLast"?n[2][0]:n[3][0],N=p==="channelsLast"?n[2][1]:n[3][1],S=b===0&&v===0&&T===0&&N===0?"VALID":"EXPLICIT";f={top:b,bottom:v,left:T,right:N,type:S},m=ta((t-s+b+v)/r+1,l),y=ta((e-c+T+N)/o+1,l)}else throw Error(`Unknown padding parameter: ${n}`);return{padInfo:f,outHeight:m,outWidth:y}}function bL(n,t,e,r,o,s,c,l,p,f,m){let y,b,v,T;if(typeof n=="number"){let N=n===0?"VALID":"NUMBER";y={top:n,bottom:n,left:n,right:n,front:n,back:n,type:N};let S=gL([t,e,r,1],l,1,o,n,m);b=S[0],v=S[1],T=S[2]}else if(n==="same"){b=Math.ceil(t/o),v=Math.ceil(e/s),T=Math.ceil(r/c);let N=(b-1)*o+l-t,S=(v-1)*s+p-e,D=(T-1)*c+f-r,I=Math.floor(N/2),P=N-I,E=Math.floor(S/2),L=S-E,B=Math.floor(D/2),q=D-B;y={top:E,bottom:L,left:B,right:q,front:I,back:P,type:"SAME"}}else if(n==="valid")y={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},b=Math.ceil((t-l+1)/o),v=Math.ceil((e-p+1)/s),T=Math.ceil((r-f+1)/c);else throw Error(`Unknown padding parameter: ${n}`);return{padInfo:y,outDepth:b,outHeight:v,outWidth:T}}function ta(n,t){if(!t)return n;switch(t){case"round":return Math.round(n);case"ceil":return Math.ceil(n);case"floor":return Math.floor(n);default:throw new Error(`Unknown roundingMode ${t}`)}}function oi(n){let[t,e,r]=sd(n);return t===1&&e===1&&r===1}function fn(n,t){return oi(n)||oi(t)}function si(n){if(n==="NHWC")return"channelsLast";if(n==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${n}`)}function xL(n,t,e,r,o){let s=M(n,"x","avgPool","float32"),c=1;_(fn(e,c),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${e} and dilations '${c}'`);let l=s,p=!1;s.rank===3&&(p=!0,l=Q(s,[1,s.shape[0],s.shape[1],s.shape[2]])),_(l.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${l.rank}.`),o!=null&&_(gt(r),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${o} but got pad ${r}.`);let f=(v,T)=>{let N=Kn(l.shape,t,e,1,r,o);return T([l]),N.filterWidth===1&&N.filterHeight===1&<(N.inShape,N.outShape)?l.clone():v.avgPool(l,N)},m={x:l},y={filterSize:t,strides:e,pad:r,dimRoundingMode:o},b=X.runKernelFunc(f,m,null,au,y);return b=$t(b,s.dtype),p?Q(b,[b.shape[1],b.shape[2],b.shape[3]]):b}let ep=j({avgPool_:xL});function wL(n,t,e,r,o,s="NDHWC",c){c==null?c=[1,1,1]:yn("dilations is deprecated, this field will be gone in v3.0.0.");let l=M(n,"x","avgPool3d","float32"),p=l,f=!1;l.rank===4&&(f=!0,p=Q(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),_(p.rank===5,()=>`Error in avgPool3d: x must be rank 5 but got rank ${p.rank}.`),_(s==="NDHWC",()=>`Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${s}`),_(fn(e,c),()=>`Error in avgPool3d: Either strides or dilations must be 1. Got strides ${e} and dilations '${c}'`),o!=null&&_(gt(r),()=>`Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${o} but got pad ${r}.`);let m=(T,N)=>{c==null&&(c=[1,1,1]);let S=tp(p.shape,t,e,c,r,o,s);return N([p]),T.avgPool3d(p,S)},y={x:p},b={filterSize:t,strides:e,pad:r,dimRoundingMode:o,dataFormat:s,dilations:c},v=X.runKernelFunc(m,y,null,Gb,b);return v=$t(v,p.dtype),f?Q(v,[v.shape[1],v.shape[2],v.shape[3],v.shape[4]]):v}let fw=j({avgPool3d_:wL});function id(n,t){let e=n[0].length;n.forEach((o,s)=>{_(o.length===e,()=>`Error in concat${e}D: rank of tensors[${s}] must be the same as the rank of the rest (${e})`)}),_(t>=0&&t`Error in concat${e}D: axis must be between 0 and ${e-1}.`);let r=n[0];n.forEach((o,s)=>{for(let c=0;c`Error in concat${e}D: Shape of tensors[${s}] (${o}) does not match the shape of the rest (${r}) along the non-concatenated axis ${s}.`)})}function hs(n,t){let e=n[0].slice();for(let r=1;r=1,()=>"Pass at least one tensor to concat");let e=Xu(n,"tensors","concat");e[0].dtype==="complex64"&&e.forEach(c=>{if(c.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${c.dtype}. 
`)});let r=(c,l)=>{let p=Vt(t,e[0].shape)[0],f=hs(e.map(b=>b.shape),p);if(G(f)===0)return un([],f);if(e=e.filter(b=>b.size>0),e.length===1)return e[0];let m=e.map(b=>b.shape);id(m,p);let y=c.concat(e,p);return l(e),y},o=e,s={axis:t};return X.runKernelFunc(r,o,null,uu,s)}let Qe=j({concat_:vL});function TL(n){let t=M(n,"x","sigmoid"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.sigmoid(t);return o([s]),s},e,null,Lu)}let Bo=j({sigmoid_:TL});function kL(n,t,e){let r=M(n,"x","slice");if(r.rank===0)throw new Error("Slicing scalar is not possible");let o=(l,p)=>{let[f,m]=Qf(r,t,e);return Xx(r,f,m),p([r]),l.slice(r,f,m)},s={x:r},c={begin:t,size:e};return X.runKernelFunc(o,s,null,If,c)}let ce=j({slice_:kL});function NL(n){let t=M(n,"x","tanh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.tanh(t);return o([s]),s},e,null,zu)}let Nc=j({tanh_:NL});function _L(n,t,e,r,o,s){let c=M(n,"forgetBias","basicLSTMCell"),l=M(t,"lstmKernel","basicLSTMCell"),p=M(e,"lstmBias","basicLSTMCell"),f=M(r,"data","basicLSTMCell"),m=M(o,"c","basicLSTMCell"),y=M(s,"h","basicLSTMCell"),b=Qe([f,y],1),v=ge(b,l),T=Tt(v,p),N=T.shape[0],S=T.shape[1]/4,D=[N,S],I=ce(T,[0,0],D),P=ce(T,[0,S],D),E=ce(T,[0,S*2],D),L=ce(T,[0,S*3],D),B=Tt(nt(Bo(I),Nc(P)),nt(m,Bo(Tt(c,E)))),q=nt(Nc(B),Bo(L));return[B,q]}let CL=j({basicLSTMCell_:_L});function SL(n,t,e){let r=M(n,"x","batchToSpaceND"),o=t.reduce((p,f)=>p*f);_(r.rank>=1+t.length,()=>`input rank is ${r.rank} but should be > than blockShape.length ${t.length}`),_(e.length===t.length,()=>`crops.length is ${e.length} but should be equal to blockShape.length ${t.length}`),_(r.shape[0]%o===0,()=>`input tensor batch is ${r.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${o}`);let s=p=>p.batchToSpaceND(r,t,e),c={x:r},l={blockShape:t,crops:e};return X.runKernelFunc(s,c,null,Ub,l)}let np=j({batchToSpaceND_:SL});function $L(n){let t;return n.rank===0||n.rank===1?t=Q(n,[1,1,1,n.size]):n.rank===2?t=Q(n,[1,1,n.shape[0],n.shape[1]]):n.rank===3?t=Q(n,[1,n.shape[0],n.shape[1],n.shape[2]]):t=n,t}function IL(n,t,e,r,o,s){s==null&&(s=.001);let c=M(n,"x","batchNorm"),l=M(t,"mean","batchNorm"),p=M(e,"variance","batchNorm"),f;o!=null&&(f=M(o,"scale","batchNorm"));let m;r!=null&&(m=M(r,"offset","batchNorm")),_(l.rank===p.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),_(m==null||l.rank===m.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),_(f==null||l.rank===f.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let y=$L(c),b=(S,D)=>(D([y,l,p,f]),S.batchNorm(y,ad(l),ad(p),ad(m),ad(f),s)),v={x:y,scale:f,offset:m,mean:l,variance:p},T={varianceEpsilon:s},N=X.runKernelFunc(b,v,null,yu,T);return Q(N,c.shape)}function ad(n){return n==null?null:n.rank===0?Q(n,[n.size]):n.rank===1?n:n.rank===2?Q(n,[1,1,n.shape[0],n.shape[1]]):n.rank===3?Q(n,[1,n.shape[0],n.shape[1],n.shape[2]]):n}let ea=j({batchNorm_:IL});function EL(n,t,e,r,o,s){let c=M(n,"x","batchNorm"),l=M(t,"mean","batchNorm"),p=M(e,"variance","batchNorm"),f;o!=null&&(f=M(o,"scale","batchNorm"));let m;return r!=null&&(m=M(r,"offset","batchNorm")),_(c.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${c.rank}.`),_(l.rank===2||l.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${l.rank}.`),_(p.rank===2||p.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank ${p.rank}.`),f!=null&&_(f.rank===2||f.rank===1,()=>`Error in batchNorm2D: 
scale must be rank 2 or rank 1 but got rank ${f.rank}.`),m!=null&&_(m.rank===2||m.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${m.rank}.`),ea(c,l,p,m,f,s)}let b_=j({batchNorm2d_:EL});function DL(n,t,e,r,o,s){let c=M(n,"x","batchNorm"),l=M(t,"mean","batchNorm"),p=M(e,"variance","batchNorm"),f;o!=null&&(f=M(o,"scale","batchNorm"));let m;return r!=null&&(m=M(r,"offset","batchNorm")),_(c.rank===3,()=>`Error in batchNorm3D: x must be rank 3 but got rank ${c.rank}.`),_(l.rank===3||l.rank===1,()=>`Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${l.rank}.`),_(p.rank===3||p.rank===1,()=>`Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${p.rank}.`),f!=null&&_(f.rank===3||f.rank===1,()=>`Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${f.rank}.`),m!=null&&_(m.rank===3||m.rank===1,()=>`Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${m.rank}.`),ea(c,l,p,m,f,s)}let x_=j({batchNorm3d_:DL});function AL(n,t,e,r,o,s){let c=M(n,"x","batchNorm"),l=M(t,"mean","batchNorm"),p=M(e,"variance","batchNorm"),f;o!=null&&(f=M(o,"scale","batchNorm"));let m;return r!=null&&(m=M(r,"offset","batchNorm")),_(c.rank===4,()=>`Error in batchNorm4D: x must be rank 4 but got rank ${c.rank}.`),_(l.rank===4||l.rank===1,()=>`Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${l.rank}.`),_(p.rank===4||p.rank===1,()=>`Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${p.rank}.`),f!=null&&_(f.rank===4||f.rank===1,()=>`Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${f.rank}.`),m!=null&&_(m.rank===4||m.rank===1,()=>`Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${m.rank}.`),ea(c,l,p,m,f,s)}let w_=j({batchNorm4d_:AL});function FL(n,t){let e=M(n,"broadcastTo","x"),r=e.shape;if(t.some(m=>!(m>0)||m%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${t}].`);if(t.lengthe.rank){let m=e.shape.slice();for(;m.length=0;m--)if(o[m]===t[m])s[m]=1;else if(e.shape[m]!==1)throw new Error(`broadcastTo(): [${r}] cannot be broadcast to [${t}].`);let c=s.map((m,y)=>m>1?y:-1).filter(m=>m>=0);if(c.length===0)return ni(e);let l=m=>m.tile(e,s),p={x:e},f={shape:t,inputShape:o};return X.runKernelFunc(l,p,null,qb,f)}let rp=j({broadcastTo_:FL});function RL(n){let t=M(n,"x","ceil"),e={x:t};return X.runKernelFunc(r=>r.ceil(t),e,null,cu)}let dw=j({ceil_:RL});function PL(n,t,e){let r=M(n,"x","clipByValue");_(t<=e,()=>`Error in clip: min (${t}) must be less than or equal to max (${e}).`);let o={x:r},s={clipValueMin:t,clipValueMax:e};return X.runKernelFunc((c,l)=>{let p=c.clip(r,t,e);return l([r]),p},o,null,lu,s)}let ar=j({clipByValue_:PL});function OL(n){return Qe(n,0)}let v_=j({concat1d_:OL});function LL(n,t){return Qe(n,t)}let T_=j({concat2d_:LL});function ML(n,t){return Qe(n,t)}let k_=j({concat3d_:ML});function BL(n,t){return Qe(n,t)}let N_=j({concat4d_:BL});function zL(n,t,e,r,o="NHWC",s=[1,1],c){let l=M(n,"x","conv2d"),p=M(t,"filter","conv2d"),f=l,m=!1;l.rank===3&&(m=!0,f=Q(l,[1,l.shape[0],l.shape[1],l.shape[2]])),_(f.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${f.rank}.`),_(p.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${p.rank}.`),c!=null&&_(gt(r),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${c} but got pad ${r}.`);let y=o==="NHWC"?f.shape[3]:f.shape[1];_(y===p.shape[2],()=>`Error in conv2d: depth of input (${y}) must match input depth for filter ${p.shape[2]}.`),_(fn(e,s),()=>`Error in 
conv2D: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`);let b=(S,D)=>{let I=si(o),P=Un(f.shape,p.shape,e,s,r,c,!1,I),E=S.conv2d(f,p,P);return D([f,p]),E},v={x:f,filter:p},T={strides:e,pad:r,dataFormat:o,dilations:s,dimRoundingMode:c},N=X.runKernelFunc(b,v,null,uf,T);return m?Q(N,[N.shape[1],N.shape[2],N.shape[3]]):N}let fs=j({conv2d_:zL});function WL(n,t,e,r,o="NWC",s=1,c){let l=M(n,"x","conv1d"),p=M(t,"filter","conv1d"),f=l,m=!1;l.rank===2&&(m=!0,f=Q(l,[1,l.shape[0],l.shape[1]])),_(f.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${f.rank}.`),_(p.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${p.rank}.`),c!=null&&_(gt(r),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${c} but got pad ${r}.`),_(f.shape[2]===p.shape[1],()=>`Error in conv1d: depth of input (${f.shape[2]}) must match input depth for filter ${p.shape[1]}.`),_(fn(e,s),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${e} and dilation '${s}'`),_(o==="NWC",()=>`Error in conv1d: got dataFormat of ${o} but only NWC is currently supported.`);let y=Q(p,[1,p.shape[0],p.shape[1],p.shape[2]]),b=Q(f,[f.shape[0],1,f.shape[1],f.shape[2]]),v=[1,e],T=[1,s],N="NHWC",S=fs(b,y,v,r,N,T,c);return m?Q(S,[S.shape[2],S.shape[3]]):Q(S,[S.shape[0],S.shape[2],S.shape[3]])}let cd=j({conv1d_:WL});function VL(n,t,e,r,o,s="NHWC",c){_(n.length===t.rank,()=>`Length of inShape (${n.length}) and rank of dy (${t.rank}) must match`);let l=n,p=t,f=!1;t.rank===3&&(f=!0,p=Q(t,[1,t.shape[0],t.shape[1],t.shape[2]]),l=[1,n[0],n[1],n[2]]),_(l.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${l.length}.`),_(p.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${p.rank}`),_(e.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${e.rank}`);let m=s==="NHWC"?l[3]:l[1],y=s==="NHWC"?p.shape[3]:p.shape[1];_(m===e.shape[2],()=>`Error in conv2dDerInput: depth of input (${m}) must match input depth for filter ${e.shape[2]}.`),_(y===e.shape[3],()=>`Error in conv2dDerInput: depth of output (${y}) must match output depth for filter ${e.shape[3]}.`),c!=null&&_(gt(o),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${c} but got pad ${o}.`);let b=(S,D)=>{let I=1,P=si(s),E=Un(l,e.shape,r,I,o,c,!1,P),L=S.conv2dDerInput(p,e,E);return D([p,e]),L},v={dy:p,filter:e},T={strides:r,pad:o,dataFormat:s,dimRoundingMode:c,inputShape:l},N=X.runKernelFunc(b,v,null,pf,T);return f?Q(N,[N.shape[1],N.shape[2],N.shape[3]]):N}let mw=j({conv2DBackpropInput_:VL});function GL(n,t,e,r,o,s){let c=M(n,"x","conv2dTranspose"),l=M(t,"filter","conv2dTranspose");return mw(e,c,l,r,o,"NHWC",s)}let ld=j({conv2dTranspose_:GL});function UL(n,t,e,r,o="NDHWC",s=[1,1,1]){let c=M(n,"x","conv3d"),l=M(t,"filter","conv3d"),p=c,f=!1;c.rank===4&&(f=!0,p=Q(c,[1,c.shape[0],c.shape[1],c.shape[2],c.shape[3]])),_(p.rank===5,()=>`Error in conv3d: input must be rank 5, but got rank ${p.rank}.`),_(l.rank===5,()=>`Error in conv3d: filter must be rank 5, but got rank ${l.rank}.`),_(p.shape[4]===l.shape[3],()=>`Error in conv3d: depth of input (${p.shape[4]}) must match input depth for filter ${l.shape[3]}.`),_(fn(e,s),()=>`Error in conv3D: Either strides or dilations must be 1. 
Got strides ${e} and dilations '${s}'`),_(o==="NDHWC",()=>`Error in conv3d: got dataFormat of ${o} but only NDHWC is currently supported.`);let m=(T,N)=>{let S=ri(p.shape,l.shape,e,s,r),D=T.conv3d(p,l,S);return N([p,l]),D},y={x:p,filter:l},b={strides:e,pad:r,dataFormat:o,dilations:s},v=X.runKernelFunc(m,y,null,hf,b);return f?Q(v,[v.shape[1],v.shape[2],v.shape[3],v.shape[4]]):v}let gw=j({conv3d_:UL});function qL(n,t,e,r,o){_(n.length===t.rank,()=>`Length of inShape (${n.length}) and rank of dy (${t.rank}) must match`);let s=n,c=t,l=!1;t.rank===4&&(l=!0,c=Q(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]]),s=[1,n[0],n[1],n[2],n[3]]);let p=s[4],f=c.shape[4];_(s.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${s.length}.`),_(c.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${c.rank}`),_(e.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${e.rank}`),_(p===e.shape[3],()=>`Error in conv3dDerInput: depth of input (${p}) must match input depth for filter ${e.shape[3]}.`),_(f===e.shape[4],()=>`Error in conv3dDerInput: depth of output (${f}) must match output depth for filter ${e.shape[4]}.`);let m=T=>{let N=1,S=ri(s,e.shape,r,N,o);return T.conv3dDerInput(c,e,S)},y={dy:c,filter:e},b={pad:o,strides:r,inputShape:s},v=X.runKernelFunc(m,y,null,Kb,b);return l?Q(v,[v.shape[1],v.shape[2],v.shape[3],v.shape[4]]):v}let __=j({conv3DBackpropInput_:qL});function HL(n,t,e,r,o){let s=M(n,"x","conv3dTranspose"),c=M(t,"filter","conv3dTranspose");return __(e,s,c,r,o)}let jL=j({conv3dTranspose_:HL});function KL(n){let t=M(n,"x","cos"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.cos(t);return o([t]),s},e,null,lc)}let op=j({cos_:KL});function XL(n){let t=M(n,"x","cosh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.cosh(t);return o([t]),s},e,null,pu)}let ud=j({cosh_:XL});function YL(n,t=0,e=!1,r=!1){let o=M(n,"x","cumsum"),s=(p,f)=>{let m=ir([t],o.rank),y=o;m!=null&&(y=Kt(o,m));let b=xr(1,o.rank)[0],v=p.cumsum(y,b,e,r);if(f([o]),m!=null){let T=Ju(m);v=Kt(v,T)}return v},c={x:o},l={axis:t,exclusive:e,reverse:r};return X.runKernelFunc(s,c,null,Xb,l)}let pd=j({cumsum_:YL});function JL(n,t,e="NHWC"){let r=M(n,"x","depthToSpace"),o=e==="NHWC"?r.shape[1]:r.shape[2],s=e==="NHWC"?r.shape[2]:r.shape[3],c=e==="NHWC"?r.shape[3]:r.shape[1];_(o*t>=0,()=>`Negative dimension size caused by overflow when multiplying + ${o} and ${t} for depthToSpace with input shape + ${r.shape}`),_(s*t>=0,()=>`Negative dimension size caused by overflow when multiplying + ${s} and ${t} for depthToSpace with input shape + ${r.shape}`),_(c%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${c} for depthToSpace with input shape ${r.shape}`);let l=m=>m.depthToSpace(r,t,e),p={x:r},f={blockSize:t,dataFormat:e};return X.runKernelFunc(l,p,null,oN,f)}let yw=j({depthToSpace_:JL});function ZL(n,t,e,r,o="NHWC",s=[1,1],c){let l=M(n,"x","depthwiseConv2d"),p=M(t,"filter","depthwiseConv2d"),f=l,m=!1;l.rank===3&&(m=!0,f=Q(l,[1,l.shape[0],l.shape[1],l.shape[2]])),_(f.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${f.rank}.`),_(p.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${p.rank}.`),_(f.shape[3]===p.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${f.shape[3]}) must match the inChannels dimension in filter ${p.shape[2]}.`),c!=null&&_(gt(r),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${c} but got pad ${r}.`);let 
y=(N,S)=>{s==null&&(s=[1,1]),_(fn(e,s),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`);let D=Un(f.shape,p.shape,e,s,r,c,!0),I=N.depthwiseConv2D(f,p,D);return S([f,p]),I},b={x:f,filter:p},v={strides:e,pad:r,dataFormat:o,dilations:s,dimRoundingMode:c},T=X.runKernelFunc(y,b,null,ff,v);return m?Q(T,[T.shape[1],T.shape[2],T.shape[3]]):T}let na=j({depthwiseConv2d_:ZL});function QL(n){let t=M(n,"x","diag"),e=o=>{let s=Q(t,[t.size]),c=o.diag(s),l=[...n.shape,...n.shape];return Q(c,l)},r={x:t};return X.runKernelFunc(e,r,null,sN)}let tM=j({diag_:QL});function eM(n,t,e,r,o=[1,1],s="NHWC"){let c=M(n,"x","dilation2d"),l=M(t,"filter","dilation2d");_(c.rank===3||c.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${c.rank}.`),_(l.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${l.rank}.`),_(s==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${s}`);let p=c,f=!1;c.rank===3&&(p=Q(c,[1,c.shape[0],c.shape[1],c.shape[2]]),f=!0);let m={x:p,filter:l},y={strides:e,pad:r,dilations:o},b=X.runKernel(df,m,y);return f?Q(b,[b.shape[1],b.shape[2],b.shape[3]]):b}let bw=j({dilation2d_:eM});function ra(n,t){let e=n.length,r=[];for(let o=0;o1&&c===1&&r.unshift(s)}return r}function xn(n,t){let e=[];for(let r=0;r1)&&e.unshift(s)}return e}function le(n,t){let e=[],r=Math.max(n.length,t.length);for(let o=0;oc.equal(e,r),s={a:e,b:r};return X.runKernelFunc(o,s,null,aN)}let po=j({equal_:nM});function rM(n,t,e){let r=M(t,"a","where"),o=M(e,"b","where"),s=M(n,"condition","where","bool"),c=le(r.shape,o.shape),l=rp(r,c),p=rp(o,c);s.rank===1&&_(s.shape[0]===r.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),s.rank!==1&&W(s.shape,p.shape,"Error in where: ");let f=(y,b)=>{let v=y.select(s,l,p);return b([s]),v},m={condition:s,t:l,e:p};return X.runKernelFunc(f,m,null,bx)}let Xn=j({where_:rM});function oM(n){let t=M(n,"x","zerosLike"),e={x:t};return X.runKernelFunc(r=>r.zerosLike(t),e,null,_x)}let re=j({zerosLike_:oM});function sM(n,t){let e=M(n,"a","div"),r=M(t,"b","div");[e,r]=Je(e,r);let o=Bt(e,r),s=re(o),c=po(r,s);return Xn(c,s,o)}let xw=j({divNoNan_:sM});function iM(n,t){let e=M(n,"t1","dot"),r=M(t,"t2","dot");_((e.rank===1||e.rank===2)&&(r.rank===1||r.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${e.rank} and ${r.rank}.`);let o=e.rank===1?e.size:e.shape[1],s=r.rank===1?r.size:r.shape[0];if(_(o===s,()=>`Error in dot: inner dimensions of inputs must match, but got ${o} and ${s}.`),e.rank===1&&r.rank===1){let c=Q(e,[1,-1]),l=Q(r,[-1,1]),p=ge(c,l);return Q(p,[])}else if(e.rank===1&&r.rank===2){let c=Q(e,[1,-1]),l=Q(r,[r.shape[0],r.shape[1]]),p=ge(c,l);return Q(p,[p.size])}else if(e.rank===2&&r.rank===1){let c=Q(r,[-1,1]),l=ge(e,c);return Q(l,[l.size])}else{let c=Q(r,[r.shape[0],r.shape[1]]),l=ge(e,c);return l}}let C_=j({dot_:iM});function aM(n){let t=M(n,"x","elu"),e=(o,s)=>{let c=o.elu(t);return s([c]),c},r={x:t};return X.runKernelFunc(e,r,null,hu)}let _c=j({elu_:aM});function cM(n){let t=M(n,"x","erf");_(t.dtype==="int32"||t.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),t.dtype==="int32"&&(t=$t(t,"float32"));let e={x:t};return X.runKernelFunc((r,o)=>{let s=r.erf(t);return o([t]),s},e,null,fu)}let ww=j({erf_:cM});function lM(n){let t=M(n,"x","exp"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.exp(t);return o([s]),s},e,null,du)}let Ar=j({exp_:lM});function uM(n,t=0){let 
e=null,r=M(n,"x","expandDims",e);_(t<=r.rank,()=>"Axis must be <= rank of the tensor");let o=r.shape.slice();return t<0&&(_(-(r.rank+1)<=t,()=>`Axis must be in the interval [${-(r.rank+1)}, ${r.rank}]`),t=r.rank+t+1),o.splice(t,0,1),Q(r,o)}let cr=j({expandDims_:uM});function pM(n){let t=M(n,"x","expm1"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.expm1(t);return o([t]),s},e,null,mu)}let vw=j({expm1_:pM});function hM(n,t){let e=null,r=M(n,"x","tile",e);_(r.rank===t.length,()=>`Error in transpose: rank of input ${r.rank} must match length of reps ${t}.`);let o=(p,f)=>{let m=p.tile(r,t);return f([r]),m},s=[r],c={x:r},l={reps:t};return X.runKernelFunc(o,c,null,Tx,l,s)}let ii=j({tile_:hM});function fM(n,t,e,r="float32"){t==null&&(t=n);let o=Se([n,t],r),s=n<=t?n:t;for(let l=0;lo.fill(n,t,e),{},null,Zb,r)}function dM(n){let t=M(n,"x","floor"),e={x:t};return X.runKernelFunc(r=>r.floor(t),e,null,gu)}let Sc=j({floor_:dM});let Tw=30;function sp(n){return n<=Tw?n:rf(n,Math.floor(Math.sqrt(n)))}function mM(n,t){let e=!1,r;for(n<=Tw?(r=n,e=!0):r=rf(n,Math.floor(Math.sqrt(n)));!e;)r>t||r===n?e=!0:r=rf(n,r+1);return r}function gM(n,t,e){let r=[],o=n.length;for(let s=0;s{let m=Vt(e,r.shape)[0],y=S_(r,o,m),b=p.gather(r,Q(o,[o.size]),m);return f([r,o]),Q(b,y.outputShape)};return X.runKernelFunc(l,s,null,tx,c)}let $c=j({gather_:bM});function xM(n,t){let e=M(n,"a","greater"),r=M(t,"b","greater");[e,r]=Je(e,r),le(e.shape,r.shape);let o=c=>c.greater(e,r),s={a:e,b:r};return X.runKernelFunc(o,s,null,lN)}let Fr=j({greater_:xM});function wM(n,t){let e=M(n,"a","greaterEqual"),r=M(t,"b","greaterEqual");[e,r]=Je(e,r),le(e.shape,r.shape);let o=(c,l)=>{let p=c.greaterEqual(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,ex)}let ds=j({greaterEqual_:wM});function vM(n){let t=M(n,"input","imag"),e=o=>o.imag(t),r={input:t};return X.runKernelFunc(e,r,null,wf)}let ip=j({imag_:vM});function TM(n){let t=M(n,"x","isFinite"),e={x:t};return X.runKernelFunc(r=>r.isFinite(t),e,null,xu)}let $_=j({isFinite_:TM});function kM(n){let t=M(n,"x","isInf"),e={x:t};return X.runKernelFunc(r=>r.isInf(t),e,null,wu)}let I_=j({isInf_:kM});function NM(n){let t=M(n,"x","isNaN"),e={x:t};return X.runKernelFunc(r=>r.isNaN(t),e,null,vu)}let E_=j({isNaN_:NM});function _M(n,t){let e=M(n,"a","maximum"),r=M(t,"b","maximum");[e,r]=Je(e,r),e.dtype==="bool"&&(e=$t(e,"int32"),r=$t(r,"int32")),le(e.shape,r.shape);let o=(c,l)=>{let p=c.maximum(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,ox)}let Xr=j({maximum_:_M});function Et(n,t){if((gn(n)&&t!=="string"||Array.isArray(n))&&t!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(t==="string"&&gn(n)&&!(n instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");let e=[],r=[];return ti(n,e,r,t)}function CM(n,t=.2){let e=M(n,"x","leakyRelu");return Xr(nt(Et(t),e),e)}let fd=j({leakyRelu_:CM});function SM(n,t){let e=M(n,"a","less"),r=M(t,"b","less");[e,r]=Je(e,r),le(e.shape,r.shape);let o=c=>c.less(e,r),s={a:e,b:r};return X.runKernelFunc(o,s,null,uN)}let ap=j({less_:SM});function $M(n,t){let e=M(n,"a","lessEqual"),r=M(t,"b","lessEqual");[e,r]=Je(e,r),le(e.shape,r.shape);let o=(c,l)=>{let p=c.lessEqual(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,pN)}let ai=j({lessEqual_:$M});function D_(n,t,e){if(e<=0)throw new Error("The number of values should be positive.");let r={start:n,stop:t,num:e};return 
X.runKernelFunc(o=>o.linspace(n,t,e),{},null,hN,r)}function IM(n,t=5,e=1,r=1,o=.5){let s=M(n,"x","localResponseNormalization");_(s.rank===4||s.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got + rank ${s.rank}.`),_(gt(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let c=s,l=!1;s.rank===3&&(l=!0,c=Q(s,[1,s.shape[0],s.shape[1],s.shape[2]]));let p=(b,v)=>{let T=b.localResponseNormalization4D(c,t,e,r,o);return v([c,T]),T},f={x:c},m={depthRadius:t,bias:e,alpha:r,beta:o},y=X.runKernelFunc(p,f,null,rx,m);return l?Q(y,[y.shape[1],y.shape[2],y.shape[3]]):y}let kw=j({localResponseNormalization_:IM});function EM(n){let t=M(n,"x","log"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.log(t);return o([t]),s},e,null,Tu)}let wr=j({log_:EM});function DM(n){let t=M(n,"x","log1p"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.log1p(t);return o([t]),s},e,null,ku)}let dd=j({log1p_:DM});function AM(n){return _(Zs(n),()=>"The f passed in grad(f) must be a function"),(t,e)=>{let r=M(t,"x","tf.grad",null),o=e!=null?M(e,"dy","tf.grad"):null;return X.tidy(()=>{let{value:s,grads:c}=X.gradients(()=>n(r),[r],o);return o!=null&&W(s.shape,o.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),md(c),c[0]})}}function FM(n){return _(Zs(n),()=>"The f passed in grads(f) must be a function"),(t,e)=>{_(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");let r=Xu(t,"args","tf.grads",null),o=e!=null?M(e,"dy","tf.grads"):null;return X.tidy(()=>{let{value:s,grads:c}=X.gradients(()=>n(...r),r,o);return o!=null&&W(s.shape,o.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),md(c),c})}}function RM(n){return _(Zs(n),()=>"The f passed in valueAndGrad(f) must be a function"),(t,e)=>{_(t instanceof ot,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),_(e==null||e instanceof ot,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");let{grads:r,value:o}=X.gradients(()=>n(t),[t],e);return md(r),{grad:r[0],value:o}}}function PM(n){return _(Zs(n),()=>"The f passed in valueAndGrads(f) must be a function"),(t,e)=>{_(Array.isArray(t)&&t.every(o=>o instanceof ot),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),_(e==null||e instanceof ot,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");let r=X.gradients(()=>n(...t),t,e);return e!=null&&W(r.value.shape,e.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),md(r.grads),r}}function Nw(n,t){_(Zs(n),()=>"The f passed in variableGrads(f) must be a function"),_(t==null||Array.isArray(t)&&t.every(f=>f instanceof Ku),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");let e=t!=null;if(!e){t=[];for(let f in X.registeredVariables)t.push(X.registeredVariables[f])}let r=e?t.filter(f=>!f.trainable):null,o=t.length;t=t.filter(f=>f.trainable),_(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${o} variables is trainable.`);let s=!0,{value:c,grads:l}=X.gradients(n,t,null,s);_(l.some(f=>f!=null),()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). 
Please make sure the operations that use variables are inside the function f passed to minimize()."),_(c.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${c.rank} tensor`);let p={};return t.forEach((f,m)=>{l[m]!=null&&(p[f.name]=l[m])}),r!=null&&r.forEach(f=>p[f.name]=null),{value:c,grads:p}}function zo(n){return X.customGrad(n)}function md(n){let t=n.filter(e=>e==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that + the f you passed encloses all operations that lead from x to y.`)}function OM(n){let t=M(n,"x","neg"),e={x:t};return X.runKernelFunc(r=>r.neg(t),e,null,ux)}let tn=j({neg_:OM});function LM(n){let t=M(n,"x","softplus"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.softplus(t);return o([t]),s},e,null,Mu)}let Ic=j({softplus_:LM});function MM(n){let t=M(n,"x","logSigmoid"),e=zo(r=>{let o=tn(Ic(tn(r))),s=c=>{let l=nt(c,Bo(tn(r)));return l};return{value:o,gradFunc:s}});return e(t)}let A_=j({logSigmoid_:MM});function BM(n,t=null,e=!1){let r=M(n,"x","max"),o=(l,p)=>{let f=Vt(t,r.shape),m=f,y=ir(m,r.rank),b=r;y!=null&&(b=Kt(r,y),m=xr(m.length,b.rank));let v=l.max(b,m);y!=null&&b.dispose();let T=v;if(e){let N=Rn(T.shape,Vt(t,r.shape));T=Q(T,N),v.dispose()}return p([r,T]),T},s={x:r},c={reductionIndices:t,keepDims:e};return X.runKernelFunc(o,s,null,Nu,c)}let lr=j({max_:BM});function zM(n,t){let e=M(n,"a","sub"),r=M(t,"b","sub");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.subtract(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,dc)}let Dt=j({sub_:zM});function WM(n,t=null,e=!1){let r=M(n,"x","sum");r.dtype==="bool"&&(r=$t(r,"int32"));let o=(l,p)=>{p([r]);let f=Vt(t,r.shape),m=ir(f,r.rank),y=f,b=r;m!=null&&(b=Kt(r,m),y=xr(y.length,r.rank));let v=l.sum(b,y);if(e){let T=Rn(v.shape,f);v=Q(v,T)}return v},s={x:r},c={axis:t,keepDims:e};return X.runKernelFunc(o,s,null,xx,c)}let zt=j({sum_:WM});function VM(n,t=-1){let e=M(n,"logits","logSoftmax");if(t===-1&&(t=e.rank-1),t!==e.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${e.rank} and axis was ${t}`);let r=(c,l)=>{let p=!0,f=lr(n,t,!0),m=Dt(n,f),y=Dt($t(m,"float32"),wr(zt(Ar(m),t,p)));return l([y]),y},o={logits:e},s={axis:t};return X.runKernelFunc(r,o,null,nx,s)}let gd=j({logSoftmax_:VM});function GM(n,t=null,e=!1){let r=M(n,"x","logSumExp"),o=Vt(t,r.shape),s=lr(r,o,!0),c=Dt(r,s),l=Ar(c),p=zt(l,o),f=wr(p),m=Tt(Q(s,f.shape),f);if(e){let y=Rn(m.shape,o);return Q(m,y)}return m}let _w=j({logSumExp_:GM});function UM(n,t){let e=M(n,"a","logicalAnd","bool"),r=M(t,"b","logicalAnd","bool");le(e.shape,r.shape);let o={a:e,b:r};return X.runKernelFunc(s=>s.logicalAnd(e,r),o,null,fN)}let Yr=j({logicalAnd_:UM});function qM(n){let t=M(n,"x","logicalNot","bool"),e={x:t};return X.runKernelFunc(r=>r.logicalNot(t),e,null,vf)}let cp=j({logicalNot_:qM});function HM(n,t){let e=M(n,"a","logicalOr","bool"),r=M(t,"b","logicalOr","bool");le(e.shape,r.shape);let o={a:e,b:r};return X.runKernelFunc(s=>s.logicalOr(e,r),o,null,dN)}let yd=j({logicalOr_:HM});function jM(n,t){let e=M(n,"a","logicalXor","bool"),r=M(t,"b","logicalXor","bool");return le(e.shape,r.shape),Yr(yd(n,t),cp(Yr(n,t)))}let F_=j({logicalXor_:jM});function KM(n,t,e,r,o){let s=M(n,"x","maxPool"),c=1,l=s,p=!1;s.rank===3&&(p=!0,l=Q(s,[1,s.shape[0],s.shape[1],s.shape[2]])),_(l.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${l.rank}.`),_(fn(e,c),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${e} and dilations '${c}'`),o!=null&&_(gt(r),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${o} but got pad ${r}.`);let f=(v,T)=>{let N=Kn(l.shape,t,e,1,r,o),S;return N.filterWidth===1&&N.filterHeight===1&<(N.inShape,N.outShape)?S=l.clone():S=v.maxPool(l,N),T([l,S]),S},m={x:l},y={filterSize:t,strides:e,pad:r,dimRoundingMode:o},b=X.runKernelFunc(f,m,null,_u,y);return p?Q(b,[b.shape[1],b.shape[2],b.shape[3]]):b}let lp=j({maxPool_:KM});function XM(n,t=[1,1,1],e,r,o,s="NDHWC",c){c==null?c=[1,1,1]:yn("dilations is deprecated, this field will be gone in v3.0.0.");let l=M(n,"x","maxPool3d"),p=l,f=!1;l.rank===4&&(f=!0,p=Q(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),_(p.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${p.rank}.`),_(s==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${s}`),_(fn(e,c),()=>`Error in maxPool3d: Either strides or dilations must be 1. Got strides ${e} and dilations '${c}'`),o!=null&&_(gt(r),()=>`Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${o} but got pad ${r}.`);let m=(T,N)=>{c==null&&(c=[1,1,1]);let S=tp(p.shape,t,e,c,r,o,s),D=T.maxPool3d(p,S);return N([p,D]),D},y={x:p},b={filterSize:t,strides:e,pad:r,dimRoundingMode:o,dataFormat:s,dilations:c},v=X.runKernelFunc(m,y,null,sx,b);return f?Q(v,[v.shape[1],v.shape[2],v.shape[3],v.shape[4]]):v}let Cw=j({maxPool3d_:XM});function YM(n,t,e,r,o=!1){let s=M(n,"x","maxPoolWithArgmax"),c={x:s},l={filterSize:t,strides:e,pad:r,includeBatchInIndex:o},p=X.runKernel(kf,c,l);return{result:p[0],indexes:p[1]}}let R_=j({maxPoolWithArgmax_:YM});function xe(n,t="float32"){if(t==="complex64"){let r=xe(n,"float32"),o=xe(n,"float32");return us(r,o)}let e=ac(G(n),t);return X.makeTensor(e,n,t)}function ho(n,t="float32"){if(t==="complex64"){let r=ho(n,"float32"),o=xe(n,"float32");return us(r,o)}let e=Ob(G(n),t);return X.makeTensor(e,n,t)}function JM(n,t=null,e=!1){let r=M(n,"x","mean"),o=Vt(t,r.shape),s=Fn(r.shape,o),c=s[1],l=G(c),p={x:r},f={axis:t,keepDims:e},m=()=>{let b=Et(l),v=b.dtype===r.dtype?r:$t(r,b.dtype),T=Bt(v,b);return zt(T,t,e)},y=zo(b=>{let v=X.runKernelFunc(m,p,null,ix,f),T=N=>{let S=b.shape.slice();o.forEach(P=>{S[P]=1});let D=Q(N,S),I=Bt(nt(D,ho(b.shape,"float32")),l);return I};return{value:v,gradFunc:T}});return y(r)}let en=j({mean_:JM});function ZM(n,t=null,e=!1){let r=M(n,"x","min"),o=(l,p)=>{let f=Vt(t,r.shape),m=f,y=ir(m,r.rank),b=r;y!=null&&(b=Kt(r,y),m=xr(m.length,r.rank));let v=l.min(b,m);y!=null&&b.dispose();let T=v;if(e){let N=Rn(T.shape,f);T=Q(v,N),v.dispose()}return p([r,T]),T},s={x:r},c={axis:t,keepDims:e};return X.runKernelFunc(o,s,null,ax,c)}let Ec=j({min_:ZM});function QM(n,t){let e=M(n,"a","minimum"),r=M(t,"b","minimum");[e,r]=Je(e,r),e.dtype==="bool"&&(e=$t(e,"int32"),r=$t(r,"int32")),le(e.shape,r.shape);let o=(c,l)=>{let p=c.minimum(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,cx)}let oa=j({minimum_:QM});function tB(n,t,e){_(e==="reflect"||e==="symmetric",()=>`Invalid mode. Mode must be either reflect or symmetric. Got ${e}.`);let r=M(n,"x","mirrorPad");if(r.rank===0)throw new Error("mirrorPad(scalar) is not defined. Pass non-scalar to mirrorPad");_(t.length===r.rank,()=>`Padding doesn't match input. Must be ${r.rank}. Got ${t.length}.`);let o=e==="reflect"?1:0;for(let l=0;l"Invalid number of paddings. 
Must be length of 2 each."),_(t[l][0]>=0&&t[l][0]<=r.shape[l]-o&&t[l][1]>=0&&t[l][1]<=r.shape[l]-o,()=>`Padding in dimension ${l} cannot be greater than or equal to ${r.shape[l]-o} or less than 0 for input of shape ${r.shape}`);let s={paddings:t,mode:e},c={x:r};return X.runKernel(Cu,c,s)}let Sw=j({mirrorPad_:tB});function eB(n,t){let e=M(n,"a","mod"),r=M(t,"b","mod");[e,r]=Je(e,r);let o=(c,l)=>{let p=c.mod(e,r);return l([e,r]),p},s={a:e,b:r};return X.runKernelFunc(o,s,null,lx)}let bd=j({mod_:eB});function nB(n){let t=M(n,"x","square"),e={},r=[t],o=[];return X.runKernelFunc((s,c)=>(c([t]),s.square(t)),{x:t},null,"Square",e,r,o)}let De=j({square_:nB});function rB(n,t=null,e=!1){n=M(n,"x","moments");let r=Vt(t,n.shape),o=en(n,r,e),s=o.shape;e||(s=Rn(o.shape,r));let c=De(Dt($t(n,"float32"),Q(o,s))),l=en(c,r,e);return{mean:o,variance:l}}let xd=j({moments_:rB});function oB(n,t,e,r){let o=M(t,"data","multiRNNCell"),s=Xu(e,"c","multiRNNCell"),c=Xu(r,"h","multiRNNCell"),l=o,p=[];for(let y=0;y2)throw new Error(`Rank of probabilities must be 1 or 2, but is ${c}`);e=e||Math.random();let l=c===1?Q(o,[1,-1]):o,p=X.runKernelFunc(f=>f.multinomial(l,r,t,e),{logits2D:l});return c===1?Q(p,[p.size]):p}let P_=j({multinomial_:iB});function aB(n,t){let e=M(n,"a","notEqual"),r=M(t,"b","notEqual");[e,r]=Je(e,r),le(e.shape,r.shape);let o=c=>c.notEqual(e,r),s={a:e,b:r};return X.runKernelFunc(o,s,null,Su)}let ci=j({notEqual_:aB});function cB(n){let t=M(n,"input","real"),e=o=>o.real(t),r={input:t};return X.runKernelFunc(e,r,null,$f)}let Dc=j({real_:cB});function lB(n){let t=M(n,"x","onesLike"),e=(o,s)=>{if(t.dtype==="complex64"){let c=qn(Dc(t)),l=re(ip(t));return us(c,l)}return o.onesLike(t)},r={x:t};return X.runKernelFunc(e,r,null,hx)}let qn=j({onesLike_:lB});function uB(n,t){let e=M(n,"v1","outerProduct"),r=M(t,"v2","outerProduct");_(e.rank===1&&r.rank===1,()=>`Error in outerProduct: inputs must be rank 1, but got ranks ${e.rank} and ${r.rank}.`);let o=Q(e,[-1,1]),s=Q(r,[1,-1]);return ge(o,s)}let pB=j({outerProduct_:uB});function hB(n,t,e=0){let r=M(n,"x","pad");if(r.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");let o=(l,p)=>(p([r]),l.pad(r,t,e)),s={paddings:t,constantValue:e},c={x:r};return X.runKernelFunc(o,c,null,Cf,s)}let Wo=j({pad_:hB});function fB(n,t,e=0){return _(t.length===2,()=>"Invalid number of paddings. Must be length of 2."),Wo(n,[t],e)}let dB=j({pad1d_:fB});function mB(n,t,e=0){return _(t.length===2&&t[0].length===2&&t[1].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),Wo(n,t,e)}let gB=j({pad2d_:mB});function yB(n,t,e=0){return _(t.length===3&&t[0].length===2&&t[1].length===2&&t[2].length===2,()=>"Invalid number of paddings. Must be length of 2 each."),Wo(n,t,e)}let bB=j({pad3d_:yB});function xB(n,t,e=0){return _(t.length===4&&t[0].length===2&&t[1].length===2&&t[2].length===2&&t[3].length===2,()=>"Invalid number of paddings. 
Must be length of 2 each."),Wo(n,t,e)}let wB=j({pad4d_:xB});function vB(n,t,e){let r=M(n,"x","spaceToBatchND");_(r.rank>=1+t.length,()=>`input rank ${r.rank} should be > than [blockShape] ${t.length}`),_(e.length===t.length,()=>`paddings.shape[0] ${e.length} must be equal to [blockShape] ${t.length}`),_(r.shape.reduce((l,p,f)=>f>0&&f<=t.length?l&&(p+e[f-1][0]+e[f-1][1])%t[f-1]===0:l,!0),()=>`input spatial dimensions ${r.shape.slice(1)} with paddings ${e.toString()} must be divisible by blockShapes ${t.toString()}`);let o=l=>l.spaceToBatchND(r,t,e),s={x:r},c={blockShape:t,paddings:e};return X.runKernelFunc(o,s,null,Ef,c)}let up=j({spaceToBatchND_:vB});function TB(n,t,e,r,o,s){o==null&&(o=[1,1]),s==null&&(s=1),r===0&&(r="valid");let c=M(n,"x","maxPool"),l=c,p=!1;c.rank===3&&(p=!0,l=Q(c,[1,c.shape[0],c.shape[1],c.shape[2]])),_(fn(s,o),()=>`Error in pool: Either strides or dilations must be 1. Got strides ${s} and dilations '${o}'`);let f=Kn(l.shape,t,s,o,r),m=[f.dilationHeight,f.dilationWidth],y;r==="same"?y=NB([f.filterHeight,f.filterWidth],m):y=[[0,0],[0,0]];let b=m[0]===1&&m[1]===1,[v,T]=kB([f.inHeight,f.inWidth],m,y),N=b?r:"valid",S=b?l:up(l,m,v),D=e==="avg"?()=>ep(S,t,s,N):()=>lp(S,t,s,N),I=D(),P=b?I:np(I,m,T);return p?Q(P,[P.shape[1],P.shape[2],P.shape[3]]):P}function kB(n,t,e){let r=e.map(m=>m[0]),o=e.map(m=>m[1]),s=n.concat(r,o),c=t.map((m,y)=>(m-s[y]%m)%m),l=o.map((m,y)=>m+c[y]),p=t.map((m,y)=>[r[y],l[y]]),f=t.map((m,y)=>[0,c[y]]);return[p,f]}function NB(n,t){let e=n.map((c,l)=>c+(c-1)*(t[l]-1)),r=e.map(c=>c-1),o=r.map(c=>Math.floor(c/2)),s=r.map((c,l)=>c-o[l]);return r.map((c,l)=>[o[l],s[l]])}let O_=j({pool_:TB});function _B(n,t){let e=M(n,"base","pow"),r=M(t,"exp","pow");[e,r]=Je(e,r);let o={a:e,b:r},s=(c,l)=>{let p=c.pow(e,r);return l([e,r,p]),p};return X.runKernelFunc(s,o,null,dx)}let fo=j({pow_:_B});function CB(n,t){let e=M(n,"x","prelu"),r=M(t,"alpha","prelu"),o=(c,l)=>{let p=c.prelu(e,r);return l([e,r]),p},s={x:e,alpha:r};return X.runKernelFunc(o,s,null,Sf)}let pp=j({prelu_:CB});function SB(n,t=null,e=!1){let r=M(n,"x","prod");r.dtype==="bool"&&(r=$t(r,"int32"));let o=l=>{let p=Vt(t,r.shape),f=ir(p,r.rank),m=p,y=r;f!=null&&(y=Kt(r,f),m=xr(m.length,r.rank));let b=l.prod(y,m);if(e){let v=Rn(b.shape,p);b=Q(b,v)}return b},s={x:r},c={axis:t,keepDims:e};return X.runKernelFunc(o,s,null,yN,c)}let wd=j({prod_:SB});function $B(n,t,e){let r=G(n),o=null;if(e==null||e==="float32")o=new Float32Array(r);else if(e==="int32")o=new Int32Array(r);else if(e==="bool")o=new Uint8Array(r);else throw new Error(`Unknown data type ${e}`);for(let s=0;s>>0,b-=p,b*=p,p=b>>>0,b-=p,p+=b*4294967296}return(p>>>0)*23283064365386963e-26};return f}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.alea=c})(Ac,n,!1)}),DB=sa(function(n){(function(t,e,r){function o(l){var p=this,f="";p.x=0,p.y=0,p.z=0,p.w=0,p.next=function(){var y=p.x^p.x<<11;return p.x=p.y,p.y=p.z,p.z=p.w,p.w^=p.w>>>19^y^y>>>8},l===(l|0)?p.x=l:f+=l;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=f.next()>>>11,v=(f.next()>>>0)/4294967296,T=(b+v)/(1<<21);while(T===0);return T},y.int32=f.next,y.quick=y,m&&(typeof m=="object"&&s(m,f),y.state=function(){return s(f,{})}),y}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.xor128=c})(Ac,n,!1)}),AB=sa(function(n){(function(t,e,r){function o(l){var p=this,f="";p.next=function(){var y=p.x^p.x>>>2;return p.x=p.y,p.y=p.z,p.z=p.w,p.w=p.v,(p.d=p.d+362437|0)+(p.v=p.v^p.v<<4^(y^y<<1))|0},p.x=0,p.y=0,p.z=0,p.w=0,p.v=0,l===(l|0)?p.x=l:f+=l;for(var 
m=0;m>>4),p.next()}function s(l,p){return p.x=l.x,p.y=l.y,p.z=l.z,p.w=l.w,p.v=l.v,p.d=l.d,p}function c(l,p){var f=new o(l),m=p&&p.state,y=function(){return(f.next()>>>0)/4294967296};return y.double=function(){do var b=f.next()>>>11,v=(f.next()>>>0)/4294967296,T=(b+v)/(1<<21);while(T===0);return T},y.int32=f.next,y.quick=y,m&&(typeof m=="object"&&s(m,f),y.state=function(){return s(f,{})}),y}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.xorwow=c})(Ac,n,!1)}),FB=sa(function(n){(function(t,e,r){function o(l){var p=this;p.next=function(){var m=p.x,y=p.i,b,v,T;return b=m[y],b^=b>>>7,v=b^b<<24,b=m[y+1&7],v^=b^b>>>10,b=m[y+3&7],v^=b^b>>>3,b=m[y+4&7],v^=b^b<<7,b=m[y+7&7],b=b^b<<13,v^=b^b<<9,m[y]=v,p.i=y+1&7,v};function f(m,y){var b,v,T=[];if(y===(y|0))v=T[0]=y;else for(y=""+y,b=0;b0;--b)m.next()}f(p,l)}function s(l,p){return p.x=l.x.slice(),p.i=l.i,p}function c(l,p){l==null&&(l=+new Date);var f=new o(l),m=p&&p.state,y=function(){return(f.next()>>>0)/4294967296};return y.double=function(){do var b=f.next()>>>11,v=(f.next()>>>0)/4294967296,T=(b+v)/(1<<21);while(T===0);return T},y.int32=f.next,y.quick=y,m&&(m.x&&s(m,f),y.state=function(){return s(f,{})}),y}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.xorshift7=c})(Ac,n,!1)}),RB=sa(function(n){(function(t,e,r){function o(l){var p=this;p.next=function(){var m=p.w,y=p.X,b=p.i,v,T;return p.w=m=m+1640531527|0,T=y[b+34&127],v=y[b=b+1&127],T^=T<<13,v^=v<<17,T^=T>>>15,v^=v>>>12,T=y[b]=T^v,p.i=b,T+(m^m>>>16)|0};function f(m,y){var b,v,T,N,S,D=[],I=128;for(y===(y|0)?(v=y,y=null):(y=y+"\0",v=0,I=Math.max(I,y.length)),T=0,N=-32;N>>15,v^=v<<4,v^=v>>>13,N>=0&&(S=S+1640531527|0,b=D[N&127]^=v+S,T=b==0?T+1:0);for(T>=128&&(D[(y&&y.length||0)&127]=-1),T=127,N=4*128;N>0;--N)v=D[T+34&127],b=D[T=T+1&127],v^=v<<13,b^=b<<17,v^=v>>>15,b^=b>>>12,D[T]=v^b;m.w=S,m.X=D,m.i=T}f(p,l)}function s(l,p){return p.i=l.i,p.w=l.w,p.X=l.X.slice(),p}function c(l,p){l==null&&(l=+new Date);var f=new o(l),m=p&&p.state,y=function(){return(f.next()>>>0)/4294967296};return y.double=function(){do var b=f.next()>>>11,v=(f.next()>>>0)/4294967296,T=(b+v)/(1<<21);while(T===0);return T},y.int32=f.next,y.quick=y,m&&(m.X&&s(m,f),y.state=function(){return s(f,{})}),y}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.xor4096=c})(Ac,n,!1)}),PB=sa(function(n){(function(t,e,r){function o(l){var p=this,f="";p.next=function(){var y=p.b,b=p.c,v=p.d,T=p.a;return y=y<<25^y>>>7^b,b=b-v|0,v=v<<24^v>>>8^T,T=T-y|0,p.b=y=y<<20^y>>>12^b,p.c=b=b-v|0,p.d=v<<16^b>>>16^T,p.a=T-y|0},p.a=0,p.b=0,p.c=2654435769|0,p.d=1367130551,l===Math.floor(l)?(p.a=l/4294967296|0,p.b=l|0):f+=l;for(var m=0;m>>0)/4294967296};return y.double=function(){do var b=f.next()>>>11,v=(f.next()>>>0)/4294967296,T=(b+v)/(1<<21);while(T===0);return T},y.int32=f.next,y.quick=y,m&&(typeof m=="object"&&s(m,f),y.state=function(){return s(f,{})}),y}e&&e.exports?e.exports=c:r&&r.amd?r(function(){return c}):this.tychei=c})(Ac,n,!1)}),ia=sa(function(n){(function(t,e){var r=this,o=256,s=6,c=52,l="random",p=e.pow(o,s),f=e.pow(2,c),m=f*2,y=o-1,b;function v(E,L,B){var q=[];L=L==!0?{entropy:!0}:L||{};var H=D(S(L.entropy?[E,P(t)]:E==null?I():E,3),q),Z=new T(q),J=function(){for(var it=Z.g(s),pt=p,ht=0;it=m;)it/=2,pt/=2,ht>>>=1;return(it+ht)/pt};return J.int32=function(){return Z.g(4)|0},J.quick=function(){return Z.g(4)/4294967296},J.double=J,D(P(Z.S),t),(L.pass||B||function(it,pt,ht,dt){return dt&&(dt.S&&N(dt,Z),it.state=function(){return N(Z,{})}),ht?(e[l]=it,pt):it})(J,H,"global"in 
L?L.global:this==e,L.state)}e["seed"+l]=v;function T(E){var L,B=E.length,q=this,H=0,Z=q.i=q.j=0,J=q.S=[];for(B||(E=[B++]);H=1||c===0);let l=Math.sqrt(-2*Math.log(c)/c);t=this.mean+this.stdDev*o*l,e=this.mean+this.stdDev*s*l,(!this.truncated||this.isValidTruncated(t))&&(r=!0)}return(!this.truncated||this.isValidTruncated(e))&&(this.nextVal=this.convertValue(e)),this.convertValue(t)}convertValue(t){return this.dtype==null||this.dtype==="float32"?t:Math.round(t)}isValidTruncated(t){return t<=this.upper&&t>=this.lower}}class LB{constructor(t,e,r,o){this.alpha=t,this.beta=1/e,this.dtype=r;let s=o||Math.random();this.randu=Fc(s.toString()),this.randn=new $w(0,1,r,!1,this.randu()),t<1?this.d=t+2/3:this.d=t-1/3,this.c=1/Math.sqrt(9*this.d)}nextValue(){let t,e,r,o,s,c;for(;;){do o=this.randn.nextValue(),c=1+this.c*o;while(c<=0);if(c*=c*c,t=o*o,e=1-.331*t*t,r=.5*t+this.d*(1-c+Math.log(c)),s=this.randu(),sthis.dtype==null||this.dtype==="float32",this.min=t,this.range=e-t,this.dtype=r,o==null&&(o=Math.random()),typeof o=="number"&&(o=o.toString()),!this.canReturnFloat()&&this.range<=1)throw new Error(`The difference between ${t} - ${e} <= 1 and dtype is not float`);this.random=Fc(o)}convertValue(t){return this.canReturnFloat()?t:Math.round(t)}nextValue(){return this.convertValue(this.min+this.range*this.random())}}function xot(n){let t=n.length,e=WB(n),r=zB(n),o=t/6*(Math.pow(e,2)+.25*Math.pow(r-3,2)),s=5.991;if(o>s)throw new Error(`Invalid p-value for JB: ${o}`)}function wot(n,t,e,r){r==null&&(r=td());let o=Iw(n);Qx(o,t,r),Qx(BB(n,o),e,r)}function Iw(n){let t=0;for(let e=0;e{let c=n===t,l=n1;if(c||l||p)return xe([0],r);let f=Math.abs(Math.ceil((t-n)/e)),m=ac(f,r);t{let s=r.reciprocal(t);return o([t]),s},e,null,$u)}let Dw=j({reciprocal_:HB});function jB(n){let t=M(n,"x","relu"),e=(o,s)=>(s([t]),t.dtype==="bool"?$t(t,"int32"):o.relu(t)),r={x:t};return X.runKernelFunc(e,r,null,Iu)}let Vo=j({relu_:jB});function KB(n){let t=M(n,"x","relu6"),e=(o,s)=>(s([t]),t.dtype==="bool"?$t(t,"int32"):o.relu6(t)),r={x:t};return X.runKernelFunc(e,r,null,Du)}let Aw=j({relu6_:KB});function XB(n,t){let e=M(n,"x","reverse"),r=c=>{let l=Vt(t,e.shape);if(e.rank===0)return ni(e);let p=c.reverse(e,l);return Q(p,e.shape)},o={x:e},s={dims:t};return X.runKernelFunc(r,o,null,yx,s)}let Rr=j({reverse_:XB});function YB(n){let t=M(n,"x","reverse");return _(t.rank===1,()=>`Error in reverse1D: x must be rank 1 but got rank ${t.rank}.`),Rr(t,0)}let JB=j({reverse1d_:YB});function ZB(n,t){let e=M(n,"x","reverse");return _(e.rank===2,()=>`Error in reverse2D: x must be rank 2 but got rank ${e.rank}.`),Rr(e,t)}let QB=j({reverse2d_:ZB});function t3(n,t){let e=M(n,"x","reverse");return _(e.rank===3,()=>`Error in reverse3D: x must be rank 3 but got rank ${e.rank}.`),Rr(e,t)}let e3=j({reverse3d_:t3});function n3(n,t){let e=M(n,"x","reverse");return _(e.rank===4,()=>`Error in reverse4D: x must be rank 4 but got rank ${e.rank}.`),Rr(e,t)}let r3=j({reverse4d_:n3});function o3(n){let t=M(n,"x","round"),e={x:t};return X.runKernelFunc(r=>r.round(t),e,null,Au)}let Fw=j({round_:o3});function s3(n){let t=M(n,"x","rsqrt"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.rsqrt(t);return o([t]),s},e,null,Fu)}let vd=j({rsqrt_:s3});function i3(n){let t=M(n,"x","selu"),e=(o,s)=>{let c=o.selu(t);return s([t]),c},r={x:t};return X.runKernelFunc(e,r,null,Ru)}let Td=j({selu_:i3});function a3(n,t,e,r,o,s=[1,1],c="NHWC"){let 
l=M(n,"x","separableConv2d"),p=M(t,"depthwiseFilter","separableConv2d"),f=M(e,"pointwiseFilter","separableConv2d"),m=l,y=!1;if(l.rank===3&&(y=!0,m=Q(l,[1,l.shape[0],l.shape[1],l.shape[2]])),c==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");_(m.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${m.rank}.`),_(p.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${p.rank}.`),_(f.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${p.rank}.`),_(f.shape[0]===1,()=>`Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${f.shape[0]}.`),_(f.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${f.shape[1]}.`);let b=p.shape[2],v=p.shape[3];_(f.shape[2]===b*v,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${b*v}, but got ${f.shape[2]}.`);let T=na(m,p,r,o,c,s),N=1,S=fs(T,f,N,"valid",c);return y?Q(S,[S.shape[1],S.shape[2],S.shape[3]]):S}let Rw=j({separableConv2d_:a3});async function c3(n,t){let e=M(n,"x","setdiff1d"),r=M(t,"y","setdiff1d");_(e.dtype===r.dtype,()=>`x and y should have the same dtype, but got x (${e.dtype}) and y (${r.dtype}).`),_(e.rank===1,()=>`x should be 1D tensor, but got x (${e.shape}).`),_(r.rank===1,()=>`y should be 1D tensor, but got y (${r.shape}).`);let o=await e.data(),s=await r.data(),c=new Set(s),l=0;for(let m=0;mr.sign(t),e,null,Ou)}let Pw=j({sign_:l3});function u3(n){let t=M(n,"x","sin"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.sin(t);return o([t]),s},e,null,hc)}let kd=j({sin_:u3});function p3(n){let t=M(n,"x","sinh"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.sinh(t);return o([t]),s},e,null,Pu)}let Nd=j({sinh_:p3});function h3(n,t,e){let r=M(n,"x","slice1d");return _(r.rank===1,()=>`slice1d expects a rank-1 tensor, but got a rank-${r.rank} tensor`),ce(r,[t],[e])}let _d=j({slice1d_:h3});function f3(n,t,e){let r=M(n,"x","slice2d");return _(r.rank===2,()=>`slice2d expects a rank-2 tensor, but got a rank-${r.rank} tensor`),ce(r,t,e)}let Ow=j({slice2d_:f3});function d3(n,t,e){let r=M(n,"x","slice3d");return _(r.rank===3,()=>`slice3d expects a rank-3 tensor, but got a rank-${r.rank} tensor`),ce(r,t,e)}let Cd=j({slice3d_:d3});function m3(n,t,e){let r=M(n,"x","slice4d");return _(r.rank===4,()=>`slice4d expects a rank-4 tensor, but got a rank-${r.rank} tensor`),ce(r,t,e)}let fp=j({slice4d_:m3});function g3(n,t=-1){let e=M(n,"logits","softmax","float32");if(t===-1&&(t=e.rank-1),t!==e.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${e.rank} and dim was ${t}`);let r={logits:e},o={dim:t};return X.runKernelFunc((s,c)=>{let l=s.softmax(e,t);return c([l]),l},r,null,vx,o)}let ca=j({softmax_:g3});function y3(n){_(n.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${n.dtype}.`);let t={input:n};return X.runKernelFunc(e=>{let r=n.shape[n.shape.length-1],o=n.size/r,s=n.as2D(o,r),c=e.fft(s);return c.reshape(n.shape)},t,null,yf)}let dp=j({fft_:y3});function b3(n){_(n.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${n.dtype}.`);let t={input:n};return X.runKernelFunc(e=>{let r=n.shape[n.shape.length-1],o=n.size/r,s=Q(n,[o,r]),c=e.ifft(s);return Q(c,n.shape)},t,null,xf)}let Rc=j({ifft_:b3});function x3(n){let t=n.shape[n.shape.length-1],e=n.size/t,r;if(t<=2){let o=Q(n,[e,t]);r=Rc(o)}else{let o=[e,2*(t-1)],s=Q(Dc(n),[e,t]),c=Q(ip(n),[e,t]),l=Rr(ce(s,[0,1],[e,t-2]),1),p=nt(Rr(ce(c,[0,1],[e,t-2]),1),Et(-1)),f=Qe([s,l],1),m=Qe([c,p],1),y=Q(us(f,m),[o[0],o[1]]);r=Rc(y)}if(r=Dc(r),n.rank===3&&n.shape[0]!==0){let o=r,s=n.shape[0];r=Q(r,[s,r.shape[0]/s,r.shape[1]]),o.dispose()}return r}let Sd=j({irfft_:x3});function M_(n,t,e=0){let r=[];if(typeof t=="number")_(n.shape[e]%t===0,()=>"Number of splits must evenly divide the axis."),r=new Array(t).fill(n.shape[e]/t);else{let o=t.reduce((c,l)=>(l===-1&&(c+=1),c),0);_(o<=1,()=>"There should be only one negative value in split array.");let s=t.indexOf(-1);if(s!==-1){let c=t.reduce((l,p)=>p>0?l+p:l);t[s]=n.shape[e]-c}_(n.shape[e]===t.reduce((c,l)=>c+l),()=>"The sum of sizes must match the size of the axis dimension."),r=t}return r}function w3(n,t,e=0){let r=M(n,"x","split"),o=(l,p)=>{let f=Vt(e,r.shape)[0],m=M_(r,t,f);return l.split(r,m,f)},s={x:r},c={numOrSizeSplits:t,axis:e};return X.runKernelFunc(o,s,null,wx,c)}let Tr=j({split_:w3});function v3(n,t){_(n.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${n.dtype}`);let e=n.shape[n.shape.length-1],r=n.size/e,o;if(t!=null&&t0),N=n.shape.map(S=>S);N[n.shape.length-1]=t,o=ce(n,T,N),e=t}else if(t!=null&&t>e){let T=n.shape.map(N=>N);T[n.shape.length-1]=t-e,o=Qe([n,xe(T)],n.shape.length-1),e=t}else o=n;let s=re(o),c=Q(us(o,s),[r,e]),l=dp(c),p=Math.floor(e/2)+1,f=Dc(l),m=ip(l),y=Tr(f,[p,e-p],f.shape.length-1),b=Tr(m,[p,e-p],m.shape.length-1),v=o.shape.slice();return v[o.shape.length-1]=p,Q(us(y[0],b[0]),v)}let mp=j({rfft_:v3});function T3(n){let t=M(n,"x","sqrt"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.sqrt(t);return o([t]),s},e,null,Bu)}let Pn=j({sqrt_:T3});function k3(n,t){let e=M(n,"a","squaredDifference"),r=M(t,"b","squaredDifference");[e,r]=Je(e,r),le(e.shape,r.shape);let o=(l,p)=>{let f=l.squaredDifference(e,r);return p([e,r]),f},s={a:e,b:r},c={};return X.runKernelFunc(o,s,null,fc,c)}let gp=j({squaredDifference_:k3});function N3(n,t){let e=M(n,"x","squeeze");return Q(e,ln(e.shape,t).newShape)}let li=j({squeeze_:N3});function _3(n,t=0){let e=Xu(n,"tensors","stack");if(_(e.length>=1,()=>"Pass at least one tensor to tf.stack"),e.length===1)return cr(e[0],t);let r=e[0].rank,o=e[0].shape,s=e[0].dtype;_(t<=r,()=>"Axis must be <= rank of the tensor"),e.forEach(l=>{W(o,l.shape,"All tensors passed to stack must have matching shapes"),_(s===l.dtype,()=>"All tensors passed to stack must have matching dtypes")});let c=e.map(l=>cr(l,t));return Qe(c,t)}let ur=j({stack_:_3});function C3(n,t=0){let e=M(n,"x","step"),r={x:e},o={alpha:t};return X.runKernelFunc(s=>s.step(e,t),r,null,Vu,o)}let Pc=j({step_:C3});function 
S3(n,t,e,r,o=0,s=0,c=0,l=0,p=0){let f=M(n,"x","stridedSlice"),m=v=>{r==null&&(r=new Array(t.length));let T=Jf(c);if(T.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(c!==0&&l!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(c!==0&&p!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");let N=f.rank-t.length,S=Jf(l),D=f.shape.slice();S.forEach(J=>{t[J]=0,e[J]=1,D.splice(J,0,1)}),f=Q(f,D);let{begin:I,end:P,strides:E}=s_(f.shape,T,N,t,e,r,o,s,c);t=I,e=P,r=E;let L=Jf(p);L.forEach(J=>{e[J]=t[J]+1,r[J]=1});let B=Zf(t,e,r),q=B.filter((J,it)=>L.indexOf(it)===-1),H=r.every(J=>J===1);if(H)return Q(ce(f,t,B),q);let Z=v.stridedSlice(f,t,e,r);return Q(Z,q)},y={x:f},b={begin:t,end:e,strides:r,beginMask:o,endMask:s,ellipsisMask:c,newAxisMask:l,shrinkAxisMask:p};return X.runKernelFunc(m,y,null,kN,b)}let Lw=j({stridedSlice_:S3});function $3(n){let t=M(n,"x","tan"),e={x:t};return X.runKernelFunc((r,o)=>{let s=r.tan(t);return o([t]),s},e,null,mc)}let Mw=j({tan_:$3});function ui(n,t,e){if(et(n),t!=null&&t.length!==2)throw new Error("tensor2d() requires shape to have two numbers");let r=Lo(n,e);if(r.length!==2&&r.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(r.length===1&&t==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return ti(n,t,r,e)}function Oc(n,t,e){if(et(n),t!=null&&t.length!==4)throw new Error("tensor4d() requires shape to have four numbers");let r=Lo(n,e);if(r.length!==4&&r.length!==1)throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray");if(r.length===1&&t==null)throw new Error("tensor4d() requires shape to be provided when `values` are a flat array");return ti(n,t,r,e)}function I3(n,t,e){if(et(n),t!=null&&t.length!==5)throw new Error("tensor5d() requires shape to have five numbers");let r=Lo(n,e);if(r.length!==5&&r.length!==1)throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray");if(r.length===1&&t==null)throw new Error("tensor5d() requires shape to be provided when `values` are a flat array");return ti(n,t,r,e)}function E3(n,t,e){if(et(n),t!=null&&t.length!==6)throw new Error("tensor6d() requires shape to have six numbers");let r=Lo(n,e);if(r.length!==6&&r.length!==1)throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray");if(r.length===1&&t==null)throw new Error("tensor6d() requires shape to be provided when `values` are a flat array");return t=t||r,ti(n,t,r,e)}function D3(n,t=1,e=!0){let r=M(n,"x","topk");if(r.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");let o=r.shape[r.shape.length-1];if(t>o)throw new Error(`'k' passed to topk() must be <= the last dimension (${o}) but got ${t}`);let s={x:r},c={k:t,sorted:e},[l,p]=X.runKernelFunc(f=>f.topk(r,t,e),s,null,NN,c);return{values:l,indices:p}}let Bw=j({topk_:D3});function A3(n,t=0,e=1,r,o){if(r!=null&&r==="bool")throw new Error("Unsupported data type $ { dtype }");let s=new $w(t,e,r,!0,o),c=Se(n,r);for(let l=0;l0,()=>"The input tensor must be at least 1D");let r={x:e},o={axis:t},[s,c]=X.runKernel(Af,r,o);return{values:s,indices:c}}let $d=j({unique_:F3});function R3(n,t,e){let r=M(n,"x","unsortedSegmentSum"),o=M(t,"segmentIds","unsortedSegmentSum","int32");_(gt(e),()=>"numSegments must be of dtype int");let s={x:r,segmentIds:o},c={numSegments:e},l=(p,f)=>{let m=p.unsortedSegmentSum(r,o,e);return f([o]),m};return 
X.runKernelFunc(l,s,null,Nx,c)}let zw=j({unsortedSegmentSum_:R3});function P3(n,t=0){let e=M(n,"x","unstack");_(t>=-e.shape.length&&t`Axis = ${t} is not in [-${e.shape.length}, ${e.shape.length})`),t<0&&(t+=e.shape.length);let r={value:e},o={axis:t},s=c=>c.unstack(e,t);return X.runKernelFunc(s,r,null,kx,o)}let mo=j({unstack_:P3});function B_(n,t=!0,e,r){return X.makeVariable(n,t,e,r)}function Id(n,t){let e=[];for(let s=0;s0,()=>"mask cannot be scalar"),W(l.slice(s,s+c),o.shape,"mask's shape must match the first K dimensions of tensor's shape,");let p=1;for(let N=s;N"Shape mismatch in v and x");let p=Et(1),f=Dt(p,l),m=nt(Dt(c,s),f);if(o){_(r!=null,()=>"When using zeroDebias: true, step is required.");let y=M(r,"step","movingAverage");m=Bt(m,Dt(p,fo(l,y)))}return Tt(s,m)}let nz=j({movingAverage_:ez});function rz(n,t,e){let r=M(n,"indices","scatterND","int32"),o=M(t,"updates","scatterND");Kx(o,r,e);let s=p=>p.scatterND(r,o,e),c={indices:r,updates:o},l={shape:e};return X.runKernelFunc(s,c,null,vN,l)}let nC=j({scatterND_:rz});function oz(n,t,e,r){if(n.dtype!=="int32")throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${n.dtype}.`);if(n.rank>2)throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${n.shape}.`);let o=n.rank>0?n.shape[0]:1,s=n.rank>1?n.shape[1]:1;if(e.length!==s)throw new Error(`outputShape has incorrect number of elements:, ${e.length}, should be: ${s}.`);let c=t.size;if(!(t.rank===0||t.rank===1&&c===o))throw new Error(`sparseValues has incorrect shape ${t.shape}, should be [] or [${o}]`);if(t.dtype!==r.dtype)throw new Error("sparseValues.dtype must match defaultValues.dtype")}function sz(n,t,e,r=0){let o=M(n,"sparseIndices","sparseToDense","int32"),s=M(t,"sparseValues","sparseToDense"),c=M(r,"defaultValue","sparseToDense",s.dtype);oz(o,s,e,c);let l={sparseIndices:o,sparseValues:s,defaultValue:c},p={outputShape:e};return X.runKernelFunc(f=>f.sparseToDense(o,s,e,c),l,null,TN,p)}let Vw=j({sparseToDense_:sz});function iz(n,t){let e=M(t,"indices","gatherND","int32"),r=M(n,"x","gatherND"),o=c=>c.gatherND(r,e),s={params:r,indices:e};return X.runKernelFunc(o,s,null,cN)}let rC=j({gatherND_:iz});function az(n,t){if(t==null)return n.shape.slice();if(lt(n.shape,t))return t;if(n.shape.length===t.length){let e=[];for(let r=0;r`x has to be a floating point tensor since it's going to be scaled, but got a ${o.dtype} tensor instead.`),_(t>=0&&t<1,()=>`rate must be a float in the range [0, 1), but got ${t}.`),t===0)return n instanceof ot?o.clone():o;let s=az(o,e),c=1-t,l=Bt(Sc(Tt(aa(s,0,1,"float32",r),c)),c);return nt(o,l)}let oC=j({dropout_:cz});function sC(n){return Math.floor(Math.pow(2,Math.ceil(Math.log(n)/Math.log(2))))}function Gw(n,t,e){let r=1-n%2,o=new Float32Array(n);for(let s=0;s1,()=>`inTopK() expects the predictions to be of rank 2 or higher, but got ${r.rank}`),_(r.rank-1===o.rank,()=>`predictions rank should be 1 larger than targets rank, but got predictions rank ${r.rank} and targets rank ${o.rank}`),W(r.shape.slice(0,r.shape.length-1),o.shape,"predictions's shape should be align with the targets' shape, except the last dimension.");let s=r.shape[r.shape.length-1];_(e>0&&e<=s,()=>`'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${s}), but got ${e}`);let c=await r.data(),l=await o.data(),[p,f]=[c.length/s,s],m=Ce("bool",p);for(let y=0;yS.value-N.value),m[y]=0;for(let N=0;N`Error in conv2dDerFilter: input must be rank 4, but got shape ${l.shape}.`),_(p.rank===4,()=>`Error in 
conv2dDerFilter: dy must be rank 4, but got shape ${p.shape}.`),_(e.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${e}.`);let f=s==="NHWC"?l.shape[3]:l.shape[1],m=s==="NHWC"?p.shape[3]:p.shape[1];_(f===e[2],()=>`Error in conv2dDerFilter: depth of input ${f}) must match input depth in filter (${e[2]}.`),_(m===e[3],()=>`Error in conv2dDerFilter: depth of dy (${m}) must match output depth for filter (${e[3]}).`),c!=null&&_(gt(o),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${c} but got pad ${o}.`);let y=T=>{let N=1,S=si(s),D=Un(l.shape,e,r,N,o,c,!1,S);return T.conv2dDerFilter(l,p,D)},b={x:l,dy:p},v={strides:r,pad:o,dataFormat:s,dimRoundingMode:c,filterShape:e};return X.runKernelFunc(y,b,null,Hb,v)}let Uw=j({conv2DBackpropFilter_:pz});function Dd(n,t,e){if(e==null||e==="linear")return n;if(e==="relu")return nt(n,Pc(t));throw new Error(`Cannot compute gradient for fused activation ${e}.`)}function Ad(n,t){let e=t,r=xn(n.shape,t.shape);return r.length>0&&(e=zt(e,r)),Q(e,n.shape)}function Fd(n,t,e){if(t==="linear")return n;if(t==="relu")return Vo(n);if(t==="elu")return _c(n);if(t==="relu6")return Aw(n);if(t==="prelu")return pp(n,e);throw new Error(`Unknown fused activation ${t}.`)}let Rd=(n,t)=>{let e=n>0;return!e||t==="linear"};function hz({x:n,filter:t,strides:e,pad:r,dataFormat:o="NHWC",dilations:s=[1,1],dimRoundingMode:c,bias:l,activation:p="linear",preluActivationWeights:f}){if(p=p||"linear",Rd(X.state.gradientDepth,p)===!1){let L=fs(n,t,e,r,o,s,c);return l!=null&&(L=Tt(L,l)),Fd(L,p,f)}let m=M(n,"x","conv2d"),y=M(t,"filter","conv2d"),b=m,v=!1;m.rank===3&&(v=!0,b=Q(m,[1,m.shape[0],m.shape[1],m.shape[2]])),_(b.rank===4,()=>`Error in fused conv2d: input must be rank 4, but got rank ${b.rank}.`),_(y.rank===4,()=>`Error in fused conv2d: filter must be rank 4, but got rank ${y.rank}.`),c!=null&&_(gt(r),()=>`Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${c} but got pad ${r}.`),_(b.shape[3]===y.shape[2],()=>`Error in conv2d: depth of input (${b.shape[3]}) must match input depth for filter ${y.shape[2]}.`),_(fn(e,s),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`),_(o==="NHWC",()=>`Error in conv2d: got dataFormat of ${o} but only NHWC is currently supported.`);let T=Un(b.shape,y.shape,e,s,r,c),N;l!=null&&(N=M(l,"bias","fused conv2d"),[N]=Je(N,m),le(T.outShape,N.shape));let S;f!=null&&(S=M(f,"prelu weights","fused conv2d"));let D=(L,B)=>{let[q,H,Z,J]=B,it=Dd(L,Z,p);_(oi(s),()=>`Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${s}'`);let pt=mw(H.shape,it,q,e,r),ht=Uw(H,it,q.shape,e,r),dt=[pt,ht];if(J!=null){let ft=Ad(J,it);dt.push(ft)}return dt},I=L=>{let B=L.fusedConv2d({input:b,filter:y,convInfo:T,bias:N,activation:p,preluActivationWeights:S});return B},P={x:b,filter:y,bias:N,preluActivationWeights:S},E={strides:e,pad:r,dataFormat:o,dilations:s,dimRoundingMode:c,activation:p};if(l==null){let L=zo((B,q,H)=>{let Z=X.runKernelFunc(I,P,null,Of,E);return H([q,B,Z]),v&&(Z=Q(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:D}});return L(b,y)}else{let L=zo((B,q,H,Z)=>{let J=X.runKernelFunc(I,P,null,Of,E);return Z([q,B,J,H]),v&&(J=Q(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:D}});return L(b,y,N)}}let qw=j({fusedConv2d_:hz});function fz(n,t,e,r,o,s=[1,1],c){let l=n;n.rank===3&&(l=Q(n,[1,n.shape[0],n.shape[1],n.shape[2]]));let p=t;p.rank===3&&(p=Q(t,[1,t.shape[0],t.shape[1],t.shape[2]]));let f=b=>{let v=Un(n.shape,e,r,s,o,c,!0);return b.depthwiseConv2DDerFilter(l,p,v)},m={x:l,dy:p},y={strides:r,pad:o,dimRoundingMode:c,dilations:s,filterShape:e};return X.runKernelFunc(f,m,null,Yb,y)}let iC=j({depthwiseConv2dNativeBackpropFilter_:fz});function dz(n,t,e,r,o,s=[1,1],c){let l=t,p=!1;t.rank===3&&(p=!0,l=Q(t,[1,t.shape[0],t.shape[1],t.shape[2]]));let f=v=>{let T=Un(n,e.shape,r,s,o,c,!0);return v.depthwiseConv2DDerInput(l,e,T)},m={dy:l,filter:e},y={strides:r,pad:o,dimRoundingMode:c,dilations:s,inputShape:n},b=X.runKernelFunc(f,m,null,Jb,y);return p?Q(b,[b.shape[1],b.shape[2],b.shape[3]]):b}let aC=j({depthwiseConv2dNativeBackpropInput_:dz});function mz({x:n,filter:t,strides:e,pad:r,dataFormat:o="NHWC",dilations:s=[1,1],dimRoundingMode:c,bias:l,activation:p="linear",preluActivationWeights:f}){if(Rd(X.state.gradientDepth,p)===!1){let L=na(n,t,e,r,o,s,c);return l!=null&&(L=Tt(L,l)),Fd(L,p,f)}let m=M(n,"x","depthwiseConv2d"),y=M(t,"filter","depthwiseConv2d"),b=m,v=!1;m.rank===3&&(v=!0,b=Q(m,[1,m.shape[0],m.shape[1],m.shape[2]])),_(b.rank===4,()=>`Error in fused depthwiseConv2d: input must be rank 4, but got rank ${b.rank}.`),_(y.rank===4,()=>`Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${y.rank}.`),_(b.shape[3]===y.shape[2],()=>`Error in fused depthwiseConv2d: number of input channels (${b.shape[3]}) must match the inChannels dimension in filter ${y.shape[2]}.`),s==null&&(s=[1,1]),_(fn(e,s),()=>`Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`),c!=null&&_(gt(r),()=>`Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${c} but got pad ${r}.`);let T=Un(b.shape,y.shape,e,s,r,c,!0),N;l!=null&&(N=M(l,"bias","fused conv2d"),[N]=Je(N,m),le(T.outShape,N.shape));let S;f!=null&&(S=M(f,"prelu weights","fused depthwiseConv2d"));let D=(L,B)=>{_(oi(s),()=>`Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. 
Got dilations '${s}'`);let[q,H,Z,J]=B,it=Dd(L,Z,p),pt=aC(H.shape,it,q,e,r,s,c),ht=iC(H,it,q.shape,e,r,s,c);if(J!=null){let dt=Ad(N,it);return[pt,ht,dt]}return[pt,ht]},I=L=>{let B=L.fusedDepthwiseConv2D({input:b,filter:y,convInfo:T,bias:N,activation:p,preluActivationWeights:S});return B},P={x:b,filter:y,bias:N,preluActivationWeights:S},E={strides:e,pad:r,dataFormat:o,dilations:s,dimRoundingMode:c,activation:p};if(l==null){let L=zo((B,q,H)=>{let Z=X.runKernelFunc(I,P,null,Lf,E);return H([q,B,Z]),v&&(Z=Q(Z,[Z.shape[1],Z.shape[2],Z.shape[3]])),{value:Z,gradFunc:D}});return L(b,y)}else{let L=zo((B,q,H,Z)=>{let J=X.runKernelFunc(I,P,null,Lf,E);return Z([q,B,J,H]),v&&(J=Q(J,[J.shape[1],J.shape[2],J.shape[3]])),{value:J,gradFunc:D}});return L(b,y,N)}}let cC=j({fusedDepthwiseConv2d_:mz});function gz({a:n,b:t,transposeA:e=!1,transposeB:r=!1,bias:o,activation:s="linear",preluActivationWeights:c}){if(Rd(X.state.gradientDepth,s)===!1){let J=ge(n,t,e,r);return o!=null&&(J=Tt(J,o)),Fd(J,s,c)}let l=M(n,"a","fused matMul"),p=M(t,"b","fused matMul");[l,p]=Je(l,p);let f=e?l.shape[l.rank-2]:l.shape[l.rank-1],m=r?p.shape[p.rank-1]:p.shape[p.rank-2],y=e?l.shape[l.rank-1]:l.shape[l.rank-2],b=r?p.shape[p.rank-2]:p.shape[p.rank-1],v=l.shape.slice(0,-2),T=p.shape.slice(0,-2),N=G(v),S=G(T);_(l.rank>=2&&p.rank>=2&&l.rank===p.rank,()=>`Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${l.rank} and ${p.rank}.`),_(lt(v,T),()=>`Error in fused matMul: outer dimensions (${v}) and (${T}) of Tensors with shapes ${l.shape} and ${p.shape} must match.`),_(f===m,()=>`Error in fused matMul: inner shapes (${f}) and (${m}) of Tensors with shapes ${l.shape} and ${p.shape} and transposeA=${e} and transposeB=${r} must match.`);let D=l.shape.slice(0,-2).concat([y,b]),I=e?Q(l,[N,f,y]):Q(l,[N,y,f]),P=r?Q(p,[S,b,m]):Q(p,[S,m,b]),E;o!=null&&(E=M(o,"bias","fused matMul"),[E]=Je(E,l),le(D,E.shape));let L;c!=null&&(L=M(c,"prelu weights","fused matMul"));let B=(J,it)=>{let[pt,ht,dt,ft]=it,ut=Dd(Q(J,dt.shape),dt,s),bt,yt;if(!e&&!r?(bt=ge(ut,ht,!1,!0),yt=ge(pt,ut,!0,!1)):!e&&r?(bt=ge(ut,ht,!1,!1),yt=ge(ut,pt,!0,!1)):e&&!r?(bt=ge(ht,ut,!1,!0),yt=ge(pt,ut,!1,!1)):(bt=ge(ht,ut,!0,!0),yt=ge(ut,pt,!0,!0)),o!=null){let xt=Ad(ft,ut);return[bt,yt,xt]}else return[bt,yt]},q=J=>{let it=J.fusedBatchMatMul({a:I,b:P,transposeA:e,transposeB:r,bias:E,activation:s,preluActivationWeights:L});return it},H={a:I,b:P,bias:E,preluActivationWeights:L},Z={transposeA:e,transposeB:r,activation:s};if(o==null){let J=zo((it,pt,ht)=>{let dt=X.runKernelFunc(q,H,null,Pf,Z);return ht([it,pt,dt]),{value:Q(dt,D),gradFunc:B}});return J(I,P)}else{let J=zo((it,pt,ht,dt)=>{let ft=X.runKernelFunc(q,H,null,Pf,Z);return dt([it,pt,ft,ht]),{value:Q(ft,D),gradFunc:B}});return J(I,P,E)}}let Pd=j({fusedMatMul_:gz});var yz=Object.freeze({__proto__:null,conv2d:qw,depthwiseConv2d:cC,matMul:Pd});function bz(n){return Gw(n,.54,.46)}let xz=j({hammingWindow_:bz});function wz(n){return Gw(n,.5,.5)}let lC=j({hannWindow_:wz});function vz(n,t,e,r=!1,o=0){let s=0,c=[];for(;s+t<=n.size;)c.push(ce(n,s,t)),s+=e;if(r)for(;s`Error in cropAndResize: image must be rank 4,but got rank ${c.rank}.`),_(l.rank===2&&l.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${f},4] but had shape ${l.shape}.`),_(p.rank===1&&p.shape[0]===f,()=>`Error in cropAndResize: boxInd must be have size [${f}] but had shape ${l.shape}.`),_(r.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${r.length}.`),_(r[0]>=1&&r[1]>=1,()=>`cropSize must be atleast 
[1,1], but was ${r}`),_(o==="bilinear"||o==="nearest",()=>`method must be bilinear or nearest, but was ${o}`);let m=T=>T.cropAndResize(c,l,p,r,o,s),y={image:c,boxes:l,boxInd:p},b={method:o,extrapolationValue:s,cropSize:r},v=X.runKernelFunc(m,y,null,rN,b);return v}let _z=j({cropAndResize_:Nz});function Cz(n){let t=M(n,"image","flipLeftRight","float32");_(t.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${t.rank}.`);let e={image:t},r=X.runKernel(bf,e,{});return r}let Sz=j({flipLeftRight_:Cz});function $z(n,t,e=0,r=.5){let o=M(n,"image","rotateWithOffset","float32");_(o.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${o.rank}.`);let s={image:o},c={radians:t,fillValue:e,center:r},l=X.runKernel(Rf,s,c);return l}let Iz=j({rotateWithOffset_:$z});function Lc(n,t,e,r,o,s){r==null&&(r=.5),o==null&&(o=Number.NEGATIVE_INFINITY),s==null&&(s=0);let c=n.shape[0];return e=Math.min(e,c),_(0<=r&&r<=1,()=>`iouThreshold must be in [0, 1], but was '${r}'`),_(n.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${n.rank}'`),_(n.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${n.shape[1]}`),_(t.rank===1,()=>"scores must be a 1D tensor"),_(t.shape[0]===c,()=>`scores has incompatible shape with boxes. Expected ${c}, but was ${t.shape[0]}`),_(0<=s&&s<=1,()=>`softNmsSigma must be in [0, 1], but was '${s}'`),{maxOutputSize:e,iouThreshold:r,scoreThreshold:o,softNmsSigma:s}}function Ez(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY){let s=M(n,"boxes","nonMaxSuppression"),c=M(t,"scores","nonMaxSuppression"),l=Lc(s,c,e,r,o);e=l.maxOutputSize,r=l.iouThreshold,o=l.scoreThreshold;let p={maxOutputSize:e,iouThreshold:r,scoreThreshold:o};return X.runKernelFunc(f=>f.nonMaxSuppression(s,c,e,r,o),{boxes:s,scores:c},null,px,p)}let Dz=j({nonMaxSuppression_:Ez});function Az(n,t,e){let r=Fz(n,t,e),o=r<0?-(r+1):r;n.splice(o,0,t)}function Fz(n,t,e){return Pz(n,t,e||Rz)}function Rz(n,t){return n>t?1:n>>1);let l=e(t,n[s]);l>0?r=s+1:(o=s,c=!l)}return c?r:-r-1}function Od(n,t,e,r,o){return Hw(n,t,e,r,o,0).selectedIndices}function Ld(n,t,e,r,o,s){return Hw(n,t,e,r,o,0,!1,s,!0)}function Md(n,t,e,r,o,s){return Hw(n,t,e,r,o,s,!0)}function Hw(n,t,e,r,o,s,c=!1,l=!1,p=!1){let f=[];for(let S=0;So&&f.push({score:t[S],boxIndex:S,suppressBeginIndex:0});f.sort(pC);let m=s>0?-.5/s:0,y=[],b=[];for(;y.length0;){let S=f.pop(),{score:D,boxIndex:I,suppressBeginIndex:P}=S;if(D=P;--L){let B=Oz(n,I,y[L]);if(B>=r){E=!0;break}if(S.score=S.score*Lz(r,m,B),S.score<=o)break}S.suppressBeginIndex=y.length,E||(S.score===D?(y.push(I),b.push(S.score)):S.score>o&&Az(f,S,pC))}let v=y.length,T=e-v;l&&T>0&&(y.push(...new Array(T).fill(0)),b.push(...new Array(T).fill(0)));let N={selectedIndices:vr(y,"int32")};return c&&(N.selectedScores=vr(b,"float32")),p&&(N.validOutputs=Et(v,"int32")),N}function Oz(n,t,e){let r=n.subarray(t*4,t*4+4),o=n.subarray(e*4,e*4+4),s=Math.min(r[0],r[2]),c=Math.min(r[1],r[3]),l=Math.max(r[0],r[2]),p=Math.max(r[1],r[3]),f=Math.min(o[0],o[2]),m=Math.min(o[1],o[3]),y=Math.max(o[0],o[2]),b=Math.max(o[1],o[3]),v=(l-s)*(p-c),T=(y-f)*(b-m);if(v<=0||T<=0)return 0;let N=Math.max(s,f),S=Math.max(c,m),D=Math.min(l,y),I=Math.min(p,b),P=Math.max(D-N,0)*Math.max(I-S,0);return P/(v+T-P)}function Lz(n,t,e){let r=Math.exp(t*e*e);return e<=n?r:0}function pC(n,t){return n.score-t.score||n.score===t.score&&t.boxIndex-n.boxIndex}async function Mz(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY){let 
s=M(n,"boxes","nonMaxSuppressionAsync"),c=M(t,"scores","nonMaxSuppressionAsync"),l=Lc(s,c,e,r,o);e=l.maxOutputSize,r=l.iouThreshold,o=l.scoreThreshold;let p=await Promise.all([s.data(),c.data()]),f=p[0],m=p[1],y=Od(f,m,e,r,o);return s!==n&&s.dispose(),c!==t&&c.dispose(),y}let Bz=Mz;function zz(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY,s=0){let c=M(n,"boxes","nonMaxSuppression"),l=M(t,"scores","nonMaxSuppression"),p=Lc(c,l,e,r,o,s);e=p.maxOutputSize,r=p.iouThreshold,o=p.scoreThreshold,s=p.softNmsSigma;let f={boxes:c,scores:l},m={maxOutputSize:e,iouThreshold:r,scoreThreshold:o,softNmsSigma:s},y=X.runKernel(_f,f,m);return{selectedIndices:y[0],selectedScores:y[1]}}let Wz=j({nonMaxSuppressionWithScore_:zz});async function Vz(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY,s=0){let c=M(n,"boxes","nonMaxSuppressionAsync"),l=M(t,"scores","nonMaxSuppressionAsync"),p=Lc(c,l,e,r,o,s);e=p.maxOutputSize,r=p.iouThreshold,o=p.scoreThreshold,s=p.softNmsSigma;let f=await Promise.all([c.data(),l.data()]),m=f[0],y=f[1],b=Md(m,y,e,r,o,s);return c!==n&&c.dispose(),l!==t&&l.dispose(),b}let Gz=Vz;function Uz(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY,s=!1){let c=M(n,"boxes","nonMaxSuppression"),l=M(t,"scores","nonMaxSuppression"),p=Lc(c,l,e,r,o,null),f=p.maxOutputSize,m=p.iouThreshold,y=p.scoreThreshold,b={boxes:c,scores:l},v={maxOutputSize:f,iouThreshold:m,scoreThreshold:y,padToMaxOutputSize:s},T=X.runKernel(Nf,b,v);return{selectedIndices:T[0],validOutputs:T[1]}}let qz=j({nonMaxSuppressionPadded_:Uz});async function Hz(n,t,e,r=.5,o=Number.NEGATIVE_INFINITY,s=!1){let c=M(n,"boxes","nonMaxSuppressionAsync"),l=M(t,"scores","nonMaxSuppressionAsync"),p=Lc(c,l,e,r,o,null),f=p.maxOutputSize,m=p.iouThreshold,y=p.scoreThreshold,[b,v]=await Promise.all([c.data(),l.data()]),T=Ld(b,v,f,m,y,s);return c!==n&&c.dispose(),l!==t&&l.dispose(),T}let jz=Hz;function Kz(n,t,e=!1){let r=M(n,"images","resizeBilinear");_(r.rank===3||r.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${r.rank}.`),_(t.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${t}.`);let o=r,s=!1;r.rank===3&&(s=!0,o=Q(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let[c,l]=t,p=(b,v)=>(v([o]),b.resizeBilinear(o,c,l,e)),f={images:o},m={alignCorners:e,size:t},y=X.runKernelFunc(p,f,null,gx,m);return s?Q(y,[y.shape[1],y.shape[2],y.shape[3]]):y}let hC=j({resizeBilinear_:Kz});function Xz(n,t,e=!1){let r=M(n,"images","resizeNearestNeighbor");_(r.rank===3||r.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${r.rank}.`),_(t.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t}.`),_(r.dtype==="float32"||r.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let o=r,s=!1;r.rank===3&&(s=!0,o=Q(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let[c,l]=t,p={images:o},f={alignCorners:e,size:t},m=(b,v)=>(v([o]),b.resizeNearestNeighbor(o,c,l,e)),y=X.runKernelFunc(m,p,null,mx,f);return s?Q(y,[y.shape[1],y.shape[2],y.shape[3]]):y}let fC=j({resizeNearestNeighbor_:Xz});function Yz(n,t,e){_(t%1===0,()=>`bandPart(): numLower must be an integer, got ${t}.`),_(e%1===0,()=>`bandPart(): numUpper must be an integer, got ${e}.`);let r=M(n,"a","bandPart");_(r.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${r.rank}.`);let o=r.shape,[s,c]=r.shape.slice(-2);if(!(t<=s))throw new Error(`bandPart(): numLower (${t}) must not be greater than the number of rows (${s}).`);if(!(e<=c))throw new Error(`bandPart(): numUpper (${e}) must not be greater than the number of columns 
(${c}).`);t<0&&(t=s),e<0&&(e=c);let l=Q(hp(0,s,1,"int32"),[-1,1]),p=hp(0,c,1,"int32"),f=Dt(l,p),m=Yr(ai(f,Et(+t,"int32")),ds(f,Et(-e,"int32"))),y=xe([s,c],r.dtype);return Q(ur(mo(Q(r,[-1,s,c])).map(b=>Xn(m,b,y))),o)}let Jz=j({bandPart_:Yz});function Zz(n){let t;if(Array.isArray(n)){t=!1,_(n!=null&&n.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");let o=n[0].shape[0];for(let s=1;s`Gram-Schmidt: Non-unique lengths found in the input vectors: (${n[s].shape[0]} vs. ${o})`)}else t=!0,n=Tr(n,n.shape[0],0).map(o=>li(o,[0]));_(n.length<=n[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${n.length}) exceeds number of dimensions (${n[0].shape[0]}).`);let e=[],r=n;for(let o=0;o{let s=r[o];if(o>0)for(let c=0;c=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${n.rank}`),n.rank===2)return dC(n,t);{let e=n.shape.slice(0,n.shape.length-2).reduce((p,f)=>p*f),r=mo(Q(n,[e,n.shape[n.shape.length-2],n.shape[n.shape.length-1]]),0),o=[],s=[];r.forEach(p=>{let[f,m]=dC(p,t);o.push(f),s.push(m)});let c=Q(ur(o,0),n.shape),l=Q(ur(s,0),n.shape);return[c,l]}}function dC(n,t=!1){return X.tidy(()=>{_(n.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${n.shape.length}D Tensor.`);let e=n.shape[0],r=n.shape[1],o=hd(e),s=ni(n),c=ui([[1]],[1,1]),l=ni(c),p=e>=r?r:e;for(let f=0;f{let v=ce(s,[f,f],[e-f,1]),T=Ed(v),N=ce(s,[f,f],[1,1]),S=Xn(Fr(N,0),ui([[-1]]),ui([[1]])),D=Dt(N,nt(S,T)),I=Bt(v,D);I.shape[0]===1?l=ni(c):l=Qe([c,ce(I,[1,0],[I.shape[0]-1,I.shape[1]])],0);let P=tn(Bt(ge(S,D),T)),E=ce(s,[f,0],[e-f,r]),L=nt(P,l),B=Kt(l);if(f===0)s=Dt(E,ge(L,ge(B,E)));else{let Z=Dt(E,ge(L,ge(B,E)));s=Qe([ce(s,[0,0],[f,r]),Z],0)}let q=Kt(L),H=ce(o,[0,f],[e,o.shape[1]-f]);if(f===0)o=Dt(H,ge(ge(H,l),q));else{let Z=Dt(H,ge(ge(H,l),q));o=Qe([ce(o,[0,0],[e,f]),Z],1)}return[l,s,o]}),Xt([m,y,b])}return!t&&e>r&&(o=ce(o,[0,0],[e,r]),s=ce(s,[0,0],[r,r])),[o,s]})}let eW=j({qr_:tW});(function(n){n[n.NONE=0]="NONE",n[n.MEAN=1]="MEAN",n[n.SUM=2]="SUM",n[n.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(i.Reduction||(i.Reduction={}));function nW(n,t,e=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let r=M(n,"losses","computeWeightedLoss"),o=null;t!=null&&(o=M(t,"weights","computeWeightedLoss"));let s=o==null?r:nt(r,o);if(e===i.Reduction.NONE)return s;if(e===i.Reduction.SUM)return zt(s);if(e===i.Reduction.MEAN){if(o==null)return en(s);{let c=r.size/o.size,l=Bt(zt(s),zt(o));return c>1?Bt(l,Et(c)):l}}if(e===i.Reduction.SUM_BY_NONZERO_WEIGHTS){if(o==null)return Bt(zt(s),Et(r.size));{let c=nt(o,ho(r.shape)),l=$t(zt(ci(c,Et(0))),"float32");return Bt(zt(s),l)}}throw Error(`Unknown reduction: ${e}`)}let ms=j({computeWeightedLoss_:nW});function rW(n,t,e,r=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=M(n,"labels","absoluteDifference"),s=M(t,"predictions","absoluteDifference"),c=null;e!=null&&(c=M(e,"weights","absoluteDifference")),W(o.shape,s.shape,"Error in absoluteDifference: ");let l=bn(Dt(o,s));return ms(l,c,r)}let oW=j({absoluteDifference_:rW});function sW(n,t,e,r,o=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let s=M(n,"labels","cosineDistance"),c=M(t,"predictions","cosineDistance"),l=null;r!=null&&(l=M(r,"weights","cosineDistance")),W(s.shape,c.shape,"Error in cosineDistance: ");let p=Et(1),f=Dt(p,zt(nt(s,c),e,!0));return ms(f,l,o)}let iW=j({cosineDistance_:sW});function aW(n,t,e,r=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=M(n,"labels","hingeLoss"),s=M(t,"predictions","hingeLoss"),c=null;e!=null&&(c=M(e,"weights","hingeLoss")),W(o.shape,s.shape,"Error in hingeLoss: ");let l=Et(1);o=Dt(nt(Et(2),o),l);let 
p=Vo(Dt(l,nt(o,s)));return ms(p,c,r)}let cW=j({hingeLoss_:aW});function lW(n,t,e,r=1,o=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let s=M(n,"labels","huberLoss"),c=M(t,"predictions","huberLoss"),l=null;e!=null&&(l=M(e,"weights","huberLoss")),W(s.shape,c.shape,"Error in huberLoss: ");let p=Et(r),f=bn(Dt(c,s)),m=oa(f,p),y=Dt(f,m),b=Tt(nt(Et(.5),De(m)),nt(p,y));return ms(b,l,o)}let uW=j({huberLoss_:lW});function pW(n,t,e,r=1e-7,o=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let s=M(n,"labels","logLoss"),c=M(t,"predictions","logLoss"),l=null;e!=null&&(l=M(e,"weights","logLoss")),W(s.shape,c.shape,"Error in logLoss: ");let p=Et(1),f=Et(r),m=tn(nt(s,wr(Tt(c,f)))),y=nt(Dt(p,s),wr(Tt(Dt(p,c),f))),b=Dt(m,y);return ms(b,l,o)}let hW=j({logLoss_:pW});function fW(n,t,e,r=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let o=M(n,"labels","meanSquaredError"),s=M(t,"predictions","meanSquaredError"),c=null;e!=null&&(c=M(e,"weights","meanSquaredError")),W(o.shape,s.shape,"Error in meanSquaredError: ");let l=gp(o,s);return ms(l,c,r)}let dW=j({meanSquaredError_:fW});function mW(n,t){let e=M(n,"labels","sigmoidCrossEntropyWithLogits"),r=M(t,"logits","sigmoidCrossEntropyWithLogits");W(e.shape,r.shape,"Error in sigmoidCrossEntropyWithLogits: ");let o=Vo(r),s=nt(r,e),c=dd(Ar(tn(bn(r))));return Tt(Dt(o,s),c)}function gW(n,t,e,r=0,o=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let s=M(n,"multiClassLabels","sigmoidCrossEntropy"),c=M(t,"logits","sigmoidCrossEntropy"),l=null;if(e!=null&&(l=M(e,"weights","sigmoidCrossEntropy")),W(s.shape,c.shape,"Error in sigmoidCrossEntropy: "),r>0){let f=Et(r),m=Et(1),y=Et(.5);s=Tt(nt(s,Dt(m,f)),nt(y,f))}let p=mW(s,c);return ms(p,l,o)}let yW=j({sigmoidCrossEntropy_:gW});function bW(n,t,e=-1){if(e===-1&&(e=t.rank-1),e!==t.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${t.rank} and dim was ${e}`);let r=zo((o,s,c)=>{let l=!0,p=_w(s,[e],l),f=Dt($t(s,"float32"),p);c([o,f]);let m=tn(nt(f,o)),y=zt(m,[e]),b=(v,T)=>{let[N,S]=T,D=Rn(v.shape,[e]);return[nt(Q(v,D),Dt($t(N,"float32"),Ar(S))),nt(Q(v,D),Dt(Ar(S),$t(N,"float32")))]};return{value:y,gradFunc:b}});return r(n,t)}function xW(n,t,e,r=0,o=i.Reduction.SUM_BY_NONZERO_WEIGHTS){let s=M(n,"onehotLabels","softmaxCrossEntropy"),c=M(t,"logits","softmaxCrossEntropy"),l=null;if(e!=null&&(l=M(e,"weights","softmaxCrossEntropy")),W(s.shape,c.shape,"Error in softmaxCrossEntropy: "),r>0){let f=Et(r),m=Et(1),y=Et(s.shape[1]);s=Tt(nt(s,Dt(m,f)),Bt(f,y))}let p=bW(s,c);return ms(p,l,o)}let wW=j({softmaxCrossEntropy_:xW});let vW={fft:dp,ifft:Rc,rfft:mp,irfft:Sd},TW={hammingWindow:xz,hannWindow:lC,frame:uC,stft:kz},pi={flipLeftRight:Sz,resizeNearestNeighbor:fC,resizeBilinear:hC,rotateWithOffset:Iz,cropAndResize:_z,nonMaxSuppression:Dz,nonMaxSuppressionAsync:Bz,nonMaxSuppressionWithScore:Wz,nonMaxSuppressionWithScoreAsync:Gz,nonMaxSuppressionPadded:qz,nonMaxSuppressionPaddedAsync:jz},mC={bandPart:Jz,gramSchmidt:Qz,qr:eW},kW={absoluteDifference:oW,computeWeightedLoss:ms,cosineDistance:iW,hingeLoss:cW,huberLoss:uW,logLoss:hW,meanSquaredError:dW,sigmoidCrossEntropy:yW,softmaxCrossEntropy:wW};class gs extends Qi{minimize(t,e=!1,r){let{value:o,grads:s}=this.computeGradients(t,r);if(r!=null){let c=r.map(l=>({name:l.name,tensor:s[l.name]}));this.applyGradients(c)}else this.applyGradients(s);return Xt(s),e?o:(o.dispose(),null)}get iterations(){return this.iterations_==null&&(this.iterations_=0),this.iterations_}incrementIterations(){this.iterations_=this.iterations+1}computeGradients(t,e){return Nw(t,e)}dispose(){this.iterations_!=null&&Xt(this.iterations_)}async saveIterations(){return this.iterations_==null&&(this.iterations_=0),{name:"iter",tensor:Et(this.iterations_,"int32")}}async getWeights(){throw new Error("getWeights() is not implemented for this optimizer yet.")}async setWeights(t){throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`)}async extractIterations(t){return this.iterations_=(await t[0].tensor.data())[0],t.slice(1)}}Object.defineProperty(gs,Symbol.hasInstance,{value:n=>n.minimize!=null&&n.computeGradients!=null&&n.applyGradients!=null});class bp extends gs{constructor(t,e,r=null){super();this.learningRate=t,this.rho=e,this.epsilon=r,this.accumulatedGrads=[],this.accumulatedUpdates=[],r==null&&(this.epsilon=X.backend.epsilon())}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);e.forEach((r,o)=>{let s=X.registeredVariables[r],c=!1;this.accumulatedGrads[o]==null&&(this.accumulatedGrads[o]={originalName:`${r}/accum_grad`,variable:rt(()=>re(s).variable(c))}),this.accumulatedUpdates[o]==null&&(this.accumulatedUpdates[o]={originalName:`${r}/accum_var`,variable:rt(()=>re(s).variable(c))});let l=Array.isArray(t)?t[o].tensor:t[r];if(l==null)return;let p=this.accumulatedGrads[o].variable,f=this.accumulatedUpdates[o].variable;rt(()=>{let m=Tt(nt(p,this.rho),nt(De(l),1-this.rho)),y=nt(Bt(Pn(Tt(f,this.epsilon)),Pn(Tt(p,this.epsilon))),l),b=Tt(nt(f,this.rho),nt(De(y),1-this.rho));p.assign(m),f.assign(b);let v=Tt(nt(y,-this.learningRate),s);s.assign(v)})}),this.incrementIterations()}dispose(){this.accumulatedUpdates!=null&&(Xt(this.accumulatedGrads.map(t=>t.variable)),Xt(this.accumulatedUpdates.map(t=>t.variable)))}async getWeights(){let t=[...this.accumulatedGrads,...this.accumulatedUpdates];return[await 
this.saveIterations()].concat(t.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(t){t=await this.extractIterations(t);let e=t.length/2,r=!1;this.accumulatedGrads=t.slice(0,e).map(o=>({originalName:o.name,variable:o.tensor.variable(r)})),this.accumulatedUpdates=t.slice(e,e*2).map(o=>({originalName:o.name,variable:o.tensor.variable(r)}))}getConfig(){return{learningRate:this.learningRate,rho:this.rho,epsilon:this.epsilon}}static fromConfig(t,e){return new t(e.learningRate,e.rho,e.epsilon)}}bp.className="Adadelta",vt(bp);class xp extends gs{constructor(t,e=.1){super();this.learningRate=t,this.initialAccumulatorValue=e,this.accumulatedGrads=[]}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);e.forEach((r,o)=>{let s=X.registeredVariables[r];if(this.accumulatedGrads[o]==null){let p=!1;this.accumulatedGrads[o]={originalName:`${r}/accumulator`,variable:rt(()=>Cc(s.shape,this.initialAccumulatorValue).variable(p))}}let c=Array.isArray(t)?t[o].tensor:t[r];if(c==null)return;let l=this.accumulatedGrads[o].variable;rt(()=>{let p=Tt(l,De(c));l.assign(p);let f=Tt(nt(Bt(c,Pn(Tt(p,X.backend.epsilon()))),-this.learningRate),s);s.assign(f)})}),this.incrementIterations()}dispose(){this.accumulatedGrads!=null&&Xt(this.accumulatedGrads.map(t=>t.variable))}async getWeights(){return[await this.saveIterations()].concat(this.accumulatedGrads.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(t){t=await this.extractIterations(t);let e=!1;this.accumulatedGrads=t.map(r=>({originalName:r.name,variable:r.tensor.variable(e)}))}getConfig(){return{learningRate:this.learningRate,initialAccumulatorValue:this.initialAccumulatorValue}}static fromConfig(t,e){return new t(e.learningRate,e.initialAccumulatorValue)}}xp.className="Adagrad",vt(xp);class wp extends gs{constructor(t,e,r,o=null){super();this.learningRate=t,this.beta1=e,this.beta2=r,this.epsilon=o,this.accumulatedFirstMoment=[],this.accumulatedSecondMoment=[],rt(()=>{this.accBeta1=Et(e).variable(),this.accBeta2=Et(r).variable()}),o==null&&(this.epsilon=X.backend.epsilon())}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);rt(()=>{let r=Dt(1,this.accBeta1),o=Dt(1,this.accBeta2);e.forEach((s,c)=>{let l=X.registeredVariables[s],p=!1;this.accumulatedFirstMoment[c]==null&&(this.accumulatedFirstMoment[c]={originalName:`${s}/m`,variable:rt(()=>re(l).variable(p))}),this.accumulatedSecondMoment[c]==null&&(this.accumulatedSecondMoment[c]={originalName:`${s}/v`,variable:rt(()=>re(l).variable(p))});let f=Array.isArray(t)?t[c].tensor:t[s];if(f==null)return;let m=this.accumulatedFirstMoment[c].variable,y=this.accumulatedSecondMoment[c].variable,b=Tt(nt(m,this.beta1),nt(f,1-this.beta1)),v=Tt(nt(y,this.beta2),nt(De(f),1-this.beta2)),T=Bt(b,r),N=Bt(v,o);m.assign(b),y.assign(v);let S=Tt(nt(Bt(T,Tt(Pn(N),this.epsilon)),-this.learningRate),l);l.assign(S)}),this.accBeta1.assign(nt(this.accBeta1,this.beta1)),this.accBeta2.assign(nt(this.accBeta2,this.beta2))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.accBeta2.dispose(),this.accumulatedFirstMoment!=null&&Xt(this.accumulatedFirstMoment.map(t=>t.variable)),this.accumulatedSecondMoment!=null&&Xt(this.accumulatedSecondMoment.map(t=>t.variable))}async getWeights(){let t=[...this.accumulatedFirstMoment,...this.accumulatedSecondMoment];return[await this.saveIterations()].concat(t.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(t){t=await 
this.extractIterations(t),rt(()=>{this.accBeta1.assign(fo(this.beta1,this.iterations_+1)),this.accBeta2.assign(fo(this.beta2,this.iterations_+1))});let e=t.length/2,r=!1;this.accumulatedFirstMoment=t.slice(0,e).map(o=>({originalName:o.name,variable:o.tensor.variable(r)})),this.accumulatedSecondMoment=t.slice(e,e*2).map(o=>({originalName:o.name,variable:o.tensor.variable(r)}))}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon}}static fromConfig(t,e){return new t(e.learningRate,e.beta1,e.beta2,e.epsilon)}}wp.className="Adam",vt(wp);class vp extends gs{constructor(t,e,r,o=null,s=0){super();this.learningRate=t,this.beta1=e,this.beta2=r,this.epsilon=o,this.decay=s,this.accumulatedFirstMoment=[],this.accumulatedWeightedInfNorm=[],rt(()=>{this.iteration=Et(0).variable(),this.accBeta1=Et(e).variable()}),o==null&&(this.epsilon=X.backend.epsilon())}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);rt(()=>{let r=Dt(1,this.accBeta1),o=Bt(-this.learningRate,Tt(nt(this.iteration,this.decay),1));e.forEach((s,c)=>{let l=X.registeredVariables[s],p=!1;this.accumulatedFirstMoment[c]==null&&(this.accumulatedFirstMoment[c]={originalName:`${s}/m`,variable:re(l).variable(p)}),this.accumulatedWeightedInfNorm[c]==null&&(this.accumulatedWeightedInfNorm[c]={originalName:`${s}/v`,variable:re(l).variable(p)});let f=Array.isArray(t)?t[c].tensor:t[s];if(f==null)return;let m=this.accumulatedFirstMoment[c].variable,y=this.accumulatedWeightedInfNorm[c].variable,b=Tt(nt(m,this.beta1),nt(f,1-this.beta1)),v=nt(y,this.beta2),T=bn(f),N=Xr(v,T);m.assign(b),y.assign(N);let S=Tt(nt(Bt(o,r),Bt(b,Tt(N,this.epsilon))),l);l.assign(S)}),this.iteration.assign(Tt(this.iteration,1)),this.accBeta1.assign(nt(this.accBeta1,this.beta1))}),this.incrementIterations()}dispose(){this.accBeta1.dispose(),this.iteration.dispose(),this.accumulatedFirstMoment!=null&&Xt(this.accumulatedFirstMoment.map(t=>t.variable)),this.accumulatedWeightedInfNorm!=null&&Xt(this.accumulatedWeightedInfNorm.map(t=>t.variable))}async getWeights(){throw new Error("getWeights() is not implemented for Adamax yet.")}async setWeights(t){throw new Error("setWeights() is not implemented for Adamax yet.")}getConfig(){return{learningRate:this.learningRate,beta1:this.beta1,beta2:this.beta2,epsilon:this.epsilon,decay:this.decay}}static fromConfig(t,e){return new t(e.learningRate,e.beta1,e.beta2,e.epsilon,e.decay)}}vp.className="Adamax",vt(vp);class Mc extends gs{constructor(t){super();this.learningRate=t,this.setLearningRate(t)}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);e.forEach((r,o)=>{let s=Array.isArray(t)?t[o].tensor:t[r];if(s==null)return;let c=X.registeredVariables[r];rt(()=>{let l=Tt(nt(this.c,s),c);c.assign(l)})}),this.incrementIterations()}setLearningRate(t){this.learningRate=t,this.c!=null&&this.c.dispose(),this.c=Sn(Et(-t))}dispose(){this.c.dispose()}async getWeights(){return[await this.saveIterations()]}async setWeights(t){if(t=await this.extractIterations(t),t.length!==0)throw new Error("SGD optimizer does not have settable weights.")}getConfig(){return{learningRate:this.learningRate}}static fromConfig(t,e){return new t(e.learningRate)}}Mc.className="SGD",vt(Mc);class Tp extends Mc{constructor(t,e,r=!1){super(t);this.learningRate=t,this.momentum=e,this.useNesterov=r,this.accumulations=[],this.m=Et(this.momentum)}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);e.forEach((r,o)=>{let 
s=X.registeredVariables[r];if(this.accumulations[o]==null){let p=!1;this.accumulations[o]={originalName:`${r}/momentum`,variable:rt(()=>re(s).variable(p))}}let c=this.accumulations[o].variable,l=Array.isArray(t)?t[o].tensor:t[r];if(l==null)return;rt(()=>{let p,f=Tt(nt(this.m,c),l);this.useNesterov?p=Tt(nt(this.c,Tt(l,nt(f,this.m))),s):p=Tt(nt(this.c,f),s),c.assign(f),s.assign(p)})}),this.incrementIterations()}dispose(){this.m.dispose(),this.accumulations!=null&&Xt(this.accumulations.map(t=>t.variable))}setMomentum(t){this.momentum=t}async getWeights(){return[await this.saveIterations()].concat(this.accumulations.map(t=>({name:t.originalName,tensor:t.variable})))}async setWeights(t){t=await this.extractIterations(t);let e=!1;this.accumulations=t.map(r=>({originalName:r.name,variable:r.tensor.variable(e)}))}getConfig(){return{learningRate:this.learningRate,momentum:this.momentum,useNesterov:this.useNesterov}}static fromConfig(t,e){return new t(e.learningRate,e.momentum,e.useNesterov)}}Tp.className="Momentum",vt(Tp);class kp extends gs{constructor(t,e=.9,r=0,o=null,s=!1){super();if(this.learningRate=t,this.decay=e,this.momentum=r,this.epsilon=o,this.accumulatedMeanSquares=[],this.accumulatedMoments=[],this.accumulatedMeanGrads=[],this.centered=s,o==null&&(this.epsilon=X.backend.epsilon()),t==null)throw new Error("learningRate for RMSPropOptimizer must be defined.")}applyGradients(t){let e=Array.isArray(t)?t.map(r=>r.name):Object.keys(t);e.forEach((r,o)=>{let s=X.registeredVariables[r],c=!1;this.accumulatedMeanSquares[o]==null&&(this.accumulatedMeanSquares[o]={originalName:`${r}/rms`,variable:rt(()=>re(s).variable(c))}),this.accumulatedMoments[o]==null&&(this.accumulatedMoments[o]={originalName:`${r}/momentum`,variable:rt(()=>re(s).variable(c))}),this.accumulatedMeanGrads[o]==null&&this.centered&&(this.accumulatedMeanGrads[o]={originalName:`${r}/mg`,variable:rt(()=>re(s).variable(c))});let l=Array.isArray(t)?t[o].tensor:t[r];if(l==null)return;let p=this.accumulatedMeanSquares[o].variable,f=this.accumulatedMoments[o].variable;rt(()=>{let m=Tt(nt(p,this.decay),nt(De(l),1-this.decay));if(this.centered){let y=this.accumulatedMeanGrads[o].variable,b=Tt(nt(y,this.decay),nt(l,1-this.decay)),v=Bt(nt(l,this.learningRate),Pn(Dt(m,Tt(De(b),this.epsilon)))),T=Tt(nt(f,this.momentum),v);p.assign(m),y.assign(b),f.assign(T);let N=Dt(s,T);s.assign(N)}else{let y=Tt(nt(p,this.decay),nt(De(l),1-this.decay)),b=Tt(nt(f,this.momentum),Bt(nt(l,this.learningRate),Pn(Tt(y,this.epsilon))));p.assign(y),f.assign(b);let v=Dt(s,b);s.assign(v)}})}),this.incrementIterations()}dispose(){this.accumulatedMeanSquares!=null&&Xt(this.accumulatedMeanSquares.map(t=>t.variable)),this.accumulatedMeanGrads!=null&&this.centered&&Xt(this.accumulatedMeanGrads.map(t=>t.variable)),this.accumulatedMoments!=null&&Xt(this.accumulatedMoments.map(t=>t.variable))}async getWeights(){let t=[...this.accumulatedMeanSquares,...this.accumulatedMoments];return this.centered&&t.push(...this.accumulatedMeanGrads),[await this.saveIterations()].concat(t.map(e=>({name:e.originalName,tensor:e.variable})))}async setWeights(t){t=await this.extractIterations(t);let 
e=this.centered?t.length/3:t.length/2,r=!1;this.accumulatedMeanSquares=t.slice(0,e).map(o=>({originalName:o.name,variable:o.tensor.variable(r)})),this.accumulatedMoments=t.slice(e,e*2).map(o=>({originalName:o.name,variable:o.tensor.variable(r)})),this.centered&&(this.accumulatedMeanGrads=t.slice(e*2,e*3).map(o=>({originalName:o.name,variable:o.tensor.variable(r)})))}getConfig(){return{learningRate:this.learningRate,decay:this.decay,momentum:this.momentum,epsilon:this.epsilon,centered:this.centered}}static fromConfig(t,e){return new t(e.learningRate,e.decay,e.momentum,e.epsilon,e.centered)}}kp.className="RMSProp",vt(kp);class la{static sgd(t){return new Mc(t)}static momentum(t,e,r=!1){return new Tp(t,e,r)}static rmsprop(t,e=.9,r=0,o=null,s=!1){return new kp(t,e,r,o,s)}static adam(t=.001,e=.9,r=.999,o=null){return new wp(t,e,r,o)}static adadelta(t=.001,e=.95,r=null){return new bp(t,e,r)}static adamax(t=.002,e=.9,r=.999,o=null,s=0){return new vp(t,e,r,o,s)}static adagrad(t,e=.1){return new xp(t,e)}}let ua={sgd:la.sgd,momentum:la.momentum,adadelta:la.adadelta,adagrad:la.adagrad,rmsprop:la.rmsprop,adamax:la.adamax,adam:la.adam};let NW=(()=>typeof requestAnimationFrame!="undefined"?requestAnimationFrame:typeof setImmediate!="undefined"?setImmediate:n=>n())();function Bd(){return new Promise(n=>NW(()=>n()))}function jw(n,t,e){let r=e*(typeof n=="number"?n:n[0]),o=t*(typeof n=="number"?n:n[1]);return[r,o]}function Np(n,t,e,r=!0){let o=[];if(r)o=o.concat(t.slice(0)),o.push(n[0]/e),o=o.concat(n.slice(1));else{o=o.concat(n[0]);let s=t.length;for(let c=0;c=t*2+1||c%2===1?s.push(c):o.push(c);r.push(...o),r.push(0),r.push(...s)}return r}function Cp(n,t,e,r=!0){let o=[];r?o.push(n[0]/e):o.push(n[0]*e);for(let s=1;s{let c=[...o];c[e]=s;let l=ce(n,r,c);return r[e]+=s,l})}function iv(n,t){let e=new Array(n.rank);for(let o=0;oP.value-I.value);let N=y*r,S=p.subarray(N,N+r),D=f.subarray(N,N+r);for(let I=0;I{let[e]=t;return{x:()=>nt(n,Pc($t(e,"float32"),-1))}}};let IW={kernelName:eu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>{let r=De($t(e,"float32")),o=Pn(Dt(Et(1),r));return tn(Bt(n,o))}}}};let EW={kernelName:nu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>{let r=Pn(Dt(De($t(e,"float32")),1));return Bt(n,r)}}}};let DW={kernelName:Hi,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=n,p=xn(e.shape,o);return p.length>0&&(l=zt(l,p)),Q(l,e.shape)},c=()=>{let l=n,p=xn(r.shape,o);return p.length>0&&(l=zt(l,p)),Q(l,r.shape)};return{a:s,b:c}}};let AW={kernelName:zb,saveAllInputs:!0,gradFunc:(n,t)=>{let e={};return t.forEach((r,o)=>{e[o]=()=>n.clone()}),e}};let FW={kernelName:Wb,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>re(e)}}};let RW={kernelName:Vb,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>re(e)}}};let PW={kernelName:ru,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,Pn(Dt(Et(1),De($t(e,"float32")))))}}};let OW={kernelName:ou,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>{let r=Pn(Tt(Et(1),De($t(e,"float32"))));return Bt(n,r)}}}};let LW={kernelName:sf,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=Tt(De(e),De(r)),p=nt(n,Bt(r,l)),f=xn(e.shape,o);return f.length>0&&(p=zt(p,f)),Q(p,e.shape)},c=()=>{let l=Tt(De(e),De(r)),p=tn(nt(n,Bt(e,l))),f=xn(r.shape,o);return f.length>0&&(p=zt(p,f)),Q(p,r.shape)};return{a:s,b:c}}};let MW={kernelName:su,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,Tt(De($t(e,"float32")),1))}}};let 
BW={kernelName:iu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,Dt(Et(1),De($t(e,"float32"))))}}};function zW(n,t,e,r,o=[1,1,1],s,c){let l=M(n,"dy","avgPool3dBackprop"),p=M(t,"input","avgPool3dBackprop"),f=l,m=p,y=!1;p.rank===4&&(y=!0,f=Q(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),m=Q(p,[1,p.shape[0],p.shape[1],p.shape[2],p.shape[3]])),_(f.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${f.rank}.`),_(m.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${m.rank}.`),_(fn(r,o),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${r} and dilations '${o}'`),c!=null&&_(gt(s),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${s}.`);let b=S=>{let D=tp(m.shape,e,r,o,s,c);return S.avgPool3dBackprop(f,m,D)},v={dy:f,input:m},T={filterSize:e,strides:r,dilations:o,pad:s,dimRoundingMode:c},N=X.runKernelFunc(b,v,null,nN,T);return y?Q(N,[N.shape[1],N.shape[2],N.shape[3],N.shape[4]]):N}let WW=j({avgPool3dBackprop_:zW});let VW={kernelName:Gb,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{filterSize:o,strides:s,dilations:c,pad:l,dimRoundingMode:p}=e,f=c==null?[1,1,1]:c;return{x:()=>WW(n,r,o,s,f,l,p)}}};function GW(n,t,e,r,o){let s=M(n,"dy","avgPoolBackprop"),c=M(t,"input","avgPoolBackprop");_(c.rank===s.rank,()=>`Rank of input (${c.rank}) does not match rank of dy (${s.rank})`);let l=c,p=s,f=!1;c.rank===3&&(f=!0,l=Q(c,[1,c.shape[0],c.shape[1],c.shape[2]]),p=Q(s,[1,s.shape[0],s.shape[1],s.shape[2]])),_(p.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${p.rank}.`),_(l.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${l.rank}.`);let m=T=>{let N=Kn(l.shape,e,r,1,o);return T.avgPoolBackprop(p,l,N)},y={dy:p,input:l},b={filterSize:e,strides:r,pad:o},v=X.runKernelFunc(m,y,null,af,b);return f?Q(v,[v.shape[1],v.shape[2],v.shape[3]]):v}let UW=j({avgPoolBackprop_:GW});let qW={kernelName:au,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{filterSize:o,strides:s,pad:c}=e;return{x:()=>UW(n,r,o,s,c)}}};let HW={kernelName:cf,inputsToSave:["a","b"],gradFunc:(n,t,e)=>{let[r,o]=t,{transposeA:s,transposeB:c}=e;return!s&&!c?{a:()=>ge(n,o,!1,!0),b:()=>ge(r,n,!0,!1)}:!s&&c?{a:()=>ge(n,o,!1,!1),b:()=>ge(n,r,!0,!1)}:s&&!c?{a:()=>ge(o,n,!1,!0),b:()=>ge(r,n,!1,!1)}:{a:()=>ge(o,n,!0,!0),b:()=>ge(n,r,!0,!0)}}};let jW={kernelName:Ub,gradFunc:(n,t,e)=>{let{blockShape:r,crops:o}=e;return{x:()=>up(n,r,o)}}};let KW={kernelName:qb,gradFunc:(n,t,e)=>{let r=e,o=r.inputShape,s=r.shape,c=Array.from(s);for(let p=o.length-1;p>=0;p--)if(o[p]===s[p])c[p]=1;else if(o[p]!==1)throw new Error(`broadcastTo(): [${o}] cannot be broadcast to [${s}].`);let l=[];for(let p=0;p1&&l.push(p);return{x:()=>zt(n,l,!0)}}};let XW={kernelName:cc,gradFunc:n=>({x:()=>n.clone()})};let YW={kernelName:cu,gradFunc:n=>({x:()=>re(n)})};let JW={kernelName:lu,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{clipValueMin:o,clipValueMax:s}=e;return{x:()=>Xn(Yr(ds(r,o),ai(r,s)),n,re(n))}}};let ZW={kernelName:uu,saveAllInputs:!0,gradFunc:(n,t,e)=>{let r=t.map(p=>p.shape),{axis:o}=e,s=Vt(o,t[0].shape)[0],c=r.map(p=>p[s]),l=Tr(n,c,s);return l.map(p=>()=>p)}};let QW={kernelName:uf,inputsToSave:["x","filter"],gradFunc:(n,t,e)=>{let[r,o]=t,{dilations:s,strides:c,pad:l,dataFormat:p}=e;return _(oi(s),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${s}'`),{x:()=>mw(r.shape,n,o,c,l,p),filter:()=>Uw(r,n,o.shape,c,l,p)}}};let t4={kernelName:pf,inputsToSave:["dy","filter"],gradFunc:(n,t,e)=>{let[r,o]=t,{strides:s,pad:c,dataFormat:l,dimRoundingMode:p}=e;return{dy:()=>fs(n,o,s,c,l,1,p),filter:()=>Uw(n,r,o.shape,s,c,l,p)}}};function e4(n,t,e,r,o){let s=n;n.rank===4&&(s=Q(n,[1,n.shape[0],n.shape[1],n.shape[2],n.shape[3]]));let c=t;c.rank===4&&(c=Q(t,[1,t.shape[0],t.shape[1],t.shape[2],t.shape[3]])),_(s.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${s.shape}.`),_(c.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${c.shape}.`),_(e.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${e}.`),_(s.shape[4]===e[3],()=>`Error in conv3dDerFilter: depth of input ${s.shape[4]}) must match input depth in filter (${e[3]}.`),_(c.shape[4]===e[4],()=>`Error in conv3dDerFilter: depth of dy (${c.shape[4]}) must match output depth for filter (${e[4]}).`);let l=m=>{let y=1,b=ri(s.shape,e,r,y,o);return m.conv3dDerFilter(s,c,b)},p={x:s,dy:c},f={strides:r,pad:o,filterShape:e};return X.runKernelFunc(l,p,null,jb,f)}let n4=j({conv3DBackpropFilter_:e4});let r4={kernelName:hf,inputsToSave:["x","filter"],gradFunc:(n,t,e)=>{let{dilations:r,strides:o,pad:s}=e;_(oi(r),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${r}'`);let[c,l]=t;return{x:()=>__(c.shape,n,l,o,s),filter:()=>n4(c,n,l.shape,o,s)}}};let o4={kernelName:lc,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(tn(kd($t(e,"float32"))),n)}}};let s4={kernelName:pu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(Nd($t(e,"float32")),n)}}};let i4={kernelName:Xb,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{axis:o,exclusive:s,reverse:c}=e;return{x:()=>{let l=ir([o],r.rank),p=pd(n,o,s,!c);return l!=null&&(p=Kt(p,l)),p}}}};let a4={kernelName:ff,inputsToSave:["x","filter"],gradFunc:(n,t,e)=>{let{dilations:r,strides:o,pad:s,dimRoundingMode:c}=e,l=r==null?[1,1]:r;_(oi(l),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${l}'`);let[p,f]=t;return _(p.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${p.rank}.`),_(f.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${f.rank}.`),_(p.shape[3]===f.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${p.shape[3]}) must match the inChannels dimension in filter ${f.shape[2]}.`),_(fn(o,l),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${l}'.`),c!=null&&_(gt(s),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${c} but got pad ${s}.`),{x:()=>aC(p.shape,n,f,o,s,r,c),filter:()=>iC(p,n,f.shape,o,s,r,c)}}};let c4={kernelName:df,inputsToSave:["x","filter"],gradFunc:(n,t,e)=>{let[r,o]=t,s={x:r,filter:o,dy:n},c={x:r,filter:o,dy:n};return{x:()=>X.runKernel(mf,s,e),filter:()=>X.runKernel(gf,c,e)}}};let l4={kernelName:uc,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=Bt(n,$t(r,"float32")),p=xn(e.shape,o);return p.length>0?Q(zt(l,p),e.shape):l},c=()=>{let l=nt(n,$t(e,"float32")),p=xn(r.shape,o);p.length>0&&(l=Q(zt(l,p),r.shape));let f=De(r);return tn(Bt(l,$t(f,"float32")))};return{a:s,b:c}}};let u4={kernelName:hu,outputsToSave:[!0],gradFunc:(n,t)=>{let[e]=t,r=s=>s.eluDer(n,e),o={dy:n,y:e};return{x:()=>X.runKernelFunc(r,o,null,iN)}}};let p4={kernelName:fu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t,r=nt(Ar(tn(De(e))),2/Math.sqrt(Math.PI));return{x:()=>nt(n,r)}}};let h4={kernelName:du,outputsToSave:[!0],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,e)}}};let f4={kernelName:mu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,Ar(e))}}};let d4={kernelName:gu,gradFunc:n=>({x:()=>re(n)})};let m4={kernelName:Qb,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=Bt(n,$t(r,"float32")),p=xn(e.shape,o);return p.length>0?Q(zt(l,p),e.shape):l},c=()=>{let l=nt(n,$t(e,"float32")),p=xn(r.shape,o);p.length>0&&(l=Q(zt(l,p),r.shape));let f=De(r);return tn(Bt(l,$t(f,"float32")))};return{a:s,b:c}}};let g4={kernelName:yu,inputsToSave:["x","mean","variance","scale"],gradFunc:(n,t,e)=>{let{varianceEpsilon:r}=e,[o,s,c,l]=t,p=l==null?Et(1):l,f=xn(s.shape,o.shape),m=[];if(s.rank===1){for(let E=0;Es.rank===1?Q(nt(nt(n,ii(Q(v,[1,1,1,s.shape[0]]),m)),p),o.shape):Q(nt(nt(n,v),p),o.shape),S=()=>{let E=nt(nt(v,Et(-1)),b);return s.rank===1&&(E=zt(E,f)),Q(E,s.shape)},D=()=>{let E=nt(nt(T,y),b);return s.rank===1&&(E=zt(E,f)),Q(E,s.shape)},I=()=>{let E=nt(y,v),L=nt(n,E);return s.rank===1&&(L=zt(L,f)),Q(L,s.shape)},P=()=>{let E=n;return s.rank===1&&(E=zt(E,f)),Q(E,s.shape)};return{x:N,mean:S,variance:D,scale:I,offset:P}}};let y4={kernelName:tx,inputsToSave:["x","indices"],gradFunc:(n,t,e)=>{let[r,o]=t,{axis:s}=e,c=Vt(s,r.shape)[0],l=()=>{let p=r.shape,f=o.size,m=p.slice(0,c),y=m.length,b=p.slice(s,p.length).slice(1),v=b.length,T=kC(0,y),N=kC(y+1,y+1+v),S=NC([m,[f],b]),D=Q(n,S),I=Q(o,[f]),P=NC([[y],T,N]),E=Kt(D,P),L=zw(E,I,r.shape[c]),B=Ju(P);return L=Kt(L,B),L};return{x:l,indices:()=>o}}};function kC(n,t){let e=[];for(let r=n;r{let[e,r]=t;return{a:()=>re(e),b:()=>re(r)}}};let x4={kernelName:bu,gradFunc:n=>({x:()=>$t(n,"float32")})};let w4={kernelName:xu,gradFunc:n=>({x:()=>re(n)})};let v4={kernelName:wu,gradFunc:n=>({x:()=>re(n)})};let T4={kernelName:vu,gradFunc:n=>({x:()=>re(n)})};let k4={kernelName:ku,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,Tt(e,1))}}};let N4={kernelName:Tu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,$t(e,"float32"))}}};let _4={kernelName:nx,inputsToSave:[],outputsToSave:[!0],gradFunc:(n,t,e)=>{let[r]=t,{axis:o}=e;return{logits:()=>{let s=!0,c=Ar(r);return Dt(n,nt(zt(n,o,s),c))}}}};function C4(n,t,e,r=5,o=1,s=1,c=.5){let l=m=>m.LRNGrad(e,n,t,r,o,s,c),p={x:n,y:t,dy:e},f={depthRadius:r,bias:o,alpha:s,beta:c};return X.runKernelFunc(l,p,null,mN,f)}let S4=j({localResponseNormalizationBackprop_:C4});let 
$4={kernelName:rx,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(n,t,e)=>{let[r,o]=t,{depthRadius:s,bias:c,alpha:l,beta:p}=e;return{x:()=>S4(r,o,n,s,c,l,p)}}};function _C(n,t,e,r){return t.rank{let o=nt(n,$t(po(e,t),n.dtype));return o}}}let CC={kernelName:Nu,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(n,t,e)=>{let r=e,{reductionIndices:o}=r,s=t[0],c=t[1],l=Vt(o,s.shape),p=_C(n,c,s,l);return{x:()=>p.x()}}};let I4={kernelName:ox,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=()=>nt(n,$t(ds(e,r),"float32")),s=()=>nt(n,$t(ap(e,r),"float32"));return{a:o,b:s}}};function E4(n,t,e,r,o,s=[1,1,1],c,l){let p=M(n,"dy","maxPool3dBackprop"),f=M(t,"input","maxPool3dBackprop"),m=M(e,"output","maxPool3dBackprop"),y=p,b=f,v=m,T=!1;f.rank===4&&(T=!0,y=Q(p,[1,p.shape[0],p.shape[1],p.shape[2],p.shape[3]]),b=Q(f,[1,f.shape[0],f.shape[1],f.shape[2],f.shape[3]]),v=Q(m,[1,m.shape[0],m.shape[1],m.shape[2],m.shape[3]])),_(y.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${y.rank}.`),_(b.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${b.rank}.`),_(v.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${v.rank}.`),_(fn(o,s),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${o} and dilations '${s}'`),l!=null&&_(gt(c),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${l} but got pad ${c}.`);let N=P=>{let E=tp(b.shape,r,o,s,c,l);return P.maxPool3dBackprop(y,b,v,E)},S={dy:y,input:b,output:v},D={filterSize:r,strides:o,dilations:s,pad:c,dimRoundingMode:l},I=X.runKernelFunc(N,S,null,gN,D);return T?Q(I,[I.shape[1],I.shape[2],I.shape[3],I.shape[4]]):I}let D4=j({maxPool3dBackprop_:E4});let A4={kernelName:sx,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(n,t,e)=>{let[r,o]=t,{filterSize:s,strides:c,dilations:l,pad:p,dimRoundingMode:f}=e,m=l==null?[1,1,1]:l;return{x:()=>D4(n,r,o,s,c,m,p,f)}}};function F4(n,t,e,r,o,s,c){let l=M(n,"dy","maxPoolBackprop"),p=M(t,"input","maxPoolBackprop"),f=M(e,"output","maxPoolBackprop");_(p.rank===l.rank,()=>`Rank of input (${p.rank}) does not match rank of dy (${l.rank})`),_(l.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${l.rank}.`),_(p.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${p.rank}.`),c!=null&&_(gt(s),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${c} but got pad ${s}.`);let m=v=>{let T=Kn(p.shape,r,o,1,s,c);return v.maxPoolBackprop(l,p,f,T)},y={dy:l,input:p,output:f},b={filterSize:r,strides:o,pad:s,dimRoundingMode:c};return X.runKernelFunc(m,y,null,Tf,b)}let R4=j({maxPoolBackprop_:F4});let P4={kernelName:_u,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(n,t,e)=>{let[r,o]=t,{filterSize:s,strides:c,pad:l}=e;return{x:()=>R4(n,r,o,s,c,l)}}};let O4={kernelName:ax,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(n,t,e)=>{let r=e,{axis:o}=r,[s,c]=t,l=Vt(o,s.shape),p=_C(n,c,s,l);return{x:()=>p.x()}}};let L4={kernelName:cx,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=()=>nt(n,$t(ai(e,r),"float32")),s=()=>nt(n,$t(Fr(e,r),"float32"));return{a:o,b:s}}};let M4={kernelName:Cu,inputsToSave:["x"],gradFunc:(n,t,e)=>{let r=t[0],{paddings:o}=e,s=o.map(c=>c[0]);return{x:()=>ce(n,s,r.shape)}}};let B4={kernelName:lx,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=xn(e.shape,o);return l.length>0?Q(zt(n,l),e.shape):n},c=()=>{let l=nt(n,tn(Sc(Bt(e,r)))),p=xn(r.shape,o);return 
p.length>0?Q(zt(l,p),r.shape):l};return{a:s,b:c}}};let z4={kernelName:pc,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=nt(n,$t(r,"float32")),p=xn(e.shape,o);return p.length>0?Q(zt(l,p),e.shape):l},c=()=>{let l=nt(n,$t(e,"float32")),p=xn(r.shape,o);return p.length>0?Q(zt(l,p),r.shape):l};return{a:s,b:c}}};let W4={kernelName:ux,gradFunc:n=>({x:()=>tn(n)})};let V4={kernelName:fx,inputsToSave:["indices"],gradFunc:(n,t)=>{let e=t[0];return{indices:()=>xe(e.shape,"float32")}}};let G4={kernelName:hx,gradFunc:n=>({x:()=>re(n)})};let SC={kernelName:Cf,inputsToSave:["x"],gradFunc:(n,t,e)=>{let r=t[0],{paddings:o}=e,s=o.map(c=>c[0]);return{x:()=>ce(n,s,r.shape)}}};let U4={kernelName:dx,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(n,t)=>{let[e,r,o]=t,s=e,c=r,l=le(s.shape,c.shape),p=()=>{let m=$t(c,"float32"),y=nt(n,nt(m,fo(s,Dt(m,Et(1))))),b=xn(s.shape,l);return b.length>0&&(y=zt(y,b)),Q(y,s.shape)},f=()=>{let m=Fr(s,0),y=Xn(m,wr(s),re(s)),b=nt(n,nt(o,y)),v=xn(c.shape,l);return v.length>0&&(b=zt(b,v)),Q(b,c.shape)};return{a:p,b:f}}};let q4={kernelName:Sf,inputsToSave:["x","alpha"],gradFunc:(n,t)=>{let[e,r]=t,o=Fr(e,0);return{x:()=>Xn(o,n,nt(n,r)),alpha:()=>{let s=Xn(o,re(n),nt(n,e)),c=xn(r.shape,n.shape);return c.length>0&&(s=zt(s,c)),Q(s,r.shape)}}}};let H4={kernelName:$u,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,tn(De(e)))}}};let j4={kernelName:Du,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t,r=nt(ai(e,6),Pc(e));return{x:()=>nt(n,$t(r,"float32"))}}};let K4={kernelName:Iu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,$t(Pc(e),"float32"))}}};let X4={kernelName:Eu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Q(n,e.shape)}}};let Y4={kernelName:gx,inputsToSave:["images"],gradFunc:(n,t,e)=>{let[r]=t,o=l=>{let{alignCorners:p}=e;return l.resizeBilinearBackprop(n,r,p)},s={images:r},c=()=>X.runKernelFunc(o,s,null,wN,e);return{images:c}}};let J4={kernelName:mx,inputsToSave:["images"],gradFunc:(n,t,e)=>{let[r]=t,o=l=>{let{alignCorners:p}=e;return l.resizeNearestNeighborBackprop(n,r,p)},s={images:r},c=()=>X.runKernelFunc(o,s,null,xN,e);return{images:c}}};let Z4={kernelName:yx,gradFunc:(n,t,e)=>{let{dims:r}=e,o=Vt(r,n.shape);return{x:()=>Rr(n,o)}}};let Q4={kernelName:Au,gradFunc:n=>({x:()=>re(n)})};let tV={kernelName:Fu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>tn(Bt(n,nt(fo(e,1.5),2)))}}};let eV={kernelName:bx,inputsToSave:["condition"],gradFunc:(n,t)=>{let[e]=t;return{condition:()=>$t(re(e),"float32"),t:()=>nt(n,$t(e,n.dtype)),e:()=>nt(n,$t(cp(e),n.dtype))}}};let nV={kernelName:Ru,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>{let r=Fr(e,Et(0)),o=Et(zd),s=Et(Wd),c=nt(n,s),l=nt(nt(n,o),Ar($t(e,"float32")));return Xn(r,c,l)}}}};let rV={kernelName:Lu,outputsToSave:[!0],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,nt(e,Dt(Et(1),e)))}}};let oV={kernelName:Ou,gradFunc:n=>({x:()=>re(n)})};let sV={kernelName:hc,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(op($t(e,"float32")),n)}}};let iV={kernelName:Pu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(ud($t(e,"float32")),n)}}};let aV={kernelName:If,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{begin:o,size:s}=e,c=r.shape,[l,p]=Qf(r,o,s),f=[];for(let m=0;mWo(n,f)}}};let cV={kernelName:vx,outputsToSave:[!0],gradFunc:(n,t,e)=>{let[r]=t,{dim:o}=e,s=!0,c=nt(n,r);return{logits:()=>Dt(c,nt(zt(c,[o],s),r))}}};let lV={kernelName:Mu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,Bo(e))}}};let 
$C={kernelName:Ef,gradFunc:(n,t,e)=>{let{blockShape:r,paddings:o}=e;return{x:()=>np(n,r,o)}}};let IC={kernelName:wx,gradFunc:(n,t,e)=>{let{axis:r}=e;return{x:()=>Qe(n,r)}}};let uV={kernelName:Bu,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,nt(Pn($t(e,"float32")),2))}}};let pV={kernelName:Df,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(n,nt($t(e,"float32"),2))}}};let hV={kernelName:fc,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=Et(2),s=()=>nt(n,nt(o,Dt(e,r))),c=()=>nt(n,nt(o,Dt(r,e)));return{a:s,b:c}}};let fV={kernelName:Vu,gradFunc:n=>({x:()=>re(n)})};let dV={kernelName:dc,inputsToSave:["a","b"],gradFunc:(n,t)=>{let[e,r]=t,o=le(e.shape,r.shape),s=()=>{let l=n,p=xn(e.shape,o);return p.length>0&&(l=zt(l,p)),Q(l,e.shape)},c=()=>{let l=n,p=xn(r.shape,o);return p.length>0&&(l=zt(l,p)),Q(tn(l),r.shape)};return{a:s,b:c}}};let mV={kernelName:xx,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,o=r.shape.slice(),{axis:s}=e,c=Vt(s,r.shape);c.forEach(f=>{o[f]=1});let l=Q(n,o),p=nt(l,ho(r.shape,"float32"));return{x:()=>p}}};let gV={kernelName:mc,inputsToSave:["x"],gradFunc:(n,t)=>{let[e]=t;return{x:()=>Bt(n,De(op(e)))}}};let yV={kernelName:zu,outputsToSave:[!0],gradFunc:(n,t)=>{let[e]=t;return{x:()=>nt(Dt(Et(1),De(e)),n)}}};let bV={kernelName:Tx,inputsToSave:["x"],gradFunc:(n,t,e)=>{let[r]=t,{reps:o}=e,s=()=>{let c=re(r);if(r.rank===1)for(let l=0;l{let r=e,{perm:o}=r,s=Ju(o);return{x:()=>Kt(n,s)}}};let wV={kernelName:kx,gradFunc:(n,t,e)=>{let r=e,{axis:o}=r;return{value:()=>ur(n,o)}}};let vV={kernelName:Nx,inputsToSave:["segmentIds"],gradFunc:(n,t)=>{let[e]=t,r=()=>TV(n,e);return{x:r}}};function TV(n,t){let e=Xr(t,re(t)),r=$c(n,e),o=ds(t,Et(0,"int32")),s=r.rank-o.rank;for(let l=0;l({x:()=>re(n)})};let NV=[$W,IW,EW,DW,AW,FW,RW,PW,OW,LW,MW,BW,VW,qW,HW,jW,KW,XW,YW,JW,ZW,t4,QW,r4,o4,s4,i4,a4,c4,l4,u4,p4,h4,f4,m4,d4,g4,y4,b4,x4,w4,v4,T4,k4,N4,_4,$4,CC,CC,I4,A4,P4,O4,L4,M4,B4,z4,W4,V4,G4,SC,SC,U4,q4,H4,j4,K4,X4,Y4,J4,Z4,Q4,tV,eV,nV,rV,oV,sV,iV,aV,cV,lV,$C,$C,IC,IC,uV,hV,pV,fV,dV,mV,gV,yV,bV,xV,wV,vV,kV];for(let n of NV)_N(n);ot.prototype.abs=function(){return this.throwIfDisposed(),bn(this)};ot.prototype.acos=function(){return this.throwIfDisposed(),nw(this)};ot.prototype.acosh=function(){return this.throwIfDisposed(),rw(this)};ot.prototype.addStrict=function(n){return this.throwIfDisposed(),H_(this,n)};ot.prototype.add=function(n){return this.throwIfDisposed(),Tt(this,n)};ot.prototype.all=function(n,t){return this.throwIfDisposed(),rd(this,n,t)};ot.prototype.any=function(n,t){return this.throwIfDisposed(),Zu(this,n,t)};ot.prototype.argMax=function(n){return this.throwIfDisposed(),Qu(this,n)};ot.prototype.argMin=function(n){return this.throwIfDisposed(),sw(this,n)};ot.prototype.asScalar=function(){return this.throwIfDisposed(),_(this.size===1,()=>"The array must have only 1 element."),Q(this,[])};ot.prototype.asType=function(n){return this.throwIfDisposed(),$t(this,n)};ot.prototype.as1D=function(){return this.throwIfDisposed(),Q(this,[this.size])};ot.prototype.as2D=function(n,t){return this.throwIfDisposed(),Q(this,[n,t])};ot.prototype.as3D=function(n,t,e){return this.throwIfDisposed(),Q(this,[n,t,e])};ot.prototype.as4D=function(n,t,e,r){return this.throwIfDisposed(),Q(this,[n,t,e,r])};ot.prototype.as5D=function(n,t,e,r,o){return this.throwIfDisposed(),Q(this,[n,t,e,r,o])};ot.prototype.asin=function(){return this.throwIfDisposed(),iw(this)};ot.prototype.asinh=function(){return this.throwIfDisposed(),aw(this)};ot.prototype.atan=function(){return 
this.throwIfDisposed(),cw(this)};ot.prototype.atan2=function(n){return this.throwIfDisposed(),lw(this,n)};ot.prototype.atanh=function(){return this.throwIfDisposed(),uw(this)};ot.prototype.avgPool=function(n,t,e,r){return this.throwIfDisposed(),ep(this,n,t,e,r)};ot.prototype.batchToSpaceND=function(n,t){return this.throwIfDisposed(),np(this,n,t)};ot.prototype.batchNorm=function(n,t,e,r,o){return this.throwIfDisposed(),ea(this,n,t,e,r,o)};ot.prototype.broadcastTo=function(n){return this.throwIfDisposed(),rp(this,n)};ot.prototype.cast=function(n){return this.throwIfDisposed(),$t(this,n)};ot.prototype.ceil=function(){return this.throwIfDisposed(),dw(this)};ot.prototype.clipByValue=function(n,t){return this.throwIfDisposed(),ar(this,n,t)};ot.prototype.concat=function(n,t){return this.throwIfDisposed(),n instanceof ot&&(n=[n]),Qe([this,...n],t)};ot.prototype.conv1d=function(n,t,e,r,o,s){return this.throwIfDisposed(),cd(this,n,t,e,r,o,s)};ot.prototype.conv2dTranspose=function(n,t,e,r,o){return this.throwIfDisposed(),ld(this,n,t,e,r,o)};ot.prototype.conv2d=function(n,t,e,r,o,s){return this.throwIfDisposed(),fs(this,n,t,e,r,o,s)};ot.prototype.cos=function(){return this.throwIfDisposed(),op(this)};ot.prototype.cosh=function(){return this.throwIfDisposed(),ud(this)};ot.prototype.cumsum=function(n,t,e){return this.throwIfDisposed(),pd(this,n,t,e)};ot.prototype.depthToSpace=function(n,t){return this.throwIfDisposed(),yw(this,n,t)};ot.prototype.depthwiseConv2D=function(n,t,e,r,o,s){return yn("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),na(this,n,t,e,r,o,s)};ot.prototype.depthwiseConv2d=function(n,t,e,r,o,s){return this.throwIfDisposed(),na(this,n,t,e,r,o,s)};ot.prototype.dilation2d=function(n,t,e,r,o){return this.throwIfDisposed(),bw(this,n,t,e,r,o)};ot.prototype.divNoNan=function(n){return this.throwIfDisposed(),xw(this,n)};ot.prototype.divStrict=function(n){return this.throwIfDisposed(),j_(this,n)};ot.prototype.div=function(n){return this.throwIfDisposed(),Bt(this,n)};ot.prototype.dot=function(n){return this.throwIfDisposed(),C_(this,n)};ot.prototype.elu=function(){return this.throwIfDisposed(),_c(this)};ot.prototype.equalStrict=function(n){return this.throwIfDisposed(),z_(this,n)};ot.prototype.equal=function(n){return this.throwIfDisposed(),po(this,n)};ot.prototype.erf=function(){return this.throwIfDisposed(),ww(this)};ot.prototype.exp=function(){return this.throwIfDisposed(),Ar(this)};ot.prototype.expandDims=function(n){return this.throwIfDisposed(),cr(this,n)};ot.prototype.expm1=function(){return this.throwIfDisposed(),vw(this)};ot.prototype.fft=function(){return this.throwIfDisposed(),dp(this)};ot.prototype.flatten=function(){return this.throwIfDisposed(),Q(this,[this.size])};ot.prototype.floor=function(){return this.throwIfDisposed(),Sc(this)};ot.prototype.floorDiv=function(n){return this.throwIfDisposed(),nd(this,n)};ot.prototype.gather=function(n,t){return this.throwIfDisposed(),$c(this,n,t)};ot.prototype.greaterEqualStrict=function(n){return this.throwIfDisposed(),W_(this,n)};ot.prototype.greaterEqual=function(n){return this.throwIfDisposed(),ds(this,n)};ot.prototype.greaterStrict=function(n){return this.throwIfDisposed(),V_(this,n)};ot.prototype.greater=function(n){return this.throwIfDisposed(),Fr(this,n)};ot.prototype.ifft=function(){return this.throwIfDisposed(),Rc(this)};ot.prototype.irfft=function(){return this.throwIfDisposed(),Sd(this)};ot.prototype.isFinite=function(){return this.throwIfDisposed(),$_(this)};ot.prototype.isInf=function(){return 
this.throwIfDisposed(),I_(this)};ot.prototype.isNaN=function(){return this.throwIfDisposed(),E_(this)};ot.prototype.leakyRelu=function(n){return this.throwIfDisposed(),fd(this,n)};ot.prototype.lessEqualStrict=function(n){return this.throwIfDisposed(),G_(this,n)};ot.prototype.lessEqual=function(n){return this.throwIfDisposed(),ai(this,n)};ot.prototype.lessStrict=function(n){return this.throwIfDisposed(),U_(this,n)};ot.prototype.less=function(n){return this.throwIfDisposed(),ap(this,n)};ot.prototype.localResponseNormalization=function(n,t,e,r){return this.throwIfDisposed(),kw(this,n,t,e,r)};ot.prototype.logSigmoid=function(){return this.throwIfDisposed(),A_(this)};ot.prototype.logSoftmax=function(n){return this.throwIfDisposed(),gd(this,n)};ot.prototype.logSumExp=function(n,t){return this.throwIfDisposed(),_w(this,n,t)};ot.prototype.log=function(){return this.throwIfDisposed(),wr(this)};ot.prototype.log1p=function(){return this.throwIfDisposed(),dd(this)};ot.prototype.logicalAnd=function(n){return this.throwIfDisposed(),Yr(this,n)};ot.prototype.logicalNot=function(){return this.throwIfDisposed(),cp(this)};ot.prototype.logicalOr=function(n){return this.throwIfDisposed(),yd(this,n)};ot.prototype.logicalXor=function(n){return this.throwIfDisposed(),F_(this,n)};ot.prototype.matMul=function(n,t,e){return this.throwIfDisposed(),ge(this,n,t,e)};ot.prototype.maxPool=function(n,t,e,r){return this.throwIfDisposed(),lp(this,n,t,e,r)};ot.prototype.max=function(n,t){return this.throwIfDisposed(),lr(this,n,t)};ot.prototype.maximumStrict=function(n){return this.throwIfDisposed(),K_(this,n)};ot.prototype.maximum=function(n){return this.throwIfDisposed(),Xr(this,n)};ot.prototype.mean=function(n,t){return this.throwIfDisposed(),en(this,n,t)};ot.prototype.min=function(n,t){return this.throwIfDisposed(),Ec(this,n,t)};ot.prototype.minimumStrict=function(n){return this.throwIfDisposed(),X_(this,n)};ot.prototype.minimum=function(n){return this.throwIfDisposed(),oa(this,n)};ot.prototype.mirrorPad=function(n,t){return this.throwIfDisposed(),Sw(this,n,t)};ot.prototype.modStrict=function(n){return this.throwIfDisposed(),Y_(this,n)};ot.prototype.mod=function(n){return this.throwIfDisposed(),bd(this,n)};ot.prototype.mulStrict=function(n){return this.throwIfDisposed(),J_(this,n)};ot.prototype.mul=function(n){return this.throwIfDisposed(),nt(this,n)};ot.prototype.neg=function(){return this.throwIfDisposed(),tn(this)};ot.prototype.norm=function(n,t,e){return this.throwIfDisposed(),Ed(this,n,t,e)};ot.prototype.notEqualStrict=function(n){return this.throwIfDisposed(),q_(this,n)};ot.prototype.notEqual=function(n){return this.throwIfDisposed(),ci(this,n)};ot.prototype.oneHot=function(n,t=1,e=0){return this.throwIfDisposed(),Zi(this,n,t,e)};ot.prototype.onesLike=function(){return this.throwIfDisposed(),qn(this)};ot.prototype.pad=function(n,t){return this.throwIfDisposed(),Wo(this,n,t)};ot.prototype.pool=function(n,t,e,r,o){return this.throwIfDisposed(),O_(this,n,t,e,r,o)};ot.prototype.powStrict=function(n){return this.throwIfDisposed(),Z_(this,n)};ot.prototype.pow=function(n){return this.throwIfDisposed(),fo(this,n)};ot.prototype.prelu=function(n){return this.throwIfDisposed(),pp(this,n)};ot.prototype.prod=function(n,t){return this.throwIfDisposed(),wd(this,n,t)};ot.prototype.reciprocal=function(){return this.throwIfDisposed(),Dw(this)};ot.prototype.relu=function(){return this.throwIfDisposed(),Vo(this)};ot.prototype.relu6=function(){return this.throwIfDisposed(),Aw(this)};ot.prototype.reshapeAs=function(n){return 
this.throwIfDisposed(),Q(this,n.shape)};ot.prototype.reshape=function(n){return this.throwIfDisposed(),Q(this,n)};ot.prototype.resizeBilinear=function(n,t){return this.throwIfDisposed(),hC(this,n,t)};ot.prototype.resizeNearestNeighbor=function(n,t){return this.throwIfDisposed(),fC(this,n,t)};ot.prototype.reverse=function(n){return this.throwIfDisposed(),Rr(this,n)};ot.prototype.rfft=function(){return this.throwIfDisposed(),mp(this)};ot.prototype.round=function(){return this.throwIfDisposed(),Fw(this)};ot.prototype.rsqrt=function(){return this.throwIfDisposed(),vd(this)};ot.prototype.selu=function(){return this.throwIfDisposed(),Td(this)};ot.prototype.separableConv2d=function(n,t,e,r,o,s){return this.throwIfDisposed(),Rw(this,n,t,e,r,o,s)};ot.prototype.sigmoid=function(){return this.throwIfDisposed(),Bo(this)};ot.prototype.sign=function(){return this.throwIfDisposed(),Pw(this)};ot.prototype.sin=function(){return this.throwIfDisposed(),kd(this)};ot.prototype.sinh=function(){return this.throwIfDisposed(),Nd(this)};ot.prototype.slice=function(n,t){return this.throwIfDisposed(),ce(this,n,t)};ot.prototype.softmax=function(n){return this.throwIfDisposed(),ca(this,n)};ot.prototype.softplus=function(){return this.throwIfDisposed(),Ic(this)};ot.prototype.spaceToBatchND=function(n,t){return this.throwIfDisposed(),up(this,n,t)};ot.prototype.split=function(n,t){return this.throwIfDisposed(),Tr(this,n,t)};ot.prototype.sqrt=function(){return this.throwIfDisposed(),Pn(this)};ot.prototype.square=function(){return this.throwIfDisposed(),De(this)};ot.prototype.squaredDifference=function(n){return this.throwIfDisposed(),gp(this,n)};ot.prototype.squaredDifferenceStrict=function(n){return this.throwIfDisposed(),Q_(this,n)};ot.prototype.squeeze=function(n){return this.throwIfDisposed(),li(this,n)};ot.prototype.stack=function(n,t){this.throwIfDisposed();let e=n instanceof ot?[this,n]:[this,...n];return ur(e,t)};ot.prototype.step=function(n){return this.throwIfDisposed(),Pc(this,n)};ot.prototype.stridedSlice=function(n,t,e,r,o,s,c,l){return this.throwIfDisposed(),Lw(this,n,t,e,r,o,s,c,l)};ot.prototype.subStrict=function(n){return this.throwIfDisposed(),tC(this,n)};ot.prototype.sub=function(n){return this.throwIfDisposed(),Dt(this,n)};ot.prototype.sum=function(n,t){return this.throwIfDisposed(),zt(this,n,t)};ot.prototype.tan=function(){return this.throwIfDisposed(),Mw(this)};ot.prototype.tanh=function(){return this.throwIfDisposed(),Nc(this)};ot.prototype.tile=function(n){return this.throwIfDisposed(),ii(this,n)};ot.prototype.toBool=function(){return this.throwIfDisposed(),$t(this,"bool")};ot.prototype.toFloat=function(){return this.throwIfDisposed(),$t(this,"float32")};ot.prototype.toInt=function(){return this.throwIfDisposed(),$t(this,"int32")};ot.prototype.topk=function(n,t){return this.throwIfDisposed(),Bw(this,n,t)};ot.prototype.transpose=function(n){return this.throwIfDisposed(),Kt(this,n)};ot.prototype.unique=function(n){return this.throwIfDisposed(),$d(this,n)};ot.prototype.unsortedSegmentSum=function(n,t){return this.throwIfDisposed(),zw(this,n,t)};ot.prototype.unstack=function(n){return this.throwIfDisposed(),mo(this,n)};ot.prototype.where=function(n,t){return this.throwIfDisposed(),Xn(n,this,t)};ot.prototype.zerosLike=function(){return this.throwIfDisposed(),re(this)};let Vd;function wn(){return Vd==null&&(Vd=m_().epsilon()),Vd}function vot(n){Vd=n}function go(){return"channelsLast"}class Go extends Error{constructor(t){super(t);Object.setPrototypeOf(this,Go.prototype)}}class Jr extends 
Error{constructor(t){super(t);Object.setPrototypeOf(this,Jr.prototype)}}class Y extends Error{constructor(t){super(t);Object.setPrototypeOf(this,Y.prototype)}}class Ut extends Error{constructor(t){super(t);Object.setPrototypeOf(this,Ut.prototype)}}class cv extends Error{constructor(t){super(t);Object.setPrototypeOf(this,cv.prototype)}}class EC extends Error{constructor(t){super(t);Object.setPrototypeOf(this,EC.prototype)}}function pa(n,t){if(Array.isArray(n)){let e=[];for(let r=0;re.toUpperCase())}let Zr={};function lv(n){if(n==null)return null;let t={};return t.className=n.getClassName(),t.config=n.getConfig(),t}function uv(n){if(n==null||typeof n!="object")return;if(Array.isArray(n))n.forEach(t=>uv(t));else{let t=Object.keys(n);for(let e of t){let r=n[e];r!=null&&typeof r=="object"&&(!Array.isArray(r)&&r.type==="ndarray"&&typeof r.value=="number"?n[e]=r.value:uv(r))}}}function Sp(n,t={},e={},r="object",o=!1){if(typeof n=="string"){let s=n,c;if(s in e)c=e[s];else if(s in Zr)c=Zr[s];else if(c=t[s],c==null)throw new Y(`Unknown ${r}: ${n}. This may be due to one of the following reasons: +1. The ${r} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. +2. The custom ${r} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);return c}else{let s=n;if(s.className==null||s.config==null)throw new Y(`${r}: Improper config format: ${JSON.stringify(s)}. +'className' and 'config' must set.`);let c=s.className,l,p;if(c in e?[l,p]=e[c]:c in Zr?[l,p]=Zr.className:c in t&&([l,p]=t[c]),l==null)throw new Y(`Unknown ${r}: ${c}. This may be due to one of the following reasons: +1. The ${r} is defined in Python, in which case it needs to be ported to TensorFlow.js or your JavaScript code. +2. 
The custom ${r} is defined in JavaScript, but is not registered properly with tf.serialization.registerClass().`);if(p!=null){let f={};for(let v of Object.keys(Zr))f[v]=Zr[v];for(let v of Object.keys(e))f[v]=e[v];let m=s.config;m.customObjects=f;let y=Object.assign({},Zr);for(let v of Object.keys(e))Zr[v]=e[v];uv(s.config);let b=p(l,s.config,e,o);return Zr=Object.assign({},y),b}else{let f=Object.assign({},Zr);for(let y of Object.keys(e))Zr[y]=e[y];let m=new l(s.config);return Zr=Object.assign({},f),m}}}function _V(n,t){return nt?1:0}function Gd(n,t){return-1*_V(n,t)}function kot(n){switch(n){case"float32":return"float32";default:throw new Y(`Invalid dtype: ${n}`)}}function Not(n,t){if(n==null||t==null)return n===t;if(n.length!==t.length)return!1;for(let e=0;e=0),Pr(r>=e),Array.isArray(n)&&n.length>=e&&n.length<=r&&n.every(o=>typeof o===t)}function $n(n,t){Array.isArray(n)?(_(n.length>0,()=>`${t} is unexpectedly an empty array.`),n.forEach((e,r)=>$n(e,`element ${r+1} of ${t}`))):_(Number.isInteger(n)&&n>0,()=>`Expected ${t} to be a positive integer, but got ${AC(n)}.`)}function AC(n){return n===null?"null":Array.isArray(n)?"["+n.map(t=>AC(t)).join(",")+"]":typeof n=="string"?`"${n}"`:`${n}`}function SV(n,t){let e=or(),r,o=(...s)=>{let c=or();return c-e0,"arrayOfValues is empty");for(let t of n)Pr(Array.isArray(t),"one of the values is not an array"),Pr(t.length>0,"one of the values is empty");return n.reduce((t,e)=>t.length===0?e.map(r=>[r]):e.map(r=>t.map(o=>[...o,r])).reduce((r,o)=>r.concat(o),[]),[])}function hv(n,t){return rt(()=>Pn(zt(nt(n,n),t,!0)))}class $p extends Qi{getConfig(){return{}}}class fv extends $p{constructor(t){super();this.defaultMaxValue=2,this.defaultAxis=0,this.maxValue=t.maxValue!=null?t.maxValue:this.defaultMaxValue,this.axis=t.axis!=null?t.axis:this.defaultAxis}apply(t){return rt(()=>{let e=hv(t,this.axis),r=ar(e,0,this.maxValue);return nt(t,Bt(r,Tt(wn(),e)))})}getConfig(){return{maxValue:this.maxValue,axis:this.axis}}}fv.className="MaxNorm",vt(fv);class dv extends $p{constructor(t){super();this.defaultAxis=0,this.axis=t.axis!=null?t.axis:this.defaultAxis}apply(t){return rt(()=>Bt(t,Tt(wn(),hv(t,this.axis))))}getConfig(){return{axis:this.axis}}}dv.className="UnitNorm",vt(dv);class mv extends $p{apply(t){return Vo(t)}}mv.className="NonNeg",vt(mv);class gv extends $p{constructor(t){super();this.defaultMinValue=0,this.defaultMaxValue=1,this.defaultRate=1,this.defaultAxis=0,this.minValue=t.minValue!=null?t.minValue:this.defaultMinValue,this.maxValue=t.maxValue!=null?t.maxValue:this.defaultMaxValue,this.rate=t.rate!=null?t.rate:this.defaultRate,this.axis=t.axis!=null?t.axis:this.defaultAxis}apply(t){return rt(()=>{let e=hv(t,this.axis),r=Tt(nt(this.rate,ar(e,this.minValue,this.maxValue)),nt(1-this.rate,e));return nt(t,Bt(r,Tt(wn(),e)))})}getConfig(){return{minValue:this.minValue,maxValue:this.maxValue,rate:this.rate,axis:this.axis}}}gv.className="MinMaxNorm",vt(gv);let RC={maxNorm:"MaxNorm",minMaxNorm:"MinMaxNorm",nonNeg:"NonNeg",unitNorm:"UnitNorm"};function vn(n){return lv(n)}function PC(n,t={}){return Sp(n,Dr.getMap().classNameMap,t,"constraint")}function Tn(n){if(n==null)return null;if(typeof n=="string"){let t=n in RC?RC[n]:n,e={className:t,config:{}};return PC(e)}else return n instanceof $p?n:PC(n)}function $V(n){return new fv(n)}function IV(n){return new dv(n)}function EV(){return new mv}function DV(n){return new gv(n)}var AV=Object.freeze({__proto__:null,maxNorm:$V,unitNorm:IV,nonNeg:EV,minMaxNorm:DV});let 
FV=["channelsFirst","channelsLast"],RV=["valid","same","causal"],PV=["max","avg"],OV=["sum","mul","concat","ave"],Cot=["temporal"];let Wc=new Map;function nn(n){zc(FV,"DataFormat",n)}function Or(n){zc(RV,"PaddingMode",n)}function OC(n){zc(PV,"PoolMode",n)}let Ip=[],LC="/";function fa(n,t){Ip.push(n);try{let e=t();return Ip.pop(),e}catch(e){throw Ip.pop(),e}}function LV(){return Ip.length===0?"":Ip.join(LC)+LC}function MC(n){if(!zC(n))throw new Error("Not a valid tensor name: '"+n+"'");return LV()+n}function BC(n){if(!zC(n))throw new Error("Not a valid tensor name: '"+n+"'");Wc.has(n)||Wc.set(n,0);let t=Wc.get(n);if(Wc.set(n,Wc.get(n)+1),t>0){let e=`${n}_${t}`;return Wc.set(e,1),e}else return n}let MV=new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/);function zC(n){return!!n.match(MV)}function BV(n){return n===parseInt(n.toString(),10)}function fi(n,t,e){t==null&&(t=0),e==null&&(e=n.length);let r=1;for(let o=t;oo-s),e=Math.floor((t.length-1)/2),r=Math.ceil((t.length-1)/2);return e===r?t[e]:(t[e]+t[r])/2}function yo(n,t){if(t0?t.reduce((e,r)=>e*r):1}function Ep(n,t){return n.asType(t)}function Dp(n,t=-1){let e=n.shape.slice();return t<0&&(t=e.length+t+1),e.splice(t,0,1),n.reshape(e)}function VV(n,t){return rt(()=>{if(n.shape.length!==2)throw new Y(`repeat() expects a rank-2 tensor, but received a rank-${n.shape.length} tensor.`);let e=Dp(n,1);return xv(e,[1,t,1])})}function GV(n){let t=[fi(n.shape)];return n.reshape(t)}function UV(n){if(n.rank<=1)throw new Y(`batchFlatten requires a minimum rank of 2. Got rank: ${n.rank}.`);let t=[n.shape[0],fi(n.shape,1)];return n.reshape(t)}function da(n,t,e){return rt(()=>{switch(n.rank){case 1:return _d(n,t,e);case 2:return Ow(n,[t,0],[e,n.shape[1]]);case 3:return Cd(n,[t,0,0],[e,n.shape[1],n.shape[2]]);case 4:return fp(n,[t,0,0,0],[e,n.shape[1],n.shape[2],n.shape[3]]);case 5:return ce(n,[t,0,0,0,0],[e,n.shape[1],n.shape[2],n.shape[3],n.shape[4]]);case 6:return ce(n,[t,0,0,0,0,0],[e,n.shape[1],n.shape[2],n.shape[3],n.shape[4],n.shape[5]]);default:throw new Y(`sliceAlongFirstAxis() received an unsupported tensor rank: ${n.rank}`)}})}function yv(n,t,e){return rt(()=>{switch(n.rank){case 1:return _d(n,t,e);case 2:return Ow(n,[0,t],[n.shape[0],e]);case 3:return Cd(n,[0,0,t],[n.shape[0],n.shape[1],e]);case 4:return fp(n,[0,0,0,t],[n.shape[0],n.shape[1],n.shape[2],e]);default:throw new Y(`sliceAlongLastAxis() received an unsupported tensor rank: ${n.rank}`)}})}function qd(n,t,e,r){return rt(()=>{switch(n.rank){case 1:return _d(n,t,e);case 2:switch(r){case 1:return da(n,t,e);case 2:return yv(n,t,e);default:throw new Y(`The axis is not within the rank of the tensor ${r}`)}case 3:switch(r){case 1:return da(n,t,e);case 2:return Cd(n,[0,t,0],[n.shape[0],e,n.shape[2]]);case 3:return yv(n,t,e);default:throw new Y(`The axis is not within the rank of the tensor ${r}`)}case 4:switch(r){case 1:return da(n,t,e);case 2:return fp(n,[0,t,0,0],[n.shape[0],e,n.shape[2],n.shape[3]]);case 3:return fp(n,[0,0,t,0],[n.shape[0],n.shape[1],e,n.shape[3]]);case 4:return yv(n,t,e);default:throw new Y(`The axis is not within the rank of the tensor ${r}`)}default:throw new Y(`sliceAlongLastAxis() received an unsupported tensor rank: ${n.rank}`)}})}function bv(n,t=-1){let e;return t<0&&(e=n[0].rank,e!==0?t=e:t=0),t===n[0].rank&&(t=-1),Qe(n,t)}function VC(n,t){switch(n.rank){case 1:return v_([n,t]);case 2:return T_([n,t],0);case 3:return k_([n,t],0);case 4:return N_([n,t],0);default:throw new Y(`concatAlongFirstAxis() received an unsupported tensor rank: ${n.rank}`)}}function 
xv(n,t){if(Array.isArray(t)||(t=[t]),n.rank!==t.length)throw new Y(`The length of input n (${t.length}) does not match the number of dimensions in input x (${n.rank})`);return ii(n,t)}function Hd(n,t=0,e=1,r,o){return Ew(n,t,e,r,o)}function Uo(n,t,e,r){if(n.rank<2||t.rank<2)throw new Ut(`dot requires both inputs to be rank >= 2 but got x shape = ${n.shape} and y shape = ${t.shape}`);if(t.rank>=3){let o=n.shape.slice(-1)[0],s=t.shape.slice(-2)[0];if(o!==s)throw new Ut(`If rank y >= 3, then the second last dim of y must equal the last dim of x but got x shape = ${n.shape} and y shape = ${t.shape}`)}if(n.rank===2&&t.rank===2){let o=!1,s=!1;return Pd({a:n,b:t,transposeA:o,transposeB:s,bias:r?wv(n.rank,r,go()):null,activation:e})}else{let o=n.shape.slice(),s=o.pop();n=n.reshape([-1,s]);let c=t.shape.slice(),l=c.pop(),p=c.pop(),f=[...c,l],m=Array.from({length:t.rank},(T,N)=>N===0?t.rank-2:N<=t.rank-2?N-1:N);t=t.transpose(m).reshape([p,-1]);let y=[...o,...f],b=!1,v=!1;return Pd({a:n,b:t,transposeA:b,transposeB:v,bias:r?wv(n.rank,r,go()):null,activation:e}).reshape(y)}}function Fot(n){return rt(()=>{let t=re(n),e=qn(n);return Xn(po(n,t),t,Xn(Fr(n,re(n)),e,nt(-1,e)))})}function Rot(n,t){return rt(()=>{if(n.rank!==1)throw new Error("Only 1D one-hot tensors are supported in the deeplearn backend, at present.");return n=n.toInt(),Zi(n,t).toFloat()})}function GC(n,t,e){return rt(()=>(Array.isArray(t)?t=vr(t,"int32"):t=t.toInt(),$c(n,t,e)))}function Ap(n){return nt(n,n)}function Pot(n,t){return rt(()=>{if(typeof t=="number"&&(t=Et(Math.round(t),"int32")),t.dtype!=="int32")throw new Ut(`Non-int32 dtype (${t.dtype}) is not supported by pow() yet`);return fo(n,t)})}function wv(n,t,e){let r=t.shape;if(t.rank!==1&&t.rank!==n)throw new Y(`Unexpected bias dimensions: ${t.rank}; expected it to be 1 or ${n}`);if(n===5){if(e==="channelsFirst")return r.length===1?t.reshape([1,r[0],1,1,1]):t.reshape([1,r[3],r[0],r[1],r[2]]);if(e==="channelsLast")return r.length===1?t.reshape([1,1,1,1,r[0]]):t.reshape([1].concat(r))}else if(n===4){if(e==="channelsFirst")return r.length===1?t.reshape([1,r[0],1,1]):t.reshape([1,r[2],r[0],r[1]]);if(e==="channelsLast")return r.length===1?t.reshape([1,1,1,r[0]]):t.reshape([1].concat(r))}else if(n===3){if(e==="channelsFirst")return r.length===1?t.reshape([1,r[0],1]):t.reshape([1,r[1],r[0]]);if(e==="channelsLast")return r.length===1?t.reshape([1,1,r[0]]):t.reshape([1].concat(r))}else if(n<3)return t;throw new Y(`Unsupported input rank by biasAdd: ${t.rank}`)}function qo(n,t,e){return rt(()=>(e==null&&(e=go()),nn(e),n.add(wv(n.rank,t,e))))}function qV(n,t=1){if(t!==1)throw new Ut(`Support for alpha values other than 1 (${t}) is not implemented yet.`);return _c(n)}function HV(n){return rt(()=>Bt(n,bn(n).add(1)))}function UC(n,t,e,r){return rt(()=>oC(n,t,e,r))}function jV(n){return rt(()=>{let t=Tt(.5,nt(.2,n));return ar(t,0,1)})}function Fp(n,t,e=!1){return e?n():t()}let KV=["fanIn","fanOut","fanAvg"],XV=["normal","uniform","truncatedNormal"],Oot=["Zeros","Ones","Constant","RandomNormal","RandomUniform","TruncatedNormal","VarianceScaling","Orthogonal","Identity"];function YV(n){zc(KV,"FanMode",n)}function JV(n){zc(XV,"Distribution",n)}class Qr extends Qi{fromConfigUsesCustomObjects(){return!1}getConfig(){return{}}}class vv extends Qr{apply(t,e){return xe(t,e)}}vv.className="Zeros",vt(vv);class jd extends Qr{apply(t,e){return ho(t,e)}}jd.className="Ones",vt(jd);class Tv extends Qr{constructor(t){super();if(typeof t!="object")throw new Y(`Expected argument of type ConstantConfig but got 
${t}`);if(t.value===void 0)throw new Y(`config must have value set but got ${t}`);this.value=t.value}apply(t,e){return rt(()=>nt(Et(this.value),ho(t,e)))}getConfig(){return{value:this.value}}}Tv.className="Constant",vt(Tv);class kv extends Qr{constructor(t){super();this.DEFAULT_MINVAL=-.05,this.DEFAULT_MAXVAL=.05,this.minval=t.minval||this.DEFAULT_MINVAL,this.maxval=t.maxval||this.DEFAULT_MAXVAL,this.seed=t.seed}apply(t,e){return aa(t,this.minval,this.maxval,e)}getConfig(){return{minval:this.minval,maxval:this.maxval,seed:this.seed}}}kv.className="RandomUniform",vt(kv);class Nv extends Qr{constructor(t){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,e){if(e=e||"float32",e!=="float32"&&e!=="int32")throw new Ut(`randomNormal does not support dType ${e}.`);return Hd(t,this.mean,this.stddev,e,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}Nv.className="RandomNormal",vt(Nv);class _v extends Qr{constructor(t){super();this.DEFAULT_MEAN=0,this.DEFAULT_STDDEV=.05,this.mean=t.mean||this.DEFAULT_MEAN,this.stddev=t.stddev||this.DEFAULT_STDDEV,this.seed=t.seed}apply(t,e){if(e=e||"float32",e!=="float32"&&e!=="int32")throw new Ut(`truncatedNormal does not support dType ${e}.`);return yp(t,this.mean,this.stddev,e,this.seed)}getConfig(){return{mean:this.mean,stddev:this.stddev,seed:this.seed}}}_v.className="TruncatedNormal",vt(_v);class Cv extends Qr{constructor(t){super();this.gain=t.gain!=null?t.gain:1}apply(t,e){return rt(()=>{if(t.length!==2||t[0]!==t[1])throw new Y("Identity matrix initializer can only be used for 2D square matrices.");return nt(this.gain,hd(t[0]))})}getConfig(){return{gain:this.gain}}}Cv.className="Identity",vt(Cv);function ZV(n,t="channelsLast"){let e,r;if(nn(t),n.length===2)e=n[0],r=n[1];else if([3,4,5].indexOf(n.length)!==-1){if(t==="channelsFirst"){let o=fi(n,2);e=n[1]*o,r=n[0]*o}else if(t==="channelsLast"){let o=fi(n,0,n.length-2);e=n[n.length-2]*o,r=n[n.length-1]*o}}else{let o=fi(n);e=Math.sqrt(o),r=Math.sqrt(o)}return[e,r]}class hr extends Qr{constructor(t){super();if(t.scale<0)throw new Y(`scale must be a positive float. 
Got: ${t.scale}`);this.scale=t.scale==null?1:t.scale,this.mode=t.mode==null?"fanIn":t.mode,YV(this.mode),this.distribution=t.distribution==null?"normal":t.distribution,JV(this.distribution),this.seed=t.seed}apply(t,e){let r=ZV(t),o=r[0],s=r[1],c=this.scale;if(this.mode==="fanIn"?c/=Math.max(1,o):this.mode==="fanOut"?c/=Math.max(1,s):c/=Math.max(1,(o+s)/2),this.distribution==="normal"){let l=Math.sqrt(c);if(e=e||"float32",e!=="float32"&&e!=="int32")throw new Ut(`${this.getClassName()} does not support dType ${e}.`);return yp(t,0,l,e,this.seed)}else{let l=Math.sqrt(3*c);return aa(t,-l,l,e)}}getConfig(){return{scale:this.scale,mode:this.mode,distribution:this.distribution,seed:this.seed}}}hr.className="VarianceScaling",vt(hr);class Kd extends hr{constructor(t){super({scale:1,mode:"fanAvg",distribution:"uniform",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Kd.className="GlorotUniform",vt(Kd);class Xd extends hr{constructor(t){super({scale:1,mode:"fanAvg",distribution:"normal",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Xd.className="GlorotNormal",vt(Xd);class Yd extends hr{constructor(t){super({scale:2,mode:"fanIn",distribution:"normal",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Yd.className="HeNormal",vt(Yd);class Jd extends hr{constructor(t){super({scale:2,mode:"fanIn",distribution:"uniform",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Jd.className="HeUniform",vt(Jd);class Zd extends hr{constructor(t){super({scale:1,mode:"fanIn",distribution:"normal",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Zd.className="LeCunNormal",vt(Zd);class Qd extends hr{constructor(t){super({scale:1,mode:"fanIn",distribution:"uniform",seed:t==null?null:t.seed})}getClassName(){return hr.className}}Qd.className="LeCunNormal",vt(Qd);class Sv extends Qr{constructor(t){super();if(this.DEFAULT_GAIN=1,this.gain=t.gain==null?this.DEFAULT_GAIN:t.gain,this.seed=t.seed,this.seed!=null)throw new Ut("Random seed is not implemented for Orthogonal Initializer yet.")}apply(t,e){return rt(()=>{if(t.length<2)throw new Ut("Shape must be at least 2D.");t[0]*t[1]>2e3&&console.warn(`Orthogonal initializer is being called on a matrix with more than 2000 (${t[0]*t[1]}) elements: Slowness may result.`);let r=t[0]>t[1]?[t[1],t[0]]:t,o=Hd(r,0,1,"float32"),s=mC.gramSchmidt(o);return t[0]>t[1]&&(s=s.transpose()),nt(this.gain,s)})}getConfig(){return{gain:this.gain,seed:this.seed}}}Sv.className="Orthogonal",vt(Sv);let qC={constant:"Constant",glorotNormal:"GlorotNormal",glorotUniform:"GlorotUniform",heNormal:"HeNormal",heUniform:"HeUniform",identity:"Identity",leCunNormal:"LeCunNormal",leCunUniform:"LeCunUniform",ones:"Ones",orthogonal:"Orthogonal",randomNormal:"RandomNormal",randomUniform:"RandomUniform",truncatedNormal:"TruncatedNormal",varianceScaling:"VarianceScaling",zeros:"Zeros"};function HC(n,t={}){return Sp(n,Dr.getMap().classNameMap,t,"initializer")}function rn(n){return lv(n)}function je(n){if(typeof n=="string"){let t=n in qC?qC[n]:n;if(t==="GlorotNormal")return new Xd;if(t==="GlorotUniform")return new Kd;if(t==="HeNormal")return new Yd;if(t==="HeUniform")return new Jd;if(t==="LeCunNormal")return new Zd;if(t==="LeCunUniform")return new Qd;{let e={};return e.className=t,e.config={},HC(e)}}else return n instanceof Qr?n:HC(n)}function QV(){return new vv}function tG(){return new jd}function eG(n){return new Tv(n)}function nG(n){return new kv(n)}function rG(n){return new Nv(n)}function oG(n){return new _v(n)}function sG(n){return new 
Cv(n)}function iG(n){return new hr(n)}function aG(n){return new Kd(n)}function cG(n){return new Xd(n)}function lG(n){return new Yd(n)}function uG(n){return new Jd(n)}function pG(n){return new Zd(n)}function hG(n){return new Qd(n)}function fG(n){return new Sv(n)}var dG=Object.freeze({__proto__:null,zeros:QV,ones:tG,constant:eG,randomUniform:nG,randomNormal:rG,truncatedNormal:oG,identity:sG,varianceScaling:iG,glorotUniform:aG,glorotNormal:cG,heNormal:lG,heUniform:uG,leCunNormal:pG,leCunUniform:hG,orthogonal:fG});let mG=0;function jC(){return mG++}let tm={};function em(n=""){return n in tm||(tm[n]=0),tm[n]+=1,n+tm[n].toString()}function $v(n){return Array.isArray(n)&&Array.isArray(n[0])}function nm(n){return n.length===0?[]:Array.isArray(n[0])?n:[n]}function Qt(n){let t;if(Array.isArray(n)){if(n.length!==1)throw new Y(`Expected Tensor length to be 1; got ${n.length}`);t=n[0]}else t=n;return t}function Re(n){if(Array.isArray(n)&&Array.isArray(n[0])){if(n.length===1)return n=n,n[0];throw new Y(`Expected exactly 1 Shape; got ${n.length}`)}else return n}function rm(n){let t=0;for(let e of n)e.shape.length===0?t+=1:t+=e.shape.reduce((r,o)=>r*o);return t}let KC="Variable";class bo{constructor(t,e="float32",r=KC,o=!0,s=null){this.dtype=e==null?"float32":e,this.shape=t.shape,this.id=jC(),r=r==null?KC:r,this.originalName=MC(r),this.name=BC(this.originalName),this.trainable_=o,this.constraint=s,this.val=B_(t,this.trainable_,this.name,this.dtype)}read(){return this.assertNotDisposed(),this.val}write(t){return this.assertNotDisposed(),gG(this.val,t),this.val.id!==t.id&&(this.val.assign(t),this.constraint!=null&&this.val.assign(this.constraint.apply(this.val))),this}dispose(){this.assertNotDisposed(),this.val.dispose()}assertNotDisposed(){if(this.val.isDisposed)throw new Error(`LayersVariable ${this.name} is already disposed.`)}get trainable(){return this.trainable_}set trainable(t){this.trainable_=t,this.val.trainable=t}}function gG(n,t){if(n.shape.toString()!==t.shape.toString())throw new Error("Shape mismatch: "+JSON.stringify(n.shape)+" vs. 
"+JSON.stringify(t.shape))}function Lot(n,t,e,r){return new bo(n,t,e,!0,r)}function Mot(n,t,e){return new bo(xe(n),t,e)}function Bot(n,t,e){return new bo(re(n),t,e)}function zot(n,t,e){let r=ho(n);return new bo(r,t,e)}function Wot(n,t,e){let r=qn(n);return new bo(r,t,e)}function Vot(n,t,e){return new bo(hd(n),t,e)}function Got(n,t,e,r,o,s="randomUniform"){return new bo(aa(n,t,e,r),r,s)}function Uot(n,t=0,e=1,r,o,s="truncatedNormal"){if(r=r||"float32",r!=="float32"&&r!=="int32")throw new Ut(`randomNormal does not support dType ${r}.`);return new bo(yp(n,t,e,r,o),r,s)}function qot(n,t=0,e=1,r,o,s="randomNormal"){if(r=r||"float32",r!=="float32"&&r!=="int32")throw new Ut(`randomNormalVariable does not support dType ${r}.`);return new bo(Ew(n,t,e,r,o),r,s)}function Hot(n,t){return n.write(t)}function jot(n,t){return n.write(Tt(n.read(),t))}function Kot(n,t){return n.write(Dt(n.read(),t))}function Iv(n){return n.map(t=>t.read())}function Ev(n){n.forEach(t=>{let e=t[0];e.write(t[1])})}function Xot(n,t){let e=t.map(o=>o.read()),r=Nw(n,e);return t.map(o=>r.grads[o.name])}class In{constructor(t){this.dtype=t.dtype,this.shape=t.shape,t.shape!=null?this.ndim=t.shape.length:this.ndim=t.ndim,this.maxNDim=t.maxNDim,this.minNDim=t.minNDim,this.axes=t.axes||{}}}class xo{constructor(t,e,r,o,s,c,l){this.dtype=t,this.shape=e,this.sourceLayer=r,this.inputs=o,this.callArgs=s,this.outputTensorIndex=l,this.id=jC(),c!=null&&(this.originalName=MC(c),this.name=BC(this.originalName)),this.rank=e.length}}let yG=0;class om{constructor(t,e){this.callArgs=e,this.id=yG++,this.outboundLayer=t.outboundLayer,this.inboundLayers=t.inboundLayers,this.nodeIndices=t.nodeIndices,this.tensorIndices=t.tensorIndices,this.inputTensors=t.inputTensors,this.outputTensors=t.outputTensors,this.inputMasks=t.inputMasks,this.outputMasks=t.outputMasks,this.inputShapes=t.inputShapes,this.outputShapes=t.outputShapes;for(let r of t.inboundLayers)r!=null&&r.outboundNodes.push(this);t.outboundLayer.inboundNodes.push(this)}getConfig(){let t=[];for(let e of this.inboundLayers)e!=null?t.push(e.name):t.push(null);return{outboundLayer:this.outboundLayer?this.outboundLayer.name:null,inboundLayers:t,nodeIndices:this.nodeIndices,tensorIndices:this.tensorIndices}}}let bG=0;class ye extends Qi{constructor(t={}){super();this._callHook=null,this._addedWeightNames=[],this._stateful=!1,this.id=bG++,this.activityRegularizer=null,this.inputSpec=null,this.supportsMasking=!1,this._trainableWeights=[],this._nonTrainableWeights=[],this._losses=[],this._updates=[],this._built=!1,this.inboundNodes=[],this.outboundNodes=[];let e=t.name;if(!e){let r=this.getClassName();e=bs(r)+"_"+em(r)}if(this.name=e,this.trainable_=t.trainable==null?!0:t.trainable,t.inputShape!=null||t.batchInputShape!=null){let r;if(t.batchInputShape!=null)r=t.batchInputShape;else if(t.inputShape!=null){let s=null;t.batchSize!=null&&(s=t.batchSize),r=[s].concat(t.inputShape)}this.batchInputShape=r;let o=t.dtype;o==null&&(o=t.inputDType),o==null&&(o="float32"),this.dtype=o}t.weights!=null?this.initialWeights=t.weights:this.initialWeights=null,this._refCount=null,this.fastWeightInitDuringBuild=!1}static nodeKey(t,e){return t.name+"_ib-"+e.toString()}getNodeAtIndex(t,e){if(this.inboundNodes.length===0)throw new Jr(`The layer has never been called and thus has no defined ${e}.`);if(this.inboundNodes.length<=t)throw new Y(`Asked to get ${e} at node ${t}, but the layer has only ${this.inboundNodes.length} inbound nodes.`);return this.inboundNodes[t]}getInputAt(t){return 
pr(this.getNodeAtIndex(t,"input").inputTensors)}getOutputAt(t){return pr(this.getNodeAtIndex(t,"output").outputTensors)}get input(){if(this.inboundNodes.length>1)throw new Go(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer input" is ill-defined. Use \`getInputAt(nodeIndex)\` instead.`);if(this.inboundNodes.length===0)throw new Go(`Layer ${this.name} is not connected, no input to return.`);return pr(this.getNodeAtIndex(0,"input").inputTensors)}get output(){if(this.inboundNodes.length===0)throw new Go(`Layer ${this.name} has no inbound nodes.`);if(this.inboundNodes.length>1)throw new Go(`Layer ${this.name} has multiple inbound nodes, hence the notion of "layer output" is ill-defined. Use \`getOutputAt(nodeIndex)\` instead.`);return pr(this.getNodeAtIndex(0,"output").outputTensors)}get losses(){return this._losses}calculateLosses(){return this.losses.map(t=>t())}get updates(){return this._updates}get built(){return this._built}set built(t){this._built=t}get trainable(){return this.trainable_}set trainable(t){this._trainableWeights.forEach(e=>e.trainable=t),this.trainable_=t}get trainableWeights(){return this.trainable_?this._trainableWeights.filter(t=>t.trainable):[]}set trainableWeights(t){this._trainableWeights=t}get nonTrainableWeights(){return this.trainable?this._trainableWeights.filter(t=>!t.trainable).concat(this._nonTrainableWeights):this._trainableWeights.concat(this._nonTrainableWeights)}set nonTrainableWeights(t){this._nonTrainableWeights=t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}get stateful(){return this._stateful}resetStates(){if(!this.stateful)throw new Error("Cannot call the resetStates() method of a non-stateful Layer object.")}assertInputCompatibility(t){if(t=ze(t),this.inputSpec==null||this.inputSpec.length===0)return;let e=ze(this.inputSpec);if(t.length!==e.length)throw new Y(`Layer ${this.name} expects ${e.length} inputs, but it received ${t.length} input tensors. 
Input received: ${t}`);for(let r=0;rs.maxNDim)throw new Y(`Input ${r} is incompatible with layer ${this.name}: expected max_ndim=${s.maxNDim}, found ndim=${c}`);if(s.minNDim!=null&&c=0?l[f]:l[l.length+f];if(m!=null&&[m,null].indexOf(y)===-1)throw new Y(`Input ${r} is incompatible with layer ${this.name}: expected axis ${f} of input shape to have value ${m} but got shape ${l}.`)}}if(s.shape!=null)for(let l=0;l{if(!this.built){this.assertInputCompatibility(t);let c=[];for(let l of ze(t))c.push(l.shape);this.build(pr(c)),this.built=!0,this.initialWeights&&this.setWeights(this.initialWeights),this._refCount===null&&s&&(this._refCount=1)}if(this.assertInputCompatibility(t),s){let c=this.call(t,e),l=ze(c),p=[];for(let f of l)r.indexOf(f)!==-1&&(f=f.clone()),p.push(f);if(c=pr(p),this.activityRegularizer!=null)throw new Ut("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return c}else{let c=xG(t),l=this.computeOutputShape(c),p,f=wG(t);if(this.warnOnIncompatibleInputShape(Array.isArray(t)?c[0]:c),l!=null&&l.length>0&&Array.isArray(l[0])?p=l.map((m,y)=>new xo(f,m,this,ze(t),e,this.name,y)):p=new xo(f,l,this,ze(t),e,this.name),this.addInboundNode(t,p,null,null,c,l,e),this._refCount++,this.activityRegularizer!=null)throw new Ut("Layer invocation in the presence of activity regularizer(s) is not supported yet.");return p}})}warnOnIncompatibleInputShape(t){if(this.batchInputShape==null)return;if(t.length!==this.batchInputShape.length)console.warn(`The rank of the input tensor provided (shape: ${JSON.stringify(t)}) does not match that of the batchInputShape (${JSON.stringify(this.batchInputShape)}) of the layer ${this.name}`);else{let e=!1;this.batchInputShape.forEach((r,o)=>{r!=null&&t[o]!=null&&t[o]!==r&&(e=!0)}),e&&console.warn(`The shape of the input tensor (${JSON.stringify(t)}) does not match the expectation of layer ${this.name}: ${JSON.stringify(this.batchInputShape)}`)}}get outputShape(){if(this.inboundNodes==null||this.inboundNodes.length===0)throw new Go(`The layer ${this.name} has never been called and thus has no defined output shape.`);let t=[];for(let e of this.inboundNodes){let r=JSON.stringify(e.outputShapes);t.indexOf(r)===-1&&t.push(r)}if(t.length===1){let e=this.inboundNodes[0].outputShapes;return Array.isArray(e)&&Array.isArray(e[0])&&e.length===1?e[0]:e}else throw new Go(`The layer ${this.name} has multiple inbound nodes with different output shapes. Hence the notion of "output shape" is ill-defined for the layer.`)}countParams(){if(!this.built)throw new Jr(`You tried to call countParams() on ${this.name}, but the layer is not built yet. Build it first by calling build(batchInputShape).`);return rm(this.weights)}build(t){this.built=!0}getWeights(t=!1){return Iv(t?this.trainableWeights:this.weights)}setWeights(t){rt(()=>{let e=this.weights;if(e.length!==t.length)throw new Y(`You called setWeights(weights) on layer "${this.name}" with a weight list of length ${t.length}, but the layer was expecting ${e.length} weights. 
Provided weights: ${t}...`);if(e.length===0)return;let r=[],o=Iv(e);for(let s=0;ss.apply(f.read())),c==null&&(c=!0),c?this._trainableWeights.push(f):this._nonTrainableWeights.push(f),f}setFastWeightInitDuringBuild(t){this.fastWeightInitDuringBuild=t}addLoss(t){if(t==null||Array.isArray(t)&&t.length===0)return;t=ze(t),this._losses!==void 0&&this._losses!==null&&this.losses.push(...t)}computeOutputShape(t){return t}computeMask(t,e){if(!this.supportsMasking){if(e!=null)if(Array.isArray(e))e.forEach(r=>{if(r!=null)throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`)});else throw new TypeError(`Layer ${this.name} does not support masking, but was passed an inputMask.`);return null}return e}addInboundNode(t,e,r,o,s,c,l=null){let p=ze(t);e=ze(e),r=ze(r),o=ze(o),s=nm(s),c=nm(c);let f=[],m=[],y=[];for(let b of p)f.push(b.sourceLayer),m.push(b.nodeIndex),y.push(b.tensorIndex);new om({outboundLayer:this,inboundLayers:f,nodeIndices:m,tensorIndices:y,inputTensors:p,outputTensors:e,inputMasks:r,outputMasks:o,inputShapes:s,outputShapes:c},l);for(let b=0;bt.dispose()),this.weights.length}assertNotDisposed(){if(this._refCount===0)throw new Error(`Layer '${this.name}' is already disposed.`)}dispose(){if(!this.built)throw new Error(`Cannot dispose Layer ${this.name} because it has not been built yet.`);if(this._refCount===null)throw new Error(`Cannot dispose Layer ${this.name} because it has not been used yet.`);this.assertNotDisposed();let t=0;return--this._refCount===0&&(t=this.disposeWeights()),{refCountAfterDispose:this._refCount,numDisposedVariables:t}}}function xG(n){n=ze(n);let t=[];for(let e of n)t.push(e.shape);return pr(t)}function wG(n){return"float32"}function XC(n,t,e){if((t==null||e!=null&&e>0)&&(t=n.sourceLayer,e=n.nodeIndex),t.inboundNodes.length===0)return[n];{let r=t.inboundNodes[e];if(r.inboundLayers.length===0)return r.inputTensors;{let o=[];for(let s=0;s0){let o=await Promise.all(t);for(let s=0;sTt(this.totals[o],nt(s,r)));this.totals[o]=l,c!=null&&c.dispose()}}}async onEpochEnd(t,e){if(e!=null)for(let r of this.params.metrics){if(this.totals[r]==null)continue;typeof this.totals[r]=="number"?e[r]=this.totals[r]/this.seen:rt(()=>{let o=nt(Bt(1,this.seen),this.totals[r]);e[r]=o,this.totals[r].dispose(),Sn(e[r])})}}}class tS extends Uc{async onTrainBegin(t){this.epoch=[],this.history={}}async onEpochEnd(t,e){e==null&&(e={}),this.epoch.push(t);for(let r in e)this.history[r]==null&&(this.history[r]=[]),this.history[r].push(e[r])}async syncData(){let t=[],e=[],r=[];for(let s in this.history){let c=this.history[s];for(let l=0;lnew eS(r,t))}class Lr{constructor(){}static registerCallbackConstructor(t,e){_(t>=0&&Number.isInteger(t),()=>`Verbosity level is expected to be an integer >= 0, but got ${t}`),Lr.checkForDuplicate(e),Lr.constructors[t]==null&&(Lr.constructors[t]=[]),Lr.constructors[t].push(e)}static checkForDuplicate(t){for(let e in Lr.constructors){let r=Lr.constructors[+e];r.forEach(o=>{if(o===t)throw new Y("Duplicate callback constructor.")})}}static clear(){Lr.constructors={}}static createCallbacks(t){let e=[];for(let r in Lr.constructors){let o=+r;t>=o&&e.push(...Lr.constructors[o])}return e.map(r=>new r)}}Lr.constructors={};function rS(n,t,e,r,o,s,c,l,p){let f=new tS,m=[new TG,...Lr.createCallbacks(t)];n!=null&&m.push(...n),m.push(f);let y=new QC(m);return y.setParams({epochs:e,initialEpoch:r,samples:o,steps:s,batchSize:c,verbose:t,doValidation:l,metrics:p}),{callbackList:y,history:f}}function wo(n,t={},e=!1){return 
Sp(n,Dr.getMap().classNameMap,t,"layer",e)}function sm(n,t){return rt(()=>{n.dtype!=="float32"&&(n=n.asType("float32"));let e=zt(Ap(n),t,!0),r=Cc(e.shape,wn()),o=Pn(Xr(e,r));return Bt(n,o)})}function xs(n,t){return rt(()=>en(Ap(Dt(t,n)),-1))}function qc(n,t){return rt(()=>en(bn(Dt(t,n)),-1))}function gi(n,t){return rt(()=>{let e=Dt(n,t),r=ar(bn(n),wn(),Number.MAX_VALUE),o=bn(Bt(e,r));return nt(100,en(o,-1))})}function Dv(n,t){return rt(()=>{let e=ar(t,wn(),Number.MAX_VALUE),r=wr(Tt(1,e)),o=ar(n,wn(),Number.MAX_VALUE),s=wr(Tt(1,o));return en(Ap(Dt(r,s)),-1)})}function kG(n,t){return rt(()=>{let e=Xr(0,Dt(1,nt(n,t)));return en(Ap(e),-1)})}function NG(n,t){return rt(()=>{let e=Xr(0,Dt(1,nt(n,t)));return en(e,-1)})}function _G(n,t){return rt(()=>{let e=zt(nt(n,t),-1),r=lr(nt(Dt(1,n),t),-1);return Xr(0,Tt(1,Dt(r,e)))})}function CG(n,t){return rt(()=>{let e=Math.log(2),r=Dt(t,n),o=Dt(Tt(r,Ic(nt(-2,r))),e);return en(o,-1)})}function Rp(n,t,e=!1){return rt(()=>{if(e)t=ca(t);else{let r=zt(t,t.shape.length-1,!0);t=Bt(t,r)}return t=ar(t,wn(),1-wn()),tn(zt(nt(n.toFloat(),wr(t)),t.shape.length-1))})}function im(n,t,e=!1){return rt(()=>{let r=Sc(GV(n)).toInt();t=ar(t,wn(),1-wn());let o=t.shape,s=Zi(r,o[o.length-1]).reshape(o);return Rp(s,t,e)})}function SG(n,t){if(!lt(n.shape,t.shape))throw new Y(`logits and labels must have the same shape, but got shapes ${JSON.stringify(n.shape)} and ${JSON.stringify(t.shape)}`);return rt(()=>{let e=t.relu(),r=t.abs().neg();return e.sub(t.mul(n)).add(r.exp().log1p())})}function am(n,t){return rt(()=>{let e;return e=ar(t,wn(),1-wn()),e=wr(Bt(e,Dt(1,e))),en(SG(n,e),-1)})}function Av(n,t){return rt(()=>{let e=ar(n,wn(),1),r=ar(t,wn(),1);return zt(nt(n,wr(Bt(e,r))),-1)})}function $G(n,t){return rt(()=>{let e=wr(Tt(wn(),t));return en(Dt(t,nt(n,e)),-1)})}function cm(n,t){return rt(()=>{let e=sm(n,-1),r=sm(t,-1),o=nt(e,r);return tn(zt(o,-1))})}let Yot=xs,Jot=xs,Zot=qc,Qot=qc,tst=gi,est=gi,nst=Dv,rst=Dv,ost=Av,sst=Av,ist=cm,lm={meanSquaredError:xs,meanAbsoluteError:qc,meanAbsolutePercentageError:gi,meanSquaredLogarithmicError:Dv,squaredHinge:kG,hinge:NG,categoricalHinge:_G,logcosh:CG,categoricalCrossentropy:Rp,sparseCategoricalCrossentropy:im,binaryCrossentropy:am,kullbackLeiblerDivergence:Av,poisson:$G,cosineProximity:cm};function Fv(n){if(typeof n=="string"){if(n in lm)return lm[n];let t=`Unknown loss ${n}`;throw n.toLowerCase().includes("softmaxcrossentropy")&&(t=`Unknown loss ${n}. 
Use "categoricalCrossentropy" as the string name for tf.losses.softmaxCrossEntropy`),new Y(t)}else return n}function Rv(n,t){return rt(()=>{let e=nt(.5,qn(t)),r=Ep(Fr(t,e),n.dtype);return en(po(n,r),-1)})}function Pv(n,t){return rt(()=>Ep(po(Qu(n,-1),Qu(t,-1)),"float32"))}function oS(n,t){return rt(()=>Yr(n.equal(1),t.equal(1)).sum().cast("float32"))}function IG(n,t){return rt(()=>Yr(n.equal(1),t.equal(0)).sum().cast("float32"))}function EG(n,t){return rt(()=>Yr(n.equal(0),t.equal(1)).sum().cast("float32"))}function sS(n,t){return rt(()=>{let e=oS(n,t),r=EG(n,t),o=e.add(r);return Xn(Fr(o,0),e.div(o),0).cast("float32")})}function DG(n,t){return rt(()=>{let e=oS(n,t),r=IG(n,t),o=e.add(r);return Xn(Fr(o,0),e.div(o),0).cast("float32")})}function iS(n,t){return am(n,t)}function aS(n,t){return n.rank===t.rank&&(n=n.squeeze([n.rank-1])),t=t.argMax(-1),t.dtype!==n.dtype&&(t=t.asType(n.dtype)),po(n,t).asType("float32")}function ast(n,t){throw new Ut}function cst(n,t){throw new Ut}let AG=xs,FG=xs,RG=qc,PG=qc,OG=gi,LG=gi,Ov=Rp,MG=cm,cS=im,um={binaryAccuracy:Rv,categoricalAccuracy:Pv,precision:sS,categoricalCrossentropy:Ov,sparseCategoricalCrossentropy:cS,mse:AG,MSE:FG,mae:RG,MAE:PG,mape:OG,MAPE:LG,cosine:MG};function BG(n){if(typeof n=="string"&&n in um)return um[n];if(typeof n!="string"&&n!=null)return n;throw new Y(`Unknown metric ${n}`)}function pm(n){if(Pr(n!==null,`Unknown LossOrMetricFn ${n}`),typeof n=="string")return n;{let t;for(let e of Object.keys(lm))if(lm[e]===n){t=e;break}if(t!==void 0)return t;for(let e of Object.keys(um))if(um[e]===n){t=e;break}return t!==void 0?t:n.name}}function zG(n){let t={Adagrad:()=>ua.adagrad(.01),Adadelta:()=>ua.adadelta(1,.95,wn()),Adam:()=>ua.adam(.001,.9,.999,wn()),Adamax:()=>ua.adamax(.002,.9,.999,wn(),0),RMSProp:()=>ua.rmsprop(.001,.9,0,wn()),SGD:()=>ua.sgd(.01)};if(t.adagrad=t.Adagrad,t.adadelta=t.Adadelta,t.adam=t.Adam,t.adamax=t.Adamax,t.rmsprop=t.RMSProp,t.sgd=t.SGD,n in t)return t[n]();throw new Y(`Unknown Optimizer ${n}`)}let lS=1*1024*1024;function uS(n,t,e=!1){if(n==null||typeof n!="object"||Object.getPrototypeOf(n)!==Object.prototype||!Lv(n))throw new Error("User-defined metadata is expected to be a JSON object, but is not.");if(e){let r=JSON.stringify(n);r.length>lS&&console.warn(`User-defined metadata of model "${t}" is too large in size (length=${r.length} when serialized). It is not recommended to store such large objects in user-defined metadata. 
Please make sure its serialized length is <= ${lS}.`)}}function Lv(n){if(n===null)return!0;if(typeof n=="object")if(Object.getPrototypeOf(n)===Object.prototype){let t=Object.keys(n);for(let e of t){if(typeof e!="string")return!1;if(!Lv(n[e]))return!1}return!0}else if(Array.isArray(n)){for(let t of n)if(!Lv(t))return!1;return!0}else return!1;else{let t=typeof n;return t==="string"||t==="number"||t==="boolean"}}function WG(n,t,e,r=console.log){let o=GG(n),s=["Layer (type)","Output shape","Param #"];o?(t=t||65,e=e||[.45,.85,1]):(t=t||98,e=e||[.33,.55,.67,1]),e[e.length-1]<=1&&(e=e.map(m=>Math.floor(t*m)));let c;if(!o){s.push("Receives inputs"),c=[];for(let m in n.nodesByDepth)c.push(...n.nodesByDepth[m])}r("_".repeat(t)),hm(s,e,r),r("=".repeat(t));let l=n.layers;for(let m=0;m1||o.length===1&&o[0].inboundLayers.length>1){t=!1;break}r.push(...o)}if(t)for(let o of n.layers){let s=!1;for(let c of o.inboundNodes)if(r.indexOf(c)!==-1)if(s){t=!1;break}else s=!0;if(!t)break}return t}function hm(n,t,e=console.log){let r="";for(let o=0;o0&&(r=r.slice(0,r.length-1)+" "),r+=n[o],r=r.slice(0,t[o]),r+=" ".repeat(t[o]-r.length);e(r)}function UG(n,t,e){let r;try{r=JSON.stringify(n.outputShape)}catch(l){r="multiple"}let o=n.name,s=n.getClassName(),c=[`${o} (${s})`,r,n.countParams().toString()];hm(c,t,e)}function qG(n,t,e,r){let o;try{o=JSON.stringify(n.outputShape)}catch(m){o="multiple"}let s=[];for(let m of n.inboundNodes){if(e!=null&&e.length>0&&e.indexOf(m)===-1)continue;for(let y=0;yT.name),p=[],f=t.names();for(let T of l)f.indexOf(T)!==-1?p.push(t.getValue(T)):p.push(null);r!=null&&(r.maxNumTensors=-Infinity,r.minNumTensors=Infinity);let m=l.join(",")+"|"+t.names().join(","),y,b;if(Bv[m]==null){let T=jG(c,t);y=T.sorted,b=T.recipientCounts,Bv[m]=y,hS[m]=b}y=Bv[m],b={},o||Object.assign(b,hS[m]);let v=new yi(t);for(let T=0;Tr.maxNumTensors&&(r.maxNumTensors=Z),Z0,()=>"Expected at least one fetch, got none");let e=[],r={};if(n.length===1){let o=fS(n[0],t);e=o.sorted,r=o.recipientMap}else{let o=new Set;for(let s of n){let{sorted:c,recipientMap:l}=fS(s,t);for(let p of c)o.has(p.name)||(e.push(p),o.add(p.name));for(let p in l)r[p]==null&&(r[p]=new Set),l[p].forEach(f=>r[p].add(f))}}return{sorted:e,recipientCounts:KG(r)}}function KG(n){let t={};for(let e in n)t[e]=n[e].size;return t}function fS(n,t){let e=new Set,r=[],o={};for(let l of t.names())e.add(l);let s=[],c=[];for(s.push(n);s.length>0;){let l=s[s.length-1];if(e.has(l.name)){s.pop();continue}let p=c[c.length-1]===s.length-1;if(l.inputs.length===0||p)s.pop(),r.push(l),e.add(l.name),p&&c.pop();else{c.push(s.length-1);for(let f of l.inputs){if(o[f.name]==null&&(o[f.name]=new Set),o[f.name].add(l.name),e.has(f.name))continue;s.push(f)}}}return{sorted:r,recipientMap:o}}function XG(n){let t;if(n.sourceLayer.inboundNodes.length===1)t=n.sourceLayer.output;else{let e=null;for(let r=0;rI.name)}`);hi(this.outputs).length!==this.outputs.length&&console.warn(`The list of outputs passed to the model is redundant. All outputs should only appear once. 
Found: ${this.outputs.map(I=>I.name)}`),this.inputLayers=[],this.inputLayersNodeIndices=[],this.inputLayersTensorIndices=[],this.outputLayers=[],this.outputLayersNodeIndices=[],this.outputLayersTensorIndices=[],this.layers=[],this.internalContainerRefs=[];for(let I of this.outputs){let P=I.sourceLayer,E=I.nodeIndex,L=I.tensorIndex;this.outputLayers.push(P),this.outputLayersNodeIndices.push(E),this.outputLayersTensorIndices.push(L)}for(let I of this.inputs){let P=I.sourceLayer,E=I.nodeIndex,L=I.tensorIndex;Pr(E===0,"input layer has >1 nodes"),Pr(L===0,"input layer has >1 tensors"),this.inputLayers.push(P),this.inputLayersNodeIndices.push(E),this.inputLayersTensorIndices.push(L)}this.inputNames=[],this.outputNames=[],this.feedInputShapes=[],this.feedInputNames=[],this.feedOutputNames=[];for(let I=0;II.shape),this.internalOutputShapes=this.outputs.map(I=>I.shape);let e={},r={},o={},s={},c={},l=[],p=(I,P,E,L,B,q)=>{(L==null||B==null||q==null)&&(L=I.sourceLayer,B=I.nodeIndex,q=I.tensorIndex);let H=L.inboundNodes[B];if(E.indexOf(H)!==-1)throw new Jr(`The tensor ${I.name} at layer "${L.name}" is part of a cycle.`);if(P.indexOf(H)!==-1)return;this.containerNodes.add(vo.nodeKey(L,B)),L.id in c||(c[L.id]=Object.keys(c).length),E.indexOf(H)===-1&&E.push(H);let Z=H.inboundLayers.length;for(let J=0;J=0;)E.splice(E.indexOf(H),1);l.push(H)},f=[],m=[];for(let I of this.outputs)p(I,f,m);let y=l.slice().reverse();for(let I of y){r[I.id]=I,I.id in e||(e[I.id]=0);let P=e[I.id],E=o[I.outboundLayer.id]==null?0:o[I.outboundLayer.id];P=Math.max(P,E),o[I.outboundLayer.id]=P,s[I.outboundLayer.id]=I.outboundLayer,e[I.id]=P;for(let L=0;LparseInt(I,10)).sort(Gd);this.layers=[];for(let I of T){let P=v[I];P.sort((E,L)=>{let B=c[E.id],q=c[L.id];return Bq?1:0});for(let E of P)E instanceof vo&&this.internalContainerRefs.push(E),this.layers.push(E)}this.layersByDepth=v,T=Object.keys(b).map(I=>parseInt(I,10)).sort(Gd);let N=this.inputs.slice(),S=[];for(let I of T)for(let P of b[I]){let E=P.outboundLayer;if(E!=null){for(let L of P.inputTensors)if(N.indexOf(L)===-1)throw new Jr(`Graph disconnected: cannot obtain value for tensor ${L} at layer "${E.name}". The following previous layers were accessed without issue: ${S}`);for(let L of P.outputTensors)N.push(L);S.push(E.name)}}this.nodesByDepth=b;let D=this.layers.map(I=>I.name);for(let I of D){let P=D.filter(E=>E===I).length;if(P!==1)throw new Jr(`The name "${I}" is used ${P} times in the model. All layer names should be unique. 
Layer names: `+JSON.stringify(D))}this.outboundNodes=[],this.inboundNodes=[],new om({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:this.inputs.map(I=>null),outputMasks:this.outputs.map(I=>null),inputShapes:this.inputs.map(I=>I.shape),outputShapes:this.outputs.map(I=>I.shape)}),this.built=!0,this._refCount=1}assertNotDisposed(){if(this._refCount===0)throw new Error(`Container '${this.name}' is already disposed.`)}dispose(){this.assertNotDisposed();let t={refCountAfterDispose:null,numDisposedVariables:0};if(--this._refCount===0){for(let e of this.layers)t.numDisposedVariables+=e.dispose().numDisposedVariables;for(let e of this.internalContainerRefs)t.numDisposedVariables+=e.dispose().numDisposedVariables}return t.refCountAfterDispose=this._refCount,t}get trainable(){return this.trainable_}set trainable(t){this.layers.forEach(e=>{e._trainableWeights.forEach(r=>r.trainable=t)}),this.trainable_=t}get trainableWeights(){if(this._trainableWeights.length>0)throw new Y("Container instance unexpectedly contains _trainableWeights.The trainable weights of a Container are a union of the trainable weights of its consituent Layers. Its own _trainableWeights must remain an empty Array.");if(!this.trainable)return[];let t=[];for(let e of this.layers)t=t.concat(e.trainableWeights);return t}get nonTrainableWeights(){let t=[];for(let e of this.layers)t.push(...e.nonTrainableWeights);if(!this.trainable){let e=[];for(let r of this.layers)e.push(...r.trainableWeights);return e.concat(t)}return t}get weights(){return this.trainableWeights.concat(this.nonTrainableWeights)}loadWeights(t,e=!0){let r={},o=0;for(let c of this.layers)for(let l of c.weights){if(r[l.originalName]!=null)throw new Y(`Duplicate weight name: ${l.originalName}`);r[l.originalName]=l,o++}let s=[];for(let c in t){let l=c;if(r[c]==null){let p=c.split("/"),f=p.slice(0,-2).concat([p[p.length-1]]);l=f.join("/")}if(r[l]!=null)s.push([r[l],t[c]]);else if(e)throw new Y(`Provided weight data has no target variable: ${c}`);delete r[l]}if(e){let c=[];for(let l in r)c.push(l);if(c.length>0)throw new Y(`${c.length} of ${o} weights are not set: ${c}`)}Ev(s)}updatedConfig(){let t=this.getConfig(),e={};return e.className=this.getClassName(),e.config=t,e.kerasVersion=`tfjs-layers ${fm}`,e.backend="TensorFlow.js",e}toJSON(t,e=!0){let r=Mv(this.updatedConfig());return e?JSON.stringify(r):r}call(t,e){return rt(()=>{t=ze(t);let r=new yi;for(let o=0;o{t=ze(t);let r;return e==null?r=pa(null,t.length):r=ze(e),this.runInternalGraph(t,r)[1]})}computeOutputShape(t){let e=nm(t);if(e.length!==this.inputLayers.length)throw new Y(`Invalid inputShape argument ${t}: model has ${this.inputLayers.length} tensor inputs.`);let r={};for(let l=0;lparseInt(l,10)).sort(Gd);if(o.length>1)for(let l of o){let p=this.nodesByDepth[l];for(let f of p){let m=f.outboundLayer;if(this.inputLayers.map(N=>N.id).indexOf(m.id)!==-1)continue;let y=[];for(let N=0;NparseInt(p,10)).sort(Gd);for(let p of o){let f=this.nodesByDepth[p];for(let m of f){let y=m.outboundLayer,b=m.inputTensors,v=m.outputTensors,T=new Array;for(let N of b)N.id in r&&T.push(r[N.id]);if(T.length===b.length){let N={},S,D,I,P;if(m.callArgs!=null&&(N=m.callArgs),T.length===1){let[E,L]=T[0];N.mask==null&&(N.mask=L),I=ze(y.call(E,N)),P=ze(y.computeMask(E,L)),S=[E],D=[L]}else S=T.map(E=>E[0]),D=T.map(E=>E[1]),N.mask==null&&(N.mask=D),I=ze(y.call(S,N)),P=ze(y.computeMask(S,D));if(y.activityRegularizer)throw new Ut("LayersModel invocation with concrete 
Tensor value(s) in the presence of activity regularizer(s) is not supported yet.");for(let E=0;E{let t=[];for(let e of this.layers)for(let r=0;r0){let N=[];for(let S=0;S0&&S.apply(pr(I),P)}function f(S){let D=S.name,I=wo(S,e.customObjects!=null?e.customObjects:{});I.setFastWeightInitDuringBuild(o),s[D]=I;let P=S.inboundNodes;P.forEach(E=>{if(!(E instanceof Array))throw new Y(`Corrupted configuration, expected array for nodeData: ${E}`);l(I,E)})}let m=e.name,y=e.layers;for(let S of y)f(S);for(;!CV(c);)for(let S of y){let D=s[S.name];if(D.name in c){let I=c[D.name];delete c[D.name];for(let P of I)p(D,P)}}let b=[],v=[],T=e.inputLayers;for(let S of T){let D=S[0],I=S[1],P=S[2];Pr(D in s);let E=s[D],L=E.inboundNodes[I].outputTensors;b.push(L[P])}let N=e.outputLayers;for(let S of N){let D=S[0],I=S[1],P=S[2];Pr(D in s);let E=s[D],L=E.inboundNodes[I].outputTensors;v.push(L[P])}return new t({inputs:b,outputs:v,name:m})}get stateful(){if(this._stateful)throw new Y("Container instance unexpectedly has _stateful = true. The statefulness of a Container is determined by the Layers it contains. Its _stateful property must remain the default false.");for(let t of this.layers)if(t.stateful)return!0;return!1}resetStates(){rt(()=>{this.layers.forEach(t=>{t.stateful&&t.resetStates()})})}}function dS(n,t,e){let r=t.length;if(n==null||Array.isArray(n)&&n.length===0)return t.map(o=>null);if(r===1)return Array.isArray(n)&&n.length===1?n:typeof n=="object"&&t[0]in n?[n[t[0]]]:[n];if(Array.isArray(n)){if(n.length!==r)throw new Error(`Provided ${e} is an array of ${n.length} element(s), but the model has ${r} outputs. Make sure a set of weights is provided for each model output.`);return n}else if(typeof n=="object"&&Object.keys(n).length>0&&typeof n[Object.keys(n)[0]]=="object"){let o=[];return t.forEach(s=>{s in n?o.push(n[s]):o.push(null)}),o}else throw new Error(`The model has multiple (${r}) outputs, so ${e} must be either an array with ${r} elements or an object with ${t} keys. Provided ${e} not understood: ${JSON.stringify(n)}`)}function mS(n,t){return dS(n,t,"classWeight")}function lst(n,t){return dS(n,t,"sampleWeight")}async function gS(n,t,e,r){if(t!=null||r!=null)throw new Error("Support sampleWeight is not implemented yet");if(e!=null){let o=rt(()=>{if(n.shape.length===1)return n.clone();if(n.shape.length===2)if(n.shape[1]>1){let l=1;return n.argMax(l)}else{if(n.shape[1]===1)return n.reshape([n.shape[0]]);throw new Error(`Encountered unexpected last-dimension size (${n.shape[1]}) during handling of class weights. The size is expected to be >= 1.`)}else throw new Error(`Unexpected rank of target (y) tensor (${n.rank}) during handling of class weights. The rank is expected to be 1 or 2.`)}),s=Array.from(await o.data());Xt(o);let c=[];return s.forEach(l=>{if(e[l]==null)throw new Error(`classWeight must contain all classes in the training data. The class ${l} exists in the data but not in classWeight`);c.push(e[l])}),vr(c,"float32")}else return null}function YG(n,t){return nt(n,t)}let JG=32;function yS(n,t){let e,r,o=t;e=o.xs,r=o.ys,_(e!=null&&r!=null,()=>`A Dataset iterator for fitDataset() is expected to generate objects of the form \`{xs: xVal, ys: yVal}\`, where the two values may be \`tf.Tensor\`, an array of Tensors, or a map of string to Tensor. The provided Dataset instead generates ${t}`);let s=bS("input",n.inputNames,e),c=bS("output",n.outputNames,r),l=s[0].shape[0];_(s.length===n.inputs.length,()=>`LayersModel has ${n.inputs.length} inputs, but the dataset provides ${s.length} inputs. 
(Expected input keys: ${JSON.stringify(n.inputNames)})`),_(c.length===n.outputs.length,()=>`LayersModel has ${n.outputs.length} outputs, but the dataset provides ${c.length} outputs. (Expected output keys: ${JSON.stringify(n.outputNames)})`);for(let p=0;p`Batch size mismatch: input ${n.inputNames[p]} has ${s[p].shape[0]}; expected ${l} based on input ${n.inputNames[0]}.`);for(let p=0;p`Batch size mismatch: output ${n.outputNames[p]} has ${c[p].shape[0]}; expected ${l} based on input ${n.inputNames[0]}.`);return{xs:s,ys:c}}function bS(n,t,e){if(e instanceof ot)return[e];if(Array.isArray(e))return _(e.length===t.length,()=>`Received an array of ${e.length} Tensors, but expected ${t.length} to match the ${n} keys ${t}.`),e;{let r=[];for(let o of t){if(e[o]==null)throw new Y(`The feature data generated by the dataset lacks the required ${n} key '${o}'.`);r.push(e[o])}return r}}function ZG(n){if(n.length===3)throw new Ut("Validation with sample weights is not implemented yet.");return{xs:n[0],ys:n[1]}}async function QG(n,t,e){let r=e.batchesPerEpoch!=null;if(_(n.optimizer!=null,()=>"You must compile a model before training/testing. Use LayersModel.compile(modelCompileConfig)."),_(e!=null,()=>"For fitDataset(), the 2nd argument (config) is required, but it is not provided in this call."),_(e.epochs!=null&&e.epochs>0&&Number.isInteger(e.epochs),()=>`For fitDataset(), config.epochs is expected to be a positive integer, but got ${e.epochs}`),_(!r||e.batchesPerEpoch>0&&Number.isInteger(e.batchesPerEpoch),()=>`For fitDataset(), config.batchesPerEpoch is expected to be a positive integer if specified, but got ${e.batchesPerEpoch}`),_(e.validationSplit==null,()=>"`validationSplit` is not supported by `fitDataset()`. Use validationData instead."),n.isTraining)throw new Error("Cannot start training because another fit() call is ongoing.");n.isTraining=!0;try{let o=e.validationData!=null,s,c;if(o)if(xS(e.validationData))_(e.validationBatches==null||e.validationBatches>0&&Number.isInteger(e.validationBatches),()=>`For fitDataset() with dataset-based validation, config.validationBatches is expected not to be provided, or to be a positive integer, but got ${e.validationBatches}`);else{let S=ZG(e.validationData);s=S.xs,c=S.ys}let l=n.makeTrainFunction(),p=n.getDedupedMetricsNames(),f;o?f=p.slice().concat(p.map(S=>"val_"+S)):f=p.slice();let m=nS(e.callbacks,e.yieldEvery),y=e.verbose==null?1:e.verbose,{callbackList:b,history:v}=rS(m,y,e.epochs,null,null,tU(t,e),null,o,f);b.setModel(n),n.history=v,await b.onTrainBegin(),n.stopTraining_=!1;let T=e.initialEpoch==null?0:e.initialEpoch,N=await t.iterator();for(;T=e.batchesPerEpoch:P.done){if(o){let E;xS(e.validationData)?E=ze(await n.evaluateDataset(e.validationData,{batches:e.validationBatches})):E=ze(n.evaluate(s,c,{batchSize:e.validationBatchSize==null?JG:e.validationBatchSize,verbose:0}));for(let L=0;L0)throw new Ut("Verbose mode is not implemented yet.");_(!r||e.batches>0&&Number.isInteger(e.batches),()=>`Test loop expects \`batches\` to be a positive integer, but received ${JSON.stringify(e.batches)}`);let c=eU(t)?t:await t.iterator(),l=0,p=0;for(;r?p{if(f.value){let{xs:m,ys:y}=yS(n,f.value),b=m.concat(y),v=rt(()=>o(b));if(Xt(b),p===0)for(let N=0;NTt(s[N],nt(T,S))),p>0&&Xt(D)}Xt(v),l+=T,++p}return s}),f.done){r&&console.warn(`Your dataset iterator ran out of data during evaluateDataset(). Interrupting evalution. Make sure that your dataset can generate at least \`batches\` batches (in this case, ${e.batches} batches). 
You may need to use the repeat() function when building your dataset.`);break}}for(let f=0;f0&&Number.isInteger(n),()=>`batchSize is required to be a positive integer, but got ${n}`)}function Lp(n,t,e){return n==null?[null]:Array.isArray(n)?n.map(r=>da(r,t,e-t)):da(n,t,e-t)}function Wv(n,t){return rt(()=>n==null?null:Array.isArray(n)?n.map(e=>Wv(e,t)):GC(n,t.dtype==="int32"?t:t.toInt()))}function Vv(n,t){let e=[],r=0,o=null;for(;r=n&&(o=n),e.push([r,o]),r=o;return e}async function rU(n,t,e,r,o,s,c,l,p,f,m,y,b,v,T){o==null&&(o=32),s==null&&(s=1),m==null&&(m=!0),b==null&&(b=0);let N=!1;if(p!=null&&f!=null&&(N=!0),T!=null&&(N=!0,v==null))throw new Y("Can only use `validationSteps` when doing step-wise training, i.e., `stepsPerEpoch` must be set.");let S=n.checkNumSamples(e,o,v,"steps_per_epoch"),D;S!=null&&(D=yo(0,S)),c==null&&(c=1);let{callbackList:I,history:P}=rS(l,c,s,b,S,v,o,N,y);I.setModel(n),n.history=P,await I.onTrainBegin(),n.stopTraining_=!1;for(let E=b;E{let J=q[H][0],it=q[H][1],pt=da(B,J,it-J);Z.batch=H,Z.size=it-J;let ht=Wv(e,pt),dt=t(ht);for(let ft=0;ft0){if(T=!0,r.validationData.length===2)c=r.validationData[0],l=r.validationData[1];else throw r.validationData.length===3?new Ut("validationData including sample weights is not supported yet."):new Y(`When passing validation data, it must contain 2 (valX, valY) or 3 (valX, valY, valSampleWeight) items; ${r.validationData} is invalid.`);let q=!0,H=await n.standardizeUserData(c,l,null,null,q,y);p=H[0],f=H[1],N=p.concat(f)}else if(r.validationSplit!=null&&r.validationSplit>0&&r.validationSplit<1){T=!0;let q=Math.floor(o[0].shape[0]*(1-r.validationSplit)),H=o[0].shape[0];p=Lp(o,q,H),o=Lp(o,0,q),f=Lp(s,q,H),s=Lp(s,0,q),N=p.concat(f)}else r.validationSteps!=null&&(T=!0);let S=o.concat(s).concat(m);n.checkTrainableWeightsConsistency();let D=n.makeTrainFunction(),I=n.getDedupedMetricsNames(),P,E;T?(n.makeTestFunction(),P=n.testFunction,E=I.slice().concat(I.map(q=>"val_"+q))):(P=null,N=[],E=I.slice());let L=nS(r.callbacks,r.yieldEvery),B=await rU(n,D,S,I,y,r.epochs,r.verbose,L,P,N,r.shuffle,E,r.initialEpoch,null,null);return B}finally{n.isTraining=!1,ma(o,t),ma(s,e),ma(p,c),ma(f,l),m!=null&&Xt(m)}}function wS(n){let t=[];n instanceof ot&&(n=[n]);for(let e=0;ee.push(o.id));else if(t!=null)for(let o in t){let s=t[o];e.push(s.id)}let r=[];if(n instanceof ot)e.indexOf(n.id)===-1&&r.push(n);else if(Array.isArray(n))n.forEach(o=>{e.indexOf(o.id)===-1&&r.push(o)});else if(n!=null)for(let o in n){let s=n[o];e.indexOf(s.id)===-1&&r.push(s)}r.forEach(o=>{o.isDisposed||o.dispose()})}function sU(n){return n instanceof ot}function Gv(n){return Array.isArray(n)}function vS(n){return!sU(n)&&!Gv(n)}function TS(n,t,e,r=!0,o=""){if(t==null||t.length===0){if(n!=null){let c=!1;if(Gv(n)&&n.length>0)c=!0;else if(vS(n)){for(let l in n)if(n.hasOwnProperty(l)){c=!0;break}}else c=!0;if(c)throw new Y(`Error when checking model ${o} expected no data, but got ${n}`)}return[]}if(n==null)return t.map(c=>null);let s;if(vS(n)){n=n,s=[];for(let c of t){if(n[c]==null)throw new Y(`No data provided for "${c}". Need data for each key in: ${t}`);s.push(n[c])}}else if(Gv(n)){if(n=n,n.length!==t.length)throw new Y(`Error when checking model ${o}: the Array of Tensors that you are passing to your model is not the size the model expected. Expected to see ${t.length} Tensor(s), but instead got the following list of Tensor(s): ${n}`);s=n}else{if(n=n,t.length>1)throw new Y(`The model ${o} expects ${t.length} Tensor(s), but only received one Tensor. 
Found: Tensor with shape ${n.shape}`);s=[n]}if(s=wS(s),e!=null)for(let c=0;c=0&&f!==m)throw new Y(`Error when checking ${o}: expected ${t[c]} to have shape [${e[c]}], but got array with shape [${l.shape}].`)}}return s}function iU(n,t,e){let r=hi(n.map(s=>s.shape[0]));r.sort();let o=hi(t.map(s=>s.shape[0]));if(o.sort(),r.length>1)throw new Y(`All input Tensors (x) should have the same number of samples. Got array shapes: ${JSON.stringify(n.map(s=>s.shape))}`);if(o.length>1)throw new Y(`All target Tensors (y) should have the same number of samples. Got array shapes: ${JSON.stringify(t.map(s=>s.shape))}`);if(r.length>0&&o.length>0&&!lt(r,o))throw new Y(`Input Tensors should have the same number of samples as target Tensors. Found ${r[0]} input sample(s) and ${o[0]} target sample(s).`)}function aU(n,t,e){let r=[xs,am,Rp];for(let o=0;o1)throw new Y(`The model expects ${t.length} ${o} Tensors, but only received one Tensor. Found: array with shape ${JSON.stringify(n.shape)}.`);s=[n]}if(e!=null)for(let c=0;c[]);let e;if(typeof n=="string"||typeof n=="function")e=[n];else if(Array.isArray(n)||typeof n=="object")e=n;else throw new TypeError(`Type of metrics argument not understood. Expected an string,function, Array, or Object, found: ${n}`);if(Array.isArray(e))return t.map(r=>e);{let r=[];for(let o of t){let s=e.hasOwnProperty(o)?e[o]:[];Array.isArray(s)||(s=[s]),r.push(s)}return r}}let lU="layers-model";class ws extends vo{constructor(t){super(t);this.isTraining=!1}summary(t,e,r=console.log){if(!this.built)throw new Y("This model has never been called, thus its weights have not been created yet. So no summary can be displayed. Build the model first (e.g., by calling it on some test data).");WG(this,t,e,r)}compile(t){if(t.loss==null&&(t.loss=[]),this.loss=t.loss,typeof t.optimizer=="string")this.optimizer_=zG(t.optimizer),this.isOptimizerOwned=!0;else{if(!(t.optimizer instanceof gs))throw new Y("User-defined optimizer must be an instance of tf.Optimizer.");this.optimizer_=t.optimizer,this.isOptimizerOwned=!1}let e=[];if(!Array.isArray(t.loss)&&typeof t.loss!="string"&&typeof t.loss!="function"){t.loss=t.loss;for(let c in t.loss)if(this.outputNames.indexOf(c)===-1)throw new Y(`Unknown entry in loss dictionary: "${c}". Only expected the following keys: ${this.outputNames}`);for(let c of this.outputNames)t.loss[c]==null&&console.warn(`Output "${c}" is missing from loss dictionary. We assume this was done on purpose, and we will not be expecting data to be passed to ${c} during training`),e.push(Fv(t.loss[c]))}else if(Array.isArray(t.loss)){if(t.loss.length!==this.outputs.length)throw new Y(`When passing an Array as loss, it should have one entry per model output. 
The model has ${this.outputs.length} output(s), but you passed loss=${t.loss}.`);let c=t.loss;e=c.map(l=>Fv(l))}else{let c=Fv(t.loss);this.outputs.forEach(l=>{e.push(c)})}this.lossFunctions=e,this.feedOutputNames=[],this.feedOutputShapes=[],this.feedLossFns=[];for(let c=0;c{for(let c=0;c1&&(this.metricsTensors.push([l,c]),this.metricsNames.push(this.outputNames[c]+"_loss"))}});let o=cU(t.metrics,this.outputNames),s=(c,l,p)=>{this.outputNames.length>1&&(l=this.outputNames[c]+"_"+l),this.metricsNames.push(l),this.metricsTensors.push([p,c])};fa("metric",()=>{for(let c=0;c{let m="",y,b,v;for(let T of f){if(typeof T=="string"&&["accuracy","acc","crossentropy","ce"].indexOf(T)!==-1){let S=this.internalOutputShapes[c];S[S.length-1]===1||this.lossFunctions[c]===am?["accuracy","acc"].indexOf(T)!==-1?b=Rv:["crossentropy","ce"].indexOf(T)!==-1&&(b=iS):this.lossFunctions[c]===im?["accuracy","acc"].indexOf(T)!==-1?b=aS:["crossentropy","ce"].indexOf(T)!==-1&&(b=cS):["accuracy","acc"].indexOf(T)!==-1?b=Pv:["crossentropy","ce"].indexOf(T)!==-1&&(b=Ov);let D;["accuracy","acc"].indexOf(T)!==-1?D="acc":["crossentropy","ce"].indexOf(T)!==-1&&(D="ce"),v=b,y=m+D}else{let S=BG(T);v=S,y=m+pm(T)}let N;fa(y,()=>{N=v}),s(c,y,N)}};p(l)}}),this.collectedTrainableWeights=this.trainableWeights}checkTrainableWeightsConsistency(){if(this.collectedTrainableWeights==null)return;this.trainableWeights.length!==this.collectedTrainableWeights.length&&console.warn("Discrepancy between trainableweights and collected trainable weights. Did you set `model.trainable` without calling `model.compile()` afterwards?")}evaluate(t,e,r={}){let o=r.batchSize==null?32:r.batchSize;zv(o);let s=!0,c=this.standardizeUserDataXY(t,e,s,o);try{let l=c[0].concat(c[1]);this.makeTestFunction();let p=this.testFunction,f=this.testLoop(p,l,o,r.verbose,r.steps);return pr(f)}finally{ma(c[0],t),ma(c[1],e)}}async evaluateDataset(t,e){return this.makeTestFunction(),nU(this,t,e)}checkNumSamples(t,e,r,o="steps"){let s;if(r!=null){if(s=null,e!=null)throw new Y(`If ${o} is set, batchSize must be null or undefined.Got batchSize = ${e}`)}else if(t!=null)Array.isArray(t)?s=t[0].shape[0]:s=t.shape[0];else throw new Y(`Either the input data should have a defined shape, or ${o} shoud be specified.`);return s}execute(t,e){if(Array.isArray(e)&&e.length===0)throw new Y("`outputs` is an empty Array, which is not allowed.");let r=Array.isArray(e),o=r?e:[e],s=this.retrieveSymbolicTensors(o),c=new yi;if(t instanceof ot&&(t=[t]),Array.isArray(t)){if(t.length!==this.inputs.length)throw new Y(`The number of inputs provided (${t.length}) does not match the number of inputs of this model (${this.inputs.length}).`);for(let p=0;pl.name);for(let l=0;l0){let o=[];throw e.forEach((s,c)=>{s==null&&o.push(t[c])}),new Y(`Cannot find SymbolicTensors for output name(s): ${JSON.stringify(o)}`)}return e}predictLoop(t,e=32,r=!1){return rt(()=>{let o=this.checkNumSamples(t);if(r)throw new Ut("Verbose predictLoop() is not implemented yet.");let s=Vv(o,e),c=this.outputs.map(l=>[]);for(let l=0;l{let f=s[l][0],m=s[l][1],y=Lp(t,f,m),b=[];if(Array.isArray(y))for(let T=0;Tc[m].push(f))}return pr(c.map(l=>Qe(l,0)))})}predict(t,e={}){let r=wS(t);kS(r,this.inputNames,this.feedInputShapes,!1);try{let o=e.batchSize==null?32:e.batchSize;return zv(o),this.predictLoop(r,o)}finally{ma(r,t)}}predictOnBatch(t){kS(t,this.inputNames,this.feedInputShapes,!0);let e=(Array.isArray(t)?t[0]:t).shape[0];return this.predictLoop(t,e)}standardizeUserDataXY(t,e,r=!0,o){if(this.optimizer_==null)throw new Jr("You must compile 
a model before training/testing. Use LayersModel.compile(modelCompileArgs).");let s=[];for(let c=0;c0&&t[0].shape[0]%o!==0)throw new Y(`In a stateful network, you should only pass inputs with a number of samples that is divisible by the batch size ${o}. Found: ${t[0].shape[0]} sample(s).`);return[t,e]}async standardizeUserData(t,e,r,o,s=!0,c){let[l,p]=this.standardizeUserDataXY(t,e,s,c);if(r!=null)throw new Error("sample weight is not supported yet.");let f=null;if(o!=null){let m=mS(o,this.outputNames);f=[];for(let y=0;y{let c=this.checkNumSamples(e,r,s,"steps"),l=[];if(o>0)throw new Ut("Verbose mode is not implemented yet.");if(s!=null)throw new Ut("steps mode in testLoop() is not implemented yet");{let p=Vv(c,r),f=vr(yo(0,c));for(let m=0;m1){let c=DC(t.slice(0,r),o);s+=`_${c}`}e.push(s)}return e}makeTrainFunction(){return t=>{let e=[],r=t.slice(0,this.inputs.length),o=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),s=t.slice(this.inputs.length+this.outputs.length,this.inputs.length+this.outputs.length*2),c=[],l=()=>{let y=[];for(let N=0;N1&&N{T=Tt(T,N)}),T},p=this.collectedTrainableWeights.map(y=>y.read()),f=!0,m=this.optimizer_.minimize(l,f,p);return[m].concat(c)}}makeTestFunction(){this.testFunction=t=>rt(()=>{let e=[],r,o=t.slice(0,this.inputs.length),s=t.slice(this.inputs.length,this.inputs.length+this.outputs.length),c=[];for(let f=0;fbs(e))}else{let e=Object.keys(this.loss);t={};let r=this.loss;for(let o of e)if(typeof r[o]=="string")t[o]=bs(r[o]);else throw new Error("Serialization of non-string loss is not supported.")}return t}getMetricIdentifiers(){if(typeof this.metrics=="string"||typeof this.metrics=="function")return[bs(pm(this.metrics))];if(Array.isArray(this.metrics))return this.metrics.map(t=>bs(pm(t)));{let t={};for(let e in this.metrics)t[e]=bs(pm(this.metrics[e]));return t}}getTrainingConfig(){return{loss:this.getLossIdentifiers(),metrics:this.getMetricIdentifiers(),optimizer_config:{class_name:this.optimizer.getClassName(),config:this.optimizer.getConfig()}}}loadTrainingConfig(t){if(t.weighted_metrics!=null)throw new Error("Loading weight_metrics is not supported yet.");if(t.loss_weights!=null)throw new Error("Loading loss_weights is not supported yet.");if(t.sample_weight_mode!=null)throw new Error("Loading sample_weight_mode is not supported yet.");let e=Pp(t.optimizer_config),r=wo(e),o;if(typeof t.loss=="string")o=ha(t.loss);else if(Array.isArray(t.loss))o=t.loss.map(c=>ha(c));else if(t.loss!=null){o={};for(let c in t.loss)o[c]=ha(t.loss[c])}let s;if(Array.isArray(t.metrics))s=t.metrics.map(c=>ha(c));else if(t.metrics!=null){s={};for(let c in t.metrics)s[c]=ha(t.metrics[c])}this.compile({loss:o,metrics:s,optimizer:r})}async save(t,e){if(typeof t=="string"){let f=Mx(t);if(f.length===0)throw new Y(`Cannot find any save handlers for URL '${t}'`);if(f.length>1)throw new Y(`Found more than one (${f.length}) save handlers for URL '${t}'`);t=f[0]}if(t.save==null)throw new Y("LayersModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");let r=await Ox(this.getNamedWeights(e)),o=!1,s=null,c=this.toJSON(s,o),l={modelTopology:c,format:lU,generatedBy:`TensorFlow.js tfjs-layers v${fm}`,convertedBy:null},p=e==null?!1:e.includeOptimizer;if(p&&this.optimizer!=null){l.trainingConfig=this.getTrainingConfig();let f="optimizer",{data:m,specs:y}=await Ox(await this.optimizer.getWeights(),f);r.specs.push(...y),r.data=Hf([r.data,m])}if(this.userDefinedMetadata!=null){let 
f=!0;uS(this.userDefinedMetadata,this.name,f),l.userDefinedMetadata=this.userDefinedMetadata}return l.weightData=r.data,l.weightSpecs=r.specs,t.save(l)}setUserDefinedMetadata(t){uS(t,this.name),this.userDefinedMetadata=t}getUserDefinedMetadata(){return this.userDefinedMetadata}}ws.className="Model",vt(ws);class NS extends ws{}NS.className="Functional",vt(NS);async function uU(n,t){"modelTopology"in n||(n={modelTopology:n}),n=n;let e=n.modelTopology;e.model_config!=null&&(e=e.model_config);let r=Pp(e),o=wo(r,t);if(n.weightsManifest!=null){let s=await JN(n.weightsManifest,n.pathPrefix,o.weights.map(l=>l.originalName)),c={};for(let l of o.weights)c[l.originalName]=s[l.originalName];o.loadWeights(c),Xt(s)}return o}async function pU(n,t){if(t==null&&(t={}),typeof n=="string"){let e=Bx(n,t);if(e.length===0)e.push(Xf(n,t));else if(e.length>1)throw new Y(`Found more than one (${e.length}) load handlers for URL '${n}'`);n=e[0]}return hU(n,void 0,t)}async function hU(n,t,e){if(e==null&&(e={}),n.load==null)throw new Y("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");let r=await n.load(),o=r.modelTopology;o.model_config!=null&&(o=o.model_config);let s=e.strict==null?!0:e.strict,c=r.weightData!=null&&r.weightSpecs!=null&&s,l=wo(Pp(o),t,c),p=r.trainingConfig;if(p!=null&&l.loadTrainingConfig(p),r.userDefinedMetadata!=null&&l.setUserDefinedMetadata(r.userDefinedMetadata),r.weightData!=null){if(r.weightSpecs==null)throw new Y("LayersModel artifacts contains weight data, but not weight specs. Therefore loading of weights cannot proceed.");let{modelWeights:f,optimizerWeights:m}=fU(r.weightData,r.weightSpecs);l.loadWeights(f,s),l.optimizer!=null&&m.length>0&&await l.optimizer.setWeights(m),Xt(f),Xt(m.map(y=>y.tensor))}return l}function fU(n,t){let e=qf(n,t),r={},o=[];return t.forEach(s=>{s.group==="optimizer"?o.push({name:s.name,tensor:e[s.name]}):r[s.name]=e[s.name]}),{modelWeights:r,optimizerWeights:o}}class ga extends ws{constructor(t){super({inputs:[],outputs:[]});if(t=t||{},this.trainable=!0,this.built=!1,this.name=t.name!=null?t.name:em("sequential_"),t.layers!=null)for(let e of t.layers)this.add(e)}checkShape(t){let e=t.inboundNodes[0].outputTensors[0].shape;if(e.some(r=>r<0))throw new Y(`Negative dimension size caused by adding layer ${t.name} with input shape [${t.inboundNodes[0].inputTensors[0].shape}]`)}add(t){let e=t instanceof ga||t instanceof ws,r;if(e){if(r=t,r.outputs.length!==1)throw new Y("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");if(r.inputs.length!==1)throw new Y("All layers in a Sequential model should have a single input tensor. For multi-input layers, use the functional API.")}if(this.outputs.length===0){if(t.inboundNodes.length===0){if(t.batchInputShape==null)throw new Y("The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.");let o=YC({batchShape:t.batchInputShape,dtype:t.dtype,name:t.name+"_input"});t.apply(o)}if(e)this.outputs=r.outputs,this.inputs=r.inputs;else{if(t.inboundNodes.length!==1)throw new Y(`A layer added to a Sequential model must not already be connected somewhere else. LayersModel received layer ${t.name} which has ${t.inboundNodes.length} pre-existing inbound connections.`);if(t.inboundNodes[0].outputTensors.length!==1)throw new Y("All layers in a Sequential model should have a single output tensor. 
For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[t.inboundNodes[0].outputTensors[0]],this.inputs=XC(this.outputs[0])}this.inboundNodes=[],new om({outboundLayer:this,inboundLayers:[],nodeIndices:[],tensorIndices:[],inputTensors:this.inputs,outputTensors:this.outputs,inputMasks:pa(null,this.inputs.length),outputMasks:[null],inputShapes:this.inputs.map(o=>o.shape),outputShapes:this.outputs[0].shape})}else{let o=t.apply(this.outputs[0]);if(Array.isArray(o))throw new TypeError("All layers in a Sequential model should have a single output tensor. For multi-output layers, use the functional API.");this.checkShape(t),this.outputs=[o],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}this.layers.push(t),this.built=!1}pop(){if(this.layers.length===0)throw new TypeError("There are no layers in the model.");if(this.layers.pop(),this.layers.length===0)this.outputs=[],this.inboundNodes=[],this.outboundNodes=[];else{let t=this.layers.length-1;this.layers[t].outboundNodes=[],this.outputs=[this.layers[t].output],this.inboundNodes[0].outputTensors=this.outputs,this.inboundNodes[0].outputShapes=[this.outputs[0].shape]}}call(t,e){return this.model==null&&this.build(),this.model.call(t,e)}build(t){if(Re(t),this.inputs.length===0||this.outputs.length===0)throw new TypeError("Sequential model cannot be built: model is empty. Add some layers first.");this.model=new ws({inputs:this.inputs,outputs:this.outputs[0],name:this.name+"_model"}),this.model.trainable=this.trainable,this.supportsMasking=this.model.supportsMasking,this.inputLayers=this.model.inputLayers,this.inputLayersNodeIndices=this.model.inputLayersNodeIndices,this.inputLayersTensorIndices=this.model.inputLayersTensorIndices,this.outputLayers=this.model.outputLayers,this.outputLayersNodeIndices=this.model.outputLayersNodeIndices,this.outputLayersTensorIndices=this.model.outputLayersTensorIndices,this.nodesByDepth=this.model.nodesByDepth,this.containerNodes=this.model.containerNodes,this.outputNames=this.model.outputNames,this.inputNames=this.model.inputNames,this.built=!0}countParams(){return this.built||this.build(),super.countParams()}summary(t,e,r=console.log){this.built||this.build(),super.summary(t,e,r)}setWeights(t){this.model==null&&this.build(),this.model.setWeights(t)}evaluate(t,e,r={}){if(!this.built)throw new Jr("The model needs to be compiled before being used.");return this.model.evaluate(t,e,r)}async evaluateDataset(t,e){if(!this.built)throw new Jr("The model needs to be compiled before being used.");return this.model.evaluateDataset(t,e)}predict(t,e={}){return this.model==null&&this.build(),this.model.predict(t,e)}predictOnBatch(t){return this.model==null&&this.build(),this.model.predictOnBatch(t)}compile(t){this.build(),this.model.compile(t),this.optimizer_=this.model.optimizer,this.isOptimizerOwned=this.model.isOptimizerOwned,this.loss=this.model.loss,this.metrics=this.model.metrics,this.metricsTensors=this.model.metricsTensors,this.metricsNames=this.model.metricsNames}get optimizer(){return this.model==null?void 0:this.model.optimizer}set optimizer(t){this.model.optimizer=t}async fit(t,e,r={}){if(!this.built)throw new Jr("The model needs to be compiled before being used.");return this.model.fit(t,e,r)}async fitDataset(t,e){if(!this.built)throw new Jr("The model needs to be compiled before being used.");return this.model.fitDataset(t,e)}async trainOnBatch(t,e){return this.model.trainOnBatch(t,e)}static fromConfig(t,e,r={},o=!1){let s,c={};if(e 
instanceof Array){if(!(e[0].className!=null)||e[0].className==="Merge")throw new Y("Legacy serialization format not supported yet.");s=e}else _(e.layers!=null,()=>"When the config data for a Sequential model is not an Array, it must be an Object that contains the 'layers' field."),s=e.layers,delete e.layers,c=e;let l=new t(c);if(!(l instanceof ga))throw new Ut(`Sequential.fromConfig called on non-Sequential input: ${l}`);for(let p of s){let f=void 0,m=wo(p,f,o);o&&m.setFastWeightInitDuringBuild(!0),l.add(m)}return l}set stopTraining(t){if(this.model==null)throw new Y("Cannot set the stopTraining property of a sequential model before it is compiled.");this.model.stopTraining=t}get stopTraining(){if(this.model==null)throw new Y("Cannot get the stopTraining property of a sequential model before it is compiled.");return this.model.stopTraining}getConfig(){let t=[];for(let e of this.layers){let r={};r.className=e.getClassName(),r.config=e.getConfig(),t.push(r)}return{name:this.name,layers:t}}}ga.className="Sequential",vt(ga);function dU(n){return new ws(n)}function mU(n){return new ga(n)}function gU(n,t){return t==null&&(t={}),pU(n,t)}function _S(n){return YC(n)}function yU(n,t){Lr.registerCallbackConstructor(n,t)}class kr extends Qi{getConfig(){return{}}}class CS extends kr{apply(t,e=1){return qV(t,e)}}CS.className="elu",vt(CS);class SS extends kr{apply(t){return Td(t)}}SS.className="selu",vt(SS);class $S extends kr{apply(t){return Vo(t)}}$S.className="relu",vt($S);class IS extends kr{apply(t){return rt(()=>oa(6,Vo(t)))}}IS.className="relu6",vt(IS);class ES extends kr{apply(t){return t}}ES.className="linear",vt(ES);class DS extends kr{apply(t){return Bo(t)}}DS.className="sigmoid",vt(DS);class AS extends kr{apply(t){return jV(t)}}AS.className="hardSigmoid",vt(AS);class FS extends kr{apply(t){return Ic(t)}}FS.className="softplus",vt(FS);class RS extends kr{apply(t){return HV(t)}}RS.className="softsign",vt(RS);class PS extends kr{apply(t){return Nc(t)}}PS.className="tanh",vt(PS);class Uv extends kr{apply(t,e=-1){return ca(t,e)}}Uv.className="softmax",vt(Uv);class OS extends kr{apply(t,e=-1){return gd(t,e)}}OS.className="logSoftmax",vt(OS);class LS extends kr{apply(t,e=1){return rt(()=>Bo(t.mul(e)).mul(t))}}LS.className="swish",vt(LS);function bi(n){return n.getClassName()}function qv(n,t={}){return Sp(n,Dr.getMap().classNameMap,t,"activation")}function xi(n){if(n==null){let t={};return t.className="linear",t.config={},qv(t)}if(typeof n=="string"){let t={};return t.className=n,t.config={},qv(t)}else return n instanceof kr?n:qv(n)}function Hv(n){if(n!=null&&typeof n!="object")throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an object, but received: ${n}`)}class MS extends Qi{}class Mp extends MS{constructor(t){super();Hv(t),this.l1=t==null||t.l1==null?.01:t.l1,this.l2=t==null||t.l2==null?.01:t.l2,this.hasL1=this.l1!==0,this.hasL2=this.l2!==0}apply(t){return rt(()=>{let e=xe([1]);return this.hasL1&&(e=Tt(e,zt(nt(this.l1,bn(t))))),this.hasL2&&(e=Tt(e,zt(nt(this.l2,Ap(t))))),e.asScalar()})}getConfig(){return{l1:this.l1,l2:this.l2}}static fromConfig(t,e){return new t({l1:e.l1,l2:e.l2})}}Mp.className="L1L2",vt(Mp);function bU(n){return Hv(n),new Mp({l1:n!=null?n.l1:null,l2:0})}function xU(n){return Hv(n),new Mp({l2:n!=null?n.l2:null,l1:0})}let BS={l1l2:"L1L2"};function Pe(n){return lv(n)}function zS(n,t={}){return Sp(n,Dr.getMap().classNameMap,t,"regularizer")}function Ke(n){if(n==null)return null;if(typeof n=="string"){let t=n in BS?BS[n]:n,e={className:t,config:{}};return 
zS(e)}else return n instanceof MS?n:zS(n)}class jv extends ye{constructor(t){super(t==null?{}:t);this.supportsMasking=!0,t!=null&&(this.maxValue=t.maxValue)}call(t,e){t=Qt(t);let r=Vo(t);return this.maxValue!=null&&(r=ar(r,0,this.maxValue)),r}computeOutputShape(t){return t}getConfig(){let t={maxValue:this.maxValue},e=super.getConfig();return Object.assign(t,e),t}}jv.className="ReLU",vt(jv);class Kv extends ye{constructor(t){super(t==null?{}:t);this.DEFAULT_ALPHA=.3,t==null&&(t={}),this.alpha=t.alpha==null?this.DEFAULT_ALPHA:t.alpha}call(t,e){let r=Qt(t);return fd(r,this.alpha)}computeOutputShape(t){return t}getConfig(){let t={alpha:this.alpha},e=super.getConfig();return Object.assign(t,e),t}}Kv.className="LeakyReLU",vt(Kv);class Xv extends ye{constructor(t){super(t==null?{}:t);if(this.DEFAULT_ALPHA_INITIALIZER="zeros",t==null&&(t={}),this.supportsMasking=!0,this.alphaInitializer=je(t.alphaInitializer||this.DEFAULT_ALPHA_INITIALIZER),this.alphaRegularizer=Ke(t.alphaRegularizer),this.alphaConstraint=Tn(t.alphaConstraint),t.sharedAxes==null)this.sharedAxes=null;else if(Array.isArray(t.sharedAxes))this.sharedAxes=t.sharedAxes;else if(typeof t.sharedAxes=="number")this.sharedAxes=[t.sharedAxes];else throw new Y(`Expected sharedAxes to be a number or an array of numbers, but got ${t.sharedAxes}`)}build(t){t=Re(t);let e=t.slice(1);if(this.sharedAxes!=null)for(let o of this.sharedAxes)e[o-1]=1;this.alpha=this.addWeight("alpha",e,"float32",this.alphaInitializer,this.alphaRegularizer,!0,this.alphaConstraint);let r={};if(this.sharedAxes!=null)for(let o=1;o(nn(t),t==="channelsFirst"?Kt(n,[0,2,3,1]):n))}function WS(n,t){return rt(()=>(nn(t),t==="channelsFirst"?Kt(n,[0,2,3,4,1]):n))}function VS(n,t,e,r=1,o="valid",s,c=1){return rt(()=>{if(s==null&&(s=go()),nn(s),n.shape.length!==3)throw new Y(`The input of a conv1dWithBias operation should be 3, but is ${n.shape.length} instead.`);if(t.shape.length!==3)throw new Y(`The kernel for a conv1dWithBias operation should be 3, but is ${t.shape.length} instead`);if(e!=null&&e.shape.length!==1)throw new Y(`The bias for a conv1dWithBias operation should be 1, but is ${t.shape.length} instead`);if(s==="channelsFirst"&&(n=Kt(n,[0,2,1])),o==="causal")throw new Ut("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");let l=cd(n,t,r,o==="same"?"same":"valid","NWC",c);return e!=null&&(l=qo(l,e)),l})}function ust(n,t,e=1,r="valid",o,s=1){return rt(()=>(nn(o),VS(n,t,null,e,r,o,s)))}function pst(n,t,e=[1,1],r="valid",o,s){return rt(()=>(nn(o),t0(n,t,null,e,r,o,s)))}function t0(n,t,e,r=[1,1],o="valid",s,c,l=null){return rt(()=>{if(s==null&&(s=go()),nn(s),n.rank!==3&&n.rank!==4)throw new Y(`conv2dWithBiasActivation expects input to be of rank 3 or 4, but received ${n.rank}.`);if(t.rank!==3&&t.rank!==4)throw new Y(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, but received ${n.rank}.`);let p=Qv(n,s);if(o==="causal")throw new Ut("The support for CAUSAL padding mode in conv1dWithBias is not implemented yet.");return p=qw({x:p,filter:t,strides:r,pad:o==="same"?"same":"valid",dilations:c,dataFormat:"NHWC",bias:e,activation:l}),s==="channelsFirst"&&(p=Kt(p,[0,3,1,2])),p})}function hst(n,t,e=[1,1,1],r="valid",o,s){return rt(()=>(nn(o),GS(n,t,null,e,r,o,s)))}function GS(n,t,e,r=[1,1,1],o="valid",s,c){return rt(()=>{if(s==null&&(s=go()),nn(s),n.rank!==4&&n.rank!==5)throw new Y(`conv3dWithBias expects input to be of rank 4 or 5, but received ${n.rank}.`);if(t.rank!==4&&t.rank!==5)throw new Y(`conv3dWithBias expects kernel to be of rank 4 or 
5, but received ${n.rank}.`);let l=WS(n,s);if(o==="causal")throw new Ut("The support for CAUSAL padding mode in conv3dWithBias is not implemented yet.");return l=gw(l,t,r,o==="same"?"same":"valid","NDHWC",c),e!=null&&(l=qo(l,e)),s==="channelsFirst"&&(l=Kt(l,[0,4,1,2,3])),l})}class mm extends ye{constructor(t,e){super(e);if(this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",mm.verifyArgs(e),this.rank=t,$n(this.rank,"rank"),this.rank!==1&&this.rank!==2&&this.rank!==3)throw new Ut(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is not implemented yet.`);if(this.kernelSize=Hc(e.kernelSize,t,"kernelSize"),this.strides=Hc(e.strides==null?1:e.strides,t,"strides"),this.padding=e.padding==null?"valid":e.padding,Or(this.padding),this.dataFormat=e.dataFormat==null?"channelsLast":e.dataFormat,nn(this.dataFormat),this.activation=xi(e.activation),this.useBias=e.useBias==null?!0:e.useBias,this.biasInitializer=je(e.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.biasConstraint=Tn(e.biasConstraint),this.biasRegularizer=Ke(e.biasRegularizer),this.activityRegularizer=Ke(e.activityRegularizer),this.dilationRate=Hc(e.dilationRate==null?1:e.dilationRate,t,"dilationRate"),this.rank===1&&Array.isArray(this.dilationRate)&&this.dilationRate.length!==1)throw new Y(`dilationRate must be a number or an array of a single number for 1D convolution, but received ${JSON.stringify(this.dilationRate)}`);if(this.rank===2){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==2)throw new Y(`dilationRate must be a number or array of two numbers for 2D convolution, but received ${JSON.stringify(this.dilationRate)}`)}else if(this.rank===3){if(typeof this.dilationRate=="number")this.dilationRate=[this.dilationRate,this.dilationRate,this.dilationRate];else if(this.dilationRate.length!==3)throw new Y(`dilationRate must be a number or array of three numbers for 3D convolution, but received ${JSON.stringify(this.dilationRate)}`)}}static verifyArgs(t){if(Pr("kernelSize"in t,"required key 'kernelSize' not in config"),typeof t.kernelSize!="number"&&!pv(t.kernelSize,"number",1,3))throw new Y(`BaseConv expects config.kernelSize to be number or number[] with length 1, 2, or 3, but received ${JSON.stringify(t.kernelSize)}.`)}getConfig(){let t={kernelSize:this.kernelSize,strides:this.strides,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,activation:bi(this.activation),useBias:this.useBias,biasInitializer:rn(this.biasInitializer),biasRegularizer:Pe(this.biasRegularizer),activityRegularizer:Pe(this.activityRegularizer),biasConstraint:vn(this.biasConstraint)},e=super.getConfig();return Object.assign(t,e),t}}class jc extends mm{constructor(t,e){super(t,e);this.kernel=null,jc.verifyArgs(e),this.filters=e.filters,$n(this.filters,"filters"),this.kernelInitializer=je(e.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.kernelConstraint=Tn(e.kernelConstraint),this.kernelRegularizer=Ke(e.kernelRegularizer)}build(t){t=Re(t);let e=this.dataFormat==="channelsFirst"?1:t.length-1;if(t[e]==null)throw new Y(`The channel dimension of the input should be defined. 
Found ${t[e]}`);let r=t[e],o=this.kernelSize.concat([r,this.filters]);this.kernel=this.addWeight("kernel",o,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[{ndim:this.rank+2,axes:{[e]:r}}],this.built=!0}call(t,e){return rt(()=>{t=Qt(t);let r,o=this.bias==null?null:this.bias.read(),s=FC(this.activation.getClassName());if(s!=null&&this.rank===2)r=t0(t,this.kernel.read(),o,this.strides,this.padding,this.dataFormat,this.dilationRate,s);else{if(this.rank===1)r=VS(t,this.kernel.read(),o,this.strides[0],this.padding,this.dataFormat,this.dilationRate[0]);else if(this.rank===2)r=t0(t,this.kernel.read(),o,this.strides,this.padding,this.dataFormat,this.dilationRate);else if(this.rank===3)r=GS(t,this.kernel.read(),o,this.strides,this.padding,this.dataFormat,this.dilationRate);else throw new Ut("convolutions greater than 3D are not implemented yet.");this.activation!=null&&(r=this.activation.apply(r))}return r})}computeOutputShape(t){t=Re(t);let e=[],r=this.dataFormat==="channelsLast"?t.slice(1,t.length-1):t.slice(2);for(let s=0;s 0 but got ${JSON.stringify(t.filters)}`)}}class Kc extends jc{constructor(t){super(2,t);Kc.verifyArgs(t)}getConfig(){let t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if(typeof t.kernelSize!="number"&&!pv(t.kernelSize,"number",1,2))throw new Y(`Conv2D expects config.kernelSize to be number or number[] with length 1 or 2, but received ${JSON.stringify(t.kernelSize)}.`)}}Kc.className="Conv2D",vt(Kc);class Bp extends jc{constructor(t){super(3,t);Bp.verifyArgs(t)}getConfig(){let t=super.getConfig();return delete t.rank,t}static verifyArgs(t){if(typeof t.kernelSize!="number"&&!(Array.isArray(t.kernelSize)&&(t.kernelSize.length===1||t.kernelSize.length===3)))throw new Y(`Conv3D expects config.kernelSize to be number or [number, number, number], but received ${JSON.stringify(t.kernelSize)}.`)}}Bp.className="Conv3D",vt(Bp);class e0 extends Kc{constructor(t){super(t);if(this.inputSpec=[new In({ndim:4})],this.padding!=="same"&&this.padding!=="valid")throw new Y(`Conv2DTranspose currently supports only padding modes 'same' and 'valid', but received padding mode ${this.padding}`)}build(t){if(t=Re(t),t.length!==4)throw new Y("Input should have rank 4; Received input shape: "+JSON.stringify(t));let e=this.dataFormat==="channelsFirst"?1:t.length-1;if(t[e]==null)throw new Y("The channel dimension of the inputs should be defined. 
Found `None`.");let r=t[e],o=this.kernelSize.concat([this.filters,r]);this.kernel=this.addWeight("kernel",o,"float32",this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.filters],"float32",this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint)),this.inputSpec=[new In({ndim:4,axes:{[e]:r}})],this.built=!0}call(t,e){return rt(()=>{let r=Qt(t);if(r.shape.length!==4)throw new Y(`Conv2DTranspose.call() expects input tensor to be rank-4, but received a tensor of rank-${r.shape.length}`);let o=r.shape,s=o[0],c,l;this.dataFormat==="channelsFirst"?(c=2,l=3):(c=1,l=2);let p=o[c],f=o[l],m=this.kernelSize[0],y=this.kernelSize[1],b=this.strides[0],v=this.strides[1],T=dm(p,b,m,this.padding),N=dm(f,v,y,this.padding),S=[s,T,N,this.filters];this.dataFormat!=="channelsLast"&&(r=Kt(r,[0,2,3,1]));let D=ld(r,this.kernel.read(),S,this.strides,this.padding);return this.dataFormat!=="channelsLast"&&(D=Kt(D,[0,3,1,2])),this.bias!=null&&(D=qo(D,this.bias.read(),this.dataFormat)),this.activation!=null&&(D=this.activation.apply(D)),D})}computeOutputShape(t){t=Re(t);let e=t.slice(),r,o,s;this.dataFormat==="channelsFirst"?(r=1,o=2,s=3):(r=3,o=1,s=2);let c=this.kernelSize[0],l=this.kernelSize[1],p=this.strides[0],f=this.strides[1];return e[r]=this.filters,e[o]=dm(e[o],p,c,this.padding),e[s]=dm(e[s],f,l,this.padding),e}getConfig(){let t=super.getConfig();return delete t.dilationRate,t}}e0.className="Conv2DTranspose",vt(e0);class US extends jc{constructor(t,e){super(t,e);if(this.DEFAULT_DEPTHWISE_INITIALIZER="glorotUniform",this.DEFAULT_POINTWISE_INITIALIZER="glorotUniform",this.depthwiseKernel=null,this.pointwiseKernel=null,e.filters==null)throw new Y("The `filters` configuration field is required by SeparableConv, but is unspecified.");if(e.kernelInitializer!=null||e.kernelRegularizer!=null||e.kernelConstraint!=null)throw new Y("Fields kernelInitializer, kernelRegularizer and kernelConstraint are invalid for SeparableConv2D. 
Use depthwiseInitializer, depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, pointwiseRegularizer and pointwiseConstraint instead.");if(e.padding!=null&&e.padding!=="same"&&e.padding!=="valid")throw new Y(`SeparableConv${this.rank}D supports only padding modes: 'same' and 'valid', but received ${JSON.stringify(e.padding)}`);this.depthMultiplier=e.depthMultiplier==null?1:e.depthMultiplier,this.depthwiseInitializer=je(e.depthwiseInitializer||this.DEFAULT_DEPTHWISE_INITIALIZER),this.depthwiseRegularizer=Ke(e.depthwiseRegularizer),this.depthwiseConstraint=Tn(e.depthwiseConstraint),this.pointwiseInitializer=je(e.depthwiseInitializer||this.DEFAULT_POINTWISE_INITIALIZER),this.pointwiseRegularizer=Ke(e.pointwiseRegularizer),this.pointwiseConstraint=Tn(e.pointwiseConstraint)}build(t){if(t=Re(t),t.length{t=Qt(t);let r;if(this.rank===1)throw new Ut("1D separable convolution is not implemented yet.");return this.rank===2&&(this.dataFormat==="channelsFirst"&&(t=Kt(t,[0,2,3,1])),r=Rw(t,this.depthwiseKernel.read(),this.pointwiseKernel.read(),this.strides,this.padding,this.dilationRate,"NHWC")),this.useBias&&(r=qo(r,this.bias.read(),this.dataFormat)),this.activation!=null&&(r=this.activation.apply(r)),this.dataFormat==="channelsFirst"&&(r=Kt(r,[0,3,1,2])),r})}getConfig(){let t=super.getConfig();return delete t.rank,delete t.kernelInitializer,delete t.kernelRegularizer,delete t.kernelConstraint,t.depthwiseInitializer=rn(this.depthwiseInitializer),t.pointwiseInitializer=rn(this.pointwiseInitializer),t.depthwiseRegularizer=Pe(this.depthwiseRegularizer),t.pointwiseRegularizer=Pe(this.pointwiseRegularizer),t.depthwiseConstraint=vn(this.depthwiseConstraint),t.pointwiseConstraint=vn(this.pointwiseConstraint),t}}US.className="SeparableConv";class n0 extends US{constructor(t){super(2,t)}}n0.className="SeparableConv2D",vt(n0);class zp extends jc{constructor(t){super(1,t);zp.verifyArgs(t),this.inputSpec=[{ndim:3}]}getConfig(){let t=super.getConfig();return delete t.rank,delete t.dataFormat,t}static verifyArgs(t){if(typeof t.kernelSize!="number"&&!pv(t.kernelSize,"number",1,1))throw new Y(`Conv1D expects config.kernelSize to be number or number[] with length 1, but received ${JSON.stringify(t.kernelSize)}.`)}}zp.className="Conv1D",vt(zp);class r0 extends ye{constructor(t){super(t);typeof t.cropping=="number"?this.cropping=[[t.cropping,t.cropping],[t.cropping,t.cropping]]:typeof t.cropping[0]=="number"?this.cropping=[[t.cropping[0],t.cropping[0]],[t.cropping[1],t.cropping[1]]]:this.cropping=t.cropping,this.dataFormat=t.dataFormat===void 0?"channelsLast":t.dataFormat,this.inputSpec=[{ndim:4}]}computeOutputShape(t){return this.dataFormat==="channelsFirst"?[t[0],t[1],t[2]-this.cropping[0][0]-this.cropping[0][1],t[3]-this.cropping[1][0]-this.cropping[1][1]]:[t[0],t[1]-this.cropping[0][0]-this.cropping[0][1],t[2]-this.cropping[1][0]-this.cropping[1][1],t[3]]}call(t,e){return rt(()=>{if(t=Qt(t),this.dataFormat==="channelsLast"){let r=qd(t,this.cropping[0][0],t.shape[1]-this.cropping[0][0]-this.cropping[0][1],2);return qd(r,this.cropping[1][0],t.shape[2]-this.cropping[1][1]-this.cropping[1][0],3)}else{let r=qd(t,this.cropping[0][0],t.shape[2]-this.cropping[0][0]-this.cropping[0][1],3);return qd(r,this.cropping[1][0],t.shape[3]-this.cropping[1][1]-this.cropping[1][0],4)}})}getConfig(){let t={cropping:this.cropping,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}r0.className="Cropping2D",vt(r0);class o0 extends 
ye{constructor(t){super(t);this.DEFAULT_SIZE=[2,2],this.inputSpec=[{ndim:4}],this.size=t.size==null?this.DEFAULT_SIZE:t.size,this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat}computeOutputShape(t){if(this.dataFormat==="channelsFirst"){let e=t[2]==null?null:this.size[0]*t[2],r=t[3]==null?null:this.size[1]*t[3];return[t[0],t[1],e,r]}else{let e=t[1]==null?null:this.size[0]*t[1],r=t[2]==null?null:this.size[1]*t[2];return[t[0],e,r,t[3]]}}call(t,e){return rt(()=>{let r=Qt(t),o=r.shape;if(this.dataFormat==="channelsFirst"){r=Kt(r,[0,2,3,1]);let s=this.size[0]*o[2],c=this.size[1]*o[3],l=r.resizeNearestNeighbor([s,c]);return Kt(l,[0,3,1,2])}else{let s=this.size[0]*o[1],c=this.size[1]*o[2];return r.resizeNearestNeighbor([s,c])}})}getConfig(){let t={size:this.size,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}o0.className="UpSampling2D",vt(o0);function wU(n,t,e=[1,1],r="valid",o,s){return rt(()=>{o==null&&(o=go()),nn(o);let c=Qv(n,o);if(n.rank!==4)throw new Y(`Input for depthwiseConv2d is required to be 4-D, but is instead ${n.rank}-D`);if(t.rank!==4)throw new Y(`depthwiseKernel is required to be 4-D, but is instead ${t.rank}-D`);return c=na(c,t,e,r==="same"?"same":"valid","NHWC",s),o==="channelsFirst"&&(c=Kt(c,[0,3,1,2])),c})}class s0 extends mm{constructor(t){super(2,t);this.depthwiseKernel=null,this.depthMultiplier=t.depthMultiplier==null?1:t.depthMultiplier,this.depthwiseInitializer=je(t.depthwiseInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.depthwiseConstraint=Tn(t.depthwiseConstraint),this.depthwiseRegularizer=Ke(t.depthwiseRegularizer)}build(t){if(t=Re(t),t.length<4)throw new Y(`Inputs to DepthwiseConv2D should have rank 4. Received input shape: ${JSON.stringify(t)}.`);let e=this.dataFormat==="channelsFirst"?1:3;if(t[e]==null||t[e]<0)throw new Y(`The channel dimension of the inputs to DepthwiseConv2D should be defined, but is not (${t[e]}).`);let r=t[e],o=[this.kernelSize[0],this.kernelSize[1],r,this.depthMultiplier];this.depthwiseKernel=this.addWeight("depthwise_kernel",o,null,this.depthwiseInitializer,this.depthwiseRegularizer,!0,this.depthwiseConstraint),this.useBias?this.bias=this.addWeight("bias",[r*this.depthMultiplier],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,e){return rt(()=>{t=Qt(t);let r=wU(t,this.depthwiseKernel.read(),this.strides,this.padding,this.dataFormat,null);return this.useBias&&(r=qo(r,this.bias.read(),this.dataFormat)),this.activation!=null&&(r=this.activation.apply(r)),r})}computeOutputShape(t){t=Re(t);let e=this.dataFormat==="channelsFirst"?t[2]:t[1],r=this.dataFormat==="channelsFirst"?t[3]:t[2],o=this.dataFormat==="channelsFirst"?t[1]*this.depthMultiplier:t[3]*this.depthMultiplier,s=To(e,this.kernelSize[0],this.padding,this.strides[0]),c=To(r,this.kernelSize[1],this.padding,this.strides[1]);return this.dataFormat==="channelsFirst"?[t[0],o,s,c]:[t[0],s,c,o]}getConfig(){let t=super.getConfig();return t.depthMultiplier=this.depthMultiplier,t.depthwiseInitializer=rn(this.depthwiseInitializer),t.depthwiseRegularizer=Pe(this.depthwiseRegularizer),t.depthwiseConstraint=vn(this.depthwiseRegularizer),t}}s0.className="DepthwiseConv2D",vt(s0);function qS(n,t,e,r){if(Array.isArray(n)){if(t!=null||e!=null)throw new Y("When inputs is an array, neither initialState or constants should be provided");r!=null&&(e=n.slice(n.length-r,n.length),n=n.slice(0,n.length-r)),n.length>1&&(t=n.slice(1,n.length)),n=n[0]}function o(s){return s==null||Array.isArray(s)?s:[s]}return 
t=o(t),e=o(e),{inputs:n,initialState:t,constants:e}}function HS(n,t,e,r=!1,o,s,c=!1,l=!1){return rt(()=>{let p=t.shape.length;if(p<3)throw new Y(`Input should be at least 3D, but is ${p}D.`);let f=[1,0].concat(yo(2,p));if(t=Kt(t,f),s!=null)throw new Ut("The rnn() functoin of the deeplearn.js backend does not support constants yet.");c&&console.warn("Backend rnn(): the unroll = true option is not applicable to the imperative deeplearn.js backend."),o!=null&&(o=o.asType("bool").asType("float32"),o.rank===p-1&&(o=cr(o,-1)),o=Kt(o,f)),r&&(t=Rr(t,0),o!=null&&(o=Rr(o,0)));let m=[],y,b=e,v=t.shape[0],T=mo(t),N;o!=null&&(N=mo(o));for(let D=0;Dn(I,b));if(o==null)y=P[0],b=P[1];else{let E=rt(()=>{let L=N[D],B=qn(L).sub(L),q=P[0].mul(L).add(b[0].mul(B)),H=b.map((Z,J)=>P[1][J].mul(L).add(Z.mul(B)));return{output:q,newStates:H}});y=E.output,b=E.newStates}l&&m.push(y)}let S;if(l){let D=1;S=ur(m,D)}return[y,S,b]})}class ko extends ye{constructor(t){super(t);let e;if(t.cell==null)throw new Y("cell property is missing for the constructor of RNN.");if(Array.isArray(t.cell)?e=new bm({cells:t.cell}):e=t.cell,e.stateSize==null)throw new Y("The RNN cell should have an attribute `stateSize` (tuple of integers, one integer per RNN state).");this.cell=e,this.returnSequences=t.returnSequences==null?!1:t.returnSequences,this.returnState=t.returnState==null?!1:t.returnState,this.goBackwards=t.goBackwards==null?!1:t.goBackwards,this._stateful=t.stateful==null?!1:t.stateful,this.unroll=t.unroll==null?!1:t.unroll,this.supportsMasking=!0,this.inputSpec=[new In({ndim:3})],this.stateSpec=null,this.states_=null,this.numConstants=null,this.keptStates=[]}getStates(){if(this.states_==null){let t=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;return yo(0,t).map(e=>null)}else return this.states_}setStates(t){this.states_=t}computeOutputShape(t){$v(t)&&(t=t[0]),t=t;let e=this.cell.stateSize;Array.isArray(e)||(e=[e]);let r=e[0],o;if(this.returnSequences?o=[t[0],t[1],r]:o=[t[0],r],this.returnState){let s=[];for(let c of e)s.push([t[0],c]);return[o].concat(s)}else return o}computeMask(t,e){return rt(()=>{Array.isArray(e)&&(e=e[0]);let r=this.returnSequences?e:null;if(this.returnState){let o=this.states.map(s=>null);return[r].concat(o)}else return r})}get states(){if(this.states_==null){let t=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1,e=[];for(let r=0;rl.shape[l.shape.length-1]),c))throw new Y(`An initialState was passed that is not compatible with cell.stateSize. Received stateSpec=${this.stateSpec}; However cell.stateSize is ${this.cell.stateSize}`)}else this.stateSpec=c.map(l=>new In({shape:[null,l]}));this.stateful&&this.resetStates()}resetStates(t,e=!1){rt(()=>{if(!this.stateful)throw new Go("Cannot call resetStates() on an RNN Layer that is not stateful.");let r=this.inputSpec[0].shape[0];if(r==null)throw new Y("If an RNN is stateful, it needs to know its batch size. 
Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.states_==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(o=>xe([r,o])):this.states_=[xe([r,this.cell.stateSize])];else if(t==null)Xt(this.states_),this.keptStates!=null&&(Xt(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(o=>xe([r,o])):this.states_[0]=xe([r,this.cell.stateSize]);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new Y(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). Input received: ${t}`);e===!0?this.keptStates.push(this.states_.slice()):Xt(this.states_);for(let o=0;oSn(o.clone()))})}apply(t,e){let r=e==null?null:e.initialState,o=e==null?null:e.constants;e==null&&(e={});let s=qS(t,r,o,this.numConstants);t=s.inputs,r=s.initialState,o=s.constants;let c=[],l=[];if(r!=null){e.initialState=r,c=c.concat(r),this.stateSpec=[];for(let f of r)this.stateSpec.push(new In({shape:f.shape}));l=l.concat(this.stateSpec)}o!=null&&(e.constants=o,c=c.concat(o),this.numConstants=o.length);let p=c[0]instanceof xo;if(p){let f=[t].concat(c),m=this.inputSpec.concat(l),y=this.inputSpec;this.inputSpec=m;let b=super.apply(f,e);return this.inputSpec=y,b}else return super.apply(t,e)}call(t,e){return rt(()=>{let r=e==null?null:e.mask,o=e==null?null:e.training,s=e==null?null:e.initialState;t=Qt(t),s==null&&(this.stateful?s=this.states_:s=this.getInitialState(t));let c=Array.isArray(this.cell.stateSize)?this.cell.stateSize.length:1;if(s.length!==c)throw new Y(`RNN Layer has ${c} state(s) but was passed ${s.length} initial state(s).`);this.unroll&&console.warn("Ignoring unroll = true for RNN layer, due to imperative backend.");let l={training:o},p=(T,N)=>{let S=this.cell.call([T].concat(N),l);return[S[0],S.slice(1)]},f=HS(p,t,s,this.goBackwards,r,null,this.unroll,this.returnSequences),m=f[0],y=f[1],b=f[2];this.stateful&&this.resetStates(b,o);let v=this.returnSequences?y:m;return this.returnState?[v].concat(b):v})}getInitialState(t){return rt(()=>{let e=xe(t.shape);return e=zt(e,[1,2]),e=Dp(e),Array.isArray(this.cell.stateSize)?this.cell.stateSize.map(r=>r>1?xv(e,[1,r]):e):this.cell.stateSize>1?[xv(e,[1,this.cell.stateSize])]:[e]})}get trainableWeights(){return this.trainable?this.cell.trainableWeights:[]}get nonTrainableWeights(){return this.trainable?this.cell.nonTrainableWeights:this.cell.weights}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),this.cell!=null&&this.cell.setFastWeightInitDuringBuild(t)}getConfig(){let t=super.getConfig(),e={returnSequences:this.returnSequences,returnState:this.returnState,goBackwards:this.goBackwards,stateful:this.stateful,unroll:this.unroll};this.numConstants!=null&&(e.numConstants=this.numConstants);let r=this.cell.getConfig();return this.getClassName()===ko.className&&(e.cell={className:this.cell.getClassName(),config:r}),Object.assign({},r,t,e)}static fromConfig(t,e,r={}){let o=e.cell,s=wo(o,r);return new t(Object.assign(e,{cell:s}))}}ko.className="RNN",vt(ko);class Xc extends ye{}class gm extends 
Xc{constructor(t){super(t);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,$n(this.units,"units"),this.activation=xi(t.activation==null?this.DEFAULT_ACTIVATION:t.activation),this.useBias=t.useBias==null?!0:t.useBias,this.kernelInitializer=je(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=je(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=je(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=Ke(t.kernelRegularizer),this.recurrentRegularizer=Ke(t.recurrentRegularizer),this.biasRegularizer=Ke(t.biasRegularizer),this.kernelConstraint=Tn(t.kernelConstraint),this.recurrentConstraint=Tn(t.recurrentConstraint),this.biasConstraint=Tn(t.biasConstraint),this.dropout=Vc([1,di([0,t.dropout==null?0:t.dropout])]),this.recurrentDropout=Vc([1,di([0,t.recurrentDropout==null?0:t.recurrentDropout])]),this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){t=Re(t),this.kernel=this.addWeight("kernel",[t[t.length-1],this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,e){return rt(()=>{if(t=t,t.length!==2)throw new Y(`SimpleRNNCell expects 2 input Tensors, got ${t.length}.`);let r=t[1];t=t[0];let o=e.training==null?!1:e.training;0qn(t),rate:this.dropout,training:o})),0qn(r),rate:this.recurrentDropout,training:o}));let s,c=this.dropoutMask,l=this.recurrentDropoutMask;c!=null?s=Uo(nt(t,c),this.kernel.read()):s=Uo(t,this.kernel.read()),this.bias!=null&&(s=qo(s,this.bias.read())),l!=null&&(r=nt(r,l));let p=Tt(s,Uo(r,this.recurrentKernel.read()));return this.activation!=null&&(p=this.activation.apply(p)),[p,p]})}getConfig(){let t=super.getConfig(),e={units:this.units,activation:bi(this.activation),useBias:this.useBias,kernelInitializer:rn(this.kernelInitializer),recurrentInitializer:rn(this.recurrentInitializer),biasInitializer:rn(this.biasInitializer),kernelRegularizer:Pe(this.kernelRegularizer),recurrentRegularizer:Pe(this.recurrentRegularizer),biasRegularizer:Pe(this.biasRegularizer),activityRegularizer:Pe(this.activityRegularizer),kernelConstraint:vn(this.kernelConstraint),recurrentConstraint:vn(this.recurrentConstraint),biasConstraint:vn(this.biasConstraint),dropout:this.dropout,recurrentDropout:this.recurrentDropout};return Object.assign({},t,e)}}gm.className="SimpleRNNCell",vt(gm);class i0 extends ko{constructor(t){t.cell=new gm(t),super(t)}call(t,e){return rt(()=>{this.cell.dropoutMask!=null&&(Xt(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(Xt(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let r=e==null?null:e.mask,o=e==null?null:e.training,s=e==null?null:e.initialState;return super.call(t,{mask:r,training:o,initialState:s})})}static fromConfig(t,e){return new t(e)}}i0.className="SimpleRNN",vt(i0);class ym extends 
Xc{constructor(t){super(t);if(this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",t.resetAfter)throw new Y("GRUCell does not support reset_after parameter set to true.");this.units=t.units,$n(this.units,"units"),this.activation=xi(t.activation===void 0?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=xi(t.recurrentActivation===void 0?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=t.useBias==null?!0:t.useBias,this.kernelInitializer=je(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=je(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=je(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelRegularizer=Ke(t.kernelRegularizer),this.recurrentRegularizer=Ke(t.recurrentRegularizer),this.biasRegularizer=Ke(t.biasRegularizer),this.kernelConstraint=Tn(t.kernelConstraint),this.recurrentConstraint=Tn(t.recurrentConstraint),this.biasConstraint=Tn(t.biasConstraint),this.dropout=Vc([1,di([0,t.dropout==null?0:t.dropout])]),this.recurrentDropout=Vc([1,di([0,t.recurrentDropout==null?0:t.recurrentDropout])]),this.implementation=t.implementation,this.stateSize=this.units,this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){t=Re(t);let e=t[t.length-1];this.kernel=this.addWeight("kernel",[e,this.units*3],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*3],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias?this.bias=this.addWeight("bias",[this.units*3],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint):this.bias=null,this.built=!0}call(t,e){return rt(()=>{if(t=t,t.length!==2)throw new Y(`GRUCell expects 2 input Tensors (inputs, h, c), got ${t.length}.`);let r=e.training==null?!1:e.training,o=t[1];t=t[0],0qn(t),rate:this.dropout,training:r,count:3})),0qn(o),rate:this.recurrentDropout,training:r,count:3}));let s=this.dropoutMask,c=this.recurrentDropoutMask,l,p,f;0{this.cell.dropoutMask!=null&&(Xt(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(Xt(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let r=e==null?null:e.mask,o=e==null?null:e.training,s=e==null?null:e.initialState;return super.call(t,{mask:r,training:o,initialState:s})})}static fromConfig(t,e){return e.implmentation===0&&(e.implementation=1),new t(e)}}a0.className="GRU",vt(a0);class Wp extends Xc{constructor(t){super(t);this.DEFAULT_ACTIVATION="tanh",this.DEFAULT_RECURRENT_ACTIVATION="hardSigmoid",this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_RECURRENT_INITIALIZER="orthogonal",this.DEFAULT_BIAS_INITIALIZER="zeros",this.units=t.units,$n(this.units,"units"),this.activation=xi(t.activation===void 0?this.DEFAULT_ACTIVATION:t.activation),this.recurrentActivation=xi(t.recurrentActivation===void 
0?this.DEFAULT_RECURRENT_ACTIVATION:t.recurrentActivation),this.useBias=t.useBias==null?!0:t.useBias,this.kernelInitializer=je(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.recurrentInitializer=je(t.recurrentInitializer||this.DEFAULT_RECURRENT_INITIALIZER),this.biasInitializer=je(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.unitForgetBias=t.unitForgetBias,this.kernelRegularizer=Ke(t.kernelRegularizer),this.recurrentRegularizer=Ke(t.recurrentRegularizer),this.biasRegularizer=Ke(t.biasRegularizer),this.kernelConstraint=Tn(t.kernelConstraint),this.recurrentConstraint=Tn(t.recurrentConstraint),this.biasConstraint=Tn(t.biasConstraint),this.dropout=Vc([1,di([0,t.dropout==null?0:t.dropout])]),this.recurrentDropout=Vc([1,di([0,t.recurrentDropout==null?0:t.recurrentDropout])]),this.implementation=t.implementation,this.stateSize=[this.units,this.units],this.dropoutMask=null,this.recurrentDropoutMask=null}build(t){var e;t=Re(t);let r=t[t.length-1];this.kernel=this.addWeight("kernel",[r,this.units*4],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.recurrentKernel=this.addWeight("recurrent_kernel",[this.units,this.units*4],null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint);let o;if(this.useBias){if(this.unitForgetBias){let s=this.biasInitializer,c=this.units;o=new(e=class extends Qr{apply(p,f){let m=s.apply([c]),y=new jd().apply([c]),b=s.apply([c*2]);return VC(VC(m,y),b)}},e.className="CustomInit",e)}else o=this.biasInitializer;this.bias=this.addWeight("bias",[this.units*4],null,o,this.biasRegularizer,!0,this.biasConstraint)}else this.bias=null;this.built=!0}call(t,e){return rt(()=>{let r=e.training==null?!1:e.training;if(t=t,t.length!==3)throw new Y(`LSTMCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);let o=t[1],s=t[2];t=t[0],0qn(t),rate:this.dropout,training:r,count:4})),0qn(o),rate:this.recurrentDropout,training:r,count:4}));let c=this.dropoutMask,l=this.recurrentDropoutMask,p,f,m,y;0{this.cell.dropoutMask!=null&&(Xt(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(Xt(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null);let r=e==null?null:e.mask,o=e==null?null:e.training,s=e==null?null:e.initialState;return super.call(t,{mask:r,training:o,initialState:s})})}static fromConfig(t,e){return e.implmentation===0&&(e.implementation=1),new t(e)}}c0.className="LSTM",vt(c0);class bm extends Xc{constructor(t){super(t);this.cells=t.cells}get stateSize(){let t=[];for(let e of this.cells.slice().reverse())Array.isArray(e.stateSize)?t.push(...e.stateSize):t.push(e.stateSize);return t}call(t,e){return rt(()=>{t=t;let r=t.slice(1),o=[];for(let l of this.cells.slice().reverse())Array.isArray(l.stateSize)?o.push(r.splice(0,l.stateSize.length)):o.push(r.splice(0,1));o.reverse();let s=[],c;for(let l=0;l{fa(`RNNCell_${o}`,()=>{r.build(t),Array.isArray(r.stateSize)?e=r.stateSize[0]:e=r.stateSize,t=[t[0],e]})}),this.built=!0}getConfig(){let t=super.getConfig(),e=s=>({className:s.getClassName(),config:s.getConfig()}),r=this.cells.map(e),o={cells:r};return Object.assign({},t,o)}static fromConfig(t,e,r={}){let o=[];for(let s of e.cells)o.push(wo(s,r));return new t({cells:o})}get trainableWeights(){if(!this.trainable)return[];let t=[];for(let e of this.cells)t.push(...e.trainableWeights);return t}get nonTrainableWeights(){let t=[];for(let e of this.cells)t.push(...e.nonTrainableWeights);if(!this.trainable){let e=[];for(let r of 
this.cells)e.push(...r.trainableWeights);return e.concat(t)}return t}getWeights(){let t=[];for(let e of this.cells)t.push(...e.weights);return Iv(t)}setWeights(t){let e=[];for(let r of this.cells){let o=r.weights.length,s=t.splice(o);for(let c=0;cUC(t(),e),c=()=>Fp(s,t,r);if(!o||o<=1)return Sn(c().clone());let l=Array(o).fill(void 0).map(c);return l.map(p=>Sn(p.clone()))}var vU=function(n,t){var e={};for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&t.indexOf(r)<0&&(e[r]=n[r]);if(n!=null&&typeof Object.getOwnPropertySymbols=="function")for(var o=0,r=Object.getOwnPropertySymbols(n);o{if(this.cell.dropoutMask!=null&&(Xt(this.cell.dropoutMask),this.cell.dropoutMask=null),this.cell.recurrentDropoutMask!=null&&(Xt(this.cell.recurrentDropoutMask),this.cell.recurrentDropoutMask=null),e&&e.constants)throw new Y("ConvRNN2D cell does not support constants");let r=e==null?null:e.mask,o=e==null?null:e.training,s=e==null?null:e.initialState;return super.call(t,{mask:r,training:o,initialState:s})})}computeOutputShape(t){let e=this.computeSingleOutputShape(t);return this.returnSequences||(e=[e[0],...e.slice(2)]),this.returnState&&(e=[e,...Array(2).fill([t[0],...e.slice(-3)])]),e}getInitialState(t){return rt(()=>{let{stateSize:e}=this.cell,r=t.shape,o=this.computeSingleOutputShape(r),s=[o[0],...o.slice(2)],c=xe(s);return Array.isArray(e)?Array(e.length).fill(c):[c]})}resetStates(t,e=!1){rt(()=>{if(!this.stateful)throw new Go("Cannot call resetStates() on an RNN Layer that is not stateful.");let r=this.inputSpec[0].shape,o=this.computeSingleOutputShape(r),s=[o[0],...o.slice(2)],c=r[0];if(c==null)throw new Y("If an RNN is stateful, it needs to know its batch size. Specify the batch size of your input tensors: \n- If using a Sequential model, specify the batch size by passing a `batchInputShape` option to your first layer.\n- If using the functional API, specify the batch size by passing a `batchShape` option to your Input layer.");if(this.getStates()==null)Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>xe(s)):this.states_=[xe(s)];else if(t==null)Xt(this.states_),this.keptStates!=null&&(Xt(this.keptStates),this.keptStates=[]),Array.isArray(this.cell.stateSize)?this.states_=this.cell.stateSize.map(()=>xe(s)):this.states_[0]=xe(s);else{if(Array.isArray(t)||(t=[t]),t.length!==this.states_.length)throw new Y(`Layer ${this.name} expects ${this.states_.length} state(s), but it received ${t.length} state value(s). 
Input received: ${t}`);e?this.keptStates.push(this.states_.slice()):Xt(this.states_);for(let l=0;lSn(l.clone()))})}computeSingleOutputShape(t){let{dataFormat:e,filters:r,kernelSize:o,padding:s,strides:c,dilationRate:l}=this.cell,p=e==="channelsFirst",f=t[p?3:2],m=t[p?4:3],y=To(f,o[0],s,c[0],l[0]),b=To(m,o[1],s,c[1],l[1]),v=[...t.slice(0,2),...p?[r,y,b]:[y,b,r]];return v}}jS.className="ConvRNN2D";class xm extends Wp{constructor(t){let{filters:e,kernelSize:r,strides:o,padding:s,dataFormat:c,dilationRate:l}=t;super(Object.assign({},t,{units:e}));this.filters=e,$n(this.filters,"filters"),this.kernelSize=Hc(r,2,"kernelSize"),this.kernelSize.forEach(p=>$n(p,"kernelSize")),this.strides=Hc(o||1,2,"strides"),this.strides.forEach(p=>$n(p,"strides")),this.padding=s||"valid",Or(this.padding),this.dataFormat=c||"channelsLast",nn(this.dataFormat),this.dilationRate=Hc(l||1,2,"dilationRate"),this.dilationRate.forEach(p=>$n(p,"dilationRate"))}build(t){var e;t=Re(t);let r=this.dataFormat==="channelsFirst"?1:t.length-1;if(t[r]==null)throw new Y(`The channel dimension of the input should be defined. Found ${t[r]}`);let o=t[r],s=4,c=this.kernelSize.concat([o,this.filters*s]);this.kernel=this.addWeight("kernel",c,null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint);let l=this.kernelSize.concat([this.filters,this.filters*s]);if(this.recurrentKernel=this.addWeight("recurrent_kernel",l,null,this.recurrentInitializer,this.recurrentRegularizer,!0,this.recurrentConstraint),this.useBias){let p;if(this.unitForgetBias){let f=this.biasInitializer,m=this.filters;p=new(e=class extends Qr{apply(b,v){let T=f.apply([m]),N=ho([m]),S=f.apply([m*2]);return bv([T,N,S])}},e.className="CustomInit",e)}else p=this.biasInitializer;this.bias=this.addWeight("bias",[this.filters*s],null,p,this.biasRegularizer,!0,this.biasConstraint)}this.built=!0}call(t,e){return rt(()=>{if(t.length!==3)throw new Y(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ${t.length}.`);let r=e.training||!1,o=t[0],s=t[1],c=t[2],l=4;0qn(o),rate:this.dropout,training:r,count:l}));let p=this.dropoutMask,f=(kt,Nt,At)=>!Nt||!Nt[At]?kt:nt(Nt[At],kt),m=f(o,p,0),y=f(o,p,1),b=f(o,p,2),v=f(o,p,3);0qn(s),rate:this.recurrentDropout,training:r,count:l}));let T=this.recurrentDropoutMask,N=f(s,T,0),S=f(s,T,1),D=f(s,T,2),I=f(s,T,3),P=3,[E,L,B,q]=Tr(this.kernel.read(),l,P),[H,Z,J,it]=this.useBias?Tr(this.bias.read(),l):[null,null,null,null];m=this.inputConv(m,E,H,this.padding),y=this.inputConv(y,L,Z,this.padding),b=this.inputConv(b,B,J,this.padding),v=this.inputConv(v,q,it,this.padding);let[pt,ht,dt,ft]=Tr(this.recurrentKernel.read(),l,P);N=this.recurrentConv(N,pt),S=this.recurrentConv(S,ht),D=this.recurrentConv(D,dt),I=this.recurrentConv(I,ft);let ut=this.recurrentActivation.apply(Tt(m,N)),bt=this.recurrentActivation.apply(Tt(y,S)),yt=Tt(nt(bt,c),nt(ut,this.activation.apply(Tt(b,D)))),xt=nt(this.recurrentActivation.apply(Tt(v,I)),this.activation.apply(yt));return[xt,xt,yt]})}getConfig(){let t=super.getConfig(),{units:e}=t,r=vU(t,["units"]),o={filters:this.filters,kernelSize:this.kernelSize,padding:this.padding,dataFormat:this.dataFormat,dilationRate:this.dilationRate,strides:this.strides};return Object.assign({},r,o)}inputConv(t,e,r,o){let s=fs(t,e,this.strides,o||"valid",this.dataFormat==="channelsFirst"?"NCHW":"NHWC",this.dilationRate);return r?qo(s,r,this.dataFormat):s}recurrentConv(t,e){let r=1;return fs(t,e,r,"same",this.dataFormat==="channelsFirst"?"NCHW":"NHWC")}}xm.className="ConvLSTM2DCell",vt(xm);class l0 extends jS{constructor(t){let 
e=new xm(t);super(Object.assign({},t,{cell:e}))}static fromConfig(t,e){return new t(e)}}l0.className="ConvLSTM2D",vt(l0);class wm extends ye{constructor(t){super(t);this.rate=Math.max(Math.min(t.rate,1),0),this.noiseShape=t.noiseShape,this.seed=t.seed,this.supportsMasking=!0}getNoiseShape(t){if(this.noiseShape==null)return this.noiseShape;let e=t.shape,r=[];for(let o=0;o{this.invokeCallHook(t,e);let r=Qt(t);if(0UC(r,this.rate,s,this.seed),()=>r,o);return c}return t})}getConfig(){let t={rate:this.rate,noiseShape:this.noiseShape,seed:this.seed},e=super.getConfig();return Object.assign(t,e),t}dispose(){return super.dispose()}}wm.className="Dropout",vt(wm);class u0 extends wm{constructor(t){super(t);this.inputSpec=[{ndim:3}]}getNoiseShape(t){let e=t.shape;return[e[0],1,e[2]]}}u0.className="SpatialDropout1D",vt(u0);class p0 extends ye{constructor(t){super(t);if(this.activation=null,this.useBias=!0,this.kernel=null,this.bias=null,this.DEFAULT_KERNEL_INITIALIZER="glorotNormal",this.DEFAULT_BIAS_INITIALIZER="zeros",t.batchInputShape==null&&t.inputShape==null&&t.inputDim!=null){let e=null;t.batchSize!=null&&(e=t.batchSize),this.batchInputShape=[e,t.inputDim]}this.units=t.units,$n(this.units,"units"),this.activation=xi(t.activation),t.useBias!=null&&(this.useBias=t.useBias),this.kernelInitializer=je(t.kernelInitializer||this.DEFAULT_KERNEL_INITIALIZER),this.biasInitializer=je(t.biasInitializer||this.DEFAULT_BIAS_INITIALIZER),this.kernelConstraint=Tn(t.kernelConstraint),this.biasConstraint=Tn(t.biasConstraint),this.kernelRegularizer=Ke(t.kernelRegularizer),this.biasRegularizer=Ke(t.biasRegularizer),this.activityRegularizer=Ke(t.activityRegularizer),this.supportsMasking=!0,this.inputSpec=[{minNDim:2}]}build(t){t=Re(t);let e=t[t.length-1];this.kernel==null&&(this.kernel=this.addWeight("kernel",[e,this.units],null,this.kernelInitializer,this.kernelRegularizer,!0,this.kernelConstraint),this.useBias&&(this.bias=this.addWeight("bias",[this.units],null,this.biasInitializer,this.biasRegularizer,!0,this.biasConstraint))),this.inputSpec=[{minNDim:2,axes:{[-1]:e}}],this.built=!0}computeOutputShape(t){t=Re(t);let e=t.slice();return e[e.length-1]=this.units,e}call(t,e){return rt(()=>{this.invokeCallHook(t,e);let r=Qt(t),o=FC(this.activation.getClassName()),s;return o!=null?s=Uo(r,this.kernel.read(),o,this.bias?this.bias.read():null):(s=Uo(r,this.kernel.read()),this.bias!=null&&(s=qo(s,this.bias.read())),this.activation!=null&&(s=this.activation.apply(s))),s})}getConfig(){let t={units:this.units,activation:bi(this.activation),useBias:this.useBias,kernelInitializer:rn(this.kernelInitializer),biasInitializer:rn(this.biasInitializer),kernelRegularizer:Pe(this.kernelRegularizer),biasRegularizer:Pe(this.biasRegularizer),activityRegularizer:Pe(this.activityRegularizer),kernelConstraint:vn(this.kernelConstraint),biasConstraint:vn(this.biasConstraint)},e=super.getConfig();return Object.assign(t,e),t}}p0.className="Dense",vt(p0);class h0 extends ye{constructor(t){t=t||{},super(t),this.inputSpec=[{minNDim:3}],this.dataFormat=t.dataFormat}computeOutputShape(t){t=Re(t);for(let e of t.slice(1))if(e==null)throw new Y(`The shape of the input to "Flatten" is not fully defined (got ${t.slice(1)}). 
Make sure to pass a complete "input_shape" or "batch_input_shape" argument to the first layer in your model.`);return[t[0],fi(t,1)]}call(t,e){return rt(()=>{this.invokeCallHook(t,e);let r=Qt(t);if(this.dataFormat==="channelsFirst"&&r.rank>1){let o=[0];for(let s=2;s{this.invokeCallHook(t,e);let r=Qt(t);return this.activation.apply(r)})}getConfig(){let t={activation:bi(this.activation)},e=super.getConfig();return Object.assign(t,e),t}}f0.className="Activation",vt(f0);class d0 extends ye{constructor(t){super(t);this.n=t.n,this.inputSpec=[{ndim:2}]}computeOutputShape(t){return[t[0],this.n,t[1]]}call(t,e){return rt(()=>(t=Qt(t),VV(t,this.n)))}getConfig(){let t={n:this.n},e=super.getConfig();return Object.assign(t,e),t}}d0.className="RepeatVector",vt(d0);class m0 extends ye{constructor(t){super(t);this.targetShape=t.targetShape;for(let e=0;e{this.invokeCallHook(t,e);let r=Qt(t),o=r.shape,s=o.slice(0,1).concat(this.fixUnknownDimension(o.slice(1),this.targetShape));return r.reshape(s)})}getConfig(){let t={targetShape:this.targetShape},e=super.getConfig();return Object.assign(t,e),t}}m0.className="Reshape",vt(m0);class g0 extends ye{constructor(t){super(t);if(t.dims==null)throw new Error("Required configuration field `dims` is missing during Permute constructor call.");if(!Array.isArray(t.dims))throw new Error(`Permute constructor requires \`dims\` to be an Array, but received ${t.dims} instead.`);let e=yo(1,t.dims.length+1);if(!lt(t.dims.slice().sort(),e))throw new Error("Invalid permutation `dims`: "+JSON.stringify(t.dims)+" `dims` must contain consecutive integers starting from 1.");this.dims=t.dims,this.dimsIncludingBatch=[0].concat(this.dims),this.inputSpec=[new In({ndim:this.dims.length+1})]}computeOutputShape(t){t=Re(t);let e=t.slice();return this.dims.forEach((r,o)=>{e[o+1]=t[r]}),e}call(t,e){return Kt(Qt(t),this.dimsIncludingBatch)}getConfig(){let t={dims:this.dims},e=super.getConfig();return Object.assign(t,e),t}}g0.className="Permute",vt(g0);class y0 extends ye{constructor(t){super(t==null?{}:t);this.supportsMasking=!0,t!=null?this.maskValue=t.maskValue==null?0:t.maskValue:this.maskValue=0}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),e={maskValue:this.maskValue};return Object.assign(e,t),e}computeMask(t,e){let r=Qt(t),o=-1;return Zu(ci(r,this.maskValue),o)}call(t,e){return rt(()=>{this.invokeCallHook(t,e);let r=Qt(t),o=-1,s=!0,c=Zu(ci(r,this.maskValue),o,s),l=r.mul(c.asType(r.dtype));return l})}}y0.className="Masking",vt(y0);class b0 extends ye{constructor(t){super(t);if(this.embeddings=null,this.DEFAULT_EMBEDDINGS_INITIALIZER="randomUniform",t.batchInputShape==null&&t.inputShape==null){let e=null;t.batchSize!=null&&(e=t.batchSize),t.inputLength==null?this.batchInputShape=[e,null]:this.batchInputShape=[e].concat(ze(t.inputLength))}this.inputDim=t.inputDim,$n(this.inputDim,"inputDim"),this.outputDim=t.outputDim,$n(this.outputDim,"outputDim"),this.embeddingsInitializer=je(t.embeddingsInitializer||this.DEFAULT_EMBEDDINGS_INITIALIZER),this.embeddingsRegularizer=Ke(t.embeddingsRegularizer),this.activityRegularizer=Ke(t.activityRegularizer),this.embeddingsConstraint=Tn(t.embeddingsConstraint),this.maskZero=t.maskZero,this.supportsMasking=t.maskZero,this.inputLength=t.inputLength}build(t){this.embeddings=this.addWeight("embeddings",[this.inputDim,this.outputDim],this.dtype,this.embeddingsInitializer,this.embeddingsRegularizer,!0,this.embeddingsConstraint),this.built=!0}warnOnIncompatibleInputShape(t){}computeMask(t,e){return 
rt(()=>this.maskZero?(t=Qt(t),ci(t,re(t))):null)}computeOutputShape(t){if(t=Re(t),this.inputLength==null)return[...t,this.outputDim];let e=ze(this.inputLength);if(e.length!==t.length-1)throw new Y(`"inputLength" is ${this.inputLength}, but received input shape has shape ${t}`);{let r=0;for(let o=0;o{this.invokeCallHook(t,e);let r=Qt(t);r.dtype!=="int32"&&(r=Ep(r,"int32"));let o=GC(this.embeddings.read(),r.as1D());return o.reshape(Re(this.computeOutputShape(r.shape)))})}getConfig(){let t={inputDim:this.inputDim,outputDim:this.outputDim,embeddingsInitializer:rn(this.embeddingsInitializer),embeddingsRegularizer:Pe(this.embeddingsRegularizer),activityRegularizer:Pe(this.activityRegularizer),embeddingsConstraint:vn(this.embeddingsConstraint),maskZero:this.maskZero,inputLength:this.inputLength},e=super.getConfig();return Object.assign(t,e),t}}b0.className="Embedding",vt(b0);class ya extends ye{constructor(t){super(t||{});this.supportsMasking=!0}mergeFunction(t){throw new Ut}computeElementwiseOpOutputShape(t,e){if(t==null||e==null)return null;if(t.length1)throw new Y(`Can not merge tensors with different batch sizes. Got tensors with shapes: ${JSON.stringify(t)}.`);let r=t[0]==null?null:t[0].slice(1);for(let s=1;ss.length);t.indexOf(null)===-1&&hi(o).length===1?this.reshapeRequired=!1:this.reshapeRequired=!0}call(t,e){return rt(()=>{if(t=t,this.reshapeRequired){let r=[],o=t.map(s=>s.rank);if(o.indexOf(null)===-1){let s=di(o);for(let c of t){let l=c.rank;for(let p=0;p1){let m=yo(1,f).concat([0]);r.push(Kt(p,m)),s=!0}else r.push(p)}let c=this.mergeFunction(r),l=c.rank;if(s){if(l==null){let p=c.shape,f=p.length,m=p[f-1],y=[m].concat(p.slice(0,p.length-1));c=Kt(c.reshape([-1,m]),[1,0]).reshape(y)}else if(l>1){let p=[l-1].concat(yo(0,l-1));c=Kt(c,p)}}return c}}else return this.mergeFunction(t)})}computeOutputShape(t){t=t;let e;t[0]==null?e=null:e=t[0].slice(1);for(let o=1;o{if(e==null)return null;if(!Array.isArray(e))throw new Y("`mask` should be an Array");if(!Array.isArray(t))throw new Y("`inputs` should be an Array");if(e.length!==t.length)throw new Y(`The Array 'inputs' and 'mask' are expected to have the same length, but have different lengths (${t.length} vs ${e.length})`);if(e.every(o=>o==null))return null;e=e.map(o=>o==null?o:cr(o,0));let r=e[0];for(let o=1;o{let e=t[0].clone();for(let r=1;r{let e=t[0].clone();for(let r=1;r{let e=t[0].clone();for(let r=1;r{let e=t[0];for(let r=1;r{let e=t[0];for(let r=1;r1)throw new Y("A `Concatenate` layer requires inputs with matching shapes except for the concat axis. 
Got input shapes: "+JSON.stringify(t))}mergeFunction(t){return rt(()=>bv(t,this.axis))}computeOutputShape(t){if(!(Array.isArray(t)&&Array.isArray(t[0])))throw new Y("A `Concatenate` layer should be called on a list of inputs.");let e=t,r=e[0].slice(),o=this.axis<0?r.length+this.axis:this.axis;for(let s of e.slice(1)){if(r[o]==null||s[o]==null){r[o]=null;break}r[o]+=s[o]}return r}computeMask(t,e){if(e==null)return null;if(!Array.isArray(e))throw new Y("`mask` should be an array for Concatenate");if(!Array.isArray(t))throw new Y("`inputs` should be an array for Concatenate");if(e.length!==t.length)throw new Y(`Mismatch in the length of mask (${e.length}) and the legnth of inputs (${t.length})`);return rt(()=>{let r=!0;if(e.forEach(c=>{if(c!=null){r=!1;return}}),r)return null;let o=[];for(let c=0;c3||t.shape.length>3)throw new Ut("batchDot is not implemented for tensors of 4D or higher rank yet");if(_(n.shape.length>=2,()=>`batchDot requires the rank of x to be >= 2, but got ${n.shape.length}`),_(n.shape.length>=2,()=>`batchDot requires the rank of y to be >= 2, but got ${t.shape.length}`),typeof e=="number"&&(e=[e,e]),n.dtype==="complex64"||t.dtype==="complex64")throw new Ut("batchDot is not implemented for complex64-type Tensors yet.");let r=n.shape.length,o=t.shape.length;e==null&&(e=[r-1,o-2]);let s=e;return rt(()=>{let c;if(r>o){c=r-o;let p=[];for(let f=0;fr){c=o-r;let p=[];for(let f=0;f0){let p;r>o?p=r+o-3:p=r-1;let f=[];for(let m=p;m"A `Dot` layer should be called on a list of exactly 2 inputs.");let e=t[0],r=t[1];if(e.length>3||r.length>3)throw new Ut("Dot layer does not support tensors of 4D or higher rank yet.");let o=this.interpretAxes(e,r);if(e[o[0]]!==r[o[1]])throw new Y(`Dimension incompatibility: ${e[o[0]]} !== ${r[o[1]]}`)}mergeFunction(t){if(t.length!==2)throw new Y(`A \`Dot\` layer must be called on exactly 2 inputs, but received ${t.length} input(s).`);let e=t[0],r=t[1],o;return Array.isArray(this.axes)?o=this.axes.map((s,c)=>Kp(s,t[c].shape.length)):o=[Kp(this.axes,e.shape.length),Kp(this.axes,r.shape.length)],this.normalize&&(e=sm(e,o[0]),r=sm(r,o[1])),TU(e,r,o)}interpretAxes(t,e){let r;return Array.isArray(this.axes)?r=this.axes:r=[Kp(this.axes,t.length),Kp(this.axes,e.length)],r}computeOutputShape(t){_(Array.isArray(t)&&t.length===2&&Array.isArray(t[0])&&Array.isArray(t[1]),()=>"A `Dot` layer should be called on a list of exactly 2 inputs.");let e=t[0].slice(),r=t[1].slice();if(e.length>3||r.length>3)throw new Ut("Dot layer does not support tensors of 4D or higher rank yet.");let o=this.interpretAxes(e,r);e.splice(o[0],1),r.splice(o[1],1),r.splice(0,1);let s=e.concat(r);return s.length===1&&s.push(1),s}computeMask(t,e){return null}getConfig(){let t={axes:this.axes,normalize:this.normalize},e=super.getConfig();return Object.assign(t,e),t}}x0.className="Dot",vt(x0);class w0 extends ye{constructor(t){super(t);this.supportsMasking=!0,this.stddev=t.stddev}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),e={stddev:this.stddev};return Object.assign(e,t),e}call(t,e){return rt(()=>{this.invokeCallHook(t,e);let r=Qt(t),o=()=>Hd(r.shape,0,this.stddev).add(r),s=Fp(o,()=>r,e.training||!1);return s})}}w0.className="GaussianNoise",vt(w0);class v0 extends ye{constructor(t){super(t);this.supportsMasking=!0,this.rate=t.rate}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),e={rate:this.rate};return Object.assign(e,t),e}call(t,e){return rt(()=>{this.invokeCallHook(t,e);let r=Qt(t);if(this.rate>0&&this.rate<1){let o=()=>{let 
s=Math.sqrt(this.rate/(1-this.rate));return r.mul(Hd(r.shape,1,s))};return Fp(o,()=>r,e.training||!1)}return r})}}v0.className="GaussianDropout",vt(v0);class T0 extends ye{constructor(t){super(t);this.supportsMasking=!0,this.rate=t.rate,this.noiseShape=t.noiseShape}_getNoiseShape(t){return this.noiseShape||Qt(t).shape}computeOutputShape(t){return t}getConfig(){let t=super.getConfig(),e={rate:this.rate};return Object.assign(e,t),e}call(t,e){return rt(()=>{if(this.rate<1&&this.rate>0){let r=this._getNoiseShape(t),o=()=>{let s=Qt(t),c=1.6732632423543772,l=1.0507009873554805,p=-c*l,f=ds(aa(r),this.rate);f=Ep(f,"float32");let m=((1-this.rate)*(1+this.rate*p**2))**-.5,y=-m*p*this.rate,b=s.mul(f).add(f.add(-1).mul(p));return b.mul(m).add(y)};return Fp(o,()=>Qt(t),e.training||!1)}return t})}}T0.className="AlphaDropout",vt(T0);function Xp(n,t,e,r,o,s=.001){let c;if(n.rank===2)c=b_(n,t,e,r,o,s);else if(n.rank===3)c=x_(n,t,e,r,o,s);else if(n.rank===4)c=w_(n,t,e,r,o,s);else throw new Ut(`batchNormalization is not implemented for array of rank ${n.rank} yet`);return c}function kU(n,t,e,r,o=.001){return rt(()=>{let s=xd(n,r),c=s.mean,l=s.variance,p=Xp(n,c,l,e,t,o);return[p,c,l]})}function NU(n,t,e,r,o=.001){return rt(()=>{let s=xd(n,r),c=s.mean,l=s.variance,p=[];for(let T of yo(0,n.rank))r.indexOf(T)!==-1?p.push(1):p.push(n.shape[T]);let f=c.reshape(p),m=l.reshape(p),y=t==null?null:t.reshape(p),b=e==null?null:e.reshape(p),v=Xp(n,f,m,b,y,o);return[v,c,l]})}function _U(n,t,e,r,o=.001){return lt(r.slice().sort(),yo(0,n.rank-1))?kU(n,t,e,r,o):NU(n,t,e,r,o)}class k0 extends ye{constructor(t){t==null&&(t={}),super(t),this.supportsMasking=!0,this.axis=t.axis==null?-1:t.axis,this.momentum=t.momentum==null?.99:t.momentum,this.epsilon=t.epsilon==null?.001:t.epsilon,this.center=t.center==null?!0:t.center,this.scale=t.scale==null?!0:t.scale,this.betaInitializer=je(t.betaInitializer||"zeros"),this.gammaInitializer=je(t.gammaInitializer||"ones"),this.movingMeanInitializer=je(t.movingMeanInitializer||"zeros"),this.movingVarianceInitializer=je(t.movingVarianceInitializer||"ones"),this.betaConstraint=Tn(t.betaConstraint),this.gammaConstraint=Tn(t.gammaConstraint),this.betaRegularizer=Ke(t.betaRegularizer),this.gammaRegularizer=Ke(t.gammaRegularizer)}build(t){t=Re(t);let e=this.axis>=0?this.axis:this.axis+t.length,r=t[e];if(r==null)throw new Y(`Axis ${e} of input tensor should have a defined dimension but the layer received an input with shape ${JSON.stringify(t)}.`);this.inputSpec=[new In({ndim:t.length,axes:{[e]:r}})];let o=[r];this.scale&&(this.gamma=this.addWeight("gamma",o,null,this.gammaInitializer,this.gammaRegularizer,!0,this.gammaConstraint)),this.center&&(this.beta=this.addWeight("beta",o,null,this.betaInitializer,this.betaRegularizer,!0,this.betaConstraint)),this.movingMean=this.addWeight("moving_mean",o,null,this.movingMeanInitializer,null,!1),this.movingVariance=this.addWeight("moving_variance",o,null,this.movingVarianceInitializer,null,!1),this.built=!0}call(t,e){return rt(()=>{let r=e.training==null?!1:e.training,o=Qt(t),s=o.shape,c=s.length,l=yo(0,c),p=this.axis>=0?this.axis:this.axis+c;l.splice(p,1);let f=pa(1,c);f[p]=s[p];let m=l.slice();m.sort();let y=!lt(m,yo(0,c).slice(0,c-1)),b=()=>{if(y){let I=this.movingMean.read().reshape(f),P=this.movingVariance.read().reshape(f),E=this.center?this.beta.read().reshape(f):null,L=this.scale?this.gamma.read().reshape(f):null;return Xp(o,I,P,E,L,this.epsilon)}else return 
Xp(o,this.movingMean.read(),this.movingVariance.read(),this.beta==null?null:this.beta.read(),this.gamma==null?null:this.gamma.read(),this.epsilon)};if(!r)return b();let[v,T,N]=_U(o,this.gamma.read(),this.beta.read(),l,this.epsilon),S=(I,P,E)=>{rt(()=>{let L=1-E,B=I.read(),q=B.sub(P).mul(L);I.write(B.sub(q))})},D=()=>{S(this.movingMean,T,this.momentum),S(this.movingVariance,N,this.momentum)};return D(),v})}getConfig(){let t={axis:this.axis,momentum:this.momentum,epsilon:this.epsilon,center:this.center,scale:this.scale,betaInitializer:rn(this.betaInitializer),gammaInitializer:rn(this.gammaInitializer),movingMeanInitializer:rn(this.movingMeanInitializer),movingVarianceInitializer:rn(this.movingVarianceInitializer),betaRegularizer:Pe(this.betaRegularizer),gammaRegularizer:Pe(this.gammaRegularizer),betaConstraint:vn(this.betaConstraint),gammaConstraint:vn(this.gammaConstraint)},e=super.getConfig();return Object.assign(t,e),t}}k0.className="BatchNormalization",vt(k0);class N0 extends ye{constructor(t){if(t==null&&(t={}),super(t),this.axis=t.axis==null?-1:t.axis,typeof this.axis=="number"){if(!Number.isInteger(this.axis))throw new Error(`Expected axis to be an integer, but received ${this.axis}`)}else if(Array.isArray(this.axis)){for(let e of this.axis)if(!Number.isInteger(e))throw new Error(`Expected axis to be an array of integers, but received ${JSON.stringify(this.axis)}`)}else throw new Error(`Expected axis to be an integer or an array of integers, but received ${JSON.stringify(this.axis)}`);this.epsilon=t.epsilon==null?.001:t.epsilon,this.center=t.center==null?!0:t.center,this.scale=t.scale==null?!0:t.scale,this.betaInitializer=je(t.betaInitializer||"zeros"),this.gammaInitializer=je(t.gammaInitializer||"ones"),this.betaRegularizer=Ke(t.betaRegularizer),this.gammaRegularizer=Ke(t.gammaRegularizer),this.supportsMasking=!0}build(t){t=Re(t);let e=t.length;typeof this.axis=="number"&&(this.axis=[this.axis]);for(let s=0;s=e)throw new Error(`Invalid axis: ${s}`);if(this.axis.length!==hi(this.axis).length)throw new Error(`Found duplicate axes in: ${this.axis}`);let r=this.axis.map(s=>t[s]),o=!0;this.scale?this.gamma=this.addWeight("gamma",r,"float32",this.gammaInitializer,this.gammaRegularizer,o):this.gamma=null,this.center?this.beta=this.addWeight("beta",r,"float32",this.betaInitializer,this.betaRegularizer,o):this.beta=null,this.built=!0}call(t,e){let r=Qt(t),o=r.shape,s=o.length;return rt(()=>{let c=!0,{mean:l,variance:p}=xd(r,this.axis,c),f=pa(1,s);for(let N of this.axis)f[N]=o[N];let m=N=>N!=null&&N.shape.length!==s&&this.axis!==[s-1]?N.reshape(f):N,y=m(this.gamma.read()),b=m(this.beta.read()),v=[],T=[];for(let N=0;N{if(n.rank!==3)throw new Y(`temporalPadding expects input tensor to be 3-D, but received a ${n.rank}-D tensor.`);if(t==null&&(t=[1,1]),t.length!==2)throw new Y(`temporalPadding expects input padding pattern to be a length-2 array, but received a length-${t.length} array.`);let e=[[0,0],t,[0,0]];return Wo(n,e)})}function CU(n,t,e){return rt(()=>{if(n.rank!==4)throw new Y(`temporalPadding expects input tensor to be 4-D, but received a ${n.rank}-D tensor.`);if(t==null&&(t=[[1,1],[1,1]]),t.length!==2||t[0].length!==2||t[1].length!==2)throw new Y("spatial2dPadding expects `padding` to be an Array of two Arrays, each of which is an Array of two integers.");if(e==null&&(e=go()),e!=="channelsLast"&&e!=="channelsFirst")throw new Y(`Unknown data format: ${e}. 
Supported data formats are 'channelsLast' and 'channelsFirst.`);let r;return e==="channelsFirst"?r=[[0,0],[0,0],t[0],t[1]]:r=[[0,0],t[0],t[1],[0,0]],Wo(n,r)})}class _0 extends ye{constructor(t){if(t==null&&(t={}),super(t),this.dataFormat=t.dataFormat==null?go():t.dataFormat,t.padding==null)this.padding=[[1,1],[1,1]];else if(typeof t.padding=="number")this.padding=[[t.padding,t.padding],[t.padding,t.padding]];else{if(t.padding=t.padding,t.padding.length!==2)throw new Y(`ZeroPadding2D expects padding to be a length-2 array, but received a length-${t.padding.length} array.`);let e,r;if(typeof t.padding[0]=="number")e=[t.padding[0],t.padding[0]],r=[t.padding[1],t.padding[1]];else{if(t.padding=t.padding,t.padding[0].length!==2)throw new Y(`ZeroPadding2D expects height padding to be a length-2 array, but received a length-${t.padding[0].length} array.`);if(e=t.padding[0],t.padding[1].length!==2)throw new Y(`ZeroPadding2D expects width padding to be a length-2 array, but received a length-${t.padding[1].length} array.`);r=t.padding[1]}this.padding=[e,r]}this.inputSpec=[new In({ndim:4})]}computeOutputShape(t){t=Re(t);let e,r;return this.dataFormat==="channelsFirst"?(t[2]!=null&&t[2]>=0?e=t[2]+this.padding[0][0]+this.padding[0][1]:e=null,t[3]!=null&&t[3]>=0?r=t[3]+this.padding[1][0]+this.padding[1][1]:r=null,[t[0],t[1],e,r]):(t[1]!=null&&t[1]>=0?e=t[1]+this.padding[0][0]+this.padding[0][1]:e=null,t[2]!=null&&t[2]>=0?r=t[2]+this.padding[1][0]+this.padding[1][1]:r=null,[t[0],e,r,t[3]])}call(t,e){return rt(()=>CU(Qt(t),this.padding,this.dataFormat))}getConfig(){let t={padding:this.padding,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}_0.className="ZeroPadding2D",vt(_0);function vm(n,t,e,r,o,s){return rt(()=>{nn(o),OC(s),Or(r),e==null&&(e=[1,1]),r==null&&(r="valid"),o==null&&(o=go()),s==null&&(s="max"),n=Qv(n,o);let c,l=r==="same"?"same":"valid";return s==="max"?c=lp(n,t,e,l):c=ep(n,t,e,l),o==="channelsFirst"&&(c=Kt(c,[0,3,1,2])),c})}function KS(n,t,e,r,o,s){return rt(()=>{nn(o),OC(s),Or(r),e==null&&(e=[1,1,1]),r==null&&(r="valid"),o==null&&(o=go()),s==null&&(s="max"),n=WS(n,o);let c,l=r==="same"?"same":"valid";return s==="max"?c=Cw(n,t,e,l):c=fw(n,t,e,l),o==="channelsFirst"&&(c=Kt(c,[0,4,1,2,3])),c})}class XS extends ye{constructor(t){if(t.poolSize==null&&(t.poolSize=2),super(t),typeof t.poolSize=="number")this.poolSize=[t.poolSize];else if(Array.isArray(t.poolSize)&&t.poolSize.length===1&&typeof t.poolSize[0]=="number")this.poolSize=t.poolSize;else throw new Y(`poolSize for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(t.poolSize)}`);if($n(this.poolSize,"poolSize"),t.strides==null)this.strides=this.poolSize;else if(typeof t.strides=="number")this.strides=[t.strides];else if(Array.isArray(t.strides)&&t.strides.length===1&&typeof t.strides[0]=="number")this.strides=t.strides;else throw new Y(`strides for 1D convolutional layer must be a number or an Array of a single number, but received ${JSON.stringify(t.strides)}`);$n(this.strides,"strides"),this.padding=t.padding==null?"valid":t.padding,Or(this.padding),this.inputSpec=[new In({ndim:3})]}computeOutputShape(t){t=Re(t);let e=To(t[1],this.poolSize[0],this.padding,this.strides[0]);return[t[0],e,t[2]]}call(t,e){return rt(()=>{this.invokeCallHook(t,e),t=Dp(Qt(t),2);let r=this.poolingFunction(Qt(t),[this.poolSize[0],1],[this.strides[0],1],this.padding,"channelsLast");return li(r,[2])})}getConfig(){let 
t={poolSize:this.poolSize,padding:this.padding,strides:this.strides},e=super.getConfig();return Object.assign(t,e),t}}class C0 extends XS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return nn(s),Or(o),vm(t,e,r,o,s,"max")}}C0.className="MaxPooling1D",vt(C0);class S0 extends XS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return nn(s),Or(o),vm(t,e,r,o,s,"avg")}}S0.className="AveragePooling1D",vt(S0);class YS extends ye{constructor(t){if(t.poolSize==null&&(t.poolSize=[2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize],t.strides==null)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(t.strides.length!==2)throw new Y(`If the strides property of a 2D pooling layer is an Array, it is expected to have a length of 2, but received length ${t.strides.length}.`);this.strides=t.strides}else this.strides=[t.strides,t.strides];$n(this.poolSize,"poolSize"),$n(this.strides,"strides"),this.padding=t.padding==null?"valid":t.padding,this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,nn(this.dataFormat),Or(this.padding),this.inputSpec=[new In({ndim:4})]}computeOutputShape(t){t=Re(t);let e=this.dataFormat==="channelsFirst"?t[2]:t[1],r=this.dataFormat==="channelsFirst"?t[3]:t[2];return e=To(e,this.poolSize[0],this.padding,this.strides[0]),r=To(r,this.poolSize[1],this.padding,this.strides[1]),this.dataFormat==="channelsFirst"?[t[0],t[1],e,r]:[t[0],e,r,t[3]]}call(t,e){return rt(()=>(this.invokeCallHook(t,e),this.poolingFunction(Qt(t),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){let t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class $0 extends YS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return nn(s),Or(o),vm(t,e,r,o,s,"max")}}$0.className="MaxPooling2D",vt($0);class I0 extends YS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return nn(s),Or(o),vm(t,e,r,o,s,"avg")}}I0.className="AveragePooling2D",vt(I0);class JS extends ye{constructor(t){if(t.poolSize==null&&(t.poolSize=[2,2,2]),super(t),this.poolSize=Array.isArray(t.poolSize)?t.poolSize:[t.poolSize,t.poolSize,t.poolSize],t.strides==null)this.strides=this.poolSize;else if(Array.isArray(t.strides)){if(t.strides.length!==3)throw new Y(`If the strides property of a 3D pooling layer is an Array, it is expected to have a length of 3, but received length ${t.strides.length}.`);this.strides=t.strides}else this.strides=[t.strides,t.strides,t.strides];$n(this.poolSize,"poolSize"),$n(this.strides,"strides"),this.padding=t.padding==null?"valid":t.padding,this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,nn(this.dataFormat),Or(this.padding),this.inputSpec=[new In({ndim:5})]}computeOutputShape(t){t=Re(t);let e=this.dataFormat==="channelsFirst"?t[2]:t[1],r=this.dataFormat==="channelsFirst"?t[3]:t[2],o=this.dataFormat==="channelsFirst"?t[4]:t[3];return e=To(e,this.poolSize[0],this.padding,this.strides[0]),r=To(r,this.poolSize[1],this.padding,this.strides[1]),o=To(o,this.poolSize[2],this.padding,this.strides[2]),this.dataFormat==="channelsFirst"?[t[0],t[1],e,r,o]:[t[0],e,r,o,t[4]]}call(t,e){return rt(()=>(this.invokeCallHook(t,e),this.poolingFunction(Qt(t),this.poolSize,this.strides,this.padding,this.dataFormat)))}getConfig(){let t={poolSize:this.poolSize,padding:this.padding,strides:this.strides,dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class E0 extends JS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return 
nn(s),Or(o),KS(t,e,r,o,s,"max")}}E0.className="MaxPooling3D",vt(E0);class D0 extends JS{constructor(t){super(t)}poolingFunction(t,e,r,o,s){return nn(s),Or(o),KS(t,e,r,o,s,"avg")}}D0.className="AveragePooling3D",vt(D0);class ZS extends ye{constructor(t){super(t);this.inputSpec=[new In({ndim:3})]}computeOutputShape(t){return[t[0],t[2]]}call(t,e){throw new Ut}}class A0 extends ZS{constructor(t){super(t||{})}call(t,e){return rt(()=>{let r=Qt(t);return en(r,1)})}}A0.className="GlobalAveragePooling1D",vt(A0);class F0 extends ZS{constructor(t){super(t||{})}call(t,e){return rt(()=>{let r=Qt(t);return lr(r,1)})}}F0.className="GlobalMaxPooling1D",vt(F0);class QS extends ye{constructor(t){super(t);this.dataFormat=t.dataFormat==null?"channelsLast":t.dataFormat,nn(this.dataFormat),this.inputSpec=[new In({ndim:4})]}computeOutputShape(t){return t=t,this.dataFormat==="channelsLast"?[t[0],t[3]]:[t[0],t[1]]}call(t,e){throw new Ut}getConfig(){let t={dataFormat:this.dataFormat},e=super.getConfig();return Object.assign(t,e),t}}class R0 extends QS{call(t,e){return rt(()=>{let r=Qt(t);return this.dataFormat==="channelsLast"?en(r,[1,2]):en(r,[2,3])})}}R0.className="GlobalAveragePooling2D",vt(R0);class P0 extends QS{call(t,e){return rt(()=>{let r=Qt(t);return this.dataFormat==="channelsLast"?lr(r,[1,2]):lr(r,[2,3])})}}P0.className="GlobalMaxPooling2D",vt(P0);class t$ extends ye{constructor(t){super(t);this.layer=t.layer}build(t){this.built=!0}get trainable(){return this.layer!=null?this.layer.trainable:!1}set trainable(t){this.layer!=null&&(this.layer.trainable=t)}get trainableWeights(){return this.layer.trainableWeights}get nonTrainableWeights(){return this.layer.nonTrainableWeights}get updates(){return this.layer._updates}get losses(){return this.layer.losses}getWeights(){return this.layer.getWeights()}setWeights(t){this.layer.setWeights(t)}getConfig(){let t={layer:{className:this.layer.getClassName(),config:this.layer.getConfig()}},e=super.getConfig();return Object.assign(t,e),t}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),this.layer!=null&&this.layer.setFastWeightInitDuringBuild(t)}static fromConfig(t,e,r={}){let o=e.layer,s=wo(o,r);delete e.layer;let c={layer:s};return Object.assign(c,e),new t(c)}}class O0 extends t${constructor(t){super(t);this.supportsMasking=!0}build(t){if(t=Re(t),t.length<3)throw new Y(`TimeDistributed layer expects an input shape >= 3D, but received input shape ${JSON.stringify(t)}`);this.inputSpec=[{shape:t}];let e=[t[0]].concat(t.slice(2));this.layer.built||(this.layer.build(e),this.layer.built=!0),super.build(t)}computeOutputShape(t){t=Re(t);let e=[t[0]].concat(t.slice(2)),r=this.layer.computeOutputShape(e),o=t[1];return[r[0],o].concat(r.slice(1))}call(t,e){return rt(()=>{t=Qt(t);let r=(c,l)=>{let p=Qt(this.layer.call(c,e));return[p,[]]},o=HS(r,t,[],!1,null,null,!1,!0),s=o[1];return s})}}O0.className="TimeDistributed",vt(O0);function SU(n){zc(OV,"BidirectionalMergeMode",n)}let $U="concat";class L0 extends t${constructor(t){super(t);let e=t.layer.getConfig(),r={};r.className=t.layer.getClassName(),r.config=e,this.forwardLayer=wo(r),e.goBackwards=!(e.goBackwards===!0);let o={};if(o.className=t.layer.getClassName(),o.config=e,this.backwardLayer=wo(o),this.forwardLayer.name="forward_"+this.forwardLayer.name,this.backwardLayer.name="backward_"+this.backwardLayer.name,this.mergeMode=t.mergeMode===void 0?$U:t.mergeMode,SU(this.mergeMode),t.weights)throw new Ut("weights support is not implemented for Bidirectional layer 
yet.");this._stateful=t.layer.stateful,this.returnSequences=t.layer.returnSequences,this.returnState=t.layer.returnState,this.supportsMasking=!0,this._trainable=!0,this.inputSpec=t.layer.inputSpec,this.numConstants=null}get trainable(){return this._trainable}set trainable(t){this._trainable=t,this.forwardLayer!=null&&(this.forwardLayer.trainable=t),this.backwardLayer!=null&&(this.backwardLayer.trainable=t)}getWeights(){return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights())}setWeights(t){let e=t.length,r=Math.floor(e/2);this.forwardLayer.setWeights(t.slice(0,r)),this.backwardLayer.setWeights(t.slice(r))}computeOutputShape(t){let e=this.forwardLayer.computeOutputShape(t);Array.isArray(e)&&Array.isArray(e[0])||(e=[e]),e=e;let r,o,s;return this.returnState&&(s=e.slice(1)),r=e[0],r=r,this.mergeMode==="concat"?(r[r.length-1]*=2,o=[r]):this.mergeMode==null?o=[r,r.slice()]:o=[r],this.returnState?this.mergeMode==null?o.concat(s).concat(s.slice()):[r].concat(s).concat(s.slice()):pr(o)}apply(t,e){let r=e==null?null:e.initialState,o=e==null?null:e.constants;e==null&&(e={});let s=qS(t,r,o,this.numConstants);if(t=s.inputs,r=s.initialState,o=s.constants,Array.isArray(t)&&(r=t.slice(1),t=t[0]),(r==null||r.length===0)&&o==null)return super.apply(t,e);let c=[],l=[];if(r!=null){let f=r.length;if(f%2>0)throw new Y("When passing `initialState` to a Bidrectional RNN, the state should be an Array containing the states of the underlying RNNs.");e.initialState=r,c.push(...r);let m=r.map(y=>new In({shape:y.shape}));this.forwardLayer.stateSpec=m.slice(0,f/2),this.backwardLayer.stateSpec=m.slice(f/2),l.push(...m)}if(o!=null)throw new Ut("Support for constants in Bidirectional layers is not implemented yet.");let p=c[0]instanceof xo;for(let f of c)if(f instanceof xo!==p)throw new Y("The initial state of a Bidirectional layer cannot be specified as a mix of symbolic and non-symbolic tensors");if(p){let f=[t].concat(c),m=this.inputSpec.concat(l),y=this.inputSpec;this.inputSpec=m;let b=super.apply(f,e);return this.inputSpec=y,b}else return super.apply(t,e)}call(t,e){return rt(()=>{let r=e.initialState,o,s;if(r==null)o=this.forwardLayer.call(t,e),s=this.backwardLayer.call(t,e);else{let p=r.slice(0,r.length/2),f=r.slice(r.length/2);o=this.forwardLayer.call(t,Object.assign(e,{initialState:p})),s=this.backwardLayer.call(t,Object.assign(e,{initialState:f}))}let c;this.returnState&&(Array.isArray(o)&&(c=o.slice(1).concat(s.slice(1))),o=o[0],s=s[0]),this.returnSequences&&(s=Rr(s,1));let l;return this.mergeMode==="concat"?l=bv([o,s]):this.mergeMode==="sum"?l=Tt(o,s):this.mergeMode==="ave"?l=nt(.5,Tt(o,s)):this.mergeMode==="mul"?l=nt(o,s):this.mergeMode==null&&(l=[o,s]),this.returnState?this.mergeMode==null?l.concat(c):[l].concat(c):l})}resetStates(t){this.forwardLayer.resetStates(),this.backwardLayer.resetStates()}build(t){fa(this.forwardLayer.name,()=>{this.forwardLayer.build(t)}),fa(this.backwardLayer.name,()=>{this.backwardLayer.build(t)}),this.built=!0}computeMask(t,e){Array.isArray(e)&&(e=e[0]);let r;if(this.returnSequences?this.mergeMode==null?r=[e,e]:r=e:this.mergeMode==null?r=[null,null]:r=null,this.returnState){let o=this.forwardLayer.states,s=o.map(c=>null);return Array.isArray(r)?r.concat(s).concat(s):[r].concat(s).concat(s)}else return r}get trainableWeights(){return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights)}get nonTrainableWeights(){return 
this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights)}setFastWeightInitDuringBuild(t){super.setFastWeightInitDuringBuild(t),this.forwardLayer!=null&&this.forwardLayer.setFastWeightInitDuringBuild(t),this.backwardLayer!=null&&this.backwardLayer.setFastWeightInitDuringBuild(t)}getConfig(){let t={mergeMode:this.mergeMode},e=super.getConfig();return Object.assign(t,e),t}static fromConfig(t,e){let r=wo(e.layer);if(delete e.layer,e.numConstants!=null)throw new Ut("Deserialization of a Bidirectional layer with numConstants present is not supported yet.");let o=e;return o.layer=r,new t(o)}}L0.className="Bidirectional",vt(L0);function IU(n){return new Gc(n)}function EU(n){return new Yv(n)}function DU(n){return new jv(n)}function AU(n){return new Kv(n)}function FU(n){return new Xv(n)}function RU(n){return new Zv(n)}function PU(n){return new Jv(n)}function OU(n){return new zp(n)}function LU(n){return new Kc(n)}function MU(n){return new e0(n)}function BU(n){return new Bp(n)}function zU(n){return new n0(n)}function WU(n){return new r0(n)}function VU(n){return new o0(n)}function GU(n){return new s0(n)}function UU(n){return new f0(n)}function qU(n){return new p0(n)}function HU(n){return new wm(n)}function jU(n){return new u0(n)}function KU(n){return new h0(n)}function XU(n){return new d0(n)}function YU(n){return new m0(n)}function JU(n){return new g0(n)}function ZU(n){return new b0(n)}function QU(n){return new Vp(n)}function tq(n){return new Up(n)}function eq(n){return new jp(n)}function nq(n){return new qp(n)}function rq(n){return new Hp(n)}function oq(n){return new Gp(n)}function sq(n){return new x0(n)}function iq(n){return new k0(n)}function aq(n){return new N0(n)}function cq(n){return new _0(n)}function M0(n){return new S0(n)}function lq(n){return M0(n)}function uq(n){return M0(n)}function B0(n){return new I0(n)}function pq(n){return B0(n)}function hq(n){return B0(n)}function z0(n){return new D0(n)}function fq(n){return z0(n)}function dq(n){return z0(n)}function mq(n){return new A0(n)}function gq(n){return new R0(n)}function e$(n){return new F0(n)}function n$(n){return new P0(n)}function r$(n){return new C0(n)}function o$(n){return new $0(n)}function yq(n){return new E0(n)}function bq(n){return new a0(n)}function xq(n){return new ym(n)}function wq(n){return new c0(n)}function vq(n){return new Wp(n)}function Tq(n){return new i0(n)}function kq(n){return new gm(n)}function Nq(n){return new l0(n)}function _q(n){return new xm(n)}function Cq(n){return new ko(n)}function Sq(n){return new bm(n)}function $q(n){return new L0(n)}function Iq(n){return new O0(n)}let Eq=e$,Dq=n$,Aq=r$,Fq=o$;function Rq(n){return new w0(n)}function Pq(n){return new v0(n)}function Oq(n){return new T0(n)}function Lq(n){return new y0(n)}var 
Mq=Object.freeze({__proto__:null,inputLayer:IU,elu:EU,reLU:DU,leakyReLU:AU,prelu:FU,softmax:RU,thresholdedReLU:PU,conv1d:OU,conv2d:LU,conv2dTranspose:MU,conv3d:BU,separableConv2d:zU,cropping2D:WU,upSampling2d:VU,depthwiseConv2d:GU,activation:UU,dense:qU,dropout:HU,spatialDropout1d:jU,flatten:KU,repeatVector:XU,reshape:YU,permute:JU,embedding:ZU,add:QU,average:tq,concatenate:eq,maximum:nq,minimum:rq,multiply:oq,dot:sq,batchNormalization:iq,layerNormalization:aq,zeroPadding2d:cq,averagePooling1d:M0,avgPool1d:lq,avgPooling1d:uq,averagePooling2d:B0,avgPool2d:pq,avgPooling2d:hq,averagePooling3d:z0,avgPool3d:fq,avgPooling3d:dq,globalAveragePooling1d:mq,globalAveragePooling2d:gq,globalMaxPooling1d:e$,globalMaxPooling2d:n$,maxPooling1d:r$,maxPooling2d:o$,maxPooling3d:yq,gru:bq,gruCell:xq,lstm:wq,lstmCell:vq,simpleRNN:Tq,simpleRNNCell:kq,convLstm2d:Nq,convLstm2dCell:_q,rnn:Cq,stackedRNNCells:Sq,bidirectional:$q,timeDistributed:Iq,globalMaxPool1d:Eq,globalMaxPool2d:Dq,maxPool1d:Aq,maxPool2d:Fq,Layer:ye,RNN:ko,RNNCell:Xc,input:_S,gaussianNoise:Rq,gaussianDropout:Pq,alphaDropout:Oq,masking:Lq});function Bq(n,t){return Rv(n,t)}function zq(n,t){return iS(n,t)}function Wq(n,t){return aS(n,t)}function Vq(n,t){return Pv(n,t)}function Gq(n,t){return Ov(n,t)}function Uq(n,t){return sS(n,t)}function qq(n,t){return DG(n,t)}function Hq(n,t){return cm(n,t)}function jq(n,t){return qc(n,t)}function Kq(n,t){return gi(n,t)}function Xq(n,t){return gi(n,t)}function Yq(n,t){return gi(n,t)}function Jq(n,t){return xs(n,t)}function Zq(n,t){return xs(n,t)}function Qq(n,t){return xs(n,t)}var tH=Object.freeze({__proto__:null,binaryAccuracy:Bq,binaryCrossentropy:zq,sparseCategoricalAccuracy:Wq,categoricalAccuracy:Vq,categoricalCrossentropy:Gq,precision:Uq,recall:qq,cosineProximity:Hq,meanAbsoluteError:jq,meanAbsolutePercentageError:Kq,MAPE:Xq,mape:Yq,meanSquaredError:Jq,MSE:Zq,mse:Qq});var eH=Object.freeze({__proto__:null,modelFromJSON:uU});function nH(n){return new Mp(n)}function rH(n){return bU(n)}function oH(n){return xU(n)}var sH=Object.freeze({__proto__:null,l1l2:nH,l1:rH,l2:oH});class s$ extends Uc{constructor(){super(...arguments);this.model=null}setModel(t){if(!(t instanceof ws))throw new Error("model must be a LayersModel, not some other Container");this.model=t}}function Tm(n,t){return nt}class a$ extends s${constructor(t){super();if(t==null&&(t={}),t.restoreBestWeights)throw new Ut("restoreBestWeights = True is not implemented in EarlyStopping yet.");this.monitor=t.monitor||"val_loss",this.minDelta=Math.abs(t.minDelta||0),this.patience=t.patience||0,this.verbose=t.verbose||0,this.mode=t.mode||"auto",this.baseline=t.baseline,["auto","min","max"].indexOf(this.mode)===-1&&(console.warn(`EarlyStopping mode '${this.mode}' is invalid. 
Falling back to mode 'auto'.`),this.mode="auto"),this.mode==="min"?this.monitorFunc=Tm:this.mode==="max"?this.monitorFunc=i$:this.monitor.indexOf("acc")!==-1?this.monitorFunc=i$:this.monitorFunc=Tm,this.monitorFunc===Tm&&(this.minDelta*=-1)}async onTrainBegin(t){this.wait=0,this.stoppedEpoch=0,this.baseline!=null?this.best=this.baseline:this.best=this.monitorFunc===Tm?Infinity:-Infinity}async onEpochEnd(t,e){await mi(e);let r=this.getMonitorValue(e);if(r==null)return;this.monitorFunc(r-this.minDelta,this.best)?(this.best=r,this.wait=0):(this.wait++,this.wait>=this.patience&&(this.stoppedEpoch=t,this.model.stopTraining=!0))}async onTrainEnd(t){this.stoppedEpoch>0&&this.verbose&&console.log(`Epoch ${this.stoppedEpoch}: early stopping.`)}getMonitorValue(t){t==null&&(t={});let e=t[this.monitor];return e==null&&console.warn(`Metric for EarlyStopping ${this.monitor} is not available. Available metrics are: ${Object.keys(t)}`),e}}function iH(n){return new a$(n)}let aH={earlyStopping:iH};var No;(function(n){n[n.DT_INVALID=0]="DT_INVALID",n[n.DT_FLOAT=1]="DT_FLOAT",n[n.DT_DOUBLE=2]="DT_DOUBLE",n[n.DT_INT32=3]="DT_INT32",n[n.DT_UINT8=4]="DT_UINT8",n[n.DT_INT16=5]="DT_INT16",n[n.DT_INT8=6]="DT_INT8",n[n.DT_STRING=7]="DT_STRING",n[n.DT_COMPLEX64=8]="DT_COMPLEX64",n[n.DT_INT64=9]="DT_INT64",n[n.DT_BOOL=10]="DT_BOOL",n[n.DT_QINT8=11]="DT_QINT8",n[n.DT_QUINT8=12]="DT_QUINT8",n[n.DT_QINT32=13]="DT_QINT32",n[n.DT_BFLOAT16=14]="DT_BFLOAT16",n[n.DT_FLOAT_REF=101]="DT_FLOAT_REF",n[n.DT_DOUBLE_REF=102]="DT_DOUBLE_REF",n[n.DT_INT32_REF=103]="DT_INT32_REF",n[n.DT_UINT8_REF=104]="DT_UINT8_REF",n[n.DT_INT16_REF=105]="DT_INT16_REF",n[n.DT_INT8_REF=106]="DT_INT8_REF",n[n.DT_STRING_REF=107]="DT_STRING_REF",n[n.DT_COMPLEX64_REF=108]="DT_COMPLEX64_REF",n[n.DT_INT64_REF=109]="DT_INT64_REF",n[n.DT_BOOL_REF=110]="DT_BOOL_REF",n[n.DT_QINT8_REF=111]="DT_QINT8_REF",n[n.DT_QUINT8_REF=112]="DT_QUINT8_REF",n[n.DT_QINT32_REF=113]="DT_QINT32_REF",n[n.DT_BFLOAT16_REF=114]="DT_BFLOAT16_REF"})(No||(No={}));var c$;(function(n){let t;(function(e){e[e.LEGACY=0]="LEGACY",e[e.V1=1]="V1",e[e.V2=2]="V2"})(t=n.CheckpointFormatVersion||(n.CheckpointFormatVersion={}))})(c$||(c$={}));let W0={};function cH(n,t){let e={tfOpName:n,category:"custom",inputs:[],attrs:[],customExecutor:t};W0[n]=e}function l$(n){return W0[n]}function lH(n){delete W0[n]}function A(n,t,e,r,o){let s=t.inputParams[n];if(s&&s.inputIndexStart!==void 0){let l=s.inputIndexStart,p=s.inputIndexEnd===0?void 0:s.inputIndexEnd===void 0?l+1:s.inputIndexEnd;if(s.type==="tensor")return fr(t.inputNames[s.inputIndexStart],e,r,o);if(s.type==="tensors"){let y=t.inputNames.slice(l,p);return y.map(b=>fr(b,e,r,o))}let f=fr(t.inputNames.slice(l)[0],e,r,o),m=f.dataSync();return s.type==="number"?m[0]:Er(f.shape,m)}let c=t.attrParams[n];return c&&c.value}function fr(n,t,e,r){let[o,s]=Nr(n);if(r!=null){let l=r.getHashTableHandleByName(o);if(l!=null)return l}let c=e.currentContextIds.find(l=>!!t[km(o,l)]);return c!==void 0?t[km(o,c)][s]:void 0}function uH(n,t,e){return t[km(n,e.currentContextId)]}function vs(n,t){let[e,r]=Nr(n);return[km(e,t&&t.currentContextId),r]}function km(n,t){return t?`${n}-${t}`:n}function Nr(n){let t=n.split(":");if(t.length===1)return[n,0];let e=t[0];return[e,Number(t[t.length-1])]}function vst(n,t){let e=[];for(let r=0;rr.json));this.opMappers=e.reduce((r,o)=>(r[o.tfOpName]=o,r),{})}transformGraph(t,e={}){let 
r=t.node,o=[],s=[],c=[],l=r.reduce((N,S)=>(N[S.name]=this.mapNode(S),S.op.startsWith("Placeholder")?o.push(N[S.name]):S.op==="Const"?s.push(N[S.name]):(S.input==null||S.input.length===0)&&c.push(N[S.name]),N),{}),p=[],f=[],m={},y={};e!=null&&(m=this.mapSignatureEntries(e.inputs),y=this.mapSignatureEntries(e.outputs));let b=Object.keys(l);b.forEach(N=>{let S=l[N];S.inputNames.forEach(D=>{let[I]=vs(D);S.inputs.push(l[I]),l[I].children.push(S)})}),Object.keys(y).length===0?b.forEach(N=>{let S=l[N];S.children.length===0&&f.push(S)}):Object.keys(y).forEach(N=>{let[S]=vs(N),D=l[S];D!=null&&(D.signatureKey=y[N],f.push(D))}),Object.keys(m).length>0?Object.keys(m).forEach(N=>{let[S]=vs(N),D=l[S];D&&(D.signatureKey=m[N],p.push(D))}):p=o;let v={};t.library!=null&&t.library.function!=null&&(v=t.library.function.reduce((N,S)=>(N[S.signature.name]=this.mapFunction(S),N),{}));let T={nodes:l,inputs:p,outputs:f,weights:s,placeholders:o,signature:e,functions:v};return c.length>0&&(T.initNodes=c),T}mapSignatureEntries(t){return Object.keys(t||{}).reduce((e,r)=>(e[t[r].name]=r,e),{})}mapNode(t){let e=l$(t.op)||this.opMappers[t.op]||{};t.attr==null&&(t.attr={});let r={name:t.name,op:t.op,category:e.category,inputNames:(t.input||[]).map(o=>o.startsWith("^")?o.substr(1):o),inputs:[],children:[],inputParams:{},attrParams:{},rawAttrs:t.attr};return e.inputs!=null&&(r.inputParams=e.inputs.reduce((o,s)=>(o[s.name]={type:s.type,inputIndexStart:s.start,inputIndexEnd:s.end},o),{})),e.attrs!=null&&(r.attrParams=e.attrs.reduce((o,s)=>{let c=s.type,l;switch(s.type){case"string":l=V0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=V0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"string[]":l=Y0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=Y0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"number":l=U0(t.attr,s.tfName,s.defaultValue||0),l===void 0&&!!s.tfDeprecatedName&&(l=U0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"number[]":l=X0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=X0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"bool":l=G0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=G0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"bool[]":l=Z0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=Z0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"shape":l=K0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=K0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"shape[]":l=J0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=J0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"dtype":l=H0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=H0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"dtype[]":l=j0(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=j0(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"func":l=h$(t.attr,s.tfName,s.defaultValue),l===void 0&&!!s.tfDeprecatedName&&(l=h$(t.attr,s.tfDeprecatedName,s.defaultValue));break;case"tensor":case"tensors":break;default:throw new Error(`Unsupported param type: ${s.type} for op: ${t.op}`)}return o[s.name]={value:l,type:c},o},{})),r}mapFunction(t){let e=t.nodeDef,r=[],o=[],s={};e!=null&&(s=e.reduce((y,b)=>(y[b.name]=this.mapNode(b),b.op==="Const"&&o.push(y[b.name]),y),{}));let 
c=[],l=[];t.signature.inputArg.forEach(y=>{let[b]=vs(y.name),v={name:b,op:"Placeholder",inputs:[],inputNames:[],category:"graph",inputParams:{},attrParams:{dtype:{value:q0(y.type),type:"dtype"}},children:[]};v.signatureKey=y.name,c.push(v),s[b]=v});let p=Object.keys(s);p.forEach(y=>{let b=s[y];b.inputNames.forEach(v=>{let[T]=vs(v);b.inputs.push(s[T]),s[T].children.push(b)})});let f=t.ret;t.signature.outputArg.forEach(y=>{let[b,v]=vs(f[y.name]),T=s[b];T!=null&&(T.defaultOutput=v,l.push(T))});let m=this.mapArgsToSignature(t);return{nodes:s,inputs:c,outputs:l,weights:o,placeholders:r,signature:m}}mapArgsToSignature(t){return{methodName:t.signature.name,inputs:t.signature.inputArg.reduce((e,r)=>(e[r.name]=this.mapArgToTensorInfo(r),e),{}),outputs:t.signature.outputArg.reduce((e,r)=>(e[r.name]=this.mapArgToTensorInfo(r,t.ret),e),{})}}mapArgToTensorInfo(t,e){let r=t.name;return e!=null&&(r=e[r]),{name:r,dtype:t.type}}}function qH(n){let t=ct().global;if(typeof t.atob!="undefined")return t.atob(n);if(typeof Buffer!="undefined")return new Buffer(n,"base64").toString();throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()")}function p$(n,t){let e=Array.isArray(n)?String.fromCharCode.apply(null,n):qH(n);return t?e:e.toLowerCase()}function V0(n,t,e,r=!1){let o=n[t];return o!=null?p$(o.s,r):e}function G0(n,t,e){let r=n[t];return r?r.b:e}function U0(n,t,e){let r=n[t]||{},o=r.i!=null?r.i:r.f!=null?r.f:e;return typeof o=="number"?o:parseInt(o,10)}function q0(n){typeof n=="string"&&(n=No[n]);switch(n){case No.DT_FLOAT:return"float32";case No.DT_INT32:case No.DT_INT64:case No.DT_INT8:case No.DT_UINT8:return"int32";case No.DT_BOOL:return"bool";case No.DT_DOUBLE:return"float32";case No.DT_STRING:return"string";default:return null}}function h$(n,t,e){let r=n[t];return r&&r.func?r.func.name:e}function H0(n,t,e){let r=n[t];return r&&r.type?q0(r.type):e}function j0(n,t,e){let r=n[t];return r&&r.list&&r.list.type?r.list.type.map(o=>q0(o)):e}function f$(n){return n.unknownRank?void 0:n.dim!=null?n.dim.map(t=>typeof t.size=="number"?t.size:parseInt(t.size,10)):[]}function K0(n,t,e){let r=n[t];return r&&r.shape?f$(r.shape):e}function X0(n,t,e){let r=n[t];return r?((r.list.f&&r.list.f.length?r.list.f:r.list.i)||[]).map(o=>typeof o=="number"?o:parseInt(o,10)):e}function Y0(n,t,e,r=!1){let o=n[t];return o&&o.list&&o.list.s?o.list.s.map(s=>p$(s,r)):e}function J0(n,t,e){let r=n[t];return r&&r.list&&r.list.shape?r.list.shape.map(o=>f$(o)):e}function Z0(n,t,e){let r=n[t];return r&&r.list&&r.list.b?r.list.b:e}class HH{constructor(t,e,r){this.node=t,this.tensorMap=e,this.context=r,this.inputs=[],this.attrs={},this.inputs=t.inputNames.map(o=>this.getInput(o)),t.rawAttrs!=null&&(this.attrs=Object.keys(t.rawAttrs).reduce((o,s)=>(o[s]=this.getAttr(s),o),{}))}getInput(t){return fr(t,this.tensorMap,this.context)}getAttr(t,e){let r=this.node.rawAttrs[t];if(r.tensor!=null)return fr(t,this.tensorMap,this.context);if(r.i!=null||r.f!=null)return U0(this.node.rawAttrs,t,e);if(r.s!=null)return V0(this.node.rawAttrs,t,e);if(r.b!=null)return G0(this.node.rawAttrs,t,e);if(r.shape!=null)return K0(this.node.rawAttrs,t,e);if(r.type!=null)return H0(this.node.rawAttrs,t,e);if(r.list!=null){if(r.list.i!=null||r.list.f!=null)return X0(this.node.rawAttrs,t,e);if(r.list.s!=null)return Y0(this.node.rawAttrs,t,e);if(r.list.shape!=null)return J0(this.node.rawAttrs,t,e);if(r.list.b!=null)return Z0(this.node.rawAttrs,t,e);if(r.list.type!=null)return j0(this.node.rawAttrs,t,e)}return e}}let 
jH=(n,t,e)=>{switch(n.op){case"BiasAdd":case"AddV2":case"Add":return[Tt(A("a",n,t,e),A("b",n,t,e))];case"AddN":return[g_(A("tensors",n,t,e))];case"FloorMod":case"Mod":return[bd(A("a",n,t,e),A("b",n,t,e))];case"Mul":return[nt(A("a",n,t,e),A("b",n,t,e))];case"RealDiv":case"Div":return[Bt(A("a",n,t,e),A("b",n,t,e))];case"DivNoNan":return[xw(A("a",n,t,e),A("b",n,t,e))];case"FloorDiv":return[nd(A("a",n,t,e),A("b",n,t,e))];case"Sub":return[Dt(A("a",n,t,e),A("b",n,t,e))];case"Minimum":return[oa(A("a",n,t,e),A("b",n,t,e))];case"Maximum":return[Xr(A("a",n,t,e),A("b",n,t,e))];case"Pow":return[fo(A("a",n,t,e),A("b",n,t,e))];case"SquaredDifference":return[gp(A("a",n,t,e),A("b",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Tst="arithmetic";let KH=(n,t,e)=>{switch(n.op){case"Abs":case"ComplexAbs":return[bn(A("x",n,t,e))];case"Acos":return[nw(A("x",n,t,e))];case"Acosh":return[rw(A("x",n,t,e))];case"Asin":return[iw(A("x",n,t,e))];case"Asinh":return[aw(A("x",n,t,e))];case"Atan":return[cw(A("x",n,t,e))];case"Atan2":return[lw(A("x",n,t,e),A("y",n,t,e))];case"Atanh":return[uw(A("x",n,t,e))];case"Ceil":return[dw(A("x",n,t,e))];case"Complex":return[us(A("real",n,t,e),A("imag",n,t,e))];case"Cos":return[op(A("x",n,t,e))];case"Cosh":return[ud(A("x",n,t,e))];case"Elu":return[_c(A("x",n,t,e))];case"Erf":return[ww(A("x",n,t,e))];case"Exp":return[Ar(A("x",n,t,e))];case"Expm1":return[vw(A("x",n,t,e))];case"Floor":return[Sc(A("x",n,t,e))];case"Log":return[wr(A("x",n,t,e))];case"Log1p":return[dd(A("x",n,t,e))];case"Imag":return[ip(A("x",n,t,e))];case"Neg":return[tn(A("x",n,t,e))];case"Reciprocal":return[Dw(A("x",n,t,e))];case"Real":return[Dc(A("x",n,t,e))];case"Relu":return[Vo(A("x",n,t,e))];case"Round":return[Fw(A("x",n,t,e))];case"Selu":return[Td(A("x",n,t,e))];case"Sigmoid":return[Bo(A("x",n,t,e))];case"Sin":return[kd(A("x",n,t,e))];case"Sign":return[Pw(A("x",n,t,e))];case"Sinh":return[Nd(A("x",n,t,e))];case"Softplus":return[Ic(A("x",n,t,e))];case"Sqrt":return[Pn(A("x",n,t,e))];case"Square":return[De(A("x",n,t,e))];case"Tanh":return[Nc(A("x",n,t,e))];case"Tan":return[Mw(A("x",n,t,e))];case"Relu6":case"ClipByValue":return[ar(A("x",n,t,e),A("clipValueMin",n,t,e),A("clipValueMax",n,t,e))];case"Rsqrt":return[vd(fr(n.inputNames[0],t,e))];case"Prod":return[wd(A("x",n,t,e),A("axes",n,t,e))];case"LeakyRelu":return[fd(A("x",n,t,e),A("alpha",n,t,e))];case"Prelu":return[pp(A("x",n,t,e),A("alpha",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},kst="basic_math";function to(n,t,e=""){_(XH(n,t),()=>e+` Shapes ${n} and ${t} must match`)}function XH(n,t){if(n.length!==t.length)return!1;for(let e=0;e{(t==null||!t.has(e.tensor.id))&&e.tensor.dispose()}),this.tensors=[],this.closed_=!0,this.idTensor.dispose()}size(){return this.tensors.length}read(t){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(t<0||t>=this.size())throw new Error(`Tried to read from index ${t}, but array size is: ${this.size()}`);let e=this.tensors[t];if(e.cleared)throw new Error(`TensorArray ${this.name}: Could not read index ${t} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);return this.clearAfterRead&&(e.cleared=!0),e.read=!0,e.tensor}readMany(t){return t.map(e=>this.read(e))}write(t,e){if(this.closed_)throw new Error(`TensorArray ${this.name} has already been closed.`);if(t<0||!this.dynamicSize&&t>=this.maxSize)throw new Error(`Tried to write to index ${t}, but array is not resizeable and size is: 
${this.maxSize}`);let r=this.tensors[t]||{};if(e.dtype!==this.dtype)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, + because the value dtype is ${e.dtype}, but TensorArray dtype is ${this.dtype}.`);if(this.size()===0&&(this.elementShape==null||this.elementShape.length===0)&&(this.elementShape=e.shape),to(this.elementShape,e.shape,`TensorArray ${this.name}: Could not write to TensorArray index ${t}.`),r.read)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, because it has already been read.`);if(r.written)throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t}, because it has already been written.`);r.tensor=e,Sn(e),r.written=!0,this.tensors[t]=r}writeMany(t,e){if(t.length!==e.length)throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${t.length} is not the same as tensors size: ${e.length}.`);t.forEach((r,o)=>this.write(r,e[o]))}gather(t,e){if(!!e&&e!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${e}`);if(t)t=t.slice(0,this.size());else{t=[];for(let o=0;o=this.maxSize)throw new Error(`Max index must be < array size (${r} vs. ${this.maxSize})`);this.writeMany(t,mo(e,0))}split(t,e){if(e.dtype!==this.dtype)throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${e.dtype}`);let r=0,o=t.map(p=>(r+=p,r));if(r!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is - ${n}, and tensor's shape is: ${t.shape}`);if(!this.dynamicSize&&e.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${e.length}), and the TensorArray is not marked as dynamically resizeable`);const i=n===0?0:t.size/n,o=[];Q(()=>{t=K(t,[1,n,i]);for(let c=0;c{if(n!==i.dtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${i.dtype}`);zs(t,i.shape,"TensorList shape mismatch: "),bn(i)}),this.idTensor=Ce(0),this.maxNumElements=s,bn(this.idTensor)}get id(){return this.idTensor.id}copy(){return new nu([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(e){this.tensors.forEach(t=>{(e==null||!e.has(t.id))&&t.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(e,t,n=-1){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(n!==-1&&this.tensors.length!==n)throw new Error(`Operation expected a list with ${n} elements but got a list with ${this.tensors.length} elements.`);return zs(e,this.elementShape,"TensorList shape mismatch: "),Q(()=>{const s=this.tensors.map(i=>K(i,e));return es(s,0)})}popBack(e,t){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);if(this.size()===0)throw new Error("Trying to pop from an empty list.");const n=this.tensors.pop();return zs(n.shape,e,"TensorList shape mismatch: "),K(n,e)}pushBack(e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(zs(e.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");bn(e),this.tensors.push(e)}resize(e){if(e<0)throw new Error(`TensorListResize expects size to be non-negative. 
Got: ${e}`);if(this.maxNumElements!==-1&&e>this.maxNumElements)throw new Error(`TensorListResize input size ${e} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=e}getItem(e,t,n){if(n!==this.elementDtype)throw new Error(`Invalid data types; op elements ${n}, but list elements ${this.elementDtype}`);if(e<0||e>this.tensors.length)throw new Error(`Trying to access element ${e} in a list with ${this.tensors.length} elements.`);if(this.tensors[e]==null)throw new Error(`element at index ${e} is null.`);return zs(this.tensors[e].shape,t,"TensorList shape mismatch: "),this.tensors[e]}setItem(e,t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(e<0||this.maxNumElements!==-1&&e>=this.maxNumElements)throw new Error(`Trying to set element ${e} in a list with max ${this.maxNumElements} elements.`);zs(this.elementShape,t.shape,"TensorList shape mismatch: "),bn(t),this.tensors[e]=t}gather(e,t,n){if(t!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t}, but list elements ${this.elementDtype}`);return zs(this.elementShape,n,"TensorList shape mismatch: "),e=e.slice(0,this.size()),e.length===0?sn([],[0].concat(this.elementShape)):Q(()=>{const s=e.map(i=>K(this.tensors[i],n));return es(s,0)})}concat(e,t){if(!!e&&e!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${e}`);return zs(this.elementShape,t,"TensorList shape mismatch: "),this.size()===0?sn([],[0].concat(this.elementShape)):Q(()=>{const n=this.tensors.map(s=>K(s,t));return Yt(n,0)})}}function UY(e,t,n){const s=e.dtype;if(e.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${e.shape}`);if(e.dtype!==n)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${n}`);const i=e.shape.slice(1);zs(i,t,"TensorList shape mismatch: ");const o=Qs(e);return new nu(o,t,s)}function BY(e,t,n){return new nu([],e,t,n)}function MY(e,t,n,s){if(t.length!==e.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${e.shape[0]}`);const i=Math.max(...t);if(s!=null&&s!==-1&&i>=s)throw new Error(`Max index must be < array size (${i} vs. ${s})`);const o=new nu([],n,e.dtype,s),a=Qs(e,0);return t.forEach((c,h)=>{o.setItem(c,a[h])}),o}function PY(e,t,n){let s=0;const i=t.map(h=>(s+=h,s));if(s!==e.shape[0])throw new Error(`Expected sum of lengths to be equal to + ${r}, and tensor's shape is: ${e.shape}`);if(!this.dynamicSize&&t.length!==this.maxSize)throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. 
${t.length}), and the TensorArray is not marked as dynamically resizeable`);let s=r===0?0:e.size/r,c=[];rt(()=>{e=Q(e,[1,r,s]);for(let p=0;p{if(r!==s.dtype)throw new Error(`Invalid data types; op elements ${r}, but list elements ${s.dtype}`);to(e,s.shape,"TensorList shape mismatch: "),Sn(s)}),this.idTensor=Et(0),this.maxNumElements=o,Sn(this.idTensor)}get id(){return this.idTensor.id}copy(){return new Yc([...this.tensors],this.elementShape,this.elementDtype)}clearAndClose(t){this.tensors.forEach(e=>{(t==null||!t.has(e.id))&&e.dispose()}),this.tensors.length=0,this.idTensor.dispose()}size(){return this.tensors.length}stack(t,e,r=-1){if(e!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);if(r!==-1&&this.tensors.length!==r)throw new Error(`Operation expected a list with ${r} elements but got a list with ${this.tensors.length} elements.`);return to(t,this.elementShape,"TensorList shape mismatch: "),rt(()=>{let o=this.tensors.map(s=>Q(s,t));return ur(o,0)})}popBack(t,e){if(e!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);if(this.size()===0)throw new Error("Trying to pop from an empty list.");let r=this.tensors.pop();return to(r.shape,t,"TensorList shape mismatch: "),Q(r,t)}pushBack(t){if(t.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${t.dtype}, but list elements ${this.elementDtype}`);if(to(t.shape,this.elementShape,"TensorList shape mismatch: "),this.maxNumElements===this.size())throw new Error("Trying to push element into a full list.");Sn(t),this.tensors.push(t)}resize(t){if(t<0)throw new Error(`TensorListResize expects size to be non-negative. Got: ${t}`);if(this.maxNumElements!==-1&&t>this.maxNumElements)throw new Error(`TensorListResize input size ${t} is greater maxNumElement ${this.maxNumElements}.`);this.tensors.length=t}getItem(t,e,r){if(r!==this.elementDtype)throw new Error(`Invalid data types; op elements ${r}, but list elements ${this.elementDtype}`);if(t<0||t>this.tensors.length)throw new Error(`Trying to access element ${t} in a list with ${this.tensors.length} elements.`);if(this.tensors[t]==null)throw new Error(`element at index ${t} is null.`);return to(this.tensors[t].shape,e,"TensorList shape mismatch: "),this.tensors[t]}setItem(t,e){if(e.dtype!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);if(t<0||this.maxNumElements!==-1&&t>=this.maxNumElements)throw new Error(`Trying to set element ${t} in a list with max ${this.maxNumElements} elements.`);to(this.elementShape,e.shape,"TensorList shape mismatch: "),Sn(e),this.tensors[t]=e}gather(t,e,r){if(e!==this.elementDtype)throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);return to(this.elementShape,r,"TensorList shape mismatch: "),t=t.slice(0,this.size()),t.length===0?un([],[0].concat(this.elementShape)):rt(()=>{let o=t.map(s=>Q(this.tensors[s],r));return ur(o,0)})}concat(t,e){if(!!t&&t!==this.elementDtype)throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${t}`);return to(this.elementShape,e,"TensorList shape mismatch: "),this.size()===0?un([],[0].concat(this.elementShape)):rt(()=>{let r=this.tensors.map(o=>Q(o,e));return Qe(r,0)})}}function JH(n,t,e){let r=n.dtype;if(n.shape.length<1)throw new Error(`Tensor must be at least a vector, but saw shape: ${n.shape}`);if(n.dtype!==e)throw new Error(`Invalid data types; op 
elements ${n.dtype}, but list elements ${e}`);let o=n.shape.slice(1);to(o,t,"TensorList shape mismatch: ");let s=mo(n);return new Yc(s,t,r)}function ZH(n,t,e){return new Yc([],n,t,e)}function QH(n,t,e,r){if(t.length!==n.shape[0])throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t.length} vs. ${n.shape[0]}`);let o=Math.max(...t);if(r!=null&&r!==-1&&o>=r)throw new Error(`Max index must be < array size (${o} vs. ${r})`);let s=new Yc([],e,n.dtype,r),c=mo(n,0);return t.forEach((l,p)=>{s.setItem(l,c[p])}),s}function tj(n,t,e){let r=0,o=t.map(p=>(r+=p,r));if(r!==n.shape[0])throw new Error(`Expected sum of lengths to be equal to tensor.shape[0], but sum of lengths is - ${s}, and tensor's shape is: ${e.shape}`);const o=s===0?0:e.size/s,a=Q(()=>{const h=[];e=K(e,[1,s,o]);for(let d=0;d{switch(e.op){case"If":case"StatelessIf":{const s=R("thenBranch",e,t,n),i=R("elseBranch",e,t,n),o=R("cond",e,t,n),a=R("args",e,t,n),c=await o.data();return c[0]?n.functionMap[s].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap):n.functionMap[i].executeFunctionAsync(a,n.tensorArrayMap,n.tensorListMap)}case"While":case"StatelessWhile":{const s=R("body",e,t,n),i=R("cond",e,t,n),o=R("args",e,t,n),a=await n.functionMap[i].executeFunctionAsync(o,n.tensorArrayMap,n.tensorListMap),c=o.map(m=>m.id);let h=await a[0].data();a.forEach(m=>{!m.kept&&c.indexOf(m.id)===-1&&m.dispose()});let d=o;for(;h[0];){const m=d;d=await n.functionMap[s].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);const f=d.map(w=>w.id);m.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&f.indexOf(w.id)===-1&&w.dispose()});const b=await n.functionMap[i].executeFunctionAsync(d,n.tensorArrayMap,n.tensorListMap);h=await b[0].data(),b.forEach(w=>{!w.kept&&c.indexOf(w.id)===-1&&f.indexOf(w.id)===-1&&w.dispose()})}return d}case"LoopCond":{const s=R("pred",e,t,n);return[ar(s)]}case"Switch":{const s=R("pred",e,t,n);let i=R("data",e,t,n);return i.kept||(i=ar(i)),(await s.data())[0]?[void 0,i]:[i,void 0]}case"Merge":{const s=e.inputNames.find(i=>ss(i,t,n)!==void 0);if(s){const i=ss(s,t,n);return[ar(i)]}return}case"Enter":{const s=R("frameName",e,t,n),i=R("tensor",e,t,n);return n.enterFrame(s),[ar(i)]}case"Exit":{const s=R("tensor",e,t,n);return n.exitFrame(),[ar(s)]}case"NextIteration":{const s=R("tensor",e,t,n);return n.nextIteration(),[ar(s)]}case"TensorArrayV3":{const s=R("size",e,t,n),i=R("dtype",e,t,n),o=R("elementShape",e,t,n),a=R("dynamicSize",e,t,n),c=R("clearAfterRead",e,t,n),h=R("identicalElementShapes",e,t,n),d=R("name",e,t,n),m=new $Y(d,i,s,o,h,a,c);return n.addTensorArray(m),[m.idTensor,Ce(1)]}case"TensorArrayWriteV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.write(i,o),[a.idTensor]}case"TensorArrayReadV3":{const s=R("tensorArrayId",e,t,n),i=R("index",e,t,n),o=n.getTensorArray(s.id);return[o.read(i)]}case"TensorArrayGatherV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("dtype",e,t,n),a=n.getTensorArray(s.id);return[a.gather(i,o)]}case"TensorArrayScatterV3":{const s=R("tensorArrayId",e,t,n),i=R("indices",e,t,n),o=R("tensor",e,t,n),a=n.getTensorArray(s.id);return a.scatter(i,o),[a.idTensor]}case"TensorArrayConcatV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id),o=R("dtype",e,t,n);return[i.concat(o)]}case"TensorArraySplitV3":{const s=R("tensorArrayId",e,t,n),i=R("tensor",e,t,n),o=R("lengths",e,t,n),a=n.getTensorArray(s.id);return a.split(o,i),[a.idTensor]}case"TensorArraySizeV3":{const 
s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return[Ce(i.size(),"int32")]}case"TensorArrayCloseV3":{const s=R("tensorArrayId",e,t,n),i=n.getTensorArray(s.id);return i.clearAndClose(),[i.idTensor]}case"TensorListSetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("tensor",e,t,n),a=n.getTensorList(s.id);return a.setItem(i,o),[a.idTensor]}case"TensorListGetItem":{const s=R("tensorListId",e,t,n),i=R("index",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.getItem(i,o,a)]}case"TensorListScatterV2":case"TensorListScatter":{const s=R("indices",e,t,n),i=R("tensor",e,t,n),o=R("elementShape",e,t,n),a=R("numElements",e,t,n),c=MY(i,s,o,a);return n.addTensorList(c),[c.idTensor]}case"TensorListReserve":{const s=R("elementShape",e,t,n),i=R("elementDType",e,t,n),o=R("numElements",e,t,n),a=BY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListGather":{const s=R("tensorListId",e,t,n),i=R("indices",e,t,n),o=R("elementShape",e,t,n),a=R("elementDType",e,t,n),c=n.getTensorList(s.id);return[c.gather(i,a,o)]}case"TensorListStack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=R("numElements",e,t,n),c=n.getTensorList(s.id);return[c.stack(i,o,a)]}case"TensorListFromTensor":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=UY(s,i,o);return n.addTensorList(a),[a.idTensor]}case"TensorListConcat":{const s=R("tensorListId",e,t,n),i=n.getTensorList(s.id),o=R("dtype",e,t,n),a=R("elementShape",e,t,n);return[i.concat(o,a)]}case"TensorListPushBack":{const s=R("tensorListId",e,t,n),i=R("tensor",e,t,n),o=n.getTensorList(s.id);return o.pushBack(i),[o.idTensor]}case"TensorListPopBack":{const s=R("tensorListId",e,t,n),i=R("elementShape",e,t,n),o=R("elementDType",e,t,n),a=n.getTensorList(s.id);return[a.popBack(i,o)]}case"TensorListSplit":{const s=R("tensor",e,t,n),i=R("elementShape",e,t,n),o=R("lengths",e,t,n),a=PY(s,o,i);return n.addTensorList(a),[a.idTensor]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ete="control";function VN(e,t,n){const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=s==="fusedbatchnorm",h=R("numArgs",e,t,n);if(o){if(a&&h!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&h!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(c)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");const d=R("strides",e,t,n),m=Sm(e,t,n),f=R("dataFormat",e,t,n).toUpperCase(),b=R("dilations",e,t,n),[w,L]=R("args",e,t,n);return{stride:d,pad:m,dataFormat:f,dilations:b,biasArg:w,preluArg:L,activationFunc:i}}const VY=(e,t,n)=>{switch(e.op){case"Conv1D":{const s=R("stride",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilation",e,t,n);return[ip(R("x",e,t,n),R("filter",e,t,n),s,i,o,a)]}case"Conv2D":{const 
s=R("strides",e,t,n),i=Sm(e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[Ji(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,o,[a[1],a[2]])]}case"_FusedConv2D":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=VN(e,t,n);return[Kb({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"FusedDepthwiseConv2dNative":{const{stride:s,pad:i,dataFormat:o,dilations:a,biasArg:c,preluArg:h,activationFunc:d}=VN(e,t,n);return[$A({x:R("x",e,t,n),filter:R("filter",e,t,n),strides:[s[1],s[2]],pad:i,dataFormat:o,dilations:[a[1],a[2]],bias:c,activation:d,preluActivationWeights:h})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{const s=R("outputShape",e,t,n),i=R("strides",e,t,n),o=Sm(e,t,n);return[rp(R("x",e,t,n),R("filter",e,t,n),s,[i[1],i[2]],o)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{const s=R("strides",e,t,n),i=Sm(e,t,n),o=R("dilations",e,t,n),a=R("dataFormat",e,t,n).toUpperCase();return[Co(R("input",e,t,n),R("filter",e,t,n),[s[1],s[2]],i,a,[o[1],o[2]])]}case"Conv3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dataFormat",e,t,n).toUpperCase(),a=R("dilations",e,t,n);return[Lb(R("x",e,t,n),R("filter",e,t,n),[s[1],s[2],s[3]],i,o,[a[1],a[2],a[3]])]}case"AvgPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[ah(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPool":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[fh(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i)]}case"MaxPoolWithArgmax":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n),a=R("includeBatchInIndex",e,t,n),{result:c,indexes:h}=lA(R("x",e,t,n),[o[1],o[2]],[s[1],s[2]],i,a);return[c,h]}case"AvgPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[yb(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"MaxPool3D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("kernelSize",e,t,n);return[Ob(R("x",e,t,n),[o[1],o[2],o[3]],[s[1],s[2],s[3]],i)]}case"Dilation2D":{const s=R("strides",e,t,n),i=R("pad",e,t,n),o=R("dilations",e,t,n),a=s[1],c=s[2],h=o[1],d=o[2];return[Ib(R("x",e,t,n),R("filter",e,t,n),[a,c],i,[h,d],"NHWC")]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Dte="convolution";const GY=(e,t,n)=>{switch(e.op){case"Fill":{const s=R("shape",e,t,n),i=R("dtype",e,t,n),o=R("value",e,t,n);return[Ba(s,o,i)]}case"LinSpace":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("num",e,t,n);return[oA(s,i,o)]}case"Multinomial":{const s=R("logits",e,t,n),i=R("numSamples",e,t,n),o=R("seed",e,t,n);return[hA(s,i,o)]}case"OneHot":{const s=R("indices",e,t,n),i=R("depth",e,t,n),o=R("onValue",e,t,n),a=R("offValue",e,t,n);return[To(s,i,o,a)]}case"Ones":return[Js(R("shape",e,t,n),R("dtype",e,t,n))];case"OnesLike":return[Fn(R("x",e,t,n))];case"RandomUniform":return[ko(R("shape",e,t,n),R("minval",e,t,n),R("maxval",e,t,n),R("dtype",e,t,n))];case"Range":{const s=R("start",e,t,n),i=R("stop",e,t,n),o=R("step",e,t,n);return[bh(s,i,o,R("dtype",e,t,n))]}case"TruncatedNormal":{const s=R("shape",e,t,n),i=R("mean",e,t,n),o=R("stdDev",e,t,n),a=R("seed",e,t,n);return[xh(s,i,o,R("dtype",e,t,n),a)]}case"Zeros":return[dt(R("shape",e,t,n),R("dtype",e,t,n))];case"ZerosLike":return[et(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},kte="creation";function sS(e,t,n){const 
s=R("boxes",e,t,n),i=R("scores",e,t,n),o=R("maxOutputSize",e,t,n),a=R("iouThreshold",e,t,n),c=R("scoreThreshold",e,t,n),h=R("softNmsSigma",e,t,n);return{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}}const YY=async(e,t,n)=>{switch(e.op){case"NonMaxSuppressionV5":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=sS(e,t,n),d=await zr.nonMaxSuppressionWithScoreAsync(s,i,o,a,c,h);return[d.selectedIndices,d.selectedScores]}case"NonMaxSuppressionV4":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=sS(e,t,n),h=R("padToMaxOutputSize",e,t,n),d=await zr.nonMaxSuppressionPaddedAsync(s,i,o,a,c,h);return[d.selectedIndices,d.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{const{boxes:s,scores:i,maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=sS(e,t,n);return[await zr.nonMaxSuppressionAsync(s,i,o,a,c)]}case"Where":{const s=Ae(R("condition",e,t,n),"bool"),i=[await Yb(s)];return s.dispose(),i}case"ListDiff":return dA(R("x",e,t,n),R("y",e,t,n));default:throw TypeError(`Node type ${e.op} is not implemented`)}},Fte="dynamic";const HY=(e,t,n)=>{switch(e.op){case"TopKV2":{const s=R("x",e,t,n),i=R("k",e,t,n),o=R("sorted",e,t,n),a=Vb(s,i,o);return[a.values,a.indices]}case"Unique":{const s=R("x",e,t,n),i=Tp(s);return[i.values,i.indices]}case"UniqueV2":{const s=R("x",e,t,n),i=R("axis",e,t,n),o=Tp(s,i);return[o.values,o.indices]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},_te="evaluation";const qY=(e,t,n)=>{switch(e.op){case"Const":return t[e.name];case"PlaceholderWithDefault":const s=R("default",e,t,n);return[ss(e.name,t,n)||s];case"Placeholder":return[ss(e.name,t,n)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{const d=R("x",e,t,n);return[ar(d)]}case"IdentityN":return R("x",e,t,n).map(d=>ar(d));case"Snapshot":const i=R("x",e,t,n);return[ar(i)];case"Shape":return[ls(R("x",e,t,n).shape,"int32")];case"ShapeN":return R("x",e,t,n).map(d=>ls(d.shape));case"Size":return[Ce(R("x",e,t,n).size,"int32")];case"Rank":return[Ce(R("x",e,t,n).rank,"int32")];case"NoOp":return[Ce(1)];case"Print":const o=R("x",e,t,n),a=R("data",e,t,n),c=R("message",e,t,n),h=R("summarize",e,t,n);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(c);for(let d=0;de.dispose()),this.tensorMap.clear(),this.handle.dispose()}size(){return this.tensorMap.size}async import(e,t){this.checkKeyAndValueTensor(e,t);const n=await e.data();return this.tensorMap.forEach(s=>s.dispose()),this.tensorMap.clear(),Q(()=>{const s=Qs(t),i=n.length,o=s.length;A(i===o,()=>`The number of elements doesn't match, keys has ${i} elements, the values has ${o} elements.`);for(let a=0;a{const s=[];for(let i=0;i{switch(e.op){case"HashTable":case"HashTableV2":{const i=R("keyDType",e,t,n),o=R("valueDType",e,t,n),a=new jY(i,o);return s.addHashTable(e.name,a),[a.handle]}case"LookupTableImport":case"LookupTableImportV2":{const i=R("tableHandle",e,t,n,s),o=R("keys",e,t,n),a=R("values",e,t,n),c=s.getHashTableById(i.id);return[await c.import(o,a)]}case"LookupTableFind":case"LookupTableFindV2":{const i=R("tableHandle",e,t,n,s),o=R("keys",e,t,n),a=R("defaultValue",e,t,n),c=s.getHashTableById(i.id);return[await c.find(o,a)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},$te="hash_table";const XY=(e,t,n)=>{switch(e.op){case"ResizeBilinear":{const 
s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[zr.resizeBilinear(s,[i[0],i[1]],o)]}case"ResizeNearestNeighbor":{const s=R("images",e,t,n),i=R("size",e,t,n),o=R("alignCorners",e,t,n);return[zr.resizeNearestNeighbor(s,[i[0],i[1]],o)]}case"CropAndResize":{const s=R("image",e,t,n),i=R("boxes",e,t,n),o=R("boxInd",e,t,n),a=R("cropSize",e,t,n),c=R("method",e,t,n),h=R("extrapolationValue",e,t,n);return[zr.cropAndResize(s,i,o,a,c,h)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Ute="image";const JY=(e,t,n)=>{switch(e.op){case"Equal":return[Xs(R("a",e,t,n),R("b",e,t,n))];case"NotEqual":return[Br(R("a",e,t,n),R("b",e,t,n))];case"Greater":return[xs(R("a",e,t,n),R("b",e,t,n))];case"GreaterEqual":return[Zi(R("a",e,t,n),R("b",e,t,n))];case"Less":return[ph(R("a",e,t,n),R("b",e,t,n))];case"LessEqual":return[Ur(R("a",e,t,n),R("b",e,t,n))];case"LogicalAnd":return[Us(R("a",e,t,n),R("b",e,t,n))];case"LogicalNot":return[mh(R("a",e,t,n))];case"LogicalOr":return[pp(R("a",e,t,n),R("b",e,t,n))];case"Select":case"SelectV2":return[Bn(R("condition",e,t,n),R("a",e,t,n),R("b",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Bte="logical";const ZY=(e,t,n)=>{switch(e.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[ct(R("a",e,t,n),R("b",e,t,n),R("transposeA",e,t,n),R("transposeB",e,t,n))];case"Transpose":return[Ye(R("x",e,t,n),R("perm",e,t,n))];case"_FusedMatMul":const[s,i]=R("fusedOps",e,t,n),o=s==="biasadd",a=i==="prelu",c=R("numArgs",e,t,n);if(o){if(a&&c!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!a&&c!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}const[h,d]=R("args",e,t,n);return[Ep({a:R("a",e,t,n),b:R("b",e,t,n),transposeA:R("transposeA",e,t,n),transposeB:R("transposeB",e,t,n),bias:h,activation:i,preluActivationWeights:d})];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Mte="matrices";const QY=(e,t,n)=>{switch(e.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"FusedBatchNormV3":return[No(R("x",e,t,n),R("mean",e,t,n),R("variance",e,t,n),R("offset",e,t,n),R("scale",e,t,n),R("epsilon",e,t,n))];case"LRN":return[Nb(R("x",e,t,n),R("radius",e,t,n),R("bias",e,t,n),R("alpha",e,t,n),R("beta",e,t,n))];case"Softmax":return[Fo(R("x",e,t,n))];case"LogSoftmax":return[dp(R("x",e,t,n))];case"SparseToDense":return[Hb(R("sparseIndices",e,t,n),R("outputShape",e,t,n),R("sparseValues",e,t,n),R("defaultValue",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Pte="normalization";const eH=(e,t,n)=>{switch(e.op){case"Max":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Qn(R("x",e,t,n),s,i)]}case"Mean":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[qt(R("x",e,t,n),s,i)]}case"Min":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Va(R("x",e,t,n),s,i)]}case"Sum":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[$e(R("x",e,t,n),s,i)]}case"All":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[Qd(R("x",e,t,n),s,i)]}case"Any":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[ih(R("x",e,t,n),s,i)]}case"ArgMax":{const s=R("axis",e,t,n);return[rh(R("x",e,t,n),s)]}case"ArgMin":{const s=R("axis",e,t,n);return[lb(R("x",e,t,n),s)]}case"Prod":{const s=R("axis",e,t,n),i=R("keepDims",e,t,n);return[gp(R("x",e,t,n),s,i)]}case"Cumsum":{const 
s=R("axis",e,t,n),i=R("exclusive",e,t,n),o=R("reverse",e,t,n);return[ap(R("x",e,t,n),s,i,o)]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},zte="reduction";const tH=(e,t,n)=>{switch(e.op){case"ConcatV2":case"Concat":{const s=R("n",e,t,n),i=R("axis",e,t,n);let o=R("tensors",e,t,n);return o=o.slice(0,s),[Yt(o,i)]}case"GatherV2":case"Gather":{const s=R("axis",e,t,n),i=R("x",e,t,n),o=R("indices",e,t,n);return[Pa(i,Ae(o,"int32"),s)]}case"ReverseV2":case"Reverse":{const s=R("axis",e,t,n),i=R("x",e,t,n);return[Ts(i,s)]}case"Slice":{const s=R("begin",e,t,n),i=R("size",e,t,n);return[tt(R("x",e,t,n),s,i)]}case"StridedSlice":{const s=R("begin",e,t,n),i=R("end",e,t,n),o=R("strides",e,t,n),a=R("beginMask",e,t,n),c=R("endMask",e,t,n),h=R("ellipsisMask",e,t,n),d=R("newAxisMask",e,t,n),m=R("shrinkAxisMask",e,t,n),f=R("x",e,t,n);return[Pb(f,s,i,o,a,c,h,d,m)]}case"Pack":return Q(()=>{const s=R("axis",e,t,n),i=R("tensors",e,t,n),o=i[0].shape,a=Mr(i[0]).shape,c=i.map(h=>{const d=ae(h.shape,o);if(!d&&!ae(Mr(h).shape,a))throw new Error("the input tensors shape does not match");return d?h:K(h,o)});return[es(c,s)]});case"Unpack":{const s=R("axis",e,t,n),i=R("tensor",e,t,n);return Qs(i,s)}case"Tile":{const s=R("reps",e,t,n);return[$r(R("x",e,t,n),s)]}case"Split":case"SplitV":{const s=R("axis",e,t,n),i=R("numOrSizeSplits",e,t,n),o=R("x",e,t,n);return hs(o,i,s)}case"ScatterNd":{const s=R("indices",e,t,n),i=R("values",e,t,n),o=R("shape",e,t,n);return[EA(s,i,o)]}case"GatherNd":{const s=R("x",e,t,n),i=R("indices",e,t,n);return[DA(s,i)]}case"SparseToDense":{const s=R("sparseIndices",e,t,n),i=R("outputShape",e,t,n),o=R("sparseValues",e,t,n),a=R("defaultValue",e,t,n);return[Hb(s,o,i,o.dtype===a.dtype?a:Ae(a,o.dtype))]}default:throw TypeError(`Node type ${e.op} is not implemented`)}},Vte="slice_join";const nH=(e,t,n)=>{switch(e.op){case"FFT":return[Lh(R("x",e,t,n))];case"IFFT":return[qa(R("x",e,t,n))];case"RFFT":return[Sh(R("x",e,t,n))];case"IRFFT":return[xp(R("x",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Gte="spectral";const sH=(e,t,n)=>{switch(e.op){case"Cast":return[Ae(R("x",e,t,n),R("dtype",e,t,n))];case"ExpandDims":{const s=R("axis",e,t,n);return[Zn(R("x",e,t,n),s)]}case"Squeeze":{const s=R("axis",e,t,n);return[Mr(R("x",e,t,n),s)]}case"Reshape":return[K(R("x",e,t,n),R("shape",e,t,n))];case"MirrorPad":return[Eb(R("x",e,t,n),R("padding",e,t,n),R("mode",e,t,n))];case"PadV2":case"Pad":return[vi(R("x",e,t,n),R("padding",e,t,n),R("constantValue",e,t,n))];case"SpaceToBatchND":{const s=R("blockShape",e,t,n),i=R("paddings",e,t,n);return[gh(R("x",e,t,n),s,i)]}case"BatchToSpaceND":{const s=R("blockShape",e,t,n),i=R("crops",e,t,n);return[ch(R("x",e,t,n),s,i)]}case"DepthToSpace":{const s=R("blockSize",e,t,n),i=R("dataFormat",e,t,n).toUpperCase();return[Sb(R("x",e,t,n),s,i)]}case"BroadcastTo":return[lh(R("x",e,t,n),R("shape",e,t,n))];default:throw TypeError(`Node type ${e.op} is not implemented`)}},Yte="transformation";function GN(e,t,n,s){const i=((o,a,c)=>{switch(o.category){case"arithmetic":return Q(()=>FY(o,a,c));case"basic_math":return Q(()=>_Y(o,a,c));case"control":return zY(o,a,c);case"convolution":return Q(()=>VY(o,a,c));case"creation":return Q(()=>GY(o,a,c));case"dynamic":return YY(o,a,c);case"evaluation":return Q(()=>HY(o,a,c));case"image":return Q(()=>XY(o,a,c));case"graph":return Q(()=>qY(o,a,c));case"logical":return Q(()=>JY(o,a,c));case"matrices":return Q(()=>ZY(o,a,c));case"normalization":return Q(()=>QY(o,a,c));case"reduction":return 
Q(()=>eH(o,a,c));case"slice_join":return Q(()=>tH(o,a,c));case"spectral":return Q(()=>nH(o,a,c));case"transformation":return Q(()=>sH(o,a,c));case"hash_table":return KY(o,a,c,s);case"custom":const h=UN(o.op);if(h&&h.customExecutor)return h.customExecutor(new kY(o,a,c));throw TypeError(`Custom op ${o.op} is not registered.`);default:throw TypeError(`Unknown op '${o.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(e,t,n);return bo(i)?i.then(o=>[].concat(o)):[].concat(i)}class YN{constructor(e={},t={},n={},s={}){this.weightMap=e,this.tensorArrayMap=t,this.tensorListMap=n,this.functionMap=s,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(e,t){return{id:e,frameName:t,iterationId:0}}set currentContext(e){this.contexts!==e&&(this.contexts=e,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){const e=[];for(let t=0;tt.id===0&&t.iterationId===0?"":`${t.frameName}-${t.iterationId}`).join("/"):""}enterFrame(e){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,e)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;const e=Object.assign({},this.contexts[this.contexts.length-1]);e.iterationId+=1,e.id=this.lastId,this.contexts.splice(-1,1,e),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(e){return this.weightMap[e]}addTensorArray(e){this.tensorArrayMap[e.id]=e}getTensorArray(e){return this.tensorArrayMap[e]}addTensorList(e){this.tensorListMap[e.id]=e}getTensorList(e){return this.tensorListMap[e]}dispose(e){for(const t in this.tensorArrayMap)this.tensorArrayMap[t].clearAndClose(e);for(const t in this.tensorListMap)this.tensorListMap[t].clearAndClose(e)}}function HN(e,t,n,s){const i=new Set,o=[];let a=null,c=null;const h=new Set,d=Object.keys(e).map(b=>ds(b)[0]);let m=[];s!=null&&(m=s.map(b=>ds(b.name)[0]));const f=[...t];for(;f.length>0;){const b=f.pop();if((qN(b)||cH(b)||lH(b))&&(a==null&&(a=b,c=a.children.map(w=>w.name).filter(w=>i.has(w)))),i.add(b.name),n[b.name]!=null)continue;if(d.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){o.push(b.name);continue}b.inputs.forEach(w=>{if(h.has(w.name))return;h.add(w.name),f.push(w)})}return{inputs:e,outputs:t,usedNodes:i,missingInputs:o,dynamicNode:a,syncInputs:c}}function iH(e,t,n){const{usedNodes:s,inputs:i}=n,o=[],a=Object.keys(i).map(m=>ds(m)[0]).map(m=>e.nodes[m]),c=e.initNodes;a.forEach(m=>{s.has(m.name)&&o.push(m)}),e.weights.forEach(m=>{s.has(m.name)&&o.push(m)}),c!=null&&c.forEach(m=>{s.has(m.name)&&o.push(m)});const h=new Set,d=[];for(;o.length>0;){const m=o.pop();h.add(m.name),t[m.name]||d.push(m),m.children.forEach(f=>{!h.has(f.name)&&s.has(f.name)&&f.inputs.every(b=>h.has(b.name))&&o.push(f)})}return d}const 
rH=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],oH=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"],aH=["HashTable","HashTableV2","LookupTableImport","LookupTableImportV2","LookupTableFind","LookupTableFindV2"];function qN(e){return rH.indexOf(e.op)>=0}function cH(e){return oH.indexOf(e.op)>=0}function lH(e){return aH.indexOf(e.op)>=0}class iS{constructor(e,t){this.graph=e,this.parent=t,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=e.outputs,this._inputs=e.inputs,this._initNodes=e.initNodes,this._signature=e.signature,this._functions=e.functions,e.functions!=null&&Object.keys(e.functions).forEach(n=>{this._functionExecutorMap[n]=new iS(e.functions[n],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(e){const t=Object.keys(e).map(n=>e[n].map(s=>s.id));this._weightIds=[].concat(...t),this._weightMap=e}set resourceManager(e){this._resourceManager=e}get inputs(){return this._inputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(e=>({name:e.name,shape:e.attrParams.shape?e.attrParams.shape.value:void 0,dtype:e.attrParams.dtype?e.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(e=>e.signatureKey||e.name)}get outputNodes(){return this._outputs.map(e=>{const t=e.signatureKey||e.name;return e.defaultOutput?`${t}:${e.defaultOutput}`:t})}get functions(){return Object.keys(this._functions).reduce((e,t)=>(e[t]=this._functions[t].signature,e),{})}getCompilationKey(e,t){const n=e.map(i=>i.name).sort(),s=t.map(i=>i.name).sort();return n.join(this.SEPERATOR)+"--"+s.join(this.SEPERATOR)}compile(e,t){const n=HN(e,t,this.weightMap,this._initNodes),{missingInputs:s,dynamicNode:i,syncInputs:o}=n;if(i!=null)throw new Error(`This execution contains the node '${i.name}', which has the dynamic op '${i.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${o}]`);if(s.length>0){const a=t.map(h=>h.name),c=Object.keys(e);throw new Error(`Cannot compute the outputs [${a}] from the provided inputs [${c}]. 
Missing the following inputs: [${s}]`)}return iH(this.graph,this.weightMap,n)}execute(e,t){e=this.mapInputs(e);const n=Object.keys(e).sort();this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t);const s=n.map(m=>this.graph.nodes[ds(m)[0]]),i=t.map(m=>ds(m)[0]);let o=i.map(m=>this.graph.nodes[m]);o.length===0&&(o=this._outputs);const a=this.getCompilationKey(s,o);let c=this.compiledMap.get(a);c==null&&(c=this.compile(e,o),this.compiledMap.set(a,c));const h={},d={};return Q(()=>{const m=new YN(this.weightMap,h,d,this.functionExecutorMap),f=Object.assign({},this.weightMap);Object.keys(e).forEach(L=>{const[x,v]=ds(L),N=[];N[v]=e[L],f[x]=N});const b=this.getFrozenTensorIds(f),w={};for(let L=0;Lss(L,f,m))})}getFrozenTensorIds(e){const t=[].concat.apply([],Object.keys(e).map(n=>e[n]).map(n=>n.map(s=>s.id)));return new Set(t)}checkTensorForDisposal(e,t,n,s,i,o,a){if(t.category==="control"||o.indexOf(e)!==-1)return;n[e].forEach(c=>{c!=null&&(a[c.id]=(a[c.id]||0)+t.children.length)}),t.inputs.forEach(c=>{if(c.category!=="control"){const h=JG(c.name,n,s);h!=null&&h.forEach(d=>{if(d&&!i.has(d.id)){const m=a[d.id];m===1?(d.dispose(),delete a[d.id]):m!=null&&a[d.id]--}})}})}async executeAsync(e,t){return this._executeAsync(e,t)}async _executeAsync(e,t,n=!1,s={},i={}){n||(e=this.mapInputs(e),this.checkInputs(e),this.checkInputShapeAndType(e),t=this.mapOutputs(t),this.checkOutputs(t));const o=new YN(this.weightMap,s,i,this.functionExecutorMap),a=await this.executeWithControlFlow(e,o,t,n),c=t.map(f=>ss(f,a,o)),h=c.map(f=>f.id),d=Object.keys(e).map(f=>e[f].id),m=new Set([...h,...d,...this.weightIds]);return Object.keys(a).forEach(f=>{const b=a[f];b.forEach(w=>{w&&!w.isDisposed&&!m.has(w.id)&&w.dispose()})}),this.parent==null&&o.dispose(m),c}async executeFunctionAsync(e,t,n){const s=e.reduce((i,o,a)=>(i[this.inputs[a].name]=o,i),{});return this._executeAsync(s,this.outputNodes,!0,t,n)}async executeWithControlFlow(e,t,n,s){const i=Object.keys(e),o=i.map(O=>this.graph.nodes[ds(O)[0]]),a=n.map(O=>ds(O)[0]);let c=a.map(O=>this.graph.nodes[O]);c.length===0&&(c=this._outputs);const{usedNodes:h,missingInputs:d,dynamicNode:m,syncInputs:f}=HN(e,c,this.weightMap,this._initNodes),b=[...o,...this.graph.weights,...this._initNodes||[]].map(O=>({node:O,contexts:t.currentContext})),w=Object.assign({},this.weightMap);Object.keys(e).forEach(O=>{const[E,k]=ds(O),F=[];F[k]=e[O],w[E]=F});const L={},x=this.getFrozenTensorIds(w),v={};for(;b.length>0;){const O=this.processStack(o,b,t,w,v,x,a,L,h);await Promise.all(O)}m==null&&!s&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");const N=c.filter(O=>!qN(O)&&!ss(O.name,w,t)).map(O=>O.name);if(N.length>0){let O="";throw m!=null&&(O=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${f}]`),new Error(`Cannot compute the outputs [${N}] from the provided inputs [${i}]. Consider providing the following inputs: [${d}]. 
${O}`)}return w}processStack(e,t,n,s,i,o,a,c,h){const d=[];for(;t.length>0;){const m=t.pop();n.currentContext=m.contexts;let f="";if(m.node.op==="Enter"&&R("isConstant",m.node,s,n)&&([f]=or(m.node.name,n)),s[m.node.name]==null){const b=GN(m.node,s,n,this._resourceManager);f||([f]=or(m.node.name,n));const w=n.currentContext;bo(b)?d.push(b.then(L=>(s[f]=L,n.currentContext=w,this.checkTensorForDisposal(f,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h),L))):(s[f]=b,this.checkTensorForDisposal(f,m.node,s,n,o,a,c),this.processChildNodes(m.node,t,n,s,i,h))}else this.processChildNodes(m.node,t,n,s,i,h)}return d}processChildNodes(e,t,n,s,i,o){e.children.forEach(a=>{const[c]=or(a.name,n);if(i[c]||!o.has(a.name))return;a.op==="Merge"?a.inputNames.some(h=>!!ss(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a})):a.inputNames.every(h=>!!ss(h,s,n))&&(i[c]=!0,t.push({contexts:n.currentContext,node:a}))})}dispose(){Object.keys(this.weightMap).forEach(e=>this.weightMap[e].forEach(t=>t.dispose()))}checkInputShapeAndType(e){Object.keys(e).forEach(t=>{const n=e[t],[s]=ds(t),i=this.graph.nodes[s];if(i.attrParams.shape&&i.attrParams.shape.value){const o=i.attrParams.shape.value,a=o.length===n.shape.length&&n.shape.every((c,h)=>o[h]===-1||o[h]===c);A(a,()=>`The shape of dict['${i.name}'] provided in model.execute(dict) must be [${o}], but was [${n.shape}]`)}i.attrParams.dtype&&i.attrParams.dtype.value&&A(n.dtype===i.attrParams.dtype.value,()=>`The dtype of dict['${i.name}'] provided in model.execute(dict) must be ${i.attrParams.dtype.value}, but was ${n.dtype}`)})}mapInputs(e){const t={};for(const n in e)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[n]!=null){const s=this._signature.inputs[n];t[s.name]=e[n]}else t[n]=e[n];return t}checkInputs(e){const t=Object.keys(e).filter(n=>{const[s]=ds(n);return this.graph.nodes[s]==null});if(t.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${t}] that are not part of graph`)}mapOutputs(e){return e.map(t=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[t]!=null){const n=this._signature.outputs[t];return n.name}return t},{})}checkOutputs(e){e.forEach(t=>{const[n]=ds(t);if(!this.graph.nodes[n])throw new Error(`The output '${t}' is not found in the graph`)})}}class hH{constructor(e={},t={}){this.hashTableNameToHandle=e,this.hashTableMap=t}addHashTable(e,t){this.hashTableNameToHandle[e]=t.handle,this.hashTableMap[t.id]=t}getHashTableHandleByName(e){return this.hashTableNameToHandle[e]}getHashTableById(e){return this.hashTableMap[e]}dispose(){for(const e in this.hashTableMap)this.hashTableMap[e].clearAndClose(),delete this.hashTableMap[e];for(const e in this.hashTableNameToHandle)this.hashTableNameToHandle[e].dispose(),delete this.hashTableNameToHandle[e]}}const uH="?tfjs-format=file",dH="model.json";class jN{constructor(e,t={}){this.modelUrl=e,this.loadOptions=t,this.version="n/a",t==null&&(this.loadOptions={}),this.resourceManager=new hH}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){const e=this.modelUrl;if(e.load!=null)this.handler=e;else if(this.loadOptions.requestInit!=null)this.handler=Yd(e,this.loadOptions);else{const t=Vy(e,this.loadOptions);if(t.length===0)t.push(Yd(e,this.loadOptions));else if(t.length>1)throw new 
Error(`Found more than one (${t.length}) load handlers for URL '${[e]}'`);this.handler=t[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");const e=await this.handler.load();return this.loadSync(e)}loadSync(e){this.artifacts=e;const t=this.artifacts.modelTopology;let n={};this.artifacts.userDefinedMetadata!=null&&(n=this.artifacts.userDefinedMetadata.signature),this.version=`${t.versions.producer}.${t.versions.minConsumer}`;const s=Pd(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new iS(BN.Instance.transformGraph(t,n)),this.executor.weightMap=this.convertTensorMapToTensorsMap(s),this.executor.resourceManager=this.resourceManager,e.modelInitializer!=null){const i=BN.Instance.transformGraph(e.modelInitializer);this.initializer=new iS(i),this.initializer.weightMap=this.executor.weightMap,this.initializer.resourceManager=this.resourceManager,this.initializer.executeAsync({},[])}return!0}async save(e,t){if(typeof e=="string"){const n=zy(e);if(n.length===0)throw new Error(`Cannot find any save handlers for URL '${e}'`);if(n.length>1)throw new Error(`Found more than one (${n.length}) save handlers for URL '${e}'`);e=n[0]}if(e.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return e.save(this.artifacts)}predict(e,t){return this.execute(e,this.outputNodes)}normalizeInputs(e){if(!(e instanceof ee)&&!Array.isArray(e))return e;if(e=Array.isArray(e)?e:[e],e.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${e.length} input tensors.`);return this.inputNodes.reduce((t,n,s)=>(t[n]=e[s],t),{})}normalizeOutputs(e){return e=e||this.outputNodes,Array.isArray(e)?e:[e]}execute(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=this.executor.execute(e,t);return n.length>1?n:n[0]}async executeAsync(e,t){e=this.normalizeInputs(e),t=this.normalizeOutputs(t);const n=await this.executor.executeAsync(e,t);return n.length>1?n:n[0]}convertTensorMapToTensorsMap(e){return Object.keys(e).reduce((t,n)=>(t[n]=[e[n]],t),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose(),this.resourceManager.dispose()}}async function pH(e,t={}){if(e==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(e.load==null&&(e.endsWith("/")||(e=e+"/"),e=`${e}${dH}${uH}`));const n=new jN(e,t);return await n.load(),n}const KN="2.7.0";function mH(e,t){return Im(e,t)}function Im(e,t,n=new Map,s=new Set){if(e==null)return null;if(s.has(e))throw new Error("Circular references are not supported.");if(n.has(e))return n.get(e);const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(i.recurse)if(cc(e)){const o=Array.isArray(e)?[]:{};s.add(e);for(const a in e){const c=e[a],h=Im(c,t,n,s);o[a]=h}return s.delete(e),o}else throw new Error(`Can't recurse into non-iterable type: ${e}`);else return n.set(e,i.value),i.value}function fH(e,t=JN){return XN(e,t)}function XN(e,t,n=new Set){const s=e[0];if(n.has(s))throw new Error("Circular references are not supported.");const i=t(e);if(i.recurse&&i.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(i.recurse)if(cc(s)){const o=Array.isArray(s)?[]:{};n.add(s);for(const a in s){const c=e.map(d=>d[a]),h=XN(c,t,n);o[a]=h}return n.delete(s),o}else throw new Error(`Can't recurse into non-iterable type: ${s}`);else return i.value}function JN(e){return e===null?null:cc(e[0])?{value:null,recurse:!0}:{value:e,recurse:!1}}async function ZN(e,t){const n=new Map;Im(e,t,n);for(const i of Array.from(n.keys())){const o=n.get(i);if(bo(o)){const a=await o;n.set(i,a)}}const s=Im(e,t,n);return s}function cc(e){return e!=null&&!ArrayBuffer.isView(e)&&(Array.isArray(e)||typeof e=="object"&&!(e instanceof ee))}function gH(e){return e==null||yH(e)||Array.isArray(e)||typeof e=="object"&&e instanceof ee||hn(e)}function yH(e){return e===null||typeof e!="object"&&typeof e!="function"}function bH(e){return mH(e,wH)}function wH(e){return e instanceof ee?{value:e.clone(),recurse:!1}:cc(e)?{value:null,recurse:!0}:{value:e,recurse:!1}}class QN{constructor(e){if(this.capacity=e,this.begin=0,this.end=0,e==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(e<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(e),this.doubledCapacity=2*e}wrap(e){for(;e<0;)e+=this.doubledCapacity;return e%this.doubledCapacity}get(e){if(e<0)throw new RangeError("Can't get item at a negative index.");return this.data[e%this.capacity]}set(e,t){if(e<0)throw new RangeError("Can't set item at a negative index.");this.data[e%this.capacity]=t}length(){let e=this.end-this.begin;return e<0&&(e=this.doubledCapacity+e),e}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,e),this.end=this.wrap(this.end+1)}pushAll(e){for(const t of e)this.push(t)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);const e=this.get(this.end);return this.set(this.end,void 0),e}unshift(e){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,e)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const e=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),e}shuffleExcise(e){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");const t=this.wrap(this.begin+e),n=this.get(t);return this.set(t,this.pop()),n}}class rS extends 
QN{constructor(){super(rS.INITIAL_CAPACITY)}isFull(){return!1}push(e){super.isFull()&&this.expand(),super.push(e)}unshift(e){super.isFull()&&this.expand(),super.unshift(e)}expand(){const e=this.capacity*2,t=new Array(e),n=this.length();for(let s=0;s({value:t++,done:!1}))}function su(e){return new IH(e)}function t0(e,t){return new s0(e,t)}function qte(e,t,n){return t0(su(e).take(t),n)}function LH(e,t=Jr.FAIL){return new EH(e,t)}class Sn{async toArray(){const e=[];let t=await this.next();for(;!t.done;)e.push(t.value),t=await this.next();return e}async toArrayForTest(){const e=this.prefetch(100),t=[];let n=await e.next();for(;!n.done;)t.push(n.value),n=await e.next();return t}async resolveFully(){let e=await this.next();for(;!e.done;)e=await this.next()}async resolveWhile(e){let t=await this.next(),n=e(t.value);for(;!t.done&&n;)t=await this.next(),n=e(t.value)}handleErrors(e){return new RH(this,e)}filter(e){return new NH(this,e)}map(e){return new CH(this,e)}mapAsync(e){return new n0(this,e)}serialMapAsync(e){return new n0(this,e).serial()}flatmap(e){return new OH(this,e)}async forEachAsync(e){return this.map(e).resolveFully()}async serialForEach(e){return this.serialMapAsync(e).resolveWhile(t=>t===!0)}rowMajorBatch(e,t=!0){return new vH(this,e,t)}columnMajorBatch(e,t=!0,n=JN){const s=this.rowMajorBatch(e,t);return s.map(i=>fH(i,n))}concatenate(e,t){return new s0(e0([this,e]),t)}take(e){return e<0||e==null?this:new AH(this,e)}skip(e){return e<0||e==null?this:new TH(this,e)}prefetch(e){return new i0(this,e)}shuffle(e,t){return new DH(this,e,t)}serial(){return new xH(this)}}class SH extends Sn{constructor(e){super();this.items=e,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};const e=this.items[this.trav];return this.trav++,{value:bH(e),done:!1}}}class IH extends Sn{constructor(e){super();this.nextFn=e}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(e){throw e.message=`Error thrown while iterating through a dataset: ${e.message}`,e}}}class xH extends Sn{constructor(e){super();this.upstream=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class TH extends Sn{constructor(e,t){super();this.upstream=e,this.maxCount=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class vH extends Sn{constructor(e,t,n=!0){super();this.upstream=e,this.batchSize=t,this.enableSmallLastBatch=n,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){const e=[];for(;e.length0?{value:e,done:!1}:{value:null,done:!0};e.push(t.value)}return{value:e,done:!1}}}class NH extends Sn{constructor(e,t){super();this.upstream=e,this.predicate=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){const 
e=await this.upstream.next();if(e.done||this.predicate(e.value))return e;He(e.value)}}}class CH extends Sn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Map`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);for(const i of t)Bd(i,s)||i.dispose();return{value:n,done:!1}}}class RH extends Sn{constructor(e,t){super();this.upstream=e,this.handler=t,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(e){if(!this.handler(e))return{value:null,done:!0}}}}class n0 extends Sn{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){const e=await this.upstream.next();if(e.done)return{value:null,done:!0};const t=Hi(e.value),n=await this.transform(e.value),s=Hi(n);for(const i of t)Bd(i,s)||i.dispose();return{value:n,done:!1}}}class oS extends Sn{constructor(){super();this.outputQueue=new rS,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class OH extends oS{constructor(e,t){super();this.upstream=e,this.transform=t}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){const e=await this.upstream.next();if(e.done)return!1;const t=Hi(e.value),n=this.transform(e.value),s=Hi(n);this.outputQueue.pushAll(n);for(const i of t)Bd(i,s)||i.dispose();return!0}}class s0 extends Sn{constructor(e,t){super();this.baseErrorHandler=t,this.lastRead=null,this.iterator=null,this.moreIterators=e}summary(){const e="TODO: fill in upstream of chained summaries";return`${e} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(e){if(await e,this.iterator==null){const n=await this.moreIterators.next();if(n.done)return{value:null,done:!0};this.iterator=n.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}const t=await this.iterator.next();return t.done?(this.iterator=null,this.readFromChain(e)):t}}var Jr;(function(e){e[e.FAIL=0]="FAIL",e[e.SHORTEST=1]="SHORTEST",e[e.LONGEST=2]="LONGEST"})(Jr||(Jr={}));class EH extends Sn{constructor(e,t=Jr.FAIL){super();this.iterators=e,this.mismatchMode=t,this.count=0,this.currentPromise=null}summary(){const e="TODO: fill in upstream of zip summaries";return`{${e}} -> Zip`}async nextState(e){await e;let t=0,n=0;function s(o){if(o instanceof Sn){const a=o.next();return{value:a.then(c=>(t++,c.done&&n++,c.value)),recurse:!1}}else return{value:null,recurse:!0}}const i=await ZN(this.iterators,s);if(t===n)return{value:null,done:!0};if(n>0)switch(this.mismatchMode){case Jr.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case Jr.SHORTEST:return{value:null,done:!0};case Jr.LONGEST:default:}return this.count++,{value:i,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class i0 extends Sn{constructor(e,t){super();this.upstream=e,this.bufferSize=t,this.buffer=new QN(t)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){const e=this.upstream.next();this.buffer.push(e)}}next(){return this.refill(),this.buffer.shift()}}class DH extends i0{constructor(e,t,n){super(e,t);this.upstream=e,this.windowSize=t,this.upstreamExhausted=!1,this.random=Ha(n||jn().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(e){return Math.floor(this.random()*e)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){const e=this.chooseIndex(),t=await this.buffer.shuffleExcise(e);if(t.done)this.upstreamExhausted=!0;else return this.refill(),t}return{value:null,done:!0}}}class lc{constructor(){this.size=null}batch(e,t=!0){const n=this;A(e>0,()=>`batchSize needs to be positive, but it is - ${e}`);let s;return this.size===Infinity||this.size==null?s=this.size:t?s=Math.ceil(this.size/e):s=Math.floor(this.size/e),ps(async()=>(await n.iterator()).columnMajorBatch(e,t,_H),s)}concatenate(e){const t=this;let n;return this.size===Infinity||e.size===Infinity?n=Infinity:this.size!=null&&e.size!=null?n=this.size+e.size:n=null,ps(async()=>(await t.iterator()).concatenate(await e.iterator()),n)}filter(e){const t=this;let n;return this.size===Infinity?n=Infinity:n=null,ps(async()=>(await t.iterator()).filter(s=>Q(()=>e(s))),n)}async forEachAsync(e){return(await this.iterator()).forEachAsync(e)}map(e){const t=this;return ps(async()=>(await t.iterator()).map(n=>Q(()=>e(n))),this.size)}mapAsync(e){const t=this;return ps(async()=>(await t.iterator()).mapAsync(e),this.size)}prefetch(e){if(e==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");const t=this;return ps(async()=>(await t.iterator()).prefetch(e),this.size)}repeat(e){const t=this;let n;return this.size!=null&&e>0?n=this.size*e:e===0?n=0:this.size!=null&&(e===void 0||e<0)?n=Infinity:n=null,ps(async()=>{const s=su(async()=>({value:await t.iterator(),done:!1}));return t0(s.take(e))},n)}skip(e){const t=this;let n;return this.size!=null&&e>=0&&this.size>=e?n=this.size-e:this.size!=null&&(this.size(await t.iterator()).skip(e),n)}shuffle(e,t,n=!0){if(e==null||e<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);const s=this,i=Ha(t||jn().toString());return ps(async()=>{let o=i.int32();return n&&(o+=i.int32()),(await s.iterator()).shuffle(e,o.toString())},this.size)}take(e){const t=this;let n;return this.size!=null&&this.size>e?n=e:this.size!=null&&this.size<=e?n=this.size:n=null,ps(async()=>(await t.iterator()).take(e),n)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}lc.MAX_BUFFER_SIZE=1e4;function ps(e,t=null){return new class extends lc{constructor(){super(...arguments);this.size=t}async iterator(){return e()}}}function kH(e){return ps(async()=>e0(e),e.length)}function FH(e){if(!cc(e))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(e))for(let n=0;n{const n=await ZN(e,s=>{if(s instanceof lc)return{value:s.iterator(),recurse:!1};if(cc(s))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return LH(n,Jr.SHORTEST)},t)}function _H(e){if(e===null)return null;const t=e[0];if(gH(t)){const n=WH(e);return{value:n,recurse:!1}}return{value:null,recurse:!0}}function WH(e){if(e.length===0)throw new Error("Can't make a batch of zero elements.");return e[0]instanceof ee?es(e):sn(e)}class r0 extends lc{constructor(e){super();this.input=e}async iterator(){const e=await this.input.iterator(),t=e.decodeUTF8(),n=t.split(` -`).map(s=>(s.endsWith("\r")&&(s=s.slice(0,-1)),s));return n}}const xm='"',iu=Symbol("out"),o0=Symbol("field"),Tm=Symbol("quote"),aS=Symbol("quoteafterquote"),a0=Symbol("quoteinquote");class c0 extends lc{constructor(e,t){super();this.input=e,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new r0(e),t||(t={}),this.hasHeader=!(t.hasHeader===!1),this.fullColumnNames=t.columnNames,this.columnConfigs=t.columnConfigs,this.configuredColumnsOnly=t.configuredColumnsOnly,t.delimWhitespace?(A(t.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=t.delimiter?t.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){const e=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!e)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&e&&A(e.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+e.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=e);const t=this.fullColumnNames.reduce((s,i)=>(s[i]=s[i]+1||1,s),{}),n=Object.keys(t).filter(s=>t[s]>1);if(A(n.length===0,()=>"Duplicate column names found: "+n.toString()),this.columnConfigs)for(const s of Object.keys(this.columnConfigs)){const i=this.fullColumnNames.indexOf(s);if(i===-1)throw new Error('The key "'+s+'" provided in columnConfigs does not match any of the 
column names ('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){const e=await this.base.iterator(),t=await e.next();if(t.done)throw new Error("No data was found for CSV parsing.");const n=t.value,s=this.parseRow(n,!1);return s}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let e=await this.base.iterator();return this.hasHeader&&(e=e.skip(1)),e.map(t=>this.makeDataElement(t))}makeDataElement(e){const t=this.parseRow(e),n={},s={};for(let i=0;i14||!Number.isInteger(t))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=e.numFramesPerSpectrogram||43,this.sampleRateHz=e.sampleRateHz,this.columnTruncateLength=e.columnTruncateLength||this.fftSize,this.audioTrackConstraints=e.audioTrackConstraints,this.smoothingTimeConstant=e.smoothingTimeConstant||0,this.includeSpectrogram=!(e.includeSpectrogram===!1),this.includeWaveform=e.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(e={}){if(oe().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");const t=new l0(e);return await t.start(),t}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(n){throw new Error(`Error thrown while initializing video stream: ${n.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");const e=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new e,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);const t=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,t.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let e,t;const n=await this.getAudioData();if(this.includeSpectrogram){const s=this.flattenQueue(n.freqDataQueue);e=this.getTensorFromAudioDataArray(s,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){const s=this.flattenQueue(n.timeDataQueue);t=this.getTensorFromAudioDataArray(s,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:e,waveform:t},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){const e=[],t=[];let n=0;return new Promise(s=>{const 
i=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&s({freqDataQueue:e,timeDataQueue:t}),e.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),t.push(this.timeData.slice())),++n===this.numFrames&&(clearInterval(i),s({freqDataQueue:e,timeDataQueue:t}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(e){const t=e[0].length,n=new Float32Array(e.length*t);return e.forEach((s,i)=>n.set(s,i*t)),n}getTensorFromAudioDataArray(e,t){const n=new Float32Array(P(t));return n.set(e,n.length-e.length),sn(n,t)}}class h0 extends Sn{constructor(e,t){super();if(this.webcamVideoElement=e,this.webcamConfig=t,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=ls([0],"int32"),this.webcamConfig.centerCrop){const n=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,s=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,i=(1-n)/2,o=(1-s)/2,a=i+n,c=s+o;this.cropBox=Pr([o,i,c,a],[1,4])}else this.cropBox=Pr([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(e,t={}){if(oe().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!e){if(e=document.createElement("video"),!t.resizeWidth||!t.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");e.width=t.resizeWidth,e.height=t.resizeHeight}const n=new h0(e,t);return await n.start(),n}async start(){this.webcamConfig.facingMode&&A(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. 
Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(e){throw e.message=`Error thrown while initializing video stream: ${e.message}`,e}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(e){console.log(e),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(e=>{this.webcamVideoElement.onloadedmetadata=()=>{e()}})}async next(){if(this.isClosed)return{value:null,done:!0};let e;try{e=OT(this.webcamVideoElement)}catch(t){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(t)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(e),done:!1}}catch(t){throw new Error(`Error thrown cropping the video: ${t.message}`)}finally{e.dispose()}else return{value:e,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(e){return Q(()=>{const t=e.toFloat().expandDims(0);let n;n=zr.cropAndResize(t,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");const s=n.shape;return n.reshape(s.slice(1))})}async capture(){return(await this.next()).value}stop(){const e=this.stream.getTracks();e.forEach(t=>t.stop());try{this.webcamVideoElement.srcObject=null}catch(t){console.log(t),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class u0{}class d0 extends Sn{split(e){return new $H(this,e)}}class $H extends d0{constructor(e,t){super();this.upstream=e,this.impl=new UH(e,t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class UH extends oS{constructor(e,t){super();this.upstream=e,this.separator=t,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){const e=await this.upstream.next();if(e.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);const t=e.value.split(this.separator);t[0]=this.carryover+t[0];for(const n of t.slice(0,-1))this.outputQueue.push(n);return this.carryover=t[t.length-1],!0}}class BH extends Sn{decodeUTF8(){return new MH(this)}}class MH extends d0{constructor(e){super();this.upstream=e,this.impl=new PH(e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class PH extends oS{constructor(e){super();if(this.upstream=e,oe().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{const{StringDecoder:t}=require("string_decoder");this.decoder=new t("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){const e=await this.upstream.next();let t;if(e.done)return!1;t=e.value;let n;return oe().get("IS_BROWSER")?n=this.decoder.decode(t,{stream:!0}):n=this.decoder.write(Buffer.from(t.buffer)),this.outputQueue.push(n),!0}}class p0 extends BH{constructor(e,t={}){super();this.file=e,this.options=t,A(e instanceof Uint8Array||(oe().get("IS_BROWSER")?e instanceof File||e instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=t.offset||0,this.chunkSize=t.chunkSize||1024*1024}summary(){return`FileChunks 
${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};const e=new Promise((t,n)=>{const s=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)t(new Uint8Array(this.file.slice(this.offset,s)));else{const i=new FileReader;i.onload=a=>{let c=i.result;if(c instanceof ArrayBuffer&&(c=new Uint8Array(c)),!(c instanceof Uint8Array))return n(new TypeError("FileReader returned unknown type."));t(c)},i.onabort=a=>n(new Error("Aborted")),i.onerror=a=>n(new Error(a.type));const o=this.file.slice(this.offset,s);i.readAsArrayBuffer(o)}this.offset=s});return{value:await e,done:!1}}}async function zH(e,t={}){let n,s;typeof e=="string"?n=e:(n=e.url,s=VH(e));const i=await nT(n,s);if(i.ok){const o=new Uint8Array(await i.arrayBuffer());return new p0(o,t)}else throw new Error(i.statusText)}const VH=e=>{const t={method:e.method,headers:e.headers,body:e.body,mode:e.mode,credentials:e.credentials,cache:e.cache,redirect:e.redirect,referrer:e.referrer,integrity:e.integrity};return t};function m0(e){return typeof e=="string"&&e.substr(0,7)==="file://"}class f0 extends u0{constructor(e,t={}){super();this.input=e,this.options=t}async iterator(){if(m0(this.input)&&oe().get("IS_NODE")){const e=require("fs");this.input=e.readFileSync(this.input.substr(7))}return new p0(this.input,this.options)}}class g0 extends u0{constructor(e,t={}){super();this.url=e,this.fileOptions=t}async iterator(){return m0(this.url)?new f0(this.url,this.fileOptions).iterator():zH(this.url,this.fileOptions)}}function GH(e,t={}){return new c0(new g0(e),t)}function YH(e){const t=su(e);return ps(async()=>t)}function HH(e){return ps(async()=>{const t=await e();return su(()=>t.next())})}async function qH(e,t){return h0.create(e,t)}async function jH(e){return l0.create(e)}const y0="2.7.0";var KH=Object.freeze({__proto__:null,array:kH,Dataset:lc,zip:FH,CSVDataset:c0,TextLineDataset:r0,csv:GH,func:YH,generator:HH,microphone:jH,webcam:qH,FileDataSource:f0,URLDataSource:g0,version_data:y0});function Te(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&A(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}const XH=Dp,JH=lw,ZH=hw,QH=uw,eq=Ap;class tq extends y{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new p(this,Ki())}write(e,t,n){this.firstUse&&(this.firstUse=!1,oe().get("IS_NODE")&&Za(` + ${r}, and tensor's shape is: ${n.shape}`);let s=r===0?0:n.size/r,c=rt(()=>{let p=[];n=Q(n,[1,r,s]);for(let f=0;f{switch(n.op){case"If":case"StatelessIf":{let r=A("thenBranch",n,t,e),o=A("elseBranch",n,t,e),s=A("cond",n,t,e),c=A("args",n,t,e),l=await s.data();return l[0]?e.functionMap[r].executeFunctionAsync(c,e.tensorArrayMap,e.tensorListMap):e.functionMap[o].executeFunctionAsync(c,e.tensorArrayMap,e.tensorListMap)}case"While":case"StatelessWhile":{let r=A("body",n,t,e),o=A("cond",n,t,e),s=A("args",n,t,e),c=await e.functionMap[o].executeFunctionAsync(s,e.tensorArrayMap,e.tensorListMap),l=s.map(m=>m.id),p=await c[0].data();c.forEach(m=>{!m.kept&&l.indexOf(m.id)===-1&&m.dispose()});let f=s;for(;p[0];){let m=f;f=await e.functionMap[r].executeFunctionAsync(f,e.tensorArrayMap,e.tensorListMap);let y=f.map(v=>v.id);m.forEach(v=>{!v.kept&&l.indexOf(v.id)===-1&&y.indexOf(v.id)===-1&&v.dispose()});let b=await e.functionMap[o].executeFunctionAsync(f,e.tensorArrayMap,e.tensorListMap);p=await b[0].data(),b.forEach(v=>{!v.kept&&l.indexOf(v.id)===-1&&y.indexOf(v.id)===-1&&v.dispose()})}return f}case"LoopCond":{let 
r=A("pred",n,t,e);return[Ts(r)]}case"Switch":{let r=A("pred",n,t,e),o=A("data",n,t,e);return o.kept||(o=Ts(o)),(await r.data())[0]?[void 0,o]:[o,void 0]}case"Merge":{let r=n.inputNames.find(o=>fr(o,t,e)!==void 0);if(r){let o=fr(r,t,e);return[Ts(o)]}return}case"Enter":{let r=A("frameName",n,t,e),o=A("tensor",n,t,e);return e.enterFrame(r),[Ts(o)]}case"Exit":{let r=A("tensor",n,t,e);return e.exitFrame(),[Ts(r)]}case"NextIteration":{let r=A("tensor",n,t,e);return e.nextIteration(),[Ts(r)]}case"TensorArrayV3":{let r=A("size",n,t,e),o=A("dtype",n,t,e),s=A("elementShape",n,t,e),c=A("dynamicSize",n,t,e),l=A("clearAfterRead",n,t,e),p=A("identicalElementShapes",n,t,e),f=A("name",n,t,e),m=new YH(f,o,r,s,p,c,l);return e.addTensorArray(m),[m.idTensor,Et(1)]}case"TensorArrayWriteV3":{let r=A("tensorArrayId",n,t,e),o=A("index",n,t,e),s=A("tensor",n,t,e),c=e.getTensorArray(r.id);return c.write(o,s),[c.idTensor]}case"TensorArrayReadV3":{let r=A("tensorArrayId",n,t,e),o=A("index",n,t,e),s=e.getTensorArray(r.id);return[s.read(o)]}case"TensorArrayGatherV3":{let r=A("tensorArrayId",n,t,e),o=A("indices",n,t,e),s=A("dtype",n,t,e),c=e.getTensorArray(r.id);return[c.gather(o,s)]}case"TensorArrayScatterV3":{let r=A("tensorArrayId",n,t,e),o=A("indices",n,t,e),s=A("tensor",n,t,e),c=e.getTensorArray(r.id);return c.scatter(o,s),[c.idTensor]}case"TensorArrayConcatV3":{let r=A("tensorArrayId",n,t,e),o=e.getTensorArray(r.id),s=A("dtype",n,t,e);return[o.concat(s)]}case"TensorArraySplitV3":{let r=A("tensorArrayId",n,t,e),o=A("tensor",n,t,e),s=A("lengths",n,t,e),c=e.getTensorArray(r.id);return c.split(s,o),[c.idTensor]}case"TensorArraySizeV3":{let r=A("tensorArrayId",n,t,e),o=e.getTensorArray(r.id);return[Et(o.size(),"int32")]}case"TensorArrayCloseV3":{let r=A("tensorArrayId",n,t,e),o=e.getTensorArray(r.id);return o.clearAndClose(),[o.idTensor]}case"TensorListSetItem":{let r=A("tensorListId",n,t,e),o=A("index",n,t,e),s=A("tensor",n,t,e),c=e.getTensorList(r.id);return c.setItem(o,s),[c.idTensor]}case"TensorListGetItem":{let r=A("tensorListId",n,t,e),o=A("index",n,t,e),s=A("elementShape",n,t,e),c=A("elementDType",n,t,e),l=e.getTensorList(r.id);return[l.getItem(o,s,c)]}case"TensorListScatterV2":case"TensorListScatter":{let r=A("indices",n,t,e),o=A("tensor",n,t,e),s=A("elementShape",n,t,e),c=A("numElements",n,t,e),l=QH(o,r,s,c);return e.addTensorList(l),[l.idTensor]}case"TensorListReserve":{let r=A("elementShape",n,t,e),o=A("elementDType",n,t,e),s=A("numElements",n,t,e),c=ZH(r,o,s);return e.addTensorList(c),[c.idTensor]}case"TensorListGather":{let r=A("tensorListId",n,t,e),o=A("indices",n,t,e),s=A("elementShape",n,t,e),c=A("elementDType",n,t,e),l=e.getTensorList(r.id);return[l.gather(o,c,s)]}case"TensorListStack":{let r=A("tensorListId",n,t,e),o=A("elementShape",n,t,e),s=A("elementDType",n,t,e),c=A("numElements",n,t,e),l=e.getTensorList(r.id);return[l.stack(o,s,c)]}case"TensorListFromTensor":{let r=A("tensor",n,t,e),o=A("elementShape",n,t,e),s=A("elementDType",n,t,e),c=JH(r,o,s);return e.addTensorList(c),[c.idTensor]}case"TensorListConcat":{let r=A("tensorListId",n,t,e),o=e.getTensorList(r.id),s=A("dtype",n,t,e),c=A("elementShape",n,t,e);return[o.concat(s,c)]}case"TensorListPushBack":{let r=A("tensorListId",n,t,e),o=A("tensor",n,t,e),s=e.getTensorList(r.id);return s.pushBack(o),[s.idTensor]}case"TensorListPopBack":{let r=A("tensorListId",n,t,e),o=A("elementShape",n,t,e),s=A("elementDType",n,t,e),c=e.getTensorList(r.id);return[c.popBack(o,s)]}case"TensorListSplit":{let 
r=A("tensor",n,t,e),o=A("elementShape",n,t,e),s=A("lengths",n,t,e),c=tj(r,s,o);return e.addTensorList(c),[c.idTensor]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},Nst="control";function d$(n,t,e){let[r,o]=A("fusedOps",n,t,e),s=r==="biasadd",c=o==="prelu",l=r==="fusedbatchnorm",p=A("numArgs",n,t,e);if(s){if(c&&p!==2)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!c&&p!==1)throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.")}if(l)throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.");let f=A("strides",n,t,e),m=Nm(n,t,e),y=A("dataFormat",n,t,e).toUpperCase(),b=A("dilations",n,t,e),[v,T]=A("args",n,t,e);return{stride:f,pad:m,dataFormat:y,dilations:b,biasArg:v,preluArg:T,activationFunc:o}}let nj=(n,t,e)=>{switch(n.op){case"Conv1D":{let r=A("stride",n,t,e),o=A("pad",n,t,e),s=A("dataFormat",n,t,e).toUpperCase(),c=A("dilation",n,t,e);return[cd(A("x",n,t,e),A("filter",n,t,e),r,o,s,c)]}case"Conv2D":{let r=A("strides",n,t,e),o=Nm(n,t,e),s=A("dataFormat",n,t,e).toUpperCase(),c=A("dilations",n,t,e);return[fs(A("x",n,t,e),A("filter",n,t,e),[r[1],r[2]],o,s,[c[1],c[2]])]}case"_FusedConv2D":{let{stride:r,pad:o,dataFormat:s,dilations:c,biasArg:l,preluArg:p,activationFunc:f}=d$(n,t,e);return[qw({x:A("x",n,t,e),filter:A("filter",n,t,e),strides:[r[1],r[2]],pad:o,dataFormat:s,dilations:[c[1],c[2]],bias:l,activation:f,preluActivationWeights:p})]}case"FusedDepthwiseConv2dNative":{let{stride:r,pad:o,dataFormat:s,dilations:c,biasArg:l,preluArg:p,activationFunc:f}=d$(n,t,e);return[cC({x:A("x",n,t,e),filter:A("filter",n,t,e),strides:[r[1],r[2]],pad:o,dataFormat:s,dilations:[c[1],c[2]],bias:l,activation:f,preluActivationWeights:p})]}case"Conv2DBackpropInput":case"Conv2dTranspose":{let r=A("outputShape",n,t,e),o=A("strides",n,t,e),s=Nm(n,t,e);return[ld(A("x",n,t,e),A("filter",n,t,e),r,[o[1],o[2]],s)]}case"DepthwiseConv2dNative":case"DepthwiseConv2d":{let r=A("strides",n,t,e),o=Nm(n,t,e),s=A("dilations",n,t,e),c=A("dataFormat",n,t,e).toUpperCase();return[na(A("input",n,t,e),A("filter",n,t,e),[r[1],r[2]],o,c,[s[1],s[2]])]}case"Conv3D":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("dataFormat",n,t,e).toUpperCase(),c=A("dilations",n,t,e);return[gw(A("x",n,t,e),A("filter",n,t,e),[r[1],r[2],r[3]],o,s,[c[1],c[2],c[3]])]}case"AvgPool":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("kernelSize",n,t,e);return[ep(A("x",n,t,e),[s[1],s[2]],[r[1],r[2]],o)]}case"MaxPool":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("kernelSize",n,t,e);return[lp(A("x",n,t,e),[s[1],s[2]],[r[1],r[2]],o)]}case"MaxPoolWithArgmax":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("kernelSize",n,t,e),c=A("includeBatchInIndex",n,t,e),{result:l,indexes:p}=R_(A("x",n,t,e),[s[1],s[2]],[r[1],r[2]],o,c);return[l,p]}case"AvgPool3D":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("kernelSize",n,t,e);return[fw(A("x",n,t,e),[s[1],s[2],s[3]],[r[1],r[2],r[3]],o)]}case"MaxPool3D":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("kernelSize",n,t,e);return[Cw(A("x",n,t,e),[s[1],s[2],s[3]],[r[1],r[2],r[3]],o)]}case"Dilation2D":{let r=A("strides",n,t,e),o=A("pad",n,t,e),s=A("dilations",n,t,e),c=r[1],l=r[2],p=s[1],f=s[2];return[bw(A("x",n,t,e),A("filter",n,t,e),[c,l],o,[p,f],"NHWC")]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},_st="convolution";let rj=(n,t,e)=>{switch(n.op){case"Fill":{let 
r=A("shape",n,t,e),o=A("dtype",n,t,e),s=A("value",n,t,e);return[Cc(r,s,o)]}case"LinSpace":{let r=A("start",n,t,e),o=A("stop",n,t,e),s=A("num",n,t,e);return[D_(r,o,s)]}case"Multinomial":{let r=A("logits",n,t,e),o=A("numSamples",n,t,e),s=A("seed",n,t,e);return[P_(r,o,s)]}case"OneHot":{let r=A("indices",n,t,e),o=A("depth",n,t,e),s=A("onValue",n,t,e),c=A("offValue",n,t,e);return[Zi(r,o,s,c)]}case"Ones":return[ho(A("shape",n,t,e),A("dtype",n,t,e))];case"OnesLike":return[qn(A("x",n,t,e))];case"RandomUniform":return[aa(A("shape",n,t,e),A("minval",n,t,e),A("maxval",n,t,e),A("dtype",n,t,e))];case"Range":{let r=A("start",n,t,e),o=A("stop",n,t,e),s=A("step",n,t,e);return[hp(r,o,s,A("dtype",n,t,e))]}case"TruncatedNormal":{let r=A("shape",n,t,e),o=A("mean",n,t,e),s=A("stdDev",n,t,e),c=A("seed",n,t,e);return[yp(r,o,s,A("dtype",n,t,e),c)]}case"Zeros":return[xe(A("shape",n,t,e),A("dtype",n,t,e))];case"ZerosLike":return[re(A("x",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Cst="creation";function Q0(n,t,e){let r=A("boxes",n,t,e),o=A("scores",n,t,e),s=A("maxOutputSize",n,t,e),c=A("iouThreshold",n,t,e),l=A("scoreThreshold",n,t,e),p=A("softNmsSigma",n,t,e);return{boxes:r,scores:o,maxOutputSize:s,iouThreshold:c,scoreThreshold:l,softNmsSigma:p}}let oj=async(n,t,e)=>{switch(n.op){case"NonMaxSuppressionV5":{let{boxes:r,scores:o,maxOutputSize:s,iouThreshold:c,scoreThreshold:l,softNmsSigma:p}=Q0(n,t,e),f=await pi.nonMaxSuppressionWithScoreAsync(r,o,s,c,l,p);return[f.selectedIndices,f.selectedScores]}case"NonMaxSuppressionV4":{let{boxes:r,scores:o,maxOutputSize:s,iouThreshold:c,scoreThreshold:l}=Q0(n,t,e),p=A("padToMaxOutputSize",n,t,e),f=await pi.nonMaxSuppressionPaddedAsync(r,o,s,c,l,p);return[f.selectedIndices,f.validOutputs]}case"NonMaxSuppressionV3":case"NonMaxSuppressionV2":{let{boxes:r,scores:o,maxOutputSize:s,iouThreshold:c,scoreThreshold:l}=Q0(n,t,e);return[await pi.nonMaxSuppressionAsync(r,o,s,c,l)]}case"Where":{let r=$t(A("condition",n,t,e),"bool"),o=[await Ww(r)];return r.dispose(),o}case"ListDiff":return L_(A("x",n,t,e),A("y",n,t,e));default:throw TypeError(`Node type ${n.op} is not implemented`)}},Sst="dynamic";let sj=(n,t,e)=>{switch(n.op){case"TopKV2":{let r=A("x",n,t,e),o=A("k",n,t,e),s=A("sorted",n,t,e),c=Bw(r,o,s);return[c.values,c.indices]}case"Unique":{let r=A("x",n,t,e),o=$d(r);return[o.values,o.indices]}case"UniqueV2":{let r=A("x",n,t,e),o=A("axis",n,t,e),s=$d(r,o);return[s.values,s.indices]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},$st="evaluation";let ij=(n,t,e)=>{switch(n.op){case"Const":return t[n.name];case"PlaceholderWithDefault":let r=A("default",n,t,e);return[fr(n.name,t,e)||r];case"Placeholder":return[fr(n.name,t,e)];case"Identity":case"StopGradient":case"FakeQuantWithMinMaxVars":{let f=A("x",n,t,e);return[Ts(f)]}case"IdentityN":return A("x",n,t,e).map(f=>Ts(f));case"Snapshot":let o=A("x",n,t,e);return[Ts(o)];case"Shape":return[vr(A("x",n,t,e).shape,"int32")];case"ShapeN":return A("x",n,t,e).map(f=>vr(f.shape));case"Size":return[Et(A("x",n,t,e).size,"int32")];case"Rank":return[Et(A("x",n,t,e).rank,"int32")];case"NoOp":return[Et(1)];case"Print":let s=A("x",n,t,e),c=A("data",n,t,e),l=A("message",n,t,e),p=A("summarize",n,t,e);console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."),console.log(l);for(let f=0;ft.dispose()),this.tensorMap.clear(),this.handle.dispose()}size(){return this.tensorMap.size}async import(t,e){this.checkKeyAndValueTensor(t,e);let r=await t.data();return 
this.tensorMap.forEach(o=>o.dispose()),this.tensorMap.clear(),rt(()=>{let o=mo(e),s=r.length,c=o.length;_(s===c,()=>`The number of elements doesn't match, keys has ${s} elements, the values has ${c} elements.`);for(let l=0;l{let o=[];for(let s=0;s{switch(n.op){case"HashTable":case"HashTableV2":{let o=A("keyDType",n,t,e),s=A("valueDType",n,t,e),c=new aj(o,s);return r.addHashTable(n.name,c),[c.handle]}case"LookupTableImport":case"LookupTableImportV2":{let o=A("tableHandle",n,t,e,r),s=A("keys",n,t,e),c=A("values",n,t,e),l=r.getHashTableById(o.id);return[await l.import(s,c)]}case"LookupTableFind":case"LookupTableFindV2":{let o=A("tableHandle",n,t,e,r),s=A("keys",n,t,e),c=A("defaultValue",n,t,e),l=r.getHashTableById(o.id);return[await l.find(s,c)]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},Est="hash_table";let lj=(n,t,e)=>{switch(n.op){case"ResizeBilinear":{let r=A("images",n,t,e),o=A("size",n,t,e),s=A("alignCorners",n,t,e);return[pi.resizeBilinear(r,[o[0],o[1]],s)]}case"ResizeNearestNeighbor":{let r=A("images",n,t,e),o=A("size",n,t,e),s=A("alignCorners",n,t,e);return[pi.resizeNearestNeighbor(r,[o[0],o[1]],s)]}case"CropAndResize":{let r=A("image",n,t,e),o=A("boxes",n,t,e),s=A("boxInd",n,t,e),c=A("cropSize",n,t,e),l=A("method",n,t,e),p=A("extrapolationValue",n,t,e);return[pi.cropAndResize(r,o,s,c,l,p)]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},Dst="image";let uj=(n,t,e)=>{switch(n.op){case"Equal":return[po(A("a",n,t,e),A("b",n,t,e))];case"NotEqual":return[ci(A("a",n,t,e),A("b",n,t,e))];case"Greater":return[Fr(A("a",n,t,e),A("b",n,t,e))];case"GreaterEqual":return[ds(A("a",n,t,e),A("b",n,t,e))];case"Less":return[ap(A("a",n,t,e),A("b",n,t,e))];case"LessEqual":return[ai(A("a",n,t,e),A("b",n,t,e))];case"LogicalAnd":return[Yr(A("a",n,t,e),A("b",n,t,e))];case"LogicalNot":return[cp(A("a",n,t,e))];case"LogicalOr":return[yd(A("a",n,t,e),A("b",n,t,e))];case"Select":case"SelectV2":return[Xn(A("condition",n,t,e),A("a",n,t,e),A("b",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Ast="logical";let pj=(n,t,e)=>{switch(n.op){case"BatchMatMul":case"BatchMatMulV2":case"MatMul":return[ge(A("a",n,t,e),A("b",n,t,e),A("transposeA",n,t,e),A("transposeB",n,t,e))];case"Transpose":return[Kt(A("x",n,t,e),A("perm",n,t,e))];case"_FusedMatMul":let[r,o]=A("fusedOps",n,t,e),s=r==="biasadd",c=o==="prelu",l=A("numArgs",n,t,e);if(s){if(c&&l!==2)throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");if(!c&&l!==1)throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.")}let[p,f]=A("args",n,t,e);return[Pd({a:A("a",n,t,e),b:A("b",n,t,e),transposeA:A("transposeA",n,t,e),transposeB:A("transposeB",n,t,e),bias:p,activation:o,preluActivationWeights:f})];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Fst="matrices";let hj=(n,t,e)=>{switch(n.op){case"FusedBatchNorm":case"FusedBatchNormV2":return[ea(A("x",n,t,e),A("mean",n,t,e),A("variance",n,t,e),A("offset",n,t,e),A("scale",n,t,e),A("epsilon",n,t,e))];case"FusedBatchNormV3":return[ea(A("x",n,t,e),A("mean",n,t,e),A("variance",n,t,e),A("offset",n,t,e),A("scale",n,t,e),A("epsilon",n,t,e))];case"LRN":return[kw(A("x",n,t,e),A("radius",n,t,e),A("bias",n,t,e),A("alpha",n,t,e),A("beta",n,t,e))];case"Softmax":return[ca(A("x",n,t,e))];case"LogSoftmax":return[gd(A("x",n,t,e))];case"SparseToDense":return[Vw(A("sparseIndices",n,t,e),A("outputShape",n,t,e),A("sparseValues",n,t,e),A("defaultValue",n,t,e))];default:throw TypeError(`Node type 
${n.op} is not implemented`)}},Rst="normalization";let fj=(n,t,e)=>{switch(n.op){case"Max":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[lr(A("x",n,t,e),r,o)]}case"Mean":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[en(A("x",n,t,e),r,o)]}case"Min":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[Ec(A("x",n,t,e),r,o)]}case"Sum":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[zt(A("x",n,t,e),r,o)]}case"All":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[rd(A("x",n,t,e),r,o)]}case"Any":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[Zu(A("x",n,t,e),r,o)]}case"ArgMax":{let r=A("axis",n,t,e);return[Qu(A("x",n,t,e),r)]}case"ArgMin":{let r=A("axis",n,t,e);return[sw(A("x",n,t,e),r)]}case"Prod":{let r=A("axis",n,t,e),o=A("keepDims",n,t,e);return[wd(A("x",n,t,e),r,o)]}case"Cumsum":{let r=A("axis",n,t,e),o=A("exclusive",n,t,e),s=A("reverse",n,t,e);return[pd(A("x",n,t,e),r,o,s)]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},Pst="reduction";let dj=(n,t,e)=>{switch(n.op){case"ConcatV2":case"Concat":{let r=A("n",n,t,e),o=A("axis",n,t,e),s=A("tensors",n,t,e);return s=s.slice(0,r),[Qe(s,o)]}case"GatherV2":case"Gather":{let r=A("axis",n,t,e),o=A("x",n,t,e),s=A("indices",n,t,e);return[$c(o,$t(s,"int32"),r)]}case"ReverseV2":case"Reverse":{let r=A("axis",n,t,e),o=A("x",n,t,e);return[Rr(o,r)]}case"Slice":{let r=A("begin",n,t,e),o=A("size",n,t,e);return[ce(A("x",n,t,e),r,o)]}case"StridedSlice":{let r=A("begin",n,t,e),o=A("end",n,t,e),s=A("strides",n,t,e),c=A("beginMask",n,t,e),l=A("endMask",n,t,e),p=A("ellipsisMask",n,t,e),f=A("newAxisMask",n,t,e),m=A("shrinkAxisMask",n,t,e),y=A("x",n,t,e);return[Lw(y,r,o,s,c,l,p,f,m)]}case"Pack":return rt(()=>{let r=A("axis",n,t,e),o=A("tensors",n,t,e),s=o[0].shape,c=li(o[0]).shape,l=o.map(p=>{let f=lt(p.shape,s);if(!f&&!lt(li(p).shape,c))throw new Error("the input tensors shape does not match");return f?p:Q(p,s)});return[ur(l,r)]});case"Unpack":{let r=A("axis",n,t,e),o=A("tensor",n,t,e);return mo(o,r)}case"Tile":{let r=A("reps",n,t,e);return[ii(A("x",n,t,e),r)]}case"Split":case"SplitV":{let r=A("axis",n,t,e),o=A("numOrSizeSplits",n,t,e),s=A("x",n,t,e);return Tr(s,o,r)}case"ScatterNd":{let r=A("indices",n,t,e),o=A("values",n,t,e),s=A("shape",n,t,e);return[nC(r,o,s)]}case"GatherNd":{let r=A("x",n,t,e),o=A("indices",n,t,e);return[rC(r,o)]}case"SparseToDense":{let r=A("sparseIndices",n,t,e),o=A("outputShape",n,t,e),s=A("sparseValues",n,t,e),c=A("defaultValue",n,t,e);return[Vw(r,s,o,s.dtype===c.dtype?c:$t(c,s.dtype))]}default:throw TypeError(`Node type ${n.op} is not implemented`)}},Ost="slice_join";let mj=(n,t,e)=>{switch(n.op){case"FFT":return[dp(A("x",n,t,e))];case"IFFT":return[Rc(A("x",n,t,e))];case"RFFT":return[mp(A("x",n,t,e))];case"IRFFT":return[Sd(A("x",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Lst="spectral";let gj=(n,t,e)=>{switch(n.op){case"Cast":return[$t(A("x",n,t,e),A("dtype",n,t,e))];case"ExpandDims":{let r=A("axis",n,t,e);return[cr(A("x",n,t,e),r)]}case"Squeeze":{let r=A("axis",n,t,e);return[li(A("x",n,t,e),r)]}case"Reshape":return[Q(A("x",n,t,e),A("shape",n,t,e))];case"MirrorPad":return[Sw(A("x",n,t,e),A("padding",n,t,e),A("mode",n,t,e))];case"PadV2":case"Pad":return[Wo(A("x",n,t,e),A("padding",n,t,e),A("constantValue",n,t,e))];case"SpaceToBatchND":{let r=A("blockShape",n,t,e),o=A("paddings",n,t,e);return[up(A("x",n,t,e),r,o)]}case"BatchToSpaceND":{let r=A("blockShape",n,t,e),o=A("crops",n,t,e);return[np(A("x",n,t,e),r,o)]}case"DepthToSpace":{let 
r=A("blockSize",n,t,e),o=A("dataFormat",n,t,e).toUpperCase();return[yw(A("x",n,t,e),r,o)]}case"BroadcastTo":return[rp(A("x",n,t,e),A("shape",n,t,e))];default:throw TypeError(`Node type ${n.op} is not implemented`)}},Mst="transformation";function m$(n,t,e,r){let o=((s,c,l)=>{switch(s.category){case"arithmetic":return rt(()=>jH(s,c,l));case"basic_math":return rt(()=>KH(s,c,l));case"control":return ej(s,c,l);case"convolution":return rt(()=>nj(s,c,l));case"creation":return rt(()=>rj(s,c,l));case"dynamic":return oj(s,c,l);case"evaluation":return rt(()=>sj(s,c,l));case"image":return rt(()=>lj(s,c,l));case"graph":return rt(()=>ij(s,c,l));case"logical":return rt(()=>uj(s,c,l));case"matrices":return rt(()=>pj(s,c,l));case"normalization":return rt(()=>hj(s,c,l));case"reduction":return rt(()=>fj(s,c,l));case"slice_join":return rt(()=>dj(s,c,l));case"spectral":return rt(()=>mj(s,c,l));case"transformation":return rt(()=>gj(s,c,l));case"hash_table":return cj(s,c,l,r);case"custom":let p=l$(s.op);if(p&&p.customExecutor)return p.customExecutor(new HH(s,c,l));throw TypeError(`Custom op ${s.op} is not registered.`);default:throw TypeError(`Unknown op '${s.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`)}})(n,t,e);return qi(o)?o.then(s=>[].concat(s)):[].concat(o)}class g${constructor(t={},e={},r={},o={}){this.weightMap=t,this.tensorArrayMap=e,this.tensorListMap=r,this.functionMap=o,this.rootContext={id:0,frameName:"",iterationId:0},this.contexts=[this.rootContext],this.lastId=0,this.generateCurrentContextIds()}newFrame(t,e){return{id:t,frameName:e,iterationId:0}}set currentContext(t){this.contexts!==t&&(this.contexts=t,this.generateCurrentContextIds())}get currentContext(){return this.contexts}get currentContextId(){return this._currentContextIds[0]}get currentContextIds(){return this._currentContextIds}generateCurrentContextIds(){let t=[];for(let e=0;ee.id===0&&e.iterationId===0?"":`${e.frameName}-${e.iterationId}`).join("/"):""}enterFrame(t){this.contexts&&(this.lastId++,this.contexts=this.contexts.slice(),this.contexts.push(this.newFrame(this.lastId,t)),this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)))}exitFrame(){if(this.contexts&&this.contexts.length>1)this.contexts=this.contexts.slice(),this.contexts.splice(-1),this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty")}nextIteration(){if(this.contexts&&this.contexts.length>0){this.contexts=this.contexts.slice(),this.lastId++;let t=Object.assign({},this.contexts[this.contexts.length-1]);t.iterationId+=1,t.id=this.lastId,this.contexts.splice(-1,1,t),this._currentContextIds.splice(0,1,this.contextIdforContexts(this.contexts))}else throw new Error("Cannot increase frame iteration, the context is empty")}getWeight(t){return this.weightMap[t]}addTensorArray(t){this.tensorArrayMap[t.id]=t}getTensorArray(t){return this.tensorArrayMap[t]}addTensorList(t){this.tensorListMap[t.id]=t}getTensorList(t){return this.tensorListMap[t]}dispose(t){for(let e in this.tensorArrayMap)this.tensorArrayMap[e].clearAndClose(t);for(let e in this.tensorListMap)this.tensorListMap[e].clearAndClose(t)}}function y$(n,t,e,r){let o=new Set,s=[],c=null,l=null,p=new Set,f=Object.keys(n).map(b=>Nr(b)[0]),m=[];r!=null&&(m=r.map(b=>Nr(b.name)[0]));let y=[...t];for(;y.length>0;){let 
b=y.pop();if((b$(b)||vj(b)||Tj(b))&&(c==null&&(c=b,l=c.children.map(v=>v.name).filter(v=>o.has(v)))),o.add(b.name),e[b.name]!=null)continue;if(f.indexOf(b.name)!==-1)continue;if(m.indexOf(b.name)!==-1)continue;if(b.inputs.length===0){s.push(b.name);continue}b.inputs.forEach(v=>{if(p.has(v.name))return;p.add(v.name),y.push(v)})}return{inputs:n,outputs:t,usedNodes:o,missingInputs:s,dynamicNode:c,syncInputs:l}}function yj(n,t,e){let{usedNodes:r,inputs:o}=e,s=[],c=Object.keys(o).map(m=>Nr(m)[0]).map(m=>n.nodes[m]),l=n.initNodes;c.forEach(m=>{r.has(m.name)&&s.push(m)}),n.weights.forEach(m=>{r.has(m.name)&&s.push(m)}),l!=null&&l.forEach(m=>{r.has(m.name)&&s.push(m)});let p=new Set,f=[];for(;s.length>0;){let m=s.pop();p.add(m.name),t[m.name]||f.push(m),m.children.forEach(y=>{!p.has(y.name)&&r.has(y.name)&&y.inputs.every(b=>p.has(b.name))&&s.push(y)})}return f}let bj=["Switch","Merge","Enter","Exit","NextIteration","StatelessIf","StatelessWhile","if","While"],xj=["NonMaxSuppressionV2","NonMaxSuppressionV3","NonMaxSuppressionV5","Where"],wj=["HashTable","HashTableV2","LookupTableImport","LookupTableImportV2","LookupTableFind","LookupTableFindV2"];function b$(n){return bj.indexOf(n.op)>=0}function vj(n){return xj.indexOf(n.op)>=0}function Tj(n){return wj.indexOf(n.op)>=0}class _m{constructor(t,e){this.graph=t,this.parent=e,this.compiledMap=new Map,this._weightMap={},this.SEPERATOR=",",this._functions={},this._functionExecutorMap={},this._outputs=t.outputs,this._inputs=t.inputs,this._initNodes=t.initNodes,this._signature=t.signature,this._functions=t.functions,t.functions!=null&&Object.keys(t.functions).forEach(r=>{this._functionExecutorMap[r]=new _m(t.functions[r],this)})}get weightIds(){return this.parent?this.parent.weightIds:this._weightIds}get functionExecutorMap(){return this.parent?this.parent.functionExecutorMap:this._functionExecutorMap}get weightMap(){return this.parent?this.parent.weightMap:this._weightMap}set weightMap(t){let e=Object.keys(t).map(r=>t[r].map(o=>o.id));this._weightIds=[].concat(...e),this._weightMap=t}set resourceManager(t){this._resourceManager=t}get inputs(){return this._inputs.map(t=>({name:t.name,shape:t.attrParams.shape?t.attrParams.shape.value:void 0,dtype:t.attrParams.dtype?t.attrParams.dtype.value:void 0}))}get outputs(){return this._outputs.map(t=>({name:t.name,shape:t.attrParams.shape?t.attrParams.shape.value:void 0,dtype:t.attrParams.dtype?t.attrParams.dtype.value:void 0}))}get inputNodes(){return this._inputs.map(t=>t.signatureKey||t.name)}get outputNodes(){return this._outputs.map(t=>{let e=t.signatureKey||t.name;return t.defaultOutput?`${e}:${t.defaultOutput}`:e})}get functions(){return Object.keys(this._functions).reduce((t,e)=>(t[e]=this._functions[e].signature,t),{})}getCompilationKey(t,e){let r=t.map(s=>s.name).sort(),o=e.map(s=>s.name).sort();return r.join(this.SEPERATOR)+"--"+o.join(this.SEPERATOR)}compile(t,e){let r=y$(t,e,this.weightMap,this._initNodes),{missingInputs:o,dynamicNode:s,syncInputs:c}=r;if(s!=null)throw new Error(`This execution contains the node '${s.name}', which has the dynamic op '${s.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${c}]`);if(o.length>0){let l=e.map(f=>f.name),p=Object.keys(t);throw new Error(`Cannot compute the outputs [${l}] from the provided inputs [${p}]. 
Missing the following inputs: [${o}]`)}return yj(this.graph,this.weightMap,r)}execute(t,e){t=this.mapInputs(t);let r=Object.keys(t).sort();this.checkInputs(t),this.checkInputShapeAndType(t),e=this.mapOutputs(e),this.checkOutputs(e);let o=r.map(y=>this.graph.nodes[Nr(y)[0]]),s=e.map(y=>Nr(y)[0]),c=s.map(y=>this.graph.nodes[y]);c.length===0&&(c=this._outputs);let l=this.getCompilationKey(o,c),p=this.compiledMap.get(l);p==null&&(p=this.compile(t,c),this.compiledMap.set(l,p));let f={},m={};return rt(()=>{let y=new g$(this.weightMap,f,m,this.functionExecutorMap),b=Object.assign({},this.weightMap);Object.keys(t).forEach(N=>{let[S,D]=Nr(N),I=[];I[D]=t[N],b[S]=I});let v=this.getFrozenTensorIds(b),T={};for(let N=0;Nfr(N,b,y))})}getFrozenTensorIds(t){let e=[].concat.apply([],Object.keys(t).map(r=>t[r]).map(r=>r.map(o=>o.id)));return new Set(e)}checkTensorForDisposal(t,e,r,o,s,c,l){if(e.category==="control"||c.indexOf(t)!==-1)return;r[t].forEach(p=>{p!=null&&(l[p.id]=(l[p.id]||0)+e.children.length)}),e.inputs.forEach(p=>{if(p.category!=="control"){let f=uH(p.name,r,o);f!=null&&f.forEach(m=>{if(m&&!s.has(m.id)){let y=l[m.id];y===1?(m.dispose(),delete l[m.id]):y!=null&&l[m.id]--}})}})}async executeAsync(t,e){return this._executeAsync(t,e)}async _executeAsync(t,e,r=!1,o={},s={}){r||(t=this.mapInputs(t),this.checkInputs(t),this.checkInputShapeAndType(t),e=this.mapOutputs(e),this.checkOutputs(e));let c=new g$(this.weightMap,o,s,this.functionExecutorMap),l=await this.executeWithControlFlow(t,c,e,r),p=e.map(b=>fr(b,l,c)),f=p.map(b=>b.id),m=Object.keys(t).map(b=>t[b].id),y=new Set([...f,...m,...this.weightIds]);return Object.keys(l).forEach(b=>{let v=l[b];v.forEach(T=>{T&&!T.isDisposed&&!y.has(T.id)&&T.dispose()})}),this.parent==null&&c.dispose(y),p}async executeFunctionAsync(t,e,r){let o=t.reduce((s,c,l)=>(s[this.inputs[l].name]=c,s),{});return this._executeAsync(o,this.outputNodes,!0,e,r)}async executeWithControlFlow(t,e,r,o){let s=Object.keys(t),c=s.map(P=>this.graph.nodes[Nr(P)[0]]),l=r.map(P=>Nr(P)[0]),p=l.map(P=>this.graph.nodes[P]);p.length===0&&(p=this._outputs);let{usedNodes:f,missingInputs:m,dynamicNode:y,syncInputs:b}=y$(t,p,this.weightMap,this._initNodes),v=[...c,...this.graph.weights,...this._initNodes||[]].map(P=>({node:P,contexts:e.currentContext})),T=Object.assign({},this.weightMap);Object.keys(t).forEach(P=>{let[E,L]=Nr(P),B=[];B[L]=t[P],T[E]=B});let N={},S=this.getFrozenTensorIds(T),D={};for(;v.length>0;){let P=this.processStack(c,v,e,T,D,S,l,N,f);await Promise.all(P)}y==null&&!o&&console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");let I=p.filter(P=>!b$(P)&&!fr(P.name,T,e)).map(P=>P.name);if(I.length>0){let P="";throw y!=null&&(P=`Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${b}]`),new Error(`Cannot compute the outputs [${I}] from the provided inputs [${s}]. Consider providing the following inputs: [${m}]. 
${P}`)}return T}processStack(t,e,r,o,s,c,l,p,f){let m=[];for(;e.length>0;){let y=e.pop();r.currentContext=y.contexts;let b="";if(y.node.op==="Enter"&&A("isConstant",y.node,o,r)&&([b]=vs(y.node.name,r)),o[y.node.name]==null){let v=m$(y.node,o,r,this._resourceManager);b||([b]=vs(y.node.name,r));let T=r.currentContext;qi(v)?m.push(v.then(N=>(o[b]=N,r.currentContext=T,this.checkTensorForDisposal(b,y.node,o,r,c,l,p),this.processChildNodes(y.node,e,r,o,s,f),N))):(o[b]=v,this.checkTensorForDisposal(b,y.node,o,r,c,l,p),this.processChildNodes(y.node,e,r,o,s,f))}else this.processChildNodes(y.node,e,r,o,s,f)}return m}processChildNodes(t,e,r,o,s,c){t.children.forEach(l=>{let[p]=vs(l.name,r);if(s[p]||!c.has(l.name))return;l.op==="Merge"?l.inputNames.some(f=>!!fr(f,o,r))&&(s[p]=!0,e.push({contexts:r.currentContext,node:l})):l.inputNames.every(f=>!!fr(f,o,r))&&(s[p]=!0,e.push({contexts:r.currentContext,node:l}))})}dispose(){Object.keys(this.weightMap).forEach(t=>this.weightMap[t].forEach(e=>e.dispose()))}checkInputShapeAndType(t){Object.keys(t).forEach(e=>{let r=t[e],[o]=Nr(e),s=this.graph.nodes[o];if(s.attrParams.shape&&s.attrParams.shape.value){let c=s.attrParams.shape.value,l=c.length===r.shape.length&&r.shape.every((p,f)=>c[f]===-1||c[f]===p);_(l,()=>`The shape of dict['${s.name}'] provided in model.execute(dict) must be [${c}], but was [${r.shape}]`)}s.attrParams.dtype&&s.attrParams.dtype.value&&_(r.dtype===s.attrParams.dtype.value,()=>`The dtype of dict['${s.name}'] provided in model.execute(dict) must be ${s.attrParams.dtype.value}, but was ${r.dtype}`)})}mapInputs(t){let e={};for(let r in t)if(this._signature!=null&&this._signature.inputs!=null&&this._signature.inputs[r]!=null){let o=this._signature.inputs[r];e[o.name]=t[r]}else e[r]=t[r];return e}checkInputs(t){let e=Object.keys(t).filter(r=>{let[o]=Nr(r);return this.graph.nodes[o]==null});if(e.length>0)throw new Error(`The dict provided in model.execute(dict) has keys: [${e}] that are not part of graph`)}mapOutputs(t){return t.map(e=>{if(this._signature!=null&&this._signature.outputs!=null&&this._signature.outputs[e]!=null){let r=this._signature.outputs[e];return r.name}return e},{})}checkOutputs(t){t.forEach(e=>{let[r]=Nr(e);if(!this.graph.nodes[r])throw new Error(`The output '${e}' is not found in the graph`)})}}class kj{constructor(t={},e={}){this.hashTableNameToHandle=t,this.hashTableMap=e}addHashTable(t,e){this.hashTableNameToHandle[t]=e.handle,this.hashTableMap[e.id]=e}getHashTableHandleByName(t){return this.hashTableNameToHandle[t]}getHashTableById(t){return this.hashTableMap[t]}dispose(){for(let t in this.hashTableMap)this.hashTableMap[t].clearAndClose(),delete this.hashTableMap[t];for(let t in this.hashTableNameToHandle)this.hashTableNameToHandle[t].dispose(),delete this.hashTableNameToHandle[t]}}let Nj="?tfjs-format=file",_j="model.json";class x${constructor(t,e={}){this.modelUrl=t,this.loadOptions=e,this.version="n/a",e==null&&(this.loadOptions={}),this.resourceManager=new kj}get modelVersion(){return this.version}get inputNodes(){return this.executor.inputNodes}get outputNodes(){return this.executor.outputNodes}get inputs(){return this.executor.inputs}get outputs(){return this.executor.outputs}get weights(){return this.executor.weightMap}findIOHandler(){let t=this.modelUrl;if(t.load!=null)this.handler=t;else if(this.loadOptions.requestInit!=null)this.handler=Xf(t,this.loadOptions);else{let e=Bx(t,this.loadOptions);if(e.length===0)e.push(Xf(t,this.loadOptions));else if(e.length>1)throw new Error(`Found more than one (${e.length}) 
load handlers for URL '${[t]}'`);this.handler=e[0]}}async load(){if(this.findIOHandler(),this.handler.load==null)throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");let t=await this.handler.load();return this.loadSync(t)}loadSync(t){this.artifacts=t;let e=this.artifacts.modelTopology,r={};this.artifacts.userDefinedMetadata!=null&&(r=this.artifacts.userDefinedMetadata.signature),this.version=`${e.versions.producer}.${e.versions.minConsumer}`;let o=qf(this.artifacts.weightData,this.artifacts.weightSpecs);if(this.executor=new _m(u$.Instance.transformGraph(e,r)),this.executor.weightMap=this.convertTensorMapToTensorsMap(o),this.executor.resourceManager=this.resourceManager,t.modelInitializer!=null){let s=u$.Instance.transformGraph(t.modelInitializer);this.initializer=new _m(s),this.initializer.weightMap=this.executor.weightMap,this.initializer.resourceManager=this.resourceManager,this.initializer.executeAsync({},[])}return!0}async save(t,e){if(typeof t=="string"){let r=Mx(t);if(r.length===0)throw new Error(`Cannot find any save handlers for URL '${t}'`);if(r.length>1)throw new Error(`Found more than one (${r.length}) save handlers for URL '${t}'`);t=r[0]}if(t.save==null)throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");return t.save(this.artifacts)}predict(t,e){return this.execute(t,this.outputNodes)}normalizeInputs(t){if(!(t instanceof ot)&&!Array.isArray(t))return t;if(t=Array.isArray(t)?t:[t],t.length!==this.inputNodes.length)throw new Error(`Input tensor count mismatch,the graph model has ${this.inputNodes.length} placeholders, while there are ${t.length} input tensors.`);return this.inputNodes.reduce((e,r,o)=>(e[r]=t[o],e),{})}normalizeOutputs(t){return t=t||this.outputNodes,Array.isArray(t)?t:[t]}execute(t,e){t=this.normalizeInputs(t),e=this.normalizeOutputs(e);let r=this.executor.execute(t,e);return r.length>1?r:r[0]}async executeAsync(t,e){t=this.normalizeInputs(t),e=this.normalizeOutputs(e);let r=await this.executor.executeAsync(t,e);return r.length>1?r:r[0]}convertTensorMapToTensorsMap(t){return Object.keys(t).reduce((e,r)=>(e[r]=[t[r]],e),{})}dispose(){this.executor.dispose(),this.initializer&&this.initializer.dispose(),this.resourceManager.dispose()}}async function Cj(n,t={}){if(n==null)throw new Error("modelUrl in loadGraphModel() cannot be null. 
Please provide a url or an IOHandler that loads the model");t==null&&(t={}),t.fromTFHub&&(n.load==null&&(n.endsWith("/")||(n=n+"/"),n=`${n}${_j}${Nj}`));let e=new x$(n,t);return await e.load(),e}let w$="2.7.0";function Sj(n,t){return Cm(n,t)}function Cm(n,t,e=new Map,r=new Set){if(n==null)return null;if(r.has(n))throw new Error("Circular references are not supported.");if(e.has(n))return e.get(n);let o=t(n);if(o.recurse&&o.value!==null)throw new Error("A deep map function may not return both a value and recurse=true.");if(o.recurse)if(Jc(n)){let s=Array.isArray(n)?[]:{};r.add(n);for(let c in n){let l=n[c],p=Cm(l,t,e,r);s[c]=p}return r.delete(n),s}else throw new Error(`Can't recurse into non-iterable type: ${n}`);else return e.set(n,o.value),o.value}function $j(n,t=T$){return v$(n,t)}function v$(n,t,e=new Set){let r=n[0];if(e.has(r))throw new Error("Circular references are not supported.");let o=t(n);if(o.recurse&&o.value!==null)throw new Error("A deep zip function may not return both a value and recurse=true.");if(o.recurse)if(Jc(r)){let s=Array.isArray(r)?[]:{};e.add(r);for(let c in r){let l=n.map(f=>f[c]),p=v$(l,t,e);s[c]=p}return e.delete(r),s}else throw new Error(`Can't recurse into non-iterable type: ${r}`);else return o.value}function T$(n){return n===null?null:Jc(n[0])?{value:null,recurse:!0}:{value:n,recurse:!1}}async function k$(n,t){let e=new Map;Cm(n,t,e);for(let o of Array.from(e.keys())){let s=e.get(o);if(qi(s)){let c=await s;e.set(o,c)}}let r=Cm(n,t,e);return r}function Jc(n){return n!=null&&!ArrayBuffer.isView(n)&&(Array.isArray(n)||typeof n=="object"&&!(n instanceof ot))}function Ij(n){return n==null||Ej(n)||Array.isArray(n)||typeof n=="object"&&n instanceof ot||gn(n)}function Ej(n){return n===null||typeof n!="object"&&typeof n!="function"}function Dj(n){return Sj(n,Aj)}function Aj(n){return n instanceof ot?{value:n.clone(),recurse:!1}:Jc(n)?{value:null,recurse:!0}:{value:n,recurse:!1}}class N${constructor(t){if(this.capacity=t,this.begin=0,this.end=0,t==null)throw new RangeError("Can't create a ring buffer of unknown capacity.");if(t<1)throw new RangeError("Can't create ring buffer of capacity < 1.");this.data=new Array(t),this.doubledCapacity=2*t}wrap(t){for(;t<0;)t+=this.doubledCapacity;return t%this.doubledCapacity}get(t){if(t<0)throw new RangeError("Can't get item at a negative index.");return this.data[t%this.capacity]}set(t,e){if(t<0)throw new RangeError("Can't set item at a negative index.");this.data[t%this.capacity]=e}length(){let t=this.end-this.begin;return t<0&&(t=this.doubledCapacity+t),t}isFull(){return this.length()===this.capacity}isEmpty(){return this.length()===0}push(t){if(this.isFull())throw new RangeError("Ring buffer is full.");this.set(this.end,t),this.end=this.wrap(this.end+1)}pushAll(t){for(let e of t)this.push(e)}pop(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");this.end=this.wrap(this.end-1);let t=this.get(this.end);return this.set(this.end,void 0),t}unshift(t){if(this.isFull())throw new RangeError("Ring buffer is full.");this.begin=this.wrap(this.begin-1),this.set(this.begin,t)}shift(){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");let t=this.get(this.begin);return this.set(this.begin,void 0),this.begin=this.wrap(this.begin+1),t}shuffleExcise(t){if(this.isEmpty())throw new RangeError("Ring buffer is empty.");let e=this.wrap(this.begin+t),r=this.get(e);return this.set(e,this.pop()),r}}class Sm extends 
N${constructor(){super(Sm.INITIAL_CAPACITY)}isFull(){return!1}push(t){super.isFull()&&this.expand(),super.push(t)}unshift(t){super.isFull()&&this.expand(),super.unshift(t)}expand(){let t=this.capacity*2,e=new Array(t),r=this.length();for(let o=0;o({value:t++,done:!1}))}function Yp(n){return new Pj(n)}function C$(n,t){return new $$(n,t)}function zst(n,t,e){return C$(Yp(n).take(t),e)}function Fj(n,t=vi.FAIL){return new Uj(n,t)}class En{async toArray(){let t=[],e=await this.next();for(;!e.done;)t.push(e.value),e=await this.next();return t}async toArrayForTest(){let t=this.prefetch(100),e=[],r=await t.next();for(;!r.done;)e.push(r.value),r=await t.next();return e}async resolveFully(){let t=await this.next();for(;!t.done;)t=await this.next()}async resolveWhile(t){let e=await this.next(),r=t(e.value);for(;!e.done&&r;)e=await this.next(),r=t(e.value)}handleErrors(t){return new Vj(this,t)}filter(t){return new zj(this,t)}map(t){return new Wj(this,t)}mapAsync(t){return new S$(this,t)}serialMapAsync(t){return new S$(this,t).serial()}flatmap(t){return new Gj(this,t)}async forEachAsync(t){return this.map(t).resolveFully()}async serialForEach(t){return this.serialMapAsync(t).resolveWhile(e=>e===!0)}rowMajorBatch(t,e=!0){return new Bj(this,t,e)}columnMajorBatch(t,e=!0,r=T$){let o=this.rowMajorBatch(t,e);return o.map(s=>$j(s,r))}concatenate(t,e){return new $$(_$([this,t]),e)}take(t){return t<0||t==null?this:new Mj(this,t)}skip(t){return t<0||t==null?this:new Lj(this,t)}prefetch(t){return new I$(this,t)}shuffle(t,e){return new qj(this,t,e)}serial(){return new Oj(this)}}class Rj extends En{constructor(t){super();this.items=t,this.trav=0}summary(){return`Array of ${this.items.length} items`}async next(){if(this.trav>=this.items.length)return{value:null,done:!0};let t=this.items[this.trav];return this.trav++,{value:Dj(t),done:!1}}}class Pj extends En{constructor(t){super();this.nextFn=t}summary(){return"Function call"}async next(){try{return this.nextFn()}catch(t){throw t.message=`Error thrown while iterating through a dataset: ${t.message}`,t}}}class Oj extends En{constructor(t){super();this.upstream=t,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Serial`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){return this.upstream.next()}}class Lj extends En{constructor(t,e){super();this.upstream=t,this.maxCount=e,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Skip`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.count++ Take`}async next(){return this.count++>=this.maxCount?{value:null,done:!0}:this.upstream.next()}}class Bj extends En{constructor(t,e,r=!0){super();this.upstream=t,this.batchSize=e,this.enableSmallLastBatch=r,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> RowMajorBatch`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){let t=[];for(;t.length0?{value:t,done:!1}:{value:null,done:!0};t.push(e.value)}return{value:t,done:!1}}}class zj extends En{constructor(t,e){super();this.upstream=t,this.predicate=e,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> Filter`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;){let t=await 
this.upstream.next();if(t.done||this.predicate(t.value))return t;Xt(t.value)}}}class Wj extends En{constructor(t,e){super();this.upstream=t,this.transform=e}summary(){return`${this.upstream.summary()} -> Map`}async next(){let t=await this.upstream.next();if(t.done)return{value:null,done:!0};let e=cs(t.value),r=this.transform(t.value),o=cs(r);for(let s of e)Gf(s,o)||s.dispose();return{value:r,done:!1}}}class Vj extends En{constructor(t,e){super();this.upstream=t,this.handler=e,this.count=0,this.lastRead=Promise.resolve({value:null,done:!1})}summary(){return`${this.upstream.summary()} -> handleErrors`}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;;)try{return await this.upstream.next()}catch(t){if(!this.handler(t))return{value:null,done:!0}}}}class S$ extends En{constructor(t,e){super();this.upstream=t,this.transform=e}summary(){return`${this.upstream.summary()} -> AsyncMap`}async next(){let t=await this.upstream.next();if(t.done)return{value:null,done:!0};let e=cs(t.value),r=await this.transform(t.value),o=cs(r);for(let s of e)Gf(s,o)||s.dispose();return{value:r,done:!1}}}class t1 extends En{constructor(){super();this.outputQueue=new Sm,this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}async serialNext(){for(;this.outputQueue.length()===0;)if(!await this.pump())return{value:null,done:!0};return{value:this.outputQueue.shift(),done:!1}}}class Gj extends t1{constructor(t,e){super();this.upstream=t,this.transform=e}summary(){return`${this.upstream.summary()} -> Flatmap`}async pump(){let t=await this.upstream.next();if(t.done)return!1;let e=cs(t.value),r=this.transform(t.value),o=cs(r);this.outputQueue.pushAll(r);for(let s of e)Gf(s,o)||s.dispose();return!0}}class $$ extends En{constructor(t,e){super();this.baseErrorHandler=e,this.lastRead=null,this.iterator=null,this.moreIterators=t}summary(){let t="TODO: fill in upstream of chained summaries";return`${t} -> Chained`}async next(){return this.lastRead=this.readFromChain(this.lastRead),this.lastRead}async readFromChain(t){if(await t,this.iterator==null){let r=await this.moreIterators.next();if(r.done)return{value:null,done:!0};this.iterator=r.value,this.baseErrorHandler!=null&&(this.iterator=this.iterator.handleErrors(this.baseErrorHandler))}let e=await this.iterator.next();return e.done?(this.iterator=null,this.readFromChain(t)):e}}var vi;(function(n){n[n.FAIL=0]="FAIL",n[n.SHORTEST=1]="SHORTEST",n[n.LONGEST=2]="LONGEST"})(vi||(vi={}));class Uj extends En{constructor(t,e=vi.FAIL){super();this.iterators=t,this.mismatchMode=e,this.count=0,this.currentPromise=null}summary(){let t="TODO: fill in upstream of zip summaries";return`{${t}} -> Zip`}async nextState(t){await t;let e=0,r=0;function o(c){if(c instanceof En){let l=c.next();return{value:l.then(p=>(e++,p.done&&r++,p.value)),recurse:!1}}else return{value:null,recurse:!0}}let s=await k$(this.iterators,o);if(e===r)return{value:null,done:!0};if(r>0)switch(this.mismatchMode){case vi.FAIL:throw new Error(`Zipped streams should have the same length. 
Mismatched at element ${this.count}.`);case vi.SHORTEST:return{value:null,done:!0};case vi.LONGEST:default:}return this.count++,{value:s,done:!1}}async next(){return this.currentPromise=this.nextState(this.currentPromise),this.currentPromise}}class I$ extends En{constructor(t,e){super();this.upstream=t,this.bufferSize=e,this.buffer=new N$(e)}summary(){return`${this.upstream.summary()} -> Prefetch`}refill(){for(;!this.buffer.isFull();){let t=this.upstream.next();this.buffer.push(t)}}next(){return this.refill(),this.buffer.shift()}}class qj extends I${constructor(t,e,r){super(t,e);this.upstream=t,this.windowSize=e,this.upstreamExhausted=!1,this.random=Fc(r||or().toString()),this.lastRead=Promise.resolve({value:null,done:!1})}async next(){return this.lastRead=this.lastRead.then(()=>this.serialNext()),this.lastRead}randomInt(t){return Math.floor(this.random()*t)}chooseIndex(){return this.randomInt(this.buffer.length())}async serialNext(){for(this.upstreamExhausted||this.refill();!this.buffer.isEmpty();){let t=this.chooseIndex(),e=await this.buffer.shuffleExcise(t);if(e.done)this.upstreamExhausted=!0;else return this.refill(),e}return{value:null,done:!0}}}class Zc{constructor(){this.size=null}batch(t,e=!0){let r=this;_(t>0,()=>`batchSize needs to be positive, but it is + ${t}`);let o;return this.size===Infinity||this.size==null?o=this.size:e?o=Math.ceil(this.size/t):o=Math.floor(this.size/t),_r(async()=>(await r.iterator()).columnMajorBatch(t,e,Kj),o)}concatenate(t){let e=this,r;return this.size===Infinity||t.size===Infinity?r=Infinity:this.size!=null&&t.size!=null?r=this.size+t.size:r=null,_r(async()=>(await e.iterator()).concatenate(await t.iterator()),r)}filter(t){let e=this,r;return this.size===Infinity?r=Infinity:r=null,_r(async()=>(await e.iterator()).filter(o=>rt(()=>t(o))),r)}async forEachAsync(t){return(await this.iterator()).forEachAsync(t)}map(t){let e=this;return _r(async()=>(await e.iterator()).map(r=>rt(()=>t(r))),this.size)}mapAsync(t){let e=this;return _r(async()=>(await e.iterator()).mapAsync(t),this.size)}prefetch(t){if(t==null)throw new RangeError("`Dataset.prefetch()` requires bufferSize to be specified.");let e=this;return _r(async()=>(await e.iterator()).prefetch(t),this.size)}repeat(t){let e=this,r;return this.size!=null&&t>0?r=this.size*t:t===0?r=0:this.size!=null&&(t===void 0||t<0)?r=Infinity:r=null,_r(async()=>{let o=Yp(async()=>({value:await e.iterator(),done:!1}));return C$(o.take(t))},r)}skip(t){let e=this,r;return this.size!=null&&t>=0&&this.size>=t?r=this.size-t:this.size!=null&&(this.size(await e.iterator()).skip(t),r)}shuffle(t,e,r=!0){if(t==null||t<0)throw this.size==null?new RangeError("`Dataset.shuffle()` requires bufferSize to be specified."):new RangeError(`\`Dataset.shuffle()\` requires bufferSize to be specified. 
If your data fits in main memory (for regular JS objects), and/or GPU memory (for \`tf.Tensor\`s), consider setting bufferSize to the dataset size (${this.size} elements)`);let o=this,s=Fc(e||or().toString());return _r(async()=>{let c=s.int32();return r&&(c+=s.int32()),(await o.iterator()).shuffle(t,c.toString())},this.size)}take(t){let e=this,r;return this.size!=null&&this.size>t?r=t:this.size!=null&&this.size<=t?r=this.size:r=null,_r(async()=>(await e.iterator()).take(t),r)}async toArray(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArray()}async toArrayForTest(){if(this.size===Infinity)throw new Error("Can not convert infinite data stream to array.");return(await this.iterator()).toArrayForTest()}}Zc.MAX_BUFFER_SIZE=1e4;function _r(n,t=null){return new class extends Zc{constructor(){super(...arguments);this.size=t}async iterator(){return n()}}}function Hj(n){return _r(async()=>_$(n),n.length)}function jj(n){if(!Jc(n))throw new Error("The argument to zip() must be an object or array.");let t;if(Array.isArray(n))for(let e=0;e{let e=await k$(n,r=>{if(r instanceof Zc)return{value:r.iterator(),recurse:!1};if(Jc(r))return{value:null,recurse:!0};throw new Error("Leaves of the structure passed to zip() must be Datasets, not primitives.")});return Fj(e,vi.SHORTEST)},t)}function Kj(n){if(n===null)return null;let t=n[0];if(Ij(t)){let e=Xj(n);return{value:e,recurse:!1}}return{value:null,recurse:!0}}function Xj(n){if(n.length===0)throw new Error("Can't make a batch of zero elements.");return n[0]instanceof ot?ur(n):un(n)}class E$ extends Zc{constructor(t){super();this.input=t}async iterator(){let t=await this.input.iterator(),e=t.decodeUTF8(),r=e.split(` +`).map(o=>(o.endsWith("\r")&&(o=o.slice(0,-1)),o));return r}}let $m='"',Jp=Symbol("out"),D$=Symbol("field"),Im=Symbol("quote"),e1=Symbol("quoteafterquote"),A$=Symbol("quoteinquote");class F$ extends Zc{constructor(t,e){super();this.input=t,this.hasHeader=!0,this.fullColumnNames=null,this.columnNamesValidated=!1,this.columnConfigs=null,this.configuredColumnsOnly=!1,this.delimiter=",",this.delimWhitespace=!1,this.base=new E$(t),e||(e={}),this.hasHeader=!(e.hasHeader===!1),this.fullColumnNames=e.columnNames,this.columnConfigs=e.columnConfigs,this.configuredColumnsOnly=e.configuredColumnsOnly,e.delimWhitespace?(_(e.delimiter==null,()=>"Delimiter should not be provided when delimWhitespace is true."),this.delimWhitespace=!0,this.delimiter=" "):this.delimiter=e.delimiter?e.delimiter:","}async columnNames(){return this.columnNamesValidated||await this.setColumnNames(),this.configuredColumnsOnly?Object.keys(this.columnConfigs):this.fullColumnNames}async setColumnNames(){let t=await this.maybeReadHeaderLine();if(!this.fullColumnNames&&!t)throw new Error("Column names must be provided if there is no header line.");this.fullColumnNames&&t&&_(t.length===this.fullColumnNames.length,()=>"The length of provided columnNames ("+this.fullColumnNames.length.toString()+") does not match the length of the header line read from file ("+t.length.toString()+")."),this.fullColumnNames||(this.fullColumnNames=t);let e=this.fullColumnNames.reduce((o,s)=>(o[s]=o[s]+1||1,o),{}),r=Object.keys(e).filter(o=>e[o]>1);if(_(r.length===0,()=>"Duplicate column names found: "+r.toString()),this.columnConfigs)for(let o of Object.keys(this.columnConfigs)){let s=this.fullColumnNames.indexOf(o);if(s===-1)throw new Error('The key "'+o+'" provided in columnConfigs does not match any of the column names 
('+this.fullColumnNames.toString()+").")}this.columnNamesValidated=!0}async maybeReadHeaderLine(){if(this.hasHeader){let t=await this.base.iterator(),e=await t.next();if(e.done)throw new Error("No data was found for CSV parsing.");let r=e.value,o=this.parseRow(r,!1);return o}else return null}async iterator(){this.columnNamesValidated||await this.setColumnNames();let t=await this.base.iterator();return this.hasHeader&&(t=t.skip(1)),t.map(e=>this.makeDataElement(e))}makeDataElement(t){let e=this.parseRow(t),r={},o={};for(let s=0;s14||!Number.isInteger(e))throw new Error(`Invalid fftSize: it must be a power of 2 between 2 to 4 and 2 to 14, but got ${this.fftSize}`);if(this.numFrames=t.numFramesPerSpectrogram||43,this.sampleRateHz=t.sampleRateHz,this.columnTruncateLength=t.columnTruncateLength||this.fftSize,this.audioTrackConstraints=t.audioTrackConstraints,this.smoothingTimeConstant=t.smoothingTimeConstant||0,this.includeSpectrogram=!(t.includeSpectrogram===!1),this.includeWaveform=t.includeWaveform===!0,!this.includeSpectrogram&&!this.includeWaveform)throw new Error("Both includeSpectrogram and includeWaveform are false. At least one type of data should be returned.")}summary(){return"microphone"}static async create(t={}){if(ct().get("IS_NODE"))throw new Error("microphone API is only supported in browser environment.");let e=new n1(t);return await e.start(),e}async start(){try{this.stream=await navigator.mediaDevices.getUserMedia({audio:this.audioTrackConstraints==null?!0:this.audioTrackConstraints,video:!1})}catch(r){throw new Error(`Error thrown while initializing video stream: ${r.message}`)}if(!this.stream)throw new Error("Could not obtain audio from microphone.");let t=window.AudioContext||window.webkitAudioContext;if(this.audioContext=new t,!this.sampleRateHz)this.sampleRateHz=this.audioContext.sampleRate;else if(this.audioContext.sampleRate!==this.sampleRateHz)throw new Error(`Mismatch in sampling rate: Expected: ${this.sampleRateHz}; Actual: ${this.audioContext.sampleRate}`);let e=this.audioContext.createMediaStreamSource(this.stream);this.analyser=this.audioContext.createAnalyser(),this.analyser.fftSize=this.fftSize*2,this.analyser.smoothingTimeConstant=this.smoothingTimeConstant,e.connect(this.analyser),this.freqData=new Float32Array(this.fftSize),this.timeData=new Float32Array(this.fftSize);return}async next(){if(this.isClosed)return{value:null,done:!0};let t,e,r=await this.getAudioData();if(this.includeSpectrogram){let o=this.flattenQueue(r.freqDataQueue);t=this.getTensorFromAudioDataArray(o,[this.numFrames,this.columnTruncateLength,1])}if(this.includeWaveform){let o=this.flattenQueue(r.timeDataQueue);e=this.getTensorFromAudioDataArray(o,[this.numFrames*this.fftSize,1])}return{value:{spectrogram:t,waveform:e},done:!1}}async capture(){return(await this.next()).value}async getAudioData(){let t=[],e=[],r=0;return new Promise(o=>{let s=setInterval(()=>{this.includeSpectrogram&&(this.analyser.getFloatFrequencyData(this.freqData),this.freqData[0]===-Infinity&&o({freqDataQueue:t,timeDataQueue:e}),t.push(this.freqData.slice(0,this.columnTruncateLength))),this.includeWaveform&&(this.analyser.getFloatTimeDomainData(this.timeData),e.push(this.timeData.slice())),++r===this.numFrames&&(clearInterval(s),o({freqDataQueue:t,timeDataQueue:e}))},this.fftSize/this.sampleRateHz*1e3)})}stop(){this.isClosed||(this.isClosed=!0,this.analyser.disconnect(),this.audioContext.close(),this.stream!=null&&this.stream.getTracks().length>0&&this.stream.getTracks()[0].stop())}toArray(){throw new Error("Can not 
convert infinite audio stream to array.")}getSampleRate(){return this.sampleRateHz}flattenQueue(t){let e=t[0].length,r=new Float32Array(t.length*e);return t.forEach((o,s)=>r.set(o,s*e)),r}getTensorFromAudioDataArray(t,e){let r=new Float32Array(G(e));return r.set(t,r.length-t.length),un(r,e)}}class r1 extends En{constructor(t,e){super();if(this.webcamVideoElement=t,this.webcamConfig=e,this.isClosed=!0,this.resize=!1,this.needToResize())if(this.resize=!0,this.cropSize=[this.webcamConfig.resizeHeight,this.webcamConfig.resizeWidth],this.cropBoxInd=vr([0],"int32"),this.webcamConfig.centerCrop){let r=this.webcamConfig.resizeWidth*1/this.webcamVideoElement.width,o=this.webcamConfig.resizeHeight*1/this.webcamVideoElement.height,s=(1-r)/2,c=(1-o)/2,l=s+r,p=o+c;this.cropBox=ui([c,s,p,l],[1,4])}else this.cropBox=ui([0,0,1,1],[1,4])}summary(){return"webcam"}static async create(t,e={}){if(ct().get("IS_NODE"))throw new Error("tf.data.webcam is only supported in browser environment.");if(!t){if(t=document.createElement("video"),!e.resizeWidth||!e.resizeHeight)throw new Error("Please provide webcam video element, or resizeWidth and resizeHeight to create a hidden video element.");t.width=e.resizeWidth,t.height=e.resizeHeight}let r=new r1(t,e);return await r.start(),r}async start(){this.webcamConfig.facingMode&&_(this.webcamConfig.facingMode==="user"||this.webcamConfig.facingMode==="environment",()=>`Invalid webcam facing mode: ${this.webcamConfig.facingMode}. Please provide 'user' or 'environment'`);try{this.stream=await navigator.mediaDevices.getUserMedia({video:{deviceId:this.webcamConfig.deviceId,facingMode:this.webcamConfig.facingMode?this.webcamConfig.facingMode:"user",width:this.webcamVideoElement.width,height:this.webcamVideoElement.height}})}catch(t){throw t.message=`Error thrown while initializing video stream: ${t.message}`,t}if(!this.stream)throw new Error("Could not obtain video from webcam.");try{this.webcamVideoElement.srcObject=this.stream}catch(t){console.log(t),this.webcamVideoElement.src=window.URL.createObjectURL(this.stream)}return this.webcamVideoElement.play(),this.isClosed=!1,new Promise(t=>{this.webcamVideoElement.onloadedmetadata=()=>{t()}})}async next(){if(this.isClosed)return{value:null,done:!0};let t;try{t=e_(this.webcamVideoElement)}catch(e){throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(e)}`)}if(this.resize)try{return{value:this.cropAndResizeFrame(t),done:!1}}catch(e){throw new Error(`Error thrown cropping the video: ${e.message}`)}finally{t.dispose()}else return{value:t,done:!1}}needToResize(){return!!(this.webcamConfig.resizeWidth&&this.webcamConfig.resizeHeight&&(this.webcamVideoElement.width!==this.webcamConfig.resizeWidth||this.webcamVideoElement.height!==this.webcamConfig.resizeHeight))}cropAndResizeFrame(t){return rt(()=>{let e=t.toFloat().expandDims(0),r;r=pi.cropAndResize(e,this.cropBox,this.cropBoxInd,this.cropSize,"bilinear");let o=r.shape;return r.reshape(o.slice(1))})}async capture(){return(await this.next()).value}stop(){let t=this.stream.getTracks();t.forEach(e=>e.stop());try{this.webcamVideoElement.srcObject=null}catch(e){console.log(e),this.webcamVideoElement.src=null}this.isClosed=!0}toArray(){throw new Error("Can not convert infinite video stream to array.")}}class R${}class P$ extends En{split(t){return new Yj(this,t)}}class Yj extends P${constructor(t,e){super();this.upstream=t,this.impl=new Jj(t,e)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class Jj extends 
t1{constructor(t,e){super();this.upstream=t,this.separator=e,this.carryover=""}summary(){return`${this.upstream.summary()} -> Split('${this.separator}')`}async pump(){let t=await this.upstream.next();if(t.done)return this.carryover===""?!1:(this.outputQueue.push(this.carryover),this.carryover="",!0);let e=t.value.split(this.separator);e[0]=this.carryover+e[0];for(let r of e.slice(0,-1))this.outputQueue.push(r);return this.carryover=e[e.length-1],!0}}class Zj extends En{decodeUTF8(){return new Qj(this)}}class Qj extends P${constructor(t){super();this.upstream=t,this.impl=new t6(t)}summary(){return this.impl.summary()}async next(){return this.impl.next()}}class t6 extends t1{constructor(t){super();if(this.upstream=t,ct().get("IS_BROWSER"))this.decoder=new TextDecoder("utf-8");else{let{StringDecoder:e}=require("string_decoder");this.decoder=new e("utf8")}}summary(){return`${this.upstream.summary()} -> Utf8`}async pump(){let t=await this.upstream.next(),e;if(t.done)return!1;e=t.value;let r;return ct().get("IS_BROWSER")?r=this.decoder.decode(e,{stream:!0}):r=this.decoder.write(Buffer.from(e.buffer)),this.outputQueue.push(r),!0}}class O$ extends Zj{constructor(t,e={}){super();this.file=t,this.options=e,_(t instanceof Uint8Array||(ct().get("IS_BROWSER")?t instanceof File||t instanceof Blob:!1),()=>"FileChunkIterator only supports File, Blob and Uint8Array right now."),this.offset=e.offset||0,this.chunkSize=e.chunkSize||1024*1024}summary(){return`FileChunks ${this.file}`}async next(){if(this.offset>=(this.file instanceof Uint8Array?this.file.byteLength:this.file.size))return{value:null,done:!0};let t=new Promise((e,r)=>{let o=this.offset+this.chunkSize;if(this.file instanceof Uint8Array)e(new Uint8Array(this.file.slice(this.offset,o)));else{let s=new FileReader;s.onload=l=>{let p=s.result;if(p instanceof ArrayBuffer&&(p=new Uint8Array(p)),!(p instanceof Uint8Array))return r(new TypeError("FileReader returned unknown type."));e(p)},s.onabort=l=>r(new Error("Aborted")),s.onerror=l=>r(new Error(l.type));let c=this.file.slice(this.offset,o);s.readAsArrayBuffer(c)}this.offset=o});return{value:await t,done:!1}}}async function e6(n,t={}){let e,r;typeof n=="string"?e=n:(e=n.url,r=n6(n));let o=await SN(e,r);if(o.ok){let s=new Uint8Array(await o.arrayBuffer());return new O$(s,t)}else throw new Error(o.statusText)}let n6=n=>{let t={method:n.method,headers:n.headers,body:n.body,mode:n.mode,credentials:n.credentials,cache:n.cache,redirect:n.redirect,referrer:n.referrer,integrity:n.integrity};return t};function L$(n){return typeof n=="string"&&n.substr(0,7)==="file://"}class M$ extends R${constructor(t,e={}){super();this.input=t,this.options=e}async iterator(){if(L$(this.input)&&ct().get("IS_NODE")){let t=require("fs");this.input=t.readFileSync(this.input.substr(7))}return new O$(this.input,this.options)}}class B$ extends R${constructor(t,e={}){super();this.url=t,this.fileOptions=e}async iterator(){return L$(this.url)?new M$(this.url,this.fileOptions).iterator():e6(this.url,this.fileOptions)}}function r6(n,t={}){return new F$(new B$(n),t)}function o6(n){let t=Yp(n);return _r(async()=>t)}function s6(n){return _r(async()=>{let t=await n();return Yp(()=>t.next())})}async function i6(n,t){return r1.create(n,t)}async function a6(n){return n1.create(n)}let z$="2.7.0";var c6=Object.freeze({__proto__:null,array:Hj,Dataset:Zc,zip:jj,CSVDataset:F$,TextLineDataset:E$,csv:r6,func:o6,generator:s6,microphone:a6,webcam:i6,FileDataSource:M$,URLDataSource:B$,version_data:z$});function 
Ct(n,t){Array.isArray(n)||(n=[n]),n.forEach(e=>{e!=null&&_(e.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the CPU backend.`)})}let l6=Od,u6=sv,p6=iv,h6=av,f6=Id;class d6 extends d{constructor(){super();this.blockSize=48,this.firstUse=!0,this.data=new h(this,ps())}write(t,e,r){this.firstUse&&(this.firstUse=!1,ct().get("IS_NODE")&&Bc(` ============================ -Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, which binds to TensorFlow C++, by running npm i @tensorflow/tfjs-node, or npm i @tensorflow/tfjs-node-gpu if you have CUDA. Then call require('@tensorflow/tfjs-node'); (-gpu suffix for CUDA) at the start of your program. Visit https://github.com/tensorflow/tfjs-node for more details. -============================`));const s={};return this.data.set(s,{values:e,dtype:n,refCount:1}),s}makeTensorInfo(e,t,n){let s;if(t==="string"&&n!=null&&n.length>0&&Yi(n[0])){const i=n.map(o=>Wd(o));s=this.write(i,e,t)}else s=this.write(n,e,t);return{dataId:s,shape:e,dtype:t}}incRef(e){const t=this.data.get(e);t.refCount++}decRef(e){if(this.data.has(e)){const t=this.data.get(e);t.refCount--}}move(e,t,n,s){this.data.set(e,{values:t,dtype:s,refCount:1})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:t,complexTensorInfos:n}=this.data.get(e);if(t==="complex64"){const s=this.readSync(n.real.dataId),i=this.readSync(n.imag.dataId);return tr(s,i)}return this.data.get(e).values}bufferSync(e){const t=this.readSync(e.dataId);let n=t;if(e.dtype==="string")try{n=t.map(s=>Kl(s))}catch(s){throw new Error("Failed to decode encoded string bytes into utf-8")}return wt(e.shape,e.dtype,n)}makeOutput(e,t,n){const s=this.write(e,t,n);return Ki().makeTensorFromDataId(s,t,n,this)}disposeData(e){if(this.data.has(e)){const{complexTensorInfos:t}=this.data.get(e);t!=null&&(this.disposeData(t.real.dataId),this.disposeData(t.imag.dataId)),this.data.delete(e)}}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.data.has(t)){const n=this.data.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}async time(e){const t=jn();e();const n=jn()-t;return{kernelMs:n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(e,t,n,s){Te(e,"stridedSlice");const i=jd(t,n,s);if(i.some(c=>c===0))return sn([],i);const o=wt(i,e.dtype),a=this.bufferSync(e);for(let c=0;ca[c]=e.shape[c]-1-a[c]),n.set(s.get(...a),...o)}return n.toTensor()}neg(e){return Te(e,"neg"),X(Ce(-1),e)}addN(e){Te(e,"addN");const t=e.map(i=>this.readSync(i.dataId)),n=wt(e[0].shape,e[0].dtype),s=n.values;for(let i=0;iMath.pow(n,s))}floorDiv(e,t){Te([e,t],"floorDiv");const n=(i,o)=>Math.floor(i/o),s="int32";return this.broadcastedBinaryOp(e,t,s,n)}sum(e,t){Te(e,"sum"),Kn("sum",t,e.rank);const[n,s]=An(e.shape,t),i=$n(e.dtype,"int32"),o=dt(n,i),a=P(s),c=this.readSync(o.dataId),h=this.readSync(e.dataId);for(let d=0;df&&(f=L,b=w)}c[d]=b}return o}cumsum(e,t,n,s){if(Te(e,"cumsum"),t!==e.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=$n(e.dtype,"int32"),o=dt(e.shape,i),a=this.readSync(o.dataId),c=this.readSync(e.dataId),h=e.shape[e.rank-1],d=s?(m,f)=>m+h-f-1:(m,f)=>m+f;for(let m=0;mn===s?1:0)}notEqual(e,t){return Te([e,t],"notEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n!==s?1:0)}less(e,t){return Te([e,t],"less"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>nn<=s?1:0)}greater(e,t){return Te([e,t],"greater"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>s?1:0)}greaterEqual(e,t){return Te([e,t],"greaterEqual"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n>=s?1:0)}logicalAnd(e,t){return Te([e,t],"logicalAnd"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n&&s)}logicalOr(e,t){return Te([e,t],"logicalOr"),this.broadcastedBinaryOp(e,t,"bool",(n,s)=>n||s)}select(e,t,n){Te([e,t,n],"select");const s=this.readSync(e.dataId),i=this.readSync(t.dataId),o=this.readSync(n.dataId),a=dt(t.shape,$n(t.dtype,n.dtype)),c=this.readSync(a.dataId);let h=0;const d=e.rank===0||e.rank>1||t.rank===1?1:P(t.shape.slice(1));for(let m=0;mMath.min(n,s))}mod(e,t){return Te([e,t],"mod"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>{const i=n%s;return n<0&&s<0||n>=0&&s>=0?i:(i+s)%s})}maximum(e,t){return Te([e,t],"maximum"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.max(n,s))}all(e,t){Te(e,"all"),Kn("all",t,e.rank);const[n,s]=An(e.shape,t),i=dt(n,e.dtype),o=P(s),a=this.readSync(i.dataId),c=this.readSync(e.dataId);for(let h=0;h{const i=n-s;return i*i})}eluDer(e,t){Te([e,t],"eluDer");const n=new Float32Array(t.size),s=this.readSync(t.dataId),i=this.readSync(e.dataId);for(let o=0;o=1?n[o]=i[o]:n[o]=i[o]*(a+1)}return this.makeOutput(n,t.shape,"float32")}atan2(e,t){return Te([e,t],"atan2"),this.broadcastedBinaryOp(e,t,e.dtype,(n,s)=>Math.atan2(n,s))}tile(e,t){return Te(e,"tile"),ZH(this.bufferSync(e),t)}gather(e,t,n){Te([e,t],"gather");const s=e.shape.slice(),i=this.readSync(t.dataId);s[n]=i.length;const o=wt(s,e.dtype),a=this.bufferSync(e);for(let c=0;cd*m),i=Oh(e.shape,t,s),o=Eh(i.length,t.length),a=Dh(e.shape,t,s),c=Zb(n,t.length),h=Qb(a,n,t.length);return Ye(e.reshape(i),o).reshape(a).slice(c,h)}pool3d(e,t,n){Te(e,"pool3d");const s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,f=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,x=n==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,v=this.readSync(e.dataId),N=wt(t.outShape,e.dtype),O=N.values,E=t.outShape[1]*t.outShape[2]*t.outShape[3]*t.outShape[4],k=t.outShape[2]*t.outShape[3]*t.outShape[4],F=t.outShape[3]*t.outShape[4],U=t.outShape[4];for(let 
$=0;$ze?ze=Ft:n==="avg"&&(ht+=Ft,it++),isNaN(ze))break}if(isNaN(ze))break}if(isNaN(ze))break}const rt=Ue+Z;O[rt]=n==="avg"?ht/it:ze}}}}return N.toTensor()}avgPool3d(e,t){return Te(e,"avgPool3d"),this.pool3d(e,t,"avg").toFloat()}avgPool3dBackprop(e,t,n){Te([e,t],"avgPool3dBackprop");const s=n.strideDepth,i=n.strideHeight,o=n.strideWidth,a=n.filterDepth,c=n.filterHeight,h=n.filterWidth,d=n.dilationDepth,m=n.dilationHeight,f=n.dilationWidth,b=n.effectiveFilterDepth,w=n.effectiveFilterHeight,L=n.effectiveFilterWidth,x=b-1-n.padInfo.front,v=L-1-n.padInfo.left,N=w-1-n.padInfo.top,O=wt(t.shape,"float32"),E=1/(a*c*h),k=this.bufferSync(e);for(let F=0;F=n.outDepth||Math.floor(me)!==me)continue;for(let ce=0;ce=n.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=n.outWidth||Math.floor(we)!==we)continue;const Se=k.get(F,me,ye,we,U);he+=Se}}}O.set(he*E,F,$,Y,j,U)}return O.toTensor()}maxPool3d(e,t){return Te(e,"maxPool3d"),this.pool3d(e,t,"max").toFloat()}maxPool3dPositions(e,t){const n=wt(t.outShape,"int32"),s=t.strideDepth,i=t.strideHeight,o=t.strideWidth,a=t.dilationDepth,c=t.dilationHeight,h=t.dilationWidth,d=t.effectiveFilterDepth,m=t.effectiveFilterHeight,f=t.effectiveFilterWidth,b=t.padInfo.front,w=t.padInfo.top,L=t.padInfo.left,x=this.bufferSync(e);for(let v=0;v=ue&&(ue=Oe,me=ye*m*f+we*m+xe)}}}n.set(me,v,O,U,Z,N)}}}return n.toTensor()}maxPool3dBackprop(e,t,n,s){Te([t,n],"maxPool3dBackprop");const i=this.maxPool3dPositions(t,s),o=s.strideDepth,a=s.strideHeight,c=s.strideWidth,h=s.dilationDepth,d=s.dilationHeight,m=s.dilationWidth,f=s.effectiveFilterDepth,b=s.effectiveFilterHeight,w=s.effectiveFilterWidth,L=f-1-s.padInfo.front,x=w-1-s.padInfo.left,v=b-1-s.padInfo.top,N=wt(t.shape,"float32"),O=this.bufferSync(i),E=this.bufferSync(e);for(let k=0;k=s.outDepth||Math.floor(ue)!==ue)continue;for(let me=0;me=s.outHeight||Math.floor(ce)!==ce)continue;for(let ye=0;ye=s.outWidth||Math.floor(pe)!==pe)continue;const we=f*b*w-1-O.get(k,ue,ce,pe,F),Se=he*b*w+me*w+ye,xe=we===Se?1:0;if(xe===0)continue;const Oe=E.get(k,ue,ce,pe,F);de+=Oe*xe}}}N.set(de,k,U,$,Y,F)}return N.toTensor()}resizeBilinear(e,t,n,s){Te(e,"resizeBilinear");const[i,o,a,c]=e.shape,h=this.readSync(e.dataId),d=new Float32Array(P([i,t,n,c])),m=[s&&t>1?o-1:o,s&&n>1?a-1:a],f=[s&&t>1?t-1:t,s&&n>1?n-1:n];let b=0;const w=m[0]/f[0],L=m[1]/f[1];for(let x=0;x1?i-1:i,n&&h>1?o-1:o],f=[n&&c>1?c-1:c,n&&h>1?h-1:h],b=m[0]/f[0],w=m[1]/f[1],L=this.readSync(e.dataId);let x=0;for(let v=0;v1?o-1:o,s&&n>1?a-1:a],f=[s&&t>1?t-1:t,s&&n>1?n-1:n],b=m[0]/f[0],w=m[1]/f[1];let L=0;for(let x=0;x1?i-1:i,n&&h>1?o-1:o],b=[n&&c>1?c-1:c,n&&h>1?h-1:h],w=f[0]/b[0],L=f[1]/b[1],x=1/w,v=1/L,N=Math.ceil(x)*2+2,O=Math.ceil(v)*2+2;for(let E=0;E=c)continue;const ye=k+ce*e.strides[1],pe=ce*w,we=Math.min(i-1,n?Math.round(pe):Math.floor(pe));if(F!==we)continue;for(let Se=0;Se=h)continue;const Oe=ye+xe*e.strides[2],Ne=xe*L,De=Math.min(o-1,n?Math.round(Ne):Math.floor(Ne));j===De&&(ue+=m[Oe+he])}}d[Z+he]=ue}}}}return Ka(d,t.shape,t.dtype)}localResponseNormalization4D(e,t,n,s,i){Te(e,"localResponseNormalization4D");const o=e.shape[3],a=o-1,c=this.readSync(e.dataId),h=e.size,d=new Float32Array(h);function m(f){const b=f%o;let w=f-b+Math.max(0,b-t);const L=f-b+Math.min(b+t,a);let x=0;for(;w<=L;w++){const v=c[w];x+=v*v}return x}for(let f=0;f=0&&o[a]`Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${n}`),A(t>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=e.shape[1],o=e.shape[2],a=e.shape[3],c=i*t,h=o*t,d=a/(t*t),m=this.readSync(e.dataId),f=new Float32Array(s*c*h*d);let b=0;for(let w=0;wx[E]=0);const v=f.locToIndex(x),N=L.slice(-t.rank);d.forEach(E=>N[E]=0);const O=b.locToIndex(N);m[w]=s(a[v],c[O])}}return o.toTensor()}split(e,t,n){return JH(e,t,n)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(e,t,n,s,i,o){const[a,c,h,d]=e.shape,m=t.shape[0],[f,b]=s,w=wt([m,f,b,d],"float32"),L=this.readSync(t.dataId),x=this.readSync(n.dataId),v=this.readSync(e.dataId),N=e.strides,O=w.strides;for(let E=0;E=a)continue;const Z=f>1?($-F)*(c-1)/(f-1):0,ie=b>1?(Y-U)*(h-1)/(b-1):0;for(let de=0;de1?F*(c-1)+de*Z:.5*(F+$)*(c-1);if(he<0||he>c-1){for(let ue=0;ue1?U*(h-1)+ye*ie:.5*(U+Y)*(h-1);if(pe<0||pe>h-1){for(let Oe=0;Oe1?U*(h-1)+ue*ie:.5*(U+Y)*(h-1);if(me<0||me>h-1){for(let pe=0;pe=e.size/a)throw new Error(`Invalid indices: ${b} does not index into ${e.shape}`);for(let L=0;L=s/i)throw new Error(`Invalid indices: ${x} does not index into ${n}`);for(let N=0;N{const{x:t}=e.inputs,n=e.backend;let s=new Float32Array(P(t.shape));if(t.dtype!=="complex64"){const i=n.data.get(t.dataId).values;s=b0(i)}else{const i=n.data.get(t.dataId),o=i.complexTensorInfos.real,a=i.complexTensorInfos.imag,c=n.data.get(o.dataId).values,h=n.data.get(a.dataId).values;for(let d=0;d{const a=nt(t,n),c=a.length,h=je(a),d=P(a),m=bt(o,d),f=t.length,b=n.length,w=je(t),L=je(n),x=Ro(t,a),v=Ro(n,a);if(x.length+v.length===0)for(let N=0;NE[$]=0);const k=_s(E,f,w),F=O.slice(-b);v.forEach($=>F[$]=0);const U=_s(F,b,L);m[N]=e(s[k],i[U])}return[m,a]}}function ci(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,c=n.makeTensorInfo(s.shape,"complex64"),h=n.data.get(c.dataId);return h.complexTensorInfos={real:n.makeTensorInfo(s.shape,"float32",o),imag:n.makeTensorInfo(i.shape,"float32",a)},c}const iq={kernelName:rd,backendName:"cpu",kernelFunc:ci};function Go(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const rq={kernelName:xl,backendName:"cpu",kernelFunc:Go};function ru(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.data.get(s.dataId).complexTensorInfos.real,o=n.data.get(i.dataId).values;return n.makeTensorInfo(i.shape,i.dtype,o)}const oq={kernelName:Td,backendName:"cpu",kernelFunc:ru};function ou(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return Go({inputs:{x:i},backend:n});const a=dt(i.shape),c=ou({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),h=ci({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),h}if(i.dtype==="complex64"){const a=ru({inputs:{input:i},backend:n}),c=ou({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!ba(i.dtype,o)){const a=Go({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32"){const a=n.data.get(i.dataId).values,c=Int32Array.from(a);return n.makeTensorInfo(i.shape,"int32",c)}if(o==="bool"){const a=n.data.get(i.dataId).values,c=Or([0],i.dtype),[h,d]=Zr((m,f)=>m!==f?1:0)(i.shape,[],a,c,"bool");return n.makeTensorInfo(d,"bool",h)}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const aq={kernelName:Sa,backendName:"cpu",kernelFunc:ou};function hc(e,t,n,s){return n==null?({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;Te([a,c],e);const 
d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,f=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,f);return h.makeTensorInfo(w,f,b)}:({inputs:i,backend:o})=>{const{a,b:c}=i,h=o;if(a.dtype==="complex64"||c.dtype==="complex64"){const d=ou({inputs:{x:a},backend:h,attrs:{dtype:"complex64"}}),m=h.data.get(d.dataId),f=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,w=h.data.get(f.dataId).values,L=h.data.get(b.dataId).values,x=ou({inputs:{x:c},backend:h,attrs:{dtype:"complex64"}}),v=h.data.get(x.dataId),N=v.complexTensorInfos.real,O=v.complexTensorInfos.imag,E=h.data.get(N.dataId).values,k=h.data.get(O.dataId).values,[F,U,$]=n(a.shape,c.shape,w,L,E,k),Y=h.makeTensorInfo($,"float32",F),j=h.makeTensorInfo($,"float32",U),Z=ci({inputs:{real:Y,imag:j},backend:h});return h.disposeIntermediateTensorInfo(d),h.disposeIntermediateTensorInfo(x),h.disposeIntermediateTensorInfo(Y),h.disposeIntermediateTensorInfo(j),Z}else{const d=h.data.get(a.dataId).values,m=h.data.get(c.dataId).values,f=s||a.dtype,[b,w]=t(a.shape,c.shape,d,m,f);return h.makeTensorInfo(w,f,b)}}}function cS(e){return(t,n,s,i,o,a)=>{const c=nt(t,n),h=P(c),d=c.length,m=je(c),f=bt("float32",h),b=bt("float32",h),w=Ro(t,c),L=Ro(n,c),x=tr(s,i),v=tr(o,a),N=t.length,O=je(t),E=n.length,k=je(n);if(w.length+L.length===0)for(let F=0;F$[de]=0);const Y=_s($,N,O),j=U.slice(-E);L.forEach(de=>j[de]=0);const Z=_s(j,E,k),ie=e(x[Y*2],x[Y*2+1],v[Z*2],v[Z*2+1]);f[F]=ie.real,b[F]=ie.imag}return[f,b,c]}}const w0=Zr((e,t)=>e+t),cq=cS((e,t,n,s)=>({real:e+n,imag:t+s})),au=hc(wo,w0,cq),lq={kernelName:wo,backendName:"cpu",kernelFunc:au};function uc(e){return(t,n,s)=>{const i=bt(n,t.length);for(let o=0;o{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=P(a.shape),m=n||a.dtype,f=ws(m,d);for(let b=0;b{const{x:a}=s;if(Te(a,e),a.dtype==="string"||n==="string")throw new Error("unaryKernelFunc does not support string input/output");const c=o,h=c.data.get(a.dataId).values,d=n||a.dtype,m=t(h,d,i);return c.makeTensorInfo(a.shape,d,m)}}const L0=uc(e=>Math.ceil(e)),hq=dc(pl,L0),uq={kernelName:pl,backendName:"cpu",kernelFunc:hq};const S0=uc(e=>Math.exp(e)),dq=dc(wl,S0),pq={kernelName:wl,backendName:"cpu",kernelFunc:dq};const I0=uc(e=>Math.expm1(e)),mq=dc(Ll,I0),fq={kernelName:Ll,backendName:"cpu",kernelFunc:mq};const x0=uc(e=>Math.floor(e)),gq=dc(Sl,x0),yq={kernelName:Sl,backendName:"cpu",kernelFunc:gq};const T0=uc(e=>Math.log(e)),bq=dc(Nl,T0),wq={kernelName:Nl,backendName:"cpu",kernelFunc:bq};function A0(e,t,n,s){const i=bt(s,P(n));for(let o=0;oc&&(c=d)}i[o]=c}return i}const v0=Zr((e,t)=>e*t),Lq=cS((e,t,n,s)=>({real:e*n-t*s,imag:e*s+t*n})),N0=hc(Ta,v0,Lq),Sq={kernelName:Ta,backendName:"cpu",kernelFunc:N0};const C0=Zr((e,t)=>e!==t?1:0),Iq=hc(Dl,C0,null,"bool"),xq={kernelName:Dl,backendName:"cpu",kernelFunc:Iq};const R0=uc(e=>1/Math.sqrt(e)),Tq=dc(Ul,R0),Aq={kernelName:Ul,backendName:"cpu",kernelFunc:Tq};function O0(e,t,n,s,i){const o=eb(s,t,n),a=P(n),c=je(s);if(o){const d=tb(t,c);return e.subarray(d,d+a)}const h=bt(i,a);for(let d=0;dx+t[v]),L=_s(w,s.length,c);h[d]=e[L]}return h}function lS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{begin:o,size:a}=s;Te(i,"slice");const[c,h]=Kd(i,o,a);Qy(i,c,h);const d=n.data.get(i.dataId).values,m=O0(d,c,h,i.shape,i.dtype);return n.makeTensorInfo(h,i.dtype,m)}const vq={kernelName:Ad,backendName:"cpu",kernelFunc:lS};const E0=Zr((e,t)=>{const n=e-t;return 
n*n}),Nq=hc(va,E0),Cq={kernelName:va,backendName:"cpu",kernelFunc:Nq};const D0=Zr((e,t)=>e-t),Rq=cS((e,t,n,s)=>({real:e-n,imag:t-s})),k0=hc(Na,D0,Rq),Oq={kernelName:Na,backendName:"cpu",kernelFunc:k0};function hS(e,t,n,s,i){const o=t.length,a=P(t),c=je(t),h=je(i),d=bt(n,P(i));for(let m=0;m{for(let v=0;vnew tq,1);const _0=xt(yl,e=>e>=0?e:Math.exp(e)-1),kq={kernelName:yl,backendName:"cpu",kernelFunc:_0};const Fq=Zr((e,t)=>e<0?t*e:e);function W0(e){const{inputs:t,backend:n}=e,{x:s,alpha:i}=t;Te([s,i],"prelu");const o=n.data.get(s.dataId).values,a=n.data.get(i.dataId).values,[c,h]=Fq(s.shape,i.shape,o,a,s.dtype);return n.makeTensorInfo(h,s.dtype,c)}const _q={kernelName:xd,backendName:"cpu",kernelFunc:W0};const $0=xt(Fl,e=>Math.max(0,e)),Wq={kernelName:Fl,backendName:"cpu",kernelFunc:$0};const U0=xt(Wl,e=>Math.min(Math.max(0,e),6)),$q={kernelName:Wl,backendName:"cpu",kernelFunc:U0};function uS(e,t,n,s){if(n==="linear")return Go({inputs:{x:t},backend:e});if(n==="relu")return $0({inputs:{x:t},backend:e});if(n==="elu")return _0({inputs:{x:t},backend:e});if(n==="relu6")return U0({inputs:{x:t},backend:e});if(n==="prelu")return W0({inputs:{x:t,alpha:s},backend:e});throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}function Di(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{shape:o}=s,a=P(i.shape),c=Vt(o,a),h=P(c);A(a===h,()=>`The new shape (${c}) has ${h} elements and the old shape (${i.shape}) has ${a} elements. The new shape and old shape must have the same number of elements.`),n.incRef(i.dataId);const d=n.data.get(i.dataId);if(d.complexTensorInfos!=null){const m=d.complexTensorInfos.real,f=d.complexTensorInfos.imag;m.shape=c,f.shape=c}return{dataId:i.dataId,shape:c,dtype:i.dtype}}const Uq={kernelName:_l,backendName:"cpu",kernelFunc:Di};function B0(e){const{inputs:t,backend:n,attrs:s}=e,{a:i,b:o}=t,{transposeA:a,transposeB:c}=s;Te([i,o],"matMul");const h=i.shape.length,d=o.shape.length,m=a?i.shape[h-2]:i.shape[h-1],f=c?o.shape[d-1]:o.shape[d-2],b=a?i.shape[h-1]:i.shape[h-2],w=c?o.shape[d-2]:o.shape[d-1],L=i.shape.slice(0,-2),x=o.shape.slice(0,-2),v=P(L),N=P(x),O=v===N||v===1||N===1;A(h>=2&&d>=2&&O,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. 
Got input batch dimensions of (${L}) and (${x}).`);const E=v>N?i.shape.slice(0,-2):o.shape.slice(0,-2),k=E.concat([b,w]);A(m===f,()=>`Error in matMul: inner shapes (${m}) and (${f}) of Tensors with shapes ${i.shape} and ${o.shape} and transposeA=${a} and transposeB=${c} must match.`);const F=a?[v,m,b]:[v,b,m],U=c?[N,w,f]:[N,f,w],$=Di({inputs:{x:i},backend:n,attrs:{shape:F}}),Y=Di({inputs:{x:o},backend:n,attrs:{shape:U}}),j=a?$.shape[1]:$.shape[2],Z=a?$.shape[2]:$.shape[1],ie=c?Y.shape[1]:Y.shape[2],de=Math.max(v,N),he=n.data.get($.dataId).values,ue=n.data.get(Y.dataId).values,me=je($.shape),ce=je(Y.shape),[ye,pe,we]=a?[me[0],1,me[1]]:[me[0],me[1],1],[Se,xe,Oe]=c?[1,ce[1],ce[0]]:[ce[1],1,ce[0]],Ne=Z*ie,De=wt([de,Z,ie],$.dtype),Ue=De.values,ze=n.blockSize;for(let ht=0;htMath.acos(e)),Vq={kernelName:ol,backendName:"cpu",kernelFunc:zq};const Gq=xt(al,e=>Math.acosh(e)),Yq={kernelName:al,backendName:"cpu",kernelFunc:Gq};const Hq=xt(cl,e=>Math.asin(e)),qq={kernelName:cl,backendName:"cpu",kernelFunc:Hq};const jq=xt(ll,e=>Math.asinh(e)),Kq={kernelName:ll,backendName:"cpu",kernelFunc:jq};const Xq=xt(hl,e=>Math.atan(e)),Jq={kernelName:hl,backendName:"cpu",kernelFunc:Xq};const Zq=xt(ul,e=>Math.atanh(e)),Qq={kernelName:ul,backendName:"cpu",kernelFunc:Zq};function dS(e,t,n,s,i,o){const a=i.strideHeight,c=i.strideWidth,h=i.dilationHeight,d=i.dilationWidth,m=i.effectiveFilterHeight,f=i.effectiveFilterWidth,b=i.padInfo.top,w=i.padInfo.left,L=o==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,x=wt(i.outShape,n),v=x.values,N=i.outShape[1]*i.outShape[2]*i.outShape[3],O=i.outShape[2]*i.outShape[3],E=i.outShape[3];for(let k=0;kye?ye=Ue:o==="avg"&&(pe+=Ue,we++)}if(isNaN(ye))break}const Se=de+he*E+$;v[Se]=o==="avg"?pe/we:ye}}}return x}function M0(e,t,n,s,i=!1,o=!1){const a=wt(s.outShape,"int32"),c=s.strideHeight,h=s.strideWidth,d=s.dilationHeight,m=s.dilationWidth,f=s.effectiveFilterHeight,b=s.effectiveFilterWidth,w=s.padInfo.top,L=s.padInfo.left,x=wt(t,n,e);for(let v=0;vZ&&(Z=ce,i?ie=o?((v*s.inHeight+de)*s.inWidth+ue)*s.inChannels+N:(de*s.inWidth+ue)*s.inChannels+N:ie=he*b+me)}}a.set(ie,v,O,U,N)}}return a}function e4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;Te(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;A(cn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);let f;if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))f=Go({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=je(i.shape),L=dS(b,i.shape,i.dtype,w,m,"avg");f=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return f}const t4={kernelName:dl,backendName:"cpu",kernelFunc:e4};function n4(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;Te([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Un(a.shape,c,h,1,d),f=m.strideHeight,b=m.strideWidth,w=m.filterHeight,L=m.filterWidth,x=m.dilationHeight,v=m.dilationWidth,N=m.effectiveFilterHeight,O=m.effectiveFilterWidth,E=O-1-m.padInfo.left,k=N-1-m.padInfo.top,F=wt(a.shape,"float32"),U=1/(w*L),$=n.data.get(i.dataId).values,Y=wt(i.shape,"float32",$);for(let j=0;j=m.outHeight||Math.floor(ye)!==ye)continue;for(let pe=0;pe=m.outWidth||Math.floor(we)!==we)continue;const Se=Y.get(j,ye,we,Z);me+=Se}}F.set(me*U,j,ie,de,Z)}return n.makeTensorInfo(F.shape,F.dtype,F.values)}const s4={kernelName:sd,backendName:"cpu",kernelFunc:n4};function i4(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,scale:o,offset:a,mean:c,variance:h}=t;A(c.shape.length===h.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(a==null||c.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(o==null||c.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),Te([i,c,h,o,a],"batchNorm");let{varianceEpsilon:d}=s;d==null&&(d=.001);const m=n.data.get(i.dataId).values,f=n.data.get(c.dataId).values,b=n.data.get(h.dataId).values,w=o?n.data.get(o.dataId).values:new Float32Array([1]),L=a?n.data.get(a.dataId).values:new Float32Array([0]),x=new Float32Array(m.length),v=L.length,N=w.length,O=b.length,E=f.length;let k=0,F=0,U=0,$=0;for(let Y=0;Y=v&&(k=0),F>=E&&(F=0),U>=N&&(U=0),$>=O&&($=0);return n.makeTensorInfo(i.shape,i.dtype,x)}const r4={kernelName:Il,backendName:"cpu",kernelFunc:i4};const o4=xt(ml,(e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:ew.shape),o);if(P(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(w=>P(w.shape)>0);if(c.length===1)return c[0];const h=c.map(w=>w.shape);if(np(h,o),c[0].dtype==="complex64"){const w=c.map(O=>ru({inputs:{input:O},backend:n})),L=c.map(O=>Am({inputs:{input:O},backend:n})),x=cu({inputs:w,backend:n,attrs:{axis:o}}),v=cu({inputs:L,backend:n,attrs:{axis:o}}),N=ci({inputs:{real:x,imag:v},backend:n});return w.forEach(O=>n.disposeIntermediateTensorInfo(O)),L.forEach(O=>n.disposeIntermediateTensorInfo(O)),n.disposeIntermediateTensorInfo(x),n.disposeIntermediateTensorInfo(v),N}const d=c.map(w=>{const L=P(w.shape.slice(o)),x=[-1,L];return Di({inputs:{x:w},backend:n,attrs:{shape:x}})});a=Xi(d.map(w=>w.shape),1);const m=bt(c[0].dtype,P(a));if(d[0].shape[0]===1){let w=0;d.forEach(L=>{const x=n.data.get(L.dataId).values,v=P(L.shape);m.set(x,w),w+=v})}else{let w=0;d.forEach(L=>{const x=n.data.get(L.dataId).values;let v=0;for(let N=0;Nw.shape),o),b=n.makeTensorInfo(f,t[0].dtype,m);return d.forEach(w=>n.disposeIntermediateTensorInfo(w)),b}const l4={kernelName:fl,backendName:"cpu",kernelFunc:cu};function P0(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,filter:o}=t,{strides:a,pad:c,dataFormat:h,dilations:d,dimRoundingMode:m}=s;Te([i,o],"conv2d");const 
f=Wr(h),b=kn(i.shape,o.shape,a,d,c,m,!1,f),w=b.filterHeight,L=b.filterWidth,x=b.dilationHeight,v=b.dilationWidth,N=b.padInfo.left,O=b.padInfo.top,E=b.dataFormat==="channelsLast",k=new an(b.outShape,i.dtype),F=je(i.shape),U=je(o.shape),$=F[0],Y=E?F[1]:F[2],j=E?F[2]:1,Z=E?1:F[1],ie=k.strides[0],de=E?k.strides[1]:k.strides[2],he=E?k.strides[2]:1,ue=E?1:k.strides[1],me=n.data.get(i.dataId).values,ce=n.data.get(o.dataId).values,ye=k.values;for(let pe=0;pe=b.inHeight)continue;const ze=De*U[0],ht=we+Ue*Y;for(let it=0;it=b.inWidth)continue;const rn=ze+ut*U[1],Ut=ht+Dt*j;let kt=rn;for(let Ft=0;Ft=d.inDepth)continue;const pe=ce*j[0],we=ie+ye*Y[1];for(let Se=0;Se=d.inHeight)continue;const Ue=pe+Ne*j[1],ze=we+De*Y[2];for(let ht=0;ht=d.inWidth)continue;const Dt=Ue+mt*j[2],rn=ze+ut*d.inChannels;let Ut=Dt;for(let kt=0;ktMath.cos(e)),I4={kernelName:Ia,backendName:"cpu",kernelFunc:S4};const x4=xt(gl,e=>Math.cosh(e)),T4={kernelName:gl,backendName:"cpu",kernelFunc:x4};function z0(e){const{inputs:t,backend:n,attrs:s}=e,{x:i,filter:o}=t,{strides:a,pad:c,dilations:h,dimRoundingMode:d}=s;Te([i,o],"depthwiseConv2DNative");const m=je(i.shape),f=je(o.shape);let b=h;b==null&&(b=[1,1]),A(cn(a,b),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${a} and dilations '${b}'`);const w=kn(i.shape,o.shape,a,b,c,d,!0),{filterHeight:L,filterWidth:x,dilationHeight:v,dilationWidth:N,padInfo:O}=w,E=O.left,k=O.top,F=w.outChannels/w.inChannels,U=new an(w.outShape,i.dtype),$=n.data.get(i.dataId).values,Y=n.data.get(o.dataId).values,j=U.values;for(let Z=0;Z=w.inHeight)continue;const pe=ce*f[0],we=ie+ye*m[1];for(let Se=0;Se=w.inWidth)continue;const Ue=pe+Ne*f[1],ze=we+De*w.inChannels;let ht=xe,it=Ue;for(let rt=0;rt{const{x:s,filter:i}=e,{strides:o,pad:a,dilations:c}=n,h=t,d=h.data.get(s.dataId).values,m=s.shape.length,f=h.data.get(i.dataId).values,b=i.shape.length,{batchSize:w,inHeight:L,inWidth:x,inChannels:v,outHeight:N,outWidth:O,padInfo:E,strideHeight:k,strideWidth:F,filterHeight:U,filterWidth:$,dilationHeight:Y,dilationWidth:j,outShape:Z}=ep(s.shape,i.shape,o,a,"NHWC",c),ie=P(Z),de=Z.length,he=ws(s.dtype,ie);for(let me=0;me=0&&De=0&&zexe&&(xe=rt)}}}const Oe=_s([me,ce,pe,Se],de,je(Z));he[Oe]=xe}}}const ue=h.write(Or(he,s.dtype),Z,s.dtype);return{dataId:ue,shape:Z,dtype:s.dtype}}};const E4={kernelName:dd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=Ls(s.shape,d.data.get(s.dataId).values),f=Ls(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:x,outHeight:v,outWidth:N,padInfo:O,strideHeight:E,strideWidth:k,filterHeight:F,filterWidth:U,dilationHeight:$,dilationWidth:Y,outShape:j}=ep(s.shape,i.shape,a,c,"NHWC",h);A(o.rank===j.length,()=>`Error in ${dd}, dy must have the same rank as output ${j.length}, but got ${o.rank}`);const Z=Ls(j,d.data.get(o.dataId).values),ie=Pg(i.shape,i.dtype);for(let he=0;he=0&&Ne=0&&Uewe&&(we=ze,Se=Oe,xe=De)}}}ie[Se][xe][pe]+=Z[he][ue][ce][pe]}}}const de=d.write(Or(ie,s.dtype),i.shape,i.dtype);return{dataId:de,shape:i.shape,dtype:i.dtype}}};const 
D4={kernelName:ud,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{x:s,filter:i,dy:o}=e,{strides:a,pad:c,dilations:h}=n,d=t,m=Ls(s.shape,d.data.get(s.dataId).values),f=Ls(i.shape,d.data.get(i.dataId).values),{batchSize:b,inHeight:w,inWidth:L,inChannels:x,outHeight:v,outWidth:N,padInfo:O,strideHeight:E,strideWidth:k,filterHeight:F,filterWidth:U,dilationHeight:$,dilationWidth:Y,outShape:j}=ep(s.shape,i.shape,a,c,"NHWC",h);A(o.rank===j.length,()=>`Error in ${ud}, dy must have the same rank as output ${j.length}, but got ${o.rank}`);const Z=Ls(j,d.data.get(o.dataId).values),ie=Pg(s.shape,s.dtype);for(let he=0;he=0&&Ne=0&&Uewe&&(we=ze,Se=Ne,xe=Ue)}}}ie[he][Se][xe][pe]+=Z[he][ue][ce][pe]}}}const de=d.write(Or(ie,s.dtype),s.shape,s.dtype);return{dataId:de,shape:s.shape,dtype:s.dtype}}};const k4=Zr((e,t)=>e/t),F4=hc(xa,k4),pS={kernelName:xa,backendName:"cpu",kernelFunc:F4};const _4=ew,W4=tw,$4=nw,U4=sw,B4=iw,M4=rw,P4=xt(bl,e=>{const t=Math.sign(e),n=Math.abs(e),s=1/(1+_4*n);return t*(1-((((M4*s+B4)*s+U4)*s+$4)*s+W4)*s*Math.exp(-n*n))}),z4={kernelName:bl,backendName:"cpu",kernelFunc:P4};function V0(e,t,n){const s=e.shape,i=s[0],o=s[1],a=n.data.get(e.dataId),c=a.complexTensorInfos.real,h=a.complexTensorInfos.imag,d=[i,o],m=P(d),f=bt("float32",m),b=bt("float32",m);for(let v=0;v{const{image:s}=e,i=n,o=bt(s.dtype,P(s.shape)),[a,c,h,d]=s.shape,m=i.data.get(s.dataId).values;for(let b=0;b=0&&FNumber.isFinite(e)?1:0,"bool"),rj={kernelName:Tl,backendName:"cpu",kernelFunc:ij};const oj=xt(Al,e=>Math.abs(e)===Infinity?1:0,"bool"),aj={kernelName:Al,backendName:"cpu",kernelFunc:oj};const cj=xt(vl,e=>Number.isNaN(e)?1:0,"bool"),lj={kernelName:vl,backendName:"cpu",kernelFunc:cj};const hj=xt(Cl,e=>Math.log1p(e)),uj={kernelName:Cl,backendName:"cpu",kernelFunc:hj};const dj=xt(yd,e=>e?0:1,"bool"),pj={kernelName:yd,backendName:"cpu",kernelFunc:dj};const mj={kernelName:Rl,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n;let c=s.shape;const h=c.length,d=qe(i,c);let m=d;const f=Xn(m,h);let b=a.data.get(s.dataId).values;if(f!=null){const E=new Array(h);for(let k=0;k`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);let f;if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))f=Go({inputs:{x:i},backend:n});else{const b=n.data.get(i.dataId).values,w=je(i.shape),L=dS(b,i.shape,i.dtype,w,m,"max");f=n.makeTensorInfo(m.outShape,i.dtype,L.values)}return f}const gj={kernelName:Ol,backendName:"cpu",kernelFunc:fj};function yj(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;Te([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:f}=s,b=Un(c.shape,h,d,1,m,f),w=n.data.get(c.dataId).values,L=wt(b.outShape,c.dtype,M0(w,c.shape,c.dtype,b).values),x=b.strideHeight,v=b.strideWidth,N=b.dilationHeight,O=b.dilationWidth,E=b.effectiveFilterHeight,k=b.effectiveFilterWidth,F=k-1-b.padInfo.left,U=E-1-b.padInfo.top,$=wt(c.shape,"float32"),Y=n.data.get(i.dataId).values,j=wt(i.shape,"float32",Y);for(let Z=0;Z=b.outHeight||Math.floor(pe)!==pe)continue;for(let we=0;we=b.outWidth||Math.floor(Se)!==Se)continue;const xe=E*k-1-L.get(Z,pe,Se,ie),Oe=ye*k+we,Ne=xe===Oe?1:0;if(Ne===0)continue;const De=j.get(Z,pe,Se,ie);ce+=De*Ne}}$.set(ce,Z,de,he,ie)}return n.makeTensorInfo($.shape,$.dtype,$.values)}const bj={kernelName:bd,backendName:"cpu",kernelFunc:yj};function wj(e,t,n,s,i){const o=je(t),a=dS(e,t,n,o,i,"max"),c=M0(e,t,n,i,!0,s);return[a.values,c.values]}const Lj={kernelName:wd,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;Te(s,"MaxPoolWithArgmax");const d=h.data.get(s.dataId).values,m=Un(s.shape,i,o,[1,1],a),[f,b]=wj(d,s.shape,s.dtype,c,m),w=h.write(f,m.outShape,s.dtype),L=h.write(b,m.outShape,s.dtype);return[{dataId:w,shape:m.outShape,dtype:s.dtype},{dataId:L,shape:m.outShape,dtype:"int32"}]}};function Sj(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,mode:a}=s;Te(i,"mirrorPad");const c=o.map((E,k)=>E[0]+i.shape[k]+E[1]),h=o.map(E=>E[0]),d=o.map((E,k)=>E[0]+i.shape[k]),m=a==="reflect"?0:1,f=n.data.get(i.dataId).values,b=i.shape.length,w=je(i.shape),L=P(c),x=c.length,v=je(c),N=bt(i.dtype,L);for(let E=0;E=d[U]&&(k[U]=(d[U]-1)*2-k[U]+m);k=k.map((U,$)=>U-h[$]);const F=_s(k,b,w);N[E]=f[F]}const O=n.write(N,c,i.dtype);return{dataId:O,shape:c,dtype:i.dtype}}const Ij={kernelName:El,backendName:"cpu",kernelFunc:Sj};const xj=kp,Tj={kernelName:Ld,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t;Te(s,"NonMaxSuppressionPadded");const m=d.data.get(s.dataId).values,f=d.data.get(i.dataId).values,{selectedIndices:b,validOutputs:w}=xj(m,f,o,a,c,h);return[b,w]}};const Aj=Fp,vj={kernelName:Sd,backendName:"cpu",kernelFunc:({inputs:e,backend:t,attrs:n})=>{const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t;Te(s,"NonMaxSuppressionWithScore");const m=d.data.get(s.dataId).values,f=d.data.get(i.dataId).values,b=o,w=a,L=c,x=h,{selectedIndices:v,selectedScores:N}=Aj(m,f,b,w,L,x);return[v,N]}};function Nj(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{paddings:o,constantValue:a}=s;Te(i,"pad");const c=o.map((O,E)=>O[0]+i.shape[E]+O[1]),h=o.map(O=>O[0]),d=n.data.get(i.dataId).values,m=P(i.shape),f=i.shape.length,b=je(i.shape),w=P(c),L=c.length,x=je(c),v=bt(i.dtype,w);a!==0&&v.fill(a);for(let O=0;OU+h[$]),F=_s(k,L,x);v[F]=d[O]}const N=n.write(v,c,i.dtype);return{dataId:N,shape:c,dtype:i.dtype}}const G0={kernelName:Id,backendName:"cpu",kernelFunc:Nj};const 
Cj=xt(kl,e=>1/e),Rj={kernelName:kl,backendName:"cpu",kernelFunc:Cj};const Oj={kernelName:Od,backendName:"cpu",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=bt(s.dtype,P(s.shape)),[d,m,f,b]=s.shape,[w,L]=Jb(a,m,f),x=255,v=Math.sin(i),N=Math.cos(i),O=c.data.get(s.dataId).values;for(let k=0;k=0&&ue=0&&me{const t=Math.floor(e);return e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2===0?t:t+1}),Dj={kernelName:$l,backendName:"cpu",kernelFunc:Ej};const kj=Wp,Fj=$p,_j=xt(Bl,e=>e>=0?Fj*e:kj*(Math.exp(e)-1)),Wj={kernelName:Bl,backendName:"cpu",kernelFunc:_j};const $j=xt(zl,e=>1/(1+Math.exp(-e))),Uj={kernelName:zl,backendName:"cpu",kernelFunc:$j};const Bj=xt(Pl,e=>e<0?-1:e>0?1:0),Mj={kernelName:Pl,backendName:"cpu",kernelFunc:Bj};const Pj=xt(Aa,e=>Math.sin(e)),zj={kernelName:Aa,backendName:"cpu",kernelFunc:Pj};const Vj=xt(Ml,e=>Math.sinh(e)),Gj={kernelName:Ml,backendName:"cpu",kernelFunc:Vj};const Yj=11920928955078125e-23,Y0=Math.log(Yj)+2,Hj=xt(Vl,e=>{const t=e>-Y0,n=eMath.sqrt(e)),Zj={kernelName:Gl,backendName:"cpu",kernelFunc:Jj};const Qj={kernelName:Nd,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,s=t;Te(n,"square");const i=s.data.get(n.dataId).values,o=new Float32Array(i.length);for(let c=0;c{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}),tK={kernelName:ql,backendName:"cpu",kernelFunc:eK};const nK=xt(Ca,e=>Math.tan(e)),sK={kernelName:Ca,backendName:"cpu",kernelFunc:nK};const iK=xt(Yl,e=>Math.tanh(e)),rK={kernelName:Yl,backendName:"cpu",kernelFunc:iK};function oK(e){const{inputs:t,attrs:n,backend:s}=e,{axis:i}=n,{x:o}=t;Te(o,"unique");const a=s.data.get(o.dataId).values,{outputValues:c,outputShape:h,indices:d}=F0(a,i,o.shape,o.dtype);return[s.makeTensorInfo(h,o.dtype,c),s.makeTensorInfo([d.length],"int32",d)]}const aK={kernelName:Cd,backendName:"cpu",kernelFunc:oK};const cK=[Pq,sq,Vq,Yq,lq,qq,Kq,Jq,Qq,t4,s4,Bq,r4,aq,uq,a4,iq,l4,d4,m4,h4,b4,L4,g4,I4,T4,A4,N4,R4,O4,D4,E4,pS,kq,z4,pq,fq,q4,K4,J4,yq,Q4,tj,rq,sj,c4,rj,aj,lj,wq,uj,pj,gj,bj,Lj,mj,Ij,Sq,Tj,vj,xq,G0,_q,oq,Rj,Wq,$q,Uq,Oj,Dj,Aq,Wj,Uj,Mj,zj,Gj,vq,qj,Xj,Zj,Qj,Cq,tK,Oq,sK,rK,jj,aK];for(const e of cK)_d(e);const Qr={},fS={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function jte(e){delete Qr[e]}function lK(e,t){Qr[e]=t}function ki(e){if(!(e in Qr)){const n=uK(e);if(n!==null)Qr[e]=n;else return console.log("Could not get context for WebGL version",e),null}const t=Qr[e];return t.isContextLost()?(delete Qr[e],ki(e)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),Qr[e])}function hK(e){if(typeof OffscreenCanvas!="undefined"&&e===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function uK(e){if(e!==1&&e!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");const t=hK(e);return t.addEventListener("webglcontextlost",n=>{n.preventDefault(),delete Qr[e]},!1),e===1?t.getContext("webgl",fS)||t.getContext("experimental-webgl",fS):t.getContext("webgl2",fS)}var lu;(function(e){e[e.DENSE=0]="DENSE",e[e.SHARED_BATCH=1]="SHARED_BATCH"})(lu||(lu={}));var Ns;(function(e){e[e.RENDER=0]="RENDER",e[e.UPLOAD=1]="UPLOAD",e[e.PIXELS=2]="PIXELS",e[e.DOWNLOAD=3]="DOWNLOAD"})(Ns||(Ns={}));var 
Cn;(function(e){e[e.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",e[e.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",e[e.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",e[e.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",e[e.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(Cn||(Cn={}));function hu(e,t){return[t,e]}function dK(e,t){return e*t}function Kte(e,t){return[t*4,e]}function uu(e){const t=P(e),n=Math.ceil(t/4);return Ve(n)}function Xte(e,t){if(e%t!==0)throw new Error(`unpackedSize (${e}) must be a multiple of ${t}`);return e/t}function Jte(e,t,n){const s=e.length*n/4;if(t.length= ${s}`);let i=0;for(let o=0;oe.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function wK(e,t){const n=cr(e,()=>e.createShader(e.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Ee(e,()=>e.shaderSource(n,t)),Ee(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw console.log(e.getShaderInfoLog(n)),new Error("Failed to compile vertex shader.");return n}function LK(e,t){const n=cr(e,()=>e.createShader(e.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Ee(e,()=>e.shaderSource(n,t)),Ee(e,()=>e.compileShader(n)),e.getShaderParameter(n,e.COMPILE_STATUS)===!1)throw IK(t,e.getShaderInfoLog(n)),new Error("Failed to compile fragment shader.");return n}const SK=/ERROR: [0-9]+:([0-9]+):/g;function IK(e,t){const n=SK.exec(t);if(n==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(e);return}const s=+n[1],i=e.split(` -`),o=i.length.toString().length+2,a=i.map((f,b)=>pt((b+1).toString(),o)+f);let c=0;for(let f=0;f0&&as(r[0])){let s=r.map(c=>zf(c));o=this.write(s,t,e)}else o=this.write(r,t,e);return{dataId:o,shape:t,dtype:e}}incRef(t){let e=this.data.get(t);e.refCount++}decRef(t){if(this.data.has(t)){let e=this.data.get(t);e.refCount--}}move(t,e,r,o){this.data.set(t,{values:e,dtype:o,refCount:1})}numDataIds(){return this.data.numDataIds()}async read(t){return this.readSync(t)}readSync(t){let{dtype:e,complexTensorInfos:r}=this.data.get(t);if(e==="complex64"){let o=this.readSync(r.real.dataId),s=this.readSync(r.imag.dataId);return ys(o,s)}return this.data.get(t).values}bufferSync(t){let e=this.readSync(t.dataId),r=e;if(t.dtype==="string")try{r=e.map(o=>Uu(o))}catch(o){throw new Error("Failed to decode encoded string bytes into utf-8")}return Se(t.shape,t.dtype,r)}makeOutput(t,e,r){let o=this.write(t,e,r);return ps().makeTensorFromDataId(o,e,r,this)}disposeData(t){if(this.data.has(t)){let{complexTensorInfos:e}=this.data.get(t);e!=null&&(this.disposeData(e.real.dataId),this.disposeData(e.imag.dataId)),this.data.delete(t)}}disposeIntermediateTensorInfo(t){let e=t.dataId;if(this.data.has(e)){let r=this.data.get(e);r.refCount--,r.refCount<1&&this.disposeData(e)}}async time(t){let e=or();t();let r=or()-e;return{kernelMs:r}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}stridedSlice(t,e,r,o){Ct(t,"stridedSlice");let s=Zf(e,r,o);if(s.some(p=>p===0))return un([],s);let c=Se(s,t.dtype),l=this.bufferSync(t);for(let p=0;pl[p]=t.shape[p]-1-l[p]),r.set(o.get(...l),...c)}return r.toTensor()}neg(t){return Ct(t,"neg"),nt(Et(-1),t)}addN(t){Ct(t,"addN");let e=t.map(s=>this.readSync(s.dataId)),r=Se(t[0].shape,t[0].dtype),o=r.values;for(let s=0;sMath.pow(r,o))}floorDiv(t,e){Ct([t,e],"floorDiv");let r=(s,c)=>Math.floor(s/c),o="int32";return this.broadcastedBinaryOp(t,e,o,r)}sum(t,e){Ct(t,"sum"),sr("sum",e,t.rank);let[r,o]=Fn(t.shape,e),s=jn(t.dtype,"int32"),c=xe(r,s),l=G(o),p=this.readSync(c.dataId),f=this.readSync(t.dataId);for(let m=0;mb&&(b=N,v=T)}p[m]=v}return c}cumsum(t,e,r,o){if(Ct(t,"cumsum"),e!==t.rank-1)throw new Error(`backend.cumsum in CPU expects an inner-most axis=${t.rank-1} but got axis=${e}`);let s=jn(t.dtype,"int32"),c=xe(t.shape,s),l=this.readSync(c.dataId),p=this.readSync(t.dataId),f=t.shape[t.rank-1],m=o?(y,b)=>y+f-b-1:(y,b)=>y+b;for(let y=0;yr===o?1:0)}notEqual(t,e){return Ct([t,e],"notEqual"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>r!==o?1:0)}less(t,e){return Ct([t,e],"less"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>rr<=o?1:0)}greater(t,e){return Ct([t,e],"greater"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>r>o?1:0)}greaterEqual(t,e){return Ct([t,e],"greaterEqual"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>r>=o?1:0)}logicalAnd(t,e){return Ct([t,e],"logicalAnd"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>r&&o)}logicalOr(t,e){return Ct([t,e],"logicalOr"),this.broadcastedBinaryOp(t,e,"bool",(r,o)=>r||o)}select(t,e,r){Ct([t,e,r],"select");let o=this.readSync(t.dataId),s=this.readSync(e.dataId),c=this.readSync(r.dataId),l=xe(e.shape,jn(e.dtype,r.dtype)),p=this.readSync(l.dataId),f=0,m=t.rank===0||t.rank>1||e.rank===1?1:G(e.shape.slice(1));for(let y=0;yMath.min(r,o))}mod(t,e){return Ct([t,e],"mod"),this.broadcastedBinaryOp(t,e,t.dtype,(r,o)=>{let s=r%o;return r<0&&o<0||r>=0&&o>=0?s:(s+o)%o})}maximum(t,e){return Ct([t,e],"maximum"),this.broadcastedBinaryOp(t,e,t.dtype,(r,o)=>Math.max(r,o))}all(t,e){Ct(t,"all"),sr("all",e,t.rank);let[r,o]=Fn(t.shape,e),s=xe(r,t.dtype),c=G(o),l=this.readSync(s.dataId),p=this.readSync(t.dataId);for(let f=0;f{let s=r-o;return s*s})}eluDer(t,e){Ct([t,e],"eluDer");let r=new Float32Array(e.size),o=this.readSync(e.dataId),s=this.readSync(t.dataId);for(let c=0;c=1?r[c]=s[c]:r[c]=s[c]*(l+1)}return this.makeOutput(r,e.shape,"float32")}atan2(t,e){return Ct([t,e],"atan2"),this.broadcastedBinaryOp(t,e,t.dtype,(r,o)=>Math.atan2(r,o))}tile(t,e){return Ct(t,"tile"),p6(this.bufferSync(t),e)}gather(t,e,r){Ct([t,e],"gather");let o=t.shape.slice(),s=this.readSync(e.dataId);o[r]=s.length;let c=Se(o,t.dtype),l=this.bufferSync(t);for(let p=0;pm*y),s=Np(t.shape,e,o),c=_p(s.length,e.length),l=Cp(t.shape,e,o),p=Kw(r,e.length),f=Xw(l,r,e.length);return Kt(t.reshape(s),c).reshape(l).slice(p,f)}pool3d(t,e,r){Ct(t,"pool3d");let o=e.strideDepth,s=e.strideHeight,c=e.strideWidth,l=e.dilationDepth,p=e.dilationHeight,f=e.dilationWidth,m=e.effectiveFilterDepth,y=e.effectiveFilterHeight,b=e.effectiveFilterWidth,v=e.padInfo.front,T=e.padInfo.top,N=e.padInfo.left,S=r==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,D=this.readSync(t.dataId),I=Se(e.outShape,t.dtype),P=I.values,E=e.outShape[1]*e.outShape[2]*e.outShape[3]*e.outShape[4],L=e.outShape[2]*e.outShape[3]*e.outShape[4],B=e.outShape[3]*e.outShape[4],q=e.outShape[4];for(let 
H=0;Hoe?oe=Le:r==="avg"&&(de+=Le,ie++),isNaN(oe))break}if(isNaN(oe))break}if(isNaN(oe))break}let we=Ht+it;P[we]=r==="avg"?de/ie:oe}}}}return I.toTensor()}avgPool3d(t,e){return Ct(t,"avgPool3d"),this.pool3d(t,e,"avg").toFloat()}avgPool3dBackprop(t,e,r){Ct([t,e],"avgPool3dBackprop");let o=r.strideDepth,s=r.strideHeight,c=r.strideWidth,l=r.filterDepth,p=r.filterHeight,f=r.filterWidth,m=r.dilationDepth,y=r.dilationHeight,b=r.dilationWidth,v=r.effectiveFilterDepth,T=r.effectiveFilterHeight,N=r.effectiveFilterWidth,S=v-1-r.padInfo.front,D=N-1-r.padInfo.left,I=T-1-r.padInfo.top,P=Se(e.shape,"float32"),E=1/(l*p*f),L=this.bufferSync(t);for(let B=0;B=r.outDepth||Math.floor(ut)!==ut)continue;for(let bt=0;bt=r.outHeight||Math.floor(yt)!==yt)continue;for(let xt=0;xt=r.outWidth||Math.floor(kt)!==kt)continue;let Nt=L.get(B,ut,yt,kt,q);dt+=Nt}}}P.set(dt*E,B,H,Z,J,q)}return P.toTensor()}maxPool3d(t,e){return Ct(t,"maxPool3d"),this.pool3d(t,e,"max").toFloat()}maxPool3dPositions(t,e){let r=Se(e.outShape,"int32"),o=e.strideDepth,s=e.strideHeight,c=e.strideWidth,l=e.dilationDepth,p=e.dilationHeight,f=e.dilationWidth,m=e.effectiveFilterDepth,y=e.effectiveFilterHeight,b=e.effectiveFilterWidth,v=e.padInfo.front,T=e.padInfo.top,N=e.padInfo.left,S=this.bufferSync(t);for(let D=0;D=ft&&(ft=It,ut=yt*y*b+kt*y+At)}}}r.set(ut,D,P,q,it,I)}}}return r.toTensor()}maxPool3dBackprop(t,e,r,o){Ct([e,r],"maxPool3dBackprop");let s=this.maxPool3dPositions(e,o),c=o.strideDepth,l=o.strideHeight,p=o.strideWidth,f=o.dilationDepth,m=o.dilationHeight,y=o.dilationWidth,b=o.effectiveFilterDepth,v=o.effectiveFilterHeight,T=o.effectiveFilterWidth,N=b-1-o.padInfo.front,S=T-1-o.padInfo.left,D=v-1-o.padInfo.top,I=Se(e.shape,"float32"),P=this.bufferSync(s),E=this.bufferSync(t);for(let L=0;L=o.outDepth||Math.floor(ft)!==ft)continue;for(let ut=0;ut=o.outHeight||Math.floor(bt)!==bt)continue;for(let yt=0;yt=o.outWidth||Math.floor(xt)!==xt)continue;let kt=b*v*T-1-P.get(L,ft,bt,xt,B),Nt=dt*v*T+ut*T+yt,At=kt===Nt?1:0;if(At===0)continue;let It=E.get(L,ft,bt,xt,B);ht+=It*At}}}I.set(ht,L,q,H,Z,B)}return I.toTensor()}resizeBilinear(t,e,r,o){Ct(t,"resizeBilinear");let[s,c,l,p]=t.shape,f=this.readSync(t.dataId),m=new Float32Array(G([s,e,r,p])),y=[o&&e>1?c-1:c,o&&r>1?l-1:l],b=[o&&e>1?e-1:e,o&&r>1?r-1:r],v=0,T=y[0]/b[0],N=y[1]/b[1];for(let S=0;S1?s-1:s,r&&f>1?c-1:c],b=[r&&p>1?p-1:p,r&&f>1?f-1:f],v=y[0]/b[0],T=y[1]/b[1],N=this.readSync(t.dataId),S=0;for(let D=0;D1?c-1:c,o&&r>1?l-1:l],b=[o&&e>1?e-1:e,o&&r>1?r-1:r],v=y[0]/b[0],T=y[1]/b[1],N=0;for(let S=0;S1?s-1:s,r&&f>1?c-1:c],v=[r&&p>1?p-1:p,r&&f>1?f-1:f],T=b[0]/v[0],N=b[1]/v[1],S=1/T,D=1/N,I=Math.ceil(S)*2+2,P=Math.ceil(D)*2+2;for(let E=0;E=p)continue;let yt=L+bt*t.strides[1],xt=bt*T,kt=Math.min(s-1,r?Math.round(xt):Math.floor(xt));if(B!==kt)continue;for(let Nt=0;Nt=f)continue;let It=yt+At*t.strides[2],St=At*N,Lt=Math.min(c-1,r?Math.round(St):Math.floor(St));J===Lt&&(ft+=y[It+dt])}}m[it+dt]=ft}}}}return Oc(m,e.shape,e.dtype)}localResponseNormalization4D(t,e,r,o,s){Ct(t,"localResponseNormalization4D");let c=t.shape[3],l=c-1,p=this.readSync(t.dataId),f=t.size,m=new Float32Array(f);function y(b){let v=b%c,T=b-v+Math.max(0,v-e),N=b-v+Math.min(v+e,l),S=0;for(;T<=N;T++){let D=p[T];S+=D*D}return S}for(let b=0;b=0&&c[l]`Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${r}`),_(e>1,()=>`blockSize should be > 1 for depthToSpace, but was: ${e}`);let o=t.shape[0],s=t.shape[1],c=t.shape[2],l=t.shape[3],p=s*e,f=c*e,m=l/(e*e),y=this.readSync(t.dataId),b=new Float32Array(o*p*f*m),v=0;for(let T=0;TS[E]=0);let D=b.locToIndex(S),I=N.slice(-e.rank);m.forEach(E=>I[E]=0);let P=v.locToIndex(I);y[T]=o(l[D],p[P])}}return c.toTensor()}split(t,e,r){return u6(t,e,r)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}cropAndResize(t,e,r,o,s,c){let[l,p,f,m]=t.shape,y=e.shape[0],[b,v]=o,T=Se([y,b,v,m],"float32"),N=this.readSync(e.dataId),S=this.readSync(r.dataId),D=this.readSync(t.dataId),I=t.strides,P=T.strides;for(let E=0;E=l)continue;let it=b>1?(H-B)*(p-1)/(b-1):0,pt=v>1?(Z-q)*(f-1)/(v-1):0;for(let ht=0;ht1?B*(p-1)+ht*it:.5*(B+H)*(p-1);if(dt<0||dt>p-1){for(let ft=0;ft1?q*(f-1)+yt*pt:.5*(q+Z)*(f-1);if(xt<0||xt>f-1){for(let It=0;It1?q*(f-1)+ft*pt:.5*(q+Z)*(f-1);if(ut<0||ut>f-1){for(let xt=0;xt=t.size/l)throw new Error(`Invalid indices: ${v} does not index into ${t.shape}`);for(let N=0;N=o/s)throw new Error(`Invalid indices: ${S} does not index into ${r}`);for(let I=0;I{let{x:t}=n.inputs,e=n.backend,r=new Float32Array(G(t.shape));if(t.dtype!=="complex64"){let o=e.data.get(t.dataId).values;r=W$(o)}else{let o=e.data.get(t.dataId),s=o.complexTensorInfos.real,c=o.complexTensorInfos.imag,l=e.data.get(s.dataId).values,p=e.data.get(c.dataId).values;for(let f=0;f{let c=le(t,e),l=c.length,p=Yt(c),f=G(c),m=Ce(s,f),y=t.length,b=e.length,v=Yt(t),T=Yt(e),N=ra(t,c),S=ra(e,c);if(N.length+S.length===0)for(let D=0;DP[q]=0);let E=Kr(P,y,v),L=I.slice(-b);S.forEach(q=>L[q]=0);let B=Kr(L,b,T);m[D]=n(r[E],o[B])}return[m,c]}}function _o(n){let{inputs:t,backend:e}=n,{real:r,imag:o}=t,s=e.data.get(r.dataId).values,c=e.data.get(o.dataId).values,l=e.makeTensorInfo(r.shape,"complex64"),p=e.data.get(l.dataId);return p.complexTensorInfos={real:e.makeTensorInfo(r.shape,"float32",s),imag:e.makeTensorInfo(o.shape,"float32",c)},l}let y6={kernelName:lf,backendName:"cpu",kernelFunc:_o};function ba(n){let{inputs:t,backend:e}=n,{x:r}=t;return e.incRef(r.dataId),{dataId:r.dataId,shape:r.shape,dtype:r.dtype}}let b6={kernelName:bu,backendName:"cpu",kernelFunc:ba};function Zp(n){let{inputs:t,backend:e}=n,{input:r}=t,o=e.data.get(r.dataId).complexTensorInfos.real,s=e.data.get(o.dataId).values;return e.makeTensorInfo(o.shape,o.dtype,s)}let x6={kernelName:$f,backendName:"cpu",kernelFunc:Zp};function Qp(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{dtype:s}=r;if(s==="complex64"){if(o.dtype==="complex64")return ba({inputs:{x:o},backend:e});let c=xe(o.shape),l=Qp({inputs:{x:o},backend:e,attrs:{dtype:"float32"}}),p=_o({inputs:{real:l,imag:c},backend:e});return c.dispose(),e.disposeIntermediateTensorInfo(l),p}if(o.dtype==="complex64"){let c=Zp({inputs:{input:o},backend:e}),l=Qp({inputs:{x:c},backend:e,attrs:{dtype:s}});return e.disposeIntermediateTensorInfo(c),l}if(!sc(o.dtype,s)){let c=ba({inputs:{x:o},backend:e});return{dataId:c.dataId,shape:c.shape,dtype:s}}if(s==="int32"){let c=e.data.get(o.dataId).values,l=Int32Array.from(c);return e.makeTensorInfo(o.shape,"int32",l)}if(s==="bool"){let c=e.data.get(o.dataId).values,l=Qs([0],o.dtype),[p,f]=Ti((m,y)=>m!==y?1:0)(o.shape,[],c,l,"bool");return e.makeTensorInfo(f,"bool",p)}throw new Error(`Error in Cast: failed to cast ${o.dtype} to ${s}`)}let w6={kernelName:cc,backendName:"cpu",kernelFunc:Qp};function Qc(n,t,e,r){return e==null?({inputs:o,backend:s})=>{let{a:c,b:l}=o,p=s;Ct([c,l],n);let 
f=p.data.get(c.dataId).values,m=p.data.get(l.dataId).values,y=r||c.dtype,[b,v]=t(c.shape,l.shape,f,m,y);return p.makeTensorInfo(v,y,b)}:({inputs:o,backend:s})=>{let{a:c,b:l}=o,p=s;if(c.dtype==="complex64"||l.dtype==="complex64"){let f=Qp({inputs:{x:c},backend:p,attrs:{dtype:"complex64"}}),m=p.data.get(f.dataId),y=m.complexTensorInfos.real,b=m.complexTensorInfos.imag,v=p.data.get(y.dataId).values,T=p.data.get(b.dataId).values,N=Qp({inputs:{x:l},backend:p,attrs:{dtype:"complex64"}}),S=p.data.get(N.dataId),D=S.complexTensorInfos.real,I=S.complexTensorInfos.imag,P=p.data.get(D.dataId).values,E=p.data.get(I.dataId).values,[L,B,q]=e(c.shape,l.shape,v,T,P,E),H=p.makeTensorInfo(q,"float32",L),Z=p.makeTensorInfo(q,"float32",B),J=_o({inputs:{real:H,imag:Z},backend:p});return p.disposeIntermediateTensorInfo(f),p.disposeIntermediateTensorInfo(N),p.disposeIntermediateTensorInfo(H),p.disposeIntermediateTensorInfo(Z),J}else{let f=p.data.get(c.dataId).values,m=p.data.get(l.dataId).values,y=r||c.dtype,[b,v]=t(c.shape,l.shape,f,m,y);return p.makeTensorInfo(v,y,b)}}}function o1(n){return(t,e,r,o,s,c)=>{let l=le(t,e),p=G(l),f=l.length,m=Yt(l),y=Ce("float32",p),b=Ce("float32",p),v=ra(t,l),T=ra(e,l),N=ys(r,o),S=ys(s,c),D=t.length,I=Yt(t),P=e.length,E=Yt(e);if(v.length+T.length===0)for(let L=0;Lq[pt]=0);let H=Kr(q,D,I),Z=B.slice(-P);T.forEach(pt=>Z[pt]=0);let J=Kr(Z,P,E),it=n(N[H*2],N[H*2+1],S[J*2],S[J*2+1]);y[L]=it.real,b[L]=it.imag}return[y,b,l]}}let V$=Ti((n,t)=>n+t),v6=o1((n,t,e,r)=>({real:n+e,imag:t+r})),th=Qc(Hi,V$,v6),T6={kernelName:Hi,backendName:"cpu",kernelFunc:th};function tl(n){return(t,e,r)=>{let o=Ce(e,t.length);for(let s=0;s{let{x:c}=r;if(Ct(c,n),c.dtype==="string"||e==="string")throw new Error("unaryKernelFunc does not support string input/output");let l=s,p=l.data.get(c.dataId).values,f=G(c.shape),m=e||c.dtype,y=rr(m,f);for(let b=0;b{let{x:c}=r;if(Ct(c,n),c.dtype==="string"||e==="string")throw new Error("unaryKernelFunc does not support string input/output");let l=s,p=l.data.get(c.dataId).values,f=e||c.dtype,m=t(p,f,o);return l.makeTensorInfo(c.shape,f,m)}}let G$=tl(n=>Math.ceil(n)),k6=el(cu,G$),N6={kernelName:cu,backendName:"cpu",kernelFunc:k6};let U$=tl(n=>Math.exp(n)),_6=el(du,U$),C6={kernelName:du,backendName:"cpu",kernelFunc:_6};let q$=tl(n=>Math.expm1(n)),S6=el(mu,q$),$6={kernelName:mu,backendName:"cpu",kernelFunc:S6};let H$=tl(n=>Math.floor(n)),I6=el(gu,H$),E6={kernelName:gu,backendName:"cpu",kernelFunc:I6};let j$=tl(n=>Math.log(n)),D6=el(Tu,j$),A6={kernelName:Tu,backendName:"cpu",kernelFunc:D6};function K$(n,t,e,r){let o=Ce(r,G(e));for(let s=0;sl&&(l=f)}o[s]=l}return o}let X$=Ti((n,t)=>n*t),F6=o1((n,t,e,r)=>({real:n*e-t*r,imag:n*r+t*e})),Y$=Qc(pc,X$,F6),R6={kernelName:pc,backendName:"cpu",kernelFunc:Y$};let J$=Ti((n,t)=>n!==t?1:0),P6=Qc(Su,J$,null,"bool"),O6={kernelName:Su,backendName:"cpu",kernelFunc:P6};let Z$=tl(n=>1/Math.sqrt(n)),L6=el(Fu,Z$),M6={kernelName:Fu,backendName:"cpu",kernelFunc:L6};function Q$(n,t,e,r,o){let s=Yx(r,t,e),c=G(e),l=Yt(r);if(s){let f=Jx(t,l);return n.subarray(f,f+c)}let p=Ce(o,c);for(let f=0;fN+t[S]),T=Kr(v,r.length,l);p[f]=n[T]}return p}function s1(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{begin:s,size:c}=r;Ct(o,"slice");let[l,p]=Qf(o,s,c);Xx(o,l,p);let f=e.data.get(o.dataId).values,m=Q$(f,l,p,o.shape,o.dtype);return e.makeTensorInfo(p,o.dtype,m)}let B6={kernelName:If,backendName:"cpu",kernelFunc:s1};let tI=Ti((n,t)=>{let e=n-t;return e*e}),z6=Qc(fc,tI),W6={kernelName:fc,backendName:"cpu",kernelFunc:z6};let 
eI=Ti((n,t)=>n-t),V6=o1((n,t,e,r)=>({real:n-e,imag:t-r})),nI=Qc(dc,eI,V6),G6={kernelName:dc,backendName:"cpu",kernelFunc:nI};function i1(n,t,e,r,o){let s=t.length,c=G(t),l=Yt(t),p=Yt(o),f=Ce(e,G(o));for(let m=0;m{for(let S=0;Snew d6,1);let oI=Ee(hu,n=>n>=0?n:Math.exp(n)-1),H6={kernelName:hu,backendName:"cpu",kernelFunc:oI};let j6=Ti((n,t)=>n<0?t*n:n);function sI(n){let{inputs:t,backend:e}=n,{x:r,alpha:o}=t;Ct([r,o],"prelu");let s=e.data.get(r.dataId).values,c=e.data.get(o.dataId).values,[l,p]=j6(r.shape,o.shape,s,c,r.dtype);return e.makeTensorInfo(p,r.dtype,l)}let K6={kernelName:Sf,backendName:"cpu",kernelFunc:sI};let iI=Ee(Iu,n=>Math.max(0,n)),X6={kernelName:Iu,backendName:"cpu",kernelFunc:iI};let aI=Ee(Du,n=>Math.min(Math.max(0,n),6)),Y6={kernelName:Du,backendName:"cpu",kernelFunc:aI};function a1(n,t,e,r){if(e==="linear")return ba({inputs:{x:t},backend:n});if(e==="relu")return iI({inputs:{x:t},backend:n});if(e==="elu")return oI({inputs:{x:t},backend:n});if(e==="relu6")return aI({inputs:{x:t},backend:n});if(e==="prelu")return sI({inputs:{x:t,alpha:r},backend:n});throw new Error(`Activation ${e} has not been implemented for the CPU backend.`)}function Ho(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{shape:s}=r,c=G(o.shape),l=Ge(s,c),p=G(l);_(c===p,()=>`The new shape (${l}) has ${p} elements and the old shape (${o.shape}) has ${c} elements. The new shape and old shape must have the same number of elements.`),e.incRef(o.dataId);let f=e.data.get(o.dataId);if(f.complexTensorInfos!=null){let m=f.complexTensorInfos.real,y=f.complexTensorInfos.imag;m.shape=l,y.shape=l}return{dataId:o.dataId,shape:l,dtype:o.dtype}}let J6={kernelName:Eu,backendName:"cpu",kernelFunc:Ho};function cI(n){let{inputs:t,backend:e,attrs:r}=n,{a:o,b:s}=t,{transposeA:c,transposeB:l}=r;Ct([o,s],"matMul");let p=o.shape.length,f=s.shape.length,m=c?o.shape[p-2]:o.shape[p-1],y=l?s.shape[f-1]:s.shape[f-2],b=c?o.shape[p-1]:o.shape[p-2],v=l?s.shape[f-2]:s.shape[f-1],T=o.shape.slice(0,-2),N=s.shape.slice(0,-2),S=G(T),D=G(N),I=S===D||S===1||D===1;_(p>=2&&f>=2&&I,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. 
Got input batch dimensions of (${T}) and (${N}).`);let P=S>D?o.shape.slice(0,-2):s.shape.slice(0,-2),E=P.concat([b,v]);_(m===y,()=>`Error in matMul: inner shapes (${m}) and (${y}) of Tensors with shapes ${o.shape} and ${s.shape} and transposeA=${c} and transposeB=${l} must match.`);let L=c?[S,m,b]:[S,b,m],B=l?[D,v,y]:[D,y,v],q=Ho({inputs:{x:o},backend:e,attrs:{shape:L}}),H=Ho({inputs:{x:s},backend:e,attrs:{shape:B}}),Z=c?q.shape[1]:q.shape[2],J=c?q.shape[2]:q.shape[1],it=l?H.shape[1]:H.shape[2],pt=Math.max(S,D),ht=e.data.get(q.dataId).values,dt=e.data.get(H.dataId).values,ft=Yt(q.shape),ut=Yt(H.shape),[bt,yt,xt]=c?[ft[0],1,ft[1]]:[ft[0],ft[1],1],[kt,Nt,At]=l?[1,ut[1],ut[0]]:[ut[1],1,ut[0]],It=J*it,St=Se([pt,J,it],q.dtype),Lt=St.values,Ht=e.blockSize;for(let oe=0;oeMath.acos(n)),nK={kernelName:eu,backendName:"cpu",kernelFunc:eK};let rK=Ee(nu,n=>Math.acosh(n)),oK={kernelName:nu,backendName:"cpu",kernelFunc:rK};let sK=Ee(ru,n=>Math.asin(n)),iK={kernelName:ru,backendName:"cpu",kernelFunc:sK};let aK=Ee(ou,n=>Math.asinh(n)),cK={kernelName:ou,backendName:"cpu",kernelFunc:aK};let lK=Ee(su,n=>Math.atan(n)),uK={kernelName:su,backendName:"cpu",kernelFunc:lK};let pK=Ee(iu,n=>Math.atanh(n)),hK={kernelName:iu,backendName:"cpu",kernelFunc:pK};function c1(n,t,e,r,o,s){let c=o.strideHeight,l=o.strideWidth,p=o.dilationHeight,f=o.dilationWidth,m=o.effectiveFilterHeight,y=o.effectiveFilterWidth,b=o.padInfo.top,v=o.padInfo.left,T=s==="max"?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,N=Se(o.outShape,e),S=N.values,D=o.outShape[1]*o.outShape[2]*o.outShape[3],I=o.outShape[2]*o.outShape[3],P=o.outShape[3];for(let E=0;Ebt?bt=Lt:s==="avg"&&(yt+=Lt,xt++)}if(isNaN(bt))break}let kt=pt+ht*P+q;S[kt]=s==="avg"?yt/xt:bt}}}return N}function lI(n,t,e,r,o=!1,s=!1){let c=Se(r.outShape,"int32"),l=r.strideHeight,p=r.strideWidth,f=r.dilationHeight,m=r.dilationWidth,y=r.effectiveFilterHeight,b=r.effectiveFilterWidth,v=r.padInfo.top,T=r.padInfo.left,N=Se(t,e,n);for(let S=0;SJ&&(J=ut,o?it=s?((S*r.inHeight+pt)*r.inWidth+dt)*r.inChannels+D:(pt*r.inWidth+dt)*r.inChannels+D:it=ht*b+ft)}}c.set(it,S,I,B,D)}}return c}function fK(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t;Ct(o,"avgPool");let{filterSize:s,strides:c,pad:l,dimRoundingMode:p}=r,f=1;_(fn(c,f),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${c} and dilations '${f}'`);let m=Kn(o.shape,s,c,f,l,p),y;if(m.filterWidth===1&&m.filterHeight===1&<(m.inShape,m.outShape))y=ba({inputs:{x:o},backend:e});else{let b=e.data.get(o.dataId).values,v=Yt(o.shape),T=c1(b,o.shape,o.dtype,v,m,"avg");y=e.makeTensorInfo(m.outShape,o.dtype,T.values)}return y}let dK={kernelName:au,backendName:"cpu",kernelFunc:fK};function mK(n){let{inputs:t,backend:e,attrs:r}=n,{dy:o,input:s}=t,c=s;Ct([o,s],"avgPoolBackprop");let{filterSize:l,strides:p,pad:f}=r,m=Kn(c.shape,l,p,1,f),y=m.strideHeight,b=m.strideWidth,v=m.filterHeight,T=m.filterWidth,N=m.dilationHeight,S=m.dilationWidth,D=m.effectiveFilterHeight,I=m.effectiveFilterWidth,P=I-1-m.padInfo.left,E=D-1-m.padInfo.top,L=Se(c.shape,"float32"),B=1/(v*T),q=e.data.get(o.dataId).values,H=Se(o.shape,"float32",q);for(let Z=0;Z=m.outHeight||Math.floor(bt)!==bt)continue;for(let yt=0;yt=m.outWidth||Math.floor(xt)!==xt)continue;let kt=H.get(Z,bt,xt,J);ft+=kt}}L.set(ft*B,Z,it,pt,J)}return e.makeTensorInfo(L.shape,L.dtype,L.values)}let gK={kernelName:af,backendName:"cpu",kernelFunc:mK};function yK(n){let{inputs:t,backend:e,attrs:r}=n,{x:o,scale:s,offset:c,mean:l,variance:p}=t;_(l.shape.length===p.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),_(c==null||l.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),_(s==null||l.shape.length===s.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks."),Ct([o,l,p,s,c],"batchNorm");let{varianceEpsilon:f}=r;f==null&&(f=.001);let m=e.data.get(o.dataId).values,y=e.data.get(l.dataId).values,b=e.data.get(p.dataId).values,v=s?e.data.get(s.dataId).values:new Float32Array([1]),T=c?e.data.get(c.dataId).values:new Float32Array([0]),N=new Float32Array(m.length),S=T.length,D=v.length,I=b.length,P=y.length,E=0,L=0,B=0,q=0;for(let H=0;H=S&&(E=0),L>=P&&(L=0),B>=D&&(B=0),q>=I&&(q=0);return e.makeTensorInfo(o.shape,o.dtype,N)}let bK={kernelName:yu,backendName:"cpu",kernelFunc:yK};let xK=Ee(lu,(n,t)=>{let e=t;return n>e.clipValueMax?e.clipValueMax:nv.shape),s);if(G(c)===0)return e.makeTensorInfo(c,t[0].dtype,[]);let l=t.filter(v=>G(v.shape)>0);if(l.length===1)return l[0];let p=l.map(v=>v.shape);if(id(p,s),l[0].dtype==="complex64"){let v=l.map(I=>Zp({inputs:{input:I},backend:e})),T=l.map(I=>Em({inputs:{input:I},backend:e})),N=eh({inputs:v,backend:e,attrs:{axis:s}}),S=eh({inputs:T,backend:e,attrs:{axis:s}}),D=_o({inputs:{real:N,imag:S},backend:e});return v.forEach(I=>e.disposeIntermediateTensorInfo(I)),T.forEach(I=>e.disposeIntermediateTensorInfo(I)),e.disposeIntermediateTensorInfo(N),e.disposeIntermediateTensorInfo(S),D}let f=l.map(v=>{let T=G(v.shape.slice(s)),N=[-1,T];return Ho({inputs:{x:v},backend:e,attrs:{shape:N}})});c=hs(f.map(v=>v.shape),1);let m=Ce(l[0].dtype,G(c));if(f[0].shape[0]===1){let v=0;f.forEach(T=>{let N=e.data.get(T.dataId).values,S=G(T.shape);m.set(N,v),v+=S})}else{let v=0;f.forEach(T=>{let N=e.data.get(T.dataId).values,S=0;for(let D=0;Dv.shape),s),b=e.makeTensorInfo(y,t[0].dtype,m);return f.forEach(v=>e.disposeIntermediateTensorInfo(v)),b}let TK={kernelName:uu,backendName:"cpu",kernelFunc:eh};function uI(n){let{inputs:t,backend:e,attrs:r}=n,{x:o,filter:s}=t,{strides:c,pad:l,dataFormat:p,dilations:f,dimRoundingMode:m}=r;Ct([o,s],"conv2d");let y=si(p),b=Un(o.shape,s.shape,c,f,l,m,!1,y),v=b.filterHeight,T=b.filterWidth,N=b.dilationHeight,S=b.dilationWidth,D=b.padInfo.left,I=b.padInfo.top,P=b.dataFormat==="channelsLast",E=new 
hn(b.outShape,o.dtype),L=Yt(o.shape),B=Yt(s.shape),q=L[0],H=P?L[1]:L[2],Z=P?L[2]:1,J=P?1:L[1],it=E.strides[0],pt=P?E.strides[1]:E.strides[2],ht=P?E.strides[2]:1,dt=P?1:E.strides[1],ft=e.data.get(o.dataId).values,ut=e.data.get(s.dataId).values,bt=E.values;for(let yt=0;yt=b.inHeight)continue;let Ht=St*B[0],oe=xt+Lt*H;for(let de=0;de=b.inWidth)continue;let on=Ht+pe*B[1],sn=oe+Xe*Z,Ae=on;for(let Ye=0;Ye=f.inDepth)continue;let yt=ut*Z[0],xt=it+bt*H[1];for(let kt=0;kt=f.inHeight)continue;let Lt=yt+It*Z[1],Ht=xt+St*H[2];for(let oe=0;oe=f.inWidth)continue;let Xe=Lt+we*Z[2],on=Ht+pe*f.inChannels,sn=Xe;for(let Ae=0;AeMath.cos(n)),PK={kernelName:lc,backendName:"cpu",kernelFunc:RK};let OK=Ee(pu,n=>Math.cosh(n)),LK={kernelName:pu,backendName:"cpu",kernelFunc:OK};function pI(n){let{inputs:t,backend:e,attrs:r}=n,{x:o,filter:s}=t,{strides:c,pad:l,dilations:p,dimRoundingMode:f}=r;Ct([o,s],"depthwiseConv2DNative");let m=Yt(o.shape),y=Yt(s.shape),b=p;b==null&&(b=[1,1]),_(fn(c,b),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${c} and dilations '${b}'`);let v=Un(o.shape,s.shape,c,b,l,f,!0),{filterHeight:T,filterWidth:N,dilationHeight:S,dilationWidth:D,padInfo:I}=v,P=I.left,E=I.top,L=v.outChannels/v.inChannels,B=new hn(v.outShape,o.dtype),q=e.data.get(o.dataId).values,H=e.data.get(s.dataId).values,Z=B.values;for(let J=0;J=v.inHeight)continue;let yt=ut*y[0],xt=it+bt*m[1];for(let kt=0;kt=v.inWidth)continue;let Lt=yt+It*y[1],Ht=xt+St*v.inChannels,oe=Nt,de=Lt;for(let ie=0;ie{let{x:r,filter:o}=n,{strides:s,pad:c,dilations:l}=e,p=t,f=p.data.get(r.dataId).values,m=r.shape.length,y=p.data.get(o.dataId).values,b=o.shape.length,{batchSize:v,inHeight:T,inWidth:N,inChannels:S,outHeight:D,outWidth:I,padInfo:P,strideHeight:E,strideWidth:L,filterHeight:B,filterWidth:q,dilationHeight:H,dilationWidth:Z,outShape:J}=od(r.shape,o.shape,s,c,"NHWC",l),it=G(J),pt=J.length,ht=rr(r.dtype,it);for(let ft=0;ft=0&&St=0&&HtNt&&(Nt=ie)}}}let At=Kr([ft,ut,yt,kt],pt,Yt(J));ht[At]=Nt}}}let dt=p.write(Qs(ht,r.dtype),J,r.dtype);return{dataId:dt,shape:J,dtype:r.dtype}}};let UK={kernelName:gf,backendName:"cpu",kernelFunc:({inputs:n,backend:t,attrs:e})=>{let{x:r,filter:o,dy:s}=n,{strides:c,pad:l,dilations:p}=e,f=t,m=Er(r.shape,f.data.get(r.dataId).values),y=Er(o.shape,f.data.get(o.dataId).values),{batchSize:b,inHeight:v,inWidth:T,inChannels:N,outHeight:S,outWidth:D,padInfo:I,strideHeight:P,strideWidth:E,filterHeight:L,filterWidth:B,dilationHeight:q,dilationWidth:H,outShape:Z}=od(r.shape,o.shape,c,l,"NHWC",p);_(s.rank===Z.length,()=>`Error in ${gf}, dy must have the same rank as output ${Z.length}, but got ${s.rank}`);let J=Er(Z,f.data.get(s.dataId).values),it=Lb(o.shape,o.dtype);for(let ht=0;ht=0&&It=0&&Ltxt&&(xt=Ht,kt=At,Nt=St)}}}it[kt][Nt][yt]+=J[ht][dt][ut][yt]}}}let pt=f.write(Qs(it,r.dtype),o.shape,o.dtype);return{dataId:pt,shape:o.shape,dtype:o.dtype}}};let qK={kernelName:mf,backendName:"cpu",kernelFunc:({inputs:n,backend:t,attrs:e})=>{let{x:r,filter:o,dy:s}=n,{strides:c,pad:l,dilations:p}=e,f=t,m=Er(r.shape,f.data.get(r.dataId).values),y=Er(o.shape,f.data.get(o.dataId).values),{batchSize:b,inHeight:v,inWidth:T,inChannels:N,outHeight:S,outWidth:D,padInfo:I,strideHeight:P,strideWidth:E,filterHeight:L,filterWidth:B,dilationHeight:q,dilationWidth:H,outShape:Z}=od(r.shape,o.shape,c,l,"NHWC",p);_(s.rank===Z.length,()=>`Error in ${mf}, dy must have the same rank as output ${Z.length}, but got ${s.rank}`);let J=Er(Z,f.data.get(s.dataId).values),it=Lb(r.shape,r.dtype);for(let 
ht=0;ht=0&&It=0&&Ltxt&&(xt=Ht,kt=It,Nt=Lt)}}}it[ht][kt][Nt][yt]+=J[ht][dt][ut][yt]}}}let pt=f.write(Qs(it,r.dtype),r.shape,r.dtype);return{dataId:pt,shape:r.shape,dtype:r.dtype}}};let HK=Ti((n,t)=>n/t),jK=Qc(uc,HK),l1={kernelName:uc,backendName:"cpu",kernelFunc:jK};let KK=Yw,XK=Jw,YK=Zw,JK=Qw,ZK=tv,QK=ev,t5=Ee(fu,n=>{let t=Math.sign(n),e=Math.abs(n),r=1/(1+KK*e);return t*(1-((((QK*r+ZK)*r+JK)*r+YK)*r+XK)*r*Math.exp(-e*e))}),e5={kernelName:fu,backendName:"cpu",kernelFunc:t5};function hI(n,t,e){let r=n.shape,o=r[0],s=r[1],c=e.data.get(n.dataId),l=c.complexTensorInfos.real,p=c.complexTensorInfos.imag,f=[o,s],m=G(f),y=Ce("float32",m),b=Ce("float32",m);for(let S=0;S{let{image:r}=n,o=e,s=Ce(r.dtype,G(r.shape)),[c,l,p,f]=r.shape,m=o.data.get(r.dataId).values;for(let b=0;b=0&&LNumber.isFinite(n)?1:0,"bool"),b5={kernelName:xu,backendName:"cpu",kernelFunc:y5};let x5=Ee(wu,n=>Math.abs(n)===Infinity?1:0,"bool"),w5={kernelName:wu,backendName:"cpu",kernelFunc:x5};let v5=Ee(vu,n=>Number.isNaN(n)?1:0,"bool"),T5={kernelName:vu,backendName:"cpu",kernelFunc:v5};let k5=Ee(ku,n=>Math.log1p(n)),N5={kernelName:ku,backendName:"cpu",kernelFunc:k5};let _5=Ee(vf,n=>n?0:1,"bool"),C5={kernelName:vf,backendName:"cpu",kernelFunc:_5};let S5={kernelName:Nu,backendName:"cpu",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{reductionIndices:o,keepDims:s}=t,c=e,l=r.shape,p=l.length,f=Vt(o,l),m=f,y=ir(m,p),b=c.data.get(r.dataId).values;if(y!=null){let P=new Array(p);for(let E=0;E`Error in maxPool: Either strides or dilations must be 1. Got strides ${c} and dilations '${f}'`);let m=Kn(o.shape,s,c,f,l,p),y;if(m.filterWidth===1&&m.filterHeight===1&<(m.inShape,m.outShape))y=ba({inputs:{x:o},backend:e});else{let b=e.data.get(o.dataId).values,v=Yt(o.shape),T=c1(b,o.shape,o.dtype,v,m,"max");y=e.makeTensorInfo(m.outShape,o.dtype,T.values)}return y}let I5={kernelName:_u,backendName:"cpu",kernelFunc:$5};function E5(n){let{inputs:t,backend:e,attrs:r}=n,{dy:o,input:s,output:c}=t,l=s;Ct([s,c],"maxPoolBackprop");let{filterSize:p,strides:f,pad:m,dimRoundingMode:y}=r,b=Kn(l.shape,p,f,1,m,y),v=e.data.get(l.dataId).values,T=Se(b.outShape,l.dtype,lI(v,l.shape,l.dtype,b).values),N=b.strideHeight,S=b.strideWidth,D=b.dilationHeight,I=b.dilationWidth,P=b.effectiveFilterHeight,E=b.effectiveFilterWidth,L=E-1-b.padInfo.left,B=P-1-b.padInfo.top,q=Se(l.shape,"float32"),H=e.data.get(o.dataId).values,Z=Se(o.shape,"float32",H);for(let J=0;J=b.outHeight||Math.floor(yt)!==yt)continue;for(let xt=0;xt=b.outWidth||Math.floor(kt)!==kt)continue;let Nt=P*E-1-T.get(J,yt,kt,it),At=bt*E+xt,It=Nt===At?1:0;if(It===0)continue;let St=Z.get(J,yt,kt,it);ut+=St*It}}q.set(ut,J,pt,ht,it)}return e.makeTensorInfo(q.shape,q.dtype,q.values)}let D5={kernelName:Tf,backendName:"cpu",kernelFunc:E5};function A5(n,t,e,r,o){let s=Yt(t),c=c1(n,t,e,s,o,"max"),l=lI(n,t,e,o,!0,r);return[c.values,l.values]}let F5={kernelName:kf,backendName:"cpu",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{filterSize:o,strides:s,pad:c,includeBatchInIndex:l}=t,p=e;Ct(r,"MaxPoolWithArgmax");let f=p.data.get(r.dataId).values,m=Kn(r.shape,o,s,[1,1],c),[y,b]=A5(f,r.shape,r.dtype,l,m),v=p.write(y,m.outShape,r.dtype),T=p.write(b,m.outShape,r.dtype);return[{dataId:v,shape:m.outShape,dtype:r.dtype},{dataId:T,shape:m.outShape,dtype:"int32"}]}};function R5(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{paddings:s,mode:c}=r;Ct(o,"mirrorPad");let 
l=s.map((P,E)=>P[0]+o.shape[E]+P[1]),p=s.map(P=>P[0]),f=s.map((P,E)=>P[0]+o.shape[E]),m=c==="reflect"?0:1,y=e.data.get(o.dataId).values,b=o.shape.length,v=Yt(o.shape),T=G(l),N=l.length,S=Yt(l),D=Ce(o.dtype,T);for(let P=0;P=f[B]&&(E[B]=(f[B]-1)*2-E[B]+m);E=E.map((B,q)=>B-p[q]);let L=Kr(E,b,v);D[P]=y[L]}let I=e.write(D,l,o.dtype);return{dataId:I,shape:l,dtype:o.dtype}}let P5={kernelName:Cu,backendName:"cpu",kernelFunc:R5};let O5=Ld,L5={kernelName:Nf,backendName:"cpu",kernelFunc:({inputs:n,backend:t,attrs:e})=>{let{boxes:r,scores:o}=n,{maxOutputSize:s,iouThreshold:c,scoreThreshold:l,padToMaxOutputSize:p}=e,f=t;Ct(r,"NonMaxSuppressionPadded");let m=f.data.get(r.dataId).values,y=f.data.get(o.dataId).values,{selectedIndices:b,validOutputs:v}=O5(m,y,s,c,l,p);return[b,v]}};let M5=Md,B5={kernelName:_f,backendName:"cpu",kernelFunc:({inputs:n,backend:t,attrs:e})=>{let{boxes:r,scores:o}=n,{maxOutputSize:s,iouThreshold:c,scoreThreshold:l,softNmsSigma:p}=e,f=t;Ct(r,"NonMaxSuppressionWithScore");let m=f.data.get(r.dataId).values,y=f.data.get(o.dataId).values,b=s,v=c,T=l,N=p,{selectedIndices:S,selectedScores:D}=M5(m,y,b,v,T,N);return[S,D]}};function z5(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{paddings:s,constantValue:c}=r;Ct(o,"pad");let l=s.map((I,P)=>I[0]+o.shape[P]+I[1]),p=s.map(I=>I[0]),f=e.data.get(o.dataId).values,m=G(o.shape),y=o.shape.length,b=Yt(o.shape),v=G(l),T=l.length,N=Yt(l),S=Ce(o.dtype,v);c!==0&&S.fill(c);for(let I=0;IB+p[q]),L=Kr(E,T,N);S[L]=f[I]}let D=e.write(S,l,o.dtype);return{dataId:D,shape:l,dtype:o.dtype}}let fI={kernelName:Cf,backendName:"cpu",kernelFunc:z5};let W5=Ee($u,n=>1/n),V5={kernelName:$u,backendName:"cpu",kernelFunc:W5};let G5={kernelName:Rf,backendName:"cpu",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{image:r}=n,{radians:o,fillValue:s,center:c}=t,l=e,p=Ce(r.dtype,G(r.shape)),[f,m,y,b]=r.shape,[v,T]=jw(c,m,y),N=255,S=Math.sin(o),D=Math.cos(o),I=l.data.get(r.dataId).values;for(let E=0;E=0&&dt=0&&ft{let t=Math.floor(n);return n-t<.5?Math.floor(n):n-t>.5?Math.ceil(n):t%2===0?t:t+1}),q5={kernelName:Au,backendName:"cpu",kernelFunc:U5};let H5=zd,j5=Wd,K5=Ee(Ru,n=>n>=0?j5*n:H5*(Math.exp(n)-1)),X5={kernelName:Ru,backendName:"cpu",kernelFunc:K5};let Y5=Ee(Lu,n=>1/(1+Math.exp(-n))),J5={kernelName:Lu,backendName:"cpu",kernelFunc:Y5};let Z5=Ee(Ou,n=>n<0?-1:n>0?1:0),Q5={kernelName:Ou,backendName:"cpu",kernelFunc:Z5};let t8=Ee(hc,n=>Math.sin(n)),e8={kernelName:hc,backendName:"cpu",kernelFunc:t8};let n8=Ee(Pu,n=>Math.sinh(n)),r8={kernelName:Pu,backendName:"cpu",kernelFunc:n8};let o8=11920928955078125e-23,dI=Math.log(o8)+2,s8=Ee(Mu,n=>{let t=n>-dI,e=nMath.sqrt(n)),p8={kernelName:Bu,backendName:"cpu",kernelFunc:u8};let h8={kernelName:Df,backendName:"cpu",kernelFunc:({inputs:n,backend:t})=>{let{x:e}=n,r=t;Ct(e,"square");let o=r.data.get(e.dataId).values,s=new Float32Array(o.length);for(let l=0;l{let e=t;return isNaN(n)?NaN:n>0?1:e.alpha}),d8={kernelName:Vu,backendName:"cpu",kernelFunc:f8};let m8=Ee(mc,n=>Math.tan(n)),g8={kernelName:mc,backendName:"cpu",kernelFunc:m8};let y8=Ee(zu,n=>Math.tanh(n)),b8={kernelName:zu,backendName:"cpu",kernelFunc:y8};function x8(n){let{inputs:t,attrs:e,backend:r}=n,{axis:o}=e,{x:s}=t;Ct(s,"unique");let c=r.data.get(s.dataId).values,{outputValues:l,outputShape:p,indices:f}=rI(c,o,s.shape,s.dtype);return[r.makeTensorInfo(p,s.dtype,l),r.makeTensorInfo([f.length],"int32",f)]}let w8={kernelName:Af,backendName:"cpu",kernelFunc:x8};let 
v8=[tK,g6,nK,oK,T6,iK,cK,uK,hK,dK,gK,Z6,bK,w6,N6,wK,y6,TK,_K,SK,kK,DK,FK,IK,PK,LK,MK,zK,VK,GK,qK,UK,l1,H6,e5,C6,$6,i5,c5,u5,E6,h5,d5,b6,g5,vK,b5,w5,T5,A6,N5,C5,I5,D5,F5,S5,P5,R6,L5,B5,O6,fI,K6,x6,V5,X6,Y6,J6,G5,q5,M6,X5,J5,Q5,e8,r8,B6,i8,l8,p8,h8,W6,d8,G6,g8,b8,a8,w8];for(let n of v8)Bf(n);let ki={},p1={alpha:!1,antialias:!1,premultipliedAlpha:!1,preserveDrawingBuffer:!1,depth:!1,stencil:!1,failIfMajorPerformanceCaveat:!0};function Wst(n){delete ki[n]}function T8(n,t){ki[n]=t}function jo(n){if(!(n in ki)){let e=N8(n);if(e!==null)ki[n]=e;else return console.log("Could not get context for WebGL version",n),null}let t=ki[n];return t.isContextLost()?(delete ki[n],jo(n)):(t.disable(t.DEPTH_TEST),t.disable(t.STENCIL_TEST),t.disable(t.BLEND),t.disable(t.DITHER),t.disable(t.POLYGON_OFFSET_FILL),t.disable(t.SAMPLE_COVERAGE),t.enable(t.SCISSOR_TEST),t.enable(t.CULL_FACE),t.cullFace(t.BACK),ki[n])}function k8(n){if(typeof OffscreenCanvas!="undefined"&&n===2)return new OffscreenCanvas(300,150);if(typeof document!="undefined")return document.createElement("canvas");throw new Error("Cannot create a canvas in this context")}function N8(n){if(n!==1&&n!==2)throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");let t=k8(n);return t.addEventListener("webglcontextlost",e=>{e.preventDefault(),delete ki[n]},!1),n===1?t.getContext("webgl",p1)||t.getContext("experimental-webgl",p1):t.getContext("webgl2",p1)}var nh;(function(n){n[n.DENSE=0]="DENSE",n[n.SHARED_BATCH=1]="SHARED_BATCH"})(nh||(nh={}));var Mr;(function(n){n[n.RENDER=0]="RENDER",n[n.UPLOAD=1]="UPLOAD",n[n.PIXELS=2]="PIXELS",n[n.DOWNLOAD=3]="DOWNLOAD"})(Mr||(Mr={}));var On;(function(n){n[n.UNPACKED_FLOAT16=0]="UNPACKED_FLOAT16",n[n.UNPACKED_FLOAT32=1]="UNPACKED_FLOAT32",n[n.PACKED_4X1_UNSIGNED_BYTE=2]="PACKED_4X1_UNSIGNED_BYTE",n[n.PACKED_2X2_FLOAT32=3]="PACKED_2X2_FLOAT32",n[n.PACKED_2X2_FLOAT16=4]="PACKED_2X2_FLOAT16"})(On||(On={}));function rh(n,t){return[t,n]}function _8(n,t){return n*t}function Vst(n,t){return[t*4,n]}function oh(n){let t=G(n),e=Math.ceil(t/4);return Gt(e)}function Gst(n,t){if(n%t!==0)throw new Error(`unpackedSize (${n}) must be a multiple of ${t}`);return n/t}function Ust(n,t,e){let r=n.length*e/4;if(t.length= ${r}`);let o=0;for(let s=0;sn.getExtension(t),'Extension "'+t+'" not supported on this browser.')}function A8(n,t){let e=ks(n,()=>n.createShader(n.VERTEX_SHADER),"Unable to create vertex WebGLShader.");if(Rt(n,()=>n.shaderSource(e,t)),Rt(n,()=>n.compileShader(e)),n.getShaderParameter(e,n.COMPILE_STATUS)===!1)throw console.log(n.getShaderInfoLog(e)),new Error("Failed to compile vertex shader.");return e}function F8(n,t){let e=ks(n,()=>n.createShader(n.FRAGMENT_SHADER),"Unable to create fragment WebGLShader.");if(Rt(n,()=>n.shaderSource(e,t)),Rt(n,()=>n.compileShader(e)),n.getShaderParameter(e,n.COMPILE_STATUS)===!1)throw P8(t,n.getShaderInfoLog(e)),new Error("Failed to compile fragment shader.");return e}let R8=/ERROR: [0-9]+:([0-9]+):/g;function P8(n,t){let e=R8.exec(t);if(e==null){console.log(`Couldn't parse line number in error: ${t}`),console.log(n);return}let r=+e[1],o=n.split(` +`),s=o.length.toString().length+2,c=o.map((y,b)=>fe((b+1).toString(),s)+y),l=0;for(let y=0;ye.createProgram(),"Unable to create WebGLProgram.")}function TK(e,t){if(Ee(e,()=>e.linkProgram(t)),e.getProgramParameter(t,e.LINK_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function 
yS(e,t){if(Ee(e,()=>e.validateProgram(t)),e.getProgramParameter(t,e.VALIDATE_STATUS)===!1)throw console.log(e.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function AK(e,t){const n=cr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n)),Ee(e,()=>e.bufferData(e.ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function vK(e,t){const n=cr(e,()=>e.createBuffer(),"Unable to create WebGLBuffer");return Ee(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,n)),Ee(e,()=>e.bufferData(e.ELEMENT_ARRAY_BUFFER,t,e.STATIC_DRAW)),n}function Zte(){return oe().getNumber("WEBGL_VERSION")===2?1:4}function NK(e){return cr(e,()=>e.createTexture(),"Unable to create WebGLTexture.")}function CK(e,t){const n=oe().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(e<=0||t<=0){const s=`[${e}x${t}]`;throw new Error("Requested texture size "+s+" is invalid.")}if(e>n||t>n){const s=`[${e}x${t}]`,i=`[${n}x${n}]`;throw new Error("Requested texture size "+s+" greater than WebGL maximum on this browser / GPU "+i+".")}}function RK(e){return cr(e,()=>e.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function q0(e,t,n,s,i,o,a){const c=e.getAttribLocation(t,n);return c===-1?!1:(Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,s)),Ee(e,()=>e.vertexAttribPointer(c,i,e.FLOAT,!1,o,a)),Ee(e,()=>e.enableVertexAttribArray(c)),!0)}function OK(e,t,n){K0(e,n),Ee(e,()=>e.activeTexture(e.TEXTURE0+n)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t))}function Qte(e,t){K0(e,t),Ee(e,()=>e.activeTexture(e.TEXTURE0+t)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function EK(e,t,n){return cr(e,()=>e.getUniformLocation(t,n),'uniform "'+n+'" not present in program.')}function DK(e,t,n){return e.getUniformLocation(t,n)}function kK(e,t,n,s){Ee(e,()=>OK(e,t,s)),Ee(e,()=>e.uniform1i(n,s))}function ene(e){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Ee(e,()=>e.viewport(0,0,e.canvas.width,e.canvas.height)),Ee(e,()=>e.scissor(0,0,e.canvas.width,e.canvas.height))}function bS(e,t,n){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,n)),Ee(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,t,0))}function j0(e,t){Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,t)),Ee(e,()=>e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,null,0))}function Nm(e){const t=e.checkFramebufferStatus(e.FRAMEBUFFER);if(t!==e.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+FK(e,t))}function FK(e,t){switch(t){case e.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case e.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case e.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function cr(e,t,n){const s=Ee(e,()=>t());if(s==null)throw new Error(n);return s}function K0(e,t){const n=e.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,s=t+e.TEXTURE0;if(sn){const i=`[gl.TEXTURE0, gl.TEXTURE${n}]`;throw new Error(`textureUnit must be in ${i}.`)}}function mc(e,t=2){return P(e.slice(0,e.length-t))}function fc(e){if(e.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[e.length>1?e[e.length-2]:1,e[e.length-1]]}function wS(e){let t=[1,1,1];const n=e.length===0||e.length===1&&e[0]===1;return n||(t=[mc(e),...fc(e)]),t}function _K(e,t=!1){let n=oe().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(n=n*2,e=e.map((i,o)=>o>=e.length-2?T(e[o]):e[o]),e.length===1&&(e=[2,e[0]])),e.length!==2){const i=ln(e);e=i.newShape}let 
s=P(e);if(e.length<=1&&s<=n)return[1,s];if(e.length===2&&e[0]<=n&&e[1]<=n)return e;if(e.length===3&&e[0]*e[1]<=n&&e[2]<=n)return[e[0]*e[1],e[2]];if(e.length===3&&e[0]<=n&&e[1]*e[2]<=n)return[e[0],e[1]*e[2]];if(e.length===4&&e[0]*e[1]*e[2]<=n&&e[3]<=n)return[e[0]*e[1]*e[2],e[3]];if(e.length===4&&e[0]<=n&&e[1]*e[2]*e[3]<=n)return[e[0],e[1]*e[2]*e[3]];if(t){const i=mc(e);let o=2,a=2;return e.length&&([o,a]=fc(e)),s=i*(o/2)*(a/2),Ve(s).map(c=>c*2)}return Ve(s)}function Cm(e){return e%2===0}function Rm(e,t){if(e=e.slice(-2),t=t.slice(-2),ae(e,t))return!0;if(!e.length||!t.length)return!0;if(e[0]===0||e[1]===0||t[0]===0||t[1]===0)return!0;if(e.length!==t.length){const n=e.slice(-1)[0],s=t.slice(-1)[0];if(n===s)return!0;if(Cm(n)&&Cm(s)&&(e[0]===1||t[0]===1))return!0}return e[1]===t[1]&&Cm(e[0])&&Cm(t[0])}let Om,Em;function WK(e){if(Om==null){const t=ki(e);Om=t.getParameter(t.MAX_TEXTURE_SIZE)}return Om}function tne(){Om=null}function nne(){Em=null}function $K(e){if(Em==null){const t=ki(e);Em=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,Em)}function UK(e){if(e===0)return 0;let t;const n=ki(e);return Vs(n,"EXT_disjoint_timer_query_webgl2")&&e===2?t=2:Vs(n,"EXT_disjoint_timer_query")?t=1:t=0,t}function Vs(e,t){const n=e.getExtension(t);return n!=null}function X0(e){try{const t=ki(e);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function BK(e){if(e===0)return!1;const t=ki(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1}else if(!Vs(t,"EXT_color_buffer_float"))return!1;const n=LS(t);return n}function MK(e){if(e===0)return!1;const t=ki(e);if(e===1){if(!Vs(t,"OES_texture_float"))return!1;if(!Vs(t,"WEBGL_color_buffer_float"))return!1}else{if(Vs(t,"EXT_color_buffer_float"))return LS(t);const s="EXT_color_buffer_half_float";if(Vs(t,s)){const i=t.getExtension(s);return PK(t,i)}return!1}const n=LS(t);return n}function LS(e){const t=gS(e),n=e.createTexture();e.bindTexture(e.TEXTURE_2D,n);const s=1,i=1;e.texImage2D(e.TEXTURE_2D,0,t.internalFormatFloat,s,i,0,t.textureFormatFloat,t.textureTypeFloat,null);const o=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,o),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,n,0);const a=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(n),e.deleteFramebuffer(o),a}function PK(e,t){const n=gS(e,t),s=e.createTexture();e.bindTexture(e.TEXTURE_2D,s);const i=1,o=1;e.texImage2D(e.TEXTURE_2D,0,n.internalFormatHalfFloat,i,o,0,n.textureFormatFloat,n.textureTypeHalfFloat,null);const a=e.createFramebuffer();e.bindFramebuffer(e.FRAMEBUFFER,a),e.framebufferTexture2D(e.FRAMEBUFFER,e.COLOR_ATTACHMENT0,e.TEXTURE_2D,s,0);const c=e.checkFramebufferStatus(e.FRAMEBUFFER)===e.FRAMEBUFFER_COMPLETE;return e.bindTexture(e.TEXTURE_2D,null),e.bindFramebuffer(e.FRAMEBUFFER,null),e.deleteTexture(s),e.deleteFramebuffer(a),c}function zK(e){if(e!==2)return!1;const t=ki(e),n=t.fenceSync!=null;return n}function du(e,t){Array.isArray(e)||(e=[e]),e.forEach(n=>{n!=null&&A(n.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}const 
Ge=oe();Ge.registerFlag("HAS_WEBGL",()=>Ge.getNumber("WEBGL_VERSION")>0),Ge.registerFlag("WEBGL_VERSION",()=>X0(2)?2:X0(1)?1:0),Ge.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),Ge.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>Ge.get("WEBGL_VERSION")===2),Ge.registerFlag("WEBGL_CPU_FORWARD",()=>!0),Ge.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),Ge.registerFlag("WEBGL_PACK",()=>Ge.getBool("HAS_WEBGL")),Ge.registerFlag("WEBGL_PACK_NORMALIZATION",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_CLIP",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),Ge.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_PACK_REDUCE",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_LAZILY_UNPACK",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_CONV_IM2COL",()=>Ge.getBool("WEBGL_PACK")),Ge.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>WK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>$K(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{const e=Ge.getNumber("WEBGL_VERSION");return e===0?0:UK(e)}),Ge.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>Ge.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!hT()),Ge.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>BK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>Ge.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:Ge.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),Ge.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>MK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_FENCE_API_ENABLED",()=>zK(Ge.getNumber("WEBGL_VERSION"))),Ge.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{const e=Ge.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return e?4:0}),Ge.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,e=>{if(e<0&&e!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${e}.`)});const{simpleAbsImpl:VK,addImpl:GK,ceilImpl:YK,expImpl:HK,expm1Impl:qK,floorImpl:jK,logImpl:KK,maxImpl:XK,multiplyImpl:JK,rsqrtImpl:ZK,sliceImpl:QK,subImpl:e5,transposeImpl:SS,uniqueImpl:t5}=Eq;class n5{constructor(e,t){this.outputShape=[],this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`float v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` +`)[0]),console.log(`%c ${fe(f[0],l)}`,"border:1px solid red; background-color:#e3d2d2; color:#a61717"),console.log(m.join(` +`))}function O8(n){return ks(n,()=>n.createProgram(),"Unable to create WebGLProgram.")}function L8(n,t){if(Rt(n,()=>n.linkProgram(t)),n.getProgramParameter(t,n.LINK_STATUS)===!1)throw console.log(n.getProgramInfoLog(t)),new Error("Failed to link vertex and fragment shaders.")}function f1(n,t){if(Rt(n,()=>n.validateProgram(t)),n.getProgramParameter(t,n.VALIDATE_STATUS)===!1)throw console.log(n.getProgramInfoLog(t)),new Error("Shader program validation failed.")}function M8(n,t){let e=ks(n,()=>n.createBuffer(),"Unable to create WebGLBuffer");return Rt(n,()=>n.bindBuffer(n.ARRAY_BUFFER,e)),Rt(n,()=>n.bufferData(n.ARRAY_BUFFER,t,n.STATIC_DRAW)),e}function B8(n,t){let e=ks(n,()=>n.createBuffer(),"Unable to create 
WebGLBuffer");return Rt(n,()=>n.bindBuffer(n.ELEMENT_ARRAY_BUFFER,e)),Rt(n,()=>n.bufferData(n.ELEMENT_ARRAY_BUFFER,t,n.STATIC_DRAW)),e}function qst(){return ct().getNumber("WEBGL_VERSION")===2?1:4}function z8(n){return ks(n,()=>n.createTexture(),"Unable to create WebGLTexture.")}function W8(n,t){let e=ct().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(n<=0||t<=0){let r=`[${n}x${t}]`;throw new Error("Requested texture size "+r+" is invalid.")}if(n>e||t>e){let r=`[${n}x${t}]`,o=`[${e}x${e}]`;throw new Error("Requested texture size "+r+" greater than WebGL maximum on this browser / GPU "+o+".")}}function V8(n){return ks(n,()=>n.createFramebuffer(),"Unable to create WebGLFramebuffer.")}function gI(n,t,e,r,o,s,c){let l=n.getAttribLocation(t,e);return l===-1?!1:(Rt(n,()=>n.bindBuffer(n.ARRAY_BUFFER,r)),Rt(n,()=>n.vertexAttribPointer(l,o,n.FLOAT,!1,s,c)),Rt(n,()=>n.enableVertexAttribArray(l)),!0)}function G8(n,t,e){bI(n,e),Rt(n,()=>n.activeTexture(n.TEXTURE0+e)),Rt(n,()=>n.bindTexture(n.TEXTURE_2D,t))}function Hst(n,t){bI(n,t),Rt(n,()=>n.activeTexture(n.TEXTURE0+t)),Rt(n,()=>n.bindTexture(n.TEXTURE_2D,null))}function U8(n,t,e){return ks(n,()=>n.getUniformLocation(t,e),'uniform "'+e+'" not present in program.')}function q8(n,t,e){return n.getUniformLocation(t,e)}function H8(n,t,e,r){Rt(n,()=>G8(n,t,r)),Rt(n,()=>n.uniform1i(e,r))}function jst(n){Rt(n,()=>n.bindFramebuffer(n.FRAMEBUFFER,null)),Rt(n,()=>n.viewport(0,0,n.canvas.width,n.canvas.height)),Rt(n,()=>n.scissor(0,0,n.canvas.width,n.canvas.height))}function d1(n,t,e){Rt(n,()=>n.bindFramebuffer(n.FRAMEBUFFER,e)),Rt(n,()=>n.framebufferTexture2D(n.FRAMEBUFFER,n.COLOR_ATTACHMENT0,n.TEXTURE_2D,t,0))}function yI(n,t){Rt(n,()=>n.bindFramebuffer(n.FRAMEBUFFER,t)),Rt(n,()=>n.framebufferTexture2D(n.FRAMEBUFFER,n.COLOR_ATTACHMENT0,n.TEXTURE_2D,null,0))}function Am(n){let t=n.checkFramebufferStatus(n.FRAMEBUFFER);if(t!==n.FRAMEBUFFER_COMPLETE)throw new Error("Error binding framebuffer: "+j8(n,t))}function j8(n,t){switch(t){case n.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_ATTACHMENT";case n.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:return"FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";case n.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:return"FRAMEBUFFER_INCOMPLETE_DIMENSIONS";case n.FRAMEBUFFER_UNSUPPORTED:return"FRAMEBUFFER_UNSUPPORTED";default:return`unknown error ${t}`}}function ks(n,t,e){let r=Rt(n,()=>t());if(r==null)throw new Error(e);return r}function bI(n,t){let e=n.MAX_COMBINED_TEXTURE_IMAGE_UNITS-1,r=t+n.TEXTURE0;if(re){let o=`[gl.TEXTURE0, gl.TEXTURE${e}]`;throw new Error(`textureUnit must be in ${o}.`)}}function rl(n,t=2){return G(n.slice(0,n.length-t))}function ol(n){if(n.length===0)throw Error("Cannot get rows and columns of an empty shape array.");return[n.length>1?n[n.length-2]:1,n[n.length-1]]}function m1(n){let t=[1,1,1],e=n.length===0||n.length===1&&n[0]===1;return e||(t=[rl(n),...ol(n)]),t}function K8(n,t=!1){let e=ct().getNumber("WEBGL_MAX_TEXTURE_SIZE");if(t&&(e=e*2,n=n.map((o,s)=>s>=n.length-2?k(n[s]):n[s]),n.length===1&&(n=[2,n[0]])),n.length!==2){let o=ln(n);n=o.newShape}let r=G(n);if(n.length<=1&&r<=e)return[1,r];if(n.length===2&&n[0]<=e&&n[1]<=e)return n;if(n.length===3&&n[0]*n[1]<=e&&n[2]<=e)return[n[0]*n[1],n[2]];if(n.length===3&&n[0]<=e&&n[1]*n[2]<=e)return[n[0],n[1]*n[2]];if(n.length===4&&n[0]*n[1]*n[2]<=e&&n[3]<=e)return[n[0]*n[1]*n[2],n[3]];if(n.length===4&&n[0]<=e&&n[1]*n[2]*n[3]<=e)return[n[0],n[1]*n[2]*n[3]];if(t){let o=rl(n),s=2,c=2;return n.length&&([s,c]=ol(n)),r=o*(s/2)*(c/2),Gt(r).map(l=>l*2)}return Gt(r)}function 
Fm(n){return n%2===0}function Rm(n,t){if(n=n.slice(-2),t=t.slice(-2),lt(n,t))return!0;if(!n.length||!t.length)return!0;if(n[0]===0||n[1]===0||t[0]===0||t[1]===0)return!0;if(n.length!==t.length){let e=n.slice(-1)[0],r=t.slice(-1)[0];if(e===r)return!0;if(Fm(e)&&Fm(r)&&(n[0]===1||t[0]===1))return!0}return n[1]===t[1]&&Fm(n[0])&&Fm(t[0])}let Pm,Om;function X8(n){if(Pm==null){let t=jo(n);Pm=t.getParameter(t.MAX_TEXTURE_SIZE)}return Pm}function Kst(){Pm=null}function Xst(){Om=null}function Y8(n){if(Om==null){let t=jo(n);Om=t.getParameter(t.MAX_TEXTURE_IMAGE_UNITS)}return Math.min(16,Om)}function J8(n){if(n===0)return 0;let t,e=jo(n);return eo(e,"EXT_disjoint_timer_query_webgl2")&&n===2?t=2:eo(e,"EXT_disjoint_timer_query")?t=1:t=0,t}function eo(n,t){let e=n.getExtension(t);return e!=null}function xI(n){try{let t=jo(n);if(t!=null)return!0}catch(t){return console.log("Error when getting WebGL context: ",t),!1}return!1}function Z8(n){if(n===0)return!1;let t=jo(n);if(n===1){if(!eo(t,"OES_texture_float"))return!1}else if(!eo(t,"EXT_color_buffer_float"))return!1;let e=g1(t);return e}function Q8(n){if(n===0)return!1;let t=jo(n);if(n===1){if(!eo(t,"OES_texture_float"))return!1;if(!eo(t,"WEBGL_color_buffer_float"))return!1}else{if(eo(t,"EXT_color_buffer_float"))return g1(t);let r="EXT_color_buffer_half_float";if(eo(t,r)){let o=t.getExtension(r);return tX(t,o)}return!1}let e=g1(t);return e}function g1(n){let t=h1(n),e=n.createTexture();n.bindTexture(n.TEXTURE_2D,e);let r=1,o=1;n.texImage2D(n.TEXTURE_2D,0,t.internalFormatFloat,r,o,0,t.textureFormatFloat,t.textureTypeFloat,null);let s=n.createFramebuffer();n.bindFramebuffer(n.FRAMEBUFFER,s),n.framebufferTexture2D(n.FRAMEBUFFER,n.COLOR_ATTACHMENT0,n.TEXTURE_2D,e,0);let c=n.checkFramebufferStatus(n.FRAMEBUFFER)===n.FRAMEBUFFER_COMPLETE;return n.bindTexture(n.TEXTURE_2D,null),n.bindFramebuffer(n.FRAMEBUFFER,null),n.deleteTexture(e),n.deleteFramebuffer(s),c}function tX(n,t){let e=h1(n,t),r=n.createTexture();n.bindTexture(n.TEXTURE_2D,r);let o=1,s=1;n.texImage2D(n.TEXTURE_2D,0,e.internalFormatHalfFloat,o,s,0,e.textureFormatFloat,e.textureTypeHalfFloat,null);let c=n.createFramebuffer();n.bindFramebuffer(n.FRAMEBUFFER,c),n.framebufferTexture2D(n.FRAMEBUFFER,n.COLOR_ATTACHMENT0,n.TEXTURE_2D,r,0);let l=n.checkFramebufferStatus(n.FRAMEBUFFER)===n.FRAMEBUFFER_COMPLETE;return n.bindTexture(n.TEXTURE_2D,null),n.bindFramebuffer(n.FRAMEBUFFER,null),n.deleteTexture(r),n.deleteFramebuffer(c),l}function eX(n){if(n!==2)return!1;let t=jo(n),e=t.fenceSync!=null;return e}function sh(n,t){Array.isArray(n)||(n=[n]),n.forEach(e=>{e!=null&&_(e.dtype!=="complex64",()=>`${t} does not support complex64 tensors in the WebGL backend.`)})}let 
qt=ct();qt.registerFlag("HAS_WEBGL",()=>qt.getNumber("WEBGL_VERSION")>0),qt.registerFlag("WEBGL_VERSION",()=>xI(2)?2:xI(1)?1:0),qt.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS",()=>!1),qt.registerFlag("WEBGL_BUFFER_SUPPORTED",()=>qt.get("WEBGL_VERSION")===2),qt.registerFlag("WEBGL_CPU_FORWARD",()=>!0),qt.registerFlag("WEBGL_FORCE_F16_TEXTURES",()=>!1),qt.registerFlag("WEBGL_PACK",()=>qt.getBool("HAS_WEBGL")),qt.registerFlag("WEBGL_PACK_NORMALIZATION",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_CLIP",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_DEPTHWISECONV",()=>!1),qt.registerFlag("WEBGL_PACK_BINARY_OPERATIONS",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_UNARY_OPERATIONS",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_PACK_REDUCE",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_LAZILY_UNPACK",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_CONV_IM2COL",()=>qt.getBool("WEBGL_PACK")),qt.registerFlag("WEBGL_MAX_TEXTURE_SIZE",()=>X8(qt.getNumber("WEBGL_VERSION"))),qt.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER",()=>Y8(qt.getNumber("WEBGL_VERSION"))),qt.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION",()=>{let n=qt.getNumber("WEBGL_VERSION");return n===0?0:J8(n)}),qt.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE",()=>qt.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0&&!PN()),qt.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE",()=>Z8(qt.getNumber("WEBGL_VERSION"))),qt.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED",()=>qt.getBool("WEBGL_FORCE_F16_TEXTURES")?!1:qt.getBool("WEBGL_RENDER_FLOAT32_CAPABLE")),qt.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED",()=>Q8(qt.getNumber("WEBGL_VERSION"))),qt.registerFlag("WEBGL_FENCE_API_ENABLED",()=>eX(qt.getNumber("WEBGL_VERSION"))),qt.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM",()=>{let n=qt.getBool("WEBGL_RENDER_FLOAT32_ENABLED");return n?4:0}),qt.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD",()=>-1,n=>{if(n<0&&n!==-1)throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${n}.`)});let{simpleAbsImpl:nX,addImpl:rX,ceilImpl:oX,expImpl:sX,expm1Impl:iX,floorImpl:aX,logImpl:cX,maxImpl:lX,multiplyImpl:uX,rsqrtImpl:pX,sliceImpl:hX,subImpl:fX,transposeImpl:y1,uniqueImpl:dX}=U6;class mX{constructor(t,e){this.outputShape=[],this.outputShape=t,this.variableNames=e.map((s,c)=>`T${c}`);let r=[];this.variableNames.forEach(s=>{r.push(`float v${s} = get${s}AtOutCoords();`)});let o=this.variableNames.map(s=>`v${s}`).join(" + ");this.userCode=` void main() { - ${n.join(` + ${r.join(` `)} - float result = ${s}; + float result = ${o}; setOutput(result); } - `}}class s5{constructor(e,t){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.variableNames=t.map((i,o)=>`T${o}`);const n=[];this.variableNames.forEach(i=>{n.push(`vec4 v${i} = get${i}AtOutCoords();`)});const s=this.variableNames.map(i=>`v${i}`).join(" + ");this.userCode=` + `}}class gX{constructor(t,e){this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.variableNames=e.map((s,c)=>`T${c}`);let r=[];this.variableNames.forEach(s=>{r.push(`vec4 v${s} = get${s}AtOutCoords();`)});let o=this.variableNames.map(s=>`v${s}`).join(" + ");this.userCode=` void main() { - ${n.join(` + ${r.join(` `)} - vec4 result = ${s}; + vec4 result = ${o}; setOutput(result); } - `}}class 
i5{constructor(e,t,n){this.variableNames=["A"];const{windowSize:s,batchSize:i,outSize:o}=e;n||this.variableNames.push("bestIndicesA"),this.outputShape=[i,o];const a=t==="max"?">":"<",c=n?"inOffset + i;":"round(getBestIndicesA(batch, inOffset + i));";this.userCode=` + `}}class yX{constructor(t,e,r){this.variableNames=["A"];let{windowSize:o,batchSize:s,outSize:c}=t;r||this.variableNames.push("bestIndicesA"),this.outputShape=[s,c];let l=e==="max"?">":"<",p=r?"inOffset + i;":"round(getBestIndicesA(batch, inOffset + i));";this.userCode=` void main() { ivec2 coords = getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; - int inOffset = outIdx * ${s}; + int inOffset = outIdx * ${o}; int bestIndex = inOffset; float bestValue = getA(batch, bestIndex); - for (int i = 0; i < ${s}; i++) { - int inIdx = ${c}; + for (int i = 0; i < ${o}; i++) { + int inIdx = ${p}; float candidate = getA(batch, inIdx); - if (candidate ${a} bestValue) { + if (candidate ${l} bestValue) { bestValue = candidate; bestIndex = inIdx; } } setOutput(float(bestIndex)); } - `}}function J0(e,t){return["x","y","z","w","u","v"].slice(0,t).map(n=>`${e}.${n}`)}function Mn(e,t){return t===1?[e]:J0(e,t)}function r5(e,t){if(e===1)return"rc";let n="";for(let s=0;s`${n}.${e}`)}function Yn(n,t){return t===1?[n]:wI(n,t)}function bX(n,t){if(n===1)return"rc";let e="";for(let r=0;r 0.0 || val < 0.0) ? false : val != 0.0; } @@ -80,7 +80,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi } #define isnan(value) isnan_custom(value) - `,h="",d=` + `,p="",f=` #define round(value) newRound(value) int newRound(float value) { return int(floor(value + 0.5)); @@ -89,7 +89,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi ivec4 newRound(vec4 value) { return ivec4(floor(value + vec4(0.5))); } - `):(e="",t="attribute",n="varying",s="varying",i="texture2D",o="gl_FragColor",a="",c=` + `):(n="",t="attribute",e="varying",r="varying",o="texture2D",s="gl_FragColor",c="",l=` #define isnan(value) isnan_custom(value) bool isnan_custom(float val) { return (val > 0. || val < 1. || val == 0.) ? false : true; @@ -97,7 +97,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi bvec4 isnan_custom(vec4 val) { return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w)); } - `,h=` + `,p=` uniform float INFINITY; bool isinf(float val) { @@ -106,7 +106,7 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi bvec4 isinf(vec4 val) { return equal(abs(val), vec4(INFINITY)); } - `,d=` + `,f=` int round(float value) { return int(floor(value + 0.5)); } @@ -114,11 +114,11 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi ivec4 round(vec4 value) { return ivec4(floor(value + vec4(0.5))); } - `),{version:e,attribute:t,varyingVs:n,varyingFs:s,texture2D:i,output:o,defineOutput:a,defineSpecialNaN:c,defineSpecialInf:h,defineRound:d}}function Yo(e,t,n="index"){const s=je(t);return s.map((i,o)=>{const a=`int ${e[o]} = ${n} / ${i}`,c=o===s.length-1?`int ${e[o+1]} = ${n} - ${e[o]} * ${i}`:`index -= ${e[o]} * ${i}`;return`${a}; ${c};`}).join("")}function Dm(e){return e.length===1?`${e[0]}`:`vec${e.length}(${e.join(",")})`}function sne(e,t){if(e.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${e.length} and ${t.length}`);const n=[],s=Math.floor(e.length/4),i=e.length%4;for(let o=0;o`float(${c})`),a=a.map(c=>`float(${c})`)),n.push(`${Dm(o)}, ${Dm(a)}`)}return n.map((o,a)=>`dot(${o})`).join("+")}function IS(e){const t=je(e).map(n=>n.toString());return` + `),{version:n,attribute:t,varyingVs:e,varyingFs:r,texture2D:o,output:s,defineOutput:c,defineSpecialNaN:l,defineSpecialInf:p,defineRound:f}}function xa(n,t,e="index"){let r=Yt(t);return r.map((o,s)=>{let c=`int ${n[s]} = ${e} / ${o}`,l=s===r.length-1?`int ${n[s+1]} = ${e} - ${n[s]} * ${o}`:`index -= ${n[s]} * ${o}`;return`${c}; ${l};`}).join("")}function Lm(n){return n.length===1?`${n[0]}`:`vec${n.length}(${n.join(",")})`}function Yst(n,t){if(n.length!==t.length)throw new Error(`Vectors to be dotted must be of the same length -got ${n.length} and ${t.length}`);let e=[],r=Math.floor(n.length/4),o=n.length%4;for(let s=0;s`float(${l})`),c=c.map(l=>`float(${l})`)),e.push(`${Lm(s)}, ${Lm(c)}`)}return e.map((s,c)=>`dot(${s})`).join("+")}function b1(n){let t=Yt(n).map(e=>e.toString());return` int getFlatIndex(ivec3 coords) { return coords.x * ${t[0]} + coords.y * ${t[1]} + coords.z; } -`}const Z0=` +`}let vI=` const float FLOAT_MAX = 1.70141184e38; const float FLOAT_MIN = 1.17549435e-38; @@ -157,27 +157,27 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. 
To speed thi return c / 255.0; } -`;const{getBroadcastDims:Q0}=cw;function o5(e,t,n,s){const i=[];e.forEach(L=>{const x=P(L.shapeInfo.logicalShape);L.shapeInfo.isUniform?i.push(`uniform float ${L.name}${x>1?`[${x}]`:""};`):(i.push(`uniform sampler2D ${L.name};`),i.push(`uniform int offset${L.name};`))});const o=i.join(` -`),a=e.map(L=>a5(L,t,s)).join(` -`),c=t.texShape,h=Pn(),d=h5(h);let m,f,b=p5(h);t.isPacked?(m=c5(t.logicalShape,c),f=d5(h)):(m=l5(t.logicalShape,c),f=u5(h)),s&&(b+=y5);const w=[b,d,f,o,m,a,n].join(` -`);return w}function gc(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return R5(e);case 1:return E5(e);case 2:return k5(e);case 3:return _5(e);case 4:return $5(e);case 5:return U5(e);case 6:return B5(e);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function eC(e){const t=e.shapeInfo.logicalShape;switch(t.length){case 0:return C5(e);case 1:return O5(e);case 2:return D5(e);case 3:return F5(e);default:return W5(e)}}function a5(e,t,n=!1){let s="";n?s+=eC(e):s+=gc(e);const i=e.shapeInfo.logicalShape,o=t.logicalShape;return i.length<=o.length&&(n?s+=M5(e,t):s+=P5(e,t)),s}function c5(e,t){switch(e.length){case 0:return tC();case 1:return b5(e,t);case 2:return v5(e,t);case 3:return L5(e,t);default:return I5(e,t)}}function l5(e,t){switch(e.length){case 0:return tC();case 1:return w5(e,t);case 2:return N5(e,t);case 3:return S5(e,t);case 4:return x5(e,t);case 5:return T5(e,t);case 6:return A5(e,t);default:throw new Error(`${e.length}-D output sampling is not yet supported`)}}function h5(e){return` +`;let{getBroadcastDims:TI}=ov;function xX(n,t,e,r){let o=[];n.forEach(T=>{let N=G(T.shapeInfo.logicalShape);T.shapeInfo.isUniform?o.push(`uniform float ${T.name}${N>1?`[${N}]`:""};`):(o.push(`uniform sampler2D ${T.name};`),o.push(`uniform int offset${T.name};`))});let s=o.join(` +`),c=n.map(T=>wX(T,t,r)).join(` +`),l=t.texShape,p=Jn(),f=kX(p),m,y,b=CX(p);t.isPacked?(m=vX(t.logicalShape,l),y=_X(p)):(m=TX(t.logicalShape,l),y=NX(p)),r&&(b+=EX);let v=[b,f,y,s,m,c,e].join(` +`);return v}function sl(n){let t=n.shapeInfo.logicalShape;switch(t.length){case 0:return VX(n);case 1:return UX(n);case 2:return HX(n);case 3:return KX(n);case 4:return YX(n);case 5:return JX(n);case 6:return ZX(n);default:throw new Error(`${t.length}-D input sampling is not yet supported`)}}function kI(n){let t=n.shapeInfo.logicalShape;switch(t.length){case 0:return WX(n);case 1:return GX(n);case 2:return qX(n);case 3:return jX(n);default:return XX(n)}}function wX(n,t,e=!1){let r="";e?r+=kI(n):r+=sl(n);let o=n.shapeInfo.logicalShape,s=t.logicalShape;return o.length<=s.length&&(e?r+=QX(n,t):r+=tY(n,t)),r}function vX(n,t){switch(n.length){case 0:return NI();case 1:return DX(n,t);case 2:return BX(n,t);case 3:return FX(n,t);default:return PX(n,t)}}function TX(n,t){switch(n.length){case 0:return NI();case 1:return AX(n,t);case 2:return zX(n,t);case 3:return RX(n,t);case 4:return OX(n,t);case 5:return LX(n,t);case 6:return MX(n,t);default:throw new Error(`${n.length}-D output sampling is not yet supported`)}}function kX(n){return` float sampleTexture(sampler2D textureSampler, vec2 uv) { - return ${e.texture2D}(textureSampler, uv).r; + return ${n.texture2D}(textureSampler, uv).r; } - `}function u5(e){return` + `}function NX(n){return` void setOutput(float val) { - ${e.output} = vec4(val, 0, 0, 0); + ${n.output} = vec4(val, 0, 0, 0); } - `}function d5(e){return` + `}function _X(n){return` void setOutput(vec4 val) { - ${e.output} = val; + ${n.output} = val; } - `}function p5(e){const 
t=`${e.version} + `}function CX(n){let t=`${n.version} precision highp float; precision highp int; precision highp sampler2D; - ${e.varyingFs} vec2 resultUV; - ${e.defineOutput} + ${n.varyingFs} vec2 resultUV; + ${n.defineOutput} const vec2 halfCR = vec2(0.5, 0.5); struct ivec5 @@ -200,9 +200,9 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi }; uniform float NAN; - ${e.defineSpecialNaN} - ${e.defineSpecialInf} - ${e.defineRound} + ${n.defineSpecialNaN} + ${n.defineSpecialInf} + ${n.defineRound} int imod(int x, int y) { return x - y * (x / y); @@ -227,10 +227,10 @@ Hi there 👋. Looks like you are running TensorFlow.js in Node.js. To speed thi return fract((p3.x + p3.y) * p3.z); } - ${m5} - ${f5} - ${g5} - `;return t}const m5=` + ${SX} + ${$X} + ${IX} + `;return t}let SX=` vec2 uvFromFlat(int texNumR, int texNumC, int index) { int texR = index / texNumC; int texC = index - texR * texNumC; @@ -242,7 +242,7 @@ vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,f5=` +`,$X=` vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, int texNumC, int row, int col) { int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); @@ -250,7 +250,7 @@ vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,g5=` +`,IX=` vec2 packedUVfrom3D(int texNumR, int texNumC, int texelsInBatch, int texelsInLogicalRow, int b, int row, int col) { @@ -259,7 +259,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, int texC = index - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } -`,y5=` +`,EX=` float getChannel(vec4 frag, vec2 innerDims) { vec2 modCoord = mod(innerDims, 2.); return modCoord.x == 0. ? @@ -270,25 +270,25 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, float modCoord = mod(float(dim), 2.); return modCoord == 0. ? 
frag.r : frag.g; } -`;function tC(){return` +`;function NI(){return` int getOutputCoords() { return 0; } - `}function b5(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];return n[0]===1?` + `}function DX(n,t){let e=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)];return e[0]===1?` int getOutputCoords() { - return 2 * int(resultUV.x * ${n[1]}.0); + return 2 * int(resultUV.x * ${e[1]}.0); } - `:n[1]===1?` + `:e[1]===1?` int getOutputCoords() { - return 2 * int(resultUV.y * ${n[0]}.0); + return 2 * int(resultUV.y * ${e[0]}.0); } `:` int getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * - vec2(${n[0]}, ${n[1]})); - return 2 * (resTexRC.x * ${n[1]} + resTexRC.y); + vec2(${e[0]}, ${e[1]})); + return 2 * (resTexRC.x * ${e[1]} + resTexRC.y); } - `}function w5(e,t){return t[0]===1?` + `}function AX(n,t){return t[0]===1?` int getOutputCoords() { return int(resultUV.x * ${t[1]}.0); } @@ -302,105 +302,105 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, vec2(${t[0]}, ${t[1]})); return resTexRC.x * ${t[1]} + resTexRC.y; } - `}function L5(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[2]/2),i=s*Math.ceil(e[1]/2);return` + `}function FX(n,t){let e=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],r=Math.ceil(n[2]/2),o=r*Math.ceil(n[1]/2);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * - vec2(${n[0]}, ${n[1]})); - int index = resTexRC.x * ${n[1]} + resTexRC.y; + vec2(${e[0]}, ${e[1]})); + int index = resTexRC.x * ${e[1]} + resTexRC.y; - int b = index / ${i}; - index -= b * ${i}; + int b = index / ${o}; + index -= b * ${o}; - int r = 2 * (index / ${s}); - int c = imod(index, ${s}) * 2; + int r = 2 * (index / ${r}); + int c = imod(index, ${r}) * 2; return ivec3(b, r, c); } - `}function S5(e,t){const n=Yo(["r","c","d"],e);return` + `}function RX(n,t){let e=xa(["r","c","d"],n);return` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t[0]}, ${t[1]})); int index = resTexRC.x * ${t[1]} + resTexRC.y; - ${n} + ${e} return ivec3(r, c, d); } - `}function I5(e,t){const n=[Math.ceil(t[0]/2),Math.ceil(t[1]/2)],s=Math.ceil(e[e.length-1]/2),i=s*Math.ceil(e[e.length-2]/2);let o=i,a="",c="b, r, c";for(let h=2;h=1?m="coords = 0;":m=c.map(O=>`coords.${f[O+d]} = 0;`).join(` -`);let b="";a<2&&o>0?b="coords":b=e.shapeInfo.logicalShape.map((O,E)=>`coords.${f[E+d]}`).join(", ");let w="return outputValue;";const L=P(e.shapeInfo.logicalShape),x=L===1,v=P(t.logicalShape),N=v===1;if(o===1&&!x&&!N)w=` + `}function QX(n,t){let e=n.name,r=e.charAt(0).toUpperCase()+e.slice(1),o="get"+r+"AtOutCoords",s=n.shapeInfo.logicalShape.length,c=t.logicalShape.length,l=TI(n.shapeInfo.logicalShape,t.logicalShape),p=Oe(c),f=c-s,m,y=["x","y","z","w","u","v"];s===0?m="":c<2&&l.length>=1?m="coords = 0;":m=l.map(I=>`coords.${y[I+f]} = 0;`).join(` +`);let b="";c<2&&s>0?b="coords":b=n.shapeInfo.logicalShape.map((I,P)=>`coords.${y[P+f]}`).join(", ");let v="return outputValue;",T=G(n.shapeInfo.logicalShape),N=T===1,S=G(t.logicalShape),D=S===1;if(s===1&&!N&&!D)v=` return vec4(outputValue.xy, outputValue.xy); - `;else if(x&&!N)a===1?w=` + `;else if(N&&!D)c===1?v=` return vec4(outputValue.x, outputValue.x, 0., 0.); - `:w=` + `:v=` return vec4(outputValue.x); - `;else if(c.length){const O=o-2,E=o-1;c.indexOf(O)>-1&&c.indexOf(E)>-1?w="return vec4(outputValue.x);":c.indexOf(O)>-1?w="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":c.indexOf(E)>-1&&(w="return vec4(outputValue.xx, outputValue.zz);")}return` - vec4 ${i}() { - ${h} coords = getOutputCoords(); + `;else if(l.length){let 
I=s-2,P=s-1;l.indexOf(I)>-1&&l.indexOf(P)>-1?v="return vec4(outputValue.x);":l.indexOf(I)>-1?v="return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);":l.indexOf(P)>-1&&(v="return vec4(outputValue.xx, outputValue.zz);")}return` + vec4 ${o}() { + ${p} coords = getOutputCoords(); ${m} - vec4 outputValue = get${s}(${b}); - ${w} + vec4 outputValue = get${r}(${b}); + ${v} } - `}function P5(e,t){const n=e.name,s=n.charAt(0).toUpperCase()+n.slice(1),i="get"+s+"AtOutCoords",o=t.texShape,a=e.shapeInfo.texShape,c=e.shapeInfo.logicalShape.length,h=t.logicalShape.length;if(!e.shapeInfo.isUniform&&c===h&&e.shapeInfo.flatOffset==null&&ae(a,o))return` - float ${i}() { - return sampleTexture(${n}, resultUV); + `}function tY(n,t){let e=n.name,r=e.charAt(0).toUpperCase()+e.slice(1),o="get"+r+"AtOutCoords",s=t.texShape,c=n.shapeInfo.texShape,l=n.shapeInfo.logicalShape.length,p=t.logicalShape.length;if(!n.shapeInfo.isUniform&&l===p&&n.shapeInfo.flatOffset==null&<(c,s))return` + float ${o}() { + return sampleTexture(${e}, resultUV); } - `;const d=Rt(h),m=Q0(e.shapeInfo.logicalShape,t.logicalShape),f=h-c;let b;const w=["x","y","z","w","u","v"];c===0?b="":h<2&&m.length>=1?b="coords = 0;":b=m.map(x=>`coords.${w[x+f]} = 0;`).join(` -`);let L="";return h<2&&c>0?L="coords":L=e.shapeInfo.logicalShape.map((x,v)=>`coords.${w[v+f]}`).join(", "),` - float ${i}() { - ${d} coords = getOutputCoords(); + `;let f=Oe(p),m=TI(n.shapeInfo.logicalShape,t.logicalShape),y=p-l,b,v=["x","y","z","w","u","v"];l===0?b="":p<2&&m.length>=1?b="coords = 0;":b=m.map(N=>`coords.${v[N+y]} = 0;`).join(` +`);let T="";return p<2&&l>0?T="coords":T=n.shapeInfo.logicalShape.map((N,S)=>`coords.${v[S+y]}`).join(", "),` + float ${o}() { + ${f} coords = getOutputCoords(); ${b} - return get${s}(${L}); + return get${r}(${T}); } - `}function Rt(e){if(e<=1)return"int";if(e===2)return"ivec2";if(e===3)return"ivec3";if(e===4)return"ivec4";if(e===5)return"ivec5";if(e===6)return"ivec6";throw Error(`GPU for rank ${e} is not yet supported`)}function bc(e,t){const n=JSON.parse(JSON.stringify(e));return n.shapeInfo.logicalShape=t,n}function wc(e,t){return t.map(n=>e[n]).join(", ")}class z5{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,A(e.length>2,()=>`Packed arg${n.charAt(0).toUpperCase()+n.slice(1)} supports only inputs with rank above 2.`);const i=e[e.length-1],o=Math.ceil(i/t);this.outputShape=e.slice(0,-1),o>1&&this.outputShape.push(o),s||this.variableNames.push("bestIndicesA");const a=this.outputShape,c=a.length,h=Rt(c),d=Mn("coords",c);let m,f;if(o===1){f=c+1;const $=Rt(f);m=` - ${$} sourceLocR = ${$}(${d.join()}, 0); - ++${d[c-1]}; - ${$} sourceLocG = ${$}(${d.join()}, 0); - ++${d[c-2]}; - ${$} sourceLocA = ${$}(${d.join()}, 0); - --${d[c-1]}; - ${$} sourceLocB = ${$}(${d.join()}, 0); - --${d[c-2]};`}else f=c,m=` - ${h} sourceLocR = coords; - ++${d[c-1]}; - ${h} sourceLocG = coords; - ++${d[c-2]}; - ${h} sourceLocA = coords; - --${d[c-1]}; - ${h} sourceLocB = coords; - --${d[c-2]};`;const b=["x","y","z","w","u","v"].slice(0,f),w="."+b[f-1],L=b.map($=>"int "+$),x=Mn("sourceLocR",f-1).concat("inIdx.r"),v=Mn("sourceLocG",f-1).concat("inIdx.g"),N=Mn("sourceLocB",f-1).concat("inIdx.b"),O=Mn("sourceLocA",f-1).concat("inIdx.a"),E=n==="max"?"greaterThan":"lessThan",k=s?"":` - inIdx = round(vec4(getBestIndicesAChannel(${x.join()}), - getBestIndicesAChannel(${v.join()}), - getBestIndicesAChannel(${N.join()}), - getBestIndicesAChannel(${O.join()})));`,F=`vec4( - getAChannel(${x.join()}), - hasNextCol ? 
getAChannel(${v.join()}) : 0., - hasNextRow ? getAChannel(${N.join()}) : 0., - hasNextRow && hasNextCol ? getAChannel(${O.join()}) : 0.)`,U=s?"":` - float getBestIndicesAChannel(${L.join()}) { - return getChannel(getBestIndicesA(${b.join()}), - vec2(${b.slice(-2).join()})); + `}function Oe(n){if(n<=1)return"int";if(n===2)return"ivec2";if(n===3)return"ivec3";if(n===4)return"ivec4";if(n===5)return"ivec5";if(n===6)return"ivec6";throw Error(`GPU for rank ${n} is not yet supported`)}function al(n,t){let e=JSON.parse(JSON.stringify(n));return e.shapeInfo.logicalShape=t,e}function cl(n,t){return t.map(e=>n[e]).join(", ")}class eY{constructor(t,e,r,o){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,_(t.length>2,()=>`Packed arg${r.charAt(0).toUpperCase()+r.slice(1)} supports only inputs with rank above 2.`);let s=t[t.length-1],c=Math.ceil(s/e);this.outputShape=t.slice(0,-1),c>1&&this.outputShape.push(c),o||this.variableNames.push("bestIndicesA");let l=this.outputShape,p=l.length,f=Oe(p),m=Yn("coords",p),y,b;if(c===1){b=p+1;let H=Oe(b);y=` + ${H} sourceLocR = ${H}(${m.join()}, 0); + ++${m[p-1]}; + ${H} sourceLocG = ${H}(${m.join()}, 0); + ++${m[p-2]}; + ${H} sourceLocA = ${H}(${m.join()}, 0); + --${m[p-1]}; + ${H} sourceLocB = ${H}(${m.join()}, 0); + --${m[p-2]};`}else b=p,y=` + ${f} sourceLocR = coords; + ++${m[p-1]}; + ${f} sourceLocG = coords; + ++${m[p-2]}; + ${f} sourceLocA = coords; + --${m[p-1]}; + ${f} sourceLocB = coords; + --${m[p-2]};`;let v=["x","y","z","w","u","v"].slice(0,b),T="."+v[b-1],N=v.map(H=>"int "+H),S=Yn("sourceLocR",b-1).concat("inIdx.r"),D=Yn("sourceLocG",b-1).concat("inIdx.g"),I=Yn("sourceLocB",b-1).concat("inIdx.b"),P=Yn("sourceLocA",b-1).concat("inIdx.a"),E=r==="max"?"greaterThan":"lessThan",L=o?"":` + inIdx = round(vec4(getBestIndicesAChannel(${S.join()}), + getBestIndicesAChannel(${D.join()}), + getBestIndicesAChannel(${I.join()}), + getBestIndicesAChannel(${P.join()})));`,B=`vec4( + getAChannel(${S.join()}), + hasNextCol ? getAChannel(${D.join()}) : 0., + hasNextRow ? getAChannel(${I.join()}) : 0., + hasNextRow && hasNextCol ? 
getAChannel(${P.join()}) : 0.)`,q=o?"":` + float getBestIndicesAChannel(${N.join()}) { + return getChannel(getBestIndicesA(${v.join()}), + vec2(${v.slice(-2).join()})); }`;this.userCode=` - float getAChannel(${L.join()}) { - return getChannel(getA(${b.join()}), - vec2(${b.slice(-2).join()})); + float getAChannel(${N.join()}) { + return getChannel(getA(${v.join()}), + vec2(${v.slice(-2).join()})); } - ${U} + ${q} void main() { - ${h} coords = getOutputCoords(); - bool hasNextCol = ${d[c-1]} < ${a[c-1]-1}; - bool hasNextRow = ${d[c-2]} < ${a[c-2]-1}; - ${m} - ivec4 srcIdx = ivec4(sourceLocR${w}, sourceLocG${w}, - sourceLocB${w}, sourceLocA${w}) * ${t}; + ${f} coords = getOutputCoords(); + bool hasNextCol = ${m[p-1]} < ${l[p-1]-1}; + bool hasNextRow = ${m[p-2]} < ${l[p-2]-1}; + ${y} + ivec4 srcIdx = ivec4(sourceLocR${T}, sourceLocG${T}, + sourceLocB${T}, sourceLocA${T}) * ${e}; ivec4 inIdx = srcIdx; vec4 bestIndex = vec4(inIdx); - vec4 bestValue = ${F}; + vec4 bestValue = ${B}; - for (int i = 0; i < ${t}; i++) { + for (int i = 0; i < ${e}; i++) { inIdx = srcIdx; - ${k} - vec4 candidate = ${F}; + ${L} + vec4 candidate = ${B}; bvec4 nan = isnan(candidate); bvec4 replace = bvec4( vec4(${E}(candidate, bestValue)) * (vec4(1.0) - vec4(nan))); @@ -777,9 +777,9 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(bestIndex); } - `}}class V5{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterHeight,h=e.effectiveFilterWidth,d=c-1-e.padInfo.top,m=h-1-e.padInfo.left,f=1/(t*n);this.userCode=` - const ivec2 pads = ivec2(${d}, ${m}); - const float avgMultiplier = float(${f}); + `}}class nY{constructor(t){this.variableNames=["dy"],this.outputShape=t.inShape;let e=t.filterHeight,r=t.filterWidth,o=t.strideHeight,s=t.strideWidth,c=t.dilationHeight,l=t.dilationWidth,p=t.effectiveFilterHeight,f=t.effectiveFilterWidth,m=p-1-t.padInfo.top,y=f-1-t.padInfo.left,b=1/(e*r);this.userCode=` + const ivec2 pads = ivec2(${m}, ${y}); + const float avgMultiplier = float(${b}); void main() { ivec4 coords = getOutputCoords(); @@ -793,20 +793,20 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d). // ? = to be determined. : = across all values in that axis. 
float dotProd = 0.0; - for (int wR = 0; wR < ${c}; - wR += ${o}) { - float dyR = float(dyRCorner + wR) / ${s}.0; + for (int wR = 0; wR < ${p}; + wR += ${c}) { + float dyR = float(dyRCorner + wR) / ${o}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) { + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - for (int wC = 0; wC < ${h}; - wC+= ${a}) { - float dyC = float(dyCCorner + wC) / ${i}.0; + for (int wC = 0; wC < ${f}; + wC+= ${l}) { + float dyC = float(dyCCorner + wC) / ${s}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } @@ -819,9 +819,9 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(dotProd); } - `}}class G5{constructor(e){this.variableNames=["dy"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.effectiveFilterDepth,f=e.effectiveFilterHeight,b=e.effectiveFilterWidth,w=m-1-e.padInfo.front,L=f-1-e.padInfo.top,x=b-1-e.padInfo.left,v=1/(t*n*s);this.userCode=` - const ivec3 pads = ivec3(${w}, ${L}, ${x}); - const float avgMultiplier = float(${v}); + `}}class rY{constructor(t){this.variableNames=["dy"],this.outputShape=t.inShape;let e=t.filterDepth,r=t.filterHeight,o=t.filterWidth,s=t.strideDepth,c=t.strideHeight,l=t.strideWidth,p=t.dilationDepth,f=t.dilationHeight,m=t.dilationWidth,y=t.effectiveFilterDepth,b=t.effectiveFilterHeight,v=t.effectiveFilterWidth,T=y-1-t.padInfo.front,N=b-1-t.padInfo.top,S=v-1-t.padInfo.left,D=1/(e*r*o);this.userCode=` + const ivec3 pads = ivec3(${T}, ${N}, ${S}); + const float avgMultiplier = float(${D}); void main() { ivec5 coords = getOutputCoords(); @@ -838,30 +838,30 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int wD = 0; wD < ${m}; - wD += ${c}) { - float dyD = float(dyDCorner + wD) / ${i}.0; + for (int wD = 0; wD < ${y}; + wD += ${p}) { + float dyD = float(dyDCorner + wD) / ${s}.0; - if (dyD < 0.0 || dyD >= ${e.outDepth}.0 || fract(dyD) > 0.0) { + if (dyD < 0.0 || dyD >= ${t.outDepth}.0 || fract(dyD) > 0.0) { continue; } int idyD = int(dyD); - for (int wR = 0; wR < ${f}; - wR += ${h}) { - float dyR = float(dyRCorner + wR) / ${o}.0; + for (int wR = 0; wR < ${b}; + wR += ${f}) { + float dyR = float(dyRCorner + wR) / ${c}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - for (int wC = 0; wC < ${b}; - wC += ${d}) { - float dyC = float(dyCCorner + wC) / ${a}.0; + for (int wC = 0; wC < ${v}; + wC += ${m}) { + float dyC = float(dyCCorner + wC) / ${l}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } @@ -875,10 +875,10 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } setOutput(dotProd); } - `}}const nC=` + `}}let _I=` if (isnan(a)) return a; if (isnan(b)) return b; -`,Y5=` +`,oY=` float s = sign(a) * sign(b); int ia = round(a); int ib = round(b); @@ -888,7 +888,7 @@ vec2 packedUVfrom3D(int texNumR, int texNumC, } else { return NAN; } -`,H5=` +`,sY=` if(a < 0.0 && floor(b) < b){ return NAN; } @@ -897,14 +897,14 @@ if (b == 0.0) { } return (round(mod(b, 2.0)) != 1) ? 
pow(abs(a), b) : sign(a) * pow(abs(a), b); -`,ine="return (a - b) * (a - b);",q5="return float(a == b);",j5="return float(a < b);",K5="return float(a <= b);",X5="return float(a > b);",J5="return float(a >= b);",Z5="return float(a >= 1.0 && b >= 1.0);",Q5="return float(a >= 1.0 || b >= 1.0);",e8=nC+` +`,Jst="return (a - b) * (a - b);",iY="return float(a == b);",aY="return float(a < b);",cY="return float(a <= b);",lY="return float(a > b);",uY="return float(a >= b);",pY="return float(a >= 1.0 && b >= 1.0);",hY="return float(a >= 1.0 || b >= 1.0);",fY=_I+` return max(a, b); -`,t8=nC+` +`,dY=_I+` return min(a, b); -`,n8=`if (b == 0.0) return NAN; - return mod(a, b);`,s8="return (b >= 1.0) ? a : a * (b + 1.0);",sC="return (a < 0.) ? b * a : a;";class _n{constructor(e,t,n){this.variableNames=["A","B"],this.outputShape=nt(t,n),this.userCode=` +`,mY=`if (b == 0.0) return NAN; + return mod(a, b);`,gY="return (b >= 1.0) ? a : a * (b + 1.0);",CI="return (a < 0.) ? b * a : a;";class Hn{constructor(t,e,r){this.variableNames=["A","B"],this.outputShape=le(e,r),this.userCode=` float binaryOperation(float a, float b) { - ${e} + ${t} } void main() { @@ -912,12 +912,12 @@ return (round(mod(b, 2.0)) != 1) ? float b = getBAtOutCoords(); setOutput(binaryOperation(a, b)); } - `}}const km=` + `}}let Mm=` result.r = isNaN.r > 0. ? NAN : result.r; result.g = isNaN.g > 0. ? NAN : result.g; result.b = isNaN.b > 0. ? NAN : result.b; result.a = isNaN.a > 0. ? NAN : result.a; -`,i8=` +`,yY=` ivec4 ia = round(a); ivec4 ib = round(b); bvec4 cond = notEqual(ib, ivec4(0)); @@ -938,7 +938,7 @@ return (round(mod(b, 2.0)) != 1) ? result[3] = idiv(ia[3], ib[3], s[3]); } return vec4(result); -`,r8=` +`,bY=` // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise. vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1))); vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1); @@ -952,71 +952,71 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isExpZero.a ? 
1.0 : result.a; vec4 isNaN = vec4(lessThan(a, vec4(0.0))) * vec4(lessThan(floor(b), b)); - `+km+` + `+Mm+` return result; -`,iC=` +`,SI=` vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); -`,o8=` +`,xY=` vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.))); return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); -`,a8=` +`,wY=` return vec4(equal(a, b)); -`,rne=` +`,Zst=` return vec4(notEqual(a, b)); -`,c8=` +`,vY=` return vec4(lessThan(a, b)); -`,l8=` +`,TY=` return vec4(lessThanEqual(a, b)); -`,h8=` +`,kY=` return vec4(greaterThan(a, b)); -`,u8=` +`,NY=` return vec4(greaterThanEqual(a, b)); -`,d8=` +`,_Y=` return vec4( vec4(greaterThanEqual(a, vec4(1.0))) * vec4(greaterThanEqual(b, vec4(1.0)))); -`,p8=` +`,CY=` return min( vec4(greaterThanEqual(a, vec4(1.0))) + vec4(greaterThanEqual(b, vec4(1.0))), vec4(1.0)); -`,m8=` +`,SY=` vec4 result = vec4(max(a, b)); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+km+` + `+Mm+` return result; -`,f8=` +`,$Y=` vec4 result = vec4(min(a, b)); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+km+` + `+Mm+` return result; -`,g8=` +`,IY=` vec4 result = mod(a, b); vec4 isNaN = vec4(equal(b, vec4(0.0))); - `+km+` + `+Mm+` return result; -`;class lr{constructor(e,t,n,s=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=nt(t,n);const i=this.outputShape.length;let o="";if(s)if(i===0||P(this.outputShape)===1)o=` +`;class Ns{constructor(t,e,r,o=!1){this.variableNames=["A","B"],this.supportsBroadcasting=!0,this.packedInputs=!0,this.packedOutput=!0,this.outputShape=le(e,r);let s=this.outputShape.length,c="";if(o)if(s===0||G(this.outputShape)===1)c=` result.y = 0.; result.z = 0.; result.w = 0.; - `;else{const a=Rt(i);if(o=` - ${a} coords = getOutputCoords(); - `,i===1)o+=` + `;else{let l=Oe(s);if(c=` + ${l} coords = getOutputCoords(); + `,s===1)c+=` result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. : result.y; result.z = 0.; result.w = 0.; - `;else{const c=Mn("coords",i);o+=` + `;else{let p=Yn("coords",s);c+=` bool nextRowOutOfBounds = - (${c[i-2]} + 1) >= ${this.outputShape[i-2]}; + (${p[s-2]} + 1) >= ${this.outputShape[s-2]}; bool nextColOutOfBounds = - (${c[i-1]} + 1) >= ${this.outputShape[i-1]}; + (${p[s-1]} + 1) >= ${this.outputShape[s-1]}; result.y = nextColOutOfBounds ? 0. : result.y; result.z = nextRowOutOfBounds ? 0. : result.z; result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w; `}}this.userCode=` vec4 binaryOperation(vec4 a, vec4 b) { - ${e} + ${t} } void main() { @@ -1024,11 +1024,11 @@ return (round(mod(b, 2.0)) != 1) ? vec4 b = getBAtOutCoords(); vec4 result = binaryOperation(a, b); - ${o} + ${c} setOutput(result); } - `}}class y8{constructor(e){this.variableNames=["A"],this.outputShape=e,this.userCode=` + `}}class EY{constructor(t){this.variableNames=["A"],this.outputShape=t,this.userCode=` uniform float minVal; uniform float maxVal; @@ -1041,7 +1041,7 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(clamp(value, minVal, maxVal)); } - `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class b8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(t,e){return(r,o)=>{this.minLoc==null&&(this.minLoc=r.getUniformLocationNoThrow(o,"minVal"),this.maxLoc=r.getUniformLocationNoThrow(o,"maxVal")),r.gl.uniform1f(this.minLoc,t),r.gl.uniform1f(this.maxLoc,e)}}}class DY{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.userCode=` uniform float minVal; uniform float maxVal; @@ -1055,7 +1055,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(clamp(value, vec4(minVal), vec4(maxVal))); } - `}getCustomSetupFunc(e,t){return(n,s)=>{this.minLoc==null&&(this.minLoc=n.getUniformLocationNoThrow(s,"minVal"),this.maxLoc=n.getUniformLocationNoThrow(s,"maxVal")),n.gl.uniform1f(this.minLoc,e),n.gl.uniform1f(this.maxLoc,t)}}}class w8{constructor(e){this.variableNames=["real","imag"],this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(t,e){return(r,o)=>{this.minLoc==null&&(this.minLoc=r.getUniformLocationNoThrow(o,"minVal"),this.maxLoc=r.getUniformLocationNoThrow(o,"maxVal")),r.gl.uniform1f(this.minLoc,t),r.gl.uniform1f(this.maxLoc,e)}}}class AY{constructor(t){this.variableNames=["real","imag"],this.outputShape=t,this.userCode=` void main() { float re = abs(getRealAtOutCoords()); float im = abs(getImagAtOutCoords()); @@ -1068,7 +1068,7 @@ return (round(mod(b, 2.0)) != 1) ? mx == 0.0 ? 0.0 : mx * length(vec2(1, min(re, im)/mx)) ); } - `}}class L8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.dataFormat==="channelsLast";this.userCode=` + `}}class FY{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;let e=t.strideHeight,r=t.strideWidth,o=t.padInfo.top,s=t.padInfo.left,c=t.dataFormat==="channelsLast";this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; @@ -1080,22 +1080,22 @@ return (round(mod(b, 2.0)) != 1) ? // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int b = 0; b < ${e.batchSize}; b++) { - for (int yR = 0; yR < ${e.outHeight}; yR++) { - int xR = wR + yR * ${t} - ${s}; + for (int b = 0; b < ${t.batchSize}; b++) { + for (int yR = 0; yR < ${t.outHeight}; yR++) { + int xR = wR + yR * ${e} - ${o}; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int yC = 0; yC < ${e.outWidth}; yC++) { - int xC = wC + yC * ${n} - ${i}; + for (int yC = 0; yC < ${t.outWidth}; yC++) { + int xC = wC + yC * ${r} - ${s}; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } - if (${o}) { + if (${c}) { float dyValue = getDy(b, yR, yC, d2); float xValue = getX(b, xR, xC, d1); dotProd += (xValue * dyValue); @@ -1110,45 +1110,45 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class S8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=e.dataFormat==="channelsLast",a=t-1-e.padInfo.top,c=n-1-e.padInfo.left,h=o?1:2,d=o?2:3,m=o?3:1;this.userCode=` - const ivec2 pads = ivec2(${a}, ${c}); + `}}class RY{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;let e=t.filterHeight,r=t.filterWidth,o=t.strideHeight,s=t.strideWidth,c=t.dataFormat==="channelsLast",l=e-1-t.padInfo.top,p=r-1-t.padInfo.left,f=c?1:2,m=c?2:3,y=c?3:1;this.userCode=` + const ivec2 pads = ivec2(${l}, ${p}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; - int d1 = coords[${m}]; + int d1 = coords[${y}]; - ivec2 dyCorner = ivec2(coords[${h}], coords[${d}]) - pads; + ivec2 dyCorner = ivec2(coords[${f}], coords[${m}]) - pads; int dyRCorner = dyCorner.x; int dyCCorner = dyCorner.y; // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1). // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int wR = 0; wR < ${t}; wR++) { - float dyR = float(dyRCorner + wR) / ${s}.0; + for (int wR = 0; wR < ${e}; wR++) { + float dyR = float(dyRCorner + wR) / ${o}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) { + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - int wRPerm = ${t} - 1 - wR; + int wRPerm = ${e} - 1 - wR; - for (int wC = 0; wC < ${n}; wC++) { - float dyC = float(dyCCorner + wC) / ${i}.0; + for (int wC = 0; wC < ${r}; wC++) { + float dyC = float(dyCCorner + wC) / ${s}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); - int wCPerm = ${n} - 1 - wC; + int wCPerm = ${r} - 1 - wC; - for (int d2 = 0; d2 < ${e.outChannels}; d2++) { + for (int d2 = 0; d2 < ${t.outChannels}; d2++) { - if (${o}) { + if (${c}) { float xValue = getDy(batch, idyR, idyC, d2); float wValue = getW(wRPerm, wCPerm, d1, d2); dotProd += xValue * wValue; @@ -1163,7 +1163,7 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class I8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.padInfo.front,o=e.padInfo.top,a=e.padInfo.left;this.userCode=` + `}}class PY{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;let e=t.strideDepth,r=t.strideHeight,o=t.strideWidth,s=t.padInfo.front,c=t.padInfo.top,l=t.padInfo.left;this.userCode=` void main() { ivec5 coords = getOutputCoords(); int wF = coords.x; @@ -1174,25 +1174,25 @@ return (round(mod(b, 2.0)) != 1) ? 
float dotProd = 0.0; - for (int b = 0; b < ${e.batchSize}; b++) { - for (int yF = 0; yF < ${e.outDepth}; yF++) { - int xF = wF + yF * ${t} - ${i}; + for (int b = 0; b < ${t.batchSize}; b++) { + for (int yF = 0; yF < ${t.outDepth}; yF++) { + int xF = wF + yF * ${e} - ${s}; - if (xF < 0 || xF >= ${e.inDepth}) { + if (xF < 0 || xF >= ${t.inDepth}) { continue; } - for (int yR = 0; yR < ${e.outHeight}; yR++) { - int xR = wR + yR * ${n} - ${o}; + for (int yR = 0; yR < ${t.outHeight}; yR++) { + int xR = wR + yR * ${r} - ${c}; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int yC = 0; yC < ${e.outWidth}; yC++) { - int xC = wC + yC * ${s} - ${a}; + for (int yC = 0; yC < ${t.outWidth}; yC++) { + int xC = wC + yC * ${o} - ${l}; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } @@ -1205,8 +1205,8 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class x8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterDepth,n=e.filterHeight,s=e.filterWidth,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=t-1-e.padInfo.front,h=n-1-e.padInfo.top,d=s-1-e.padInfo.left;this.userCode=` - const ivec3 pads = ivec3(${c}, ${h}, ${d}); + `}}class OY{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;let e=t.filterDepth,r=t.filterHeight,o=t.filterWidth,s=t.strideDepth,c=t.strideHeight,l=t.strideWidth,p=e-1-t.padInfo.front,f=r-1-t.padInfo.top,m=o-1-t.padInfo.left;this.userCode=` + const ivec3 pads = ivec3(${p}, ${f}, ${m}); void main() { ivec5 coords = getOutputCoords(); @@ -1220,39 +1220,39 @@ return (round(mod(b, 2.0)) != 1) ? int dyCCorner = dyCorner.z; float dotProd = 0.0; - for (int wF = 0; wF < ${t}; wF++) { - float dyF = float(dyFCorner + wF) / ${i}.0; + for (int wF = 0; wF < ${e}; wF++) { + float dyF = float(dyFCorner + wF) / ${s}.0; - if (dyF < 0.0 || dyF >= ${e.outDepth}.0 || fract(dyF) > 0.0) { + if (dyF < 0.0 || dyF >= ${t.outDepth}.0 || fract(dyF) > 0.0) { continue; } int idyF = int(dyF); - int wFPerm = ${t} - 1 - wF; + int wFPerm = ${e} - 1 - wF; - for (int wR = 0; wR < ${n}; wR++) { - float dyR = float(dyRCorner + wR) / ${o}.0; + for (int wR = 0; wR < ${r}; wR++) { + float dyR = float(dyRCorner + wR) / ${c}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - int wRPerm = ${n} - 1 - wR; + int wRPerm = ${r} - 1 - wR; - for (int wC = 0; wC < ${s}; wC++) { - float dyC = float(dyCCorner + wC) / ${a}.0; + for (int wC = 0; wC < ${o}; wC++) { + float dyC = float(dyCCorner + wC) / ${l}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); - int wCPerm = ${s} - 1 - wC; + int wCPerm = ${o} - 1 - wC; - for (int d2 = 0; d2 < ${e.outChannels}; d2++) { + for (int d2 = 0; d2 < ${t.outChannels}; d2++) { float xValue = getDy(batch, idyF, idyR, idyC, d2); float wValue = getW(wFPerm, wRPerm, wCPerm, d1, d2); dotProd += xValue * wValue; @@ -1262,30 +1262,30 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class T8{constructor(e){this.variableNames=["x","dy"],this.outputShape=e.filterShape;const t=e.strideHeight,n=e.strideWidth,s=e.padInfo.top,i=e.padInfo.left,o=e.outChannels/e.inChannels;this.userCode=` + `}}class LY{constructor(t){this.variableNames=["x","dy"],this.outputShape=t.filterShape;let e=t.strideHeight,r=t.strideWidth,o=t.padInfo.top,s=t.padInfo.left,c=t.outChannels/t.inChannels;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int wR = coords.x; int wC = coords.y; int d1 = coords.z; int dm = coords.w; - int d2 = d1 * ${o} + dm; + int d2 = d1 * ${c} + dm; float dotProd = 0.0; // TO DO: Vec4 over the batch size - for (int b = 0; b < ${e.batchSize}; b++) { - for (int yR = 0; yR < ${e.outHeight}; yR++) { - int xR = wR + yR * ${t} - ${s}; + for (int b = 0; b < ${t.batchSize}; b++) { + for (int yR = 0; yR < ${t.outHeight}; yR++) { + int xR = wR + yR * ${e} - ${o}; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int yC = 0; yC < ${e.outWidth}; yC++) { - int xC = wC + yC * ${n} - ${i}; + for (int yC = 0; yC < ${t.outWidth}; yC++) { + int xC = wC + yC * ${r} - ${s}; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } @@ -1297,8 +1297,8 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class A8{constructor(e){this.variableNames=["dy","W"],this.outputShape=e.inShape;const t=e.filterHeight,n=e.filterWidth,s=e.strideHeight,i=e.strideWidth,o=t-1-e.padInfo.top,a=n-1-e.padInfo.left,c=e.outChannels/e.inChannels;this.userCode=` - const ivec2 pads = ivec2(${o}, ${a}); + `}}class MY{constructor(t){this.variableNames=["dy","W"],this.outputShape=t.inShape;let e=t.filterHeight,r=t.filterWidth,o=t.strideHeight,s=t.strideWidth,c=e-1-t.padInfo.top,l=r-1-t.padInfo.left,p=t.outChannels/t.inChannels;this.userCode=` + const ivec2 pads = ivec2(${c}, ${l}); void main() { ivec4 coords = getOutputCoords(); @@ -1310,30 +1310,30 @@ return (round(mod(b, 2.0)) != 1) ? float dotProd = 0.0; - for (int wR = 0; wR < ${t}; wR++) { - float dyR = float(dyRCorner + wR) / ${s}.0; + for (int wR = 0; wR < ${e}; wR++) { + float dyR = float(dyRCorner + wR) / ${o}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) { + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - int wRPerm = ${t} - 1 - wR; + int wRPerm = ${e} - 1 - wR; - for (int wC = 0; wC < ${n}; wC++) { - float dyC = float(dyCCorner + wC) / ${i}.0; + for (int wC = 0; wC < ${r}; wC++) { + float dyC = float(dyCCorner + wC) / ${s}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); - int wCPerm = ${n} - 1 - wC; + int wCPerm = ${r} - 1 - wC; // TO DO: Vec4 over the channelMul - for (int dm = 0; dm < ${c}; dm++) { - int d2 = d1 * ${c} + dm; + for (int dm = 0; dm < ${p}; dm++) { + int d2 = d1 * ${p} + dm; float xValue = getDy(batch, idyR, idyC, d2); float wValue = getW(wRPerm, wCPerm, d1, dm); dotProd += xValue * wValue; @@ -1342,47 +1342,47 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class rC{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.padInfo.top,o=e.padInfo.left,a=e.strideHeight,c=e.strideWidth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterHeight,f=e.filterWidth,b=Math.floor(e.inChannels/4)*4,w=e.inChannels%4,L=e.dataFormat==="channelsLast",x=L?1:2,v=L?2:3,N=L?3:1;let O="",E="";n&&(s?O=`float activation(float a) { + `}}class $I{constructor(t,e=!1,r=null,o=!1){this.variableNames=["x","W"],this.outputShape=t.outShape;let s=t.padInfo.top,c=t.padInfo.left,l=t.strideHeight,p=t.strideWidth,f=t.dilationHeight,m=t.dilationWidth,y=t.filterHeight,b=t.filterWidth,v=Math.floor(t.inChannels/4)*4,T=t.inChannels%4,N=t.dataFormat==="channelsLast",S=N?1:2,D=N?2:3,I=N?3:1,P="",E="";r&&(o?P=`float activation(float a) { float b = getPreluActivationWeightsAtOutCoords(); - ${n} - }`:O=` + ${r} + }`:P=` float activation(float x) { - ${n} + ${r} } - `,E="result = activation(result);");const k=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` - ${O} + `,E="result = activation(result);");let L=e?"result += getBiasAtOutCoords();":"";e&&this.variableNames.push("bias"),o&&this.variableNames.push("preluActivationWeights"),this.userCode=` + ${P} - const ivec2 strides = ivec2(${a}, ${c}); - const ivec2 pads = ivec2(${i}, ${o}); + const ivec2 strides = ivec2(${l}, ${p}); + const ivec2 pads = ivec2(${s}, ${c}); void main() { ivec4 coords = getOutputCoords(); int batch = coords[0]; - int d2 = coords[${N}]; + int d2 = coords[${I}]; ivec2 xRCCorner = - ivec2(coords[${x}], coords[${v}]) * strides - pads; + ivec2(coords[${S}], coords[${D}]) * strides - pads; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; // Convolve x(?, ?, d1) with w(:, :, d1, d2) to get y(yR, yC, d2). // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int wR = 0; wR < ${m}; wR++) { - int xR = xRCorner + wR * ${h}; + for (int wR = 0; wR < ${y}; wR++) { + int xR = xRCorner + wR * ${f}; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${f}; wC++) { - int xC = xCCorner + wC * ${d}; + for (int wC = 0; wC < ${b}; wC++) { + int xC = xCCorner + wC * ${m}; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } - for (int d1 = 0; d1 < ${b}; d1 += 4) { + for (int d1 = 0; d1 < ${v}; d1 += 4) { vec4 wValues = vec4( getW(wR, wC, d1, d2), getW(wR, wC, d1 + 1, d2), @@ -1390,7 +1390,7 @@ return (round(mod(b, 2.0)) != 1) ? getW(wR, wC, d1 + 3, d2) ); - if (${L}) { + if (${N}) { vec4 xValues = vec4( getX(batch, xR, xC, d1), getX(batch, xR, xC, d1 + 1), @@ -1409,57 +1409,57 @@ return (round(mod(b, 2.0)) != 1) ? 
} } - if (${w===1}) { + if (${T===1}) { - if (${L}) { + if (${N}) { dotProd += - getX(batch, xR, xC, ${b}) * - getW(wR, wC, ${b}, d2); + getX(batch, xR, xC, ${v}) * + getW(wR, wC, ${v}, d2); } else { dotProd += - getX(batch, ${b}, xR, xC) * - getW(wR, wC, ${b}, d2); + getX(batch, ${v}, xR, xC) * + getW(wR, wC, ${v}, d2); } - } else if (${w===2}) { + } else if (${T===2}) { vec2 wValues = vec2( - getW(wR, wC, ${b}, d2), - getW(wR, wC, ${b} + 1, d2) + getW(wR, wC, ${v}, d2), + getW(wR, wC, ${v} + 1, d2) ); - if (${L}) { + if (${N}) { vec2 xValues = vec2( - getX(batch, xR, xC, ${b}), - getX(batch, xR, xC, ${b} + 1) + getX(batch, xR, xC, ${v}), + getX(batch, xR, xC, ${v} + 1) ); dotProd += dot(xValues, wValues); } else { vec2 xValues = vec2( - getX(batch, ${b}, xR, xC), - getX(batch, ${b} + 1, xR, xC) + getX(batch, ${v}, xR, xC), + getX(batch, ${v} + 1, xR, xC) ); dotProd += dot(xValues, wValues); } - } else if (${w===3}) { + } else if (${T===3}) { vec3 wValues = vec3( - getW(wR, wC, ${b}, d2), - getW(wR, wC, ${b} + 1, d2), - getW(wR, wC, ${b} + 2, d2) + getW(wR, wC, ${v}, d2), + getW(wR, wC, ${v} + 1, d2), + getW(wR, wC, ${v} + 2, d2) ); - if (${L}) { + if (${N}) { vec3 xValues = vec3( - getX(batch, xR, xC, ${b}), - getX(batch, xR, xC, ${b} + 1), - getX(batch, xR, xC, ${b} + 2) + getX(batch, xR, xC, ${v}), + getX(batch, xR, xC, ${v} + 1), + getX(batch, xR, xC, ${v} + 2) ); dotProd += dot(xValues, wValues); } else { vec3 xValues = vec3( - getX(batch, ${b}, xR, xC), - getX(batch, ${b} + 1, xR, xC), - getX(batch, ${b} + 2, xR, xC) + getX(batch, ${v}, xR, xC), + getX(batch, ${v} + 1, xR, xC), + getX(batch, ${v} + 2, xR, xC) ); dotProd += dot(xValues, wValues); } @@ -1469,13 +1469,13 @@ return (round(mod(b, 2.0)) != 1) ? } float result = dotProd; - ${k} + ${L} ${E} setOutput(result); } - `}}class v8{constructor(e){this.variableNames=["x","W"],this.outputShape=e.outShape;const t=e.padInfo.front,n=e.padInfo.top,s=e.padInfo.left,i=e.strideDepth,o=e.strideHeight,a=e.strideWidth,c=e.dilationDepth,h=e.dilationHeight,d=e.dilationWidth,m=e.filterDepth,f=e.filterHeight,b=e.filterWidth,w=Math.floor(e.inChannels/4)*4,L=e.inChannels%4;this.userCode=` - const ivec3 strides = ivec3(${i}, ${o}, ${a}); - const ivec3 pads = ivec3(${t}, ${n}, ${s}); + `}}class BY{constructor(t){this.variableNames=["x","W"],this.outputShape=t.outShape;let e=t.padInfo.front,r=t.padInfo.top,o=t.padInfo.left,s=t.strideDepth,c=t.strideHeight,l=t.strideWidth,p=t.dilationDepth,f=t.dilationHeight,m=t.dilationWidth,y=t.filterDepth,b=t.filterHeight,v=t.filterWidth,T=Math.floor(t.inChannels/4)*4,N=t.inChannels%4;this.userCode=` + const ivec3 strides = ivec3(${s}, ${c}, ${l}); + const ivec3 pads = ivec3(${e}, ${r}, ${o}); void main() { ivec5 coords = getOutputCoords(); @@ -1491,28 +1491,28 @@ return (round(mod(b, 2.0)) != 1) ? // y(yF, yR, yC, d2). ? = to be determined. : = across all // values in that axis. 
float dotProd = 0.0; - for (int wF = 0; wF < ${m}; wF++) { - int xF = xFCorner + wF * ${c}; + for (int wF = 0; wF < ${y}; wF++) { + int xF = xFCorner + wF * ${p}; - if (xF < 0 || xF >= ${e.inDepth}) { + if (xF < 0 || xF >= ${t.inDepth}) { continue; } - for (int wR = 0; wR < ${f}; wR++) { - int xR = xRCorner + wR * ${h}; + for (int wR = 0; wR < ${b}; wR++) { + int xR = xRCorner + wR * ${f}; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${b}; wC++) { - int xC = xCCorner + wC * ${d}; + for (int wC = 0; wC < ${v}; wC++) { + int xC = xCCorner + wC * ${m}; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } - for (int d1 = 0; d1 < ${w}; d1 += 4) { + for (int d1 = 0; d1 < ${T}; d1 += 4) { vec4 xValues = vec4( getX(batch, xF, xR, xC, d1), getX(batch, xF, xR, xC, d1 + 1), @@ -1529,30 +1529,30 @@ return (round(mod(b, 2.0)) != 1) ? dotProd += dot(xValues, wValues); } - if (${L===1}) { + if (${N===1}) { dotProd += - getX(batch, xF, xR, xC, ${w}) * - getW(wF, wR, wC, ${w}, d2); - } else if (${L===2}) { + getX(batch, xF, xR, xC, ${T}) * + getW(wF, wR, wC, ${T}, d2); + } else if (${N===2}) { vec2 xValues = vec2( - getX(batch, xF, xR, xC, ${w}), - getX(batch, xF, xR, xC, ${w} + 1) + getX(batch, xF, xR, xC, ${T}), + getX(batch, xF, xR, xC, ${T} + 1) ); vec2 wValues = vec2( - getW(wF, wR, wC, ${w}, d2), - getW(wF, wR, wC, ${w} + 1, d2) + getW(wF, wR, wC, ${T}, d2), + getW(wF, wR, wC, ${T} + 1, d2) ); dotProd += dot(xValues, wValues); - } else if (${L===3}) { + } else if (${N===3}) { vec3 xValues = vec3( - getX(batch, xF, xR, xC, ${w}), - getX(batch, xF, xR, xC, ${w} + 1), - getX(batch, xF, xR, xC, ${w} + 2) + getX(batch, xF, xR, xC, ${T}), + getX(batch, xF, xR, xC, ${T} + 1), + getX(batch, xF, xR, xC, ${T} + 2) ); vec3 wValues = vec3( - getW(wF, wR, wC, ${w}, d2), - getW(wF, wR, wC, ${w} + 1, d2), - getW(wF, wR, wC, ${w} + 2, d2) + getW(wF, wR, wC, ${T}, d2), + getW(wF, wR, wC, ${T} + 1, d2), + getW(wF, wR, wC, ${T} + 2, d2) ); dotProd += dot(xValues, wValues); } @@ -1561,26 +1561,26 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class oC{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.outputShape=e.outShape;const i=e.inHeight,o=e.inWidth,a=e.padInfo.top,c=e.padInfo.left,h=e.strideHeight,d=e.strideWidth,m=e.dilationHeight,f=e.dilationWidth,b=e.filterHeight,w=e.filterWidth,L=e.outChannels/e.inChannels;let x="",v="";n&&(s?x=`float activation(float a) { + `}}class II{constructor(t,e=!1,r=null,o=!1){this.variableNames=["x","W"],this.outputShape=t.outShape;let s=t.inHeight,c=t.inWidth,l=t.padInfo.top,p=t.padInfo.left,f=t.strideHeight,m=t.strideWidth,y=t.dilationHeight,b=t.dilationWidth,v=t.filterHeight,T=t.filterWidth,N=t.outChannels/t.inChannels,S="",D="";r&&(o?S=`float activation(float a) { float b = getPreluActivationWeightsAtOutCoords(); - ${n} - }`:x=` + ${r} + }`:S=` float activation(float x) { - ${n} + ${r} } - `,v="result = activation(result);");const N=t?"result += getBiasAtOutCoords();":"";t&&this.variableNames.push("bias"),s&&this.variableNames.push("preluActivationWeights"),this.userCode=` - ${x} + `,D="result = activation(result);");let I=e?"result += getBiasAtOutCoords();":"";e&&this.variableNames.push("bias"),o&&this.variableNames.push("preluActivationWeights"),this.userCode=` + ${S} - const ivec2 strides = ivec2(${h}, ${d}); - const ivec2 pads = ivec2(${a}, ${c}); + const ivec2 strides = ivec2(${f}, ${m}); + const ivec2 pads = ivec2(${l}, ${p}); void main() { ivec4 coords = getOutputCoords(); int batch = coords.x; ivec2 xRCCorner = coords.yz * strides - pads; int d2 = coords.w; - int d1 = d2 / ${L}; - int q = d2 - d1 * ${L}; + int d1 = d2 / ${N}; + int q = d2 - d1 * ${N}; int xRCorner = xRCCorner.x; int xCCorner = xRCCorner.y; @@ -1589,17 +1589,17 @@ return (round(mod(b, 2.0)) != 1) ? // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; // TO DO(dsmilkov): Flatten the two for loops and vec4 the operations. - for (int wR = 0; wR < ${b}; wR++) { - int xR = xRCorner + wR * ${m}; + for (int wR = 0; wR < ${v}; wR++) { + int xR = xRCorner + wR * ${y}; - if (xR < 0 || xR >= ${i}) { + if (xR < 0 || xR >= ${s}) { continue; } - for (int wC = 0; wC < ${w}; wC++) { - int xC = xCCorner + wC * ${f}; + for (int wC = 0; wC < ${T}; wC++) { + int xC = xCCorner + wC * ${b}; - if (xC < 0 || xC >= ${o}) { + if (xC < 0 || xC >= ${c}) { continue; } @@ -1610,139 +1610,139 @@ return (round(mod(b, 2.0)) != 1) ? } float result = dotProd; - ${N} - ${v} + ${I} + ${D} setOutput(result); } - `}}class aC{constructor(e,t=!1,n=null,s=!1){this.variableNames=["x","W"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e.outShape;const i=e.inHeight,o=e.inWidth,a=e.padInfo.top,c=e.padInfo.left,h=e.strideHeight,d=e.strideWidth,m=e.dilationHeight,f=e.dilationWidth,b=e.filterHeight,w=e.filterWidth,L=w;let x="int xR; int xC; int xCOffset;";for(let E=0;E= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); + if(xR >= 0 && xR < ${s} && xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B} = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. 
- if(xCOffset + 1 >= ${o}) { - xTexelR${E}C${F}.zw = vec2(0.); + if(xCOffset + 1 >= ${c}) { + xTexelR${E}C${B}.zw = vec2(0.); } } else { - xTexelR${E}C${F} = vec4(0.); + xTexelR${E}C${B} = vec4(0.); } xCOffset = xC + 1 - 2; - if(xR >= 0 && xR < ${i} && xCOffset >= 0 && xCOffset < ${o}) { + if(xR >= 0 && xR < ${s} && xCOffset >= 0 && xCOffset < ${c}) { vec4 previous = getX(batch, xR, xCOffset, d1); // Need to manually clear unused channels in case // we're reading from recycled texture. - if(xCOffset + 1 >= ${o}) { + if(xCOffset + 1 >= ${c}) { previous.zw = vec2(0.); } - xR${E}C${F} = vec4(previous.zw, xTexelR${E}C${F}.xy); + xR${E}C${B} = vec4(previous.zw, xTexelR${E}C${B}.xy); } else { - xR${E}C${F} = vec4(0, 0, xTexelR${E}C${F}.xy); + xR${E}C${B} = vec4(0, 0, xTexelR${E}C${B}.xy); } - `:x+=` - if(xR >= 0 && xR < ${i} && xC >= 0 && xC < ${o}) { - xTexelR${E}C${F} = getX(batch, xR, xC, d1); + `:S+=` + if(xR >= 0 && xR < ${s} && xC >= 0 && xC < ${c}) { + xTexelR${E}C${B} = getX(batch, xR, xC, d1); } else { - xTexelR${E}C${F} = vec4(0.); + xTexelR${E}C${B} = vec4(0.); } - xR${E}C${F} = xTexelR${E}C${F}; - `,F+1= 0 && xR < ${i} && - xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); + if(xR >= 0 && xR < ${s} && + xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B+2} = getX(batch, xR, xCOffset, d1); } - `,f>1&&(x+=` + `,b>1&&(S+=` xCOffset -= 2; - if(xR >= 0 && xR < ${i} && - xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); + if(xR >= 0 && xR < ${s} && + xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${E}C${F} = vec4(0.); + xTexelR${E}C${B} = vec4(0.); } - `),x+=` - xR${E}C${F+1} = vec4( - xTexelR${E}C${F}.zw, xTexelR${E}C${F+2}.xy); - `):x+=` - xCOffset = xC + ${U}; + `),S+=` + xR${E}C${B+1} = vec4( + xTexelR${E}C${B}.zw, xTexelR${E}C${B+2}.xy); + `):S+=` + xCOffset = xC + ${q}; - if(xR >= 0 && xR < ${i} && - xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); + if(xR >= 0 && xR < ${s} && + xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B+2} = getX(batch, xR, xCOffset, d1); } - xR${E}C${F+1} = xTexelR${E}C${F+2}; - `}}else F= 0 && xR < ${i}) { - `,c%2===1?(x+=` - xCOffset = xC + 1 - ${d}; - if(xCOffset >= 0 && xCOffset < ${o}) { - xTexelR${E}C${F} = getX(batch, xR, xCOffset, d1); + xR${E}C${B+1} = xTexelR${E}C${B+2}; + `}}else B= 0 && xR < ${s}) { + `,p%2===1?(S+=` + xCOffset = xC + 1 - ${m}; + if(xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${E}C${F} = vec4(0.); + xTexelR${E}C${B} = vec4(0.); } - if(xC + 1 >= 0 && xC + 1 < ${o}) { - xTexelR${E}C${F+2} = getX(batch, xR, xC + 1, d1); + if(xC + 1 >= 0 && xC + 1 < ${c}) { + xTexelR${E}C${B+2} = getX(batch, xR, xC + 1, d1); } else { - xTexelR${E}C${F+2} = vec4(0.); + xTexelR${E}C${B+2} = vec4(0.); } - xR${E}C${F} = vec4( - xTexelR${E}C${F}.zw, xTexelR${E}C${F+2}.zw); - `,F+1= 0 && xCOffset < ${o}) { + xCOffset = xC + 1 + ${m}; + if(xCOffset >= 0 && xCOffset < ${c}) { final = getX(batch, xR, xCOffset, d1); } - xR${E}C${F+1} = vec4(xTexelR${E}C${F+2}.xy, final.xy); - `)):(x+=` - if(xC >= 0 && xC < ${o}) { - xTexelR${E}C${F} = getX(batch, xR, xC, d1); + xR${E}C${B+1} = vec4(xTexelR${E}C${B+2}.xy, final.xy); + `)):(S+=` + if(xC >= 0 && xC < ${c}) { + xTexelR${E}C${B} = getX(batch, xR, xC, d1); } else { - xTexelR${E}C${F} = vec4(0.); + xTexelR${E}C${B} = vec4(0.); } - xCOffset = xC + ${d}; - if(xCOffset >= 0 && 
xCOffset < ${o}) { - xTexelR${E}C${F+2} = getX(batch, xR, xCOffset, d1); + xCOffset = xC + ${m}; + if(xCOffset >= 0 && xCOffset < ${c}) { + xTexelR${E}C${B+2} = getX(batch, xR, xCOffset, d1); } else { - xTexelR${E}C${F+2} = vec4(0.); + xTexelR${E}C${B+2} = vec4(0.); } - xR${E}C${F} = vec4( - xTexelR${E}C${F}.xy, xTexelR${E}C${F+2}.xy); - `,F+11?[`${(a-1)/(m-1)}`,"(y2-y1) * height_ratio",`y1*${w} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * ${w}`],[O,E,k]=f>1?[`${(c-1)/(f-1)}`,"(x2-x1) * width_ratio",`x1*${L} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * ${L}`];this.userCode=` - const float height_ratio = float(${x}); - const float width_ratio = float(${O}); + `}}class zY{constructor(t,e,r,o,s){this.variableNames=["Image","Boxes","BoxInd"],this.outputShape=[];let[c,l,p,f]=t,[m]=e,[y,b]=r;this.outputShape=[m,y,b,f];let v=o==="bilinear"?1:0,[T,N]=[`${l-1}.0`,`${p-1}.0`],[S,D,I]=y>1?[`${(l-1)/(y-1)}`,"(y2-y1) * height_ratio",`y1*${T} + float(y)*(height_scale)`]:["0.0","0.0",`0.5 * (y1+y2) * ${T}`],[P,E,L]=b>1?[`${(p-1)/(b-1)}`,"(x2-x1) * width_ratio",`x1*${N} + float(x)*(width_scale)`]:["0.0","0.0",`0.5 * (x1+x2) * ${N}`];this.userCode=` + const float height_ratio = float(${S}); + const float width_ratio = float(${P}); void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -1782,26 +1782,26 @@ return (round(mod(b, 2.0)) != 1) ? // get image in batch index int bInd = round(getBoxInd(b)); - if(bInd < 0 || bInd >= ${o}) { + if(bInd < 0 || bInd >= ${c}) { return; } - float height_scale = ${v}; + float height_scale = ${D}; float width_scale = ${E}; - float in_y = ${N}; - if( in_y < 0.0 || in_y > ${w} ) { - setOutput(float(${i})); + float in_y = ${I}; + if( in_y < 0.0 || in_y > ${T} ) { + setOutput(float(${s})); return; } - float in_x = ${k}; - if( in_x < 0.0 || in_x > ${L} ) { - setOutput(float(${i})); + float in_x = ${L}; + if( in_x < 0.0 || in_x > ${N} ) { + setOutput(float(${s})); return; } vec2 sourceFracIndexCR = vec2(in_x,in_y); - if(${b} == 1) { + if(${v} == 1) { // Compute the four integer indices. ivec2 sourceFloorCR = ivec2(sourceFracIndexCR); ivec2 sourceCeilCR = ivec2(ceil(sourceFracIndexCR)); @@ -1825,30 +1825,30 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(newValue); } } - `}}class cC{constructor(e,t,n){this.variableNames=["x"],this.outputShape=e;const s=e.length,i=t?"0.0":`getX(${lC(s,"coords")})`,o=e[e.length-1];let a="",c="";t?(a=n?`end != ${o-1}`:"end != 0",c=n?"end + 1":"end - 1"):(a=n?`end + pow2 < ${o}`:"end >= pow2",c=n?"end + pow2":"end - pow2"),this.userCode=` + `}}class DI{constructor(t,e,r){this.variableNames=["x"],this.outputShape=t;let o=t.length,s=e?"0.0":`getX(${AI(o,"coords")})`,c=t[t.length-1],l="",p="";e?(l=r?`end != ${c-1}`:"end != 0",p=r?"end + 1":"end - 1"):(l=r?`end + pow2 < ${c}`:"end >= pow2",p=r?"end + pow2":"end - pow2"),this.userCode=` uniform float index; void main() { - ${Rt(s)} coords = getOutputCoords(); - int end = ${hC(s,"coords")}; - float val = ${i}; + ${Oe(o)} coords = getOutputCoords(); + int end = ${FI(o,"coords")}; + float val = ${s}; int pow2 = int(pow(2.0, index)); - if (${a}) { - int idx = ${c}; - ${hC(s,"coords")} = idx; - val += getX(${lC(s,"coords")}); + if (${l}) { + int idx = ${p}; + ${FI(o,"coords")} = idx; + val += getX(${AI(o,"coords")}); } setOutput(val); } - `}getCustomSetupFunc(e){return(t,n)=>{this.index==null&&(this.index=t.getUniformLocation(n,"index")),t.gl.uniform1f(this.index,e)}}}function lC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.x, ${t}.y`;if(e===3)return`${t}.x, ${t}.y, ${t}.z`;if(e===4)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}function hC(e,t){if(e===1)return`${t}`;if(e===2)return`${t}.y`;if(e===3)return`${t}.z`;if(e===4)return`${t}.w`;throw Error(`Cumulative sum for rank ${e} is not yet supported`)}class C8{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=lu.DENSE;const t=uu(e),n=Pn();this.outputShape=e,this.userCode=` + `}getCustomSetupFunc(t){return(e,r)=>{this.index==null&&(this.index=e.getUniformLocation(r,"index")),e.gl.uniform1f(this.index,t)}}}function AI(n,t){if(n===1)return`${t}`;if(n===2)return`${t}.x, ${t}.y`;if(n===3)return`${t}.x, ${t}.y, ${t}.z`;if(n===4)return`${t}.x, ${t}.y, ${t}.z, ${t}.w`;throw Error(`Cumulative sum for rank ${n} is not yet supported`)}function FI(n,t){if(n===1)return`${t}`;if(n===2)return`${t}.y`;if(n===3)return`${t}.z`;if(n===4)return`${t}.w`;throw Error(`Cumulative sum for rank ${n} is not yet supported`)}class WY{constructor(t){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outPackingScheme=nh.DENSE;let e=oh(t),r=Jn();this.outputShape=t,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${Yo(["r","c","d"],e)} + ${xa(["r","c","d"],t)} return ivec3(r, c, d); } void main() { ivec2 resTexRC = ivec2(resultUV.yx * - vec2(${t[0]}, ${t[1]})); - int index = 4 * (resTexRC.x * ${t[1]} + resTexRC.y); + vec2(${e[0]}, ${e[1]})); + int index = 4 * (resTexRC.x * ${e[1]} + resTexRC.y); vec4 result = vec4(0.); @@ -1858,18 +1858,18 @@ return (round(mod(b, 2.0)) != 1) ? 
result[i] = getA(rc.x, rc.y, rc.z); } - ${n.output} = result; + ${r.output} = result; } - `}}class R8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=lu.DENSE;const t=uu(e),n=Pn();this.outputShape=e,this.userCode=` + `}}class VY{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outPackingScheme=nh.DENSE;let e=oh(t),r=Jn();this.outputShape=t,this.userCode=` ivec3 outCoordsFromFlatIndex(int index) { - ${Yo(["r","c","d"],e)} + ${xa(["r","c","d"],t)} return ivec3(r, c, d); } void main() { ivec2 resTexRC = ivec2(resultUV.yx * - vec2(${t[0]}, ${t[1]})); - int index = 4 * (resTexRC.x * ${t[1]} + resTexRC.y); + vec2(${e[0]}, ${e[1]})); + int index = 4 * (resTexRC.x * ${e[1]} + resTexRC.y); vec4 result = vec4(0.); @@ -1879,9 +1879,9 @@ return (round(mod(b, 2.0)) != 1) ? result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z)); } - ${n.output} = result; + ${r.output} = result; } - `}}class O8{constructor(e,t,n){this.variableNames=["x"],this.outputShape=[],this.outputShape=e,this.blockSize=t,this.dataFormat=n,this.userCode=` + `}}class GY{constructor(t,e,r){this.variableNames=["x"],this.outputShape=[],this.outputShape=t,this.blockSize=e,this.dataFormat=r,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -1889,40 +1889,40 @@ return (round(mod(b, 2.0)) != 1) ? int w = ${this.getWidthCoordString()}; int d = ${this.getDepthCoordString()}; - int in_h = h / ${t}; - int offset_h = imod(h, ${t}); - int in_w = w / ${t}; - int offset_w = imod(w, ${t}); - int offset_d = (offset_h * ${t} + offset_w) * + int in_h = h / ${e}; + int offset_h = imod(h, ${e}); + int in_w = w / ${e}; + int offset_w = imod(w, ${e}); + int offset_d = (offset_h * ${e} + offset_w) * ${this.getOutputDepthSize()}; int in_d = d + offset_d; float result = ${this.getInputSamplingString()}; setOutput(result); } - `}getHeightCoordString(){return this.dataFormat==="NHWC"?"coords[1]":"coords[2]"}getWidthCoordString(){return this.dataFormat==="NHWC"?"coords[2]":"coords[3]"}getDepthCoordString(){return this.dataFormat==="NHWC"?"coords[3]":"coords[1]"}getOutputDepthSize(){return this.dataFormat==="NHWC"?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return this.dataFormat==="NHWC"?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}}class E8{constructor(e){this.variableNames=["X"],this.outputShape=[e,e],this.userCode=` + `}getHeightCoordString(){return this.dataFormat==="NHWC"?"coords[1]":"coords[2]"}getWidthCoordString(){return this.dataFormat==="NHWC"?"coords[2]":"coords[3]"}getDepthCoordString(){return this.dataFormat==="NHWC"?"coords[3]":"coords[1]"}getOutputDepthSize(){return this.dataFormat==="NHWC"?this.outputShape[3]:this.outputShape[1]}getInputSamplingString(){return this.dataFormat==="NHWC"?"getX(b, in_h, in_w, in_d)":"getX(b, in_d, in_h, in_w)"}}class UY{constructor(t){this.variableNames=["X"],this.outputShape=[t,t],this.userCode=` void main() { ivec2 coords = getOutputCoords(); float val = coords[0] == coords[1] ? 
getX(coords[0]) : 0.0; setOutput(val); } - `}}class D8{constructor(e){this.variableNames=["A"],this.outTexUsage=Ns.DOWNLOAD;const t=Pn();this.outputShape=e,this.userCode=` - ${Z0} + `}}class qY{constructor(t){this.variableNames=["A"],this.outTexUsage=Mr.DOWNLOAD;let e=Jn();this.outputShape=t,this.userCode=` + ${vI} void main() { float x = getAAtOutCoords(); - ${t.output} = encode_float(x); + ${e.output} = encode_float(x); } - `}}class k8{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=Ns.DOWNLOAD;const t=Pn();this.outputShape=e,this.userCode=` - ${Z0} + `}}class HY{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outTexUsage=Mr.DOWNLOAD;let e=Jn();this.outputShape=t,this.userCode=` + ${vI} void main() { ivec3 coords = getOutputCoords(); float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z)); - ${t.output} = encode_float(x); + ${e.output} = encode_float(x); } - `}}class F8{constructor(e,t,n=!1){this.variableNames=["A"];const s=Pn(),[i,o]=t;this.outputShape=e;let a="result";n&&(a="floor(result * 255. + 0.5)"),this.userCode=` - ${IS(e)} + `}}class jY{constructor(t,e,r=!1){this.variableNames=["A"];let o=Jn(),[s,c]=e;this.outputShape=t;let l="result";r&&(l="floor(result * 255. + 0.5)"),this.userCode=` + ${b1(t)} void main() { ivec3 coords = getOutputCoords(); @@ -1932,10 +1932,10 @@ return (round(mod(b, 2.0)) != 1) ? flatIndex = idiv(flatIndex, 4, 1.); - int r = flatIndex / ${o}; - int c = imod(flatIndex, ${o}); - vec2 uv = (vec2(c, r) + halfCR) / vec2(${o}.0, ${i}.0); - vec4 values = ${s.texture2D}(A, uv); + int r = flatIndex / ${c}; + int c = imod(flatIndex, ${c}); + vec2 uv = (vec2(c, r) + halfCR) / vec2(${c}.0, ${s}.0); + vec4 values = ${o.texture2D}(A, uv); float result; @@ -1949,38 +1949,38 @@ return (round(mod(b, 2.0)) != 1) ? result = values[3]; } - ${s.output} = vec4(${a}, 0., 0., 0.); + ${o.output} = vec4(${l}, 0., 0., 0.); } - `}}class _8{constructor(e,t,n=!1){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const s=Pn(),[i,o]=t;this.outputShape=e;let a="",c="result";n&&(c="floor(result * 255. + 0.5)");for(let h=0;h<=1;h++)for(let d=0;d<=1;d++){const m=h*2+d;a+=` + `}}class KY{constructor(t,e,r=!1){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;let o=Jn(),[s,c]=e;this.outputShape=t;let l="",p="result";r&&(p="floor(result * 255. 
+ 0.5)");for(let f=0;f<=1;f++)for(let m=0;m<=1;m++){let y=f*2+m;l+=` localCoords = coords; - if(localCoords[2] + ${d} < ${e[2]}) { - localCoords[2] += ${d}; - if(localCoords[1] + ${h} < ${e[1]}) { - localCoords[1] += ${h}; + if(localCoords[2] + ${m} < ${t[2]}) { + localCoords[2] += ${m}; + if(localCoords[1] + ${f} < ${t[1]}) { + localCoords[1] += ${f}; flatIndex = getFlatIndex(localCoords); offset = imod(flatIndex, 4); flatIndex = idiv(flatIndex, 4, 1.); - r = flatIndex / ${o}; - c = imod(flatIndex, ${o}); - uv = (vec2(c, r) + halfCR) / vec2(${o}.0, ${i}.0); - values = ${s.texture2D}(A, uv); + r = flatIndex / ${c}; + c = imod(flatIndex, ${c}); + uv = (vec2(c, r) + halfCR) / vec2(${c}.0, ${s}.0); + values = ${o.texture2D}(A, uv); if(offset == 0) { - result[${m}] = values[0]; + result[${y}] = values[0]; } else if(offset == 1) { - result[${m}] = values[1]; + result[${y}] = values[1]; } else if(offset == 2) { - result[${m}] = values[2]; + result[${y}] = values[2]; } else { - result[${m}] = values[3]; + result[${y}] = values[3]; } } } `}this.userCode=` - ${IS(e)} + ${b1(t)} void main() { ivec3 coords = getOutputCoords(); @@ -1991,33 +1991,33 @@ return (round(mod(b, 2.0)) != 1) ? vec2 uv; vec4 values; - ${a} + ${l} - ${s.output} = ${c}; + ${o.output} = ${p}; } - `}}class W8{constructor(e,t){this.outputShape=[],this.variableNames=["x"],this.outputShape=e,this.userCode=` + `}}class XY{constructor(t,e){this.outputShape=[],this.variableNames=["x"],this.outputShape=t,this.userCode=` uniform float value; void main() { // Input can be obtained from uniform value. setOutput(value); } - `}getCustomSetupFunc(e){return(t,n)=>{this.valueLoc==null&&(this.valueLoc=t.getUniformLocationNoThrow(n,"value")),t.gl.uniform1f(this.valueLoc,e)}}}class $8{constructor(e,t,n){this.variableNames=["A","indices"];const s=e.slice();s[n]=t,this.outputShape=s,this.rank=s.length;const i=Rt(this.rank),o=U8(e,n);this.userCode=` + `}getCustomSetupFunc(t){return(e,r)=>{this.valueLoc==null&&(this.valueLoc=e.getUniformLocationNoThrow(r,"value")),e.gl.uniform1f(this.valueLoc,t)}}}class YY{constructor(t,e,r){this.variableNames=["A","indices"];let o=t.slice();o[r]=e,this.outputShape=o,this.rank=o.length;let s=Oe(this.rank),c=JY(t,r);this.userCode=` void main() { - ${i} resRC = getOutputCoords(); - setOutput(getA(${o})); + ${s} resRC = getOutputCoords(); + setOutput(getA(${c})); } - `}}function U8(e,t){const n=e.length;if(n>4)throw Error(`Gather for rank ${n} is not yet supported`);if(n===1)return"int(getIndices(resRC))";const s=["resRC.x","resRC.y","resRC.z","resRC.w"],i=[];for(let o=0;o1?"strides[j]":"strides";this.userCode=` - ${s} strides = ${s}(${this.strides}); + `}}function JY(n,t){let e=n.length;if(e>4)throw Error(`Gather for rank ${e} is not yet supported`);if(e===1)return"int(getIndices(resRC))";let r=["resRC.x","resRC.y","resRC.z","resRC.w"],o=[];for(let s=0;s1?"strides[j]":"strides";this.userCode=` + ${o} strides = ${o}(${this.strides}); void main() { - ${i} coords = getOutputCoords(); + ${s} coords = getOutputCoords(); int flattenIndex = 0; for (int j = 0; j < ${this.sliceDim}; j++) { int index = round(getIndices(coords[0], j)); - flattenIndex += index * ${o}; + flattenIndex += index * ${c}; } setOutput(getX(flattenIndex, coords[1])); } - `}}function M8(e){const t=Pn(),n=`${t.version} + `}}function QY(n){let t=Jn(),e=`${t.version} precision highp float; ${t.attribute} vec3 clipSpacePos; ${t.attribute} vec2 uv; @@ -2026,31 +2026,31 @@ return (round(mod(b, 2.0)) != 1) ? 
void main() { gl_Position = vec4(clipSpacePos, 1); resultUV = uv; - }`;return wK(e,n)}function P8(e){const t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return AK(e,t)}function z8(e){const t=new Uint16Array([0,1,2,2,1,3]);return vK(e,t)}function pu(e,t,n,s,i,o){CK(t,n);const a=NK(e),c=e.TEXTURE_2D;return Ee(e,()=>e.bindTexture(c,a)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_S,e.CLAMP_TO_EDGE)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_WRAP_T,e.CLAMP_TO_EDGE)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_MIN_FILTER,e.NEAREST)),Ee(e,()=>e.texParameteri(c,e.TEXTURE_MAG_FILTER,e.NEAREST)),Ee(e,()=>e.texImage2D(c,0,s,t,n,0,i,o,null)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null)),a}function uC(e){return e.internalFormatFloat}function V8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,uC(s),s.textureFormatFloat,e.FLOAT)}function dC(e){return e.internalFormatHalfFloat}function G8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,dC(s),s.textureFormatFloat,s.textureTypeHalfFloat)}function pC(e){return e.downloadTextureFormat}function Y8(e,t,n,s){const[i,o]=hu(t,n);return pu(e,i,o,pC(s),e.RGBA,e.UNSIGNED_BYTE)}function mC(e){return e.internalFormatPackedFloat}function H8(e,t,n,s){const[i,o]=pc(t,n);return pu(e,i,o,mC(s),e.RGBA,e.FLOAT)}function fC(e){return e.internalFormatPackedHalfFloat}function q8(e,t,n,s){const[i,o]=pc(t,n);return pu(e,i,o,fC(s),e.RGBA,s.textureTypeHalfFloat)}function j8(e,t,n){const s=0,i=3*4,o=3*4+2*4;Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,n));const a=q0(e,t,"clipSpacePos",n,3,o,s);return a&&q0(e,t,"uv",n,2,o,i)}function K8(e,t,n,s,i,o){Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t));let a,c,h;i instanceof Uint8Array?(a=new Uint8Array(n*s*4),c=e.UNSIGNED_BYTE,h=e.RGBA):(a=new Float32Array(n*s*4),c=e.FLOAT,h=o.internalFormatPackedFloat),a.set(i),Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,h,n,s,0,e.RGBA,c,a)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function X8(e,t,n){Ee(e,()=>e.bindTexture(e.TEXTURE_2D,t)),n.data instanceof Uint8Array?Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,n.width,n.height,0,e.RGBA,e.UNSIGNED_BYTE,n.data)):Ee(e,()=>e.texImage2D(e.TEXTURE_2D,0,e.RGBA,e.RGBA,e.UNSIGNED_BYTE,n)),Ee(e,()=>e.bindTexture(e.TEXTURE_2D,null))}function J8(e,t,n,s){const i=e.createBuffer();Ee(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,i));const o=4,a=4,c=o*a*t*n;return Ee(e,()=>e.bufferData(e.PIXEL_PACK_BUFFER,c,e.STREAM_READ)),Ee(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,0)),Ee(e,()=>e.bindBuffer(e.PIXEL_PACK_BUFFER,null)),i}function Z8(e,t,n){const s=e,i=new Float32Array(n);return s.bindBuffer(s.PIXEL_PACK_BUFFER,t),s.getBufferSubData(s.PIXEL_PACK_BUFFER,0,i),s.bindBuffer(s.PIXEL_PACK_BUFFER,null),i}function Q8(e,t,n,s){const[i,o]=hu(t,n),a=4,c=new Uint8Array(dK(t*n,a));return Ee(e,()=>e.readPixels(0,0,i,o,s.downloadTextureFormat,e.UNSIGNED_BYTE,c)),new Float32Array(c.buffer)}function e6(e,t,n,s,i,o,a,c){const h=e,d=new Float32Array(pK(o,a));return h.bindBuffer(h.PIXEL_PACK_BUFFER,t),h.getBufferSubData(h.PIXEL_PACK_BUFFER,0,d),h.bindBuffer(h.PIXEL_PACK_BUFFER,null),d}function t6(e,t,n){const s=new Float32Array(t*n*4);return Ee(e,()=>e.readPixels(0,0,n,t,e.RGBA,e.FLOAT,s)),s}class n6{constructor(e){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];const t=oe().getNumber("WEBGL_VERSION");e!=null?(this.gl=e,lK(t,e)):this.gl=ki(t);let n="WEBGL_color_buffer_float";const s="EXT_color_buffer_half_float";if(oe().getNumber("WEBGL_VERSION")===1){const 
i="OES_texture_float",o="OES_texture_half_float";if(this.textureFloatExtension=vm(this.gl,i),Vs(this.gl,o))this.textureHalfFloatExtension=vm(this.gl,o);else if(oe().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(n),Vs(this.gl,s))this.colorBufferHalfFloatExtension=vm(this.gl,s);else if(oe().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(n="EXT_color_buffer_float",Vs(this.gl,n))this.colorBufferFloatExtension=this.gl.getExtension(n);else if(Vs(this.gl,s))this.colorBufferHalfFloatExtension=this.gl.getExtension(s);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=P8(this.gl),this.indexBuffer=z8(this.gl),this.framebuffer=RK(this.gl),this.textureConfig=gS(this.gl,this.textureHalfFloatExtension)}get debug(){return oe().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");const e=this.gl;Ee(e,()=>e.finish()),Ee(e,()=>e.bindFramebuffer(e.FRAMEBUFFER,null)),Ee(e,()=>e.deleteFramebuffer(this.framebuffer)),Ee(e,()=>e.bindBuffer(e.ARRAY_BUFFER,null)),Ee(e,()=>e.bindBuffer(e.ELEMENT_ARRAY_BUFFER,null)),Ee(e,()=>e.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(e,t){return this.throwIfDisposed(),V8(this.gl,e,t,this.textureConfig)}createFloat16MatrixTexture(e,t){return this.throwIfDisposed(),G8(this.gl,e,t,this.textureConfig)}createUnsignedBytesMatrixTexture(e,t){return this.throwIfDisposed(),Y8(this.gl,e,t,this.textureConfig)}uploadPixelDataToTexture(e,t){this.throwIfDisposed(),X8(this.gl,e,t)}uploadDenseMatrixToTexture(e,t,n,s){this.throwIfDisposed(),K8(this.gl,e,t,n,s,this.textureConfig)}createFloat16PackedMatrixTexture(e,t){return this.throwIfDisposed(),q8(this.gl,e,t,this.textureConfig)}createPackedMatrixTexture(e,t){return this.throwIfDisposed(),H8(this.gl,e,t,this.textureConfig)}deleteMatrixTexture(e){this.throwIfDisposed(),this.outputTexture===e&&(j0(this.gl,this.framebuffer),this.outputTexture=null),Ee(this.gl,()=>this.gl.deleteTexture(e))}downloadByteEncodedFloatMatrixFromOutputTexture(e,t,n){return this.downloadMatrixDriver(e,()=>Q8(this.gl,t,n,this.textureConfig))}downloadPackedMatrixFromBuffer(e,t,n,s,i,o){return e6(this.gl,e,t,n,s,i,o,this.textureConfig)}downloadFloat32MatrixFromBuffer(e,t){return Z8(this.gl,e,t)}createBufferFromTexture(e,t,n){this.bindTextureToFrameBuffer(e);const s=J8(this.gl,t,n,this.textureConfig);return this.unbindTextureToFrameBuffer(),s}createAndWaitForFence(){const e=this.createFence(this.gl);return this.pollFence(e)}createFence(e){let t,n;if(oe().getBool("WEBGL_FENCE_API_ENABLED")){const s=e,i=s.fenceSync(s.SYNC_GPU_COMMANDS_COMPLETE,0);e.flush(),n=()=>{const o=s.clientWaitSync(i,0,0);return o===s.ALREADY_SIGNALED||o===s.CONDITION_SATISFIED},t=i}else 
oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(t=this.beginQuery(),this.endQuery(),n=()=>this.isQueryAvailable(t,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):n=()=>!0;return{query:t,isFencePassed:n}}downloadMatrixFromPackedTexture(e,t,n){return this.downloadMatrixDriver(e,()=>t6(this.gl,t,n))}createProgram(e){this.throwIfDisposed();const t=this.gl,n=LK(t,e),s=M8(t),i=xK(t);return Ee(t,()=>t.attachShader(i,s)),Ee(t,()=>t.attachShader(i,n)),TK(t,i),this.debug&&yS(t,i),this.vertexAttrsAreBound||(this.setProgram(i),this.vertexAttrsAreBound=j8(t,this.program,this.vertexBuffer)),i}deleteProgram(e){this.throwIfDisposed(),e===this.program&&(this.program=null),e!=null&&Ee(this.gl,()=>this.gl.deleteProgram(e))}setProgram(e){this.throwIfDisposed(),this.program=e,this.program!=null&&this.debug&&yS(this.gl,this.program),Ee(this.gl,()=>this.gl.useProgram(e))}getUniformLocation(e,t,n=!0){return this.throwIfDisposed(),n?EK(this.gl,e,t):DK(this.gl,e,t)}getAttributeLocation(e,t){return this.throwIfDisposed(),Ee(this.gl,()=>this.gl.getAttribLocation(e,t))}getUniformLocationNoThrow(e,t){return this.throwIfDisposed(),this.gl.getUniformLocation(e,t)}setInputMatrixTexture(e,t,n){this.throwIfDisposed(),this.throwIfNoProgram(),kK(this.gl,e,t,n)}setOutputMatrixTexture(e,t,n){this.setOutputMatrixTextureDriver(e,n,t)}setOutputPackedMatrixTexture(e,t,n){this.throwIfDisposed();const[s,i]=pc(t,n);this.setOutputMatrixTextureDriver(e,s,i)}setOutputMatrixWriteRegion(e,t,n,s){this.setOutputMatrixWriteRegionDriver(n,e,s,t)}setOutputPackedMatrixWriteRegion(e,t,n,s){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&yS(this.gl,this.program),Nm(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();const e=this.gl;this.debug&&this.debugValidate(),Ee(e,()=>e.drawElements(e.TRIANGLES,6,e.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Ee(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=vm(this.gl,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.createQuery();return n.beginQuery(s.TIME_ELAPSED_EXT,i),i}const e=this.getQueryTimerExtensionWebGL1(),t=e.createQueryEXT();return e.beginQueryEXT(e.TIME_ELAPSED_EXT,t),t}endQuery(){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){const t=this.gl,n=this.getQueryTimerExtensionWebGL2();t.endQuery(n.TIME_ELAPSED_EXT);return}const e=this.getQueryTimerExtensionWebGL1();e.endQueryEXT(e.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(e){return await $t(()=>this.disposed||this.isQueryAvailable(e,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(e,oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(e,t){if(t===0)return null;if(t===2){const n=this.gl,s=n.getQueryParameter(e,n.QUERY_RESULT);return s/1e6}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_EXT);return s/1e6}}isQueryAvailable(e,t){if(t===0)return!0;if(t===2){const 
n=this.gl,s=this.getQueryTimerExtensionWebGL2(),i=n.getQueryParameter(e,n.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(s.GPU_DISJOINT_EXT)),i&&!this.disjoint}else{const n=this.getQueryTimerExtensionWebGL1(),s=n.getQueryObjectEXT(e,n.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(n.GPU_DISJOINT_EXT)),s&&!this.disjoint}}pollFence(e){return new Promise(t=>{this.addItemToPoll(()=>e.isFencePassed(),()=>t())})}pollItems(){const e=s6(this.itemsToPoll.map(t=>t.isDoneFn));for(let t=0;t<=e;++t){const{resolveFn:n}=this.itemsToPoll[t];n()}this.itemsToPoll=this.itemsToPoll.slice(e+1)}addItemToPoll(e,t){if(this.itemsToPoll.push({isDoneFn:e,resolveFn:t}),this.itemsToPoll.length>1)return;$t(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(e){this.throwIfDisposed(),bS(this.gl,e,this.framebuffer),this.debug&&Nm(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(bS(this.gl,this.outputTexture,this.framebuffer),this.debug&&Nm(this.gl)):j0(this.gl,this.framebuffer)}downloadMatrixDriver(e,t){this.bindTextureToFrameBuffer(e);const n=t();return this.unbindTextureToFrameBuffer(),n}setOutputMatrixTextureDriver(e,t,n){this.throwIfDisposed();const s=this.gl;bS(s,e,this.framebuffer),this.debug&&Nm(s),this.outputTexture=e,Ee(s,()=>s.viewport(0,0,t,n)),Ee(s,()=>s.scissor(0,0,t,n))}setOutputMatrixWriteRegionDriver(e,t,n,s){this.throwIfDisposed(),Ee(this.gl,()=>this.gl.scissor(e,t,n,s))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function s6(e){let t=0;for(;t{const x={logicalShape:w.shape,texShape:w.isUniform?null:w.texData.texShape,isUniform:w.isUniform,isPacked:w.isUniform?!1:w.texData.isPacked,flatOffset:null};return w.texData!=null&&w.texData.slice!=null&&w.texData.slice.flatOffset>0&&(x.flatOffset=w.texData.slice.flatOffset),{name:t.variableNames[L],shapeInfo:x}}),a=o.map(w=>w.shapeInfo),c={logicalShape:s.shape,texShape:s.texData.texShape,isUniform:!1,isPacked:s.texData.isPacked,flatOffset:null},h=o5(o,c,i,t.packedInputs),d=e.createProgram(h);let m=null;const f=e.getUniformLocation(d,"NAN",!1);oe().getNumber("WEBGL_VERSION")===1&&(m=e.getUniformLocation(d,"INFINITY",!1));const b={};for(let w=0;w{const i=n.logicalShape,o=t[s],a=o.shape;if(!ae(i,a))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${i} and ${a} must match`);if(n.isUniform&&o.isUniform)return;const c=n.texShape,h=o.isUniform?null:o.texData.texShape;if(!ae(c,h))throw Error(`Binary was compiled with different texture shapes than the current args. 
Shape ${c} and ${h} must match`)})}function r6(e,t,n,s,i){gC(t.inShapeInfos,n),gC([t.outShapeInfo],[s]);const o=s.texData.texture,a=s.texData.texShape;s.texData.isPacked?e.setOutputPackedMatrixTexture(o,a[0],a[1]):e.setOutputMatrixTexture(o,a[0],a[1]),e.setProgram(t.webGLProgram),oe().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&e.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&e.gl.uniform1f(t.nanLoc,NaN),n.forEach((c,h)=>{const d=t.program.variableNames[h],m=t.uniformLocations[d],f=t.uniformLocations[`offset${d}`];if(m==null)return;if(c.isUniform){if(P(c.shape)<2)e.gl.uniform1f(m,c.uniformValues[0]);else{let b=c.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),e.gl.uniform1fv(m,b)}return}c.texData.slice!=null&&f!=null&&e.gl.uniform1i(f,c.texData.slice.flatOffset),e.setInputMatrixTexture(c.texData.texture,m,h)}),i!=null&&i(e,t.webGLProgram),e.executeProgram()}function o6(e,t,n){let s="";t.concat(n).forEach(a=>{const c=a.texData!=null&&a.texData.slice!=null&&a.texData.slice.flatOffset>0,h=a.isUniform?"uniform":a.texData.texShape;s+=`${a.shape}_${h}_${c}`});const i=e.userCode;let o=e.constructor.name;return o+="_"+s+"_"+i,o}class a6{constructor(e,t,n){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;const{filterWidth:s,inChannels:i,strideWidth:o,strideHeight:a,padInfo:c,outWidth:h,dilationWidth:d,dilationHeight:m,dataFormat:f}=n,{left:b,top:w}=c,L=i*s,x=Pn(),v=f==="channelsLast",N=v?0:1,O=v?1:2;let E="";for(let k=0;k<=1;k++)for(let F=0;F<=1;F++)E+=` - blockIndex = rc.y + ${F}; - pos = rc.x + ${k}; + }`;return A8(n,e)}function t7(n){let t=new Float32Array([-1,1,0,0,1,-1,-1,0,0,0,1,1,0,1,1,1,-1,0,1,0]);return M8(n,t)}function e7(n){let t=new Uint16Array([0,1,2,2,1,3]);return B8(n,t)}function ih(n,t,e,r,o,s){W8(t,e);let c=z8(n),l=n.TEXTURE_2D;return Rt(n,()=>n.bindTexture(l,c)),Rt(n,()=>n.texParameteri(l,n.TEXTURE_WRAP_S,n.CLAMP_TO_EDGE)),Rt(n,()=>n.texParameteri(l,n.TEXTURE_WRAP_T,n.CLAMP_TO_EDGE)),Rt(n,()=>n.texParameteri(l,n.TEXTURE_MIN_FILTER,n.NEAREST)),Rt(n,()=>n.texParameteri(l,n.TEXTURE_MAG_FILTER,n.NEAREST)),Rt(n,()=>n.texImage2D(l,0,r,t,e,0,o,s,null)),Rt(n,()=>n.bindTexture(n.TEXTURE_2D,null)),c}function RI(n){return n.internalFormatFloat}function n7(n,t,e,r){let[o,s]=rh(t,e);return ih(n,o,s,RI(r),r.textureFormatFloat,n.FLOAT)}function PI(n){return n.internalFormatHalfFloat}function r7(n,t,e,r){let[o,s]=rh(t,e);return ih(n,o,s,PI(r),r.textureFormatFloat,r.textureTypeHalfFloat)}function OI(n){return n.downloadTextureFormat}function o7(n,t,e,r){let[o,s]=rh(t,e);return ih(n,o,s,OI(r),n.RGBA,n.UNSIGNED_BYTE)}function LI(n){return n.internalFormatPackedFloat}function s7(n,t,e,r){let[o,s]=nl(t,e);return ih(n,o,s,LI(r),n.RGBA,n.FLOAT)}function MI(n){return n.internalFormatPackedHalfFloat}function i7(n,t,e,r){let[o,s]=nl(t,e);return ih(n,o,s,MI(r),n.RGBA,r.textureTypeHalfFloat)}function a7(n,t,e){let r=0,o=3*4,s=3*4+2*4;Rt(n,()=>n.bindBuffer(n.ARRAY_BUFFER,e));let c=gI(n,t,"clipSpacePos",e,3,s,r);return c&&gI(n,t,"uv",e,2,s,o)}function c7(n,t,e,r,o,s){Rt(n,()=>n.bindTexture(n.TEXTURE_2D,t));let c,l,p;o instanceof Uint8Array?(c=new Uint8Array(e*r*4),l=n.UNSIGNED_BYTE,p=n.RGBA):(c=new Float32Array(e*r*4),l=n.FLOAT,p=s.internalFormatPackedFloat),c.set(o),Rt(n,()=>n.texImage2D(n.TEXTURE_2D,0,p,e,r,0,n.RGBA,l,c)),Rt(n,()=>n.bindTexture(n.TEXTURE_2D,null))}function l7(n,t,e){Rt(n,()=>n.bindTexture(n.TEXTURE_2D,t)),e.data instanceof 
Uint8Array?Rt(n,()=>n.texImage2D(n.TEXTURE_2D,0,n.RGBA,e.width,e.height,0,n.RGBA,n.UNSIGNED_BYTE,e.data)):Rt(n,()=>n.texImage2D(n.TEXTURE_2D,0,n.RGBA,n.RGBA,n.UNSIGNED_BYTE,e)),Rt(n,()=>n.bindTexture(n.TEXTURE_2D,null))}function u7(n,t,e,r){let o=n.createBuffer();Rt(n,()=>n.bindBuffer(n.PIXEL_PACK_BUFFER,o));let s=4,c=4,l=s*c*t*e;return Rt(n,()=>n.bufferData(n.PIXEL_PACK_BUFFER,l,n.STREAM_READ)),Rt(n,()=>n.readPixels(0,0,e,t,n.RGBA,n.FLOAT,0)),Rt(n,()=>n.bindBuffer(n.PIXEL_PACK_BUFFER,null)),o}function p7(n,t,e){let r=n,o=new Float32Array(e);return r.bindBuffer(r.PIXEL_PACK_BUFFER,t),r.getBufferSubData(r.PIXEL_PACK_BUFFER,0,o),r.bindBuffer(r.PIXEL_PACK_BUFFER,null),o}function h7(n,t,e,r){let[o,s]=rh(t,e),c=4,l=new Uint8Array(_8(t*e,c));return Rt(n,()=>n.readPixels(0,0,o,s,r.downloadTextureFormat,n.UNSIGNED_BYTE,l)),new Float32Array(l.buffer)}function f7(n,t,e,r,o,s,c,l){let p=n,f=new Float32Array(C8(s,c));return p.bindBuffer(p.PIXEL_PACK_BUFFER,t),p.getBufferSubData(p.PIXEL_PACK_BUFFER,0,f),p.bindBuffer(p.PIXEL_PACK_BUFFER,null),f}function d7(n,t,e){let r=new Float32Array(t*e*4);return Rt(n,()=>n.readPixels(0,0,e,t,n.RGBA,n.FLOAT,r)),r}class m7{constructor(t){this.outputTexture=null,this.program=null,this.disposed=!1,this.vertexAttrsAreBound=!1,this.itemsToPoll=[];let e=ct().getNumber("WEBGL_VERSION");t!=null?(this.gl=t,T8(e,t)):this.gl=jo(e);let r="WEBGL_color_buffer_float",o="EXT_color_buffer_half_float";if(ct().getNumber("WEBGL_VERSION")===1){let s="OES_texture_float",c="OES_texture_half_float";if(this.textureFloatExtension=Dm(this.gl,s),eo(this.gl,c))this.textureHalfFloatExtension=Dm(this.gl,c);else if(ct().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");if(this.colorBufferFloatExtension=this.gl.getExtension(r),eo(this.gl,o))this.colorBufferHalfFloatExtension=Dm(this.gl,o);else if(ct().get("WEBGL_FORCE_F16_TEXTURES"))throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.")}else if(r="EXT_color_buffer_float",eo(this.gl,r))this.colorBufferFloatExtension=this.gl.getExtension(r);else if(eo(this.gl,o))this.colorBufferHalfFloatExtension=this.gl.getExtension(o);else throw new Error("GL context does not support color renderable floats");this.vertexBuffer=t7(this.gl),this.indexBuffer=e7(this.gl),this.framebuffer=V8(this.gl),this.textureConfig=h1(this.gl,this.textureHalfFloatExtension)}get debug(){return ct().getBool("DEBUG")}dispose(){if(this.disposed)return;this.program!=null&&console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."),this.outputTexture!=null&&console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. 
This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");let t=this.gl;Rt(t,()=>t.finish()),Rt(t,()=>t.bindFramebuffer(t.FRAMEBUFFER,null)),Rt(t,()=>t.deleteFramebuffer(this.framebuffer)),Rt(t,()=>t.bindBuffer(t.ARRAY_BUFFER,null)),Rt(t,()=>t.bindBuffer(t.ELEMENT_ARRAY_BUFFER,null)),Rt(t,()=>t.deleteBuffer(this.indexBuffer)),this.disposed=!0}createFloat32MatrixTexture(t,e){return this.throwIfDisposed(),n7(this.gl,t,e,this.textureConfig)}createFloat16MatrixTexture(t,e){return this.throwIfDisposed(),r7(this.gl,t,e,this.textureConfig)}createUnsignedBytesMatrixTexture(t,e){return this.throwIfDisposed(),o7(this.gl,t,e,this.textureConfig)}uploadPixelDataToTexture(t,e){this.throwIfDisposed(),l7(this.gl,t,e)}uploadDenseMatrixToTexture(t,e,r,o){this.throwIfDisposed(),c7(this.gl,t,e,r,o,this.textureConfig)}createFloat16PackedMatrixTexture(t,e){return this.throwIfDisposed(),i7(this.gl,t,e,this.textureConfig)}createPackedMatrixTexture(t,e){return this.throwIfDisposed(),s7(this.gl,t,e,this.textureConfig)}deleteMatrixTexture(t){this.throwIfDisposed(),this.outputTexture===t&&(yI(this.gl,this.framebuffer),this.outputTexture=null),Rt(this.gl,()=>this.gl.deleteTexture(t))}downloadByteEncodedFloatMatrixFromOutputTexture(t,e,r){return this.downloadMatrixDriver(t,()=>h7(this.gl,e,r,this.textureConfig))}downloadPackedMatrixFromBuffer(t,e,r,o,s,c){return f7(this.gl,t,e,r,o,s,c,this.textureConfig)}downloadFloat32MatrixFromBuffer(t,e){return p7(this.gl,t,e)}createBufferFromTexture(t,e,r){this.bindTextureToFrameBuffer(t);let o=u7(this.gl,e,r,this.textureConfig);return this.unbindTextureToFrameBuffer(),o}createAndWaitForFence(){let t=this.createFence(this.gl);return this.pollFence(t)}createFence(t){let e,r;if(ct().getBool("WEBGL_FENCE_API_ENABLED")){let o=t,s=o.fenceSync(o.SYNC_GPU_COMMANDS_COMPLETE,0);t.flush(),r=()=>{let c=o.clientWaitSync(s,0,0);return c===o.ALREADY_SIGNALED||c===o.CONDITION_SATISFIED},e=s}else ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")>0?(e=this.beginQuery(),this.endQuery(),r=()=>this.isQueryAvailable(e,ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))):r=()=>!0;return{query:e,isFencePassed:r}}downloadMatrixFromPackedTexture(t,e,r){return this.downloadMatrixDriver(t,()=>d7(this.gl,e,r))}createProgram(t){this.throwIfDisposed();let e=this.gl,r=F8(e,t),o=QY(e),s=O8(e);return Rt(e,()=>e.attachShader(s,o)),Rt(e,()=>e.attachShader(s,r)),L8(e,s),this.debug&&f1(e,s),this.vertexAttrsAreBound||(this.setProgram(s),this.vertexAttrsAreBound=a7(e,this.program,this.vertexBuffer)),s}deleteProgram(t){this.throwIfDisposed(),t===this.program&&(this.program=null),t!=null&&Rt(this.gl,()=>this.gl.deleteProgram(t))}setProgram(t){this.throwIfDisposed(),this.program=t,this.program!=null&&this.debug&&f1(this.gl,this.program),Rt(this.gl,()=>this.gl.useProgram(t))}getUniformLocation(t,e,r=!0){return this.throwIfDisposed(),r?U8(this.gl,t,e):q8(this.gl,t,e)}getAttributeLocation(t,e){return this.throwIfDisposed(),Rt(this.gl,()=>this.gl.getAttribLocation(t,e))}getUniformLocationNoThrow(t,e){return 
this.throwIfDisposed(),this.gl.getUniformLocation(t,e)}setInputMatrixTexture(t,e,r){this.throwIfDisposed(),this.throwIfNoProgram(),H8(this.gl,t,e,r)}setOutputMatrixTexture(t,e,r){this.setOutputMatrixTextureDriver(t,r,e)}setOutputPackedMatrixTexture(t,e,r){this.throwIfDisposed();let[o,s]=nl(e,r);this.setOutputMatrixTextureDriver(t,o,s)}setOutputMatrixWriteRegion(t,e,r,o){this.setOutputMatrixWriteRegionDriver(r,t,o,e)}setOutputPackedMatrixWriteRegion(t,e,r,o){throw new Error("setOutputPackedMatrixWriteRegion not implemented.")}debugValidate(){this.program!=null&&f1(this.gl,this.program),Am(this.gl)}executeProgram(){this.throwIfDisposed(),this.throwIfNoProgram();let t=this.gl;this.debug&&this.debugValidate(),Rt(t,()=>t.drawElements(t.TRIANGLES,6,t.UNSIGNED_SHORT,0))}blockUntilAllProgramsCompleted(){this.throwIfDisposed(),Rt(this.gl,()=>this.gl.finish())}getQueryTimerExtension(){return this.disjointQueryTimerExtension==null&&(this.disjointQueryTimerExtension=Dm(this.gl,ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2?"EXT_disjoint_timer_query_webgl2":"EXT_disjoint_timer_query")),this.disjointQueryTimerExtension}getQueryTimerExtensionWebGL2(){return this.getQueryTimerExtension()}getQueryTimerExtensionWebGL1(){return this.getQueryTimerExtension()}beginQuery(){if(ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){let r=this.gl,o=this.getQueryTimerExtensionWebGL2(),s=r.createQuery();return r.beginQuery(o.TIME_ELAPSED_EXT,s),s}let t=this.getQueryTimerExtensionWebGL1(),e=t.createQueryEXT();return t.beginQueryEXT(t.TIME_ELAPSED_EXT,e),e}endQuery(){if(ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION")===2){let e=this.gl,r=this.getQueryTimerExtensionWebGL2();e.endQuery(r.TIME_ELAPSED_EXT);return}let t=this.getQueryTimerExtensionWebGL1();t.endQueryEXT(t.TIME_ELAPSED_EXT)}async waitForQueryAndGetTime(t){return await _e(()=>this.disposed||this.isQueryAvailable(t,ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))),this.getQueryTime(t,ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))}getQueryTime(t,e){if(e===0)return null;if(e===2){let r=this.gl,o=r.getQueryParameter(t,r.QUERY_RESULT);return o/1e6}else{let r=this.getQueryTimerExtensionWebGL1(),o=r.getQueryObjectEXT(t,r.QUERY_RESULT_EXT);return o/1e6}}isQueryAvailable(t,e){if(e===0)return!0;if(e===2){let r=this.gl,o=this.getQueryTimerExtensionWebGL2(),s=r.getQueryParameter(t,r.QUERY_RESULT_AVAILABLE);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(o.GPU_DISJOINT_EXT)),s&&!this.disjoint}else{let r=this.getQueryTimerExtensionWebGL1(),o=r.getQueryObjectEXT(t,r.QUERY_RESULT_AVAILABLE_EXT);return this.disjoint==null&&(this.disjoint=this.gl.getParameter(r.GPU_DISJOINT_EXT)),o&&!this.disjoint}}pollFence(t){return new Promise(e=>{this.addItemToPoll(()=>t.isFencePassed(),()=>e())})}pollItems(){let t=g7(this.itemsToPoll.map(e=>e.isDoneFn));for(let e=0;e<=t;++e){let{resolveFn:r}=this.itemsToPoll[e];r()}this.itemsToPoll=this.itemsToPoll.slice(t+1)}addItemToPoll(t,e){if(this.itemsToPoll.push({isDoneFn:t,resolveFn:e}),this.itemsToPoll.length>1)return;_e(()=>(this.pollItems(),this.itemsToPoll.length===0))}bindTextureToFrameBuffer(t){this.throwIfDisposed(),d1(this.gl,t,this.framebuffer),this.debug&&Am(this.gl)}unbindTextureToFrameBuffer(){this.outputTexture!=null?(d1(this.gl,this.outputTexture,this.framebuffer),this.debug&&Am(this.gl)):yI(this.gl,this.framebuffer)}downloadMatrixDriver(t,e){this.bindTextureToFrameBuffer(t);let r=e();return 
this.unbindTextureToFrameBuffer(),r}setOutputMatrixTextureDriver(t,e,r){this.throwIfDisposed();let o=this.gl;d1(o,t,this.framebuffer),this.debug&&Am(o),this.outputTexture=t,Rt(o,()=>o.viewport(0,0,e,r)),Rt(o,()=>o.scissor(0,0,e,r))}setOutputMatrixWriteRegionDriver(t,e,r,o){this.throwIfDisposed(),Rt(this.gl,()=>this.gl.scissor(t,e,r,o))}throwIfDisposed(){if(this.disposed)throw new Error("Attempted to use disposed GPGPUContext.")}throwIfNoProgram(){if(this.program==null)throw new Error("No GPU program is currently set.")}}function g7(n){let t=0;for(;t{let N={logicalShape:v.shape,texShape:v.isUniform?null:v.texData.texShape,isUniform:v.isUniform,isPacked:v.isUniform?!1:v.texData.isPacked,flatOffset:null};return v.texData!=null&&v.texData.slice!=null&&v.texData.slice.flatOffset>0&&(N.flatOffset=v.texData.slice.flatOffset),{name:t.variableNames[T],shapeInfo:N}}),c=s.map(v=>v.shapeInfo),l={logicalShape:r.shape,texShape:r.texData.texShape,isUniform:!1,isPacked:r.texData.isPacked,flatOffset:null},p=xX(s,l,o,t.packedInputs),f=n.createProgram(p),m=null,y=n.getUniformLocation(f,"NAN",!1);ct().getNumber("WEBGL_VERSION")===1&&(m=n.getUniformLocation(f,"INFINITY",!1));let b={};for(let v=0;v{let o=e.logicalShape,s=t[r],c=s.shape;if(!lt(o,c))throw Error(`Binary was compiled with different shapes than the current args. Shapes ${o} and ${c} must match`);if(e.isUniform&&s.isUniform)return;let l=e.texShape,p=s.isUniform?null:s.texData.texShape;if(!lt(l,p))throw Error(`Binary was compiled with different texture shapes than the current args. Shape ${l} and ${p} must match`)})}function b7(n,t,e,r,o){BI(t.inShapeInfos,e),BI([t.outShapeInfo],[r]);let s=r.texData.texture,c=r.texData.texShape;r.texData.isPacked?n.setOutputPackedMatrixTexture(s,c[0],c[1]):n.setOutputMatrixTexture(s,c[0],c[1]),n.setProgram(t.webGLProgram),ct().getNumber("WEBGL_VERSION")===1&&(t.infLoc!==null&&n.gl.uniform1f(t.infLoc,Infinity)),t.nanLoc!==null&&n.gl.uniform1f(t.nanLoc,NaN),e.forEach((l,p)=>{let f=t.program.variableNames[p],m=t.uniformLocations[f],y=t.uniformLocations[`offset${f}`];if(m==null)return;if(l.isUniform){if(G(l.shape)<2)n.gl.uniform1f(m,l.uniformValues[0]);else{let b=l.uniformValues;b instanceof Float32Array||(b=new Float32Array(b)),n.gl.uniform1fv(m,b)}return}l.texData.slice!=null&&y!=null&&n.gl.uniform1i(y,l.texData.slice.flatOffset),n.setInputMatrixTexture(l.texData.texture,m,p)}),o!=null&&o(n,t.webGLProgram),n.executeProgram()}function x7(n,t,e){let r="";t.concat(e).forEach(c=>{let l=c.texData!=null&&c.texData.slice!=null&&c.texData.slice.flatOffset>0,p=c.isUniform?"uniform":c.texData.texShape;r+=`${c.shape}_${p}_${l}`});let o=n.userCode,s=n.constructor.name;return s+="_"+r+"_"+o,s}class w7{constructor(t,e,r){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t;let{filterWidth:o,inChannels:s,strideWidth:c,strideHeight:l,padInfo:p,outWidth:f,dilationWidth:m,dilationHeight:y,dataFormat:b}=r,{left:v,top:T}=p,N=s*o,S=Jn(),D=b==="channelsLast",I=D?0:1,P=D?1:2,E="";for(let L=0;L<=1;L++)for(let B=0;B<=1;B++)E+=` + blockIndex = rc.y + ${B}; + pos = rc.x + ${L}; - if(blockIndex < ${e[1]} && pos < ${e[0]}) { - offsetY = int(blockIndex / (${h})) * ${a} - ${w}; - d0 = offsetY + ${m} * (pos / ${L}); + if(blockIndex < ${t[1]} && pos < ${t[0]}) { + offsetY = int(blockIndex / (${f})) * ${l} - ${T}; + d0 = offsetY + ${y} * (pos / ${N}); - if(d0 < ${t[N]} && d0 >= 0) { + if(d0 < ${e[I]} && d0 >= 0) { - offsetX = int(mod(float(blockIndex), ${h}.) * ${o}. 
- ${b}.); - d1 = offsetX + ${d} * (int(mod(float(pos), ${L}.) / ${i}.)); + offsetX = int(mod(float(blockIndex), ${f}.) * ${c}. - ${v}.); + d1 = offsetX + ${m} * (int(mod(float(pos), ${N}.) / ${s}.)); - if(d1 < ${t[O]} && d1 >= 0) { + if(d1 < ${e[P]} && d1 >= 0) { - ch = int(mod(float(pos), ${i}.)); + ch = int(mod(float(pos), ${s}.)); - if (${v}) { + if (${D}) { innerDims = vec2(d1, ch); - result[${k*2+F}] = getChannel( + result[${L*2+B}] = getChannel( getA(d0, int(innerDims.x), int(innerDims.y)), innerDims); } else { innerDims = vec2(d0, d1); - result[${k*2+F}] = getChannel( + result[${L*2+B}] = getChannel( getA(ch, int(innerDims.x), int(innerDims.y)), innerDims); } @@ -2068,9 +2068,9 @@ return (round(mod(b, 2.0)) != 1) ? ${E} - ${x.output} = result; + ${S.output} = result; } - `}}class c6{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[];const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` + `}}class v7{constructor(t,e,r,o,s){this.variableNames=["x"],this.outputShape=[];let c=e,l=t[3]-1;this.outputShape=t;let p,f=`float(${r}) + float(${o}) * sum`;s===.5?p=`inversesqrt(${f})`:s===1?p=`1.0/(${f})`:p=`exp(log(${f}) * float(-${s}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2079,17 +2079,17 @@ return (round(mod(b, 2.0)) != 1) ? int d = coords[3]; float x = getX(b, r, c, d); float sum = 0.0; - for (int j = -${o}; j <= ${o}; j++) { + for (int j = -${c}; j <= ${c}; j++) { int idx = d + j; - if (idx >= 0 && idx <= ${a}) { + if (idx >= 0 && idx <= ${l}) { float z = getX(b, r, c, idx); sum += z * z; } } - float val = x * ${c}; + float val = x * ${p}; setOutput(val); } - `}}class l6{constructor(e,t,n,s,i){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=e,this.depth=e[3],this.depthRadius=t,this.bias=n,this.alpha=s,this.beta=i,this.userCode=` + `}}class T7{constructor(t,e,r,o,s){this.variableNames=["inputImage","outputImage","dy"],this.outputShape=[],this.outputShape=t,this.depth=t[3],this.depthRadius=e,this.bias=r,this.alpha=o,this.beta=s,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2098,9 +2098,9 @@ return (round(mod(b, 2.0)) != 1) ? float result = 0.0; for (int d = 0; d < ${this.depth}; ++d) { - int depthBegin = int(max(0.0, float(d - ${t}))); + int depthBegin = int(max(0.0, float(d - ${e}))); int depthEnd = int(min(float(${this.depth}), - float(d + ${t} + 1))); + float(d + ${e} + 1))); const int MIN_DEPTH_BEGIN = 0; const int MAX_DEPTH_END = ${this.depth}; @@ -2118,19 +2118,19 @@ return (round(mod(b, 2.0)) != 1) ? } } - norm = float(${s}) * norm + float(${n}); + norm = float(${o}) * norm + float(${r}); for(int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k){ if (k < depthBegin){ continue; } else if (k >= depthBegin && k < depthEnd){ - float dyi = -2.0 * float(${s}) - * float(${i}) + float dyi = -2.0 * float(${o}) + * float(${s}) * getInputImage(b ,r ,c, k) * getOutputImage(b, r, c, d) / norm; if (k == d) { - dyi += pow(norm, -1.0 * ${i}); + dyi += pow(norm, -1.0 * ${s}); } if (k == coords[3]) { dyi *= getDy(b, r, c, d); @@ -2144,7 +2144,7 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(result); } - `}}class h6{constructor(e,t,n,s,i){this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;const o=t,a=e[3]-1;this.outputShape=e;let c;const h=`float(${n}) + float(${s}) * sum`;i===.5?c=`inversesqrt(${h})`:i===1?c=`1.0/(${h})`:c=`exp(log(${h}) * float(-${i}));`,this.userCode=` + `}}class k7{constructor(t,e,r,o,s){this.variableNames=["x"],this.outputShape=[],this.packedInputs=!0,this.packedOutput=!0;let c=e,l=t[3]-1;this.outputShape=t;let p,f=`float(${r}) + float(${o}) * sum`;s===.5?p=`inversesqrt(${f})`:s===1?p=`1.0/(${f})`:p=`exp(log(${f}) * float(-${s}));`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords.x; @@ -2168,7 +2168,7 @@ return (round(mod(b, 2.0)) != 1) ? getChannel(xFragAtOutputCoords, vec2(c + 1, d + 1)) : 0.0 ); - int firstChannel = d - ${o}; + int firstChannel = d - ${c}; vec2 cache = vec2(0.); if(firstChannel >= 0){ vec4 firstChannelFrag = getX(b, r, c, firstChannel); @@ -2179,10 +2179,10 @@ return (round(mod(b, 2.0)) != 1) ? } ivec2 depth = ivec2(d, d + 1); - for (int j = - ${o}; j <= ${o}; j++) { + for (int j = - ${c}; j <= ${c}; j++) { ivec2 idx = depth + j; bvec2 aboveLowerBound = greaterThanEqual(idx, ivec2(0)); - bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${a})); + bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${l})); bool depthInRange = aboveLowerBound.x && belowUpperBound.x; bool depthPlusOneInRange = aboveLowerBound.y && belowUpperBound.y; @@ -2203,11 +2203,11 @@ return (round(mod(b, 2.0)) != 1) ? sum += z * z; } } - vec4 result = xAtOutputCoords * ${c}; + vec4 result = xAtOutputCoords * ${p}; setOutput(result); } - `}}class u6{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideHeight,n=e.strideWidth,s=e.dilationHeight,i=e.effectiveFilterHeight,o=e.effectiveFilterWidth,a=i-1-e.padInfo.top,c=o-1-e.padInfo.left,h=i*o-1;this.userCode=` - const ivec2 pads = ivec2(${a}, ${c}); + `}}class N7{constructor(t){this.variableNames=["dy","maxPos"],this.outputShape=t.inShape;let e=t.strideHeight,r=t.strideWidth,o=t.dilationHeight,s=t.effectiveFilterHeight,c=t.effectiveFilterWidth,l=s-1-t.padInfo.top,p=c-1-t.padInfo.left,f=s*c-1;this.userCode=` + const ivec2 pads = ivec2(${l}, ${p}); void main() { ivec4 coords = getOutputCoords(); @@ -2221,30 +2221,30 @@ return (round(mod(b, 2.0)) != 1) ? // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d). // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int wR = 0; wR < ${i}; - wR += ${s}) { - float dyR = float(dyRCorner + wR) / ${t}.0; + for (int wR = 0; wR < ${s}; + wR += ${o}) { + float dyR = float(dyRCorner + wR) / ${e}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || fract(dyR) > 0.0) { + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - for (int wC = 0; wC < ${o}; wC++) { - float dyC = float(dyCCorner + wC) / ${n}.0; + for (int wC = 0; wC < ${c}; wC++) { + float dyC = float(dyCCorner + wC) / ${r}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); float dyValue = getDy(b, idyR, idyC, d); - int maxPosValue = ${h} - int(getMaxPos(b, idyR, idyC, d)); + int maxPosValue = ${f} - int(getMaxPos(b, idyR, idyC, d)); // Get the current value, check it against the value from the // position matrix. - int curPosValue = wR * ${o} + wC; + int curPosValue = wR * ${c} + wC; float mask = float(maxPosValue == curPosValue ? 
1.0 : 0.0); dotProd += dyValue * mask; @@ -2252,8 +2252,8 @@ return (round(mod(b, 2.0)) != 1) ? } setOutput(dotProd); } - `}}class d6{constructor(e){this.variableNames=["dy","maxPos"],this.outputShape=e.inShape;const t=e.strideDepth,n=e.strideHeight,s=e.strideWidth,i=e.dilationDepth,o=e.dilationHeight,a=e.dilationWidth,c=e.effectiveFilterDepth,h=e.effectiveFilterHeight,d=e.effectiveFilterWidth,m=c-1-e.padInfo.front,f=h-1-e.padInfo.top,b=d-1-e.padInfo.left,w=c*h*d-1;this.userCode=` - const ivec3 pads = ivec3(${m}, ${f}, ${b}); + `}}class _7{constructor(t){this.variableNames=["dy","maxPos"],this.outputShape=t.inShape;let e=t.strideDepth,r=t.strideHeight,o=t.strideWidth,s=t.dilationDepth,c=t.dilationHeight,l=t.dilationWidth,p=t.effectiveFilterDepth,f=t.effectiveFilterHeight,m=t.effectiveFilterWidth,y=p-1-t.padInfo.front,b=f-1-t.padInfo.top,v=m-1-t.padInfo.left,T=p*f*m-1;this.userCode=` + const ivec3 pads = ivec3(${y}, ${b}, ${v}); void main() { ivec5 coords = getOutputCoords(); @@ -2270,44 +2270,44 @@ return (round(mod(b, 2.0)) != 1) ? // ? = to be determined. : = across all values in that axis. float dotProd = 0.0; - for (int wD = 0; wD < ${c}; - wD += ${i}) { - float dyD = float(dyDCorner + wD) / ${t}.0; + for (int wD = 0; wD < ${p}; + wD += ${s}) { + float dyD = float(dyDCorner + wD) / ${e}.0; - if (dyD < 0.0 || dyD >= ${e.outDepth}.0 || fract(dyD) > 0.0) { + if (dyD < 0.0 || dyD >= ${t.outDepth}.0 || fract(dyD) > 0.0) { continue; } int idyD = int(dyD); - for (int wR = 0; wR < ${h}; - wR += ${o}) { - float dyR = float(dyRCorner + wR) / ${n}.0; + for (int wR = 0; wR < ${f}; + wR += ${c}) { + float dyR = float(dyRCorner + wR) / ${r}.0; - if (dyR < 0.0 || dyR >= ${e.outHeight}.0 || + if (dyR < 0.0 || dyR >= ${t.outHeight}.0 || fract(dyR) > 0.0) { continue; } int idyR = int(dyR); - for (int wC = 0; wC < ${d}; - wC += ${a}) { - float dyC = float(dyCCorner + wC) / ${s}.0; + for (int wC = 0; wC < ${m}; + wC += ${l}) { + float dyC = float(dyCCorner + wC) / ${o}.0; - if (dyC < 0.0 || dyC >= ${e.outWidth}.0 || + if (dyC < 0.0 || dyC >= ${t.outWidth}.0 || fract(dyC) > 0.0) { continue; } int idyC = int(dyC); float dyValue = getDy(batch, idyD, idyR, idyC, ch); - int maxPosValue = ${w} - + int maxPosValue = ${T} - int(getMaxPos(batch, idyD, idyR, idyC, ch)); // Get the current value, check it against the value from the // position matrix. int curPosValue = - wD * ${h} * ${d} + - wR * ${d} + wC; + wD * ${f} * ${m} + + wR * ${m} + wC; float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0); dotProd += dyValue * mask; @@ -2316,28 +2316,28 @@ return (round(mod(b, 2.0)) != 1) ? 
} setOutput(dotProd); } - `}}class xS{constructor(e,t,n,s=!1,i=!1,o=!1,a=null,c=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=n;const h=s?e[1]:e[2],d=Math.ceil(h/2),m=s?"i * 2, rc.y":"rc.y, i * 2",f=i?"rc.z, i * 2":"i * 2, rc.z",b=s?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],w=i?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"];let L="",x="";a&&(c?L=`vec4 activation(vec4 a) { + `}}class x1{constructor(t,e,r,o=!1,s=!1,c=!1,l=null,p=!1){this.variableNames=["matrixA","matrixB"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=r;let f=o?t[1]:t[2],m=Math.ceil(f/2),y=o?"i * 2, rc.y":"rc.y, i * 2",b=s?"rc.z, i * 2":"i * 2, rc.z",v=o?["a.xxyy","a.zzww"]:["a.xxzz","a.yyww"],T=s?["b.xzxz","b.ywyw"]:["b.xyxy","b.zwzw"],N="",S="";l&&(p?N=`vec4 activation(vec4 a) { vec4 b = getPreluActivationWeightsAtOutCoords(); - ${a} - }`:L=`vec4 activation(vec4 x) { - ${a} - }`,x="result = activation(result);");const v=o?"result += getBiasAtOutCoords();":"";o&&this.variableNames.push("bias"),c&&this.variableNames.push("preluActivationWeights");let N="rc.x",O="rc.x";e[0]{this.seedLoc==null&&(this.seedLoc=t.getUniformLocation(n,"seed")),t.gl.uniform1f(this.seedLoc,e)}}}class m6{constructor(e,t,n,s){this.variableNames=["indices"],this.outputShape=[e,t],this.userCode=` + `}getCustomSetupFunc(t){return(e,r)=>{this.seedLoc==null&&(this.seedLoc=e.getUniformLocation(r,"seed")),e.gl.uniform1f(this.seedLoc,t)}}}class S7{constructor(t,e,r,o){this.variableNames=["indices"],this.outputShape=[t,e],this.userCode=` void main() { ivec2 coords = getOutputCoords(); int index = round(getIndices(coords.x)); - setOutput(mix(float(${s}), float(${n}), + setOutput(mix(float(${o}), float(${r}), float(index == coords.y))); } - `}}class f6{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outputShape=e;const t=e.length;if(t===0)this.userCode=` + `}}class $7{constructor(t){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0,this.outputShape=t;let e=t.length;if(e===0)this.userCode=` void main() { setOutput(vec4(getA(), 0., 0., 0.)); } - `;else{const n=Mn("rc",t),s=Rt(t),i=y6(t,e,n),o=b6(t,e[e.length-1],e[e.length-2],n),a=w6(e,n);this.userCode=` + `;else{let r=Yn("rc",e),o=Oe(e),s=E7(e,t,r),c=D7(e,t[t.length-1],t[t.length-2],r),l=A7(t,r);this.userCode=` void main() { - ${s} rc = getOutputCoords(); + ${o} rc = getOutputCoords(); - if(${i}) { + if(${s}) { setOutput(vec4(0)); } else { - ${o} + ${c} - setOutput(vec4(${a})); + setOutput(vec4(${l})); } } - `}}}function g6(e,t){const n=[];for(let s=0;s<=1;s++)for(let i=0;i<=1;i++){let o=`${s===0?"r":"rp1"}, ${i===0?"c":"cp1"}`;for(let a=2;a ${t[0]}`;let s="";for(let i=e-2;i= ${t[i]}`,i ${t[0]}`;let r="";for(let o=n-2;o= ${t[o]}`,o= ${t}; - bool rEdge = rp1 >= ${n}; - `}function w6(e,t){const n=e.length,s=g6(n,t);return n===1?`getA(rc), - rc + 1 >= ${e[0]} ? 0. : getA(rc + 1), - 0, 0`:`getA(${s[0]}), - cEdge ? 0. : getA(${s[1]}), - rEdge ? 0. : getA(${s[2]}), - rEdge || cEdge ? 0. : getA(${s[3]})`}class L6{constructor(e,t,n){this.variableNames=["x"],this.outputShape=t.map((h,d)=>h[0]+e[d]+h[1]);const s=e.length,i=Rt(s),o=t.map(h=>h[0]).join(","),a=t.map((h,d)=>h[0]+e[d]).join(","),c=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s);if(s===1){this.userCode=` - int start = ${o}; - int end = ${a}; + bool rEdge = rp1 >= ${e}; + `}function A7(n,t){let e=n.length,r=I7(e,t);return e===1?`getA(rc), + rc + 1 >= ${n[0]} ? 0. : getA(rc + 1), + 0, 0`:`getA(${r[0]}), + cEdge ? 0. 
: getA(${r[1]}), + rEdge ? 0. : getA(${r[2]}), + rEdge || cEdge ? 0. : getA(${r[3]})`}class F7{constructor(t,e,r){this.variableNames=["x"],this.outputShape=e.map((f,m)=>f[0]+t[m]+f[1]);let o=t.length,s=Oe(o),c=e.map(f=>f[0]).join(","),l=e.map((f,m)=>f[0]+t[m]).join(","),p=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,o);if(o===1){this.userCode=` + int start = ${c}; + int end = ${l}; void main() { int outC = getOutputCoords(); if (outC < start || outC >= end) { - setOutput(float(${n})); + setOutput(float(${r})); } else { setOutput(getX(outC - start)); } } `;return}this.userCode=` - ${i} start = ${i}(${o}); - ${i} end = ${i}(${a}); + ${s} start = ${s}(${c}); + ${s} end = ${s}(${l}); void main() { - ${i} outC = getOutputCoords(); + ${s} outC = getOutputCoords(); if (any(lessThan(outC, start)) || any(greaterThanEqual(outC, end))) { - setOutput(float(${n})); + setOutput(float(${r})); } else { - ${i} coords = outC - start; - setOutput(getX(${c})); + ${s} coords = outC - start; + setOutput(getX(${p})); } } - `}}class S6{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map((L,x)=>L[0]+e[x]+L[1]);const s=e.length,i=Rt(s),o=t.map(L=>L[0]).join(","),a=t.map((L,x)=>L[0]+e[x]).join(","),c=Mn("rc",s),h=Mn("source",s),d=`${c[s-1]} < ${this.outputShape[s-1]}`,m=s===1?"source":`vec2(${h.slice(-2).join()})`,f=[`${i} rc = outputLoc;`,`${c[s-1]} += 1; - if(${d}) { - `,s===1?"":`} + `}}class R7{constructor(t,e,r){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e.map((N,S)=>N[0]+t[S]+N[1]);let o=t.length,s=Oe(o),c=e.map(N=>N[0]).join(","),l=e.map((N,S)=>N[0]+t[S]).join(","),p=Yn("rc",o),f=Yn("source",o),m=`${p[o-1]} < ${this.outputShape[o-1]}`,y=o===1?"source":`vec2(${f.slice(-2).join()})`,b=[`${s} rc = outputLoc;`,`${p[o-1]} += 1; + if(${m}) { + `,o===1?"":`} rc = outputLoc; - ${c[s-2]} += 1; - if(${c[s-2]} < ${this.outputShape[s-2]}) {`,s===1?"":` ${c[s-1]} += 1; - if(${d}) {`],b=s===1?"rc < start || rc >= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))";let w="";for(let L=0,x=s===1?2:4;L= end":"any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))",T="";for(let N=0,S=o===1?2:4;N= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${f}; - wC += ${d}) { + for (int wC = 0; wC < ${b}; + wC += ${m}) { int xC = xCCorner + wC; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } @@ -2501,31 +2501,31 @@ return (round(mod(b, 2.0)) != 1) ? // use the current value. 
float currMinMaxValue = mix( value, minMaxValue, minMaxValueFound); - if (value ${$} currMinMaxValue) { + if (value ${H} currMinMaxValue) { minMaxValue = value; minMaxValueFound = 1.0; - minMaxPosition = ${s?i?x:v:`wR * ${f} + wC`}; + minMaxPosition = ${o?s?S:D:`wR * ${b} + wC`}; } } } setOutput(float(minMaxPosition)); } - `;return}const O="max";let E=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="avg"&&(E="avgValue / count");const k=Math.floor(o/4)*4,F=o%4,U=` - if (${L}) { + `;return}let P="max",E=`${e}(${e}(${e}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;e==="avg"&&(E="avgValue / count");let L=Math.floor(c/4)*4,B=c%4,q=` + if (${N}) { avgValue += dot(values, ones); } else { - minMaxValue = ${O}(values, minMaxValue); + minMaxValue = ${P}(values, minMaxValue); } `;this.userCode=` - const ivec2 strides = ivec2(${a}, ${c}); - const ivec2 pads = ivec2(${b}, ${w}); - const float initializationValue = ${N}; + const ivec2 strides = ivec2(${l}, ${p}); + const ivec2 pads = ivec2(${v}, ${T}); + const float initializationValue = ${I}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float count = 0.0; float getValue(int batch, int xR, int xC, int d) { - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { return initializationValue; } count += 1.0; @@ -2543,33 +2543,33 @@ return (round(mod(b, 2.0)) != 1) ? // max/min x(?, ?, d) to get y(yR, yC, d). // ? = to be determined - vec4 minMaxValue = vec4(${N}); + vec4 minMaxValue = vec4(${I}); float avgValue = 0.0; count = 0.0; - for (int wR = 0; wR < ${m}; - wR += ${h}) { + for (int wR = 0; wR < ${y}; + wR += ${f}) { int xR = xRCorner + wR; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${k}; wC += 4) { - int xC = xCCorner + wC * ${d}; + for (int wC = 0; wC < ${L}; wC += 4) { + int xC = xCCorner + wC * ${m}; vec4 values = vec4( getValue(batch, xR, xC, d), - getValue(batch, xR, xC + ${d}, d), - getValue(batch, xR, xC + 2 * ${d}, d), - getValue(batch, xR, xC + 3 * ${d}, d) + getValue(batch, xR, xC + ${m}, d), + getValue(batch, xR, xC + 2 * ${m}, d), + getValue(batch, xR, xC + 3 * ${m}, d) ); - ${U} + ${q} } - int xC = xCCorner + ${k}; - if (${F===1}) { + int xC = xCCorner + ${L}; + if (${B===1}) { vec4 values = vec4( getValue(batch, xR, xC, d), initializationValue, @@ -2577,33 +2577,33 @@ return (round(mod(b, 2.0)) != 1) ? 
initializationValue ); - ${U} - } else if (${F===2}) { + ${q} + } else if (${B===2}) { vec4 values = vec4( getValue(batch, xR, xC, d), - getValue(batch, xR, xC + ${d}, d), + getValue(batch, xR, xC + ${m}, d), initializationValue, initializationValue ); - ${U} - } else if (${F===3}) { + ${q} + } else if (${B===3}) { vec4 values = vec4( getValue(batch, xR, xC, d), - getValue(batch, xR, xC + ${d}, d), - getValue(batch, xR, xC + 2 * ${d}, d), + getValue(batch, xR, xC + ${m}, d), + getValue(batch, xR, xC + 2 * ${m}, d), initializationValue ); - ${U} + ${q} } } setOutput(${E}); } - `}}class TS{constructor(e,t,n,s=!1,i=!1){if(this.variableNames=["x"],t==="avg"&&n)throw new Error("Cannot compute positions for average pool.");const o=e.filterWidth,a=e.strideDepth,c=e.strideHeight,h=e.strideWidth,d=e.dilationDepth,m=e.dilationHeight,f=e.dilationWidth,b=e.effectiveFilterDepth,w=e.effectiveFilterHeight,L=e.effectiveFilterWidth,x=e.padInfo.front,v=e.padInfo.top,N=e.padInfo.left;this.outputShape=e.outShape;const O=t==="avg";let E="0.0";if(O||(E="-1.0 / 1e-20"),n){const j=">=";this.userCode=` + `}}class w1{constructor(t,e,r,o=!1,s=!1){if(this.variableNames=["x"],e==="avg"&&r)throw new Error("Cannot compute positions for average pool.");let c=t.filterWidth,l=t.strideDepth,p=t.strideHeight,f=t.strideWidth,m=t.dilationDepth,y=t.dilationHeight,b=t.dilationWidth,v=t.effectiveFilterDepth,T=t.effectiveFilterHeight,N=t.effectiveFilterWidth,S=t.padInfo.front,D=t.padInfo.top,I=t.padInfo.left;this.outputShape=t.outShape;let P=e==="avg",E="0.0";if(P||(E="-1.0 / 1e-20"),r){let J=">=";this.userCode=` const ivec3 strides = - ivec3(${a}, ${c}, ${h}); - const ivec3 pads = ivec3(${x}, ${v}, ${N}); + ivec3(${l}, ${p}, ${f}); + const ivec3 pads = ivec3(${S}, ${D}, ${I}); void main() { ivec5 coords = getOutputCoords(); @@ -2621,27 +2621,27 @@ return (round(mod(b, 2.0)) != 1) ? float minMaxValueFound = 0.0; int minMaxPosition = 0; - for (int wD = 0; wD < ${b}; - wD += ${d}) { + for (int wD = 0; wD < ${v}; + wD += ${m}) { int xD = xDCorner + wD; - if (xD < 0 || xD >= ${e.inDepth}) { + if (xD < 0 || xD >= ${t.inDepth}) { continue; } - for (int wR = 0; wR < ${w}; - wR += ${m}) { + for (int wR = 0; wR < ${T}; + wR += ${y}) { int xR = xRCorner + wR; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${L}; - wC += ${f}) { + for (int wC = 0; wC < ${N}; + wC += ${b}) { int xC = xCCorner + wC; - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { continue; } @@ -2651,34 +2651,34 @@ return (round(mod(b, 2.0)) != 1) ? // use the current value. 
float currMinMaxValue = mix( value, minMaxValue, minMaxValueFound); - if (value ${j} currMinMaxValue) { + if (value ${J} currMinMaxValue) { minMaxValue = value; minMaxValueFound = 1.0; - minMaxPosition = ${s?i?`(((batch * ${e.inDepth} + xD) * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`((xD * ${e.inHeight} + xR) * ${e.inWidth} + xC) * ${e.inChannels} + ch`:`wD * ${w} * ${L} + - wR * ${L} + wC`}; + minMaxPosition = ${o?s?`(((batch * ${t.inDepth} + xD) * ${t.inHeight} + xR) * ${t.inWidth} + xC) * ${t.inChannels} + ch`:`((xD * ${t.inHeight} + xR) * ${t.inWidth} + xC) * ${t.inChannels} + ch`:`wD * ${T} * ${N} + + wR * ${N} + wC`}; } } } } setOutput(float(minMaxPosition)); } - `;return}const k="max";let F=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="avg"&&(F="avgValue / count");const U=Math.floor(o/4)*4,$=o%4,Y=` - if (${O}) { + `;return}let L="max",B=`${e}(${e}(${e}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;e==="avg"&&(B="avgValue / count");let q=Math.floor(c/4)*4,H=c%4,Z=` + if (${P}) { avgValue += dot(values, ones); } else { - minMaxValue = ${k}(values, minMaxValue); + minMaxValue = ${L}(values, minMaxValue); } `;this.userCode=` const ivec3 strides = - ivec3(${a}, ${c}, ${h}); - const ivec3 pads = ivec3(${x}, ${v}, ${N}); + ivec3(${l}, ${p}, ${f}); + const ivec3 pads = ivec3(${S}, ${D}, ${I}); const float initializationValue = ${E}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float count = 0.0; float getValue(int batch, int xD, int xR, int xC, int ch) { - if (xC < 0 || xC >= ${e.inWidth}) { + if (xC < 0 || xC >= ${t.inWidth}) { return initializationValue; } count += 1.0; @@ -2701,37 +2701,37 @@ return (round(mod(b, 2.0)) != 1) ? float avgValue = 0.0; count = 0.0; - for (int wD = 0; wD < ${b}; - wD += ${d}) { + for (int wD = 0; wD < ${v}; + wD += ${m}) { int xD = xDCorner + wD; - if (xD < 0 || xD >= ${e.inDepth}) { + if (xD < 0 || xD >= ${t.inDepth}) { continue; } - for (int wR = 0; wR < ${w}; - wR += ${m}) { + for (int wR = 0; wR < ${T}; + wR += ${y}) { int xR = xRCorner + wR; - if (xR < 0 || xR >= ${e.inHeight}) { + if (xR < 0 || xR >= ${t.inHeight}) { continue; } - for (int wC = 0; wC < ${U}; wC += 4) { - int xC = xCCorner + wC * ${f}; + for (int wC = 0; wC < ${q}; wC += 4) { + int xC = xCCorner + wC * ${b}; vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${f}, ch), - getValue(batch, xD, xR, xC + 2 * ${f}, ch), - getValue(batch, xD, xR, xC + 3 * ${f}, ch) + getValue(batch, xD, xR, xC + ${b}, ch), + getValue(batch, xD, xR, xC + 2 * ${b}, ch), + getValue(batch, xD, xR, xC + 3 * ${b}, ch) ); - ${Y} + ${Z} } - int xC = xCCorner + ${U}; - if (${$===1}) { + int xC = xCCorner + ${q}; + if (${H===1}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), initializationValue, @@ -2739,57 +2739,57 @@ return (round(mod(b, 2.0)) != 1) ? 
initializationValue ); - ${Y} - } else if (${$===2}) { + ${Z} + } else if (${H===2}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${f}, ch), + getValue(batch, xD, xR, xC + ${b}, ch), initializationValue, initializationValue ); - ${Y} - } else if (${$===3}) { + ${Z} + } else if (${H===3}) { vec4 values = vec4( getValue(batch, xD, xR, xC, ch), - getValue(batch, xD, xR, xC + ${f}, ch), - getValue(batch, xD, xR, xC + 2 * ${f}, ch), + getValue(batch, xD, xR, xC + ${b}, ch), + getValue(batch, xD, xR, xC + 2 * ${b}, ch), initializationValue ); - ${Y} + ${Z} } } - setOutput(${F}); + setOutput(${B}); } } - `}}class yC{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:i,outSize:o}=e;this.outputShape=[s,o];let a="0.0",c="";t==="prod"?a="1.0":t==="min"?(a="1.0 / 1e-20",c="min"):t==="max"&&(a="-1.0 / 1e-20",c="max");let h=`${t}(${t}(${t}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;t==="sum"?h="sumValue":t==="prod"?h="prodValue":t==="all"?h="allValue":t==="any"&&(h="anyValue");const d=Math.floor(n/4)*4,m=n%4;let f=` - if (${t==="sum"}) { + `}}class zI{constructor(t,e){this.variableNames=["x"];let{windowSize:r,batchSize:o,inSize:s,outSize:c}=t;this.outputShape=[o,c];let l="0.0",p="";e==="prod"?l="1.0":e==="min"?(l="1.0 / 1e-20",p="min"):e==="max"&&(l="-1.0 / 1e-20",p="max");let f=`${e}(${e}(${e}(minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])`;e==="sum"?f="sumValue":e==="prod"?f="prodValue":e==="all"?f="allValue":e==="any"&&(f="anyValue");let m=Math.floor(r/4)*4,y=r%4,b=` + if (${e==="sum"}) { sumValue += dot(values, ones); - } else if (${t==="prod"}) { + } else if (${e==="prod"}) { vec2 tmp = vec2(values[0], values[1]) * vec2(values[2], values[3]); prodValue *= tmp[0] * tmp[1]; } else { - minMaxValue = ${c}(values, minMaxValue); + minMaxValue = ${p}(values, minMaxValue); } - `,b="vec4";t==="all"?(a="1.0",f=` + `,v="vec4";e==="all"?(l="1.0",b=` bool reducedAllValue = all(values); float floatedReducedAllValue = float(reducedAllValue); allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0); - `,b="bvec4"):t==="any"&&(a="0.0",f=` + `,v="bvec4"):e==="any"&&(l="0.0",b=` bool reducedAnyValue = any(values); float floatedReducedAnyValue = float(reducedAnyValue); anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0); - `,b="bvec4");let w="";i%n>0&&(w=` - if (inIdx < 0 || inIdx >= ${i}) { + `,v="bvec4");let T="";s%r>0&&(T=` + if (inIdx < 0 || inIdx >= ${s}) { return initializationValue; } `),this.userCode=` - const float initializationValue = ${a}; + const float initializationValue = ${l}; const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float getValue(int batch, int inIdx) { - ${w} + ${T} return getX(batch, inIdx); } @@ -2797,71 +2797,71 @@ return (round(mod(b, 2.0)) != 1) ? 
ivec2 coords = getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; - int inOffset = outIdx * ${n}; + int inOffset = outIdx * ${r}; - vec4 minMaxValue = vec4(${a}); + vec4 minMaxValue = vec4(${l}); float prodValue = 1.0; float sumValue = 0.0; float allValue = 1.0; float anyValue = 0.0; - for (int i = 0; i < ${d}; i += 4) { + for (int i = 0; i < ${m}; i += 4) { int inIdx = inOffset + i; - ${b} values = ${b}( + ${v} values = ${v}( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), getValue(batch, inIdx + 3) ); - ${f} + ${b} } - int inIdx = inOffset + ${d}; - if (${m===1}) { - ${b} values = ${b}( + int inIdx = inOffset + ${m}; + if (${y===1}) { + ${v} values = ${v}( getValue(batch, inIdx), initializationValue, initializationValue, initializationValue ); - ${f} - } else if (${m===2}) { - ${b} values = ${b}( + ${b} + } else if (${y===2}) { + ${v} values = ${v}( getValue(batch, inIdx), getValue(batch, inIdx + 1), initializationValue, initializationValue ); - ${f} - } else if (${m===3}) { - ${b} values = ${b}( + ${b} + } else if (${y===3}) { + ${v} values = ${v}( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), initializationValue ); - ${f} + ${b} } - setOutput(${h}); + setOutput(${f}); } - `}}class bC{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e;let n="";for(let s=0;s<4;s++){let i="thisRC = rc;";s%2===1&&(i+="thisRC.z += 1;"),s>1&&(i+="thisRC.y += 1;"),n+=` - ${i} - ${s>0?"if(thisRC.y < rows && thisRC.z < cols){":""} + `}}class WI{constructor(t,e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t;let r="";for(let o=0;o<4;o++){let s="thisRC = rc;";o%2===1&&(s+="thisRC.z += 1;"),o>1&&(s+="thisRC.y += 1;"),r+=` + ${s} + ${o>0?"if(thisRC.y < rows && thisRC.z < cols){":""} int flatIndex = getFlatIndex(thisRC); ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex); vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z)); - result[${s}] = + result[${o}] = getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims); - ${s>0?"}":""} + ${o>0?"}":""} `}this.userCode=` - ${I6(t)} - ${IS(e)} + ${P7(e)} + ${b1(t)} void main() { ivec3 rc = getOutputCoords(); @@ -2869,19 +2869,19 @@ return (round(mod(b, 2.0)) != 1) ? vec4 result = vec4(0.); ivec3 thisRC; - int rows = ${e[1]}; - int cols = ${e[2]}; + int rows = ${t[1]}; + int cols = ${t[2]}; - ${n} + ${r} setOutput(result); } - `}}function I6(e){const t=Yo(["r","c","d"],e);return` + `}}function P7(n){let t=xa(["r","c","d"],n);return` ivec3 inputCoordsFromReshapedOutCoords(int index) { ${t} return ivec3(r, c, d); } - `}class x6{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],f=1/d,b=1/m,w=Math.ceil(f)*2+2,L=Math.ceil(b)*2+2;this.userCode=` + `}class O7{constructor(t,e,r){this.variableNames=["dy"],this.outputShape=[],this.outputShape=e.shape;let[,o,s]=e.shape,[,c,l]=t.shape,p=[r&&c>1?o-1:o,r&&l>1?s-1:s],f=[r&&c>1?c-1:c,r&&l>1?l-1:l],m=p[0]/f[0],y=p[1]/f[1],b=1/m,v=1/y,T=Math.ceil(b)*2+2,N=Math.ceil(v)*2+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -2891,14 +2891,14 @@ return (round(mod(b, 2.0)) != 1) ? 
float accumulator = 0.0; - const float heightScale = float(${d}); - const float widthScale = float(${m}); + const float heightScale = float(${m}); + const float widthScale = float(${y}); - const float invHeightScale = float(${f}); - const float invWidthScale = float(${b}); + const float invHeightScale = float(${b}); + const float invWidthScale = float(${v}); - const int winHeight = int(${w}); - const int winWidth = int(${L}); + const int winHeight = int(${T}); + const int winWidth = int(${N}); // Compute bounds for where in dy we will look float startRLerp = floor(float(r) * invHeightScale); @@ -2912,7 +2912,7 @@ return (round(mod(b, 2.0)) != 1) ? int dyR = dyROffset + startDyR; // Guard against the window exceeding the bounds of dy - if (dyR < 0 || dyR >= ${o}) { + if (dyR < 0 || dyR >= ${c}) { continue; } @@ -2920,19 +2920,19 @@ return (round(mod(b, 2.0)) != 1) ? int dyC = dyCOffset + startDyC; // Guard against the window exceeding the bounds of dy - if (dyC < 0 || dyC >= ${a}) { + if (dyC < 0 || dyC >= ${l}) { continue; } float dxR = float(dyR) * heightScale; int topDxRIndex = int(floor(dxR)); - int bottomDxRIndex = int(min(ceil(dxR), ${s-1}.0)); + int bottomDxRIndex = int(min(ceil(dxR), ${o-1}.0)); float dxRLerp = dxR - float(topDxRIndex); float inverseDxRLerp = 1.0 - dxRLerp; float dxC = float(dyC) * widthScale; int leftDxCIndex = int(floor(dxC)); - int rightDxCIndex = int(min(ceil(dxC), ${i-1}.0)); + int rightDxCIndex = int(min(ceil(dxC), ${s-1}.0)); float dxCLerp = dxC - float(leftDxCIndex); float inverseDxCLerp = 1.0 - dxCLerp; @@ -2962,11 +2962,11 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(accumulator); } - `}}class T6{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` + `}}class L7{constructor(t,e,r,o){this.variableNames=["A"],this.outputShape=[];let[s,c,l,p]=t;this.outputShape=[s,e,r,p];let f=[o&&e>1?c-1:c,o&&r>1?l-1:l],m=[o&&e>1?e-1:e,o&&r>1?r-1:r];this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( - ${h[0]/d[0]}, - ${h[1]/d[1]}); - const vec2 inputShapeRC = vec2(${o}.0, ${a}.0); + ${f[0]/m[0]}, + ${f[1]/m[1]}); + const vec2 inputShapeRC = vec2(${c}.0, ${l}.0); void main() { ivec4 coords = getOutputCoords(); @@ -2995,13 +2995,13 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(newValue); } - `}}class A6{constructor(e,t,n,s){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n];this.userCode=` + `}}class M7{constructor(t,e,r,o){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=[];let[s,c,l,p]=t;this.outputShape=[s,e,r,p];let f=[o&&e>1?c-1:c,o&&r>1?l-1:l],m=[o&&e>1?e-1:e,o&&r>1?r-1:r];this.userCode=` const vec3 effectiveInputOverOutputRatioRC = vec3( - ${h[0]/d[0]}, - ${h[1]/d[1]}, - ${h[1]/d[1]}); - const vec3 inputShapeRC = vec3(${o}.0, ${a}.0, - ${a}.0); + ${f[0]/m[0]}, + ${f[1]/m[1]}, + ${f[1]/m[1]}); + const vec3 inputShapeRC = vec3(${c}.0, ${l}.0, + ${l}.0); float getAValue(int b, int r, int c, int d) { return getChannel(getA(b, r, c, d), vec2(c, d)); @@ -3023,8 +3023,8 @@ return (round(mod(b, 2.0)) != 1) ? min(inputShapeRC - 1.0, ceil(sourceFracIndexRC))); // Should we calculate next column and row elements in 2x2 packed cell. 
- bool hasNextCol = d < ${c-1}; - bool hasNextRow = coords.z < ${n-1}; + bool hasNextCol = d < ${p-1}; + bool hasNextRow = coords.z < ${r-1}; // In parallel, construct four corners for all four components in // packed 2x2 cell. @@ -3072,7 +3072,7 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(newValue); } - `}}class v6{constructor(e,t,n){this.variableNames=["dy"],this.outputShape=[],this.outputShape=t.shape;const[,s,i]=t.shape,[,o,a]=e.shape,c=[n&&o>1?s-1:s,n&&a>1?i-1:i],h=[n&&o>1?o-1:o,n&&a>1?a-1:a],d=c[0]/h[0],m=c[1]/h[1],f=1/d,b=1/m,w=Math.ceil(f)*2+2,L=Math.ceil(b)*2+2;this.userCode=` + `}}class B7{constructor(t,e,r){this.variableNames=["dy"],this.outputShape=[],this.outputShape=e.shape;let[,o,s]=e.shape,[,c,l]=t.shape,p=[r&&c>1?o-1:o,r&&l>1?s-1:s],f=[r&&c>1?c-1:c,r&&l>1?l-1:l],m=p[0]/f[0],y=p[1]/f[1],b=1/m,v=1/y,T=Math.ceil(b)*2+2,N=Math.ceil(v)*2+2;this.userCode=` void main() { ivec4 coords = getOutputCoords(); int b = coords[0]; @@ -3082,14 +3082,14 @@ return (round(mod(b, 2.0)) != 1) ? float accumulator = 0.0; - const float heightScale = float(${d}); - const float widthScale = float(${m}); + const float heightScale = float(${m}); + const float widthScale = float(${y}); - const float invHeightScale = float(${f}); - const float invWidthScale = float(${b}); + const float invHeightScale = float(${b}); + const float invWidthScale = float(${v}); - const int winHeight = int(${w}); - const int winWidth = int(${L}); + const int winHeight = int(${T}); + const int winWidth = int(${N}); // Compute bounds for where in dy we will look float startRLerp = floor(float(r) * invHeightScale); @@ -3103,7 +3103,7 @@ return (round(mod(b, 2.0)) != 1) ? int dyR = dyROffset + startDyR; // Guard against the window exceeding the bounds of dy - if (dyR < 0 || dyR >= ${o}) { + if (dyR < 0 || dyR >= ${c}) { continue; } @@ -3111,26 +3111,26 @@ return (round(mod(b, 2.0)) != 1) ? int dyC = dyCOffset + startDyC; // Guard against the window exceeding the bounds of dy - if (dyC < 0 || dyC >= ${a}) { + if (dyC < 0 || dyC >= ${l}) { continue; } float sourceFracRow = - float(${c[0]}) * - (float(dyR) / float(${h[0]})); + float(${p[0]}) * + (float(dyR) / float(${f[0]})); float sourceFracCol = - float(${c[1]}) * - (float(dyC) / float(${h[1]})); + float(${p[1]}) * + (float(dyC) / float(${f[1]})); int sourceNearestRow = int(min( - float(int(${s}) - 1), - ${n} ? float(round(sourceFracRow)) : + float(int(${o}) - 1), + ${r} ? float(round(sourceFracRow)) : float(floor(sourceFracRow)))); int sourceNearestCol = int(min( - float(int(${i}) - 1), - ${n} ? float(round(sourceFracCol)) : + float(int(${s}) - 1), + ${r} ? float(round(sourceFracCol)) : float(floor(sourceFracCol)))); if (r == sourceNearestRow && c == sourceNearestCol) { @@ -3142,11 +3142,11 @@ return (round(mod(b, 2.0)) != 1) ? 
setOutput(accumulator); } - `}}class N6{constructor(e,t,n,s){this.variableNames=["A"],this.outputShape=[];const[i,o,a,c]=e;this.outputShape=[i,t,n,c];const h=[s&&t>1?o-1:o,s&&n>1?a-1:a],d=[s&&t>1?t-1:t,s&&n>1?n-1:n],m=s?"0.5":"0.0";this.userCode=` + `}}class z7{constructor(t,e,r,o){this.variableNames=["A"],this.outputShape=[];let[s,c,l,p]=t;this.outputShape=[s,e,r,p];let f=[o&&e>1?c-1:c,o&&r>1?l-1:l],m=[o&&e>1?e-1:e,o&&r>1?r-1:r],y=o?"0.5":"0.0";this.userCode=` const vec2 effectiveInputOverOutputRatioRC = vec2( - ${h[0]/d[0]}, - ${h[1]/d[1]}); - const vec2 inputShapeRC = vec2(${o}.0, ${a}.0); + ${f[0]/m[0]}, + ${f[1]/m[1]}); + const vec2 inputShapeRC = vec2(${c}.0, ${l}.0); void main() { ivec4 coords = getOutputCoords(); @@ -3159,90 +3159,90 @@ return (round(mod(b, 2.0)) != 1) ? // Compute the coordinators of nearest neighbor point. ivec2 sourceNearestRC = ivec2( - min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${m}))); + min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${y}))); float newValue = getA(b, sourceNearestRC.x, sourceNearestRC.y, d); setOutput(newValue); } - `}}class C6{constructor(e,t){this.variableNames=["x"];const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);if(this.outputShape=e,n===1){this.userCode=` + `}}class W7{constructor(t,e){this.variableNames=["x"];let r=t.length;if(r>4)throw new Error(`WebGL backend: Reverse of rank-${r} tensor is not yet supported`);if(this.outputShape=t,r===1){this.userCode=` void main() { int coord = getOutputCoords(); - setOutput(getX(${e[0]} - coord - 1)); + setOutput(getX(${t[0]} - coord - 1)); } - `;return}const s=a=>t.indexOf(a)!==-1&&e[a]!==1?`${e[a]} - coords[${a}] - 1`:`coords[${a}]`,i=e.map((a,c)=>s(c)).join(","),o=Rt(n);this.userCode=` + `;return}let o=l=>e.indexOf(l)!==-1&&t[l]!==1?`${t[l]} - coords[${l}] - 1`:`coords[${l}]`,s=t.map((l,p)=>o(p)).join(","),c=Oe(r);this.userCode=` void main() { - ${o} coords = getOutputCoords(); - setOutput(getX(${i})); + ${c} coords = getOutputCoords(); + setOutput(getX(${s})); } - `}}class R6{constructor(e,t){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;const n=e.length;if(n>4)throw new Error(`WebGL backend: Reverse of rank-${n} tensor is not yet supported`);this.outputShape=e;const s=Mn("rc",n),i=`${s[n-1]} + 1 < ${this.outputShape[n-1]}`,o=`${s[n-2]} + 1 < ${this.outputShape[n-2]}`,a=Rt(n);n===1?this.userCode=` + `}}class V7{constructor(t,e){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0;let r=t.length;if(r>4)throw new Error(`WebGL backend: Reverse of rank-${r} tensor is not yet supported`);this.outputShape=t;let o=Yn("rc",r),s=`${o[r-1]} + 1 < ${this.outputShape[r-1]}`,c=`${o[r-2]} + 1 < ${this.outputShape[r-2]}`,l=Oe(r);r===1?this.userCode=` void main(){ int rc = getOutputCoords(); vec4 result = vec4(0.); - result.r = getChannel(getX(${e[0]} - rc - 1), - ${e[0]} - rc - 1); - if(${i}){ - result.g = getChannel(getX(${e[0]} - (rc + 1) - 1), - ${e[0]} - (rc + 1) - 1); + result.r = getChannel(getX(${t[0]} - rc - 1), + ${t[0]} - rc - 1); + if(${s}){ + result.g = getChannel(getX(${t[0]} - (rc + 1) - 1), + ${t[0]} - (rc + 1) - 1); } setOutput(result); } `:this.userCode=` void main() { - ${a} rc = getOutputCoords(); + ${l} rc = getOutputCoords(); vec4 result = vec4(0.); - result.r = ${c(s.slice())}; - if(${i}){ - result.g = ${h(s.slice())}; + result.r = ${p(o.slice())}; + if(${s}){ + result.g = ${f(o.slice())}; } - if(${o}) { - result.b = ${d(s.slice())}; - if(${i}) { - result.a = ${m(s.slice())}; + 
if(${c}) { + result.b = ${m(o.slice())}; + if(${s}) { + result.a = ${y(o.slice())}; } } setOutput(result); } - `;function c(w){return f(w)}function h(w){return w[n-1]="("+w[n-1]+" + 1)",f(w)}function d(w){return w[n-2]="("+w[n-2]+" + 1)",f(w)}function m(w){return w[n-1]="("+w[n-1]+" + 1)",w[n-2]="("+w[n-2]+" + 1)",f(w)}function f(w){const L=e.map((N,O)=>b(O,w)),x=L.join(","),v=L.slice(-2).join(",");return`getChannel(getX(${x}), vec2(${v}))`}function b(w,L){return t.indexOf(w)!==-1&&e[w]!==1?`${e[w]} - ${L[w]} - 1`:`${L[w]}`}}}class wC{constructor(e,t,n,s,i,o,a=!0){this.variableNames=["updates","indices","defaultValue"],this.outputShape=o;const c=Rt(i.length),h=Rt(o.length);let d="";n===1?d="i":n===2&&(d="i, j");const m=`getIndices(${d})`;let f="";s===1?f="i":s===2&&(f="i, coords[1]");const b=`getUpdates(${f})`,w=t>1?"strides[j]":"strides";this.userCode=` - ${c} strides = ${c}(${i}); + `;function p(T){return b(T)}function f(T){return T[r-1]="("+T[r-1]+" + 1)",b(T)}function m(T){return T[r-2]="("+T[r-2]+" + 1)",b(T)}function y(T){return T[r-1]="("+T[r-1]+" + 1)",T[r-2]="("+T[r-2]+" + 1)",b(T)}function b(T){let N=t.map((I,P)=>v(P,T)),S=N.join(","),D=N.slice(-2).join(",");return`getChannel(getX(${S}), vec2(${D}))`}function v(T,N){return e.indexOf(T)!==-1&&t[T]!==1?`${t[T]} - ${N[T]} - 1`:`${N[T]}`}}}class VI{constructor(t,e,r,o,s,c,l=!0){this.variableNames=["updates","indices","defaultValue"],this.outputShape=c;let p=Oe(s.length),f=Oe(c.length),m="";r===1?m="i":r===2&&(m="i, j");let y=`getIndices(${m})`,b="";o===1?b="i":o===2&&(b="i, coords[1]");let v=`getUpdates(${b})`,T=e>1?"strides[j]":"strides";this.userCode=` + ${p} strides = ${p}(${s}); void main() { - ${h} coords = getOutputCoords(); + ${f} coords = getOutputCoords(); float sum = 0.0; bool found = false; - for (int i = 0; i < ${e}; i++) { + for (int i = 0; i < ${t}; i++) { int flattenedIndex = 0; - for (int j = 0; j < ${t}; j++) { - int index = round(${m}); - flattenedIndex += index * ${w}; + for (int j = 0; j < ${e}; j++) { + int index = round(${y}); + flattenedIndex += index * ${T}; } if (flattenedIndex == coords[0]) { - sum += ${b}; + sum += ${v}; found = true; } } setOutput(mix(getDefaultValue(), sum, float(found))); } - `}}class O6{constructor(e,t){this.variableNames=["x","segmentIds"];const n=e.windowSize,s=e.batchSize,i=e.inSize,o=e.numSegments,a=o*Math.ceil(i/n);this.outputShape=[s,a];const c="0.0",h="sumValue",d=Math.floor(n/4)*4,m=n%4,f=` + `}}class G7{constructor(t,e){this.variableNames=["x","segmentIds"];let r=t.windowSize,o=t.batchSize,s=t.inSize,c=t.numSegments,l=c*Math.ceil(s/r);this.outputShape=[o,l];let p="0.0",f="sumValue",m=Math.floor(r/4)*4,y=r%4,b=` sumValue += dot(values, segFilter); - `;let b="";i%n>0&&(b=` - if (inIdx < 0 || inIdx >= ${i}) { + `,v="";s%r>0&&(v=` + if (inIdx < 0 || inIdx >= ${s}) { return initializationValue; } - `);let w="";i%n>0&&(w=` - if (inIdx < 0 || inIdx >= ${i}) { + `);let T="";s%r>0&&(T=` + if (inIdx < 0 || inIdx >= ${s}) { return -1.0; } `),this.userCode=` - const float initializationValue = ${c}; + const float initializationValue = ${p}; float getValue(int batch, int inIdx) { - ${b} + ${v} return getX(batch, inIdx); } float getSegmentIdAtIndex(int inIdx) { - ${w} + ${T} return getSegmentIds(inIdx); } @@ -3251,12 +3251,12 @@ return (round(mod(b, 2.0)) != 1) ? 
int batch = coords[0]; int outIdx = coords[1]; int inOffset = int(floor(float(outIdx) / float( - ${o})) * float(${n})); - int currentSeg = int(mod(float(outIdx), float(${o}))); + ${c})) * float(${r})); + int currentSeg = int(mod(float(outIdx), float(${c}))); float sumValue = 0.0; - for (int i = 0; i < ${d}; i += 4) { + for (int i = 0; i < ${m}; i += 4) { int inIdx = inOffset + i; vec4 values = vec4( getValue(batch, inIdx), @@ -3272,11 +3272,11 @@ return (round(mod(b, 2.0)) != 1) ? int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0 ); - ${f} + ${b} } - int inIdx = inOffset + ${d}; - if (${m===1}) { + int inIdx = inOffset + ${m}; + if (${y===1}) { vec4 values = vec4( getValue(batch, inIdx), initializationValue, @@ -3293,8 +3293,8 @@ return (round(mod(b, 2.0)) != 1) ? 0 ); - ${f} - } else if (${m===2}) { + ${b} + } else if (${y===2}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), @@ -3309,8 +3309,8 @@ return (round(mod(b, 2.0)) != 1) ? 0 ); - ${f} - } else if (${m===3}) { + ${b} + } else if (${y===3}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), @@ -3325,77 +3325,77 @@ return (round(mod(b, 2.0)) != 1) ? 0 ); - ${f} + ${b} } - setOutput(${h}); + setOutput(${f}); } - `}}class E6{constructor(e,t,n){this.variableNames=["c","a","b"],this.outputShape=t;let s,i;if(n>4)throw Error(`Where for rank ${n} is not yet supported`);if(n===1)i="resRC",s="resRC";else{const a=["resRC.x","resRC.y","resRC.z","resRC.w"],c=[],h=[];for(let d=0;d4)throw Error(`Where for rank ${r} is not yet supported`);if(r===1)s="resRC",o="resRC";else{let l=["resRC.x","resRC.y","resRC.z","resRC.w"],p=[],f=[];for(let m=0;m= 1.0) { - setOutput(getA(${i})); + setOutput(getA(${s})); } else { - setOutput(getB(${i})); + setOutput(getB(${s})); } } - `}}class D6{constructor(e){this.variableNames=["source"],this.outputShape=e,this.rank=e.length;const t=Rt(this.rank),n=`uniform int start[${this.rank}];`,s=k6(this.rank);let i;const o=e.map((a,c)=>`sourceLoc.${AS[c]} = start[${c}] + coords.${AS[c]};`);i=` - ${t} sourceLoc; - ${t} coords = getOutputCoords(); - ${o.join(` + `}}class q7{constructor(t){this.variableNames=["source"],this.outputShape=t,this.rank=t.length;let e=Oe(this.rank),r=`uniform int start[${this.rank}];`,o=H7(this.rank),s,c=t.map((l,p)=>`sourceLoc.${v1[p]} = start[${p}] + coords.${v1[p]};`);s=` + ${e} sourceLoc; + ${e} coords = getOutputCoords(); + ${c.join(` `)} `,this.userCode=` - ${n} + ${r} void main() { - ${i} - setOutput(getSource(${s})); + ${s} + setOutput(getSource(${o})); } - `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}const AS=["x","y","z","w","u","v"];function k6(e){if(e===1)return"sourceLoc";if(e<=6)return AS.slice(0,e).map(t=>"sourceLoc."+t).join(",");throw Error(`Slicing for rank ${e} is not yet supported`)}class F6{constructor(e){this.variableNames=["source"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.rank=e.length;const t=Rt(this.rank),n=Mn("coords",this.rank),s=Mn("sourceLoc",this.rank),i=this.rank===1?"sourceLoc":`vec2(${s.slice(-2).join()})`,o=`getChannel(getSource(${s.join()}), ${i})`,a=` - result.x = ${o}; - if (++${n[this.rank-1]} < ${e[this.rank-1]}) { - ++${s[this.rank-1]}; - result.y = ${o}; - --${s[this.rank-1]}; + 
`}getCustomSetupFunc(t){if(t.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${t.length})`);return(e,r)=>{if(this.startLoc==null&&(this.startLoc=e.getUniformLocationNoThrow(r,"start"),this.startLoc==null))return;e.gl.uniform1iv(this.startLoc,t)}}}let v1=["x","y","z","w","u","v"];function H7(n){if(n===1)return"sourceLoc";if(n<=6)return v1.slice(0,n).map(t=>"sourceLoc."+t).join(",");throw Error(`Slicing for rank ${n} is not yet supported`)}class j7{constructor(t){this.variableNames=["source"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.rank=t.length;let e=Oe(this.rank),r=Yn("coords",this.rank),o=Yn("sourceLoc",this.rank),s=this.rank===1?"sourceLoc":`vec2(${o.slice(-2).join()})`,c=`getChannel(getSource(${o.join()}), ${s})`,l=` + result.x = ${c}; + if (++${r[this.rank-1]} < ${t[this.rank-1]}) { + ++${o[this.rank-1]}; + result.y = ${c}; + --${o[this.rank-1]}; } - `,c=this.rank===1?"":` - --${n[this.rank-1]}; - if (++${n[this.rank-2]} < ${e[this.rank-2]}) { - ++${s[this.rank-2]}; - result.z = ${o}; - if (++${n[this.rank-1]} < ${e[this.rank-1]}) { - ++${s[this.rank-1]}; - result.w = ${o}; + `,p=this.rank===1?"":` + --${r[this.rank-1]}; + if (++${r[this.rank-2]} < ${t[this.rank-2]}) { + ++${o[this.rank-2]}; + result.z = ${c}; + if (++${r[this.rank-1]} < ${t[this.rank-1]}) { + ++${o[this.rank-1]}; + result.w = ${c}; } } - `,h=this.rank<=4?`sourceLoc = coords + - ${t}(${e.map((d,m)=>`start[${m}]`).join()});`:e.map((d,m)=>`${s[m]} = ${n[m]} + start[${m}];`).join(` + `,f=this.rank<=4?`sourceLoc = coords + + ${e}(${t.map((m,y)=>`start[${y}]`).join()});`:t.map((m,y)=>`${o[y]} = ${r[y]} + start[${y}];`).join(` `);this.userCode=` uniform int start[${this.rank}]; void main() { - ${t} coords = getOutputCoords(); - ${t} sourceLoc; - ${h} + ${e} coords = getOutputCoords(); + ${e} sourceLoc; + ${f} vec4 result = vec4(0.); - ${a} - ${c} + ${l} + ${p} setOutput(result); } - `}getCustomSetupFunc(e){if(e.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${e.length})`);return(t,n)=>{if(this.startLoc==null&&(this.startLoc=t.getUniformLocationNoThrow(n,"start"),this.startLoc==null))return;t.gl.uniform1iv(this.startLoc,e)}}}class _6{constructor(e,t,n){this.variableNames=["x"],this.outputShape=n;const s=n.length,i=Rt(n.length),o=Rt(n.length);let a="";if(s===1)a="coords * strides + begin";else{let c=0;a=n.map((h,d)=>(c++,n.length===1?`coords * strides[${d}] + begin[${d}]`:`coords[${c-1}] * strides[${d}] + begin[${d}]`)).join(",")}this.userCode=` - ${i} begin = ${i}(${e}); - ${i} strides = ${i}(${t}); + `}getCustomSetupFunc(t){if(t.length!==this.rank)throw Error(`The rank (${this.rank}) of the program must match the length of start (${t.length})`);return(e,r)=>{if(this.startLoc==null&&(this.startLoc=e.getUniformLocationNoThrow(r,"start"),this.startLoc==null))return;e.gl.uniform1iv(this.startLoc,t)}}}class K7{constructor(t,e,r){this.variableNames=["x"],this.outputShape=r;let o=r.length,s=Oe(r.length),c=Oe(r.length),l="";if(o===1)l="coords * strides + begin";else{let p=0;l=r.map((f,m)=>(p++,r.length===1?`coords * strides[${m}] + begin[${m}]`:`coords[${p-1}] * strides[${m}] + begin[${m}]`)).join(",")}this.userCode=` + ${s} begin = ${s}(${t}); + ${s} strides = ${s}(${e}); void main() { - ${o} coords = getOutputCoords(); - setOutput(getX(${a})); + ${c} coords = getOutputCoords(); + setOutput(getX(${l})); } - `}}class 
W6{constructor(e){this.gpgpu=e,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0,this.freeTextures={},this.logEnabled=!1,this.usedTextures={}}acquireTexture(e,t,n){const s=SC(t,n),i=IC(e,s,n);i in this.freeTextures||(this.freeTextures[i]=[]),i in this.usedTextures||(this.usedTextures[i]=[]);const o=LC(e,s,this.gpgpu.gl,this.gpgpu.textureConfig,n);if(this.freeTextures[i].length>0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=o,this.log();const c=this.freeTextures[i].shift();return this.usedTextures[i].push(c),c}let a;return s===Cn.PACKED_2X2_FLOAT32?a=this.gpgpu.createPackedMatrixTexture(e[0],e[1]):s===Cn.PACKED_2X2_FLOAT16?a=this.gpgpu.createFloat16PackedMatrixTexture(e[0],e[1]):s===Cn.UNPACKED_FLOAT32?a=this.gpgpu.createFloat32MatrixTexture(e[0],e[1]):s===Cn.UNPACKED_FLOAT16?a=this.gpgpu.createFloat16MatrixTexture(e[0],e[1]):s===Cn.PACKED_4X1_UNSIGNED_BYTE&&(a=this.gpgpu.createUnsignedBytesMatrixTexture(e[0],e[1])),this.usedTextures[i].push(a),this.numUsedTextures++,this._numBytesAllocated+=o,this.log(),a}releaseTexture(e,t,n,s){if(this.freeTextures==null)return;const i=SC(n,s),o=IC(t,i,s);o in this.freeTextures||(this.freeTextures[o]=[]);const a=LC(t,i,this.gpgpu.gl,this.gpgpu.textureConfig,s),c=oe().get("WEBGL_DELETE_TEXTURE_THRESHOLD");c!==-1&&this._numBytesAllocated>c?(this.gpgpu.deleteMatrixTexture(e),this._numBytesAllocated-=a):(this.freeTextures[o].push(e),this.numFreeTextures++,this._numBytesFree+=a),this.numUsedTextures--;const h=this.usedTextures[o],d=h.indexOf(e);if(d<0)throw new Error("Cannot release a texture that was never provided by this texture manager");h.splice(d,1),this.log()}log(){if(!this.logEnabled)return;const e=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${e})`);const t=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*t)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(const e in this.freeTextures)this.freeTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});for(const e in this.usedTextures)this.usedTextures[e].forEach(t=>{this.gpgpu.deleteMatrixTexture(t)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function $6(e,t){const n=e;if(t===n.R32F)return 4;if(t===n.R16F)return 2;if(t===n.RGBA32F)return 16;if(t===e.RGBA)return 16;if(t===n.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function LC(e,t,n,s,i){const o=U6(t,s);let a;if(i){const[h,d]=pc(e[0],e[1]);a=h*d}else{const[h,d]=hu(e[0],e[1]);a=h*d}const c=$6(n,o);return a*c}function U6(e,t){switch(e){case Cn.PACKED_2X2_FLOAT32:return mC(t);case Cn.PACKED_2X2_FLOAT16:return fC(t);case Cn.UNPACKED_FLOAT32:return uC(t);case Cn.UNPACKED_FLOAT16:return dC(t);case Cn.PACKED_4X1_UNSIGNED_BYTE:return pC(t);default:throw new Error(`Unknown physical texture type ${e}`)}}function B6(e){return oe().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?e?Cn.PACKED_2X2_FLOAT32:Cn.UNPACKED_FLOAT32:e?Cn.PACKED_2X2_FLOAT16:Cn.UNPACKED_FLOAT16}function SC(e,t){if(e===Ns.UPLOAD)return Cn.PACKED_2X2_FLOAT32;if(e===Ns.RENDER||e==null)return 
B6(t);if(e===Ns.DOWNLOAD||e===Ns.PIXELS)return Cn.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${e}`)}function IC(e,t,n){return`${e[0]}_${e[1]}_${t}_${n}`}class M6{constructor(e,t){this.variableNames=["A"];const n=new Array(e.length);for(let o=0;o0){this.numFreeTextures--,this.numUsedTextures++,this._numBytesFree-=c,this.log();let p=this.freeTextures[s].shift();return this.usedTextures[s].push(p),p}let l;return o===On.PACKED_2X2_FLOAT32?l=this.gpgpu.createPackedMatrixTexture(t[0],t[1]):o===On.PACKED_2X2_FLOAT16?l=this.gpgpu.createFloat16PackedMatrixTexture(t[0],t[1]):o===On.UNPACKED_FLOAT32?l=this.gpgpu.createFloat32MatrixTexture(t[0],t[1]):o===On.UNPACKED_FLOAT16?l=this.gpgpu.createFloat16MatrixTexture(t[0],t[1]):o===On.PACKED_4X1_UNSIGNED_BYTE&&(l=this.gpgpu.createUnsignedBytesMatrixTexture(t[0],t[1])),this.usedTextures[s].push(l),this.numUsedTextures++,this._numBytesAllocated+=c,this.log(),l}releaseTexture(t,e,r,o){if(this.freeTextures==null)return;let s=UI(r,o),c=qI(e,s,o);c in this.freeTextures||(this.freeTextures[c]=[]);let l=GI(e,s,this.gpgpu.gl,this.gpgpu.textureConfig,o),p=ct().get("WEBGL_DELETE_TEXTURE_THRESHOLD");p!==-1&&this._numBytesAllocated>p?(this.gpgpu.deleteMatrixTexture(t),this._numBytesAllocated-=l):(this.freeTextures[c].push(t),this.numFreeTextures++,this._numBytesFree+=l),this.numUsedTextures--;let f=this.usedTextures[c],m=f.indexOf(t);if(m<0)throw new Error("Cannot release a texture that was never provided by this texture manager");f.splice(m,1),this.log()}log(){if(!this.logEnabled)return;let t=this.numFreeTextures+this.numUsedTextures;console.log("Free/Used",`${this.numFreeTextures} / ${this.numUsedTextures}`,`(${t})`);let e=this._numBytesFree/this._numBytesAllocated;console.log(`Bytes allocated: ${this._numBytesAllocated}`),console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100*e)}%)`)}get numBytesAllocated(){return this._numBytesAllocated}get numBytesFree(){return this._numBytesFree}getNumUsedTextures(){return this.numUsedTextures}getNumFreeTextures(){return this.numFreeTextures}dispose(){if(this.freeTextures==null)return;for(let t in this.freeTextures)this.freeTextures[t].forEach(e=>{this.gpgpu.deleteMatrixTexture(e)});for(let t in this.usedTextures)this.usedTextures[t].forEach(e=>{this.gpgpu.deleteMatrixTexture(e)});this.freeTextures=null,this.usedTextures=null,this.numUsedTextures=0,this.numFreeTextures=0,this._numBytesAllocated=0,this._numBytesFree=0}}function Y7(n,t){let e=n;if(t===e.R32F)return 4;if(t===e.R16F)return 2;if(t===e.RGBA32F)return 16;if(t===n.RGBA)return 16;if(t===e.RGBA16F)return 8;throw new Error(`Unknown internal format ${t}`)}function GI(n,t,e,r,o){let s=J7(t,r),c;if(o){let[p,f]=nl(n[0],n[1]);c=p*f}else{let[p,f]=rh(n[0],n[1]);c=p*f}let l=Y7(e,s);return c*l}function J7(n,t){switch(n){case On.PACKED_2X2_FLOAT32:return LI(t);case On.PACKED_2X2_FLOAT16:return MI(t);case On.UNPACKED_FLOAT32:return RI(t);case On.UNPACKED_FLOAT16:return PI(t);case On.PACKED_4X1_UNSIGNED_BYTE:return OI(t);default:throw new Error(`Unknown physical texture type ${n}`)}}function Z7(n){return ct().getBool("WEBGL_RENDER_FLOAT32_ENABLED")?n?On.PACKED_2X2_FLOAT32:On.UNPACKED_FLOAT32:n?On.PACKED_2X2_FLOAT16:On.UNPACKED_FLOAT16}function UI(n,t){if(n===Mr.UPLOAD)return On.PACKED_2X2_FLOAT32;if(n===Mr.RENDER||n==null)return Z7(t);if(n===Mr.DOWNLOAD||n===Mr.PIXELS)return On.PACKED_4X1_UNSIGNED_BYTE;throw new Error(`Unknown logical texture type ${n}`)}function qI(n,t,e){return`${n[0]}_${n[1]}_${t}_${e}`}class 
Q7{constructor(t,e){this.variableNames=["A"];let r=new Array(t.length);for(let c=0;c5)throw Error(`Tile for rank ${t} is not yet supported`);if(t===1)return`imod(resRC, ${e[0]})`;const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],s=[];for(let i=0;i5)throw Error(`Tile for rank ${t} is not yet supported`);if(t===1)return`imod(resRC, ${n[0]})`;let e=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u"],r=[];for(let o=0;o= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0); -`;function G6(e=0){return hr+` - return x > 0.0 ? 1.0 : float(${e}); - `}const NC="return -x;",CC="return ceil(x);",RC="return floor(x);",Y6=` +`;function rJ(n=0){return _s+` + return x > 0.0 ? 1.0 : float(${n}); + `}let YI="return -x;",JI="return ceil(x);",ZI="return floor(x);",oJ=` if (isnan(x)) { return 0.0; } return sign(x); -`,H6="return float(isnan(x));",q6="return float(isinf(x));",j6="return float(!isnan(x) && !isinf(x));",K6=` +`,sJ="return float(isnan(x));",iJ="return float(isinf(x));",aJ="return float(!isnan(x) && !isinf(x));",cJ=` // OpenGL ES does not support round function. // The algorithm is based on banker's rounding. float base = floor(x); @@ -3434,8 +3434,8 @@ return (round(mod(b, 2.0)) != 1) ? return base + 1.0; } } -`,OC="return exp(x);",EC="return exp(x) - 1.0;",X6=`if (x < 0.0) return NAN; - return log(x);`,J6="return log(1.0 + x);",Z6="return sqrt(x);",Q6="return inversesqrt(x);",eX="return 1.0 / (1.0 + exp(-1.0 * x));",tX=` +`,QI="return exp(x);",tE="return exp(x) - 1.0;",lJ=`if (x < 0.0) return NAN; + return log(x);`,uJ="return log(1.0 + x);",pJ="return sqrt(x);",hJ="return inversesqrt(x);",fJ="return 1.0 / (1.0 + exp(-1.0 * x));",dJ=` float epsilon = 1.1920928955078125e-7; float threshold = log(epsilon) + 2.0; @@ -3455,47 +3455,47 @@ return (round(mod(b, 2.0)) != 1) ? result = log(exp_x + 1.0); } return result; -`,nX=hr+` +`,mJ=_s+` if (abs(x) > 1.) { return NAN; } return asin(x); -`,sX=hr+` +`,gJ=_s+` if (abs(x) > 1.) { return NAN; } return acos(x); -`,iX=hr+` +`,yJ=_s+` return atan(x); -`,rX=` +`,bJ=` float e2x = exp(x); return (e2x - 1.0 / e2x) / 2.0; -`,oX=` +`,xJ=` float e2x = exp(-x); return (e2x + 1.0 / e2x) / 2.0; -`,aX=` +`,wJ=` float e2x = exp(-2.0 * abs(x)); return sign(x) * (1.0 - e2x) / (1.0 + e2x); -`,cX=hr+"return log(x + sqrt(x * x + 1.0));",lX=hr+` +`,vJ=_s+"return log(x + sqrt(x * x + 1.0));",TJ=_s+` if (x < 1.0) return NAN; - return log(x + sqrt(x * x - 1.0));`,hX=hr+` + return log(x + sqrt(x * x - 1.0));`,kJ=_s+` if ((x < -1.0) || (x > 1.0)) return NAN; - return (log(1.0 + x) - log(1.0 - x)) / 2.0;`,uX=` + return (log(1.0 + x) - log(1.0 - x)) / 2.0;`,NJ=` // Error function is calculated approximately with elementary function. // See "Handbook of Mathematical Functions with Formulas, // Graphs, and Mathematical Tables", Abramowitz and Stegun. - float p = ${ew}; - float a1 = ${tw}; - float a2 = ${nw}; - float a3 = ${sw}; - float a4 = ${iw}; - float a5 = ${rw}; + float p = ${Yw}; + float a1 = ${Jw}; + float a2 = ${Zw}; + float a3 = ${Qw}; + float a4 = ${tv}; + float a5 = ${ev}; float sign = sign(x); x = abs(x); float t = 1.0 / (1.0 + p * x); return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x)); -`,dX="return 1.0 / x;",pX="return float(!(x >= 1.0));",Fm="return x;";const mX="return x;",fX=` +`,_J="return 1.0 / x;",CJ="return float(!(x >= 1.0));",Bm="return x;";let SJ="return x;",$J=` vec4 result = log(x); vec4 isNaN = vec4(lessThan(x, vec4(0.0))); result.r = isNaN.r == 1.0 ? NAN : result.r; @@ -3504,7 +3504,7 @@ return (round(mod(b, 2.0)) != 1) ? 
result.a = isNaN.a == 1.0 ? NAN : result.a; return result; -`,DC=` +`,eE=` vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); @@ -3514,7 +3514,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isNaN.a ? x.a : result.a; return result; -`,kC=` +`,nE=` vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); bvec4 isNaN = isnan(x); @@ -3524,7 +3524,7 @@ return (round(mod(b, 2.0)) != 1) ? result.a = isNaN.a ? x.a : result.a; return result; -`,FC=` +`,rE=` vec4 result; result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); @@ -3533,9 +3533,9 @@ return (round(mod(b, 2.0)) != 1) ? result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); return result; -`;class fu{constructor(e,t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e,this.userCode=` +`;class ch{constructor(t,e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t,this.userCode=` vec4 unaryOperation(vec4 x) { - ${t} + ${e} } void main() { @@ -3544,104 +3544,104 @@ return (round(mod(b, 2.0)) != 1) ? setOutput(y); } - `}}class gX{constructor(e){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=e;const t=e.length,n=Mn("rc",t),s=Rt(t),i=r5(t,n),o=n.slice(-2),a=t<=1?"rc":`vec2(${o.join(",")})`;this.userCode=` + `}}class IJ{constructor(t){this.variableNames=["A"],this.packedInputs=!0,this.packedOutput=!1,this.outputShape=t;let e=t.length,r=Yn("rc",e),o=Oe(e),s=bX(e,r),c=r.slice(-2),l=e<=1?"rc":`vec2(${c.join(",")})`;this.userCode=` void main() { - ${s} rc = getOutputCoords(); - vec4 packedInput = getA(${i}); + ${o} rc = getOutputCoords(); + vec4 packedInput = getA(${s}); - setOutput(getChannel(packedInput, ${a})); + setOutput(getChannel(packedInput, ${l})); } - `}}const{segment_util:_C}=cw,yX=lw,bX=hw,wX=uw,LX=Ap,SX=1e-7,IX=1e-4,_m={};function xX(e){return e in _m||(_m[e]={}),_m[e]}function Wm(e,t=!1){if(e==="linear")return t?mX:z6;if(e==="relu")return t?DC:TC;if(e==="elu")return t?FC:vC;if(e==="relu6")return t?kC:AC;if(e==="prelu")return t?iC:sC;throw new Error(`Activation ${e} has not been implemented for the WebGL backend.`)}const TX=128,AX=600;function vX(){return oe().global.screen==null?1024:oe().global.screen.height*oe().global.screen.width*window.devicePixelRatio*AX/1024/1024}const WC=1e3;class NX extends y{constructor(e){super();if(this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!oe().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(e==null){const t=ki(oe().getNumber("WEBGL_VERSION"));this.binaryCache=xX(oe().getNumber("WEBGL_VERSION")),this.gpgpu=new n6(t),this.canvas=t.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=e,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=e.gl.canvas;this.textureManager=new W6(this.gpgpu),this.numMBBeforeWarning=vX(),this.texData=new p(this,Ki())}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(e,t,n){if((oe().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||oe().getBool("DEBUG"))&&this.checkNumericalProblems(e),n==="complex64"&&e!=null)throw new Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");const s={};return this.texData.set(s,{shape:t,dtype:n,values:e,usage:Ns.UPLOAD,refCount:1,complexParentRefCount:0}),s}incRef(e){const t=this.texData.get(e);t.refCount++}decRef(e){if(this.texData.has(e)){const t=this.texData.get(e);t.refCount--}}move(e,t,n,s){if(oe().getBool("DEBUG")&&this.checkNumericalProblems(t),s==="complex64")throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");this.texData.set(e,{shape:n,dtype:s,values:t,usage:Ns.UPLOAD,refCount:1,complexParentRefCount:0})}disposeIntermediateTensorInfo(e){const t=e.dataId;if(this.texData.has(t)){const n=this.texData.get(t);n.refCount--,n.refCount<1&&this.disposeData(t)}}readSync(e){const t=this.texData.get(e),{values:n,dtype:s,complexTensorInfos:i,slice:o,shape:a,isPacked:c}=t;if(o!=null){let f;c?f=new fu(a,Fm):f=new st(a,Fm);const b=this.runWebGLProgram(f,[{dataId:e,shape:a,dtype:s}],s),w=this.readSync(b.dataId);return this.disposeIntermediateTensorInfo(b),w}if(n!=null)return this.convertAndCacheOnCPU(e);if(s==="string")return n;const h=this.activeTimers!=null;let d;h&&(d=jn());let m;if(s==="complex64"){const f=this.readSync(i.real.dataId),b=this.readSync(i.imag.dataId);m=tr(f,b)}else m=this.getValuesFromTexture(e);return h&&(this.downloadWaitMs+=jn()-d),this.convertAndCacheOnCPU(e,m)}async read(e){if(this.pendingRead.has(e)){const w=this.pendingRead.get(e);return new Promise(L=>w.push(L))}const t=this.texData.get(e),{values:n,shape:s,slice:i,dtype:o,complexTensorInfos:a,isPacked:c}=t;if(i!=null){let w;c?w=new fu(s,Fm):w=new st(s,Fm);const L=this.runWebGLProgram(w,[{dataId:e,shape:s,dtype:o}],o),x=this.read(L.dataId);return this.disposeIntermediateTensorInfo(L),x}if(n!=null)return this.convertAndCacheOnCPU(e);if(!oe().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&oe().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let h=null,d;if(o!=="complex64"&&oe().get("WEBGL_BUFFER_SUPPORTED")){d=this.decode(e);const w=this.texData.get(d.dataId);h=this.gpgpu.createBufferFromTexture(w.texture,...uu(s))}this.pendingRead.set(e,[]),o!=="complex64"&&await this.gpgpu.createAndWaitForFence();let m;if(o==="complex64"){const w=await Promise.all([this.read(a.real.dataId),this.read(a.imag.dataId)]),L=w[0],x=w[1];m=tr(L,x)}else if(h==null)m=this.getValuesFromTexture(e);else{const w=P(s);m=this.gpgpu.downloadFloat32MatrixFromBuffer(h,w)}d!=null&&this.disposeIntermediateTensorInfo(d);const f=this.convertAndCacheOnCPU(e,m),b=this.pendingRead.get(e);return this.pendingRead.delete(e),b.forEach(w=>w(f)),this.pendingDisposal.has(e)&&(this.pendingDisposal.delete(e),this.disposeData(e),this.pendingDeletes--),f}checkNumericalProblems(e){if(e==null)return;for(let t=0;tc.query)).filter(c=>c!=null),o=te(this.activeTimers.map(c=>c.name)).filter(c=>c!=null);this.activeTimers=t,s&&(this.programTimersStack=null);const a={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){const c=await Promise.all(i);a.kernelMs=C(c),a.getExtraProfileInfo=()=>c.map((h,d)=>({name:o[d],ms:h})).map(h=>`${h.name}: ${h.ms}`).join(", ")}else a.kernelMs={error:"WebGL query timers are not supported in this environment."};return 
this.uploadWaitMs=0,this.downloadWaitMs=0,a}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:jn(),endMs:null}}endTimer(e){return oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),e):(e.endMs=jn(),e)}async getQueryTime(e){if(oe().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(e);const t=e;return t.endMs-t.startMs}disposeData(e){if(this.pendingDisposal.has(e))return;if(this.pendingRead.has(e)){this.pendingDisposal.add(e),this.pendingDeletes++;return}if(!this.texData.has(e))return;if(this.texData.get(e).complexParentRefCount>0){this.texData.get(e).refCount--;return}this.releaseGPUData(e);const{complexTensorInfos:t}=this.texData.get(e);t!=null&&(this.texData.get(t.real.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(t.real),this.texData.get(t.imag.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(t.imag)),this.texData.delete(e)}releaseGPUData(e){const{texture:t,dtype:n,texShape:s,usage:i,isPacked:o,slice:a}=this.texData.get(e),c=a&&a.origDataId||e,h=this.dataRefCount.get(c);h>1?this.dataRefCount.set(c,h-1):(this.dataRefCount.delete(c),t!=null&&(this.numBytesInGPU-=this.computeBytes(s,n),this.textureManager.releaseTexture(t,s,i,o)));const d=this.texData.get(e);d.texture=null,d.texShape=null,d.isPacked=!1,d.slice=null}getTexture(e){return this.uploadToGPU(e),this.texData.get(e).texture}getDataInfo(e){return this.texData.get(e)}getCPUBackend(){return oe().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=Ki().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(e,t=TX){const n=this.getCPUBackend();return!this.warnedAboutCPUBackend&&n==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. 
Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),n!=null&&e.every(s=>this.texData.get(s.dataId).texture==null&&P(s.shape)this.cpuBackend.stridedSlice(e,t,n,s));if(i)return i;const o=jd(t,n,s);if(o.some(c=>c===0))return sn([],o);const a=new _6(t,s,o);return this.compileAndRun(a,[e])}reverse(e,t){const n=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new R6(e.shape,t):new C6(e.shape,t);return this.compileAndRun(n,[e])}neg(e){const t=this.tryRunOnCpuOrThrow([e],()=>this.cpuBackend.neg(e));if(t)return t;if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,NC,e.dtype);const n=new st(e.shape,NC);return this.compileAndRun(n,[e])}batchMatMul(e,t,n,s){const i=n?e.shape[2]:e.shape[1],o=s?t.shape[1]:t.shape[2],a=n?e.shape[1]:e.shape[2],c=Math.max(e.shape[0],t.shape[0]);if((i===1||o===1)&&a>WC){n&&(e=Ye(e,[0,2,1])),s&&(t=Ye(t,[0,2,1]));const m=o===1?e:e.as3D(c,a,1),f=o===1?2:1,b=o===1?t.as3D(c,1,a):t,w=X(m,b);return w.sum(f,!0)}const h=$n(e.dtype,t.dtype),d=new xS(e.shape,t.shape,[c,i,o],n,s);return this.compileAndRun(d,[e,t],h)}fusedBatchMatMul({a:e,b:t,transposeA:n,transposeB:s,bias:i,activation:o,preluActivationWeights:a}){const c=n?e.shape[2]:e.shape[1],h=s?t.shape[1]:t.shape[2],d=Math.max(e.shape[0],t.shape[0]),m=$n(e.dtype,t.dtype),f=i!=null,b=a!=null,w=o?Wm(o,!0):null,L=new xS(e.shape,t.shape,[d,c,h],n,s,f,w,b),x=[e,t];return i&&x.push(i),a&&x.push(a),this.compileAndRun(L,x,m)}localResponseNormalization4D(e,t,n,s,i){const o=oe().getBool("WEBGL_PACK_NORMALIZATION")?new h6(e.shape,t,n,s,i):new c6(e.shape,t,n,s,i);return this.compileAndRun(o,[e])}LRNGrad(e,t,n,s,i,o,a){const c=new l6(t.shape,s,i,o,a);return this.compileAndRun(c,[t,n,e])}tile(e,t){if(e.dtype==="string"){const s=this.readSync(e.dataId),i=s.map(a=>Kl(a)),o=wt(e.shape,e.dtype,i);return bX(o,t)}const n=new M6(e.shape,t);return this.compileAndRun(n,[e])}pad(e,t,n){const s=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new S6(e.shape,t,n):new L6(e.shape,t,n);return this.compileAndRun(s,[e])}gather(e,t,n){const s=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.gather(e,t,n));if(s)return s;const i=new $8(e.shape,t.size,n);return this.compileAndRun(i,[e,t])}batchToSpaceND(e,t,n){A(e.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((d,m)=>d*m),i=Oh(e.shape,t,s),o=Eh(i.length,t.length),a=Dh(e.shape,t,s),c=Zb(n,t.length),h=Qb(a,n,t.length);return Ye(e.reshape(i),o).reshape(a).slice(c,h)}spaceToBatchND(e,t,n){A(e.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");const s=t.reduce((m,f)=>m*f),i=[[0,0]];i.push(...n);for(let m=1+t.length;mthis.cpuBackend.prod(e,t));if(n)return n;const[s,i]=An(e.shape,t),o=P(i),a=e.as2D(-1,o),c=Ud(e.dtype);return this.reduce(a,"prod",c).reshape(s)}unsortedSegmentSum(e,t,n){let s=0;const i=Xn([s],e.rank);let o=e;i!=null&&(o=Ye(e,i),s=as(1,e.rank)[0]);const a=_C.computeOutShape(o.shape,s,n),c=P([o.shape[s]]),h=o.as2D(-1,c),d=Ud(e.dtype);let m=this.segOpCompute(h,"unsortedSegmentSum",t,d,n).reshape(a);return i!=null&&(m=Ye(m,sh(i))),m}segOpCompute(e,t,n,s,i){const o=e.shape[0],a=e.shape[1],c=_C.segOpComputeOptimalWindowSize(a,i),h={windowSize:c,inSize:a,batchSize:o,numSegments:i},d=new O6(h,t),m=this.compileAndRun(d,[e,n],s);return m.shape[1]===i?m:(n=bh(0,i).tile([a/c]),this.segOpCompute(m,t,n,s,i))}argMinMaxReduce(e,t,n){const 
s=[t];if(Kn("arg"+n.charAt(0).toUpperCase()+n.slice(1),s,e.rank),!oe().getBool("WEBGL_PACK_REDUCE")||e.rank<=2){const[i,o]=An(e.shape,s),a=P(o),c=e.as2D(-1,a);return this.argReduce(c,n).reshape(i)}return this.argReducePacked(e,n)}argMin(e,t){return this.argMinMaxReduce(e,t,"min")}argMax(e,t){return this.argMinMaxReduce(e,t,"max")}cumsum(e,t,n,s){if(t!==e.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${e.rank-1} but got axis=${t}`);const i=e.shape[t];let o=e;for(let a=0;a<=Math.ceil(Math.log2(i))-1;a++){const c=new cC(e.shape,!1,s),h=c.getCustomSetupFunc(a),d=o;o=this.compileAndRun(c,[o],o.dtype,h),d.dispose()}if(n){const a=new cC(e.shape,n,s),c=o;o=this.compileAndRun(a,[o]),c.dispose()}return o}equal(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,a8,"bool");const n=new _n(q5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}less(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.less(e,t));if(n)return n;if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,c8,"bool");const s=new _n(j5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}lessEqual(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,l8,"bool");const n=new _n(K5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}greater(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.greater(e,t));if(n)return n;if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,h8,"bool");const s=new _n(X5,e.shape,t.shape);return this.compileAndRun(s,[e,t],"bool")}greaterEqual(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,u8,"bool");const n=new _n(J5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalNot(e){const t=new st(e.shape,pX);return this.compileAndRun(t,[e])}logicalAnd(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,d8,"bool");const n=new _n(Z5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}logicalOr(e,t){if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,p8,"bool");const n=new _n(Q5,e.shape,t.shape);return this.compileAndRun(n,[e,t],"bool")}select(e,t,n){const s=new E6(e.rank,t.shape,t.rank);return this.compileAndRun(s,[e,t,n],$n(t.dtype,n.dtype))}where(e){Za("tf.where() in webgl locks the UI thread. 
Call tf.whereAsync() instead");const t=e.dataSync();return LX(e.shape,t)}topk(e,t,n){const s=e.dataSync();return wX(s,e.shape,e.dtype,t,n)}min(e,t){Kn("min",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"min",o.dtype).reshape(n)}minimum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.minimum(e,t));if(n)return n;const s=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(f8,e.shape,t.shape):new _n(t8,e.shape,t.shape);return this.compileAndRun(s,[e,t])}mod(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(g8,e.shape,t.shape):new _n(n8,e.shape,t.shape);return this.compileAndRun(n,[e,t])}maximum(e,t){const n=this.tryRunOnCpuOrThrow([e,t],()=>this.cpuBackend.maximum(e,t));if(n)return n;const s=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(m8,e.shape,t.shape):new _n(e8,e.shape,t.shape);return this.compileAndRun(s,[e,t])}all(e,t){Kn("all",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"all",o.dtype).reshape(n)}any(e,t){Kn("any",t,e.rank);const[n,s]=An(e.shape,t),i=P(s),o=e.as2D(-1,i);return this.reduce(o,"any",o.dtype).reshape(n)}floorDiv(e,t){const n=Y5,s="int32";if(oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(e,t,i8,s);const i=new _n(n,e.shape,t.shape);return this.compileAndRun(i,[e,t],s)}packedUnaryOp(e,t,n){const s=new fu(e.shape,t);return this.compileAndRun(s,[e],n)}packedBinaryOp(e,t,n,s,i=!1){const o=new lr(n,e.shape,t.shape,i);return this.compileAndRun(o,[e,t],s)}makeComplexComponentTensorInfo(e,t){return{dataId:t.dataId,dtype:t.dtype,shape:e.shape}}addN(e){if(e.length===1)return e[0];if(e.length>oe().get("WEBGL_MAX_TEXTURES_IN_SHADER")){const o=Math.floor(e.length/2),a=this.addN(e.slice(0,o)),c=this.addN(e.slice(o));return this.addN([a,c])}const t=e.map(o=>o.dtype).reduce((o,a)=>$n(o,a)),n=e.map(o=>o.shape),s=oe().getBool("WEBGL_PACK"),i=s?new s5(e[0].shape,n):new n5(e[0].shape,n);return this.compileAndRun(i,e,t)}pow(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS"),s=n?new lr(r8,e.shape,t.shape):new _n(H5,e.shape,t.shape),i=$n(e.dtype,t.dtype);return this.compileAndRun(s,[e,t],i)}ceil(e){if(this.shouldExecuteOnCPU([e])){const n=YK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,CC,e.dtype);const t=new st(e.shape,CC);return this.compileAndRun(t,[e])}floor(e){if(this.shouldExecuteOnCPU([e])){const n=jK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,RC,e.dtype);const t=new st(e.shape,RC);return this.compileAndRun(t,[e])}sign(e){const t=new st(e.shape,Y6);return this.compileAndRun(t,[e])}isNaN(e){const t=new st(e.shape,H6);return this.compileAndRun(t,[e],"bool")}isInf(e){const t=new st(e.shape,q6);return this.compileAndRun(t,[e],"bool")}isFinite(e){const t=new st(e.shape,j6);return this.compileAndRun(t,[e],"bool")}round(e){const t=new st(e.shape,K6);return this.compileAndRun(t,[e])}exp(e){if(this.shouldExecuteOnCPU([e])){const n=HK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,OC,e.dtype);const t=new st(e.shape,OC);return this.compileAndRun(t,[e])}expm1(e){if(this.shouldExecuteOnCPU([e])){const n=qK(this.texData.get(e.dataId).values,e.dtype);return 
this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,EC,e.dtype);const t=new st(e.shape,EC);return this.compileAndRun(t,[e])}softmax(e,t){const n=qe([t],e.shape),s=Qn(e,n),i=vn(s.shape,n),o=Re(e,s.reshape(i)),a=this.exp(o),c=this.sum(a,n).reshape(i);return We(a,c)}log(e){if(this.shouldExecuteOnCPU([e])){const n=KK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,fX,e.dtype);const t=new st(e.shape,X6);return this.compileAndRun(t,[e])}log1p(e){const t=new st(e.shape,J6);return this.compileAndRun(t,[e])}sqrt(e){const t=new st(e.shape,Z6);return this.compileAndRun(t,[e])}rsqrt(e){if(this.shouldExecuteOnCPU([e])){const n=ZK(this.texData.get(e.dataId).values,e.dtype);return this.makeOutput(e.shape,e.dtype,n)}const t=new st(e.shape,Q6);return this.compileAndRun(t,[e])}reciprocal(e){const t=new st(e.shape,dX);return this.compileAndRun(t,[e])}relu(e){let t;return oe().getBool("WEBGL_PACK")?t=new fu(e.shape,DC):t=new st(e.shape,TC),this.compileAndRun(t,[e])}relu6(e){let t;return oe().getBool("WEBGL_PACK")?t=new fu(e.shape,kC):t=new st(e.shape,AC),this.compileAndRun(t,[e])}prelu(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(iC,e.shape,t.shape):new _n(sC,e.shape,t.shape);return this.compileAndRun(n,[e,t])}elu(e){if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,FC,e.dtype);const t=new st(e.shape,vC);return this.compileAndRun(t,[e])}eluDer(e,t){const n=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new lr(o8,e.shape,t.shape):new _n(s8,e.shape,t.shape);return this.compileAndRun(n,[e,t])}selu(e){const t=new st(e.shape,V6);return this.compileAndRun(t,[e])}clip(e,t,n){let s;oe().getBool("WEBGL_PACK_CLIP")?s=new b8(e.shape):s=new y8(e.shape);const i=s.getCustomSetupFunc(t,n);return this.compileAndRun(s,[e],null,i)}abs(e){if(this.shouldExecuteOnCPU([e])&&e.dtype!=="complex64"){const n=VK(this.texData.get(e.dataId).values);return this.makeOutput(e.shape,e.dtype,n)}if(oe().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(e,xC,e.dtype);const t=new st(e.shape,xC);return this.compileAndRun(t,[e])}complexAbs(e){const t=this.texData.get(e.dataId),n=new w8(e.shape),s=[this.makeComplexComponentTensorInfo(e,t.complexTensorInfos.real),this.makeComplexComponentTensorInfo(e,t.complexTensorInfos.imag)];return this.compileAndRun(n,s)}sigmoid(e){const t=new st(e.shape,eX);return this.compileAndRun(t,[e])}softplus(e){const t=new st(e.shape,tX);return this.compileAndRun(t,[e])}asin(e){const t=new st(e.shape,nX);return this.compileAndRun(t,[e])}acos(e){const t=new st(e.shape,sX);return this.compileAndRun(t,[e])}atan(e){const t=new st(e.shape,iX);return this.compileAndRun(t,[e])}sinh(e){const t=new st(e.shape,rX);return this.compileAndRun(t,[e])}cosh(e){const t=new st(e.shape,oX);return this.compileAndRun(t,[e])}tanh(e){const t=new st(e.shape,aX);return this.compileAndRun(t,[e])}asinh(e){const t=new st(e.shape,cX);return this.compileAndRun(t,[e])}acosh(e){const t=new st(e.shape,lX);return this.compileAndRun(t,[e])}atanh(e){const t=new st(e.shape,hX);return this.compileAndRun(t,[e])}erf(e){const t=new st(e.shape,uX);return this.compileAndRun(t,[e])}step(e,t){const n=new st(e.shape,G6(t));return this.compileAndRun(n,[e])}conv2dByMatMul(e,t,n,s,i,o){const 
a=e.shape,c=this.texData.get(e.dataId),h=n.inChannels,d=a[0]*a[1]*a[2],m=n.outChannels,f=n.dataFormat==="channelsLast",b=!1,w=!1,L=(d===1||m===1)&&h>WC,x=a[2]%2!==0&&!!c.isPacked;if(L||!oe().getBool("WEBGL_LAZILY_UNPACK")||!oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!x){const U=f?a[0]*a[1]*a[2]:a[0]*a[2]*a[3],$=K(e,[1,U,n.inChannels]),Y=K(t,[1,n.inChannels,n.outChannels]),j=this.fusedBatchMatMul({a:$,b:Y,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o});return K(j,n.outShape)}const v=f?a[0]*a[1]*(a[2]+1):a[0]*a[2]*(a[3]+1),N={dataId:e.dataId,shape:[1,v,n.inChannels],dtype:e.dtype},O=c.shape;c.shape=c.shape.slice(),c.shape[c.shape.length-2]++,A(Rm(c.shape,N.shape),()=>`packed reshape ${c.shape} to ${N.shape} isn't free`);const E=K(t,[1,n.inChannels,n.outChannels]),k=this.fusedBatchMatMul({a:N,b:E,transposeA:b,transposeB:w,bias:s,activation:i,preluActivationWeights:o}),F=this.texData.get(k.dataId);return A(F.isPacked,()=>"batchMatMul result is expected to be packed"),c.shape=O,F.shape=n.outShape,Ki().makeTensorFromDataId(k.dataId,n.outShape,k.dtype)}conv2dWithIm2Row(e,t,n,s,i,o){const{filterWidth:a,filterHeight:c,inChannels:h,outWidth:d,outHeight:m,dataFormat:f}=n,b=f==="channelsLast",w=a*c*h,L=m*d,x=[w,L],v=!0,N=!1,O=e.squeeze([0]),E=t.reshape([1,w,-1]),k=new a6(x,O.shape,n),F=this.compileAndRun(k,[O]).reshape([1,x[0],x[1]]),U=s!=null,$=o!=null,Y=i?Wm(i,!0):null,j=new xS(F.shape,E.shape,[1,L,n.outChannels],v,N,U,Y,$),Z=[F,E];s&&Z.push(s),$&&Z.push(o);const ie=this.compileAndRun(j,Z);return b?ie.reshape([1,m,d,n.outChannels]):ie.reshape([1,n.outChannels,m,d])}fusedConv2d({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n,s,i,o);if(oe().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n,s,i,o);const a=s!=null,c=o!=null,h=i?Wm(i,!1):null,d=new rC(n,a,h,c),m=[e,t];return s&&m.push(s),o&&m.push(o),this.compileAndRun(d,m)}conv2d(e,t,n){if(n.filterHeight===1&&n.filterWidth===1&&n.dilationHeight===1&&n.dilationWidth===1&&n.strideHeight===1&&n.strideWidth===1&&(n.padInfo.type==="SAME"||n.padInfo.type==="VALID"))return this.conv2dByMatMul(e,t,n);if(oe().getBool("WEBGL_CONV_IM2COL")&&e.shape[0]===1)return this.conv2dWithIm2Row(e,t,n);const s=new rC(n);return this.compileAndRun(s,[e,t])}conv2dDerInput(e,t,n){const s=new S8(n);return this.compileAndRun(s,[e,t])}conv2dDerFilter(e,t,n){const s=new L8(n);return this.compileAndRun(s,[e,t])}fusedDepthwiseConv2D({input:e,filter:t,convInfo:n,bias:s,activation:i,preluActivationWeights:o}){const a=oe().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1,c=i?Wm(i,a):null,h=[e,t],d=s!=null,m=o!=null;d&&h.push(s),m&&h.push(o);let f;return a?(f=new aC(n,d,c,m),this.compileAndRun(f,h)):(f=new oC(n,d,c,m),this.compileAndRun(f,h))}depthwiseConv2D(e,t,n){let s;return oe().getBool("WEBGL_PACK_DEPTHWISECONV")&&n.strideWidth<=2&&n.outChannels/n.inChannels===1?(s=new aC(n),this.compileAndRun(s,[e,t])):(s=new oC(n),this.compileAndRun(s,[e,t]))}depthwiseConv2DDerInput(e,t,n){const s=new A8(n);return this.compileAndRun(s,[e,t])}depthwiseConv2DDerFilter(e,t,n){const s=new T8(n);return this.compileAndRun(s,[e,t])}conv3d(e,t,n){const s=new v8(n);return this.compileAndRun(s,[e,t])}conv3dDerInput(e,t,n){const s=new x8(n);return 
this.compileAndRun(s,[e,t])}conv3dDerFilter(e,t,n){const s=new I8(n);return this.compileAndRun(s,[e,t])}unstack(e,t){const n=e.shape[t],s=new Array(e.rank-1);let i=0;for(let h=0;h1,()=>`blockSize should be > 1 for depthToSpace, but was: ${t}`);const s=e.shape[0],i=n==="NHWC"?e.shape[1]:e.shape[2],o=n==="NHWC"?e.shape[2]:e.shape[3],a=n==="NHWC"?e.shape[3]:e.shape[1],c=i*t,h=o*t,d=a/(t*t),m=n==="NHWC"?[s,c,h,d]:[s,d,c,h],f=new O8(m,t,n);return this.compileAndRun(f,[e])}split(e,t,n){return yX(e,t,n)}scatterND(e,t,n){const{sliceRank:s,numUpdates:i,sliceSize:o,strides:a,outputSize:c}=_a(t,e,n),h=[c/o,o],d=e.reshape([i,s]),m=t.reshape([i,o]);if(c===0)return JA(sn([]),n);const f=Ce(0),b=new wC(i,s,d.rank,m.rank,a,h),w=this.compileAndRun(b,[m,d,f]);return w.reshape(n)}sparseToDense(e,t,n,s){const{sliceRank:i,numUpdates:o,strides:a,outputSize:c}=_a(t,e,n),h=!1,d=new wC(o,i,e.rank,t.rank,a,[c,1],h),m=this.compileAndRun(d,[t,e,s]);return m.reshape(n)}gatherND(e,t){const n=t.shape,s=n[n.length-1],[i,o,a,c]=Hd(e,t),h=t.reshape([o,s]),d=e.reshape([e.size/a,a]),m=new B8(s,c,[o,a]),f=this.compileAndRun(m,[d,h]);return f.reshape(i)}fill(e,t,n){if(n=n||wa(t),n==="string"){const s=ws(n,P(e));return s.fill(t),Ki().makeTensor(s,e,n,this)}else{const s=new W8(e,t),i=s.getCustomSetupFunc(t);return this.compileAndRun(s,[],n,i)}}onesLike(e){if(e.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(e.shape,1,e.dtype)}zerosLike(e){return this.fill(e.shape,e.dtype==="string"?"":0,e.dtype)}linspace(e,t,n){return aw(e,t,n)}makeTensorInfo(e,t,n){const s=this.write(n,e,t);return this.texData.get(s).usage=null,{dataId:s,shape:e,dtype:t}}makeOutput(e,t,n){const{dataId:s}=this.makeTensorInfo(e,t,n);return Ki().makeTensorFromDataId(s,e,t,this)}unpackTensor(e){const t=new gX(e.shape);return this.runWebGLProgram(t,[e],e.dtype)}packTensor(e){const t=new f6(e.shape),n=!0;return this.runWebGLProgram(t,[e],e.dtype,null,n)}packedReshape(e,t){const n=[mc(e.shape),...fc(e.shape)],s={dtype:e.dtype,shape:n,dataId:e.dataId},i=[mc(t),...fc(t)],o=new bC(i,n),a=!0,c=this.runWebGLProgram(o,[s],e.dtype,null,a);return{dataId:c.dataId,shape:t,dtype:c.dtype}}decode(e){const t=this.texData.get(e),{isPacked:n,shape:s,dtype:i}=t,o=wS(s);let a;n?a=new R8(o):a=new C8(o);const c=!0,h=this.runWebGLProgram(a,[{shape:o,dtype:i,dataId:e}],i,null,c);return{dtype:i,shape:s,dataId:h.dataId}}runWebGLProgram(e,t,n,s,i=!1){const o=this.makeTensorInfo(e.outputShape,n),a=this.texData.get(o.dataId);if(e.packedOutput&&(a.isPacked=!0),e.outPackingScheme===lu.DENSE){const L=uu(e.outputShape);a.texShape=L.map(x=>x*2)}if(e.outTexUsage!=null&&(a.usage=e.outTexUsage),P(o.shape)===0)return a.values=bt(o.dtype,0),o;const c=[],h=t.map(L=>{if(L.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let x=this.texData.get(L.dataId);if(x.texture==null){if(!e.packedInputs&&P(L.shape)<=oe().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:L.shape,texData:null,isUniform:!0,uniformValues:x.values};e.packedInputs&&(x.isPacked=!0,x.shape=L.shape)}else if(!!x.isPacked!==!!e.packedInputs)L=x.isPacked?this.unpackTensor(L):this.packTensor(L),c.push(L),x=this.texData.get(L.dataId);else if(x.isPacked&&!Rm(x.shape,L.shape)){const v=L,N=L.shape;L.shape=x.shape,L=this.packedReshape(L,N),c.push(L),x=this.texData.get(L.dataId),v.shape=N}return this.uploadToGPU(L.dataId),{shape:L.shape,texData:x,isUniform:!1}});this.uploadToGPU(o.dataId);const d={shape:o.shape,texData:a,isUniform:!1},m=o6(e,h,d),f=this.getAndSaveBinary(m,()=>i6(this.gpgpu,e,h,d)),b=this.activeTimers!=null;let w;if(b&&(w=this.startTimer()),r6(this.gpgpu,f,h,d,s),c.forEach(L=>this.disposeIntermediateTensorInfo(L)),b&&(w=this.endTimer(w),this.activeTimers.push({name:e.constructor.name,query:this.getQueryTime(w)})),!oe().getBool("WEBGL_LAZILY_UNPACK")&&a.isPacked&&i===!1){const L=this.unpackTensor(o);return this.disposeIntermediateTensorInfo(o),L}return o}compileAndRun(e,t,n,s,i=!1){n=n||t[0].dtype;const o=this.runWebGLProgram(e,t,n,s,i);return Ki().makeTensorFromDataId(o.dataId,o.shape,o.dtype)}getAndSaveBinary(e,t){return e in this.binaryCache||(this.binaryCache[e]=t()),this.binaryCache[e]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!oe().getBool("IS_TEST")){const e=Object.keys(this.binaryCache);e.forEach(t=>{this.gpgpu.deleteProgram(this.binaryCache[t].webGLProgram),delete this.binaryCache[t]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=Q(()=>{if(!oe().get("WEBGL_RENDER_FLOAT32_ENABLED")){const e=oe().getBool("DEBUG");oe().set("DEBUG",!1);const t=this.abs(Ce(1e-8)).dataSync()[0];if(oe().set("DEBUG",e),t>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?SX:IX}uploadToGPU(e){const t=this.texData.get(e),{shape:n,dtype:s,values:i,texture:o,usage:a,isPacked:c}=t;if(o!=null)return;const h=this.activeTimers!=null;let d;h&&(d=jn());let m=t.texShape;if(m==null&&(m=_K(n,c),t.texShape=m),i!=null){const f=wS(n);let b,w=m[1],L=m[0];const x=i instanceof Uint8Array;c?([w,L]=pc(m[0],m[1]),b=new _8(f,[L,w],x)):b=new F8(f,[L,w],x);const v=this.makeTensorInfo([L,w],s);x?this.texData.get(v.dataId).usage=Ns.PIXELS:this.texData.get(v.dataId).usage=Ns.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(v.dataId),w,L,i);const N=!0,O=this.runWebGLProgram(b,[v],s,null,N),E=this.texData.get(O.dataId);t.texture=E.texture,t.texShape=E.texShape,t.isPacked=E.isPacked,t.usage=E.usage,this.disposeIntermediateTensorInfo(v),this.texData.delete(O.dataId),t.values=null,h&&(this.uploadWaitMs+=jn()-d)}else{const f=this.acquireTexture(m,a,s,c);t.texture=f}}convertAndCacheOnCPU(e,t){const n=this.texData.get(e),{dtype:s}=n;return this.releaseGPUData(e),t!=null&&(n.values=CX(t,s)),n.values}acquireTexture(e,t,n,s){if(this.numBytesInGPU+=this.computeBytes(e,n),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){const 
i=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${i} MB, most likely due to a memory leak`)}return this.textureManager.acquireTexture(e,t,s)}computeBytes(e,t){return e[0]*e[1]*Bg(t)}tryRunOnCpuOrThrow(e,t){if(this.shouldExecuteOnCPU(e))try{return t()}catch(n){if(oe().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function CX(e,t){if(t==="float32"||t==="complex64")return e;if(t==="int32"||t==="bool"){const n=t==="int32"?new Int32Array(e.length):new Uint8Array(e.length);for(let s=0;snew NX,2);const one={forceHalfFloat:OX};function ur(e){const{inputs:t,backend:n}=e,{x:s}=t;return n.incRef(s.dataId),{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}const EX={kernelName:xl,backendName:"webgl",kernelFunc:ur};function Lc(e){const{inputs:t,backend:n}=e,{real:s,imag:i}=t,o=n.makeTensorInfo(s.shape,"complex64"),a=n.texData.get(o.dataId),c=ur({inputs:{x:s},backend:n}),h=n.texData.get(c.dataId);h.complexParentRefCount++;const d=ur({inputs:{x:i},backend:n}),m=n.texData.get(d.dataId);return m.complexParentRefCount++,a.complexTensorInfos={real:c,imag:d},o}const DX={kernelName:rd,backendName:"webgl",kernelFunc:Lc};const $C="if (isnan(x)) return x;",kX=` + `}}let{segment_util:oE}=ov,EJ=sv,DJ=iv,AJ=av,FJ=Id,RJ=1e-7,PJ=1e-4,zm={};function OJ(n){return n in zm||(zm[n]={}),zm[n]}function Wm(n,t=!1){if(n==="linear")return t?SJ:eJ;if(n==="relu")return t?eE:jI;if(n==="elu")return t?rE:XI;if(n==="relu6")return t?nE:KI;if(n==="prelu")return t?SI:CI;throw new Error(`Activation ${n} has not been implemented for the WebGL backend.`)}let LJ=128,MJ=600;function BJ(){return ct().global.screen==null?1024:ct().global.screen.height*ct().global.screen.width*window.devicePixelRatio*MJ/1024/1024}let sE=1e3;class zJ extends d{constructor(t){super();if(this.pendingRead=new WeakMap,this.pendingDisposal=new WeakSet,this.dataRefCount=new WeakMap,this.numBytesInGPU=0,this.uploadWaitMs=0,this.downloadWaitMs=0,this.warnedAboutMemory=!1,this.warnedAboutCPUBackend=!1,this.pendingDeletes=0,this.disposed=!1,!ct().getBool("HAS_WEBGL"))throw new Error("WebGL is not supported on this device");if(t==null){let e=jo(ct().getNumber("WEBGL_VERSION"));this.binaryCache=OJ(ct().getNumber("WEBGL_VERSION")),this.gpgpu=new m7(e),this.canvas=e.canvas,this.gpgpuCreatedLocally=!0}else this.gpgpu=t,this.binaryCache={},this.gpgpuCreatedLocally=!1,this.canvas=t.gl.canvas;this.textureManager=new X7(this.gpgpu),this.numMBBeforeWarning=BJ(),this.texData=new h(this,ps())}numDataIds(){return this.texData.numDataIds()+(this.cpuBackend?this.cpuBackend.numDataIds():0)-this.pendingDeletes}write(t,e,r){if((ct().getBool("WEBGL_CHECK_NUMERICAL_PROBLEMS")||ct().getBool("DEBUG"))&&this.checkNumericalProblems(t),r==="complex64"&&t!=null)throw new Error("Cannot write to a complex64 dtype. Please use tf.complex(real, imag).");let o={};return this.texData.set(o,{shape:e,dtype:r,values:t,usage:Mr.UPLOAD,refCount:1,complexParentRefCount:0}),o}incRef(t){let e=this.texData.get(t);e.refCount++}decRef(t){if(this.texData.has(t)){let e=this.texData.get(t);e.refCount--}}move(t,e,r,o){if(ct().getBool("DEBUG")&&this.checkNumericalProblems(e),o==="complex64")throw new Error("Cannot write to a complex64 dtype. 
Please use tf.complex(real, imag).");this.texData.set(t,{shape:r,dtype:o,values:e,usage:Mr.UPLOAD,refCount:1,complexParentRefCount:0})}disposeIntermediateTensorInfo(t){let e=t.dataId;if(this.texData.has(e)){let r=this.texData.get(e);r.refCount--,r.refCount<1&&this.disposeData(e)}}readSync(t){let e=this.texData.get(t),{values:r,dtype:o,complexTensorInfos:s,slice:c,shape:l,isPacked:p}=e;if(c!=null){let b;p?b=new ch(l,Bm):b=new ue(l,Bm);let v=this.runWebGLProgram(b,[{dataId:t,shape:l,dtype:o}],o),T=this.readSync(v.dataId);return this.disposeIntermediateTensorInfo(v),T}if(r!=null)return this.convertAndCacheOnCPU(t);if(o==="string")return r;let f=this.activeTimers!=null,m;f&&(m=or());let y;if(o==="complex64"){let b=this.readSync(s.real.dataId),v=this.readSync(s.imag.dataId);y=ys(b,v)}else y=this.getValuesFromTexture(t);return f&&(this.downloadWaitMs+=or()-m),this.convertAndCacheOnCPU(t,y)}async read(t){if(this.pendingRead.has(t)){let T=this.pendingRead.get(t);return new Promise(N=>T.push(N))}let e=this.texData.get(t),{values:r,shape:o,slice:s,dtype:c,complexTensorInfos:l,isPacked:p}=e;if(s!=null){let T;p?T=new ch(o,Bm):T=new ue(o,Bm);let N=this.runWebGLProgram(T,[{dataId:t,shape:o,dtype:c}],c),S=this.read(N.dataId);return this.disposeIntermediateTensorInfo(N),S}if(r!=null)return this.convertAndCacheOnCPU(t);if(!ct().getBool("WEBGL_DOWNLOAD_FLOAT_ENABLED")&&ct().getNumber("WEBGL_VERSION")===2)throw new Error("tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and WEBGL_VERSION=2 not yet supported.");let f=null,m;if(c!=="complex64"&&ct().get("WEBGL_BUFFER_SUPPORTED")){m=this.decode(t);let T=this.texData.get(m.dataId);f=this.gpgpu.createBufferFromTexture(T.texture,...oh(o))}this.pendingRead.set(t,[]),c!=="complex64"&&await this.gpgpu.createAndWaitForFence();let y;if(c==="complex64"){let T=await Promise.all([this.read(l.real.dataId),this.read(l.imag.dataId)]),N=T[0],S=T[1];y=ys(N,S)}else if(f==null)y=this.getValuesFromTexture(t);else{let T=G(o);y=this.gpgpu.downloadFloat32MatrixFromBuffer(f,T)}m!=null&&this.disposeIntermediateTensorInfo(m);let b=this.convertAndCacheOnCPU(t,y),v=this.pendingRead.get(t);return this.pendingRead.delete(t),v.forEach(T=>T(b)),this.pendingDisposal.has(t)&&(this.pendingDisposal.delete(t),this.disposeData(t),this.pendingDeletes--),b}checkNumericalProblems(t){if(t==null)return;for(let e=0;ep.query)).filter(p=>p!=null),c=tt(this.activeTimers.map(p=>p.name)).filter(p=>p!=null);this.activeTimers=e,o&&(this.programTimersStack=null);let l={uploadWaitMs:this.uploadWaitMs,downloadWaitMs:this.downloadWaitMs,kernelMs:null,wallMs:null};if(ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0){let p=await Promise.all(s);l.kernelMs=C(p),l.getExtraProfileInfo=()=>p.map((f,m)=>({name:c[m],ms:f})).map(f=>`${f.name}: ${f.ms}`).join(", ")}else l.kernelMs={error:"WebGL query timers are not supported in this environment."};return this.uploadWaitMs=0,this.downloadWaitMs=0,l}memory(){return{unreliable:!1,numBytesInGPU:this.numBytesInGPU,numBytesInGPUAllocated:this.textureManager.numBytesAllocated,numBytesInGPUFree:this.textureManager.numBytesFree}}startTimer(){return ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?this.gpgpu.beginQuery():{startMs:or(),endMs:null}}endTimer(t){return ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0?(this.gpgpu.endQuery(),t):(t.endMs=or(),t)}async getQueryTime(t){if(ct().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE")>0)return this.gpgpu.waitForQueryAndGetTime(t);let e=t;return 
e.endMs-e.startMs}disposeData(t){if(this.pendingDisposal.has(t))return;if(this.pendingRead.has(t)){this.pendingDisposal.add(t),this.pendingDeletes++;return}if(!this.texData.has(t))return;if(this.texData.get(t).complexParentRefCount>0){this.texData.get(t).refCount--;return}this.releaseGPUData(t);let{complexTensorInfos:e}=this.texData.get(t);e!=null&&(this.texData.get(e.real.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(e.real),this.texData.get(e.imag.dataId).complexParentRefCount--,this.disposeIntermediateTensorInfo(e.imag)),this.texData.delete(t)}releaseGPUData(t){let{texture:e,dtype:r,texShape:o,usage:s,isPacked:c,slice:l}=this.texData.get(t),p=l&&l.origDataId||t,f=this.dataRefCount.get(p);f>1?this.dataRefCount.set(p,f-1):(this.dataRefCount.delete(p),e!=null&&(this.numBytesInGPU-=this.computeBytes(o,r),this.textureManager.releaseTexture(e,o,s,c)));let m=this.texData.get(t);m.texture=null,m.texShape=null,m.isPacked=!1,m.slice=null}getTexture(t){return this.uploadToGPU(t),this.texData.get(t).texture}getDataInfo(t){return this.texData.get(t)}getCPUBackend(){return ct().getBool("WEBGL_CPU_FORWARD")?(this.cpuBackend==null&&(this.cpuBackend=ps().findBackend("cpu")),this.cpuBackend):null}shouldExecuteOnCPU(t,e=LJ){let r=this.getCPUBackend();return!this.warnedAboutCPUBackend&&r==null&&(console.warn("Your application contains ops that are small enough to be executed on the CPU backend, however the CPU backend cannot be found. Consider importing the CPU backend (@tensorflow/tfjs-backend-cpu) for better performance."),this.warnedAboutCPUBackend=!0),r!=null&&t.every(o=>this.texData.get(o.dataId).texture==null&&G(o.shape)this.cpuBackend.stridedSlice(t,e,r,o));if(s)return s;let c=Zf(e,r,o);if(c.some(p=>p===0))return un([],c);let l=new K7(e,o,c);return this.compileAndRun(l,[t])}reverse(t,e){let r=ct().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new V7(t.shape,e):new W7(t.shape,e);return this.compileAndRun(r,[t])}neg(t){let e=this.tryRunOnCpuOrThrow([t],()=>this.cpuBackend.neg(t));if(e)return e;if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,YI,t.dtype);let r=new ue(t.shape,YI);return this.compileAndRun(r,[t])}batchMatMul(t,e,r,o){let s=r?t.shape[2]:t.shape[1],c=o?e.shape[1]:e.shape[2],l=r?t.shape[1]:t.shape[2],p=Math.max(t.shape[0],e.shape[0]);if((s===1||c===1)&&l>sE){r&&(t=Kt(t,[0,2,1])),o&&(e=Kt(e,[0,2,1]));let y=c===1?t:t.as3D(p,l,1),b=c===1?2:1,v=c===1?e.as3D(p,1,l):e,T=nt(y,v);return T.sum(b,!0)}let f=jn(t.dtype,e.dtype),m=new x1(t.shape,e.shape,[p,s,c],r,o);return this.compileAndRun(m,[t,e],f)}fusedBatchMatMul({a:t,b:e,transposeA:r,transposeB:o,bias:s,activation:c,preluActivationWeights:l}){let p=r?t.shape[2]:t.shape[1],f=o?e.shape[1]:e.shape[2],m=Math.max(t.shape[0],e.shape[0]),y=jn(t.dtype,e.dtype),b=s!=null,v=l!=null,T=c?Wm(c,!0):null,N=new x1(t.shape,e.shape,[m,p,f],r,o,b,T,v),S=[t,e];return s&&S.push(s),l&&S.push(l),this.compileAndRun(N,S,y)}localResponseNormalization4D(t,e,r,o,s){let c=ct().getBool("WEBGL_PACK_NORMALIZATION")?new k7(t.shape,e,r,o,s):new v7(t.shape,e,r,o,s);return this.compileAndRun(c,[t])}LRNGrad(t,e,r,o,s,c,l){let p=new T7(e.shape,o,s,c,l);return this.compileAndRun(p,[e,r,t])}tile(t,e){if(t.dtype==="string"){let o=this.readSync(t.dataId),s=o.map(l=>Uu(l)),c=Se(t.shape,t.dtype,s);return DJ(c,e)}let r=new Q7(t.shape,e);return this.compileAndRun(r,[t])}pad(t,e,r){let o=ct().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new R7(t.shape,e,r):new F7(t.shape,e,r);return this.compileAndRun(o,[t])}gather(t,e,r){let 
o=this.tryRunOnCpuOrThrow([t,e],()=>this.cpuBackend.gather(t,e,r));if(o)return o;let s=new YY(t.shape,e.size,r);return this.compileAndRun(s,[t,e])}batchToSpaceND(t,e,r){_(t.rank<=4,()=>"batchToSpaceND for rank > 4 with a WebGL backend not implemented yet");let o=e.reduce((m,y)=>m*y),s=Np(t.shape,e,o),c=_p(s.length,e.length),l=Cp(t.shape,e,o),p=Kw(r,e.length),f=Xw(l,r,e.length);return Kt(t.reshape(s),c).reshape(l).slice(p,f)}spaceToBatchND(t,e,r){_(t.rank<=4,()=>"spaceToBatchND for rank > 4 with a WebGL backend not implemented yet");let o=e.reduce((y,b)=>y*b),s=[[0,0]];s.push(...r);for(let y=1+e.length;ythis.cpuBackend.prod(t,e));if(r)return r;let[o,s]=Fn(t.shape,e),c=G(s),l=t.as2D(-1,c),p=Vf(t.dtype);return this.reduce(l,"prod",p).reshape(o)}unsortedSegmentSum(t,e,r){let o=0,s=ir([o],t.rank),c=t;s!=null&&(c=Kt(t,s),o=xr(1,t.rank)[0]);let l=oE.computeOutShape(c.shape,o,r),p=G([c.shape[o]]),f=c.as2D(-1,p),m=Vf(t.dtype),y=this.segOpCompute(f,"unsortedSegmentSum",e,m,r).reshape(l);return s!=null&&(y=Kt(y,Ju(s))),y}segOpCompute(t,e,r,o,s){let c=t.shape[0],l=t.shape[1],p=oE.segOpComputeOptimalWindowSize(l,s),f={windowSize:p,inSize:l,batchSize:c,numSegments:s},m=new G7(f,e),y=this.compileAndRun(m,[t,r],o);return y.shape[1]===s?y:(r=hp(0,s).tile([l/p]),this.segOpCompute(y,e,r,o,s))}argMinMaxReduce(t,e,r){let o=[e];if(sr("arg"+r.charAt(0).toUpperCase()+r.slice(1),o,t.rank),!ct().getBool("WEBGL_PACK_REDUCE")||t.rank<=2){let[s,c]=Fn(t.shape,o),l=G(c),p=t.as2D(-1,l);return this.argReduce(p,r).reshape(s)}return this.argReducePacked(t,r)}argMin(t,e){return this.argMinMaxReduce(t,e,"min")}argMax(t,e){return this.argMinMaxReduce(t,e,"max")}cumsum(t,e,r,o){if(e!==t.rank-1)throw new Error(`WebGL cumsum shader expects an inner-most axis=${t.rank-1} but got axis=${e}`);let s=t.shape[e],c=t;for(let l=0;l<=Math.ceil(Math.log2(s))-1;l++){let p=new DI(t.shape,!1,o),f=p.getCustomSetupFunc(l),m=c;c=this.compileAndRun(p,[c],c.dtype,f),m.dispose()}if(r){let l=new DI(t.shape,r,o),p=c;c=this.compileAndRun(l,[c]),p.dispose()}return c}equal(t,e){if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,wY,"bool");let r=new Hn(iY,t.shape,e.shape);return this.compileAndRun(r,[t,e],"bool")}less(t,e){let r=this.tryRunOnCpuOrThrow([t,e],()=>this.cpuBackend.less(t,e));if(r)return r;if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,vY,"bool");let o=new Hn(aY,t.shape,e.shape);return this.compileAndRun(o,[t,e],"bool")}lessEqual(t,e){if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,TY,"bool");let r=new Hn(cY,t.shape,e.shape);return this.compileAndRun(r,[t,e],"bool")}greater(t,e){let r=this.tryRunOnCpuOrThrow([t,e],()=>this.cpuBackend.greater(t,e));if(r)return r;if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,kY,"bool");let o=new Hn(lY,t.shape,e.shape);return this.compileAndRun(o,[t,e],"bool")}greaterEqual(t,e){if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,NY,"bool");let r=new Hn(uY,t.shape,e.shape);return this.compileAndRun(r,[t,e],"bool")}logicalNot(t){let e=new ue(t.shape,CJ);return this.compileAndRun(e,[t])}logicalAnd(t,e){if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,_Y,"bool");let r=new Hn(pY,t.shape,e.shape);return this.compileAndRun(r,[t,e],"bool")}logicalOr(t,e){if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,CY,"bool");let r=new Hn(hY,t.shape,e.shape);return this.compileAndRun(r,[t,e],"bool")}select(t,e,r){let o=new 
U7(t.rank,e.shape,e.rank);return this.compileAndRun(o,[t,e,r],jn(e.dtype,r.dtype))}where(t){Bc("tf.where() in webgl locks the UI thread. Call tf.whereAsync() instead");let e=t.dataSync();return FJ(t.shape,e)}topk(t,e,r){let o=t.dataSync();return AJ(o,t.shape,t.dtype,e,r)}min(t,e){sr("min",e,t.rank);let[r,o]=Fn(t.shape,e),s=G(o),c=t.as2D(-1,s);return this.reduce(c,"min",c.dtype).reshape(r)}minimum(t,e){let r=this.tryRunOnCpuOrThrow([t,e],()=>this.cpuBackend.minimum(t,e));if(r)return r;let o=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new Ns($Y,t.shape,e.shape):new Hn(dY,t.shape,e.shape);return this.compileAndRun(o,[t,e])}mod(t,e){let r=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new Ns(IY,t.shape,e.shape):new Hn(mY,t.shape,e.shape);return this.compileAndRun(r,[t,e])}maximum(t,e){let r=this.tryRunOnCpuOrThrow([t,e],()=>this.cpuBackend.maximum(t,e));if(r)return r;let o=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new Ns(SY,t.shape,e.shape):new Hn(fY,t.shape,e.shape);return this.compileAndRun(o,[t,e])}all(t,e){sr("all",e,t.rank);let[r,o]=Fn(t.shape,e),s=G(o),c=t.as2D(-1,s);return this.reduce(c,"all",c.dtype).reshape(r)}any(t,e){sr("any",e,t.rank);let[r,o]=Fn(t.shape,e),s=G(o),c=t.as2D(-1,s);return this.reduce(c,"any",c.dtype).reshape(r)}floorDiv(t,e){let r=oY,o="int32";if(ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"))return this.packedBinaryOp(t,e,yY,o);let s=new Hn(r,t.shape,e.shape);return this.compileAndRun(s,[t,e],o)}packedUnaryOp(t,e,r){let o=new ch(t.shape,e);return this.compileAndRun(o,[t],r)}packedBinaryOp(t,e,r,o,s=!1){let c=new Ns(r,t.shape,e.shape,s);return this.compileAndRun(c,[t,e],o)}makeComplexComponentTensorInfo(t,e){return{dataId:e.dataId,dtype:e.dtype,shape:t.shape}}addN(t){if(t.length===1)return t[0];if(t.length>ct().get("WEBGL_MAX_TEXTURES_IN_SHADER")){let c=Math.floor(t.length/2),l=this.addN(t.slice(0,c)),p=this.addN(t.slice(c));return this.addN([l,p])}let e=t.map(c=>c.dtype).reduce((c,l)=>jn(c,l)),r=t.map(c=>c.shape),o=ct().getBool("WEBGL_PACK"),s=o?new gX(t[0].shape,r):new mX(t[0].shape,r);return this.compileAndRun(s,t,e)}pow(t,e){let r=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS"),o=r?new Ns(bY,t.shape,e.shape):new Hn(sY,t.shape,e.shape),s=jn(t.dtype,e.dtype);return this.compileAndRun(o,[t,e],s)}ceil(t){if(this.shouldExecuteOnCPU([t])){let r=oX(this.texData.get(t.dataId).values,t.dtype);return this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,JI,t.dtype);let e=new ue(t.shape,JI);return this.compileAndRun(e,[t])}floor(t){if(this.shouldExecuteOnCPU([t])){let r=aX(this.texData.get(t.dataId).values,t.dtype);return this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,ZI,t.dtype);let e=new ue(t.shape,ZI);return this.compileAndRun(e,[t])}sign(t){let e=new ue(t.shape,oJ);return this.compileAndRun(e,[t])}isNaN(t){let e=new ue(t.shape,sJ);return this.compileAndRun(e,[t],"bool")}isInf(t){let e=new ue(t.shape,iJ);return this.compileAndRun(e,[t],"bool")}isFinite(t){let e=new ue(t.shape,aJ);return this.compileAndRun(e,[t],"bool")}round(t){let e=new ue(t.shape,cJ);return this.compileAndRun(e,[t])}exp(t){if(this.shouldExecuteOnCPU([t])){let r=sX(this.texData.get(t.dataId).values,t.dtype);return this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,QI,t.dtype);let e=new ue(t.shape,QI);return this.compileAndRun(e,[t])}expm1(t){if(this.shouldExecuteOnCPU([t])){let r=iX(this.texData.get(t.dataId).values,t.dtype);return 
this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,tE,t.dtype);let e=new ue(t.shape,tE);return this.compileAndRun(e,[t])}softmax(t,e){let r=Vt([e],t.shape),o=lr(t,r),s=Rn(o.shape,r),c=Dt(t,o.reshape(s)),l=this.exp(c),p=this.sum(l,r).reshape(s);return Bt(l,p)}log(t){if(this.shouldExecuteOnCPU([t])){let r=cX(this.texData.get(t.dataId).values,t.dtype);return this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,$J,t.dtype);let e=new ue(t.shape,lJ);return this.compileAndRun(e,[t])}log1p(t){let e=new ue(t.shape,uJ);return this.compileAndRun(e,[t])}sqrt(t){let e=new ue(t.shape,pJ);return this.compileAndRun(e,[t])}rsqrt(t){if(this.shouldExecuteOnCPU([t])){let r=pX(this.texData.get(t.dataId).values,t.dtype);return this.makeOutput(t.shape,t.dtype,r)}let e=new ue(t.shape,hJ);return this.compileAndRun(e,[t])}reciprocal(t){let e=new ue(t.shape,_J);return this.compileAndRun(e,[t])}relu(t){let e;return ct().getBool("WEBGL_PACK")?e=new ch(t.shape,eE):e=new ue(t.shape,jI),this.compileAndRun(e,[t])}relu6(t){let e;return ct().getBool("WEBGL_PACK")?e=new ch(t.shape,nE):e=new ue(t.shape,KI),this.compileAndRun(e,[t])}prelu(t,e){let r=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new Ns(SI,t.shape,e.shape):new Hn(CI,t.shape,e.shape);return this.compileAndRun(r,[t,e])}elu(t){if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,rE,t.dtype);let e=new ue(t.shape,XI);return this.compileAndRun(e,[t])}eluDer(t,e){let r=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?new Ns(xY,t.shape,e.shape):new Hn(gY,t.shape,e.shape);return this.compileAndRun(r,[t,e])}selu(t){let e=new ue(t.shape,nJ);return this.compileAndRun(e,[t])}clip(t,e,r){let o;ct().getBool("WEBGL_PACK_CLIP")?o=new DY(t.shape):o=new EY(t.shape);let s=o.getCustomSetupFunc(e,r);return this.compileAndRun(o,[t],null,s)}abs(t){if(this.shouldExecuteOnCPU([t])&&t.dtype!=="complex64"){let r=nX(this.texData.get(t.dataId).values);return this.makeOutput(t.shape,t.dtype,r)}if(ct().getBool("WEBGL_PACK_UNARY_OPERATIONS"))return this.packedUnaryOp(t,HI,t.dtype);let e=new ue(t.shape,HI);return this.compileAndRun(e,[t])}complexAbs(t){let e=this.texData.get(t.dataId),r=new AY(t.shape),o=[this.makeComplexComponentTensorInfo(t,e.complexTensorInfos.real),this.makeComplexComponentTensorInfo(t,e.complexTensorInfos.imag)];return this.compileAndRun(r,o)}sigmoid(t){let e=new ue(t.shape,fJ);return this.compileAndRun(e,[t])}softplus(t){let e=new ue(t.shape,dJ);return this.compileAndRun(e,[t])}asin(t){let e=new ue(t.shape,mJ);return this.compileAndRun(e,[t])}acos(t){let e=new ue(t.shape,gJ);return this.compileAndRun(e,[t])}atan(t){let e=new ue(t.shape,yJ);return this.compileAndRun(e,[t])}sinh(t){let e=new ue(t.shape,bJ);return this.compileAndRun(e,[t])}cosh(t){let e=new ue(t.shape,xJ);return this.compileAndRun(e,[t])}tanh(t){let e=new ue(t.shape,wJ);return this.compileAndRun(e,[t])}asinh(t){let e=new ue(t.shape,vJ);return this.compileAndRun(e,[t])}acosh(t){let e=new ue(t.shape,TJ);return this.compileAndRun(e,[t])}atanh(t){let e=new ue(t.shape,kJ);return this.compileAndRun(e,[t])}erf(t){let e=new ue(t.shape,NJ);return this.compileAndRun(e,[t])}step(t,e){let r=new ue(t.shape,rJ(e));return this.compileAndRun(r,[t])}conv2dByMatMul(t,e,r,o,s,c){let 
l=t.shape,p=this.texData.get(t.dataId),f=r.inChannels,m=l[0]*l[1]*l[2],y=r.outChannels,b=r.dataFormat==="channelsLast",v=!1,T=!1,N=(m===1||y===1)&&f>sE,S=l[2]%2!==0&&!!p.isPacked;if(N||!ct().getBool("WEBGL_LAZILY_UNPACK")||!ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")||!S){let q=b?l[0]*l[1]*l[2]:l[0]*l[2]*l[3],H=Q(t,[1,q,r.inChannels]),Z=Q(e,[1,r.inChannels,r.outChannels]),J=this.fusedBatchMatMul({a:H,b:Z,transposeA:v,transposeB:T,bias:o,activation:s,preluActivationWeights:c});return Q(J,r.outShape)}let D=b?l[0]*l[1]*(l[2]+1):l[0]*l[2]*(l[3]+1),I={dataId:t.dataId,shape:[1,D,r.inChannels],dtype:t.dtype},P=p.shape;p.shape=p.shape.slice(),p.shape[p.shape.length-2]++,_(Rm(p.shape,I.shape),()=>`packed reshape ${p.shape} to ${I.shape} isn't free`);let E=Q(e,[1,r.inChannels,r.outChannels]),L=this.fusedBatchMatMul({a:I,b:E,transposeA:v,transposeB:T,bias:o,activation:s,preluActivationWeights:c}),B=this.texData.get(L.dataId);return _(B.isPacked,()=>"batchMatMul result is expected to be packed"),p.shape=P,B.shape=r.outShape,ps().makeTensorFromDataId(L.dataId,r.outShape,L.dtype)}conv2dWithIm2Row(t,e,r,o,s,c){let{filterWidth:l,filterHeight:p,inChannels:f,outWidth:m,outHeight:y,dataFormat:b}=r,v=b==="channelsLast",T=l*p*f,N=y*m,S=[T,N],D=!0,I=!1,P=t.squeeze([0]),E=e.reshape([1,T,-1]),L=new w7(S,P.shape,r),B=this.compileAndRun(L,[P]).reshape([1,S[0],S[1]]),q=o!=null,H=c!=null,Z=s?Wm(s,!0):null,J=new x1(B.shape,E.shape,[1,N,r.outChannels],D,I,q,Z,H),it=[B,E];o&&it.push(o),H&&it.push(c);let pt=this.compileAndRun(J,it);return v?pt.reshape([1,y,m,r.outChannels]):pt.reshape([1,r.outChannels,y,m])}fusedConv2d({input:t,filter:e,convInfo:r,bias:o,activation:s,preluActivationWeights:c}){if(r.filterHeight===1&&r.filterWidth===1&&r.dilationHeight===1&&r.dilationWidth===1&&r.strideHeight===1&&r.strideWidth===1&&(r.padInfo.type==="SAME"||r.padInfo.type==="VALID"))return this.conv2dByMatMul(t,e,r,o,s,c);if(ct().getBool("WEBGL_CONV_IM2COL")&&t.shape[0]===1)return this.conv2dWithIm2Row(t,e,r,o,s,c);let l=o!=null,p=c!=null,f=s?Wm(s,!1):null,m=new $I(r,l,f,p),y=[t,e];return o&&y.push(o),c&&y.push(c),this.compileAndRun(m,y)}conv2d(t,e,r){if(r.filterHeight===1&&r.filterWidth===1&&r.dilationHeight===1&&r.dilationWidth===1&&r.strideHeight===1&&r.strideWidth===1&&(r.padInfo.type==="SAME"||r.padInfo.type==="VALID"))return this.conv2dByMatMul(t,e,r);if(ct().getBool("WEBGL_CONV_IM2COL")&&t.shape[0]===1)return this.conv2dWithIm2Row(t,e,r);let o=new $I(r);return this.compileAndRun(o,[t,e])}conv2dDerInput(t,e,r){let o=new RY(r);return this.compileAndRun(o,[t,e])}conv2dDerFilter(t,e,r){let o=new FY(r);return this.compileAndRun(o,[t,e])}fusedDepthwiseConv2D({input:t,filter:e,convInfo:r,bias:o,activation:s,preluActivationWeights:c}){let l=ct().getBool("WEBGL_PACK_DEPTHWISECONV")&&r.strideWidth<=2&&r.outChannels/r.inChannels===1,p=s?Wm(s,l):null,f=[t,e],m=o!=null,y=c!=null;m&&f.push(o),y&&f.push(c);let b;return l?(b=new EI(r,m,p,y),this.compileAndRun(b,f)):(b=new II(r,m,p,y),this.compileAndRun(b,f))}depthwiseConv2D(t,e,r){let o;return ct().getBool("WEBGL_PACK_DEPTHWISECONV")&&r.strideWidth<=2&&r.outChannels/r.inChannels===1?(o=new EI(r),this.compileAndRun(o,[t,e])):(o=new II(r),this.compileAndRun(o,[t,e]))}depthwiseConv2DDerInput(t,e,r){let o=new MY(r);return this.compileAndRun(o,[t,e])}depthwiseConv2DDerFilter(t,e,r){let o=new LY(r);return this.compileAndRun(o,[t,e])}conv3d(t,e,r){let o=new BY(r);return this.compileAndRun(o,[t,e])}conv3dDerInput(t,e,r){let o=new OY(r);return this.compileAndRun(o,[t,e])}conv3dDerFilter(t,e,r){let o=new 
PY(r);return this.compileAndRun(o,[t,e])}unstack(t,e){let r=t.shape[e],o=new Array(t.rank-1),s=0;for(let f=0;f1,()=>`blockSize should be > 1 for depthToSpace, but was: ${e}`);let o=t.shape[0],s=r==="NHWC"?t.shape[1]:t.shape[2],c=r==="NHWC"?t.shape[2]:t.shape[3],l=r==="NHWC"?t.shape[3]:t.shape[1],p=s*e,f=c*e,m=l/(e*e),y=r==="NHWC"?[o,p,f,m]:[o,m,p,f],b=new GY(y,e,r);return this.compileAndRun(b,[t])}split(t,e,r){return EJ(t,e,r)}scatterND(t,e,r){let{sliceRank:o,numUpdates:s,sliceSize:c,strides:l,outputSize:p}=Tc(e,t,r),f=[p/c,c],m=t.reshape([s,o]),y=e.reshape([s,c]);if(p===0)return TC(un([]),r);let b=Et(0),v=new VI(s,o,m.rank,y.rank,l,f),T=this.compileAndRun(v,[y,m,b]);return T.reshape(r)}sparseToDense(t,e,r,o){let{sliceRank:s,numUpdates:c,strides:l,outputSize:p}=Tc(e,t,r),f=!1,m=new VI(c,s,t.rank,e.rank,l,[p,1],f),y=this.compileAndRun(m,[e,t,o]);return y.reshape(r)}gatherND(t,e){let r=e.shape,o=r[r.length-1],[s,c,l,p]=Yf(t,e),f=e.reshape([c,o]),m=t.reshape([t.size/l,l]),y=new ZY(o,p,[c,l]),b=this.compileAndRun(y,[m,f]);return b.reshape(s)}fill(t,e,r){if(r=r||ic(e),r==="string"){let o=rr(r,G(t));return o.fill(e),ps().makeTensor(o,t,r,this)}else{let o=new XY(t,e),s=o.getCustomSetupFunc(e);return this.compileAndRun(o,[],r,s)}}onesLike(t){if(t.dtype==="string")throw new Error("onesLike is not supported under string dtype");return this.fill(t.shape,1,t.dtype)}zerosLike(t){return this.fill(t.shape,t.dtype==="string"?"":0,t.dtype)}linspace(t,e,r){return rv(t,e,r)}makeTensorInfo(t,e,r){let o=this.write(r,t,e);return this.texData.get(o).usage=null,{dataId:o,shape:t,dtype:e}}makeOutput(t,e,r){let{dataId:o}=this.makeTensorInfo(t,e,r);return ps().makeTensorFromDataId(o,t,e,this)}unpackTensor(t){let e=new IJ(t.shape);return this.runWebGLProgram(e,[t],t.dtype)}packTensor(t){let e=new $7(t.shape),r=!0;return this.runWebGLProgram(e,[t],t.dtype,null,r)}packedReshape(t,e){let r=[rl(t.shape),...ol(t.shape)],o={dtype:t.dtype,shape:r,dataId:t.dataId},s=[rl(e),...ol(e)],c=new WI(s,r),l=!0,p=this.runWebGLProgram(c,[o],t.dtype,null,l);return{dataId:p.dataId,shape:e,dtype:p.dtype}}decode(t){let e=this.texData.get(t),{isPacked:r,shape:o,dtype:s}=e,c=m1(o),l;r?l=new VY(c):l=new WY(c);let p=!0,f=this.runWebGLProgram(l,[{shape:c,dtype:s,dataId:t}],s,null,p);return{dtype:s,shape:o,dataId:f.dataId}}runWebGLProgram(t,e,r,o,s=!1){let c=this.makeTensorInfo(t.outputShape,r),l=this.texData.get(c.dataId);if(t.packedOutput&&(l.isPacked=!0),t.outPackingScheme===nh.DENSE){let N=oh(t.outputShape);l.texShape=N.map(S=>S*2)}if(t.outTexUsage!=null&&(l.usage=t.outTexUsage),G(c.shape)===0)return l.values=Ce(c.dtype,0),c;let p=[],f=e.map(N=>{if(N.dtype==="complex64")throw new Error("GPGPUProgram does not support complex64 input. 
For complex64 dtypes, please separate the program into real and imaginary parts.");let S=this.texData.get(N.dataId);if(S.texture==null){if(!t.packedInputs&&G(N.shape)<=ct().getNumber("WEBGL_SIZE_UPLOAD_UNIFORM"))return{shape:N.shape,texData:null,isUniform:!0,uniformValues:S.values};t.packedInputs&&(S.isPacked=!0,S.shape=N.shape)}else if(!!S.isPacked!==!!t.packedInputs)N=S.isPacked?this.unpackTensor(N):this.packTensor(N),p.push(N),S=this.texData.get(N.dataId);else if(S.isPacked&&!Rm(S.shape,N.shape)){let D=N,I=N.shape;N.shape=S.shape,N=this.packedReshape(N,I),p.push(N),S=this.texData.get(N.dataId),D.shape=I}return this.uploadToGPU(N.dataId),{shape:N.shape,texData:S,isUniform:!1}});this.uploadToGPU(c.dataId);let m={shape:c.shape,texData:l,isUniform:!1},y=x7(t,f,m),b=this.getAndSaveBinary(y,()=>y7(this.gpgpu,t,f,m)),v=this.activeTimers!=null,T;if(v&&(T=this.startTimer()),b7(this.gpgpu,b,f,m,o),p.forEach(N=>this.disposeIntermediateTensorInfo(N)),v&&(T=this.endTimer(T),this.activeTimers.push({name:t.constructor.name,query:this.getQueryTime(T)})),!ct().getBool("WEBGL_LAZILY_UNPACK")&&l.isPacked&&s===!1){let N=this.unpackTensor(c);return this.disposeIntermediateTensorInfo(c),N}return c}compileAndRun(t,e,r,o,s=!1){r=r||e[0].dtype;let c=this.runWebGLProgram(t,e,r,o,s);return ps().makeTensorFromDataId(c.dataId,c.shape,c.dtype)}getAndSaveBinary(t,e){return t in this.binaryCache||(this.binaryCache[t]=e()),this.binaryCache[t]}getTextureManager(){return this.textureManager}dispose(){if(this.disposed)return;if(!ct().getBool("IS_TEST")){let t=Object.keys(this.binaryCache);t.forEach(e=>{this.gpgpu.deleteProgram(this.binaryCache[e].webGLProgram),delete this.binaryCache[e]})}this.textureManager.dispose(),this.canvas!=null&&typeof HTMLCanvasElement!="undefined"&&this.canvas instanceof HTMLCanvasElement?this.canvas.remove():this.canvas=null,this.gpgpuCreatedLocally&&(this.gpgpu.program=null,this.gpgpu.dispose()),this.disposed=!0}floatPrecision(){return this.floatPrecisionValue==null&&(this.floatPrecisionValue=rt(()=>{if(!ct().get("WEBGL_RENDER_FLOAT32_ENABLED")){let t=ct().getBool("DEBUG");ct().set("DEBUG",!1);let e=this.abs(Et(1e-8)).dataSync()[0];if(ct().set("DEBUG",t),e>0)return 32}return 16})),this.floatPrecisionValue}epsilon(){return this.floatPrecision()===32?RJ:PJ}uploadToGPU(t){let e=this.texData.get(t),{shape:r,dtype:o,values:s,texture:c,usage:l,isPacked:p}=e;if(c!=null)return;let f=this.activeTimers!=null,m;f&&(m=or());let y=e.texShape;if(y==null&&(y=K8(r,p),e.texShape=y),s!=null){let b=m1(r),v,T=y[1],N=y[0],S=s instanceof Uint8Array;p?([T,N]=nl(y[0],y[1]),v=new KY(b,[N,T],S)):v=new jY(b,[N,T],S);let D=this.makeTensorInfo([N,T],o);S?this.texData.get(D.dataId).usage=Mr.PIXELS:this.texData.get(D.dataId).usage=Mr.UPLOAD,this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(D.dataId),T,N,s);let I=!0,P=this.runWebGLProgram(v,[D],o,null,I),E=this.texData.get(P.dataId);e.texture=E.texture,e.texShape=E.texShape,e.isPacked=E.isPacked,e.usage=E.usage,this.disposeIntermediateTensorInfo(D),this.texData.delete(P.dataId),e.values=null,f&&(this.uploadWaitMs+=or()-m)}else{let b=this.acquireTexture(y,l,o,p);e.texture=b}}convertAndCacheOnCPU(t,e){let r=this.texData.get(t),{dtype:o}=r;return this.releaseGPUData(t),e!=null&&(r.values=WJ(e,o)),r.values}acquireTexture(t,e,r,o){if(this.numBytesInGPU+=this.computeBytes(t,r),!this.warnedAboutMemory&&this.numBytesInGPU>this.numMBBeforeWarning*1024*1024){let s=(this.numBytesInGPU/1024/1024).toFixed(2);this.warnedAboutMemory=!0,console.warn(`High memory usage in GPU: ${s} MB, 
most likely due to a memory leak`)}return this.textureManager.acquireTexture(t,e,o)}computeBytes(t,e){return t[0]*t[1]*Pb(e)}tryRunOnCpuOrThrow(t,e){if(this.shouldExecuteOnCPU(t))try{return e()}catch(r){if(ct().getBool("IS_TEST"))throw new Error("CPU forwarding failed")}return null}}function WJ(n,t){if(t==="float32"||t==="complex64")return n;if(t==="int32"||t==="bool"){let e=t==="int32"?new Int32Array(n.length):new Uint8Array(n.length);for(let r=0;rnew zJ,2);let Qst={forceHalfFloat:GJ};function Cs(n){let{inputs:t,backend:e}=n,{x:r}=t;return e.incRef(r.dataId),{dataId:r.dataId,shape:r.shape,dtype:r.dtype}}let UJ={kernelName:bu,backendName:"webgl",kernelFunc:Cs};function ll(n){let{inputs:t,backend:e}=n,{real:r,imag:o}=t,s=e.makeTensorInfo(r.shape,"complex64"),c=e.texData.get(s.dataId),l=Cs({inputs:{x:r},backend:e}),p=e.texData.get(l.dataId);p.complexParentRefCount++;let f=Cs({inputs:{x:o},backend:e}),m=e.texData.get(f.dataId);return m.complexParentRefCount++,c.complexTensorInfos={real:l,imag:f},s}let qJ={kernelName:lf,backendName:"webgl",kernelFunc:ll};let iE="if (isnan(x)) return x;",HJ=` if (isnan(a)) return a; if (isnan(b)) return b; -`,FX=` +`,jJ=` result.r = isNaN.r > 0. ? NAN : result.r; result.g = isNaN.g > 0. ? NAN : result.g; result.b = isNaN.b > 0. ? NAN : result.b; result.a = isNaN.a > 0. ? NAN : result.a; -`;function $m(e){return({inputs:t,backend:n})=>{const{x:s}=t,i=n,o=new st(s.shape,e);return i.runWebGLProgram(o,[s],s.dtype)}}function Sc({opSnippet:e,packedOpSnippet:t,checkOutOfBounds:n=!1,supportsComplex:s=!1,cpuKernelImpl:i,dtype:o}){return({inputs:a,backend:c})=>{const{a:h,b:d}=a,m=c;if(s&&h.dtype==="complex64"){const L=m.texData.get(h.dataId),x=m.texData.get(d.dataId),[v,N]=[[L.complexTensorInfos.real,x.complexTensorInfos.real],[L.complexTensorInfos.imag,x.complexTensorInfos.imag]].map(E=>{const[k,F]=E,U={dataId:k.dataId,dtype:k.dtype,shape:h.shape},$={dataId:F.dataId,dtype:F.dtype,shape:d.shape},Y=new _n(e,h.shape,d.shape);return m.runWebGLProgram(Y,[U,$],$n(k.dtype,F.dtype))}),O=Lc({inputs:{real:v,imag:N},backend:m});return m.disposeIntermediateTensorInfo(v),m.disposeIntermediateTensorInfo(N),O}const f=o||$n(h.dtype,d.dtype);if(m.shouldExecuteOnCPU([h,d])&&i!=null){const L=m.texData.get(h.dataId),x=m.texData.get(d.dataId),[v,N]=i(h.shape,d.shape,L.values,x.values,f),O=m.makeTensorInfo(N,f),E=m.texData.get(O.dataId);return E.values=v,O}const b=oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")&&t!=null;let w;return b?w=new lr(t,h.shape,d.shape,n):w=new _n(e,h.shape,d.shape),m.runWebGLProgram(w,[h,d],f)}}const UC="return a + b;",_X=Sc({opSnippet:UC,packedOpSnippet:UC,supportsComplex:!0,cpuKernelImpl:GK}),WX={kernelName:wo,backendName:"webgl",kernelFunc:_X};const $X=kX+` +`;function Vm(n){return({inputs:t,backend:e})=>{let{x:r}=t,o=e,s=new ue(r.shape,n);return o.runWebGLProgram(s,[r],r.dtype)}}function ul({opSnippet:n,packedOpSnippet:t,checkOutOfBounds:e=!1,supportsComplex:r=!1,cpuKernelImpl:o,dtype:s}){return({inputs:c,backend:l})=>{let{a:p,b:f}=c,m=l;if(r&&p.dtype==="complex64"){let T=m.texData.get(p.dataId),N=m.texData.get(f.dataId),[S,D]=[[T.complexTensorInfos.real,N.complexTensorInfos.real],[T.complexTensorInfos.imag,N.complexTensorInfos.imag]].map(P=>{let[E,L]=P,B={dataId:E.dataId,dtype:E.dtype,shape:p.shape},q={dataId:L.dataId,dtype:L.dtype,shape:f.shape},H=new Hn(n,p.shape,f.shape);return m.runWebGLProgram(H,[B,q],jn(E.dtype,L.dtype))}),I=ll({inputs:{real:S,imag:D},backend:m});return m.disposeIntermediateTensorInfo(S),m.disposeIntermediateTensorInfo(D),I}let 
y=s||jn(p.dtype,f.dtype);if(m.shouldExecuteOnCPU([p,f])&&o!=null){let T=m.texData.get(p.dataId),N=m.texData.get(f.dataId),[S,D]=o(p.shape,f.shape,T.values,N.values,y),I=m.makeTensorInfo(D,y),P=m.texData.get(I.dataId);return P.values=S,I}let b=ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")&&t!=null,v;return b?v=new Ns(t,p.shape,f.shape,e):v=new Hn(n,p.shape,f.shape),m.runWebGLProgram(v,[p,f],y)}}let aE="return a + b;",KJ=ul({opSnippet:aE,packedOpSnippet:aE,supportsComplex:!0,cpuKernelImpl:rX}),XJ={kernelName:Hi,backendName:"webgl",kernelFunc:KJ};let YJ=HJ+` return atan(a, b); -`,UX=` +`,JJ=` vec4 result = atan(a, b); vec4 isNaN = min(vec4(isnan(a)) + vec4(isnan(b)), vec4(1.0)); - `+FX+` + `+jJ+` return result; -`,BX=Sc({opSnippet:$X,packedOpSnippet:UX}),MX={kernelName:nd,backendName:"webgl",kernelFunc:BX};function PX(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t;du(i,"avgPool");const{filterSize:o,strides:a,pad:c,dimRoundingMode:h}=s,d=1;A(cn(a,d),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))return ur({inputs:{x:i},backend:n});const f=new mu(m,"avg",!1);return n.runWebGLProgram(f,[i],"float32")}const zX={kernelName:dl,backendName:"webgl",kernelFunc:PX};function VX(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o}=t,a=o;du([i,o],"avgPoolBackprop");const{filterSize:c,strides:h,pad:d}=s,m=Un(a.shape,c,h,1,d),f=new V5(m);return n.runWebGLProgram(f,[i],a.dtype)}const GX={kernelName:sd,backendName:"webgl",kernelFunc:VX};class YX{constructor(e,t,n,s,i,o){this.outputShape=[],this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="0.0";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="1.0";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` +`,ZJ=ul({opSnippet:YJ,packedOpSnippet:JJ}),QJ={kernelName:sf,backendName:"webgl",kernelFunc:ZJ};function tZ(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t;sh(o,"avgPool");let{filterSize:s,strides:c,pad:l,dimRoundingMode:p}=r,f=1;_(fn(c,f),()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${c} and dilations '${f}'`);let m=Kn(o.shape,s,c,f,l,p);if(m.filterWidth===1&&m.filterHeight===1&<(m.inShape,m.outShape))return Cs({inputs:{x:o},backend:e});let y=new ah(m,"avg",!1);return e.runWebGLProgram(y,[o],"float32")}let eZ={kernelName:au,backendName:"webgl",kernelFunc:tZ};function nZ(n){let{inputs:t,backend:e,attrs:r}=n,{dy:o,input:s}=t,c=s;sh([o,s],"avgPoolBackprop");let{filterSize:l,strides:p,pad:f}=r,m=Kn(c.shape,l,p,1,f),y=new nY(m);return e.runWebGLProgram(y,[o],c.dtype)}let rZ={kernelName:af,backendName:"webgl",kernelFunc:nZ};class oZ{constructor(t,e,r,o,s,c){this.outputShape=[],this.variableNames=["x","mean","variance"],le(t,e),le(t,r);let l="0.0";o!=null&&(le(t,o),this.variableNames.push("offset"),l="getOffsetAtOutCoords()");let p="1.0";s!=null&&(le(t,s),this.variableNames.push("scale"),p="getScaleAtOutCoords()"),this.outputShape=t,this.userCode=` void main() { float x = getXAtOutCoords(); float mean = getMeanAtOutCoords(); float variance = getVarianceAtOutCoords(); - float offset = ${a}; - float scale = ${c}; - float inv = scale * inversesqrt(variance + float(${o})); + float offset = ${l}; + float scale = ${p}; + float inv = scale * inversesqrt(variance + float(${c})); setOutput(dot(vec3(x, -mean, offset), vec3(inv, inv, 1))); } - `}}class HX{constructor(e,t,n,s,i,o){this.packedInputs=!0,this.packedOutput=!0,this.variableNames=["x","mean","variance"],nt(e,t),nt(e,n);let a="vec4(0.0)";s!=null&&(nt(e,s),this.variableNames.push("offset"),a="getOffsetAtOutCoords()");let c="vec4(1.0)";i!=null&&(nt(e,i),this.variableNames.push("scale"),c="getScaleAtOutCoords()"),this.outputShape=e,this.userCode=` + `}}class sZ{constructor(t,e,r,o,s,c){this.packedInputs=!0,this.packedOutput=!0,this.variableNames=["x","mean","variance"],le(t,e),le(t,r);let l="vec4(0.0)";o!=null&&(le(t,o),this.variableNames.push("offset"),l="getOffsetAtOutCoords()");let p="vec4(1.0)";s!=null&&(le(t,s),this.variableNames.push("scale"),p="getScaleAtOutCoords()"),this.outputShape=t,this.userCode=` void main() { - vec4 offset = ${a}; - vec4 scale = ${c}; + vec4 offset = ${l}; + vec4 scale = ${p}; vec4 x = getXAtOutCoords(); vec4 mean = getMeanAtOutCoords(); vec4 variance = getVarianceAtOutCoords(); - vec4 inv = scale * inversesqrt(variance + vec4(${o})); + vec4 inv = scale * inversesqrt(variance + vec4(${c})); setOutput((x - mean) * inv + offset); } - `}}const qX=({inputs:e,backend:t,attrs:n})=>{const{x:s,mean:i,variance:o,offset:a,scale:c}=e;A(i.shape.length===o.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),A(a==null||i.shape.length===a.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),A(c==null||i.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:h}=n;h==null&&(h=.001);const d=[s,i,o];let m=null;a!=null&&(m=a.shape,d.push(a));let f=null;c!=null&&(f=c.shape,d.push(c));const b=oe().getBool("WEBGL_PACK_NORMALIZATION")?new HX(s.shape,i.shape,o.shape,m,f,h):new YX(s.shape,i.shape,o.shape,m,f,h),w=t.runWebGLProgram(b,d,d[0].dtype);return w},jX={kernelName:Il,backendName:"webgl",kernelFunc:qX};const KX="return float(a != b);",BC=Sc({opSnippet:KX,dtype:"bool"}),XX={kernelName:Dl,backendName:"webgl",kernelFunc:BC};function vS(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.texData.get(s.dataId);return ur({inputs:{x:i.complexTensorInfos.real},backend:n})}const JX={kernelName:Td,backendName:"webgl",kernelFunc:vS};const ZX="return 
float(int(x));";function QX(e,t){const n=new st(e.shape,ZX),s=t.runWebGLProgram(n,[e],"int32");return{dataId:s.dataId,shape:s.shape,dtype:s.dtype}}function NS(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{dtype:o}=s;if(o==="complex64"){if(i.dtype==="complex64")return ur({inputs:{x:i},backend:n});const a=dt(i.shape),c=NS({inputs:{x:i},backend:n,attrs:{dtype:"float32"}}),h=Lc({inputs:{real:c,imag:a},backend:n});return a.dispose(),n.disposeIntermediateTensorInfo(c),h}if(i.dtype==="complex64"){const a=vS({inputs:{input:i},backend:n}),c=NS({inputs:{x:a},backend:n,attrs:{dtype:o}});return n.disposeIntermediateTensorInfo(a),c}if(!ba(i.dtype,o)){const a=ur({inputs:{x:i},backend:n});return{dataId:a.dataId,shape:a.shape,dtype:o}}if(o==="int32")return QX(i,n);if(o==="bool"){const a=n.makeTensorInfo([],"bool",bt("bool",1)),c={a:i,b:a},h=BC({inputs:c,backend:n});return n.disposeIntermediateTensorInfo(a),h}throw new Error(`Error in Cast: failed to cast ${i.dtype} to ${o}`)}const e7={kernelName:Sa,backendName:"webgl",kernelFunc:NS};class t7{constructor(e){this.outputShape=[],this.outputShape=Xi(e,1),this.variableNames=e.map((o,a)=>`T${a}`);const t=new Array(e.length-1);t[0]=e[0][1];for(let o=1;o{let{x:r,mean:o,variance:s,offset:c,scale:l}=n;_(o.shape.length===s.shape.length,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),_(c==null||o.shape.length===c.shape.length,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),_(l==null||o.shape.length===l.shape.length,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let{varianceEpsilon:p}=e;p==null&&(p=.001);let f=[r,o,s],m=null;c!=null&&(m=c.shape,f.push(c));let y=null;l!=null&&(y=l.shape,f.push(l));let b=ct().getBool("WEBGL_PACK_NORMALIZATION")?new sZ(r.shape,o.shape,s.shape,m,y,p):new oZ(r.shape,o.shape,s.shape,m,y,p),v=t.runWebGLProgram(b,f,f[0].dtype);return v},aZ={kernelName:yu,backendName:"webgl",kernelFunc:iZ};let cZ="return float(a != b);",cE=ul({opSnippet:cZ,dtype:"bool"}),lZ={kernelName:Su,backendName:"webgl",kernelFunc:cE};function T1(n){let{inputs:t,backend:e}=n,{input:r}=t,o=e.texData.get(r.dataId);return Cs({inputs:{x:o.complexTensorInfos.real},backend:e})}let uZ={kernelName:$f,backendName:"webgl",kernelFunc:T1};let pZ="return float(int(x));";function hZ(n,t){let e=new ue(n.shape,pZ),r=t.runWebGLProgram(e,[n],"int32");return{dataId:r.dataId,shape:r.shape,dtype:r.dtype}}function k1(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{dtype:s}=r;if(s==="complex64"){if(o.dtype==="complex64")return Cs({inputs:{x:o},backend:e});let c=xe(o.shape),l=k1({inputs:{x:o},backend:e,attrs:{dtype:"float32"}}),p=ll({inputs:{real:l,imag:c},backend:e});return c.dispose(),e.disposeIntermediateTensorInfo(l),p}if(o.dtype==="complex64"){let c=T1({inputs:{input:o},backend:e}),l=k1({inputs:{x:c},backend:e,attrs:{dtype:s}});return e.disposeIntermediateTensorInfo(c),l}if(!sc(o.dtype,s)){let c=Cs({inputs:{x:o},backend:e});return{dataId:c.dataId,shape:c.shape,dtype:s}}if(s==="int32")return hZ(o,e);if(s==="bool"){let c=e.makeTensorInfo([],"bool",Ce("bool",1)),l={a:o,b:c},p=cE({inputs:l,backend:e});return e.disposeIntermediateTensorInfo(c),p}throw new Error(`Error in Cast: failed to cast ${o.dtype} to ${s}`)}let fZ={kernelName:cc,backendName:"webgl",kernelFunc:k1};class dZ{constructor(t){this.outputShape=[],this.outputShape=hs(t,1),this.variableNames=t.map((c,l)=>`T${l}`);let e=new Array(t.length-1);e[0]=t[0][1];for(let c=1;c`T${x}`);const c=new Array(e.length-1);c[0]=e[0][t];for(let 
L=1;L`T${S}`);let p=new Array(t.length-1);p[0]=t[0][e];for(let N=1;N= ${c[L-1]}) { + getT0(${y}), vec2(${m.join()})); + }`;for(let N=1;N= ${p[N-1]}) { return getChannel( - getT${L}(${Um(a,h,x)}), - vec2(${Um(d,h,x)})); - }`}const b=c.length,w=c[c.length-1];f+=` + getT${N}(${Gm(l,f,S)}), + vec2(${Gm(m,f,S)})); + }`}let v=p.length,T=p[p.length-1];b+=` return getChannel( - getT${b}(${Um(a,h,w)}), - vec2(${Um(d,h,w)}));`,this.userCode=` - float getValue(${a.map(L=>"int "+L)}) { - ${f} + getT${v}(${Gm(l,f,T)}), + vec2(${Gm(m,f,T)}));`,this.userCode=` + float getValue(${l.map(N=>"int "+N)}) { + ${b} } void main() { - ${i} coords = getOutputCoords(); - vec4 result = vec4(getValue(${o}), 0., 0., 0.); + ${s} coords = getOutputCoords(); + vec4 result = vec4(getValue(${c}), 0., 0., 0.); - ${o[s-1]} = ${o[s-1]} + 1; - if (${o[s-1]} < ${n[s-1]}) { - result.g = getValue(${o}); + ${c[o-1]} = ${c[o-1]} + 1; + if (${c[o-1]} < ${r[o-1]}) { + result.g = getValue(${c}); } - ${o[s-2]} = ${o[s-2]} + 1; - if (${o[s-2]} < ${n[s-2]}) { - result.a = getValue(${o}); + ${c[o-2]} = ${c[o-2]} + 1; + if (${c[o-2]} < ${r[o-2]}) { + result.a = getValue(${c}); } - ${o[s-1]} = ${o[s-1]} - 1; - if (${o[s-2]} < ${n[s-2]} && - ${o[s-1]} < ${n[s-1]}) { - result.b = getValue(${o}); + ${c[o-1]} = ${c[o-1]} - 1; + if (${c[o-2]} < ${r[o-2]} && + ${c[o-1]} < ${r[o-1]}) { + result.b = getValue(${c}); } setOutput(result); } - `}}function Um(e,t,n){const s=e.indexOf(t),i=e.map((o,a)=>a===s?`${o} - ${n}`:o);return i.join()}function MC(e){const{inputs:t,backend:n}=e,{input:s}=t,i=n.texData.get(s.dataId);return ur({inputs:{x:i.complexTensorInfos.imag},backend:n})}const s7={kernelName:gd,backendName:"webgl",kernelFunc:MC};function i7(e,t,n){const s=[mc(e.shape),...fc(e.shape)],i={dtype:e.dtype,shape:s,dataId:e.dataId},o=[mc(t),...fc(t)],a=new bC(o,s),c=!0,h=n.runWebGLProgram(a,[i],e.dtype,null,c);return{dataId:h.dataId,shape:t,dtype:h.dtype}}function dr(e){const{inputs:t,backend:n,attrs:s}=e,{x:i}=t,{shape:o}=s,a=n,c=P(i.shape),h=Vt(o,c),d=P(h);A(c===d,()=>`The new shape (${h}) has ${d} elements and the old shape (${i.shape}) has ${c} elements. 
The new shape and old shape must have the same number of elements.`);const m=a.texData.get(i.dataId);return m.isPacked&&!Rm(i.shape,h)&&!(m.texture!==null&&Rm(m.shape,h))?i7(i,h,a):(a.incRef(i.dataId),{dataId:i.dataId,shape:h,dtype:i.dtype})}const r7={kernelName:_l,backendName:"webgl",kernelFunc:dr};function Ic(e,t,n){const s=e[0].dtype;if(s==="complex64"){const d=e.map(L=>vS({inputs:{input:L},backend:n})),m=e.map(L=>MC({inputs:{input:L},backend:n})),f=Ic(d,t,n),b=Ic(m,t,n),w=Lc({inputs:{real:f,imag:b},backend:n});return d.forEach(L=>n.disposeIntermediateTensorInfo(L)),m.forEach(L=>n.disposeIntermediateTensorInfo(L)),n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(b),w}if(e.length>oe().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){const d=Math.floor(e.length/2),m=Ic(e.slice(0,d),t,n),f=Ic(e.slice(d),t,n),b=Ic([m,f],t,n);return n.disposeIntermediateTensorInfo(m),n.disposeIntermediateTensorInfo(f),b}if(oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&e[0].shape.length>1){const d=new n7(e.map(m=>m.shape),t);return n.runWebGLProgram(d,e,s)}const i=Xi(e.map(d=>d.shape),t),o=e.map(d=>dr({inputs:{x:d},attrs:{shape:[-1,P(d.shape.slice(t))]},backend:n})),a=new t7(o.map(d=>d.shape)),c=n.runWebGLProgram(a,o,s);o.forEach(d=>n.disposeIntermediateTensorInfo(d));const h=dr({inputs:{x:c},attrs:{shape:i},backend:n});return n.disposeIntermediateTensorInfo(c),h}function o7(e){const{inputs:t,backend:n,attrs:s}=e,{axis:i}=s,o=qe(i,t[0].shape)[0],a=Xi(t.map(d=>d.shape),o);if(P(a)===0)return n.makeTensorInfo(a,t[0].dtype,[]);const c=t.filter(d=>P(d.shape)>0);if(c.length===1)return c[0];const h=c.map(d=>d.shape);return np(h,o),Ic(c,o,n)}const a7={kernelName:fl,backendName:"webgl",kernelFunc:o7};const c7=$C+` + `}}function Gm(n,t,e){let r=n.indexOf(t),o=n.map((s,c)=>c===r?`${s} - ${e}`:s);return o.join()}function lE(n){let{inputs:t,backend:e}=n,{input:r}=t,o=e.texData.get(r.dataId);return Cs({inputs:{x:o.complexTensorInfos.imag},backend:e})}let gZ={kernelName:wf,backendName:"webgl",kernelFunc:lE};function yZ(n,t,e){let r=[rl(n.shape),...ol(n.shape)],o={dtype:n.dtype,shape:r,dataId:n.dataId},s=[rl(t),...ol(t)],c=new WI(s,r),l=!0,p=e.runWebGLProgram(c,[o],n.dtype,null,l);return{dataId:p.dataId,shape:t,dtype:p.dtype}}function Ss(n){let{inputs:t,backend:e,attrs:r}=n,{x:o}=t,{shape:s}=r,c=e,l=G(o.shape),p=Ge(s,l),f=G(p);_(l===f,()=>`The new shape (${p}) has ${f} elements and the old shape (${o.shape}) has ${l} elements. 
The new shape and old shape must have the same number of elements.`);let m=c.texData.get(o.dataId);return m.isPacked&&!Rm(o.shape,p)&&!(m.texture!==null&&Rm(m.shape,p))?yZ(o,p,c):(c.incRef(o.dataId),{dataId:o.dataId,shape:p,dtype:o.dtype})}let bZ={kernelName:Eu,backendName:"webgl",kernelFunc:Ss};function pl(n,t,e){let r=n[0].dtype;if(r==="complex64"){let f=n.map(T=>T1({inputs:{input:T},backend:e})),m=n.map(T=>lE({inputs:{input:T},backend:e})),y=pl(f,t,e),b=pl(m,t,e),v=ll({inputs:{real:y,imag:b},backend:e});return f.forEach(T=>e.disposeIntermediateTensorInfo(T)),m.forEach(T=>e.disposeIntermediateTensorInfo(T)),e.disposeIntermediateTensorInfo(y),e.disposeIntermediateTensorInfo(b),v}if(n.length>ct().getNumber("WEBGL_MAX_TEXTURES_IN_SHADER")){let f=Math.floor(n.length/2),m=pl(n.slice(0,f),t,e),y=pl(n.slice(f),t,e),b=pl([m,y],t,e);return e.disposeIntermediateTensorInfo(m),e.disposeIntermediateTensorInfo(y),b}if(ct().getBool("WEBGL_PACK_ARRAY_OPERATIONS")&&n[0].shape.length>1){let f=new mZ(n.map(m=>m.shape),t);return e.runWebGLProgram(f,n,r)}let o=hs(n.map(f=>f.shape),t),s=n.map(f=>Ss({inputs:{x:f},attrs:{shape:[-1,G(f.shape.slice(t))]},backend:e})),c=new dZ(s.map(f=>f.shape)),l=e.runWebGLProgram(c,s,r);s.forEach(f=>e.disposeIntermediateTensorInfo(f));let p=Ss({inputs:{x:l},attrs:{shape:o},backend:e});return e.disposeIntermediateTensorInfo(l),p}function xZ(n){let{inputs:t,backend:e,attrs:r}=n,{axis:o}=r,s=Vt(o,t[0].shape)[0],c=hs(t.map(f=>f.shape),s);if(G(c)===0)return e.makeTensorInfo(c,t[0].dtype,[]);let l=t.filter(f=>G(f.shape)>0);if(l.length===1)return l[0];let p=l.map(f=>f.shape);return id(p,s),pl(l,s,e)}let wZ={kernelName:uu,backendName:"webgl",kernelFunc:xZ};let vZ=iE+` return cos(x); -`,l7=$m(c7),h7={kernelName:Ia,backendName:"webgl",kernelFunc:l7};const u7=` +`,TZ=Vm(vZ),kZ={kernelName:lc,backendName:"webgl",kernelFunc:TZ};let NZ=` if (a == b) { return 1.0; }; -return a / b;`,d7=` +return a / b;`,_Z=` // vec4 one = vec4(equal(a, b)); // return one + (vec4(1.0) - one) * a / b; vec4 result = a / b; @@ -3659,21 +3659,21 @@ return a / b;`,d7=` } return result; -`,p7=Sc({opSnippet:u7,packedOpSnippet:d7,checkOutOfBounds:!0}),m7={kernelName:xa,backendName:"webgl",kernelFunc:p7};class PC{constructor(e,t,n){this.variableNames=["real","imag"];const s=t[1];this.outputShape=t;const i=n?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,o=n?`${s}.0`:"1.0";let a;if(e==="real")a="return real * expR - imag * expI;";else if(e==="imag")a="return real * expI + imag * expR;";else throw new Error(`FFT component must be either "real" or "imag", got ${e}.`);this.userCode=` - const float exponentMultiplier = ${i}; +`,CZ=ul({opSnippet:NZ,packedOpSnippet:_Z,checkOutOfBounds:!0}),SZ={kernelName:uc,backendName:"webgl",kernelFunc:CZ};class uE{constructor(t,e,r){this.variableNames=["real","imag"];let o=e[1];this.outputShape=e;let s=r?`2.0 * ${Math.PI}`:`-2.0 * ${Math.PI}`,c=r?`${o}.0`:"1.0",l;if(t==="real")l="return real * expR - imag * expI;";else if(t==="imag")l="return real * expI + imag * expR;";else throw new Error(`FFT component must be either "real" or "imag", got ${t}.`);this.userCode=` + const float exponentMultiplier = ${s}; float unaryOpComplex(float real, float expR, float imag, float expI) { - ${a} + ${l} } float mulMatDFT(int batch, int index) { - float indexRatio = float(index) / float(${s}); + float indexRatio = float(index) / float(${o}); float exponentMultiplierTimesIndexRatio = exponentMultiplier * indexRatio; float result = 0.0; - for (int i = 0; i < ${s}; i++) { + for (int i = 0; i < ${o}; i++) { // x = 
(-2|2 * PI / N) * index * i; float x = exponentMultiplierTimesIndexRatio * float(i); float expR = cos(x); @@ -3682,7 +3682,7 @@ return a / b;`,d7=` float imag = getImag(batch, i); result += - unaryOpComplex(real, expR, imag, expI) / ${o}; + unaryOpComplex(real, expR, imag, expI) / ${c}; } return result; @@ -3692,29 +3692,29 @@ return a / b;`,d7=` ivec2 coords = getOutputCoords(); setOutput(mulMatDFT(coords[0], coords[1])); } - `}}function zC(e,t,n){const s=n.texData.get(e.dataId),i=P(e.shape),o=e.shape[e.shape.length-1],a=i/o,c=dr({inputs:{x:e},backend:n,attrs:{shape:[a,o]}}),h=c.shape,d=new PC("real",h,t),m=new PC("imag",h,t),f=[{dataId:s.complexTensorInfos.real.dataId,dtype:s.complexTensorInfos.real.dtype,shape:h},{dataId:s.complexTensorInfos.imag.dataId,dtype:s.complexTensorInfos.imag.dtype,shape:h}],b=n.runWebGLProgram(d,f,"float32"),w=n.runWebGLProgram(m,f,"float32"),L=Lc({inputs:{real:b,imag:w},backend:n});n.disposeIntermediateTensorInfo(b),n.disposeIntermediateTensorInfo(w);const x=dr({inputs:{x:L},backend:n,attrs:{shape:e.shape}});return n.disposeIntermediateTensorInfo(x),x}function f7(e){const{inputs:t,backend:n}=e,{input:s}=t;return zC(s,!1,n)}const g7={kernelName:pd,backendName:"webgl",kernelFunc:f7};class y7{constructor(e){this.variableNames=["Image"],this.outputShape=[];const t=e[2];this.outputShape=e,this.userCode=` + `}}function pE(n,t,e){let r=e.texData.get(n.dataId),o=G(n.shape),s=n.shape[n.shape.length-1],c=o/s,l=Ss({inputs:{x:n},backend:e,attrs:{shape:[c,s]}}),p=l.shape,f=new uE("real",p,t),m=new uE("imag",p,t),y=[{dataId:r.complexTensorInfos.real.dataId,dtype:r.complexTensorInfos.real.dtype,shape:p},{dataId:r.complexTensorInfos.imag.dataId,dtype:r.complexTensorInfos.imag.dtype,shape:p}],b=e.runWebGLProgram(f,y,"float32"),v=e.runWebGLProgram(m,y,"float32"),T=ll({inputs:{real:b,imag:v},backend:e});e.disposeIntermediateTensorInfo(b),e.disposeIntermediateTensorInfo(v);let N=Ss({inputs:{x:T},backend:e,attrs:{shape:n.shape}});return e.disposeIntermediateTensorInfo(N),N}function $Z(n){let{inputs:t,backend:e}=n,{input:r}=t;return pE(r,!1,e)}let IZ={kernelName:yf,backendName:"webgl",kernelFunc:$Z};class EZ{constructor(t){this.variableNames=["Image"],this.outputShape=[];let e=t[2];this.outputShape=t,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; - int coordX = ${t} - x; + int coordX = ${e} - x; float outputValue; - if(coordX >= 0 && coordX < ${t}) { + if(coordX >= 0 && coordX < ${e}) { outputValue = getImage(coords[0], coords[1], coordX, coords[3]); } else { outputValue = getImage(coords[0], coords[1], coords[2], coords[3]); } setOutput(outputValue); } - `}}const b7={kernelName:md,backendName:"webgl",kernelFunc:({inputs:e,backend:t})=>{const{image:n}=e,s=t,i=new y7(n.shape),o=s.runWebGLProgram(i,[n],n.dtype);return o}};class w7{constructor(e){this.variableNames=["A"];const t=Pn(),[n,s]=e;this.outputShape=e,this.userCode=` + `}}let DZ={kernelName:bf,backendName:"webgl",kernelFunc:({inputs:n,backend:t})=>{let{image:e}=n,r=t,o=new EZ(e.shape),s=r.runWebGLProgram(o,[e],e.dtype);return s}};class AZ{constructor(t){this.variableNames=["A"];let e=Jn(),[r,o]=t;this.outputShape=t,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; int texC = coords[1]; int depth = coords[2]; - vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${s}.0, ${n}.0); + vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${o}.0, ${r}.0); - vec4 values = ${t.texture2D}(A, uv); + vec4 values = ${e.texture2D}(A, uv); float value; if (depth == 0) { value = 
values.r; @@ -3728,7 +3728,7 @@ return a / b;`,d7=` setOutput(floor(value * 255.0 + 0.5)); } - `}}class L7{constructor(e){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;const t=Pn(),[n,s]=e;this.outputShape=e,this.userCode=` + `}}class FZ{constructor(t){this.variableNames=["A"],this.packedInputs=!1,this.packedOutput=!0;let e=Jn(),[r,o]=t;this.outputShape=t,this.userCode=` void main() { ivec3 coords = getOutputCoords(); int texR = coords[0]; @@ -3743,8 +3743,8 @@ return a / b;`,d7=` depth = coords[2] + col; vec2 uv = (vec2(texC, texR) + halfCR) / - vec2(${s}.0, ${n}.0); - vec4 values = ${t.texture2D}(A, uv); + vec2(${o}.0, ${r}.0); + vec4 values = ${e.texture2D}(A, uv); float value; if (depth == 0) { value = values.r; @@ -3760,17 +3760,17 @@ return a / b;`,d7=` } } - ${t.output} = result; + ${e.output} = result; } - `}}const S7={kernelName:Rd,backendName:"webgl",kernelFunc:I7};let xc;function I7(e){const{inputs:t,backend:n,attrs:s}=e;let{pixels:i}=t;const{numChannels:o}=s,a=typeof HTMLVideoElement!="undefined"&&i instanceof HTMLVideoElement,c=typeof HTMLImageElement!="undefined"&&i instanceof HTMLImageElement,[h,d]=a?[i.videoWidth,i.videoHeight]:[i.width,i.height],m=[d,h],f=[d,h,o];(c||a)&&(xc==null&&(xc=document.createElement("canvas").getContext("2d")),xc.canvas.width=h,xc.canvas.height=d,xc.drawImage(i,0,0,h,d),i=xc.canvas);const b=n.makeTensorInfo(m,"int32");n.texData.get(b.dataId).usage=Ns.PIXELS,n.gpgpu.uploadPixelDataToTexture(n.getTexture(b.dataId),i);const w=oe().getBool("WEBGL_PACK")?new L7(f):new w7(f),L=n.runWebGLProgram(w,[b],"int32");return n.disposeData(b.dataId),L}function x7(e){const{inputs:t,backend:n}=e,{input:s}=t;return zC(s,!0,n)}const T7={kernelName:fd,backendName:"webgl",kernelFunc:x7};class VC{constructor(e,t){this.variableNames=["x"];const{windowSize:n,batchSize:s,inSize:i,outSize:o}=e;this.outputShape=[s,o];const a=Math.floor(n/4)*4,c=n%4;let h="sumValue += dot(values, ones);";if(t!=null){const m=1/t;h=`sumValue += dot(values * ${Le(m)?m.toPrecision(2):m}, ones);`}let d="";i%n>0&&(d=` - if (inIdx < 0 || inIdx >= ${i}) { + `}}let RZ={kernelName:Ff,backendName:"webgl",kernelFunc:PZ},hl;function PZ(n){let{inputs:t,backend:e,attrs:r}=n,{pixels:o}=t,{numChannels:s}=r,c=typeof HTMLVideoElement!="undefined"&&o instanceof HTMLVideoElement,l=typeof HTMLImageElement!="undefined"&&o instanceof HTMLImageElement,[p,f]=c?[o.videoWidth,o.videoHeight]:[o.width,o.height],m=[f,p],y=[f,p,s];(l||c)&&(hl==null&&(hl=document.createElement("canvas").getContext("2d")),hl.canvas.width=p,hl.canvas.height=f,hl.drawImage(o,0,0,p,f),o=hl.canvas);let b=e.makeTensorInfo(m,"int32");e.texData.get(b.dataId).usage=Mr.PIXELS,e.gpgpu.uploadPixelDataToTexture(e.getTexture(b.dataId),o);let v=ct().getBool("WEBGL_PACK")?new FZ(y):new AZ(y),T=e.runWebGLProgram(v,[b],"int32");return e.disposeData(b.dataId),T}function OZ(n){let{inputs:t,backend:e}=n,{input:r}=t;return pE(r,!0,e)}let LZ={kernelName:xf,backendName:"webgl",kernelFunc:OZ};class hE{constructor(t,e){this.variableNames=["x"];let{windowSize:r,batchSize:o,inSize:s,outSize:c}=t;this.outputShape=[o,c];let l=Math.floor(r/4)*4,p=r%4,f="sumValue += dot(values, ones);";if(e!=null){let y=1/e;f=`sumValue += dot(values * ${gt(y)?y.toPrecision(2):y}, ones);`}let m="";s%r>0&&(m=` + if (inIdx < 0 || inIdx >= ${s}) { return 0.0; } `),this.userCode=` const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); float getValue(int batch, int inIdx) { - ${d} + ${m} return getX(batch, inIdx); } @@ -3778,11 +3778,11 @@ return a / b;`,d7=` ivec2 coords = 
getOutputCoords(); int batch = coords[0]; int outIdx = coords[1]; - int inOffset = outIdx * ${n}; + int inOffset = outIdx * ${r}; float sumValue = 0.0; - for (int i = 0; i < ${a}; i += 4) { + for (int i = 0; i < ${l}; i += 4) { int inIdx = inOffset + i; vec4 values = vec4( getValue(batch, inIdx), @@ -3791,141 +3791,141 @@ return a / b;`,d7=` getValue(batch, inIdx + 3) ); - ${h} + ${f} } - int inIdx = inOffset + ${a}; - if (${c===1}) { + int inIdx = inOffset + ${l}; + if (${p===1}) { vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0); - ${h} - } else if (${c===2}) { + ${f} + } else if (${p===2}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), 0.0, 0.0); - ${h} - } else if (${c===3}) { + ${f} + } else if (${p===3}) { vec4 values = vec4( getValue(batch, inIdx), getValue(batch, inIdx + 1), getValue(batch, inIdx + 2), 0.0); - ${h} + ${f} } setOutput(sumValue); } - `}}function A7(e){const t=[];for(;t.length===0||t[t.length-1].outSize!==1;){const n=t.length?t[t.length-1].outSize:e[1],s=uh(n);t.push({inSize:n,windowSize:s,outSize:Math.ceil(n/s)})}return t}function GC(e,t,n,s){const i=A7(e.shape);let o=e;for(let a=0;a6)throw Error(`Transpose for rank ${t} is not yet supported`);const n=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],s=new Array(t);for(let i=0;i6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);const s=Rt(this.rank),i=J0("rc",this.rank),o=new Array(this.rank);for(let d=0;d6)throw Error(`Transpose for rank ${t} is not yet supported`);let e=["resRC.x","resRC.y","resRC.z","resRC.w","resRC.u","resRC.v"],r=new Array(t);for(let o=0;o6)throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`);let o=Oe(this.rank),s=wI("rc",this.rank),c=new Array(this.rank);for(let m=0;m{const{x:s}=e,{reductionIndices:i,keepDims:o}=t,a=n,c=s.shape.length,h=qe(i,s.shape);let d=h;const m=Xn(d,c),f=m!=null,b=a.shouldExecuteOnCPU([s]);let w=s;if(f){if(b){const O=a.texData.get(w.dataId),E=O.values,k=new Array(c);for(let $=0;$`Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${d}'`);const m=Un(i.shape,o,a,d,c,h);if(m.filterWidth===1&&m.filterHeight===1&&ae(m.inShape,m.outShape))return ur({inputs:{x:i},backend:n});const f=new mu(m,"max",!1);return n.runWebGLProgram(f,[i],i.dtype)}const D7={kernelName:Ol,backendName:"webgl",kernelFunc:E7};function k7(e){const{inputs:t,backend:n,attrs:s}=e,{dy:i,input:o,output:a}=t,c=o;du([o,a],"maxPoolBackprop");const{filterSize:h,strides:d,pad:m,dimRoundingMode:f}=s,b=Un(c.shape,h,d,1,m,f),w=!0,L=new mu(b,"max",w),x=n.runWebGLProgram(L,[c],c.dtype),v=new u6(b),N=n.runWebGLProgram(v,[i,x],c.dtype);return n.disposeIntermediateTensorInfo(x),N}const F7={kernelName:bd,backendName:"webgl",kernelFunc:k7};function _7(e,t,n,s){let i=new mu(n,"max",!1);const o=s.runWebGLProgram(i,[e],"float32");i=new mu(n,"max",!0,!0,t);const a=s.runWebGLProgram(i,[e],"float32");return[o,a]}const W7={kernelName:wd,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{filterSize:i,strides:o,pad:a,includeBatchInIndex:c}=t,h=n;A(s.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.shape.length}.`);const d=[1,1];A(cn(o,d),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${o} and dilations '${d}'`);const m=Un(s.shape,i,o,d,a),[f,b]=_7(s,c,m,h);return[f,b]}};function $7(e,t,n,s){const i=P(t),o=P(e.shape),a=o/i,c=dr({inputs:{x:e},attrs:{shape:[a,i]},backend:s}),h=GC(c,"float32","mean",s),d=dr({inputs:{x:h},attrs:{shape:n},backend:s});return s.disposeIntermediateTensorInfo(c),s.disposeIntermediateTensorInfo(h),d}const U7={kernelName:hy,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{keepDims:i,axis:o}=t,a=n,c=s.shape.length,h=qe(o,s.shape);let d=h;const m=Xn(d,c),f=m!=null,b=a.shouldExecuteOnCPU([s]),w=[];let L=s;if(f){if(b){const E=a.texData.get(L.dataId),k=E.values,F=new Array(c);for(let Y=0;Yd[0]+e[m]+d[1]);const s=e.length,i=Rt(s),o=t.map(d=>d[0]).join(","),a=t.map((d,m)=>d[0]+e[m]).join(","),c=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,s),h=n==="reflect"?0:1;if(s===1){this.userCode=` - int start = ${o}; - int end = ${a}; + `}}function N1(n,t,e){let r=ct().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new VZ(n.shape,t):new zZ(n.shape,t);return e.runWebGLProgram(r,[n],n.dtype)}let GZ={kernelName:Nu,backendName:"webgl",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{reductionIndices:o,keepDims:s}=t,c=e,l=r.shape.length,p=Vt(o,r.shape),f=p,m=ir(f,l),y=m!=null,b=c.shouldExecuteOnCPU([r]),v=r;if(y){if(b){let I=c.texData.get(v.dataId),P=I.values,E=new Array(l);for(let q=0;q`Error in maxPool: Either strides or dilations must be 1. Got strides ${c} and dilations '${f}'`);let m=Kn(o.shape,s,c,f,l,p);if(m.filterWidth===1&&m.filterHeight===1&<(m.inShape,m.outShape))return Cs({inputs:{x:o},backend:e});let y=new ah(m,"max",!1);return e.runWebGLProgram(y,[o],o.dtype)}let qZ={kernelName:_u,backendName:"webgl",kernelFunc:UZ};function HZ(n){let{inputs:t,backend:e,attrs:r}=n,{dy:o,input:s,output:c}=t,l=s;sh([s,c],"maxPoolBackprop");let{filterSize:p,strides:f,pad:m,dimRoundingMode:y}=r,b=Kn(l.shape,p,f,1,m,y),v=!0,T=new ah(b,"max",v),N=e.runWebGLProgram(T,[l],l.dtype),S=new N7(b),D=e.runWebGLProgram(S,[o,N],l.dtype);return e.disposeIntermediateTensorInfo(N),D}let jZ={kernelName:Tf,backendName:"webgl",kernelFunc:HZ};function KZ(n,t,e,r){let o=new ah(e,"max",!1),s=r.runWebGLProgram(o,[n],"float32");o=new ah(e,"max",!0,!0,t);let c=r.runWebGLProgram(o,[n],"float32");return[s,c]}let XZ={kernelName:kf,backendName:"webgl",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{filterSize:o,strides:s,pad:c,includeBatchInIndex:l}=t,p=e;_(r.shape.length===4,()=>`Error in maxPool: input must be rank 4 but got rank ${r.shape.length}.`);let f=[1,1];_(fn(s,f),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${s} and dilations '${f}'`);let m=Kn(r.shape,o,s,f,c),[y,b]=KZ(r,l,m,p);return[y,b]}};function YZ(n,t,e,r){let o=G(t),s=G(n.shape),c=s/o,l=Ss({inputs:{x:n},attrs:{shape:[c,o]},backend:r}),p=fE(l,"float32","mean",r),f=Ss({inputs:{x:p},attrs:{shape:e},backend:r});return r.disposeIntermediateTensorInfo(l),r.disposeIntermediateTensorInfo(p),f}let JZ={kernelName:ix,backendName:"webgl",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{keepDims:o,axis:s}=t,c=e,l=r.shape.length,p=Vt(s,r.shape),f=p,m=ir(f,l),y=m!=null,b=c.shouldExecuteOnCPU([r]),v=[],T=r;if(y){if(b){let P=c.texData.get(T.dataId),E=P.values,L=new Array(l);for(let H=0;Hm[0]+t[y]+m[1]);let o=t.length,s=Oe(o),c=e.map(m=>m[0]).join(","),l=e.map((m,y)=>m[0]+t[y]).join(","),p=["coords[0]","coords[1]","coords[2]","coords[3]"].slice(0,o),f=r==="reflect"?0:1;if(o===1){this.userCode=` + int start = ${c}; + int end = ${l}; void main() { int outC = getOutputCoords(); if (outC < start) { - outC = start * 2 - outC - ${h}; + outC = start * 2 - outC - ${f}; } else if(outC >= end) { - outC = (end - 1) * 2 - outC + ${h}; + outC = (end - 1) * 2 - outC + ${f}; } setOutput(getX(outC - start)); } `;return}this.userCode=` - ${i} start = ${i}(${o}); - ${i} end = ${i}(${a}); + ${s} start = ${s}(${c}); + ${s} end = ${s}(${l}); void main() { - ${i} outC = getOutputCoords(); - for (int i = 0; i < ${s}; i++) { + ${s} outC = getOutputCoords(); + for (int i = 0; i < ${o}; i++) { if (outC[i] < start[i]) { - outC[i] = start[i] * 2 - outC[i] - ${h}; + outC[i] = start[i] * 2 - outC[i] - ${f}; } else if(outC[i] >= end[i]) { - outC[i] = (end[i] - 1) * 2 - outC[i] + ${h}; + outC[i] = (end[i] - 1) * 2 - outC[i] + ${f}; } } - ${i} coords = outC - start; - setOutput(getX(${c})); + ${s} coords = outC - start; + setOutput(getX(${p})); } - `}}class M7{constructor(e,t,n){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=t.map((w,L)=>w[0]+e[L]+w[1]);const s=e.length,i=Rt(s),o=t.map(w=>w[0]).join(","),a=t.map((w,L)=>w[0]+e[L]).join(","),c=Mn("rc",s),h=Mn("source",s),d=`${c[s-1]} < ${this.outputShape[s-1]}`,m=s===1?"source":`vec2(${h.slice(-2).join()})`,f=n==="reflect"?0:1;let b="";if(s===1){const w=` - ${i} source = rc; + `}}class QZ{constructor(t,e,r){this.variableNames=["x"],this.packedInputs=!0,this.packedOutput=!0,this.outputShape=e.map((T,N)=>T[0]+t[N]+T[1]);let o=t.length,s=Oe(o),c=e.map(T=>T[0]).join(","),l=e.map((T,N)=>T[0]+t[N]).join(","),p=Yn("rc",o),f=Yn("source",o),m=`${p[o-1]} < ${this.outputShape[o-1]}`,y=o===1?"source":`vec2(${f.slice(-2).join()})`,b=r==="reflect"?0:1,v="";if(o===1){let T=` + ${s} source = rc; if (source < start) { - source = start * 2 - source - ${f}; + source = start * 2 - source - ${b}; } else if (source >= end) { - source = (end - 1) * 2 - source + ${f}; + source = (end - 1) * 2 - source + ${b}; } source -= start; - `;b=` - ${i} rc = outputLoc; - ${w} - result[0] = getChannel(getX(${h.join()}), ${m}); - ${c[s-1]} += 1; - if(${d}) { - ${w} - result[1] = getChannel(getX(${h.join()}), ${m}); + `;v=` + ${s} rc = outputLoc; + ${T} + result[0] = getChannel(getX(${f.join()}), ${y}); + ${p[o-1]} += 1; + if(${m}) { + ${T} + result[1] = getChannel(getX(${f.join()}), ${y}); } - `}else{const w=` - ${i} source = rc; - ${i} lt = ${i}(lessThan(source, start)); - ${i} gte = ${i}(greaterThanEqual(source, end)); - ${i} orig = 1 - (lt + gte); + `}else{let T=` + ${s} source = rc; + ${s} lt = ${s}(lessThan(source, start)); + ${s} gte = ${s}(greaterThanEqual(source, end)); + ${s} orig = 1 - (lt + gte); source = 
orig * source + - lt * (start * 2 - source - ${f}) + - gte * ((end - 1) * 2 - source + ${f}); + lt * (start * 2 - source - ${b}) + + gte * ((end - 1) * 2 - source + ${b}); source -= start; - `;b=` - ${i} rc = outputLoc; - ${w} - result[0] = getChannel(getX(${h.join()}), ${m}); - ${c[s-1]} += 1; - if(${d}) { - ${w} - result[1] = getChannel(getX(${h.join()}), ${m}); + `;v=` + ${s} rc = outputLoc; + ${T} + result[0] = getChannel(getX(${f.join()}), ${y}); + ${p[o-1]} += 1; + if(${m}) { + ${T} + result[1] = getChannel(getX(${f.join()}), ${y}); } rc = outputLoc; - ${c[s-2]} += 1; - if(${c[s-2]} < ${this.outputShape[s-2]}) { - ${w} - result[2] = getChannel(getX(${h.join()}), ${m}); - ${c[s-1]} += 1; - if(${d}) { - ${w} - result[3] = getChannel(getX(${h.join()}), ${m}); + ${p[o-2]} += 1; + if(${p[o-2]} < ${this.outputShape[o-2]}) { + ${T} + result[2] = getChannel(getX(${f.join()}), ${y}); + ${p[o-1]} += 1; + if(${m}) { + ${T} + result[3] = getChannel(getX(${f.join()}), ${y}); } } `}this.userCode=` - const ${i} start = ${i}(${o}); - const ${i} end = ${i}(${a}); + const ${s} start = ${s}(${c}); + const ${s} end = ${s}(${l}); void main() { - ${i} outputLoc = getOutputCoords(); + ${s} outputLoc = getOutputCoords(); vec4 result = vec4(0.); - ${b} + ${v} setOutput(result); } - `}}const P7=({inputs:e,backend:t,attrs:n})=>{const{x:s}=e,{paddings:i,mode:o}=n,a=oe().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new M7(s.shape,i,o):new B7(s.shape,i,o),c=t.runWebGLProgram(a,[s],s.dtype);return c},z7={kernelName:El,backendName:"webgl",kernelFunc:P7};const YC={REAL:"return areal * breal - aimag * bimag;",IMAG:"return areal * bimag + aimag * breal;"};class HC{constructor(e,t,n){this.variableNames=["AReal","AImag","BReal","BImag"],this.outputShape=nt(t,n),this.userCode=` + `}}let t9=({inputs:n,backend:t,attrs:e})=>{let{x:r}=n,{paddings:o,mode:s}=e,c=ct().getBool("WEBGL_PACK_ARRAY_OPERATIONS")?new QZ(r.shape,o,s):new ZZ(r.shape,o,s),l=t.runWebGLProgram(c,[r],r.dtype);return l},e9={kernelName:Cu,backendName:"webgl",kernelFunc:t9};let dE={REAL:"return areal * breal - aimag * bimag;",IMAG:"return areal * bimag + aimag * breal;"};class mE{constructor(t,e,r){this.variableNames=["AReal","AImag","BReal","BImag"],this.outputShape=le(e,r),this.userCode=` float binaryOpComplex( float areal, float aimag, float breal, float bimag) { - ${e} + ${t} } void main() { @@ -3935,31 +3935,36 @@ return a / b;`,d7=` float bimag = getBImagAtOutCoords(); setOutput(binaryOpComplex(areal, aimag, breal, bimag)); } - `}}const qC="return a * b;";function V7(e){const{inputs:t,backend:n}=e,{a:s,b:i}=t,o=$n(s.dtype,i.dtype);if(s.dtype==="complex64"){const c=n.texData.get(s.dataId),h=n.texData.get(i.dataId),d=new HC(YC.REAL,s.shape,i.shape),m=new HC(YC.IMAG,s.shape,i.shape),f=[{dataId:c.complexTensorInfos.real.dataId,dtype:c.complexTensorInfos.real.dtype,shape:s.shape},{dataId:c.complexTensorInfos.imag.dataId,dtype:c.complexTensorInfos.imag.dtype,shape:s.shape},{dataId:h.complexTensorInfos.real.dataId,dtype:h.complexTensorInfos.real.dtype,shape:i.shape},{dataId:h.complexTensorInfos.imag.dataId,dtype:h.complexTensorInfos.imag.dtype,shape:i.shape}],b=n.runWebGLProgram(d,f,"float32"),w=n.runWebGLProgram(m,f,"float32"),L=Lc({inputs:{real:b,imag:w},backend:n});return n.disposeIntermediateTensorInfo(b),n.disposeIntermediateTensorInfo(w),L}if(n.shouldExecuteOnCPU([s,i])){const c=n.texData.get(s.dataId),h=n.texData.get(i.dataId),[d,m]=JK(s.shape,i.shape,c.values,h.values,o),f=n.makeTensorInfo(m,o),b=n.texData.get(f.dataId);return b.values=d,f}let a;return 
oe().getBool("WEBGL_PACK_BINARY_OPERATIONS")?a=new lr(qC,s.shape,i.shape):a=new _n(qC,s.shape,i.shape),n.runWebGLProgram(a,[s,i],o)}const G7={kernelName:Ta,backendName:"webgl",kernelFunc:V7};const Y7={kernelName:fy,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c}=n,h=t,d=h.readSync(s.dataId),m=h.readSync(i.dataId),f=o,b=a,w=c;return Dp(d,m,f,b,w)}};const H7=kp,q7={kernelName:Ld,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,padToMaxOutputSize:h}=n,d=t,m=d.readSync(s.dataId),f=d.readSync(i.dataId),{selectedIndices:b,validOutputs:w}=H7(m,f,o,a,c,h);return[b,w]}};const j7=Fp,K7={kernelName:Sd,backendName:"webgl",kernelFunc:({inputs:e,backend:t,attrs:n})=>{Za("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");const{boxes:s,scores:i}=e,{maxOutputSize:o,iouThreshold:a,scoreThreshold:c,softNmsSigma:h}=n,d=t,m=d.readSync(s.dataId),f=d.readSync(i.dataId),b=o,w=a,L=c,x=h,{selectedIndices:v,selectedScores:N}=j7(m,f,b,w,L,x);return[v,N]}};class X7{constructor(e,t,n,s){this.variableNames=["Image"],this.outputShape=[];const i=e[1],o=e[2],a=Math.sin(t).toFixed(3),c=Math.cos(t).toFixed(3);this.outputShape=e;const[h,d]=Jb(s,i,o),m=h.toFixed(3),f=d.toFixed(3);let b="";typeof n=="number"?b=`float outputValue = ${n.toFixed(2)};`:b=` - vec3 fill = vec3(${n.join(",")}); + `}}let gE="return a * b;";function n9(n){let{inputs:t,backend:e}=n,{a:r,b:o}=t,s=jn(r.dtype,o.dtype);if(r.dtype==="complex64"){let l=e.texData.get(r.dataId),p=e.texData.get(o.dataId),f=new mE(dE.REAL,r.shape,o.shape),m=new mE(dE.IMAG,r.shape,o.shape),y=[{dataId:l.complexTensorInfos.real.dataId,dtype:l.complexTensorInfos.real.dtype,shape:r.shape},{dataId:l.complexTensorInfos.imag.dataId,dtype:l.complexTensorInfos.imag.dtype,shape:r.shape},{dataId:p.complexTensorInfos.real.dataId,dtype:p.complexTensorInfos.real.dtype,shape:o.shape},{dataId:p.complexTensorInfos.imag.dataId,dtype:p.complexTensorInfos.imag.dtype,shape:o.shape}],b=e.runWebGLProgram(f,y,"float32"),v=e.runWebGLProgram(m,y,"float32"),T=ll({inputs:{real:b,imag:v},backend:e});return e.disposeIntermediateTensorInfo(b),e.disposeIntermediateTensorInfo(v),T}if(e.shouldExecuteOnCPU([r,o])){let l=e.texData.get(r.dataId),p=e.texData.get(o.dataId),[f,m]=uX(r.shape,o.shape,l.values,p.values,s),y=e.makeTensorInfo(m,s),b=e.texData.get(y.dataId);return b.values=f,y}let c;return ct().getBool("WEBGL_PACK_BINARY_OPERATIONS")?c=new Ns(gE,r.shape,o.shape):c=new Hn(gE,r.shape,o.shape),e.runWebGLProgram(c,[r,o],s)}let r9={kernelName:pc,backendName:"webgl",kernelFunc:n9};let o9={kernelName:px,backendName:"webgl",kernelFunc:({inputs:n,backend:t,attrs:e})=>{Bc("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");let{boxes:r,scores:o}=n,{maxOutputSize:s,iouThreshold:c,scoreThreshold:l}=e,p=t,f=p.readSync(r.dataId),m=p.readSync(o.dataId),y=s,b=c,v=l;return Od(f,m,y,b,v)}};let s9=Ld,i9={kernelName:Nf,backendName:"webgl",kernelFunc:({inputs:n,backend:t,attrs:e})=>{Bc("tf.nonMaxSuppression() in webgl locks the UI thread. 
Call tf.nonMaxSuppressionAsync() instead");let{boxes:r,scores:o}=n,{maxOutputSize:s,iouThreshold:c,scoreThreshold:l,padToMaxOutputSize:p}=e,f=t,m=f.readSync(r.dataId),y=f.readSync(o.dataId),{selectedIndices:b,validOutputs:v}=s9(m,y,s,c,l,p);return[b,v]}};let a9=Md,c9={kernelName:_f,backendName:"webgl",kernelFunc:({inputs:n,backend:t,attrs:e})=>{Bc("tf.nonMaxSuppression() in webgl locks the UI thread. Call tf.nonMaxSuppressionAsync() instead");let{boxes:r,scores:o}=n,{maxOutputSize:s,iouThreshold:c,scoreThreshold:l,softNmsSigma:p}=e,f=t,m=f.readSync(r.dataId),y=f.readSync(o.dataId),b=s,v=c,T=l,N=p,{selectedIndices:S,selectedScores:D}=a9(m,y,b,v,T,N);return[S,D]}};class l9{constructor(t,e,r,o){this.variableNames=["Image"],this.outputShape=[];let s=t[1],c=t[2],l=Math.sin(e).toFixed(3),p=Math.cos(e).toFixed(3);this.outputShape=t;let[f,m]=jw(o,s,c),y=f.toFixed(3),b=m.toFixed(3),v="";typeof r=="number"?v=`float outputValue = ${r.toFixed(2)};`:v=` + vec3 fill = vec3(${r.join(",")}); float outputValue = fill[coords[3]];`,this.userCode=` void main() { ivec4 coords = getOutputCoords(); int x = coords[2]; int y = coords[1]; - float coordXFloat = (float(x) - ${m}) * ${c} - (float(y) - ${f}) * ${a}; - float coordYFloat = (float(x) - ${m}) * ${a} + (float(y) - ${f}) * ${c}; - int coordX = int(round(coordXFloat + ${m})); - int coordY = int(round(coordYFloat + ${f})); - ${b} - if(coordX >= 0 && coordX < ${o} && coordY >= 0 && coordY < ${i}) { + float coordXFloat = (float(x) - ${y}) * ${p} - (float(y) - ${b}) * ${l}; + float coordYFloat = (float(x) - ${y}) * ${l} + (float(y) - ${b}) * ${p}; + int coordX = int(round(coordXFloat + ${y})); + int coordY = int(round(coordYFloat + ${b})); + ${v} + if(coordX >= 0 && coordX < ${c} && coordY >= 0 && coordY < ${s}) { outputValue = getImage(coords[0], coordY, coordX, coords[3]); } setOutput(outputValue); } - `}}const J7={kernelName:Od,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{image:s}=e,{radians:i,fillValue:o,center:a}=t,c=n,h=new X7(s.shape,i,o,a),d=c.runWebGLProgram(h,[s],s.dtype);return d}};const Z7=$C+` + `}}let u9={kernelName:Rf,backendName:"webgl",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{image:r}=n,{radians:o,fillValue:s,center:c}=t,l=e,p=new l9(r.shape,o,s,c),f=l.runWebGLProgram(p,[r],r.dtype);return f}};let p9=iE+` return sin(x); -`,Q7=$m(Z7),eJ={kernelName:Aa,backendName:"webgl",kernelFunc:Q7};const tJ="return x * x;",nJ=$m(tJ),sJ={kernelName:Nd,backendName:"webgl",kernelFunc:nJ};const jC="return (a - b) * (a - b);",iJ=Sc({opSnippet:jC,packedOpSnippet:jC}),rJ={kernelName:va,backendName:"webgl",kernelFunc:iJ};const KC="return a - b;",oJ=Sc({opSnippet:KC,packedOpSnippet:KC,supportsComplex:!0,cpuKernelImpl:e5}),aJ={kernelName:Na,backendName:"webgl",kernelFunc:oJ};const cJ="return tan(x);",lJ=$m(cJ),hJ={kernelName:Ca,backendName:"webgl",kernelFunc:lJ};const uJ={kernelName:Hl,backendName:"webgl",kernelFunc:({inputs:e,attrs:t,backend:n})=>{const{x:s}=e,{perm:i}=t,o=n,a=s.shape.length,c=new Array(a);for(let d=0;d{Pm(xJ,{isNodejs:()=>TJ});function TJ(){return typeof global=="object"&&!0&&typeof e2!="undefined"&&typeof process!="undefined"&&!!process.version}});function fr(r,l,u=!1){if(r.beginPath(),l.slice(1).forEach(({x:p,y},g)=>{const I=l[g];r.moveTo(I.x,I.y),r.lineTo(p,y)}),u){const p=l[l.length-1],y=l[0];if(!p||!y)return;r.moveTo(p.x,p.y),r.lineTo(y.x,y.y)}r.stroke()}class ms{constructor(r,l){if(!ui(r)||!ui(l))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have 
${JSON.stringify({width:r,height:l})}`);this._width=r,this._height=l}get width(){return this._width}get height(){return this._height}reverse(){return new ms(1/this.width,1/this.height)}}const DS={};Pm(DS,{computeReshapedDimensions:()=>_S,getCenterPoint:()=>Xo,isDimensions:()=>Gm,isEven:()=>Vm,isFloat:()=>FS,isTensor:()=>jo,isTensor1D:()=>AJ,isTensor2D:()=>kS,isTensor3D:()=>gr,isTensor4D:()=>Rs,isValidNumber:()=>ui,isValidProbablitiy:()=>vc,range:()=>_i,round:()=>Ko});const n2=Je(Ze());function jo(r,l){return r instanceof n2.Tensor&&r.shape.length===l}function AJ(r){return jo(r,1)}function kS(r){return jo(r,2)}function gr(r){return jo(r,3)}function Rs(r){return jo(r,4)}function FS(r){return r%1!==0}function Vm(r){return r%2===0}function Ko(r,l=2){const u=Math.pow(10,l);return Math.floor(r*u)/u}function Gm(r){return r&&r.width&&r.height}function _S({width:r,height:l},u){const p=u/Math.max(l,r);return new ms(Math.round(r*p),Math.round(l*p))}function Xo(r){return r.reduce((l,u)=>l.add(u),new Qe(0,0)).div(new Qe(r.length,r.length))}function _i(r,l,u){return Array(r).fill(0).map((p,y)=>l+y*u)}function ui(r){return!!r&&r!==Infinity&&r!==-Infinity&&!isNaN(r)||r===0}function vc(r){return ui(r)&&0<=r&&r<=1}class Qe{constructor(r,l){this._x=r,this._y=l}get x(){return this._x}get y(){return this._y}add(r){return new Qe(this.x+r.x,this.y+r.y)}sub(r){return new Qe(this.x-r.x,this.y-r.y)}mul(r){return new Qe(this.x*r.x,this.y*r.y)}div(r){return new Qe(this.x/r.x,this.y/r.y)}abs(){return new Qe(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Qe(Math.floor(this.x),Math.floor(this.y))}}class _t{static isRect(r){return!!r&&[r.x,r.y,r.width,r.height].every(ui)}static assertIsValidBox(r,l,u=!1){if(!_t.isRect(r))throw new Error(`${l} - invalid box: ${JSON.stringify(r)}, expected object with properties x, y, width, height`);if(!u&&(r.width<0||r.height<0))throw new Error(`${l} - width (${r.width}) and height (${r.height}) must be positive numbers`)}constructor(r,l=!0){const u=r||{},p=[u.left,u.top,u.right,u.bottom].every(ui),y=[u.x,u.y,u.width,u.height].every(ui);if(!y&&!p)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(u)}`);const[g,I,S,T]=y?[u.x,u.y,u.width,u.height]:[u.left,u.top,u.right-u.left,u.bottom-u.top];_t.assertIsValidBox({x:g,y:I,width:S,height:T},"Box.constructor",l),this._x=g,this._y=I,this._width=S,this._height=T}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Qe(this.left,this.top)}get topRight(){return new Qe(this.right,this.top)}get bottomLeft(){return new Qe(this.left,this.bottom)}get bottomRight(){return new Qe(this.right,this.bottom)}round(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(y=>Math.round(y));return new _t({x:r,y:l,width:u,height:p})}floor(){const[r,l,u,p]=[this.x,this.y,this.width,this.height].map(y=>Math.floor(y));return new _t({x:r,y:l,width:u,height:p})}toSquare(){let{x:r,y:l,width:u,height:p}=this;const y=Math.abs(u-p);return ul&&(I=-D+l+u,D=l),_>r&&(S=-_+r+p,_=r),T<1&&(S=2-T,T=1),C<1&&(S=2-C,C=1),{dy:g,edy:S,dx:y,edx:I,y:C,ey:_,x:T,ex:D,w:u,h:p}}calibrate(r){return new 
_t({left:this.left+r.left*this.width,top:this.top+r.top*this.height,right:this.right+r.right*this.width,bottom:this.bottom+r.bottom*this.height}).toSquare().round()}}class gu extends _t{constructor(r,l,u,p,y=!1){super({left:r,top:l,right:u,bottom:p},y)}}class Nc{constructor(r,l,u,p,y){this._imageDims=new ms(y.width,y.height),this._score=r,this._classScore=l,this._className=u,this._box=new _t(p).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new _t(this._box).rescale(this.imageDims.reverse())}forSize(r,l){return new Nc(this.score,this.classScore,this.className,this.relativeBox,{width:r,height:l})}}class Jt extends Nc{constructor(r,l,u){super(r,r,"",l,u)}forSize(r,l){const{score:u,relativeBox:p,imageDims:y}=super.forSize(r,l);return new Jt(u,p,y)}}function WS(r,l,u=!0){const p=Math.max(0,Math.min(r.right,l.right)-Math.max(r.left,l.left)),y=Math.max(0,Math.min(r.bottom,l.bottom)-Math.max(r.top,l.top)),g=p*y;return u?g/(r.area+l.area-g):g/Math.min(r.area,l.area)}function $S(r){const l=r.map(S=>S.x),u=r.map(S=>S.y),p=l.reduce((S,T)=>TTSS({score:I,boxIndex:S})).sort((I,S)=>I.score-S.score).map(I=>I.boxIndex);const g=[];for(;y.length>0;){const I=y.pop();g.push(I);const S=y,T=[];for(let C=0;CT[D]<=u)}return g}const Wi=Je(Ze());function di(r,l){return Wi.tidy(()=>{const[u,p,y]=l,g=Wi.fill([...r.shape.slice(0,3),1],u,"float32"),I=Wi.fill([...r.shape.slice(0,3),1],p,"float32"),S=Wi.fill([...r.shape.slice(0,3),1],y,"float32"),T=Wi.concat([g,I,S],3);return Wi.sub(r,T)})}const so=Je(Ze());function BS(r,l=!1){return so.tidy(()=>{const[u,p]=r.shape.slice(1);if(u===p)return r;const y=Math.abs(u-p),g=Math.round(y*(l?.5:1)),I=u>p?2:1,S=A=>{const B=r.shape.slice();return B[I]=A,so.fill(B,0,"float32")},T=S(g),C=y-T.shape[I],D=l&&C?S(C):null,_=[D,r,T].filter(A=>!!A).map(A=>so.cast(A,"float32"));return so.concat(_,I)})}function vJ(r){const l=r.slice();for(let u=l.length-1;u>0;u--){const p=Math.floor(Math.random()*(u+1)),y=l[u];l[u]=l[p],l[p]=y}return l}function yu(r){return 1/(1+Math.exp(-r))}function NJ(r){return Math.log(r/(1-r))}class bu extends _t{constructor(r,l,u,p,y=!1){super({x:r,y:l,width:u,height:p},y)}}const CJ=.5,RJ=.43,OJ=.45;class Gs{constructor(r,l,u=new Qe(0,0)){const{width:p,height:y}=l;this._imgDims=new ms(p,y),this._shift=u,this._positions=r.map(g=>g.mul(new Qe(p,y)).add(u))}get shift(){return new Qe(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(r=>r.sub(this._shift).div(new Qe(this.imageWidth,this.imageHeight)))}forSize(r,l){return new this.constructor(this.relativePositions,{width:r,height:l})}shiftBy(r,l){return new this.constructor(this.relativePositions,this._imgDims,new Qe(r,l))}shiftByPoint(r){return this.shiftBy(r.x,r.y)}align(r,l={}){if(r){const y=r instanceof Jt?r.box.floor():new _t(r);return this.shiftBy(y.x,y.y).align(null,l)}const{useDlibAlignment:u,minBoxPadding:p}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},l);return u?this.alignDlib():this.alignMinBbox(p)}alignDlib(){const 
r=this.getRefPointsForAlignment(),[l,u,p]=r,y=D=>p.sub(D).magnitude(),g=(y(l)+y(u))/2,I=Math.floor(g/OJ),S=Xo(r),T=Math.floor(Math.max(0,S.x-CJ*I)),C=Math.floor(Math.max(0,S.y-RJ*I));return new bu(T,C,Math.min(I,this.imageWidth+T),Math.min(I,this.imageHeight+C))}alignMinBbox(r){const l=$S(this.positions);return l.pad(l.width*r,l.height*r)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}}class EJ extends Gs{getRefPointsForAlignment(){const r=this.positions;return[r[0],r[1],Xo([r[3],r[4]])]}}class wu extends Gs{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(Xo)}}class Ym{constructor(r,l){this._label=r,this._distance=l}get label(){return this._label}get distance(){return this._distance}toString(r=!0){return`${this.label}${r?` (${Ko(this.distance)})`:""}`}}class Hm extends _t{static assertIsValidLabeledBox(r,l){if(_t.assertIsValidBox(r,l),!ui(r.label))throw new Error(`${l} - expected property label (${r.label}) to be a number`)}constructor(r,l){super(r);this._label=l}get label(){return this._label}}class Jo{constructor(r,l){if(!(typeof r=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(l)||l.some(u=>!(u instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=r,this._descriptors=l}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(r=>Array.from(r))}}static fromJSON(r){const l=r.descriptors.map(u=>new Float32Array(u));return new Jo(r.label,l)}}class DJ extends Hm{static assertIsValidPredictedBox(r,l){if(Hm.assertIsValidLabeledBox(r,l),!vc(r.score)||!vc(r.classScore))throw new Error(`${l} - expected properties score (${r.score}) and (${r.classScore}) to be a number between [0, 1]`)}constructor(r,l,u,p){super(r,l);this._score=u,this._classScore=p}get score(){return this._score}get classScore(){return this._classScore}}function $i(r){return r.detection instanceof Jt}function Zo(r,l){const u={detection:l};return Object.assign({},r,u)}function MS(){const r=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},l=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:r,readFile:l}}function qm(r){let l="";if(!r)try{r=require("fs")}catch(p){l=p.toString()}const u=r?function(p){return new Promise((y,g)=>{r.readFile(p,function(I,S){return I?g(I):y(S)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${l}`)};return{readFile:u}}function PS(){const r=global.Canvas||global.HTMLCanvasElement,l=global.Image||global.HTMLImageElement,u=function(){if(r)return new r;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},p=function(){if(l)return 
new l;throw new Error("createImageElement - missing Image implementation for nodejs environment")},y=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},g=qm();return{Canvas:r||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:l||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:u,createImageElement:p,fetch:y,...g}}function zS(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}const VS=Je(t2());let In;function kJ(){if(!In)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return In}function GS(r){In=r}function YS(){if(zS())return GS(MS());if(VS.isNodejs())return GS(PS())}function FJ(r){if(In||YS(),!In)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and isBrowser()");const{Canvas:l=In.Canvas,Image:u=In.Image}=r;In.Canvas=l,In.Image=u,In.createCanvasElement=r.createCanvasElement||(()=>new l),In.createImageElement=r.createImageElement||(()=>new u),In.ImageData=r.ImageData||In.ImageData,In.Video=r.Video||In.Video,In.fetch=r.fetch||In.fetch,In.readFile=r.readFile||In.readFile}const St={getEnv:kJ,setEnv:GS,initialize:YS,createBrowserEnv:MS,createFileSystem:qm,createNodejsEnv:PS,monkeyPatch:FJ,isBrowser:zS,isNodejs:VS.isNodejs};YS();function Qo(r){return!St.isNodejs()&&typeof r=="string"?document.getElementById(r):r}function is(r){const{Canvas:l,CanvasRenderingContext2D:u}=St.getEnv();if(r instanceof u)return r;const p=Qo(r);if(!(p instanceof l))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");const y=p.getContext("2d");if(!y)throw new Error("resolveContext2d - canvas 2d context is null");return y}var Ui;(function(r){r.TOP_LEFT="TOP_LEFT",r.TOP_RIGHT="TOP_RIGHT",r.BOTTOM_LEFT="BOTTOM_LEFT",r.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Ui||(Ui={}));class jm{constructor(r={}){const{anchorPosition:l,backgroundColor:u,fontColor:p,fontSize:y,fontStyle:g,padding:I}=r;this.anchorPosition=l||Ui.TOP_LEFT,this.backgroundColor=u||"rgba(0, 0, 0, 0.5)",this.fontColor=p||"rgba(255, 255, 255, 1)",this.fontSize=y||14,this.fontStyle=g||"Georgia",this.padding=I||4}}class Cc{constructor(r,l,u={}){this.text=typeof r=="string"?[r]:r instanceof Cc?r.text:r,this.anchor=l,this.options=new jm(u)}measureWidth(r){const{padding:l}=this.options;return this.text.map(u=>r.measureText(u).width).reduce((u,p)=>u{const B=S+D.x,ne=S+D.y+(A+1)*g;u.fillText(_,B,ne)})}}class s2{constructor(r={}){const{boxColor:l,lineWidth:u,label:p,drawLabelOptions:y}=r;this.boxColor=l||"rgba(0, 0, 255, 1)",this.lineWidth=u||2,this.label=p;const g={anchorPosition:Ui.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new jm(Object.assign({},g,y))}}class HS{constructor(r,l={}){this.box=new _t(r),this.options=new s2(l)}draw(r){const l=is(r),{boxColor:u,lineWidth:p}=this.options,{x:y,y:g,width:I,height:S}=this.box;l.strokeStyle=u,l.lineWidth=p,l.strokeRect(y,g,I,S);const{label:T}=this.options;T&&new Cc([T],{x:y-p/2,y:g},this.options.drawLabelOptions).draw(r)}}function _J(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const y=p instanceof Jt?p.score:$i(p)?p.detection.score:void 0,g=p instanceof Jt?p.box:$i(p)?p.detection.box:new _t(p),I=y?`${Ko(y)}`:void 0;new HS(g,{label:I}).draw(r)})}function 
Lu(r){const{Image:l,Video:u}=St.getEnv();return r instanceof l&&r.complete||r instanceof u&&r.readyState>=3}function qS(r){return new Promise((l,u)=>{if(r instanceof St.getEnv().Canvas||Lu(r))return l(null);function p(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",p),g.currentTarget.removeEventListener("error",y),l(g)}function y(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",p),g.currentTarget.removeEventListener("error",y),u(g)}r.addEventListener("load",p),r.addEventListener("error",y)})}function jS(r){return new Promise((l,u)=>{if(!(r instanceof Blob))return u("bufferToImage - expected buf to be of type: Blob");const p=new FileReader;p.onload=()=>{if(typeof p.result!="string")return u("bufferToImage - expected reader.result to be a string, in onload");const y=St.getEnv().createImageElement();y.onload=()=>l(y),y.onerror=u,y.src=p.result},p.onerror=u,p.readAsDataURL(r)})}function ea(r){const{Image:l,Video:u}=St.getEnv();return r instanceof l?new ms(r.naturalWidth,r.naturalHeight):r instanceof u?new ms(r.videoWidth,r.videoHeight):new ms(r.width,r.height)}function Rc({width:r,height:l}){const{createCanvasElement:u}=St.getEnv(),p=u();return p.width=r,p.height=l,p}function Su(r,l){const{ImageData:u}=St.getEnv();if(!(r instanceof u)&&!Lu(r))throw new Error("createCanvasFromMedia - media has not finished loading yet");const{width:p,height:y}=l||ea(r),g=Rc({width:p,height:y});return r instanceof u?is(g).putImageData(r,0,0):is(g).drawImage(r,0,0,p,y),g}const Km=Je(Ze());async function KS(r,l){const u=l||St.getEnv().createCanvasElement(),[p,y,g]=r.shape.slice(Rs(r)?1:0),I=Km.tidy(()=>r.as3D(p,y,g).toInt());return await Km.browser.toPixels(I,u),I.dispose(),u}function Xm(r){const{Image:l,Canvas:u,Video:p}=St.getEnv();return r instanceof l||r instanceof u||r instanceof p}const WJ=1e-7,$J=1e-4;class i2{time(r){return se("time")}read(r){return se("read")}readSync(r){return se("readSync")}numDataIds(){return se("numDataIds")}disposeData(r){return se("disposeData")}write(r,l,u){return se("write")}move(r,l,u,p){return se("move")}memory(){return se("memory")}floatPrecision(){return se("floatPrecision")}epsilon(){return this.floatPrecision()===32?WJ:$J}batchMatMul(r,l,u,p){return se("batchMatMul")}fusedBatchMatMul({a:r,b:l,transposeA:u,transposeB:p,bias:y,activation:g,preluActivationWeights:I}){return se("fusedBatchMatMul")}slice(r,l,u){return se("slice")}stridedSlice(r,l,u,p){return se("stridedSlice")}unstack(r,l){return se("unstack")}reverse(r,l){return se("reverse")}concat(r,l){return se("concat")}neg(r){return se("neg")}add(r,l){return se("add")}addN(r){return se("addN")}subtract(r,l){return se("subtract")}multiply(r,l){return se("multiply")}realDivide(r,l){return se("realDivide")}floorDiv(r,l){return se("floorDiv")}sum(r,l){return se("sum")}prod(r,l){return se("prod")}unsortedSegmentSum(r,l,u){return se("unsortedSegmentSum")}argMin(r,l){return se("argMin")}argMax(r,l){return se("argMax")}equal(r,l){return se("equal")}notEqual(r,l){return se("notEqual")}less(r,l){return se("less")}lessEqual(r,l){return se("lessEqual")}greater(r,l){return se("greater")}greaterEqual(r,l){return se("greaterEqual")}logicalNot(r){return se("logicalNot")}logicalAnd(r,l){return se("logicalAnd")}logicalOr(r,l){return se("logicalOr")}where(r){return se("where")}select(r,l,u){return se("select")}topk(r,l,u){return se("topk")}min(r,l){return se("min")}minimum(r,l){return se("minimum")}mod(r,l){return se("mod")}max(r,l){return se("max")}maximum(r,l){return 
se("maximum")}all(r,l){return se("all")}any(r,l){return se("any")}squaredDifference(r,l){return se("squaredDifference")}ceil(r){return se("ceil")}floor(r){return se("floor")}round(r){return se("round")}sign(r){return se("sign")}isNaN(r){return se("isNaN")}isInf(r){return se("isInf")}isFinite(r){return se("isFinite")}pow(r,l){return se("pow")}exp(r){return se("exp")}expm1(r){return se("expm1")}softmax(r,l){return se("softmax")}log(r){return se("log")}log1p(r){return se("log1p")}sqrt(r){return se("sqrt")}rsqrt(r){return se("rsqrt")}square(r){return se("square")}reciprocal(r){return se("reciprocal")}relu(r){return se("relu")}relu6(r){return se("relu6")}prelu(r,l){return se("prelu")}elu(r){return se("elu")}eluDer(r,l){return se("eluDer")}selu(r){return se("selu")}int(r){return se("int")}clip(r,l,u){return se("clip")}abs(r){return se("abs")}complexAbs(r){return se("complexAbs")}sigmoid(r){return se("sigmoid")}softplus(r){return se("softplus")}sin(r){return se("sin")}cos(r){return se("cos")}tan(r){return se("tan")}asin(r){return se("asin")}acos(r){return se("acos")}atan(r){return se("atan")}atan2(r,l){return se("atan2")}sinh(r){return se("sinh")}cosh(r){return se("cosh")}tanh(r){return se("tanh")}asinh(r){return se("asinh")}acosh(r){return se("acosh")}atanh(r){return se("atanh")}erf(r){return se("erf")}step(r,l){return se("step")}fusedConv2d({input:r,filter:l,convInfo:u,bias:p,activation:y,preluActivationWeights:g}){return se("fusedConv2d")}conv2d(r,l,u){return se("conv2d")}conv2dDerInput(r,l,u){return se("conv2dDerInput")}conv2dDerFilter(r,l,u){return se("conv2dDerFilter")}fusedDepthwiseConv2D({input:r,filter:l,convInfo:u,bias:p,activation:y,preluActivationWeights:g}){return se("fusedDepthwiseConv2D")}depthwiseConv2D(r,l,u){return se("depthwiseConv2D")}depthwiseConv2DDerInput(r,l,u){return se("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(r,l,u){return se("depthwiseConv2DDerFilter")}conv3d(r,l,u){return se("conv3d")}conv3dDerInput(r,l,u){return se("conv3dDerInput")}conv3dDerFilter(r,l,u){return se("conv3dDerFilter")}maxPool(r,l){return se("maxPool")}maxPoolBackprop(r,l,u,p){return se("maxPoolBackprop")}avgPool(r,l){return se("avgPool")}avgPoolBackprop(r,l,u){return se("avgPoolBackprop")}avgPool3d(r,l){return se("avgPool3d")}avgPool3dBackprop(r,l,u){return se("avgPool3dBackprop")}maxPool3d(r,l){return se("maxPool3d")}maxPool3dBackprop(r,l,u,p){return se("maxPool3dBackprop")}reshape(r,l){return se("reshape")}cast(r,l){return se("cast")}tile(r,l){return se("tile")}pad(r,l,u){return se("pad")}transpose(r,l){return se("transpose")}gather(r,l,u){return se("gather")}gatherND(r,l){return se("gatherND")}scatterND(r,l,u){return se("scatterND")}batchToSpaceND(r,l,u){return se("batchToSpaceND")}spaceToBatchND(r,l,u){return se("spaceToBatchND")}resizeBilinear(r,l,u,p){return se("resizeBilinear")}resizeBilinearBackprop(r,l,u){return se("resizeBilinearBackprop")}resizeNearestNeighbor(r,l,u,p){return se("resizeNearestNeighbor")}resizeNearestNeighborBackprop(r,l,u){return se("resizeNearestNeighborBackprop")}batchNorm(r,l,u,p,y,g){return se("batchNorm")}localResponseNormalization4D(r,l,u,p,y){return se("localResponseNormalization4D")}LRNGrad(r,l,u,p,y,g,I){return se("LRNGrad")}multinomial(r,l,u,p){return se("multinomial")}oneHot(r,l,u,p){return se("oneHot")}cumsum(r,l,u,p){return se("cumsum")}nonMaxSuppression(r,l,u,p,y){return se("nonMaxSuppression")}fft(r){return se("fft")}ifft(r){return se("ifft")}complex(r,l){return se("complex")}real(r){return se("real")}imag(r){return 
se("imag")}cropAndResize(r,l,u,p,y,g){return se("cropAndResize")}depthToSpace(r,l,u){return se("depthToSpace")}split(r,l,u){return se("split")}sparseToDense(r,l,u,p){return se("sparseToDense")}diag(r){return se("diag")}fill(r,l,u){return se("fill")}onesLike(r){return se("onesLike")}zerosLike(r){return se("zerosLike")}linspace(r,l,u){return se("linspace")}dispose(){return se("dispose")}}function se(r){throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function J(r,l){if(!r)throw new Error(typeof l=="string"?l:l())}function tn(r,l,u=""){J(Iu(r,l),()=>u+` Shapes ${r} and ${l} must match`)}function Oc(r){J(r!=null,()=>"The input to the tensor constructor must be a non-null value.")}function ta(r,l=[],u=!1){if(l==null&&(l=[]),Array.isArray(r)||Os(r)&&!u)for(let p=0;p=0)u*=r[g];else if(r[g]===-1){if(p!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${p} and dim ${g}`);p=g}else if(r[g]<0)throw Error(`Shapes can not be < 0. Found ${r[g]} at dim ${g}`);if(p===-1){if(l>0&&l!==u)throw Error(`Size(${l}) must match the product of shape ${r}`);return r}if(u===0)throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`);if(l%u!==0)throw Error(`The implicit shape can't be a fractional number. Got ${l} / ${u}`);const y=r.slice();return y[p]=l/u,y}function ft(r,l){const u=l.length;return r=r==null?l.map((p,y)=>y):[].concat(r),J(r.every(p=>p>=-u&&p`All values in axis param must be in range [-${u}, ${u}) but got axis ${r}`),J(r.every(p=>nn(p)),()=>`All values in axis param must be integers but got axis ${r}`),r.map(p=>p<0?u+p:p)}function o2(r,l){const u=[],p=[],y=l!=null&&Array.isArray(l)&&l.length===0,g=l==null||y?null:ft(l,r).sort();let I=0;for(let S=0;SS)&&r[S]===1&&(u.push(r[S]),p.push(S)),g[I]<=S&&I++}r[S]!==1&&(u.push(r[S]),p.push(S))}return{newShape:u,keptDims:p}}function a2(r,l){let u=null;if(r==null||r==="float32")u=new Float32Array(l);else if(r==="int32")u=new Int32Array(l);else if(r==="bool")u=new Uint8Array(l);else if(r==="string")u=new Array(l);else throw new Error(`Unknown data type ${r}`);return u}function c2(r,l){for(let u=0;ul+=u.length),l}function xu(r){return typeof r=="string"||r instanceof String}function UJ(r){return typeof r=="boolean"}function BJ(r){return typeof r=="number"}function Tu(r){return Array.isArray(r)?Tu(r[0]):r instanceof Float32Array?"float32":r instanceof Int32Array||r instanceof Uint8Array?"int32":BJ(r)?"float32":xu(r)?"string":UJ(r)?"bool":"float32"}function XS(r){return!!(r&&r.constructor&&r.call&&r.apply)}function Au(r){const l=r.length;if(l<2)return[];const u=new Array(l-1);u[l-2]=r[l-1];for(let p=l-3;p>=0;--p)u[p]=u[p+1]*r[p+1];return u}function d2(r,l,u){const p=new Array;if(l.length===1){const y=l[0];for(let g=0;gS*T);for(let S=0;Sp*y);if(u===0)return[];if(u!==l.length)throw new Error(`[${r}] does not match the input size ${l.length}.`);return d2(0,r,l)}function Jm(r,l){const u=na(r,l);for(let p=0;p{J(Number.isInteger(l)&&l>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${r}].`)})}function Qm(r){return r&&r.then&&typeof r.then=="function"}const p2="tfjsflags";class m2{constructor(r){this.global=r,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(r,l){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. 
Overwriting the platform with ${l}.`),this.platformName=r,this.platform=l}registerFlag(r,l,u){if(this.flagRegistry[r]={evaluationFn:l,setHook:u},this.urlFlags[r]!=null){const p=this.urlFlags[r];console.warn(`Setting feature override from URL ${r}: ${p}.`),this.set(r,p)}}async getAsync(r){return r in this.flags?this.flags[r]:(this.flags[r]=await this.evaluateFlag(r),this.flags[r])}get(r){if(r in this.flags)return this.flags[r];const l=this.evaluateFlag(r);if(Qm(l))throw new Error(`Flag ${r} cannot be synchronously evaluated. Please use getAsync() instead.`);return this.flags[r]=l,this.flags[r]}getNumber(r){return this.get(r)}getBool(r){return this.get(r)}getFlags(){return this.flags}get features(){return this.flags}set(r,l){if(this.flagRegistry[r]==null)throw new Error(`Cannot set flag ${r} as it has not been registered.`);this.flags[r]=l,this.flagRegistry[r].setHook!=null&&this.flagRegistry[r].setHook(l)}evaluateFlag(r){if(this.flagRegistry[r]==null)throw new Error(`Cannot evaluate flag '${r}': no evaluation function found.`);return this.flagRegistry[r].evaluationFn()}setFlags(r){this.flags=Object.assign({},r)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;const r=MJ(this.global.location.search);if(p2 in r){const l=r[p2].split(",");l.forEach(u=>{const[p,y]=u.split(":");this.urlFlags[p]=PJ(p,y)})}}}function MJ(r){const l={};return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(u,...p)=>(zJ(l,p[0],p[1]),p.join("="))),l}function zJ(r,l,u){r[decodeURIComponent(l)]=decodeURIComponent(u||"")}function PJ(r,l){if(l=l.toLowerCase(),l==="true"||l==="false")return l==="true";if(`${+l}`===l)return+l;throw new Error(`Could not parse value flag value ${l} for flag ${r}.`)}function Es(){return f2}let f2=null;function g2(r){f2=r}let ZS;function QS(){if(ZS==null){let r;if(typeof window!="undefined")r=window;else if(typeof global!="undefined")r=global;else if(typeof process!="undefined")r=process;else if(typeof self!="undefined")r=self;else throw new Error("Could not find a global object");ZS=r}return ZS}function VJ(){const r=QS();return r._tfGlobals==null&&(r._tfGlobals=new Map),r._tfGlobals}function eI(r,l){const u=VJ();if(u.has(r))return u.get(r);{const p=l();return u.set(r,p),u.get(r)}}const 
ef="Abs",y2="Acos",b2="Acosh",Dc="Add",w2="AddN",L2="ArgMax",S2="ArgMin",I2="Asin",x2="Asinh",T2="Atan",A2="Atanh",v2="Atan2",N2="AvgPool",C2="AvgPoolBackprop",R2="AvgPool3D",O2="AvgPool3DBackprop",tf="BatchMatMul",nf="BatchToSpaceND",sf="BroadcastTo",kc="Cast",E2="Ceil",D2="ClipByValue",k2="Complex",rf="Concat",of="Conv2D",F2="Conv2DBackpropFilter",af="Conv2DBackpropInput",_2="Conv3D",W2="Conv3DBackpropFilterV2",$2="Conv3DBackpropInputV2",cf="Cos",lf="Cosh",hf="Cumsum",U2="CropAndResize",B2="DepthwiseConv2dNative",M2="DepthwiseConv2dNativeBackpropFilter",P2="DepthwiseConv2dNativeBackpropInput",z2="Dilation2D",V2="Dilation2DBackpropInput",G2="Dilation2DBackpropFilter",uf="Div",Y2="Elu",H2="EluGrad",q2="Erf",j2="Equal",df="Exp",K2="Expm1",X2="FFT",J2="Fill",Z2="FlipLeftRight",pf="Floor",mf="FloorDiv",Q2="FusedBatchNorm",ff="GatherV2",eR="Greater",gf="GreaterEqual",yf="Identity",tR="IFFT",nR="Imag",sR="IsFinite",iR="IsInf",rR="IsNan",oR="Less",aR="LessEqual",bf="Log",wf="Log1p",cR="LogicalAnd",lR="LogicalNot",hR="LogSoftmax",uR="LRN",dR="LRNBackprop",Lf="Max",Sf="Maximum",pR="MaxPool",mR="MaxPoolBackprop",fR="MaxPool3D",gR="MaxPool3DBackprop",yR="Mean",If="Min",xf="Minimum",bR="MirrorPad",wR="Mod",Tf="Multiply",Af="Negate",LR="NotEqual",SR="NonMaxSuppressionV3",IR="NonMaxSuppressionV4",xR="NonMaxSuppressionV5",TR="OnesLike",AR="OneHot",vf="PadV2",Nf="Pow",vR="Prelu",NR="Range",CR="Real",RR="Reciprocal",Cf="Relu",Rf="Reshape",Of="ResizeNearestNeighbor",OR="ResizeNearestNeighborGrad",Ef="ResizeBilinear",ER="ResizeBilinearGrad",DR="Relu6",Df="Reverse",kR="Round",kf="Rsqrt",Ff="SelectV2",FR="Selu",_f="Slice",Wf="Sin",$f="Sinh",_R="Sign",Uf="Sigmoid",WR="Softplus",Bf="Sqrt",Mf="Sum",Pf="SpaceToBatchND",zf="SplitV",$R="Softmax",Vf="SquaredDifference",UR="Square",Gf="Sub",BR="Tan",MR="Tanh",Yf="Tile",Hf="Transpose",qf="Unpack",jf="UnsortedSegmentSum",Kf="ZerosLike",Xf="Step",tI="FromPixels",PR="RotateWithOffset";const zR=eI("kernelRegistry",()=>new Map),nI=eI("gradRegistry",()=>new Map);function Jf(r,l){const u=GJ(r,l);return zR.get(u)}function sI(r){return nI.get(r)}function iI(r){const l=zR.entries(),u=[];for(;;){const{done:p,value:y}=l.next();if(p)break;const[g,I]=y,[S]=g.split("_");S===r&&u.push(I)}return u}function VR(r){const{kernelName:l}=r;nI.has(l)&&(Es().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${l}'`)),nI.set(l,r)}function GJ(r,l){return`${l}_${r}`}function YJ(r,l){return r instanceof Float32Array&&l==="float32"||r instanceof Int32Array&&l==="int32"||r instanceof Uint8Array&&l==="bool"}function Zf(r,l){if(l==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(r)&&(r=ta(r)),Es().getBool("DEBUG")&&c2(r,l),YJ(r,l))return r;if(l==null||l==="float32"||l==="complex64")return new Float32Array(r);if(l==="int32")return new Int32Array(r);if(l==="bool"){const u=new Uint8Array(r.length);for(let p=0;p{p=u()},g=this.backendTimer.time(y);for(let S=0;S{HJ(C,T.dtype,r)})}const I={kernelName:r,outputs:p,inputs:l,timeMs:g.then(S=>S.kernelMs),extraInfo:g.then(S=>S.getExtraProfileInfo!=null?S.getExtraProfileInfo():"")};return I}logKernelProfile(r){const{kernelName:l,outputs:u,timeMs:p,inputs:y,extraInfo:g}=r;u.forEach(I=>{Promise.all([I.data(),p,g]).then(S=>{this.logger.logKernelProfile(l,I,S[0],S[1],y,S[2])})})}}function HJ(r,l,u){if(l!=="float32")return!1;for(let p=0;p0?ne:""} `}}console.log(`%c${S} %c${I} %c${T}D ${D} %c${C} %c${_} %c${g}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}}function 
HR(r,l,u){const p={},y={};for(let T=0;Tp[te.id]=!0),B=!0,y[C.id]=!0;break}if(B)break}}const g={};g[u.id]=!0;const I={};for(let T=r.length-1;T>=0;T--){const C=r[T],D=C.inputs;for(let _=0;_=0;y--){const g=l[y],I=[];if(g.outputs.forEach(T=>{const C=r[T.id];C!=null?I.push(C):I.push(null)}),g.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${g.kernelName}.`);const S=g.gradient(I);for(const T in g.inputs){if(!(T in S))throw new Error(`Cannot backprop through input ${T}. Available gradients found: ${Object.keys(S)}.`);const C=u(()=>S[T]());if(C.dtype!=="float32")throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input ${T} must have 'float32' dtype, but has '${C.dtype}'`);const D=g.inputs[T];if(!Iu(C.shape,D.shape))throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input '${T}' has shape '${C.shape}', which does not match the shape of the input '${D.shape}'`);if(r[D.id]==null)r[D.id]=C;else{const _=r[D.id];r[D.id]=p(_,C),_.dispose()}}}}const jR=20,vu=3,aI=7;function KR(r,l,u,p){const y=Au(l),g=jJ(r,l,u,y),I=l.length,S=Qf(r,l,u,y,g),T=["Tensor"];return p&&(T.push(` dtype: ${u}`),T.push(` rank: ${I}`),T.push(` shape: [${l}]`),T.push(" values:")),T.push(S.map(C=>" "+C).join(` -`)),T.join(` -`)}function jJ(r,l,u,p){const y=Zt(l),g=p[p.length-1],I=new Array(g).fill(0),S=l.length,T=u==="complex64"?Cu(r):r;if(S>1)for(let C=0;CjR){const P=vu*I;let ge=Array.from(r.slice(0,P)),ae=Array.from(r.slice((S-vu)*I,S*I));return u==="complex64"&&(ge=Cu(ge),ae=Cu(ae)),["["+ge.map((Le,ve)=>Nu(Le,y[ve],u)).join(", ")+", ..., "+ae.map((Le,ve)=>Nu(Le,y[S-vu+ve],u)).join(", ")+"]"]}const te=u==="complex64"?Cu(r):Array.from(r);return["["+te.map((P,ge)=>Nu(P,y[ge],u)).join(", ")+"]"]}const C=l.slice(1),D=p.slice(1),_=p[0]*I,A=[];if(S>jR){for(let te=0;te`Length of values '${p}' does not match the size inferred by the shape '${this.size}'.`)}if(l==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=u||a2(l,this.size),this.strides=Au(r)}set(r,...l){l.length===0&&(l=[0]),J(l.length===this.rank,()=>`The number of provided coordinates (${l.length}) must match the rank (${this.rank})`);const u=this.locToIndex(l);this.values[u]=r}get(...r){r.length===0&&(r=[0]);let l=0;for(const p of r){if(p<0||p>=this.shape[l]){const y=`Requested out of range element at ${r}. Buffer shape=${this.shape}`;throw new Error(y)}l++}let u=r[r.length-1];for(let p=0;poI(u))}catch(u){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return r}dataSync(){this.throwIfDisposed();const r=Bi().readSync(this.dataId);if(this.dtype==="string")try{return r.map(l=>oI(l))}catch(l){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return r}async bytes(){this.throwIfDisposed();const r=await Bi().read(this.dataId);return this.dtype==="string"?r:new Uint8Array(r.buffer)}dispose(){if(this.isDisposed)return;Bi().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(r=!1){return Fc.print(this,r)}clone(){return this.throwIfDisposed(),Fc.clone(this)}toString(r=!1){const l=this.dataSync();return KR(l,this.shape,this.dtype,r)}cast(r){return this.throwIfDisposed(),Fc.cast(this,r)}variable(r=!0,l,u){return this.throwIfDisposed(),Bi().makeVariable(this,r,l,u)}}Object.defineProperty(En,Symbol.hasInstance,{value:r=>!!r&&r.data!=null&&r.dataSync!=null&&r.throwIfDisposed!=null});class eg extends En{constructor(r,l,u,p){super(r.shape,r.dtype,r.dataId,p);this.trainable=l,this.name=u}assign(r){if(r.dtype!==this.dtype)throw new Error(`dtype of the new value (${r.dtype}) and previous value (${this.dtype}) must match`);if(!Iu(r.shape,this.shape))throw new Error(`shape of the new value (${r.shape}) and previous value (${this.shape}) must match`);Bi().disposeTensor(this),this.dataId=r.dataId,Bi().incRef(this,null)}dispose(){Bi().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty(eg,Symbol.hasInstance,{value:r=>r instanceof En&&r.assign!=null&&r.assign instanceof Function});var tO;(function(r){r.R0="R0",r.R1="R1",r.R2="R2",r.R3="R3",r.R4="R4",r.R5="R5",r.R6="R6"})(tO||(tO={}));var cI;(function(r){r.float32="float32",r.int32="int32",r.bool="int32",r.complex64="complex64"})(cI||(cI={}));var lI;(function(r){r.float32="float32",r.int32="int32",r.bool="bool",r.complex64="complex64"})(lI||(lI={}));var hI;(function(r){r.float32="float32",r.int32="float32",r.bool="float32",r.complex64="complex64"})(hI||(hI={}));var uI;(function(r){r.float32="complex64",r.int32="complex64",r.bool="complex64",r.complex64="complex64"})(uI||(uI={}));const XJ={float32:hI,int32:cI,bool:lI,complex64:uI};function nO(r,l){if(r==="string"||l==="string"){if(r==="string"&&l==="string")return"string";throw new Error(`Can not upcast ${r} with ${l}`)}return XJ[r][l]}function Lt(r,l){if(r.dtype===l.dtype)return[r,l];const u=nO(r.dtype,l.dtype);return[r.cast(u),l.cast(u)]}function tg(r){const l=[],u=new Set;return sO(r,l,u),l}function sO(r,l,u){if(r==null)return;if(r instanceof En){l.push(r);return}if(!JJ(r))return;const p=r;for(const y in p){const g=p[y];u.has(g)||(u.add(g),sO(g,l,u))}}function JJ(r){return Array.isArray(r)||typeof r=="object"}class iO{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(const r in this.registeredVariables)this.registeredVariables[r].dispose()}}class Ru{constructor(r){this.ENV=r,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new iO}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;const r=this.getSortedBackends();for(let l=0;l{l.setupFunc!=null&&l.setupFunc(this.backendInstance)})}disposeRegisteredKernels(r){const l=iI(r);l.forEach(u=>{u.disposeFunc!=null&&u.disposeFunc(this.registry[r])})}initializeBackend(r){const 
l=this.registryFactory[r];if(l==null)throw new Error(`Cannot initialize backend ${r}, no registration found.`);try{const u=l.factory();if(u&&!(u instanceof i2)&&typeof u.then=="function"){const p=++this.pendingBackendInitId,y=u.then(g=>p(pthis.registryFactory[l].priority-this.registryFactory[r].priority)}initializeBackendsAndReturnBest(){const r=this.getSortedBackends();for(let l=0;lthis.startScope(u),()=>this.endScope(p),()=>(p=l(),p instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),p))}scopedRun(r,l,u){r();try{const p=u();return l(),p}catch(p){throw l(),p}}nextTensorId(){return Ru.nextTensorId++}nextVariableId(){return Ru.nextVariableId++}clone(r){const l=this.makeTensorFromDataId(r.dataId,r.shape,r.dtype),u={x:r},p=g=>({x:()=>{const I="float32",S={x:g},T={dtype:I};return H.runKernelFunc(C=>C.cast(g,I),S,null,kc,T)}}),y=[];return this.addTapeNode(this.state.activeScope.name,u,[l],p,y,{}),l}runKernel(r,l,u,p,y){const g=null,I=null;return this.runKernelFunc(g,l,I,r,u,p,y)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(r,l,u){const p=this.backend.numDataIds();let y=0;u.forEach(S=>{y+=S.dtype==="complex64"?3:1});const g=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],I=p-l-y-g;if(I>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${I} data ids) after running '${r}'`)}runKernelFunc(r,l,u,p,y,g,I){let S,T=[];const C=this.isTapeOn();p==null&&(p=this.state.activeScope!=null?this.state.activeScope.name:"");const D=this.state.numBytes,_=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let A;const B=Jf(p,this.backendName);let ne;if(B!=null)A=()=>{const P=this.backend.numDataIds();ne=B.kernelFunc({inputs:l,attrs:y,backend:this.backend});const ge=Array.isArray(ne)?ne:[ne];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,P,ge);const ae=ge.map(({dataId:Le,shape:ve,dtype:Ve})=>this.makeTensorFromDataId(Le,ve,Ve));if(C){let Le=this.getTensorsForGradient(p,l,ae);if(Le==null){I==null&&(I=[]);const ve=ae.filter((Ve,at)=>I[at]);Le=(g||[]).slice().concat(ve)}T=this.saveTensorsForBackwardMode(Le)}return ae};else{const P=ge=>{if(!C)return;T=ge.map(ae=>this.keep(this.clone(ae)))};A=()=>{const ge=this.backend.numDataIds();ne=this.tidy(()=>r(this.backend,P));const ae=Array.isArray(ne)?ne:[ne];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(p,ge,ae),ae}}let te;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?S=A():(te=this.profiler.profileKernel(p,l,()=>A()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(te),S=te.outputs)}),C&&this.addTapeNode(p,l,S,u,T,y),this.state.profiling&&this.state.activeProfile.kernels.push({name:p,bytesAdded:this.state.numBytes-D,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-_,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(l).map(P=>l[P]!=null?l[P].shape:null),outputShapes:S.map(P=>P.shape),kernelTimeMs:te.timeMs,extraInfo:te.extraInfo}),Array.isArray(ne)?S:S[0]}saveTensorsForBackwardMode(r){const l=r.map(u=>this.keep(this.clone(u)));return l}getTensorsForGradient(r,l,u){const p=sI(r);if(p!=null){const y=p.inputsToSave||[],g=p.outputsToSave||[];let I;p.saveAllInputs?(J(Array.isArray(l),()=>"saveAllInputs is true, expected inputs to be an array."),I=Object.keys(l).map(T=>l[T])):I=y.map(T=>l[T]);const S=u.filter((T,C)=>g[C]);return I.concat(S)}return 
null}makeTensor(r,l,u,p){if(r==null)throw new Error("Values passed to engine.makeTensor() are null");u=u||"float32",p=p||this.backend;let y=r;u==="string"&&xu(r[0])&&(y=r.map(S=>GR(S)));const g=p.write(y,l,u),I=new En(l,u,g,this.nextTensorId());if(this.incRef(I,p),u==="string"){const S=this.state.tensorInfo.get(g),T=u2(y);this.state.numBytes+=T-S.bytes,S.bytes=T}return I}makeTensorFromDataId(r,l,u,p){u=u||"float32";const y=new En(l,u,r,this.nextTensorId());return this.incRef(y,p),y}makeVariable(r,l=!0,u,p){u=u||this.nextVariableId().toString(),p!=null&&p!==r.dtype&&(r=r.cast(p));const y=new eg(r,l,u,this.nextTensorId());if(this.state.registeredVariables[y.name]!=null)throw new Error(`Variable with name ${y.name} was already registered`);return this.state.registeredVariables[y.name]=y,this.incRef(y,this.backend),y}incRef(r,l){const u=this.state.tensorInfo.has(r.dataId)?this.state.tensorInfo.get(r.dataId).refCount:0;if(this.state.numTensors++,r.dtype==="string"&&this.state.numStringTensors++,u===0){this.state.numDataBuffers++;let p=0;r.dtype!=="complex64"&&r.dtype!=="string"&&(p=r.size*h2(r.dtype)),this.state.tensorInfo.set(r.dataId,{backend:l||this.backend,dtype:r.dtype,shape:r.shape,bytes:p,refCount:0}),this.state.numBytes+=p}this.state.tensorInfo.get(r.dataId).refCount++,r instanceof eg||this.track(r)}disposeTensor(r){if(!this.state.tensorInfo.has(r.dataId))return;this.state.numTensors--,r.dtype==="string"&&this.state.numStringTensors--;const l=this.state.tensorInfo.get(r.dataId),u=l.refCount;u<=1?(r.dtype!=="complex64"&&(this.state.numBytes-=l.bytes),this.state.numDataBuffers--,l.backend.disposeData(r.dataId),this.state.tensorInfo.delete(r.dataId)):this.state.tensorInfo.get(r.dataId).refCount--}disposeVariables(){for(const r in this.state.registeredVariables){const l=this.state.registeredVariables[r];this.disposeVariable(l)}}disposeVariable(r){this.disposeTensor(r),this.state.registeredVariables[r.name]!=null&&delete this.state.registeredVariables[r.name]}memory(){const r=this.backend.memory();return r.numTensors=this.state.numTensors,r.numDataBuffers=this.state.numDataBuffers,r.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(r.unreliable=!0,r.reasons==null&&(r.reasons=[]),r.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),r}async profile(r){this.state.profiling=!0;const l=this.state.numBytes,u=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await r(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(p=>p.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-l,this.state.activeProfile.newTensors=this.state.numTensors-u;for(const p of this.state.activeProfile.kernels)p.kernelTimeMs=await p.kernelTimeMs,p.extraInfo=await p.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(r,l,u,p,y,g){const I={id:this.state.nextTapeNodeId++,kernelName:r,inputs:l,outputs:u,saved:y},S=sI(r);S!=null&&(p=S.gradFunc),p!=null&&(I.gradient=T=>(T=T.map((C,D)=>{if(C==null){const _=u[D],A=na(_.size,_.dtype);return this.makeTensor(A,_.shape,_.dtype)}return C}),p(T.length>1?T:T[0],y,g))),this.state.activeTape.push(I)}keep(r){return r.kept=!0,r}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(r){const l={track:[],name:"unnamed 
scope",id:this.state.nextScopeId++};r&&(l.name=r),this.state.scopeStack.push(l),this.state.activeScope=l}endScope(r){const l=tg(r),u=new Set(l.map(y=>y.id));for(let y=0;y{!y.kept&&y.scopeId===p.id&&this.track(y)})}gradients(r,l,u,p=!1){if(J(l.length>0,()=>"gradients() received an empty list of xs."),u!=null&&u.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${u.dtype}'`);const y=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",r));J(y instanceof En,()=>"The result y returned by f() must be a tensor.");const g=HR(this.state.activeTape,l,y);if(!p&&g.length===0&&l.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{const I={};I[y.id]=u==null?ZJ(y.shape):u,qR(I,g,T=>this.tidy(T),QJ);const S=l.map(T=>I[T.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(T=>{for(const C of T.saved)C.dispose()}),this.state.activeTape=null),{value:y,grads:S}})}customGrad(r){return J(XS(r),()=>"The f passed in customGrad(f) must be a function."),(...l)=>{J(l.every(y=>y instanceof En),()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors");let u;const p={};return l.forEach((y,g)=>{p[g]=y}),this.runKernelFunc((y,g)=>(u=r(...l,g),J(u.value instanceof En,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),J(XS(u.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),u.value),p,(y,g)=>{const I=u.gradFunc(y,g),S=Array.isArray(I)?I:[I];J(S.length===l.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),J(S.every(C=>C instanceof En),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");const T={};return S.forEach((C,D)=>{T[D]=()=>C}),T})}}readSync(r){const l=this.state.tensorInfo.get(r);return l.backend.readSync(r)}read(r){const l=this.state.tensorInfo.get(r);return l.backend.read(r)}async time(r){const l=rI(),u=await this.backend.time(r);return u.wallMs=rI()-l,u}track(r){return this.state.activeScope!=null&&(r.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(r)),r}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new iO;for(const r in this.registry)this.disposeRegisteredKernels(r),this.registry[r].dispose(),delete this.registry[r];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}}Ru.nextTensorId=0;Ru.nextVariableId=0;function ZJ(r){const l=Jm(Zt(r),"float32");return H.makeTensor(l,r,"float32")}function dI(){const r=QS();if(r._tfengine==null){const l=new m2(r);r._tfengine=new Ru(l)}return g2(r._tfengine.ENV),ZR(()=>r._tfengine),r._tfengine}const H=dI();function QJ(r,l){const u={a:r,b:l};return H.runKernelFunc((p,y)=>{const g=p.add(r,l);return y([r,l]),g},u,null,Dc)}function rO(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}const yr=Es();yr.registerFlag("DEBUG",()=>!1,r=>{r&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});yr.registerFlag("IS_BROWSER",()=>rO());yr.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");yr.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));yr.registerFlag("PROD",()=>!1);yr.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>yr.getBool("DEBUG"));yr.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);yr.registerFlag("IS_TEST",()=>!1);function br(r,l){let u=r;if(Os(r))return l==="string"?[]:[r.length];if(!Array.isArray(r))return[];const p=[];for(;Array.isArray(u)||Os(u)&&l!=="string";)p.push(u.length),u=u[0];return Array.isArray(r)&&Es().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&oO(r,p,[]),p}function oO(r,l,u){if(u=u||[],!Array.isArray(r)&&!Os(r)){J(l.length===0,()=>`Element arr[${u.join("][")}] is a primitive, but should be an array/TypedArray of ${l[0]} elements`);return}J(l.length>0,()=>`Element arr[${u.join("][")}] should be a primitive, but is an array of ${r.length} elements`),J(r.length===l[0],()=>`Element arr[${u.join("][")}] should have ${l[0]} elements, but has ${r.length} elements`);const p=l.slice(1);for(let y=0;y=0&&(y=p),aO(p,y,l,u),r==null||!Os(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string"){const T=r==null?"null":r.constructor.name;throw new Error(`Argument '${l}' passed to '${u}' must be a Tensor or TensorLike, but got '${T}'`)}const g=br(r,y);!Os(r)&&!Array.isArray(r)&&(r=[r]);const I=!0,S=y!=="string"?Zf(r,y):ta(r,[],I);return H.makeTensor(S,g,y)}function ng(r,l,u,p="numeric"){if(!Array.isArray(r))throw new Error(`Argument ${l} passed to ${u} must be a \`Tensor[]\` or \`TensorLike[]\``);const y=r;return y.map((g,I)=>M(g,`${l}[${I}]`,u),p)}const cO="__op";function V(r){const l=Object.keys(r);if(l.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${l.length} keys.`);let u=l[0];const p=r[u];u.endsWith("_")&&(u=u.substring(0,u.length-1)),u=u+cO;const y=(...g)=>{H.startScope(u);try{const I=p(...g);return Qm(I)&&console.error("Cannot return a Promise inside of tidy."),H.endScope(I),I}catch(I){throw H.endScope(null),I}};return Object.defineProperty(y,"name",{value:u,configurable:!0}),y}function e9(r,l){const u=M(r,"real","complex"),p=M(l,"imag","complex");tn(u.shape,p.shape,`real and imag shapes, ${u.shape} and ${p.shape}, must match in call to tf.complex().`);const y=I=>I.complex(u,p),g={real:u,imag:p};return H.runKernelFunc(y,g,null,k2)}const Mi=V({complex_:e9});function Pi(r,l,u,p){if(p==null&&(p=Tu(r)),p==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Os(r)&&!Array.isArray(r)&&typeof r!="number"&&typeof r!="boolean"&&typeof r!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(l!=null){Zm(l);const y=Zt(l),g=Zt(u);J(y===g,()=>`Based on the provided shape, [${l}], the tensor should have ${y} values but has ${g}`);for(let I=0;I`Error creating a new Tensor. Inferred shape (${u}) does not match the provided shape (${l}). 
`)}}return!Os(r)&&!Array.isArray(r)&&(r=[r]),l=l||u,r=p!=="string"?Zf(r,p):ta(r,[],!0),H.makeTensor(r,l,p)}function pI(r,l,u){const p=br(r,u);return Pi(r,l,p,u)}function Ou(r,l="float32",u){return l=l||"float32",Zm(r),new JR(r,l,u)}function t9(r,l){const u=M(r,"x","cast");if(!l2(l))throw new Error(`Failed to cast to unknown dtype ${l}`);if(l==="string"&&u.dtype!=="string"||l!=="string"&&u.dtype==="string")throw new Error("Only strings can be casted to strings");const p={x:u},y={dtype:l};return H.runKernelFunc(g=>g.cast(u,l),p,null,kc,y)}const Ie=V({cast_:t9});function n9(r){const l=M(r,"x","clone",null),u=()=>H.makeTensorFromDataId(l.dataId,l.shape,l.dtype),p={x:l};return H.runKernelFunc(u,p,null,yf)}const pi=V({clone_:n9});function mI(r,l=!1){console.log(r.toString(l))}dI();const s9={buffer:Ou,cast:Ie,clone:pi,print:mI};QR(s9);function i9(r,l){const u=M(r,"x","reshape",null),p={x:u},y={shape:l},g=(I,S)=>(l=r2(l,u.size),J(u.size===Zt(l),()=>"new shape and old shape must have the same number of elements."),S([u]),I.reshape(u,l));return H.runKernelFunc(g,p,null,Rf,y)}const re=V({reshape_:i9});function r9(r,l,u=!1,p=!1){let y=M(r,"a","matMul"),g=M(l,"b","matMul");[y,g]=Lt(y,g);const I=(C,D)=>{D([y,g]);const _=u?y.shape[y.rank-2]:y.shape[y.rank-1],A=p?g.shape[g.rank-1]:g.shape[g.rank-2],B=u?y.shape[y.rank-1]:y.shape[y.rank-2],ne=p?g.shape[g.rank-2]:g.shape[g.rank-1],te=y.shape.slice(0,-2),P=g.shape.slice(0,-2),ge=Zt(te),ae=Zt(P),Le=ge===ae||ge===1||ae===1;J(y.rank>=2&&g.rank>=2&&Le,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. Got input batch dimensions of (${te}) and (${P}).`),J(_===A,()=>`Error in matMul: inner shapes (${_}) and (${A}) of Tensors with shapes ${y.shape} and ${g.shape} and transposeA=${u} and transposeB=${p} must match.`);const ve=ge>ae?te:P,Ve=ve.concat([B,ne]),at=u?re(y,[ge,_,B]):re(y,[ge,B,_]),pt=p?re(g,[ae,ne,A]):re(g,[ae,A,ne]),$t=C.batchMatMul(at,pt,u,p);return re($t,Ve)},S={a:y,b:g},T={transposeA:u,transposeB:p};return H.runKernelFunc(I,S,null,tf,T)}const yn=V({matMul_:r9});function o9(r,l){const u=M(r,"x","transpose");if(l==null&&(l=u.shape.map((g,I)=>I).reverse()),J(u.rank===l.length,()=>`Error in transpose: rank of input ${u.rank} must match length of perm ${l}.`),l.forEach(g=>{J(g>=0&&g`All entries in 'perm' must be between 0 and ${u.rank-1} but got ${l}`)}),u.rank<=1)return u.clone();const p={x:u},y={perm:l};return H.runKernelFunc(g=>g.transpose(u,l),p,null,Hf,y)}const xn=V({transpose_:o9});function fI(r,l,u){if(Oc(r),l!=null&&l.length!==3)throw new Error("tensor3d() requires shape to have three numbers");const p=br(r,u);if(p.length!==3&&p.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return Pi(r,l,p,u)}const gI={};Pm(gI,{fromPixels:()=>l9,toPixels:()=>c9});let _c;function a9(r,l=3){if(l>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(r==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let u=!1,p=!1,y=!1,g=!1,I=!1;if(r.data instanceof Uint8Array)u=!0;else if(typeof ImageData!="undefined"&&r instanceof ImageData)p=!0;else if(typeof HTMLVideoElement!="undefined"&&r instanceof HTMLVideoElement)y=!0;else if(typeof HTMLImageElement!="undefined"&&r instanceof HTMLImageElement)g=!0;else if(r.getContext!=null)I=!0;else throw new Error(`pixels passed to 
tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`);if(y){const B=2;if(y&&r.readyState element.")}const S=Jf(tI,H.backendName);if(S!=null){const B={pixels:r},ne={numChannels:l};return H.runKernel(tI,B,ne)}const[T,C]=y?[r.videoWidth,r.videoHeight]:[r.width,r.height];let D;I?D=r.getContext("2d").getImageData(0,0,T,C).data:p||u?D=r.data:(g||y)&&(_c==null&&(_c=document.createElement("canvas").getContext("2d")),_c.canvas.width=T,_c.canvas.height=C,_c.drawImage(r,0,0,T,C),D=_c.getImageData(0,0,T,C).data);let _;if(l===4)_=new Int32Array(D);else{const B=T*C;_=new Int32Array(B*l);for(let ne=0;ne4||g===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${g}`);if(u.dtype!=="float32"&&u.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${u.dtype}. Please use float32 or int32 tensors.`);const I=await u.data(),S=u.dtype==="float32"?255:1,T=new Uint8ClampedArray(y*p*4);for(let C=0;C1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${B}.`)}else if(u.dtype==="int32"&&(B<0||B>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${B}.`);g===1?(D[0]=B*S,D[1]=B*S,D[2]=B*S):D[A]=B*S}const _=C*4;T[_+0]=Math.round(D[0]),T[_+1]=Math.round(D[1]),T[_+2]=Math.round(D[2]),T[_+3]=Math.round(D[3])}if(l!=null){l.width=y,l.height=p;const C=l.getContext("2d"),D=new ImageData(T,y,p);C.putImageData(D,0,0)}return u!==r&&u.dispose(),T}const l9=V({fromPixels_:a9});function lO(r,l,u){const p=r.shape.length;J(p===l.length,()=>`Error in slice${p}D: Length of begin ${l} must match the rank of the array (${p}).`),J(p===u.length,()=>`Error in slice${p}D: Length of size ${u} must match the rank of the array (${p}).`);for(let y=0;y`Error in slice${p}D: begin[${y}] + size[${y}] (${l[y]+u[y]}) would overflow input.shape[${y}] (${r.shape[y]})`)}function sg(r,l,u){let p;const y=r.shape.length;typeof l=="number"?p=[l,...new Array(y-1).fill(0)]:l.length{J(I!==-1,()=>"slice() does not support negative begin indexing.")});let g;return u==null?g=new Array(y).fill(-1):typeof u=="number"?g=[u,...new Array(y-1).fill(-1)]:u.lengthI>=0?I:(J(I===-1,()=>`Negative size values should be exactly -1 but got ${I} for the slice() size at index ${S}.`),r.shape[S]-p[S])),[p,g]}function h9(r){Es().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(r+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}eO(h9);function hO(r,l){return H.tidy(r,l)}function uO(r){const l=tg(r);l.forEach(u=>u.dispose())}function u9(r,l){let u=M(r,"a","add"),p=M(l,"b","add");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.add(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Dc)}const vt=V({add_:u9});function d9(r,l){let u=M(r,"a","floorDiv"),p=M(l,"b","floorDiv");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.floorDiv(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,mf)}const yI=V({floorDiv_:d9});function p9(r,l){let u=M(r,"a","div"),p=M(l,"b","div");if([u,p]=Lt(u,p),u.dtype==="int32"&&p.dtype==="int32")return yI(u,p);const y=(S,T)=>{const C=S.realDivide(u,p);return T([u,p]),C},g={a:u,b:p},I={};return H.runKernelFunc(y,g,null,uf,I)}const Me=V({div_:p9});function m9(r,l){let u=M(r,"a","mul"),p=M(l,"b","mul");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.multiply(u,p);return S([u,p]),T},g={a:u,b:p};return 
H.runKernelFunc(y,g,null,Tf)}const le=V({mul_:m9});function f9(r){const l=M(r,"x","abs"),u={x:l};return H.runKernelFunc((p,y)=>(y([l]),l.dtype==="complex64"?p.complexAbs(l):p.abs(l)),u,null,ef)}const zn=V({abs_:f9});function g9(r,l){for(let u=0;ur[g]);return[u,y]}function rs(r,l){const u=l.map(p=>1);return y9(r,u,l)}function zi(r,l){if(g9(r,l))return null;const u=[];for(let p=0;pu.push(p)),u}function Wc(r){return r.map((l,u)=>[u,l]).sort((l,u)=>l[1]-u[1]).map(l=>l[0])}function io(r,l){const u=[];for(let p=l-r;p`The output # of rows (${S}) must be an integer. Change the stride and/or zero pad parameters`);const T=sa((I-l+2*p)/u+1,y);return J(nn(T),()=>`The output # of columns (${T}) must be an integer. Change the stride and/or zero pad parameters`),[S,T]}function S9(r,l,u,p,y,g){y==null&&(y=pO(r,l,p));const I=r[0],S=r[1],T=r[2],C=sa((I-l+2*y)/p+1,g);J(nn(C),()=>`The output # of depths (${C}) must be an integer. Change the stride and/or zero pad parameters`);const D=sa((S-l+2*y)/p+1,g);J(nn(D),()=>`The output # of rows (${D}) must be an integer. Change the stride and/or zero pad parameters`);const _=sa((T-l+2*y)/p+1,g);return J(nn(_),()=>`The output # of columns (${_}) must be an integer. Change the stride and/or zero pad parameters`),[C,D,_,u]}function pO(r,l,u,p=1){const y=$c(l,p);return Math.floor((r[0]*(u-1)-u+y)/2)}function ig(r){return typeof r=="number"?[r,r,r]:r.length===2?[r[0],r[1],1]:r}function bI(r){return typeof r=="number"?[r,r,r]:r}function $c(r,l){return l<=1?r:r+(r-1)*(l-1)}function b9(r,l,u,p,y,g,I,S,T){let C,D,_;if(typeof r=="number"){const A=r===0?"VALID":"NUMBER";C={top:r,bottom:r,left:r,right:r,type:A};const B=L9([l,u],g,p,r,S);D=B[0],_=B[1]}else if(r==="same"){D=Math.ceil(l/p),_=Math.ceil(u/y);const A=Math.max(0,(D-1)*p+g-l),B=Math.max(0,(_-1)*y+I-u),ne=Math.floor(A/2),te=A-ne,P=Math.floor(B/2),ge=B-P;C={top:ne,bottom:te,left:P,right:ge,type:"SAME"}}else if(r==="valid")C={top:0,bottom:0,left:0,right:0,type:"VALID"},D=Math.ceil((l-g+1)/p),_=Math.ceil((u-I+1)/y);else if(typeof r=="object"){const A=T==="channelsLast"?r[1][0]:r[2][0],B=T==="channelsLast"?r[1][1]:r[2][1],ne=T==="channelsLast"?r[2][0]:r[3][0],te=T==="channelsLast"?r[2][1]:r[3][1],P=A===0&&B===0&&ne===0&&te===0?"VALID":"EXPLICIT";C={top:A,bottom:B,left:ne,right:te,type:P},D=sa((l-g+A+B)/p+1,S),_=sa((u-I+ne+te)/y+1,S)}else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:C,outHeight:D,outWidth:_}}function w9(r,l,u,p,y,g,I,S,T,C,D){let _,A,B,ne;if(typeof r=="number"){const te=r===0?"VALID":"NUMBER";_={top:r,bottom:r,left:r,right:r,front:r,back:r,type:te};const P=S9([l,u,p,1],S,1,y,r,D);A=P[0],B=P[1],ne=P[2]}else if(r==="same"){A=Math.ceil(l/y),B=Math.ceil(u/g),ne=Math.ceil(p/I);const te=(A-1)*y+S-l,P=(B-1)*g+T-u,ge=(ne-1)*I+C-p,ae=Math.floor(te/2),Le=te-ae,ve=Math.floor(P/2),Ve=P-ve,at=Math.floor(ge/2),pt=ge-at;_={top:ve,bottom:Ve,left:at,right:pt,front:ae,back:Le,type:"SAME"}}else if(r==="valid")_={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},A=Math.ceil((l-S+1)/y),B=Math.ceil((u-T+1)/g),ne=Math.ceil((p-C+1)/I);else throw Error(`Unknown padding parameter: ${r}`);return{padInfo:_,outDepth:A,outHeight:B,outWidth:ne}}function sa(r,l){if(!l)return r;switch(l){case"round":return Math.round(r);case"ceil":return Math.ceil(r);case"floor":return Math.floor(r);default:throw new Error(`Unknown roundingMode ${l}`)}}function ro(r){const[l,u,p]=ig(r);return l===1&&u===1&&p===1}function oo(r,l){return ro(r)||ro(l)}function 
Uc(r){if(r==="NHWC")return"channelsLast";if(r==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${r}`)}function mO(r,l){const u=r[0].length;r.forEach((y,g)=>{J(y.length===u,()=>`Error in concat${u}D: rank of tensors[${g}] must be the same as the rank of the rest (${u})`)}),J(l>=0&&l`Error in concat${u}D: axis must be between 0 and ${u-1}.`);const p=r[0];r.forEach((y,g)=>{for(let I=0;I`Error in concat${u}D: Shape of tensors[${g}] (${y}) does not match the shape of the rest (${p}) along the non-concatenated axis ${g}.`)})}function fO(r,l){const u=r[0].slice();for(let p=1;p=1,()=>"Pass at least one tensor to concat");let u=ng(r,"tensors","concat");u[0].dtype==="complex64"&&u.forEach(I=>{if(I.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor - with dtype ${I.dtype}. `)});const p=(I,S)=>{const T=ft(l,u[0].shape)[0],C=fO(u.map(A=>A.shape),T);if(Zt(C)===0)return pI([],C);if(u=u.filter(A=>A.size>0),u.length===1)return u[0];const D=u.map(A=>A.shape);mO(D,T);const _=I.concat(u,T);return S(u),_},y=u,g={axis:l};return H.runKernelFunc(p,y,null,rf,g)}const Tn=V({concat_:I9});function x9(r){const l=M(r,"x","sigmoid"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sigmoid(l);return y([g]),g},u,null,Uf)}const wI=V({sigmoid_:x9});function T9(r,l,u){const p=M(r,"x","slice");if(p.rank===0)throw new Error("Slicing scalar is not possible");const y=(S,T)=>{const[C,D]=sg(p,l,u);return lO(p,C,D),T([p]),S.slice(p,C,D)},g={x:p},I={begin:l,size:u};return H.runKernelFunc(y,g,null,_f,I)}const Tt=V({slice_:T9});function A9(r,l,u){const p=M(r,"x","batchToSpaceND"),y=l.reduce((T,C)=>T*C);J(p.rank>=1+l.length,()=>`input rank is ${p.rank} but should be > than blockShape.length ${l.length}`),J(u.length===l.length,()=>`crops.length is ${u.length} but should be equal to blockShape.length ${l.length}`),J(p.shape[0]%y===0,()=>`input tensor batch is ${p.shape[0]} but is not divisible by the product of the elements of blockShape ${l.join(" * ")} === ${y}`);const g=T=>T.batchToSpaceND(p,l,u),I={x:p},S={blockShape:l,crops:u};return H.runKernelFunc(g,I,null,nf,S)}const LI=V({batchToSpaceND_:A9});function v9(r,l){let u=M(r,"broadcastTo","x");const p=u.shape;if(l.some(D=>!(D>0)||D%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${l}].`);if(l.lengthu.rank){const D=u.shape.slice();for(;D.length=0;D--)if(y[D]===l[D])g[D]=1;else if(u.shape[D]!==1)throw new Error(`broadcastTo(): [${p}] cannot be broadcast to [${l}].`);const I=g.map((D,_)=>D>1?_:-1).filter(D=>D>=0);if(I.length===0)return pi(u);const S=D=>D.tile(u,g),T={x:u},C={shape:l,inputShape:y};return H.runKernelFunc(S,T,null,sf,C)}const ag=V({broadcastTo_:v9});function N9(r,l,u,p,y="NHWC",g=[1,1],I){const S=M(r,"x","conv2d"),T=M(l,"filter","conv2d");let C=S,D=!1;S.rank===3&&(D=!0,C=re(S,[1,S.shape[0],S.shape[1],S.shape[2]])),J(C.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${C.rank}.`),J(T.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${T.rank}.`),I!=null&&J(nn(p),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${p}.`);const _=y==="NHWC"?C.shape[3]:C.shape[1];J(_===T.shape[2],()=>`Error in conv2d: depth of input (${_}) must match input depth for filter ${T.shape[2]}.`),J(oo(u,g),()=>`Error in conv2D: Either strides or dilations must be 1. 
Got strides ${u} and dilations '${g}'`);const A=(P,ge)=>{const ae=Uc(y),Le=mi(C.shape,T.shape,u,g,p,I,!1,ae),ve=P.conv2d(C,T,Le);return ge([C,T]),ve},B={x:C,filter:T},ne={strides:u,pad:p,dataFormat:y,dilations:g,dimRoundingMode:I},te=H.runKernelFunc(A,B,null,of,ne);return D?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const SI=V({conv2d_:N9});function C9(r,l,u,p,y,g="NHWC",I){J(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let S=r,T=l,C=!1;l.rank===3&&(C=!0,T=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]),S=[1,r[0],r[1],r[2]]),J(S.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${S.length}.`),J(T.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${T.rank}`),J(u.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${u.rank}`);const D=g==="NHWC"?S[3]:S[1],_=g==="NHWC"?T.shape[3]:T.shape[1];J(D===u.shape[2],()=>`Error in conv2dDerInput: depth of input (${D}) must match input depth for filter ${u.shape[2]}.`),J(_===u.shape[3],()=>`Error in conv2dDerInput: depth of output (${_}) must match output depth for filter ${u.shape[3]}.`),I!=null&&J(nn(y),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${I} but got pad ${y}.`);const A=(P,ge)=>{const ae=1,Le=Uc(g),ve=mi(S,u.shape,p,ae,y,I,!1,Le),Ve=P.conv2dDerInput(T,u,ve);return ge([T,u]),Ve},B={dy:T,filter:u},ne={strides:p,pad:y,dataFormat:g,dimRoundingMode:I,inputShape:S},te=H.runKernelFunc(A,B,null,af,ne);return C?re(te,[te.shape[1],te.shape[2],te.shape[3]]):te}const gO=V({conv2DBackpropInput_:C9});function R9(r,l,u,p,y){J(r.length===l.rank,()=>`Length of inShape (${r.length}) and rank of dy (${l.rank}) must match`);let g=r,I=l,S=!1;l.rank===4&&(S=!0,I=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]]),g=[1,r[0],r[1],r[2],r[3]]);const T=g[4],C=I.shape[4];J(g.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${g.length}.`),J(I.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${I.rank}`),J(u.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${u.rank}`),J(T===u.shape[3],()=>`Error in conv3dDerInput: depth of input (${T}) must match input depth for filter ${u.shape[3]}.`),J(C===u.shape[4],()=>`Error in conv3dDerInput: depth of output (${C}) must match output depth for filter ${u.shape[4]}.`);const D=ne=>{const te=1,P=Eu(g,u.shape,p,te,y);return ne.conv3dDerInput(I,u,P)},_={dy:I,filter:u},A={pad:y,strides:p,inputShape:g},B=H.runKernelFunc(D,_,null,$2,A);return S?re(B,[B.shape[1],B.shape[2],B.shape[3],B.shape[4]]):B}const yO=V({conv3DBackpropInput_:R9});function O9(r){const l=M(r,"x","cos"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.cos(l);return y([l]),g},u,null,cf)}const Du=V({cos_:O9});function E9(r){const l=M(r,"x","cosh"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.cosh(l);return y([l]),g},u,null,lf)}const II=V({cosh_:E9});function D9(r,l=0,u=!1,p=!1){const y=M(r,"x","cumsum"),g=(T,C)=>{const D=zi([l],y.rank);let _=y;D!=null&&(_=xn(y,D));const A=io(1,y.rank)[0];let B=T.cumsum(_,A,u,p);if(C([y]),D!=null){const ne=Wc(D);B=xn(B,ne)}return B},I={x:y},S={axis:l,exclusive:u,reverse:p};return H.runKernelFunc(g,I,null,hf,S)}const xI=V({cumsum_:D9});function Ot(r,l){const u=[];for(let p=0;p1)&&u.unshift(g)}return u}function ot(r,l){const u=[],p=Math.max(r.length,l.length);for(let y=0;yI.equal(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,j2)}const TI=V({equal_:k9});function F9(r,l,u){const 
p=M(l,"a","where"),y=M(u,"b","where"),g=M(r,"condition","where","bool"),I=ot(p.shape,y.shape),S=ag(p,I),T=ag(y,I);g.rank===1&&J(g.shape[0]===p.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),g.rank!==1&&tn(g.shape,T.shape,"Error in where: ");const C=(_,A)=>{const B=_.select(g,S,T);return A([g]),B},D={condition:g,t:S,e:T};return H.runKernelFunc(C,D,null,Ff)}const Vn=V({where_:F9});function _9(r){const l=M(r,"x","zerosLike"),u={x:l};return H.runKernelFunc(p=>p.zerosLike(l),u,null,Kf)}const Ke=V({zerosLike_:_9});function W9(r){const l=M(r,"x","exp"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.exp(l);return y([g]),g},u,null,df)}const Gn=V({exp_:W9});function $9(r,l=0){const u=null,p=M(r,"x","expandDims",u);J(l<=p.rank,()=>"Axis must be <= rank of the tensor");const y=p.shape.slice();return l<0&&(J(-(p.rank+1)<=l,()=>`Axis must be in the interval [${-(p.rank+1)}, ${p.rank}]`),l=p.rank+l+1),y.splice(l,0,1),re(p,y)}const Ds=V({expandDims_:$9});function U9(r,l){const u=null,p=M(r,"x","tile",u);J(p.rank===l.length,()=>`Error in transpose: rank of input ${p.rank} must match length of reps ${l}.`);const y=(T,C)=>{const D=T.tile(p,l);return C([p]),D},g=[p],I={x:p},S={reps:l};return H.runKernelFunc(y,I,null,Yf,S,g)}const ia=V({tile_:U9});function B9(r,l,u,p="float32"){l==null&&(l=r);const y=Ou([r,l],p),g=r<=l?r:l;for(let S=0;Sy.fill(r,l,u),{},null,J2,p)}function M9(r){const l=M(r,"x","floor"),u={x:l};return H.runKernelFunc(p=>p.floor(l),u,null,pf)}const NI=V({floor_:M9});function bO(r,l,u){const p=r.shape[u],y=[];let g=1,I=1;for(let S=0;S{const D=ft(u,p.shape)[0],_=bO(p,y,D),A=T.gather(p,re(y,[y.size]),D);return C([p,y]),re(A,_.outputShape)};return H.runKernelFunc(S,g,null,ff,I)}const CI=V({gather_:P9});function z9(r,l){let u=M(r,"a","greater"),p=M(l,"b","greater");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.greater(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,eR)}const fi=V({greater_:z9});function V9(r,l){let u=M(r,"a","greaterEqual"),p=M(l,"b","greaterEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.greaterEqual(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,gf)}const wr=V({greaterEqual_:V9});function G9(r){const l=M(r,"input","imag"),u=y=>y.imag(l),p={input:l};return H.runKernelFunc(u,p,null,nR)}const ku=V({imag_:G9});function Y9(r,l){let u=M(r,"a","maximum"),p=M(l,"b","maximum");[u,p]=Lt(u,p),u.dtype==="bool"&&(u=Ie(u,"int32"),p=Ie(p,"int32")),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.maximum(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Sf)}const RI=V({maximum_:Y9});function Fe(r,l){if((Os(r)&&l!=="string"||Array.isArray(r))&&l!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(l==="string"&&Os(r)&&!(r instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");const u=[],p=[];return Pi(r,u,p,l)}function H9(r,l){let u=M(r,"a","less"),p=M(l,"b","less");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.less(u,p),g={a:u,b:p};return H.runKernelFunc(y,g,null,oR)}const OI=V({less_:H9});function q9(r,l){let u=M(r,"a","lessEqual"),p=M(l,"b","lessEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.lessEqual(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,aR)}const Lr=V({lessEqual_:q9});function j9(r){const l=M(r,"x","log"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.log(l);return y([l]),g},u,null,bf)}const ao=V({log_:j9});function K9(r){const 
l=M(r,"x","log1p"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.log1p(l);return y([l]),g},u,null,wf)}const EI=V({log1p_:K9});function cg(r){return H.customGrad(r)}function X9(r){const l=M(r,"x","neg"),u={x:l};return H.runKernelFunc(p=>p.neg(l),u,null,Af)}const It=V({neg_:X9});function J9(r,l=null,u=!1){const p=M(r,"x","max"),y=(S,T)=>{const C=ft(l,p.shape);let D=C;const _=zi(D,p.rank);let A=p;_!=null&&(A=xn(p,_),D=io(D.length,A.rank));const B=S.max(A,D);_!=null&&A.dispose();let ne=B;if(u){const te=rs(ne.shape,ft(l,p.shape));ne=re(ne,te),B.dispose()}return T([p,ne]),ne},g={x:p},I={reductionIndices:l,keepDims:u};return H.runKernelFunc(y,g,null,Lf,I)}const ra=V({max_:J9});function Z9(r,l){let u=M(r,"a","sub"),p=M(l,"b","sub");[u,p]=Lt(u,p);const y=(I,S)=>{const T=I.subtract(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,Gf)}const Be=V({sub_:Z9});function Q9(r,l=null,u=!1){let p=M(r,"x","sum");p.dtype==="bool"&&(p=Ie(p,"int32"));const y=(S,T)=>{T([p]);const C=ft(l,p.shape),D=zi(C,p.rank);let _=C,A=p;D!=null&&(A=xn(p,D),_=io(_.length,p.rank));let B=S.sum(A,_);if(u){const ne=rs(B.shape,C);B=re(B,ne)}return B},g={x:p},I={axis:l,keepDims:u};return H.runKernelFunc(y,g,null,Mf,I)}const _e=V({sum_:Q9});function eZ(r,l=null,u=!1){const p=M(r,"x","logSumExp"),y=ft(l,p.shape),g=ra(p,y,!0),I=Be(p,g),S=Gn(I),T=_e(S,y),C=ao(T),D=vt(re(g,C.shape),C);if(u){const _=rs(D.shape,y);return re(D,_)}return D}const DI=V({logSumExp_:eZ});function tZ(r,l){const u=M(r,"a","logicalAnd","bool"),p=M(l,"b","logicalAnd","bool");ot(u.shape,p.shape);const y={a:u,b:p};return H.runKernelFunc(g=>g.logicalAnd(u,p),y,null,cR)}const oa=V({logicalAnd_:tZ});function nZ(r){const l=M(r,"x","logicalNot","bool"),u={x:l};return H.runKernelFunc(p=>p.logicalNot(l),u,null,lR)}const kI=V({logicalNot_:nZ});function ks(r,l="float32"){if(l==="complex64"){const p=ks(r,"float32"),y=ks(r,"float32");return Mi(p,y)}const u=na(Zt(r),l);return H.makeTensor(u,r,l)}function Vi(r,l="float32"){if(l==="complex64"){const p=Vi(r,"float32"),y=ks(r,"float32");return Mi(p,y)}const u=Jm(Zt(r),l);return H.makeTensor(u,r,l)}function sZ(r,l=null,u=!1){const p=M(r,"x","mean"),y=ft(l,p.shape),g=dO(p.shape,y),I=g[1],S=Zt(I),T={x:p},C={axis:l,keepDims:u},D=()=>{const A=Fe(S),B=A.dtype===p.dtype?p:Ie(p,A.dtype),ne=Me(B,A);return _e(ne,l,u)},_=cg(A=>{const B=H.runKernelFunc(D,T,null,yR,C),ne=te=>{const P=A.shape.slice();y.forEach(Le=>{P[Le]=1});const ge=re(te,P),ae=Me(le(ge,Vi(A.shape,"float32")),S);return ae};return{value:B,gradFunc:ne}});return _(p)}const FI=V({mean_:sZ});function iZ(r,l=null,u=!1){const p=M(r,"x","min"),y=(S,T)=>{const C=ft(l,p.shape);let D=C;const _=zi(D,p.rank);let A=p;_!=null&&(A=xn(p,_),D=io(D.length,p.rank));const B=S.min(A,D);_!=null&&A.dispose();let ne=B;if(u){const te=rs(ne.shape,C);ne=re(B,te),B.dispose()}return T([p,ne]),ne},g={x:p},I={axis:l,keepDims:u};return H.runKernelFunc(y,g,null,If,I)}const lg=V({min_:iZ});function rZ(r,l){let u=M(r,"a","minimum"),p=M(l,"b","minimum");[u,p]=Lt(u,p),u.dtype==="bool"&&(u=Ie(u,"int32"),p=Ie(p,"int32")),ot(u.shape,p.shape);const y=(I,S)=>{const T=I.minimum(u,p);return S([u,p]),T},g={a:u,b:p};return H.runKernelFunc(y,g,null,xf)}const _I=V({minimum_:rZ});function oZ(r){const l=M(r,"x","square"),u={},p=[l],y=[];return H.runKernelFunc((g,I)=>(I([l]),g.square(l)),{x:l},null,"Square",u,p,y)}const gt=V({square_:oZ});function aZ(r,l){let u=M(r,"a","notEqual"),p=M(l,"b","notEqual");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=I=>I.notEqual(u,p),g={a:u,b:p};return 
H.runKernelFunc(y,g,null,LR)}const WI=V({notEqual_:aZ});function cZ(r){const l=M(r,"input","real"),u=y=>y.real(l),p={input:l};return H.runKernelFunc(u,p,null,CR)}const Bc=V({real_:cZ});function lZ(r,l,u=0){const p=M(r,"x","pad");if(p.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");const y=(S,T)=>(T([p]),S.pad(p,l,u)),g={paddings:l,constantValue:u},I={x:p};return H.runKernelFunc(y,I,null,vf,g)}const $I=V({pad_:lZ});function hZ(r,l,u){const p=M(r,"x","spaceToBatchND");J(p.rank>=1+l.length,()=>`input rank ${p.rank} should be > than [blockShape] ${l.length}`),J(u.length===l.length,()=>`paddings.shape[0] ${u.length} must be equal to [blockShape] ${l.length}`),J(p.shape.reduce((S,T,C)=>C>0&&C<=l.length?S&&(T+u[C-1][0]+u[C-1][1])%l[C-1]===0:S,!0),()=>`input spatial dimensions ${p.shape.slice(1)} with paddings ${u.toString()} must be divisible by blockShapes ${l.toString()}`);const y=S=>S.spaceToBatchND(p,l,u),g={x:p},I={blockShape:l,paddings:u};return H.runKernelFunc(y,g,null,Pf,I)}const UI=V({spaceToBatchND_:hZ});function uZ(r,l){let u=M(r,"base","pow"),p=M(l,"exp","pow");[u,p]=Lt(u,p);const y={a:u,b:p},g=(I,S)=>{const T=I.pow(u,p);return S([u,p,T]),T};return H.runKernelFunc(g,y,null,Nf)}const aa=V({pow_:uZ});function co(r,l){Oc(r);const u=br(r,l);if(u.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");const p=null;return Pi(r,p,u,l)}function hg(r,l,u=1,p="float32"){if(u===0)throw new Error("Cannot have a step of zero");const y=()=>{const I=r===l,S=r1;if(I||S||T)return ks([0],p);const C=Math.abs(Math.ceil((l-r)/u)),D=na(C,p);l(g([l]),l.dtype==="bool"?Ie(l,"int32"):y.relu(l)),p={x:l};return H.runKernelFunc(u,p,null,Cf)}const Fu=V({relu_:dZ});function pZ(r,l){const u=M(r,"x","reverse"),p=I=>{const S=ft(l,u.shape);if(u.rank===0)return pi(u);const T=I.reverse(u,S);return re(T,u.shape)},y={x:u},g={dims:l};return H.runKernelFunc(p,y,null,Df,g)}const Mc=V({reverse_:pZ});function mZ(r){const l=M(r,"x","rsqrt"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.rsqrt(l);return y([l]),g},u,null,kf)}const BI=V({rsqrt_:mZ});function fZ(r){const l=M(r,"x","sin"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sin(l);return y([l]),g},u,null,Wf)}const MI=V({sin_:fZ});function gZ(r){const l=M(r,"x","sinh"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sinh(l);return y([l]),g},u,null,$f)}const PI=V({sinh_:gZ});function yZ(r){J(r.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],y=r.size/p,g=r.as2D(y,p),I=u.fft(g);return I.reshape(r.shape)},l,null,X2)}const _u=V({fft_:yZ});function bZ(r){J(r.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`);const l={input:r};return H.runKernelFunc(u=>{const p=r.shape[r.shape.length-1],y=r.size/p,g=re(r,[y,p]),I=u.ifft(g);return re(I,r.shape)},l,null,tR)}const Pc=V({ifft_:bZ});function wZ(r){const l=r.shape[r.shape.length-1],u=r.size/l;let p;if(l<=2){const y=re(r,[u,l]);p=Pc(y)}else{const y=[u,2*(l-1)],g=re(Bc(r),[u,l]),I=re(ku(r),[u,l]),S=Mc(Tt(g,[0,1],[u,l-2]),1),T=le(Mc(Tt(I,[0,1],[u,l-2]),1),Fe(-1)),C=Tn([g,S],1),D=Tn([I,T],1),_=re(Mi(C,D),[y[0],y[1]]);p=Pc(_)}if(p=Bc(p),r.rank===3&&r.shape[0]!==0){const y=p,g=r.shape[0];p=re(p,[g,p.shape[0]/g,p.shape[1]]),y.dispose()}return p}const zI=V({irfft_:wZ});function wO(r,l,u=0){let p=[];if(typeof l=="number")J(r.shape[u]%l===0,()=>"Number of splits must evenly divide the axis."),p=new 
Array(l).fill(r.shape[u]/l);else{const y=l.reduce((I,S)=>(S===-1&&(I+=1),I),0);J(y<=1,()=>"There should be only one negative value in split array.");const g=l.indexOf(-1);if(g!==-1){const I=l.reduce((S,T)=>T>0?S+T:S);l[g]=r.shape[u]-I}J(r.shape[u]===l.reduce((I,S)=>I+S),()=>"The sum of sizes must match the size of the axis dimension."),p=l}return p}function LZ(r,l,u=0){const p=M(r,"x","split"),y=(S,T)=>{const C=ft(u,p.shape)[0],D=wO(p,l,C);return S.split(p,D,C)},g={x:p},I={numOrSizeSplits:l,axis:u};return H.runKernelFunc(y,g,null,zf,I)}const lo=V({split_:LZ});function SZ(r,l){J(r.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${r.dtype}`);let u=r.shape[r.shape.length-1];const p=r.size/u;let y;if(l!=null&&l0),te=r.shape.map(P=>P);te[r.shape.length-1]=l,y=Tt(r,ne,te),u=l}else if(l!=null&&l>u){const ne=r.shape.map(te=>te);ne[r.shape.length-1]=l-u,y=Tn([r,ks(ne)],r.shape.length-1),u=l}else y=r;const g=Ke(y),I=re(Mi(y,g),[p,u]),S=_u(I),T=Math.floor(u/2)+1,C=Bc(S),D=ku(S),_=lo(C,[T,u-T],C.shape.length-1),A=lo(D,[T,u-T],D.shape.length-1),B=y.shape.slice();return B[y.shape.length-1]=T,re(Mi(_[0],A[0]),B)}const Wu=V({rfft_:SZ});function IZ(r){const l=M(r,"x","sqrt"),u={x:l};return H.runKernelFunc((p,y)=>{const g=p.sqrt(l);return y([l]),g},u,null,Bf)}const fs=V({sqrt_:IZ});function xZ(r,l){let u=M(r,"a","squaredDifference"),p=M(l,"b","squaredDifference");[u,p]=Lt(u,p),ot(u.shape,p.shape);const y=(S,T)=>{const C=S.squaredDifference(u,p);return T([u,p]),C},g={a:u,b:p},I={};return H.runKernelFunc(y,g,null,Vf,I)}const VI=V({squaredDifference_:xZ});function TZ(r,l){const u=M(r,"x","squeeze");return re(u,o2(u.shape,l).newShape)}const GI=V({squeeze_:TZ});function AZ(r,l=0){const u=ng(r,"tensors","stack");if(J(u.length>=1,()=>"Pass at least one tensor to tf.stack"),u.length===1)return Ds(u[0],l);const p=u[0].rank,y=u[0].shape,g=u[0].dtype;J(l<=p,()=>"Axis must be <= rank of the tensor"),u.forEach(S=>{tn(y,S.shape,"All tensors passed to stack must have matching shapes"),J(g===S.dtype,()=>"All tensors passed to stack must have matching dtypes")});const I=u.map(S=>Ds(S,l));return Tn(I,l)}const Ys=V({stack_:AZ});function vZ(r,l=0){const u=M(r,"x","step"),p={x:u},y={alpha:l};return H.runKernelFunc(g=>g.step(u,l),p,null,Xf,y)}const ca=V({step_:vZ});function la(r,l,u){if(Oc(r),l!=null&&l.length!==2)throw new Error("tensor2d() requires shape to have two numbers");const p=br(r,u);if(p.length!==2&&p.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(p.length===1&&l==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return Pi(r,l,p,u)}function NZ(r,l,u){const p=M(r,"x","unsortedSegmentSum"),y=M(l,"segmentIds","unsortedSegmentSum","int32");J(nn(u),()=>"numSegments must be of dtype int");const g={x:p,segmentIds:y},I={numSegments:u},S=(T,C)=>{const D=T.unsortedSegmentSum(p,y,u);return C([y]),D};return H.runKernelFunc(S,g,null,jf,I)}const YI=V({unsortedSegmentSum_:NZ});function CZ(r,l=0){const u=M(r,"x","unstack");J(l>=-u.shape.length&&l`Axis = ${l} is not in [-${u.shape.length}, ${u.shape.length})`),l<0&&(l+=u.shape.length);const p={value:u},y={axis:l},g=I=>I.unstack(u,l);return H.runKernelFunc(g,p,null,qf,y)}const $u=V({unstack_:CZ});function RZ(r,l="euclidean",u=null,p=!1){r=M(r,"x","norm");const y=LO(r,l,u);let g=y.shape;if(p){const I=ft(u,r.shape);g=rs(y.shape,I)}return re(y,g)}function LO(r,l,u=null){if(r.rank===0)return zn(r);if(r.rank!==1&&u===null)return LO(re(r,[-1]),l,u);if(r.rank===1||typeof 
u=="number"||Array.isArray(u)&&u.length===1){if(l===1)return _e(zn(r),u);if(l===Infinity)return ra(zn(r),u);if(l===-Infinity)return lg(zn(r),u);if(l==="euclidean"||l===2)return fs(_e(aa(zn(r),Fe(2,"int32")),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}if(Array.isArray(u)&&u.length===2){if(l===1)return ra(_e(zn(r),u[0]),u[1]-1);if(l===Infinity)return ra(_e(zn(r),u[1]),u[0]);if(l===-Infinity)return lg(_e(zn(r),u[1]),u[0]);if(l==="fro"||l==="euclidean")return fs(_e(gt(r),u));throw new Error(`Error in norm: invalid ord value: ${l}`)}throw new Error(`Error in norm: invalid axis: ${u}`)}const ug=V({norm_:RZ});function SO(r){return Math.floor(Math.pow(2,Math.ceil(Math.log(r)/Math.log(2))))}function dg(r,l,u){const p=1-r%2,y=new Float32Array(r);for(let g=0;g`Error in conv2dDerFilter: input must be rank 4, but got shape ${S.shape}.`),J(T.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${T.shape}.`),J(u.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${u}.`);const C=g==="NHWC"?S.shape[3]:S.shape[1],D=g==="NHWC"?T.shape[3]:T.shape[1];J(C===u[2],()=>`Error in conv2dDerFilter: depth of input ${C}) must match input depth in filter (${u[2]}.`),J(D===u[3],()=>`Error in conv2dDerFilter: depth of dy (${D}) must match output depth for filter (${u[3]}).`),I!=null&&J(nn(y),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${I} but got pad ${y}.`);const _=ne=>{const te=1,P=Uc(g),ge=mi(S.shape,u,p,te,y,I,!1,P);return ne.conv2dDerFilter(S,T,ge)},A={x:S,dy:T},B={strides:p,pad:y,dataFormat:g,dimRoundingMode:I,filterShape:u};return H.runKernelFunc(_,A,null,F2,B)}const pg=V({conv2DBackpropFilter_:OZ});function EZ(r,l,u,p,y,g=[1,1],I){let S=r;r.rank===3&&(S=re(r,[1,r.shape[0],r.shape[1],r.shape[2]]));let T=l;T.rank===3&&(T=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const C=A=>{const B=mi(r.shape,u,p,g,y,I,!0);return A.depthwiseConv2DDerFilter(S,T,B)},D={x:S,dy:T},_={strides:p,pad:y,dimRoundingMode:I,dilations:g,filterShape:u};return H.runKernelFunc(C,D,null,M2,_)}const IO=V({depthwiseConv2dNativeBackpropFilter_:EZ});function DZ(r,l,u,p,y,g=[1,1],I){let S=l,T=!1;l.rank===3&&(T=!0,S=re(l,[1,l.shape[0],l.shape[1],l.shape[2]]));const C=B=>{const ne=mi(r,u.shape,p,g,y,I,!0);return B.depthwiseConv2DDerInput(S,u,ne)},D={dy:S,filter:u},_={strides:p,pad:y,dimRoundingMode:I,dilations:g,inputShape:r},A=H.runKernelFunc(C,D,null,P2,_);return T?re(A,[A.shape[1],A.shape[2],A.shape[3]]):A}const xO=V({depthwiseConv2dNativeBackpropInput_:DZ});function kZ(r){return dg(r,.54,.46)}const TO=V({hammingWindow_:kZ});function FZ(r){return dg(r,.5,.5)}const mg=V({hannWindow_:FZ});function _Z(r,l,u,p=!1,y=0){let g=0;const I=[];for(;g+l<=r.size;)I.push(Tt(r,g,l)),g+=u;if(p)for(;g`Error in cropAndResize: image must be rank 4,but got rank ${I.rank}.`),J(S.rank===2&&S.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${C},4] but had shape ${S.shape}.`),J(T.rank===1&&T.shape[0]===C,()=>`Error in cropAndResize: boxInd must be have size [${C}] but had shape ${S.shape}.`),J(p.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${p.length}.`),J(p[0]>=1&&p[1]>=1,()=>`cropSize must be atleast [1,1], but was ${p}`),J(y==="bilinear"||y==="nearest",()=>`method must be bilinear or nearest, but was ${y}`);const D=ne=>ne.cropAndResize(I,S,T,p,y,g),_={image:I,boxes:S,boxInd:T},A={method:y,extrapolationValue:g,cropSize:p},B=H.runKernelFunc(D,_,null,U2,A);return B}const vO=V({cropAndResize_:$Z});function 
UZ(r){const l=M(r,"image","flipLeftRight","float32");J(l.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${l.rank}.`);const u={image:l},p=H.runKernel(Z2,u,{});return p}const NO=V({flipLeftRight_:UZ});function BZ(r,l,u=0,p=.5){const y=M(r,"image","rotateWithOffset","float32");J(y.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${y.rank}.`);const g={image:y},I={radians:l,fillValue:u,center:p},S=H.runKernel(PR,g,I);return S}const CO=V({rotateWithOffset_:BZ});function Hs(r,l,u,p,y,g){p==null&&(p=.5),y==null&&(y=Number.NEGATIVE_INFINITY),g==null&&(g=0);const I=r.shape[0];return u=Math.min(u,I),J(0<=p&&p<=1,()=>`iouThreshold must be in [0, 1], but was '${p}'`),J(r.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${r.rank}'`),J(r.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`),J(l.rank===1,()=>"scores must be a 1D tensor"),J(l.shape[0]===I,()=>`scores has incompatible shape with boxes. Expected ${I}, but was ${l.shape[0]}`),J(0<=g&&g<=1,()=>`softNmsSigma must be in [0, 1], but was '${g}'`),{maxOutputSize:u,iouThreshold:p,scoreThreshold:y,softNmsSigma:g}}function MZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppression"),I=M(l,"scores","nonMaxSuppression"),S=Hs(g,I,u,p,y);u=S.maxOutputSize,p=S.iouThreshold,y=S.scoreThreshold;const T={maxOutputSize:u,iouThreshold:p,scoreThreshold:y};return H.runKernelFunc(C=>C.nonMaxSuppression(g,I,u,p,y),{boxes:g,scores:I},null,SR,T)}const RO=V({nonMaxSuppression_:MZ});function OO(r,l,u){const p=PZ(r,l,u),y=p<0?-(p+1):p;r.splice(y,0,l)}function PZ(r,l,u){return VZ(r,l,u||zZ)}function zZ(r,l){return r>l?1:r>>1);const S=u(l,r[g]);S>0?p=g+1:(y=g,I=!S)}return I?p:-p-1}function EO(r,l,u,p,y){return HI(r,l,u,p,y,0).selectedIndices}function DO(r,l,u,p,y,g){return HI(r,l,u,p,y,0,!1,g,!0)}function kO(r,l,u,p,y,g){return HI(r,l,u,p,y,g,!0)}function HI(r,l,u,p,y,g,I=!1,S=!1,T=!1){const C=[];for(let P=0;Py&&C.push({score:l[P],boxIndex:P,suppressBeginIndex:0});C.sort(FO);const D=g>0?-.5/g:0,_=[],A=[];for(;_.length0;){const P=C.pop(),{score:ge,boxIndex:ae,suppressBeginIndex:Le}=P;if(ge=Le;--Ve){const at=GZ(r,ae,_[Ve]);if(at>=p){ve=!0;break}if(P.score=P.score*YZ(p,D,at),P.score<=y)break}P.suppressBeginIndex=_.length,ve||(P.score===ge?(_.push(ae),A.push(P.score)):P.score>y&&OO(C,P,FO))}const B=_.length,ne=u-B;S&&ne>0&&(_.push(...new Array(ne).fill(0)),A.push(...new Array(ne).fill(0)));const te={selectedIndices:co(_,"int32")};return I&&(te.selectedScores=co(A,"float32")),T&&(te.validOutputs=Fe(B,"int32")),te}function GZ(r,l,u){const p=r.subarray(l*4,l*4+4),y=r.subarray(u*4,u*4+4),g=Math.min(p[0],p[2]),I=Math.min(p[1],p[3]),S=Math.max(p[0],p[2]),T=Math.max(p[1],p[3]),C=Math.min(y[0],y[2]),D=Math.min(y[1],y[3]),_=Math.max(y[0],y[2]),A=Math.max(y[1],y[3]),B=(S-g)*(T-I),ne=(_-C)*(A-D);if(B<=0||ne<=0)return 0;const te=Math.max(g,C),P=Math.max(I,D),ge=Math.min(S,_),ae=Math.min(T,A),Le=Math.max(ge-te,0)*Math.max(ae-P,0);return Le/(B+ne-Le)}function YZ(r,l,u){const p=Math.exp(l*u*u);return u<=r?p:0}function FO(r,l){return r.score-l.score||r.score===l.score&&l.boxIndex-r.boxIndex}async function HZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY){const g=M(r,"boxes","nonMaxSuppressionAsync"),I=M(l,"scores","nonMaxSuppressionAsync"),S=Hs(g,I,u,p,y);u=S.maxOutputSize,p=S.iouThreshold,y=S.scoreThreshold;const T=await Promise.all([g.data(),I.data()]),C=T[0],D=T[1],_=EO(C,D,u,p,y);return g!==r&&g.dispose(),I!==l&&I.dispose(),_}const _O=HZ;function 
qZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=0){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),T=Hs(I,S,u,p,y,g);u=T.maxOutputSize,p=T.iouThreshold,y=T.scoreThreshold,g=T.softNmsSigma;const C={boxes:I,scores:S},D={maxOutputSize:u,iouThreshold:p,scoreThreshold:y,softNmsSigma:g},_=H.runKernel(xR,C,D);return{selectedIndices:_[0],selectedScores:_[1]}}const WO=V({nonMaxSuppressionWithScore_:qZ});async function jZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=0){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),T=Hs(I,S,u,p,y,g);u=T.maxOutputSize,p=T.iouThreshold,y=T.scoreThreshold,g=T.softNmsSigma;const C=await Promise.all([I.data(),S.data()]),D=C[0],_=C[1],A=kO(D,_,u,p,y,g);return I!==r&&I.dispose(),S!==l&&S.dispose(),A}const $O=jZ;function KZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=!1){const I=M(r,"boxes","nonMaxSuppression"),S=M(l,"scores","nonMaxSuppression"),T=Hs(I,S,u,p,y,null),C=T.maxOutputSize,D=T.iouThreshold,_=T.scoreThreshold,A={boxes:I,scores:S},B={maxOutputSize:C,iouThreshold:D,scoreThreshold:_,padToMaxOutputSize:g},ne=H.runKernel(IR,A,B);return{selectedIndices:ne[0],validOutputs:ne[1]}}const UO=V({nonMaxSuppressionPadded_:KZ});async function XZ(r,l,u,p=.5,y=Number.NEGATIVE_INFINITY,g=!1){const I=M(r,"boxes","nonMaxSuppressionAsync"),S=M(l,"scores","nonMaxSuppressionAsync"),T=Hs(I,S,u,p,y,null),C=T.maxOutputSize,D=T.iouThreshold,_=T.scoreThreshold,[A,B]=await Promise.all([I.data(),S.data()]),ne=DO(A,B,C,D,_,g);return I!==r&&I.dispose(),S!==l&&S.dispose(),ne}const BO=XZ;function JZ(r,l,u=!1){const p=M(r,"images","resizeBilinear");J(p.rank===3||p.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${p.rank}.`),J(l.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${l}.`);let y=p,g=!1;p.rank===3&&(g=!0,y=re(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,T=(A,B)=>(B([y]),A.resizeBilinear(y,I,S,u)),C={images:y},D={alignCorners:u,size:l},_=H.runKernelFunc(T,C,null,Ef,D);return g?re(_,[_.shape[1],_.shape[2],_.shape[3]]):_}const MO=V({resizeBilinear_:JZ});function ZZ(r,l,u=!1){const p=M(r,"images","resizeNearestNeighbor");J(p.rank===3||p.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${p.rank}.`),J(l.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${l}.`),J(p.dtype==="float32"||p.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let y=p,g=!1;p.rank===3&&(g=!0,y=re(p,[1,p.shape[0],p.shape[1],p.shape[2]]));const[I,S]=l,T={images:y},C={alignCorners:u,size:l},D=(A,B)=>(B([y]),A.resizeNearestNeighbor(y,I,S,u)),_=H.runKernelFunc(D,T,null,Of,C);return g?re(_,[_.shape[1],_.shape[2],_.shape[3]]):_}const PO=V({resizeNearestNeighbor_:ZZ});function QZ(r,l,u){J(l%1===0,()=>`bandPart(): numLower must be an integer, got ${l}.`),J(u%1===0,()=>`bandPart(): numUpper must be an integer, got ${u}.`);const p=M(r,"a","bandPart");J(p.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${p.rank}.`);const y=p.shape,[g,I]=p.shape.slice(-2);if(!(l<=g))throw new Error(`bandPart(): numLower (${l}) must not be greater than the number of rows (${g}).`);if(!(u<=I))throw new Error(`bandPart(): numUpper (${u}) must not be greater than the number of columns (${I}).`);l<0&&(l=g),u<0&&(u=I);const S=re(hg(0,g,1,"int32"),[-1,1]),T=hg(0,I,1,"int32"),C=Be(S,T),D=oa(Lr(C,Fe(+l,"int32")),wr(C,Fe(-u,"int32"))),_=ks([g,I],p.dtype);return re(Ys($u(re(p,[-1,g,I])).map(A=>Vn(D,A,_))),y)}const zO=V({bandPart_:QZ});function eQ(r){let 
l;if(Array.isArray(r)){l=!1,J(r!=null&&r.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");const y=r[0].shape[0];for(let g=1;g`Gram-Schmidt: Non-unique lengths found in the input vectors: (${r[g].shape[0]} vs. ${y})`)}else l=!0,r=lo(r,r.shape[0],0).map(y=>GI(y,[0]));J(r.length<=r[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);const u=[],p=r;for(let y=0;y{let g=p[y];if(y>0)for(let I=0;I=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`),r.rank===2)return GO(r,l);{const u=r.shape.slice(0,r.shape.length-2).reduce((T,C)=>T*C),p=$u(re(r,[u,r.shape[r.shape.length-2],r.shape[r.shape.length-1]]),0),y=[],g=[];p.forEach(T=>{const[C,D]=GO(T,l);y.push(C),g.push(D)});const I=re(Ys(y,0),r.shape),S=re(Ys(g,0),r.shape);return[I,S]}}function GO(r,l=!1){return H.tidy(()=>{J(r.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);const u=r.shape[0],p=r.shape[1];let y=AI(u),g=pi(r);const I=la([[1]],[1,1]);let S=pi(I);const T=u>=p?p:u;for(let C=0;C{const B=Tt(g,[C,C],[u-C,1]),ne=ug(B),te=Tt(g,[C,C],[1,1]),P=Vn(fi(te,0),la([[-1]]),la([[1]])),ge=Be(te,le(P,ne)),ae=Me(B,ge);ae.shape[0]===1?S=pi(I):S=Tn([I,Tt(ae,[1,0],[ae.shape[0]-1,ae.shape[1]])],0);const Le=It(Me(yn(P,ge),ne)),ve=Tt(g,[C,0],[u-C,p]),Ve=le(Le,S),at=xn(S);if(C===0)g=Be(ve,yn(Ve,yn(at,ve)));else{const Vt=Be(ve,yn(Ve,yn(at,ve)));g=Tn([Tt(g,[0,0],[C,p]),Vt],0)}const pt=xn(Ve),$t=Tt(y,[0,C],[u,y.shape[1]-C]);if(C===0)y=Be($t,yn(yn($t,S),pt));else{const Vt=Be($t,yn(yn($t,S),pt));y=Tn([Tt(y,[0,0],[u,C]),Vt],1)}return[S,g,y]}),uO([D,_,A])}return!l&&u>p&&(y=Tt(y,[0,0],[u,p]),g=Tt(g,[0,0],[p,p])),[y,g]})}const YO=V({qr_:tQ});var Qt;(function(r){r[r.NONE=0]="NONE",r[r.MEAN=1]="MEAN",r[r.SUM=2]="SUM",r[r.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(Qt||(Qt={}));function nQ(r,l,u=Qt.SUM_BY_NONZERO_WEIGHTS){const p=M(r,"losses","computeWeightedLoss");let y=null;l!=null&&(y=M(l,"weights","computeWeightedLoss"));const g=y==null?p:le(p,y);if(u===Qt.NONE)return g;if(u===Qt.SUM)return _e(g);if(u===Qt.MEAN){if(y==null)return FI(g);{const I=p.size/y.size,S=Me(_e(g),_e(y));return I>1?Me(S,Fe(I)):S}}if(u===Qt.SUM_BY_NONZERO_WEIGHTS){if(y==null)return Me(_e(g),Fe(p.size));{const I=le(y,Vi(p.shape)),S=Ie(_e(WI(I,Fe(0))),"float32");return Me(_e(g),S)}}throw Error(`Unknown reduction: ${u}`)}const Dn=V({computeWeightedLoss_:nQ});function sQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){const y=M(r,"labels","absoluteDifference"),g=M(l,"predictions","absoluteDifference");let I=null;u!=null&&(I=M(u,"weights","absoluteDifference")),tn(y.shape,g.shape,"Error in absoluteDifference: ");const S=zn(Be(y,g));return Dn(S,I,p)}const HO=V({absoluteDifference_:sQ});function iQ(r,l,u,p,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","cosineDistance"),I=M(l,"predictions","cosineDistance");let S=null;p!=null&&(S=M(p,"weights","cosineDistance")),tn(g.shape,I.shape,"Error in cosineDistance: ");const T=Fe(1),C=Be(T,_e(le(g,I),u,!0));return Dn(C,S,y)}const qO=V({cosineDistance_:iQ});function rQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){let y=M(r,"labels","hingeLoss");const g=M(l,"predictions","hingeLoss");let I=null;u!=null&&(I=M(u,"weights","hingeLoss")),tn(y.shape,g.shape,"Error in hingeLoss: ");const S=Fe(1);y=Be(le(Fe(2),y),S);const T=Fu(Be(S,le(y,g)));return Dn(T,I,p)}const jO=V({hingeLoss_:rQ});function oQ(r,l,u,p=1,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","huberLoss"),I=M(l,"predictions","huberLoss");let 
S=null;u!=null&&(S=M(u,"weights","huberLoss")),tn(g.shape,I.shape,"Error in huberLoss: ");const T=Fe(p),C=zn(Be(I,g)),D=_I(C,T),_=Be(C,D),A=vt(le(Fe(.5),gt(D)),le(T,_));return Dn(A,S,y)}const KO=V({huberLoss_:oQ});function aQ(r,l,u,p=1e-7,y=Qt.SUM_BY_NONZERO_WEIGHTS){const g=M(r,"labels","logLoss"),I=M(l,"predictions","logLoss");let S=null;u!=null&&(S=M(u,"weights","logLoss")),tn(g.shape,I.shape,"Error in logLoss: ");const T=Fe(1),C=Fe(p),D=It(le(g,ao(vt(I,C)))),_=le(Be(T,g),ao(vt(Be(T,I),C))),A=Be(D,_);return Dn(A,S,y)}const XO=V({logLoss_:aQ});function cQ(r,l,u,p=Qt.SUM_BY_NONZERO_WEIGHTS){const y=M(r,"labels","meanSquaredError"),g=M(l,"predictions","meanSquaredError");let I=null;u!=null&&(I=M(u,"weights","meanSquaredError")),tn(y.shape,g.shape,"Error in meanSquaredError: ");const S=VI(y,g);return Dn(S,I,p)}const JO=V({meanSquaredError_:cQ});function lQ(r,l){const u=M(r,"labels","sigmoidCrossEntropyWithLogits"),p=M(l,"logits","sigmoidCrossEntropyWithLogits");tn(u.shape,p.shape,"Error in sigmoidCrossEntropyWithLogits: ");const y=Fu(p),g=le(p,u),I=EI(Gn(It(zn(p))));return vt(Be(y,g),I)}function hQ(r,l,u,p=0,y=Qt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"multiClassLabels","sigmoidCrossEntropy");const I=M(l,"logits","sigmoidCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","sigmoidCrossEntropy")),tn(g.shape,I.shape,"Error in sigmoidCrossEntropy: "),p>0){const C=Fe(p),D=Fe(1),_=Fe(.5);g=vt(le(g,Be(D,C)),le(_,C))}const T=lQ(g,I);return Dn(T,S,y)}const ZO=V({sigmoidCrossEntropy_:hQ});function uQ(r,l,u=-1){if(u===-1&&(u=l.rank-1),u!==l.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${l.rank} and dim was ${u}`);const p=cg((y,g,I)=>{const S=!0,T=DI(g,[u],S),C=Be(Ie(g,"float32"),T);I([y,C]);const D=It(le(C,y)),_=_e(D,[u]),A=(B,ne)=>{const[te,P]=ne,ge=rs(B.shape,[u]);return[le(re(B,ge),Be(Ie(te,"float32"),Gn(P))),le(re(B,ge),Be(Gn(P),Ie(te,"float32")))]};return{value:_,gradFunc:A}});return p(r,l)}function dQ(r,l,u,p=0,y=Qt.SUM_BY_NONZERO_WEIGHTS){let g=M(r,"onehotLabels","softmaxCrossEntropy");const I=M(l,"logits","softmaxCrossEntropy");let S=null;if(u!=null&&(S=M(u,"weights","softmaxCrossEntropy")),tn(g.shape,I.shape,"Error in softmaxCrossEntropy: "),p>0){const C=Fe(p),D=Fe(1),_=Fe(g.shape[1]);g=vt(le(g,Be(D,C)),Me(C,_))}const T=uQ(g,I);return Dn(T,S,y)}const QO=V({softmaxCrossEntropy_:dQ});const xve={fft:_u,ifft:Pc,rfft:Wu,irfft:zI},Cve={hammingWindow:TO,hannWindow:mg,frame:fg,stft:AO},e1={flipLeftRight:NO,resizeNearestNeighbor:PO,resizeBilinear:MO,rotateWithOffset:CO,cropAndResize:vO,nonMaxSuppression:RO,nonMaxSuppressionAsync:_O,nonMaxSuppressionWithScore:WO,nonMaxSuppressionWithScoreAsync:$O,nonMaxSuppressionPadded:UO,nonMaxSuppressionPaddedAsync:BO},Vve={bandPart:zO,gramSchmidt:VO,qr:YO},Qve={absoluteDifference:HO,computeWeightedLoss:Dn,cosineDistance:qO,hingeLoss:jO,huberLoss:KO,logLoss:XO,meanSquaredError:JO,sigmoidCrossEntropy:ZO,softmaxCrossEntropy:QO};const t1=1.7580993408473768,n1=1.0507009873554805;const s1={kernelName:ef,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,ca(Ie(u,"float32"),-1))}}};const i1={kernelName:y2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=gt(Ie(u,"float32")),y=fs(Be(Fe(1),p));return It(Me(r,y))}}}};const r1={kernelName:b2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fs(Be(gt(Ie(u,"float32")),1));return Me(r,p)}}}};const o1={kernelName:Dc,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{let S=r;const 
T=Ot(u.shape,y);return T.length>0&&(S=_e(S,T)),re(S,u.shape)},I=()=>{let S=r;const T=Ot(p.shape,y);return T.length>0&&(S=_e(S,T)),re(S,p.shape)};return{a:g,b:I}}};const a1={kernelName:w2,saveAllInputs:!0,gradFunc:(r,l)=>{const u={};return l.forEach((p,y)=>{u[y]=()=>r.clone()}),u}};const c1={kernelName:L2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Ke(u)}}};const l1={kernelName:S2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Ke(u)}}};const h1={kernelName:I2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,fs(Be(Fe(1),gt(Ie(u,"float32")))))}}};const u1={kernelName:x2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fs(vt(Fe(1),gt(Ie(u,"float32"))));return Me(r,p)}}}};const d1={kernelName:v2,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=vt(gt(u),gt(p));let T=le(r,Me(p,S));const C=Ot(u.shape,y);return C.length>0&&(T=_e(T,C)),re(T,u.shape)},I=()=>{const S=vt(gt(u),gt(p));let T=It(le(r,Me(u,S)));const C=Ot(p.shape,y);return C.length>0&&(T=_e(T,C)),re(T,p.shape)};return{a:g,b:I}}};const p1={kernelName:T2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,vt(gt(Ie(u,"float32")),1))}}};const m1={kernelName:A2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,Be(Fe(1),gt(Ie(u,"float32"))))}}};function pQ(r,l,u,p,y=[1,1,1],g,I){const S=M(r,"dy","avgPool3dBackprop"),T=M(l,"input","avgPool3dBackprop");let C=S,D=T,_=!1;T.rank===4&&(_=!0,C=re(S,[1,S.shape[0],S.shape[1],S.shape[2],S.shape[3]]),D=re(T,[1,T.shape[0],T.shape[1],T.shape[2],T.shape[3]])),J(C.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),J(D.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank ${D.rank}.`),J(oo(p,y),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. 
Got strides ${p} and dilations '${y}'`),I!=null&&J(nn(g),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const A=P=>{const ge=og(D.shape,u,p,y,g,I);return P.avgPool3dBackprop(C,D,ge)},B={dy:C,input:D},ne={filterSize:u,strides:p,dilations:y,pad:g,dimRoundingMode:I},te=H.runKernelFunc(A,B,null,O2,ne);return _?re(te,[te.shape[1],te.shape[2],te.shape[3],te.shape[4]]):te}const f1=V({avgPool3dBackprop_:pQ});const g1={kernelName:R2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:y,strides:g,dilations:I,pad:S,dimRoundingMode:T}=u,C=I==null?[1,1,1]:I;return{x:()=>f1(r,p,y,g,C,S,T)}}};function mQ(r,l,u,p,y){const g=M(r,"dy","avgPoolBackprop"),I=M(l,"input","avgPoolBackprop");J(I.rank===g.rank,()=>`Rank of input (${I.rank}) does not match rank of dy (${g.rank})`);let S=I,T=g,C=!1;I.rank===3&&(C=!0,S=re(I,[1,I.shape[0],I.shape[1],I.shape[2]]),T=re(g,[1,g.shape[0],g.shape[1],g.shape[2]])),J(T.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${T.rank}.`),J(S.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${S.rank}.`);const D=ne=>{const te=rg(S.shape,u,p,1,y);return ne.avgPoolBackprop(T,S,te)},_={dy:T,input:S},A={filterSize:u,strides:p,pad:y},B=H.runKernelFunc(D,_,null,C2,A);return C?re(B,[B.shape[1],B.shape[2],B.shape[3]]):B}const y1=V({avgPoolBackprop_:mQ});const b1={kernelName:N2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{filterSize:y,strides:g,pad:I}=u;return{x:()=>y1(r,p,y,g,I)}}};const w1={kernelName:tf,inputsToSave:["a","b"],gradFunc:(r,l,u)=>{const[p,y]=l,{transposeA:g,transposeB:I}=u;return!g&&!I?{a:()=>yn(r,y,!1,!0),b:()=>yn(p,r,!0,!1)}:!g&&I?{a:()=>yn(r,y,!1,!1),b:()=>yn(r,p,!0,!1)}:g&&!I?{a:()=>yn(y,r,!1,!0),b:()=>yn(p,r,!1,!1)}:{a:()=>yn(y,r,!0,!0),b:()=>yn(r,p,!0,!0)}}};const L1={kernelName:nf,gradFunc:(r,l,u)=>{const{blockShape:p,crops:y}=u;return{x:()=>UI(r,p,y)}}};const S1={kernelName:sf,gradFunc:(r,l,u)=>{const p=u,y=p.inputShape,g=p.shape,I=Array.from(g);for(let T=y.length-1;T>=0;T--)if(y[T]===g[T])I[T]=1;else if(y[T]!==1)throw new Error(`broadcastTo(): [${y}] cannot be broadcast to [${g}].`);const S=[];for(let T=0;T1&&S.push(T);return{x:()=>_e(r,S,!0)}}};const I1={kernelName:kc,gradFunc:r=>({x:()=>r.clone()})};const x1={kernelName:E2,gradFunc:r=>({x:()=>Ke(r)})};const T1={kernelName:D2,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{clipValueMin:y,clipValueMax:g}=u;return{x:()=>Vn(oa(wr(p,y),Lr(p,g)),r,Ke(r))}}};const A1={kernelName:rf,saveAllInputs:!0,gradFunc:(r,l,u)=>{const p=l.map(T=>T.shape),{axis:y}=u,g=ft(y,l[0].shape)[0],I=p.map(T=>T[g]),S=lo(r,I,g);return S.map(T=>()=>T)}};const v1={kernelName:of,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,{dilations:g,strides:I,pad:S,dataFormat:T}=u;return J(ro(g),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${g}'`),{x:()=>gO(p.shape,r,y,I,S,T),filter:()=>pg(p,r,y.shape,I,S,T)}}};const N1={kernelName:af,inputsToSave:["dy","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,{strides:g,pad:I,dataFormat:S,dimRoundingMode:T}=u;return{dy:()=>SI(r,y,g,I,S,1,T),filter:()=>pg(r,p,y.shape,g,I,S,T)}}};function fQ(r,l,u,p,y){let g=r;r.rank===4&&(g=re(r,[1,r.shape[0],r.shape[1],r.shape[2],r.shape[3]]));let I=l;I.rank===4&&(I=re(l,[1,l.shape[0],l.shape[1],l.shape[2],l.shape[3]])),J(g.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${g.shape}.`),J(I.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${I.shape}.`),J(u.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${u}.`),J(g.shape[4]===u[3],()=>`Error in conv3dDerFilter: depth of input ${g.shape[4]}) must match input depth in filter (${u[3]}.`),J(I.shape[4]===u[4],()=>`Error in conv3dDerFilter: depth of dy (${I.shape[4]}) must match output depth for filter (${u[4]}).`);const S=D=>{const _=1,A=Eu(g.shape,u,p,_,y);return D.conv3dDerFilter(g,I,A)},T={x:g,dy:I},C={strides:p,pad:y,filterShape:u};return H.runKernelFunc(S,T,null,W2,C)}const C1=V({conv3DBackpropFilter_:fQ});const R1={kernelName:_2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:y,pad:g}=u;J(ro(p),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${p}'`);const[I,S]=l;return{x:()=>yO(I.shape,r,S,y,g),filter:()=>C1(I,r,S.shape,y,g)}}};const O1={kernelName:cf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(It(MI(Ie(u,"float32"))),r)}}};const E1={kernelName:lf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(PI(Ie(u,"float32")),r)}}};const D1={kernelName:hf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{axis:y,exclusive:g,reverse:I}=u;return{x:()=>{const S=zi([y],p.rank);let T=xI(r,y,g,!I);return S!=null&&(T=xn(T,S)),T}}}};const k1={kernelName:B2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const{dilations:p,strides:y,pad:g,dimRoundingMode:I}=u,S=p==null?[1,1]:p;J(ro(S),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${S}'`);const[T,C]=l;return J(T.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${T.rank}.`),J(C.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${C.rank}.`),J(T.shape[3]===C.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${T.shape[3]}) must match the inChannels dimension in filter ${C.shape[2]}.`),J(oo(y,S),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${y} and dilations '${S}'.`),I!=null&&J(nn(g),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`),{x:()=>xO(T.shape,r,C,y,g,p,I),filter:()=>IO(T,r,C.shape,y,g,p,I)}}};const F1={kernelName:z2,inputsToSave:["x","filter"],gradFunc:(r,l,u)=>{const[p,y]=l,g={x:p,filter:y,dy:r},I={x:p,filter:y,dy:r};return{x:()=>H.runKernel(V2,g,u),filter:()=>H.runKernel(G2,I,u)}}};const _1={kernelName:uf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Me(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{let S=le(r,Ie(u,"float32"));const T=Ot(p.shape,y);T.length>0&&(S=re(_e(S,T),p.shape));const C=gt(p);return It(Me(S,Ie(C,"float32")))};return{a:g,b:I}}};const W1={kernelName:Y2,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l,p=g=>g.eluDer(r,u),y={dy:r,y:u};return{x:()=>H.runKernelFunc(p,y,null,H2)}}};const $1={kernelName:q2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=le(Gn(It(gt(u))),2/Math.sqrt(Math.PI));return{x:()=>le(r,p)}}};const U1={kernelName:df,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,u)}}};const B1={kernelName:K2,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,Gn(u))}}};const M1={kernelName:pf,gradFunc:r=>({x:()=>Ke(r)})};const P1={kernelName:mf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Me(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{let S=le(r,Ie(u,"float32"));const T=Ot(p.shape,y);T.length>0&&(S=re(_e(S,T),p.shape));const C=gt(p);return It(Me(S,Ie(C,"float32")))};return{a:g,b:I}}};const z1={kernelName:Q2,inputsToSave:["x","mean","variance","scale"],gradFunc:(r,l,u)=>{const{varianceEpsilon:p}=u,[y,g,I,S]=l,T=S==null?Fe(1):S,C=Ot(g.shape,y.shape),D=[];if(g.rank===1){for(let ve=0;veg.rank===1?re(le(le(r,ia(re(B,[1,1,1,g.shape[0]]),D)),T),y.shape):re(le(le(r,B),T),y.shape),P=()=>{let ve=le(le(B,Fe(-1)),A);return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)},ge=()=>{let ve=le(le(ne,_),A);return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)},ae=()=>{const ve=le(_,B);let Ve=le(r,ve);return g.rank===1&&(Ve=_e(Ve,C)),re(Ve,g.shape)},Le=()=>{let ve=r;return g.rank===1&&(ve=_e(ve,C)),re(ve,g.shape)};return{x:te,mean:P,variance:ge,scale:ae,offset:Le}}};const Y1={kernelName:ff,inputsToSave:["x","indices"],gradFunc:(r,l,u)=>{const[p,y]=l,{axis:g}=u,I=ft(g,p.shape)[0],S=()=>{const T=p.shape,C=y.size,D=T.slice(0,I),_=D.length,A=T.slice(g,T.length).slice(1),B=A.length,ne=V1(0,_),te=V1(_+1,_+1+B),P=G1([D,[C],A]),ge=re(r,P),ae=re(y,[C]),Le=G1([[_],ne,te]),ve=xn(ge,Le);let Ve=YI(ve,ae,p.shape[I]);const at=Wc(Le);return Ve=xn(Ve,at),Ve};return{x:S,indices:()=>y}}};function V1(r,l){const u=[];for(let p=r;p{const[u,p]=l;return{a:()=>Ke(u),b:()=>Ke(p)}}};const q1={kernelName:yf,gradFunc:r=>({x:()=>Ie(r,"float32")})};const j1={kernelName:sR,gradFunc:r=>({x:()=>Ke(r)})};const K1={kernelName:iR,gradFunc:r=>({x:()=>Ke(r)})};const X1={kernelName:rR,gradFunc:r=>({x:()=>Ke(r)})};const J1={kernelName:wf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,vt(u,1))}}};const Z1={kernelName:bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,Ie(u,"float32"))}}};const Q1={kernelName:hR,inputsToSave:[],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{axis:y}=u;return{logits:()=>{const g=!0,I=Gn(p);return Be(r,le(_e(r,y,g),I))}}}};function gQ(r,l,u,p=5,y=1,g=1,I=.5){const 
S=D=>D.LRNGrad(u,r,l,p,y,g,I),T={x:r,y:l,dy:u},C={depthRadius:p,bias:y,alpha:g,beta:I};return H.runKernelFunc(S,T,null,dR,C)}const eE=V({localResponseNormalizationBackprop_:gQ});const tE={kernelName:uR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{depthRadius:g,bias:I,alpha:S,beta:T}=u;return{x:()=>eE(p,y,r,g,I,S,T)}}};function gg(r,l,u,p){return l.rank{const y=le(r,Ie(TI(u,l),r.dtype));return y}}}const qI={kernelName:Lf,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{reductionIndices:y}=p,g=l[0],I=l[1],S=ft(y,g.shape),T=gg(r,I,g,S);return{x:()=>T.x()}}};const nE={kernelName:Sf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=()=>le(r,Ie(wr(u,p),"float32")),g=()=>le(r,Ie(OI(u,p),"float32"));return{a:y,b:g}}};function yQ(r,l,u,p,y,g=[1,1,1],I,S){const T=M(r,"dy","maxPool3dBackprop"),C=M(l,"input","maxPool3dBackprop"),D=M(u,"output","maxPool3dBackprop");let _=T,A=C,B=D,ne=!1;C.rank===4&&(ne=!0,_=re(T,[1,T.shape[0],T.shape[1],T.shape[2],T.shape[3]]),A=re(C,[1,C.shape[0],C.shape[1],C.shape[2],C.shape[3]]),B=re(D,[1,D.shape[0],D.shape[1],D.shape[2],D.shape[3]])),J(_.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${_.rank}.`),J(A.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${A.rank}.`),J(B.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${B.rank}.`),J(oo(y,g),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${y} and dilations '${g}'`),S!=null&&J(nn(I),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${S} but got pad ${I}.`);const te=Le=>{const ve=og(A.shape,p,y,g,I,S);return Le.maxPool3dBackprop(_,A,B,ve)},P={dy:_,input:A,output:B},ge={filterSize:p,strides:y,dilations:g,pad:I,dimRoundingMode:S},ae=H.runKernelFunc(te,P,null,gR,ge);return ne?re(ae,[ae.shape[1],ae.shape[2],ae.shape[3],ae.shape[4]]):ae}const sE=V({maxPool3dBackprop_:yQ});const iE={kernelName:fR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{filterSize:g,strides:I,dilations:S,pad:T,dimRoundingMode:C}=u,D=S==null?[1,1,1]:S;return{x:()=>sE(r,p,y,g,I,D,T,C)}}};function bQ(r,l,u,p,y,g,I){const S=M(r,"dy","maxPoolBackprop"),T=M(l,"input","maxPoolBackprop"),C=M(u,"output","maxPoolBackprop");J(T.rank===S.rank,()=>`Rank of input (${T.rank}) does not match rank of dy (${S.rank})`),J(S.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${S.rank}.`),J(T.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${T.rank}.`),I!=null&&J(nn(g),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${I} but got pad ${g}.`);const D=B=>{const ne=rg(T.shape,p,y,1,g,I);return B.maxPoolBackprop(S,T,C,ne)},_={dy:S,input:T,output:C},A={filterSize:p,strides:y,pad:g,dimRoundingMode:I};return H.runKernelFunc(D,_,null,mR,A)}const rE=V({maxPoolBackprop_:bQ});const oE={kernelName:pR,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p,y]=l,{filterSize:g,strides:I,pad:S}=u;return{x:()=>rE(r,p,y,g,I,S)}}};const aE={kernelName:If,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(r,l,u)=>{const p=u,{axis:y}=p,[g,I]=l,S=ft(y,g.shape),T=gg(r,I,g,S);return{x:()=>T.x()}}};const cE={kernelName:xf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=()=>le(r,Ie(Lr(u,p),"float32")),g=()=>le(r,Ie(fi(u,p),"float32"));return{a:y,b:g}}};const lE={kernelName:bR,inputsToSave:["x"],gradFunc:(r,l,u)=>{const 
p=l[0],{paddings:y}=u,g=y.map(I=>I[0]);return{x:()=>Tt(r,g,p.shape)}}};const hE={kernelName:wR,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=Ot(u.shape,y);return S.length>0?re(_e(r,S),u.shape):r},I=()=>{const S=le(r,It(NI(Me(u,p)))),T=Ot(p.shape,y);return T.length>0?re(_e(S,T),p.shape):S};return{a:g,b:I}}};const uE={kernelName:Tf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{const S=le(r,Ie(p,"float32")),T=Ot(u.shape,y);return T.length>0?re(_e(S,T),u.shape):S},I=()=>{const S=le(r,Ie(u,"float32")),T=Ot(p.shape,y);return T.length>0?re(_e(S,T),p.shape):S};return{a:g,b:I}}};const dE={kernelName:Af,gradFunc:r=>({x:()=>It(r)})};const pE={kernelName:AR,inputsToSave:["indices"],gradFunc:(r,l)=>{const u=l[0];return{indices:()=>ks(u.shape,"float32")}}};const mE={kernelName:TR,gradFunc:r=>({x:()=>Ke(r)})};const jI={kernelName:vf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const p=l[0],{paddings:y}=u,g=y.map(I=>I[0]);return{x:()=>Tt(r,g,p.shape)}}};const fE={kernelName:Nf,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(r,l)=>{const[u,p,y]=l,g=u,I=p,S=ot(g.shape,I.shape),T=()=>{const D=Ie(I,"float32");let _=le(r,le(D,aa(g,Be(D,Fe(1)))));const A=Ot(g.shape,S);return A.length>0&&(_=_e(_,A)),re(_,g.shape)},C=()=>{const D=fi(g,0),_=Vn(D,ao(g),Ke(g));let A=le(r,le(y,_));const B=Ot(I.shape,S);return B.length>0&&(A=_e(A,B)),re(A,I.shape)};return{a:T,b:C}}};const gE={kernelName:vR,inputsToSave:["x","alpha"],gradFunc:(r,l)=>{const[u,p]=l,y=fi(u,0);return{x:()=>Vn(y,r,le(r,p)),alpha:()=>{let g=Vn(y,Ke(r),le(r,u));const I=Ot(p.shape,r.shape);return I.length>0&&(g=_e(g,I)),re(g,p.shape)}}}};const yE={kernelName:RR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,It(gt(u)))}}};const bE={kernelName:DR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l,p=le(Lr(u,6),ca(u));return{x:()=>le(r,Ie(p,"float32"))}}};const wE={kernelName:Cf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,Ie(ca(u),"float32"))}}};const LE={kernelName:Rf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>re(r,u.shape)}}};const SE={kernelName:Ef,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,y=S=>{const{alignCorners:T}=u;return S.resizeBilinearBackprop(r,p,T)},g={images:p},I=()=>H.runKernelFunc(y,g,null,ER,u);return{images:I}}};const IE={kernelName:Of,inputsToSave:["images"],gradFunc:(r,l,u)=>{const[p]=l,y=S=>{const{alignCorners:T}=u;return S.resizeNearestNeighborBackprop(r,p,T)},g={images:p},I=()=>H.runKernelFunc(y,g,null,OR,u);return{images:I}}};const xE={kernelName:Df,gradFunc:(r,l,u)=>{const{dims:p}=u,y=ft(p,r.shape);return{x:()=>Mc(r,y)}}};const TE={kernelName:kR,gradFunc:r=>({x:()=>Ke(r)})};const AE={kernelName:kf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>It(Me(r,le(aa(u,1.5),2)))}}};const vE={kernelName:Ff,inputsToSave:["condition"],gradFunc:(r,l)=>{const[u]=l;return{condition:()=>Ie(Ke(u),"float32"),t:()=>le(r,Ie(u,r.dtype)),e:()=>le(r,Ie(kI(u),r.dtype))}}};const NE={kernelName:FR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>{const p=fi(u,Fe(0)),y=Fe(t1),g=Fe(n1),I=le(r,g),S=le(le(r,y),Gn(Ie(u,"float32")));return Vn(p,I,S)}}}};const CE={kernelName:Uf,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,le(u,Be(Fe(1),u)))}}};const RE={kernelName:_R,gradFunc:r=>({x:()=>Ke(r)})};const OE={kernelName:Wf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(Du(Ie(u,"float32")),r)}}};const 
EE={kernelName:$f,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(II(Ie(u,"float32")),r)}}};const DE={kernelName:_f,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{begin:y,size:g}=u,I=p.shape,[S,T]=sg(p,y,g),C=[];for(let D=0;D$I(r,C)}}};const kE={kernelName:$R,outputsToSave:[!0],gradFunc:(r,l,u)=>{const[p]=l,{dim:y}=u,g=!0,I=le(r,p);return{logits:()=>Be(I,le(_e(I,[y],g),p))}}};const FE={kernelName:WR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,wI(u))}}};const KI={kernelName:Pf,gradFunc:(r,l,u)=>{const{blockShape:p,paddings:y}=u;return{x:()=>LI(r,p,y)}}};const XI={kernelName:zf,gradFunc:(r,l,u)=>{const{axis:p}=u;return{x:()=>Tn(r,p)}}};const _E={kernelName:Bf,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,le(fs(Ie(u,"float32")),2))}}};const WE={kernelName:UR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(r,le(Ie(u,"float32"),2))}}};const $E={kernelName:Vf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=Fe(2),g=()=>le(r,le(y,Be(u,p))),I=()=>le(r,le(y,Be(p,u)));return{a:g,b:I}}};const UE={kernelName:Xf,gradFunc:r=>({x:()=>Ke(r)})};const BE={kernelName:Gf,inputsToSave:["a","b"],gradFunc:(r,l)=>{const[u,p]=l,y=ot(u.shape,p.shape),g=()=>{let S=r;const T=Ot(u.shape,y);return T.length>0&&(S=_e(S,T)),re(S,u.shape)},I=()=>{let S=r;const T=Ot(p.shape,y);return T.length>0&&(S=_e(S,T)),re(It(S),p.shape)};return{a:g,b:I}}};const ME={kernelName:Mf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,y=p.shape.slice(),{axis:g}=u,I=ft(g,p.shape);I.forEach(C=>{y[C]=1});const S=re(r,y),T=le(S,Vi(p.shape,"float32"));return{x:()=>T}}};const PE={kernelName:BR,inputsToSave:["x"],gradFunc:(r,l)=>{const[u]=l;return{x:()=>Me(r,gt(Du(u)))}}};const zE={kernelName:MR,outputsToSave:[!0],gradFunc:(r,l)=>{const[u]=l;return{x:()=>le(Be(Fe(1),gt(u)),r)}}};const VE={kernelName:Yf,inputsToSave:["x"],gradFunc:(r,l,u)=>{const[p]=l,{reps:y}=u,g=()=>{let I=Ke(p);if(p.rank===1)for(let S=0;S{const p=u,{perm:y}=p,g=Wc(y);return{x:()=>xn(r,g)}}};const YE={kernelName:qf,gradFunc:(r,l,u)=>{const p=u,{axis:y}=p;return{value:()=>Ys(r,y)}}};const HE={kernelName:jf,inputsToSave:["segmentIds"],gradFunc:(r,l)=>{const[u]=l,p=()=>wQ(r,u);return{x:p}}};function wQ(r,l){const u=RI(l,Ke(l)),p=CI(r,u);let y=wr(l,Fe(0,"int32"));const g=p.rank-y.rank;for(let S=0;S({x:()=>Ke(r)})};const LQ=[s1,i1,r1,o1,a1,c1,l1,h1,u1,d1,p1,m1,g1,b1,w1,L1,S1,I1,x1,T1,A1,N1,v1,R1,O1,E1,D1,k1,F1,_1,W1,$1,U1,B1,P1,M1,z1,Y1,H1,q1,j1,K1,X1,J1,Z1,Q1,tE,qI,qI,nE,iE,oE,aE,cE,lE,hE,uE,dE,pE,mE,jI,jI,fE,gE,yE,bE,wE,LE,SE,IE,xE,TE,AE,vE,NE,CE,RE,OE,EE,DE,kE,FE,KI,KI,XI,XI,_E,$E,WE,UE,BE,ME,PE,zE,VE,GE,YE,HE,qE];for(const r of LQ)VR(r);function JI(r,l,u=!1){const{Image:p,Canvas:y}=St.getEnv();if(!(r instanceof p||r instanceof y))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");const g=ea(r),I=l/Math.max(g.height,g.width),S=I*g.width,T=I*g.height,C=Rc({width:l,height:l}),D=r instanceof y?r:Su(r),_=Math.abs(S-T)/2,A=u&&S{if(gr(u)){this._imageTensors[p]=u,this._inputDimensions[p]=u.shape;return}if(Rs(u)){const g=u.shape[0];if(g!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${g} passed, but not supported in input array`);this._imageTensors[p]=u,this._inputDimensions[p]=u.shape.slice(1);return}const y=u instanceof St.getEnv().Canvas?u:Su(u);this._canvases[p]=y,this._inputDimensions[p]=[y.height,y.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return 
this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return _i(this.batchSize,0,1).map((r,l)=>this.getReshapedInputDimensions(l))}getInput(r){return this.canvases[r]||this.imageTensors[r]}getInputDimensions(r){return this._inputDimensions[r]}getInputHeight(r){return this._inputDimensions[r][0]}getInputWidth(r){return this._inputDimensions[r][1]}getReshapedInputDimensions(r){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");const l=this.getInputWidth(r),u=this.getInputHeight(r);return _S({width:l,height:u},this.inputSize)}toBatchTensor(r,l=!0){return this._inputSize=r,hO(()=>{const u=_i(this.batchSize,0,1).map(y=>{const g=this.getInput(y);if(g instanceof En){let I=Rs(g)?g:g.expandDims();return I=BS(I,l),(I.shape[1]!==r||I.shape[2]!==r)&&(I=e1.resizeBilinear(I,[r,r])),I.as3D(r,r,3)}if(g instanceof St.getEnv().Canvas)return gI.fromPixels(JI(g,r,l));throw new Error(`toBatchTensor - at batchIdx ${y}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${g}`)}),p=Ys(u.map(y=>Ie(y,"float32"))).as4D(this.batchSize,r,r,3);return p})}}async function Wt(r){if(r instanceof ho)return r;let l=Array.isArray(r)?r:[r];if(!l.length)throw new Error("toNetInput - empty array passed as input");const u=y=>Array.isArray(r)?` at input index ${y}:`:"",p=l.map(Qo);return p.forEach((y,g)=>{if(!Xm(y)&&!gr(y)&&!Rs(y))throw typeof l[g]=="string"?new Error(`toNetInput -${u(g)} string passed, but could not resolve HTMLElement for element id ${l[g]}`):new Error(`toNetInput -${u(g)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(Rs(y)){const I=y.shape[0];if(I!==1)throw new Error(`toNetInput -${u(g)} tf.Tensor4D with batchSize ${I} passed, but not supported in input array`)}}),await Promise.all(p.map(y=>Xm(y)&&qS(y))),new ho(p,Array.isArray(r))}async function zc(r,l){const{Canvas:u}=St.getEnv();let p=r;if(!(r instanceof u)){const I=await Wt(r);if(I.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");const S=I.getInput(0);p=S instanceof u?S:await KS(S)}const y=is(p),g=l.map(I=>I instanceof Jt?I.forSize(p.width,p.height).box.floor():I).map(I=>I.clipAtImageBorders(p.width,p.height));return g.map(({x:I,y:S,width:T,height:C})=>{const D=Rc({width:T,height:C});return is(D).putImageData(y.getImageData(I,S,T,C),0,0),D})}const yg=Je(Ze());async function Vc(r,l){if(!gr(r)&&!Rs(r))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(Rs(r)&&r.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return yg.tidy(()=>{const[u,p,y]=r.shape.slice(Rs(r)?1:0),g=l.map(S=>S instanceof Jt?S.forSize(p,u).box:S).map(S=>S.clipAtImageBorders(p,u)),I=g.map(({x:S,y:T,width:C,height:D})=>yg.slice3d(r.as3D(u,p,y),[T,S,0],[D,C,y]));return I})}async function ha(r,l){const u=St.getEnv().fetch,p=await u(r,l);if(!(p.status<400))throw new Error(`failed to fetch: (${p.status}) ${p.statusText}, from url: ${p.url}`);return p}async function SQ(r){const l=await ha(r),u=await l.blob();if(!u.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${u.type}, for url: ${l.url}`);return jS(u)}async function ZI(r){return(await ha(r)).json()}async function IQ(r){return new 
Float32Array(await(await ha(r)).arrayBuffer())}function bg(r,l){const u=`${l}-weights_manifest.json`;if(!r)return{modelBaseUri:"",manifestUri:u};if(r==="/")return{modelBaseUri:"/",manifestUri:`/${u}`};const p=r.startsWith("http://")?"http://":r.startsWith("https://")?"https://":"";r=r.replace(p,"");const y=r.split("/").filter(S=>S),g=r.endsWith(".json")?y[y.length-1]:u;let I=p+(r.endsWith(".json")?y.slice(0,y.length-1):y).join("/");return I=r.startsWith("/")?`/${I}`:I,{modelBaseUri:I,manifestUri:I==="/"?`/${g}`:`${I}/${g}`}}const jE=Je(Ze());async function QI(r,l){const{manifestUri:u,modelBaseUri:p}=bg(r,l);let y=await ZI(u);return jE.io.loadWeights(y,p)}function xQ(r,l,u=!1){const{width:p,height:y}=u?ea(l):l;return r.width=p,r.height=y,{width:p,height:y}}const Sr=Je(Ze());class Wn{constructor(r){this._name=r;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(r){const{obj:l,objProp:u}=this.traversePropertyPath(r);return l[u]}reassignParamFromPath(r,l){const{obj:u,objProp:p}=this.traversePropertyPath(r);u[p].dispose(),u[p]=l}getParamList(){return this._paramMappings.map(({paramPath:r})=>({path:r,tensor:this.getParamFromPath(r)}))}getTrainableParams(){return this.getParamList().filter(r=>r.tensor instanceof Sr.Variable)}getFrozenParams(){return this.getParamList().filter(r=>!(r.tensor instanceof Sr.Variable))}variable(){this.getFrozenParams().forEach(({path:r,tensor:l})=>{this.reassignParamFromPath(r,l.variable())})}freeze(){this.getTrainableParams().forEach(({path:r,tensor:l})=>{const u=Sr.tensor(l.dataSync());l.dispose(),this.reassignParamFromPath(r,u)})}dispose(r=!0){this.getParamList().forEach(l=>{if(r&&l.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${l.path}`);l.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:r})=>Array.from(r.dataSync())).reduce((r,l)=>r.concat(l)))}async load(r){if(r instanceof Float32Array){this.extractWeights(r);return}await this.loadFromUri(r)}async loadFromUri(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);const l=await QI(r,this.getDefaultModelName());this.loadFromWeightMap(l)}async loadFromDisk(r){if(r&&typeof r!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);const{readFile:l}=St.getEnv(),{manifestUri:u,modelBaseUri:p}=bg(r,this.getDefaultModelName()),y=T=>Promise.all(T.map(C=>l(C).then(D=>D.buffer))),g=Sr.io.weightsLoaderFactory(y),I=JSON.parse((await l(u)).toString()),S=await g(I,p);this.loadFromWeightMap(S)}loadFromWeightMap(r){const{paramMappings:l,params:u}=this.extractParamsFromWeigthMap(r);this._paramMappings=l,this._params=u}extractWeights(r){const{paramMappings:l,params:u}=this.extractParams(r);this._paramMappings=l,this._params=u}traversePropertyPath(r){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");const l=r.split("/").reduce((y,g)=>{if(!y.nextObj.hasOwnProperty(g))throw new Error(`traversePropertyPath - object does not have property ${g}, for path ${r}`);return{obj:y.nextObj,objProp:g,nextObj:y.nextObj[g]}},{nextObj:this.params}),{obj:u,objProp:p}=l;if(!u||!p||!(u[p]instanceof Sr.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${r}`);return{obj:u,objProp:p}}}const Gc=Je(Ze());function os(r,l,u){return Gc.tidy(()=>{let 
p=Gc.separableConv2d(r,l.depthwise_filter,l.pointwise_filter,u,"same");return p=Gc.add(p,l.bias),p})}const Bt=Je(Ze());function wg(r,l,u=!1){return Bt.tidy(()=>{const p=Bt.relu(u?Bt.add(Bt.conv2d(r,l.conv0.filters,[2,2],"same"),l.conv0.bias):os(r,l.conv0,[2,2])),y=os(p,l.conv1,[1,1]),g=Bt.relu(Bt.add(p,y)),I=os(g,l.conv2,[1,1]);return Bt.relu(Bt.add(p,Bt.add(y,I)))})}function Uu(r,l,u=!1,p=!0){return Bt.tidy(()=>{const y=Bt.relu(u?Bt.add(Bt.conv2d(r,l.conv0.filters,p?[2,2]:[1,1],"same"),l.conv0.bias):os(r,l.conv0,p?[2,2]:[1,1])),g=os(y,l.conv1,[1,1]),I=Bt.relu(Bt.add(y,g)),S=os(I,l.conv2,[1,1]),T=Bt.relu(Bt.add(y,Bt.add(g,S))),C=os(T,l.conv3,[1,1]);return Bt.relu(Bt.add(y,Bt.add(g,Bt.add(S,C))))})}const uo=Je(Ze());function ua(r,l,u="same",p=!1){return uo.tidy(()=>{const y=uo.add(uo.conv2d(r,l.filters,[1,1],u),l.bias);return p?uo.relu(y):y})}function Yn(r,l){Object.keys(r).forEach(u=>{l.some(p=>p.originalPath===u)||r[u].dispose()})}const Lg=Je(Ze());function Yc(r,l){return function(u,p,y,g){const I=Lg.tensor4d(r(u*p*y*y),[y,y,u,p]),S=Lg.tensor1d(r(p));return l.push({paramPath:`${g}/filters`},{paramPath:`${g}/bias`}),{filters:I,bias:S}}}const Sg=Je(Ze());function Ig(r,l){return function(u,p,y){const g=Sg.tensor2d(r(u*p),[u,p]),I=Sg.tensor1d(r(p));return l.push({paramPath:`${y}/weights`},{paramPath:`${y}/bias`}),{weights:g,bias:I}}}class ex{constructor(r,l,u){this.depthwise_filter=r;this.pointwise_filter=l;this.bias=u}}const Bu=Je(Ze());function Hc(r,l){return function(u,p,y){const g=Bu.tensor4d(r(3*3*u),[3,3,u,1]),I=Bu.tensor4d(r(u*p),[1,1,u,p]),S=Bu.tensor1d(r(p));return l.push({paramPath:`${y}/depthwise_filter`},{paramPath:`${y}/pointwise_filter`},{paramPath:`${y}/bias`}),new ex(g,I,S)}}function qc(r){return function(l){const u=r(`${l}/depthwise_filter`,4),p=r(`${l}/pointwise_filter`,4),y=r(`${l}/bias`,1);return new ex(u,p,y)}}function gs(r,l){return function(u,p,y){const g=r[u];if(!jo(g,p))throw new Error(`expected weightMap[${u}] to be a Tensor${p}D, instead have ${g}`);return l.push({originalPath:u,paramPath:y||u}),g}}function Hn(r){let l=r;function u(y){const g=l.slice(0,y);return l=l.slice(y),g}function p(){return l}return{extractWeights:u,getRemainingWeights:p}}function xg(r,l){const u=Yc(r,l),p=Hc(r,l);function y(I,S,T,C=!1){const D=C?u(I,S,3,`${T}/conv0`):p(I,S,`${T}/conv0`),_=p(S,S,`${T}/conv1`),A=p(S,S,`${T}/conv2`);return{conv0:D,conv1:_,conv2:A}}function g(I,S,T,C=!1){const{conv0:D,conv1:_,conv2:A}=y(I,S,T,C),B=p(S,S,`${T}/conv3`);return{conv0:D,conv1:_,conv2:A,conv3:B}}return{extractDenseBlock3Params:y,extractDenseBlock4Params:g}}function KE(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractDenseBlock4Params:y}=xg(u,l),g=y(3,32,"dense0",!0),I=y(32,64,"dense1"),S=y(64,128,"dense2"),T=y(128,256,"dense3");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:g,dense1:I,dense2:S,dense3:T}}}function Tg(r){return function(l){const u=r(`${l}/filters`,4),p=r(`${l}/bias`,1);return{filters:u,bias:p}}}function Ag(r,l){const u=gs(r,l),p=Tg(u),y=qc(u);function g(S,T=!1){const C=T?p(`${S}/conv0`):y(`${S}/conv0`),D=y(`${S}/conv1`),_=y(`${S}/conv2`);return{conv0:C,conv1:D,conv2:_}}function I(S,T=!1){const C=T?p(`${S}/conv0`):y(`${S}/conv0`),D=y(`${S}/conv1`),_=y(`${S}/conv2`),A=y(`${S}/conv3`);return{conv0:C,conv1:D,conv2:_,conv3:A}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:I}}function XE(r){const 
l=[],{extractDenseBlock4Params:u}=Ag(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2"),dense3:u("dense3")};return Yn(r,l),{params:p,paramMappings:l}}const po=Je(Ze());class vg extends Wn{constructor(){super("FaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceFeatureExtractor - load model before inference");return po.tidy(()=>{const u=po.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(po.scalar(255));let g=Uu(y,l.dense0,!0);return g=Uu(g,l.dense1),g=Uu(g,l.dense2),g=Uu(g,l.dense3),g=po.avgPool(g,[7,7],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await Wt(r))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(r){return XE(r)}extractParams(r){return KE(r)}}const jc=Je(Ze());function Mu(r,l){return jc.tidy(()=>jc.add(jc.matMul(r,l.weights),l.bias))}function JE(r,l,u){const p=[],{extractWeights:y,getRemainingWeights:g}=Hn(r),I=Ig(y,p),S=I(l,u,"fc");if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:p,params:{fc:S}}}function ZE(r){const l=[],u=gs(r,l);function p(g){const I=u(`${g}/weights`,2),S=u(`${g}/bias`,1);return{weights:I,bias:S}}const y={fc:p("fc")};return Yn(r,l),{params:y,paramMappings:l}}function Ng(r){const l={},u={};return Object.keys(r).forEach(p=>{const y=p.startsWith("fc")?u:l;y[p]=r[p]}),{featureExtractorMap:l,classifierMap:u}}const QE=Je(Ze());class Cg extends Wn{constructor(r,l){super(r);this._faceFeatureExtractor=l}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return QE.tidy(()=>{const u=r instanceof ho?this.faceFeatureExtractor.forwardInput(r):r;return Mu(u.as2D(u.shape[0],-1),l.fc)})}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return JE(r,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Ng(r);return this.faceFeatureExtractor.loadFromWeightMap(l),ZE(u)}extractParams(r){const l=this.getClassifierChannelsIn(),u=this.getClassifierChannelsOut(),p=u*l+u,y=r.slice(0,r.length-p),g=r.slice(r.length-p);return this.faceFeatureExtractor.extractWeights(y),this.extractClassifierParams(g)}}const tx=["neutral","happy","sad","angry","fearful","disgusted","surprised"];class da{constructor(r){if(r.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${r.length}`);tx.forEach((l,u)=>{this[l]=r[u]})}asSortedArray(){return tx.map(r=>({expression:r,probability:this[r]})).sort((r,l)=>l.probability-r.probability)}}const Kc=Je(Ze());class nx extends Cg{constructor(r=new vg){super("FaceExpressionNet",r)}forwardInput(r){return Kc.tidy(()=>Kc.softmax(this.runNet(r)))}async forward(r){return this.forwardInput(await Wt(r))}async predictExpressions(r){const l=await Wt(r),u=await this.forwardInput(l),p=await Promise.all(Kc.unstack(u).map(async g=>{const I=await g.data();return g.dispose(),I}));u.dispose();const y=p.map(g=>new da(g));return l.isBatchInput?y:y[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}}function sx(r){return r.expressions instanceof da}function Rg(r,l){const u={expressions:l};return 
Object.assign({},r,u)}function TQ(r,l,u=.1,p){const y=Array.isArray(l)?l:[l];y.forEach(g=>{const I=g instanceof da?g:sx(g)?g.expressions:void 0;if(!I)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");const S=I.asSortedArray(),T=S.filter(_=>_.probability>u),C=$i(g)?g.detection.box.bottomLeft:p||new Qe(0,0),D=new Cc(T.map(_=>`${_.expression} (${Ko(_.probability)})`),C);D.draw(r)})}function pa(r){return $i(r)&&r.landmarks instanceof Gs&&r.unshiftedLandmarks instanceof Gs&&r.alignedRect instanceof Jt}function Xc(r,l){const{box:u}=r.detection,p=l.shiftBy(u.x,u.y),y=p.align(),{imageDims:g}=r.detection,I=new Jt(r.detection.score,y.rescale(g.reverse()),g),S={landmarks:p,unshiftedLandmarks:l,alignedRect:I};return Object.assign({},r,S)}class eD{constructor(r={}){const{drawLines:l=!0,drawPoints:u=!0,lineWidth:p,lineColor:y,pointSize:g,pointColor:I}=r;this.drawLines=l,this.drawPoints=u,this.lineWidth=p||1,this.pointSize=g||2,this.lineColor=y||"rgba(0, 255, 255, 1)",this.pointColor=I||"rgba(255, 0, 255, 1)"}}class tD{constructor(r,l={}){this.faceLandmarks=r,this.options=new eD(l)}draw(r){const l=is(r),{drawLines:u,drawPoints:p,lineWidth:y,lineColor:g,pointSize:I,pointColor:S}=this.options;if(u&&this.faceLandmarks instanceof wu&&(l.strokeStyle=g,l.lineWidth=y,fr(l,this.faceLandmarks.getJawOutline()),fr(l,this.faceLandmarks.getLeftEyeBrow()),fr(l,this.faceLandmarks.getRightEyeBrow()),fr(l,this.faceLandmarks.getNose()),fr(l,this.faceLandmarks.getLeftEye(),!0),fr(l,this.faceLandmarks.getRightEye(),!0),fr(l,this.faceLandmarks.getMouth(),!0)),p){l.strokeStyle=S,l.fillStyle=S;const T=C=>{l.beginPath(),l.arc(C.x,C.y,I,0,2*Math.PI),l.fill()};this.faceLandmarks.positions.forEach(T)}}}function AQ(r,l){const u=Array.isArray(l)?l:[l];u.forEach(p=>{const y=p instanceof Gs?p:pa(p)?p.landmarks:void 0;if(!y)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new tD(y).draw(r)})}const ix={};Pm(ix,{AnchorPosition:()=>Ui,DrawBox:()=>HS,DrawBoxOptions:()=>s2,DrawFaceLandmarks:()=>tD,DrawFaceLandmarksOptions:()=>eD,DrawTextField:()=>Cc,DrawTextFieldOptions:()=>jm,drawContour:()=>fr,drawDetections:()=>_J,drawFaceExpressions:()=>TQ,drawFaceLandmarks:()=>AQ});function vQ(r,l){const u=Yc(r,l),p=Hc(r,l);function y(I,S,T){const C=p(I,S,`${T}/separable_conv0`),D=p(S,S,`${T}/separable_conv1`),_=u(I,S,1,`${T}/expansion_conv`);return{separable_conv0:C,separable_conv1:D,expansion_conv:_}}function g(I,S){const T=p(I,I,`${S}/separable_conv0`),C=p(I,I,`${S}/separable_conv1`),D=p(I,I,`${S}/separable_conv2`);return{separable_conv0:T,separable_conv1:C,separable_conv2:D}}return{extractConvParams:u,extractSeparableConvParams:p,extractReductionBlockParams:y,extractMainBlockParams:g}}function nD(r,l){const u=[],{extractWeights:p,getRemainingWeights:y}=Hn(r),{extractConvParams:g,extractSeparableConvParams:I,extractReductionBlockParams:S,extractMainBlockParams:T}=vQ(p,u),C=g(3,32,3,"entry_flow/conv_in"),D=S(32,64,"entry_flow/reduction_block_0"),_=S(64,128,"entry_flow/reduction_block_1"),A={conv_in:C,reduction_block_0:D,reduction_block_1:_},B={};_i(l,0,1).forEach(ge=>{B[`main_block_${ge}`]=T(128,`middle_flow/main_block_${ge}`)});const ne=S(128,256,"exit_flow/reduction_block"),te=I(256,512,"exit_flow/separable_conv"),P={reduction_block:ne,separable_conv:te};if(y().length!==0)throw new Error(`weights remaing after extract: 
${y().length}`);return{paramMappings:u,params:{entry_flow:A,middle_flow:B,exit_flow:P}}}function NQ(r,l){const u=gs(r,l),p=Tg(u),y=qc(u);function g(S){const T=y(`${S}/separable_conv0`),C=y(`${S}/separable_conv1`),D=p(`${S}/expansion_conv`);return{separable_conv0:T,separable_conv1:C,expansion_conv:D}}function I(S){const T=y(`${S}/separable_conv0`),C=y(`${S}/separable_conv1`),D=y(`${S}/separable_conv2`);return{separable_conv0:T,separable_conv1:C,separable_conv2:D}}return{extractConvParams:p,extractSeparableConvParams:y,extractReductionBlockParams:g,extractMainBlockParams:I}}function sD(r,l){const u=[],{extractConvParams:p,extractSeparableConvParams:y,extractReductionBlockParams:g,extractMainBlockParams:I}=NQ(r,u),S=p("entry_flow/conv_in"),T=g("entry_flow/reduction_block_0"),C=g("entry_flow/reduction_block_1"),D={conv_in:S,reduction_block_0:T,reduction_block_1:C},_={};_i(l,0,1).forEach(te=>{_[`main_block_${te}`]=I(`middle_flow/main_block_${te}`)});const A=g("exit_flow/reduction_block"),B=y("exit_flow/separable_conv"),ne={reduction_block:A,separable_conv:B};return Yn(r,u),{params:{entry_flow:D,middle_flow:_,exit_flow:ne},paramMappings:u}}const on=Je(Ze());function iD(r,l,u){return on.add(on.conv2d(r,l.filters,u,"same"),l.bias)}function rx(r,l,u=!0){let p=u?on.relu(r):r;return p=os(p,l.separable_conv0,[1,1]),p=os(on.relu(p),l.separable_conv1,[1,1]),p=on.maxPool(p,[3,3],[2,2],"same"),p=on.add(p,iD(r,l.expansion_conv,[2,2])),p}function CQ(r,l){let u=os(on.relu(r),l.separable_conv0,[1,1]);return u=os(on.relu(u),l.separable_conv1,[1,1]),u=os(on.relu(u),l.separable_conv2,[1,1]),u=on.add(u,r),u}class rD extends Wn{constructor(r){super("TinyXception");this._numMainBlocks=r}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyXception - load model before inference");return on.tidy(()=>{const u=on.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(on.scalar(256));let g=on.relu(iD(y,l.entry_flow.conv_in,[2,2]));return g=rx(g,l.entry_flow.reduction_block_0,!1),g=rx(g,l.entry_flow.reduction_block_1),_i(this._numMainBlocks,0,1).forEach(I=>{g=CQ(g,l.middle_flow[`main_block_${I}`])}),g=rx(g,l.exit_flow.reduction_block),g=on.relu(os(g,l.exit_flow.separable_conv,[1,1])),g})}async forward(r){return this.forwardInput(await Wt(r))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(r){return sD(r,this._numMainBlocks)}extractParams(r){return nD(r,this._numMainBlocks)}}function oD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),y=Ig(u,l),g=y(512,1,"fc/age"),I=y(512,2,"fc/gender");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{fc:{age:g,gender:I}}}}function aD(r){const l=[],u=gs(r,l);function p(g){const I=u(`${g}/weights`,2),S=u(`${g}/bias`,1);return{weights:I,bias:S}}const y={fc:{age:p("fc/age"),gender:p("fc/gender")}};return Yn(r,l),{params:y,paramMappings:l}}var Ir;(function(r){r.FEMALE="female",r.MALE="male"})(Ir||(Ir={}));const Gi=Je(Ze());class ox extends Wn{constructor(r=new rD(2)){super("AgeGenderNet");this._faceFeatureExtractor=r}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(r){const{params:l}=this;if(!l)throw new Error(`${this._name} - load model before inference`);return Gi.tidy(()=>{const u=r instanceof ho?this.faceFeatureExtractor.forwardInput(r):r,p=Gi.avgPool(u,[7,7],[2,2],"valid").as2D(u.shape[0],-1),y=Mu(p,l.fc.age).as1D(),g=Mu(p,l.fc.gender);return{age:y,gender:g}})}forwardInput(r){return 
Gi.tidy(()=>{const{age:l,gender:u}=this.runNet(r);return{age:l,gender:Gi.softmax(u)}})}async forward(r){return this.forwardInput(await Wt(r))}async predictAgeAndGender(r){const l=await Wt(r),u=await this.forwardInput(l),p=Gi.unstack(u.age),y=Gi.unstack(u.gender),g=p.map((S,T)=>({ageTensor:S,genderTensor:y[T]})),I=await Promise.all(g.map(async({ageTensor:S,genderTensor:T})=>{const C=(await S.data())[0],D=(await T.data())[0],_=D>.5,A=_?Ir.MALE:Ir.FEMALE,B=_?D:1-D;return S.dispose(),T.dispose(),{age:C,gender:A,genderProbability:B}}));return u.age.dispose(),u.gender.dispose(),l.isBatchInput?I:I[0]}getDefaultModelName(){return"age_gender_model"}dispose(r=!0){this.faceFeatureExtractor.dispose(r),super.dispose(r)}loadClassifierParams(r){const{params:l,paramMappings:u}=this.extractClassifierParams(r);this._params=l,this._paramMappings=u}extractClassifierParams(r){return oD(r)}extractParamsFromWeigthMap(r){const{featureExtractorMap:l,classifierMap:u}=Ng(r);return this.faceFeatureExtractor.loadFromWeightMap(l),aD(u)}extractParams(r){const l=512*1+1+(512*2+2),u=r.slice(0,r.length-l),p=r.slice(r.length-l);return this.faceFeatureExtractor.extractWeights(u),this.extractClassifierParams(p)}}const ys=Je(Ze());class Og extends Cg{postProcess(r,l,u){const p=u.map(({width:g,height:I})=>{const S=l/Math.max(I,g);return{width:g*S,height:I*S}}),y=p.length;return ys.tidy(()=>{const g=(D,_)=>ys.stack([ys.fill([68],D,"float32"),ys.fill([68],_,"float32")],1).as2D(1,136).as1D(),I=(D,_)=>{const{width:A,height:B}=p[D];return _(A,B)?Math.abs(A-B)/2:0},S=D=>I(D,(_,A)=>_I(D,(_,A)=>A<_),C=r.mul(ys.fill([y,136],l,"float32")).sub(ys.stack(Array.from(Array(y),(D,_)=>g(S(_),T(_))))).div(ys.stack(Array.from(Array(y),(D,_)=>g(p[_].width,p[_].height))));return C})}forwardInput(r){return ys.tidy(()=>{const l=this.runNet(r);return this.postProcess(l,r.inputSize,r.inputDimensions.map(([u,p])=>({height:u,width:p})))})}async forward(r){return this.forwardInput(await Wt(r))}async detectLandmarks(r){const l=await Wt(r),u=ys.tidy(()=>ys.unstack(this.forwardInput(l))),p=await Promise.all(u.map(async(y,g)=>{const I=Array.from(await y.data()),S=I.filter((C,D)=>Vm(D)),T=I.filter((C,D)=>!Vm(D));return new wu(Array(68).fill(0).map((C,D)=>new Qe(S[D],T[D])),{height:l.getInputHeight(g),width:l.getInputWidth(g)})}));return u.forEach(y=>y.dispose()),l.isBatchInput?p:p[0]}getClassifierChannelsOut(){return 136}}class Pu extends Og{constructor(r=new vg){super("FaceLandmark68Net",r)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}}function cD(r){const l=[],{extractDenseBlock3Params:u}=Ag(r,l),p={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2")};return Yn(r,l),{params:p,paramMappings:l}}function lD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractDenseBlock3Params:y}=xg(u,l),g=y(3,32,"dense0",!0),I=y(32,64,"dense1"),S=y(64,128,"dense2");if(p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{paramMappings:l,params:{dense0:g,dense1:I,dense2:S}}}const mo=Je(Ze());class hD extends Wn{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("TinyFaceFeatureExtractor - load model before inference");return mo.tidy(()=>{const u=mo.cast(r.toBatchTensor(112,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(mo.scalar(255));let g=wg(y,l.dense0,!0);return g=wg(g,l.dense1),g=wg(g,l.dense2),g=mo.avgPool(g,[14,14],[2,2],"valid"),g})}async forward(r){return this.forwardInput(await 
Wt(r))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(r){return cD(r)}extractParams(r){return lD(r)}}class ax extends Og{constructor(r=new hD){super("FaceLandmark68TinyNet",r)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}}class RQ extends Pu{}const Eg=Je(Ze());function uD(r,l){return Eg.add(Eg.mul(r,l.weights),l.biases)}const Jc=Je(Ze());function cx(r,l,u,p,y="same"){const{filters:g,bias:I}=l.conv;let S=Jc.conv2d(r,g,u,y);return S=Jc.add(S,I),S=uD(S,l.scale),p?Jc.relu(S):S}function dD(r,l){return cx(r,l,[1,1],!0)}function lx(r,l){return cx(r,l,[1,1],!1)}function Dg(r,l){return cx(r,l,[2,2],!0,"valid")}const bs=Je(Ze());function OQ(r,l){function u(S,T,C){const D=r(S),_=D.length/(T*C*C);if(FS(_))throw new Error(`depth has to be an integer: ${_}, weights.length: ${D.length}, numFilters: ${T}, filterSize: ${C}`);return bs.tidy(()=>bs.transpose(bs.tensor4d(D,[T,_,C,C]),[2,3,1,0]))}function p(S,T,C,D){const _=u(S,T,C),A=bs.tensor1d(r(T));return l.push({paramPath:`${D}/filters`},{paramPath:`${D}/bias`}),{filters:_,bias:A}}function y(S,T){const C=bs.tensor1d(r(S)),D=bs.tensor1d(r(S));return l.push({paramPath:`${T}/weights`},{paramPath:`${T}/biases`}),{weights:C,biases:D}}function g(S,T,C,D){const _=p(S,T,C,`${D}/conv`),A=y(T,`${D}/scale`);return{conv:_,scale:A}}function I(S,T,C,D,_=!1){const A=g((_?.5:1)*S,T,C,`${D}/conv1`),B=g(S,T,C,`${D}/conv2`);return{conv1:A,conv2:B}}return{extractConvLayerParams:g,extractResidualLayerParams:I}}function pD(r){const{extractWeights:l,getRemainingWeights:u}=Hn(r),p=[],{extractConvLayerParams:y,extractResidualLayerParams:g}=OQ(l,p),I=y(4704,32,7,"conv32_down"),S=g(9216,32,3,"conv32_1"),T=g(9216,32,3,"conv32_2"),C=g(9216,32,3,"conv32_3"),D=g(36864,64,3,"conv64_down",!0),_=g(36864,64,3,"conv64_1"),A=g(36864,64,3,"conv64_2"),B=g(36864,64,3,"conv64_3"),ne=g(147456,128,3,"conv128_down",!0),te=g(147456,128,3,"conv128_1"),P=g(147456,128,3,"conv128_2"),ge=g(589824,256,3,"conv256_down",!0),ae=g(589824,256,3,"conv256_1"),Le=g(589824,256,3,"conv256_2"),ve=g(589824,256,3,"conv256_down_out"),Ve=bs.tidy(()=>bs.transpose(bs.tensor2d(l(256*128),[128,256]),[1,0]));if(p.push({paramPath:"fc"}),u().length!==0)throw new Error(`weights remaing after extract: ${u().length}`);const at={conv32_down:I,conv32_1:S,conv32_2:T,conv32_3:C,conv64_down:D,conv64_1:_,conv64_2:A,conv64_3:B,conv128_down:ne,conv128_1:te,conv128_2:P,conv256_down:ge,conv256_1:ae,conv256_2:Le,conv256_down_out:ve,fc:Ve};return{params:at,paramMappings:p}}function EQ(r,l){const u=gs(r,l);function p(I){const S=u(`${I}/scale/weights`,1),T=u(`${I}/scale/biases`,1);return{weights:S,biases:T}}function y(I){const S=u(`${I}/conv/filters`,4),T=u(`${I}/conv/bias`,1),C=p(I);return{conv:{filters:S,bias:T},scale:C}}function g(I){return{conv1:y(`${I}/conv1`),conv2:y(`${I}/conv2`)}}return{extractConvLayerParams:y,extractResidualLayerParams:g}}function mD(r){const l=[],{extractConvLayerParams:u,extractResidualLayerParams:p}=EQ(r,l),y=u("conv32_down"),g=p("conv32_1"),I=p("conv32_2"),S=p("conv32_3"),T=p("conv64_down"),C=p("conv64_1"),D=p("conv64_2"),_=p("conv64_3"),A=p("conv128_down"),B=p("conv128_1"),ne=p("conv128_2"),te=p("conv256_down"),P=p("conv256_1"),ge=p("conv256_2"),ae=p("conv256_down_out"),Le=r.fc;if(l.push({originalPath:"fc",paramPath:"fc"}),!kS(Le))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${Le}`);const 
ve={conv32_down:y,conv32_1:g,conv32_2:I,conv32_3:S,conv64_down:T,conv64_1:C,conv64_2:D,conv64_3:_,conv128_down:A,conv128_1:B,conv128_2:ne,conv256_down:te,conv256_1:P,conv256_2:ge,conv256_down_out:ae,fc:Le};return Yn(r,l),{params:ve,paramMappings:l}}const qn=Je(Ze());function gi(r,l){let u=dD(r,l.conv1);return u=lx(u,l.conv2),u=qn.add(u,r),u=qn.relu(u),u}function zu(r,l){let u=Dg(r,l.conv1);u=lx(u,l.conv2);let p=qn.avgPool(r,2,2,"valid");const y=qn.zeros(p.shape),g=p.shape[3]!==u.shape[3],I=p.shape[1]!==u.shape[1]||p.shape[2]!==u.shape[2];if(I){const S=[...u.shape];S[1]=1;const T=qn.zeros(S);u=qn.concat([u,T],1);const C=[...u.shape];C[2]=1;const D=qn.zeros(C);u=qn.concat([u,D],2)}return p=g?qn.concat([p,y],3):p,u=qn.add(p,u),u=qn.relu(u),u}const Fs=Je(Ze());class Vu extends Wn{constructor(){super("FaceRecognitionNet")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("FaceRecognitionNet - load model before inference");return Fs.tidy(()=>{const u=Fs.cast(r.toBatchTensor(150,!0),"float32"),p=[122.782,117.001,104.298],y=di(u,p).div(Fs.scalar(256));let g=Dg(y,l.conv32_down);g=Fs.maxPool(g,3,2,"valid"),g=gi(g,l.conv32_1),g=gi(g,l.conv32_2),g=gi(g,l.conv32_3),g=zu(g,l.conv64_down),g=gi(g,l.conv64_1),g=gi(g,l.conv64_2),g=gi(g,l.conv64_3),g=zu(g,l.conv128_down),g=gi(g,l.conv128_1),g=gi(g,l.conv128_2),g=zu(g,l.conv256_down),g=gi(g,l.conv256_1),g=gi(g,l.conv256_2),g=zu(g,l.conv256_down_out);const I=g.mean([1,2]),S=Fs.matMul(I,l.fc);return S})}async forward(r){return this.forwardInput(await Wt(r))}async computeFaceDescriptor(r){const l=await Wt(r),u=Fs.tidy(()=>Fs.unstack(this.forwardInput(l))),p=await Promise.all(u.map(y=>y.data()));return u.forEach(y=>y.dispose()),l.isBatchInput?p:p[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(r){return mD(r)}extractParams(r){return pD(r)}}function DQ(r){const l=new Vu;return l.extractWeights(r),l}function kg(r,l){const u={descriptor:l};return Object.assign({},r,u)}function kQ(r){return typeof r.age=="number"}function Fg(r,l){const u={age:l};return Object.assign({},r,u)}function FQ(r){return(r.gender===Ir.MALE||r.gender===Ir.FEMALE)&&vc(r.genderProbability)}function _g(r,l,u){const p={gender:l,genderProbability:u};return Object.assign({},r,p)}const yi=Je(Ze());function _Q(r,l){function u(T,C){const D=yi.tensor4d(r(3*3*T),[3,3,T,1]),_=yi.tensor1d(r(T)),A=yi.tensor1d(r(T)),B=yi.tensor1d(r(T)),ne=yi.tensor1d(r(T));return l.push({paramPath:`${C}/filters`},{paramPath:`${C}/batch_norm_scale`},{paramPath:`${C}/batch_norm_offset`},{paramPath:`${C}/batch_norm_mean`},{paramPath:`${C}/batch_norm_variance`}),{filters:D,batch_norm_scale:_,batch_norm_offset:A,batch_norm_mean:B,batch_norm_variance:ne}}function p(T,C,D,_,A){const B=yi.tensor4d(r(T*C*D*D),[D,D,T,C]),ne=yi.tensor1d(r(C));return l.push({paramPath:`${_}/filters`},{paramPath:`${_}/${A?"batch_norm_offset":"bias"}`}),{filters:B,bias:ne}}function y(T,C,D,_){const{filters:A,bias:B}=p(T,C,D,_,!0);return{filters:A,batch_norm_offset:B}}function g(T,C,D){const _=u(T,`${D}/depthwise_conv`),A=y(T,C,1,`${D}/pointwise_conv`);return{depthwise_conv:_,pointwise_conv:A}}function I(){const 
T=y(3,32,3,"mobilenetv1/conv_0"),C=g(32,64,"mobilenetv1/conv_1"),D=g(64,128,"mobilenetv1/conv_2"),_=g(128,128,"mobilenetv1/conv_3"),A=g(128,256,"mobilenetv1/conv_4"),B=g(256,256,"mobilenetv1/conv_5"),ne=g(256,512,"mobilenetv1/conv_6"),te=g(512,512,"mobilenetv1/conv_7"),P=g(512,512,"mobilenetv1/conv_8"),ge=g(512,512,"mobilenetv1/conv_9"),ae=g(512,512,"mobilenetv1/conv_10"),Le=g(512,512,"mobilenetv1/conv_11"),ve=g(512,1024,"mobilenetv1/conv_12"),Ve=g(1024,1024,"mobilenetv1/conv_13");return{conv_0:T,conv_1:C,conv_2:D,conv_3:_,conv_4:A,conv_5:B,conv_6:ne,conv_7:te,conv_8:P,conv_9:ge,conv_10:ae,conv_11:Le,conv_12:ve,conv_13:Ve}}function S(){const T=y(1024,256,1,"prediction_layer/conv_0"),C=y(256,512,3,"prediction_layer/conv_1"),D=y(512,128,1,"prediction_layer/conv_2"),_=y(128,256,3,"prediction_layer/conv_3"),A=y(256,128,1,"prediction_layer/conv_4"),B=y(128,256,3,"prediction_layer/conv_5"),ne=y(256,64,1,"prediction_layer/conv_6"),te=y(64,128,3,"prediction_layer/conv_7"),P=p(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),ge=p(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),ae=p(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),Le=p(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),ve=p(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Ve=p(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),at=p(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),pt=p(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),$t=p(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Vt=p(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),qe=p(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),ln=p(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),bt={box_encoding_predictor:P,class_predictor:ge},ws={box_encoding_predictor:ae,class_predictor:Le},Nr={box_encoding_predictor:ve,class_predictor:Ve},Cr={box_encoding_predictor:at,class_predictor:pt},ba={box_encoding_predictor:$t,class_predictor:Vt},hn={box_encoding_predictor:qe,class_predictor:ln};return{conv_0:T,conv_1:C,conv_2:D,conv_3:_,conv_4:A,conv_5:B,conv_6:ne,conv_7:te,box_predictor_0:bt,box_predictor_1:ws,box_predictor_2:Nr,box_predictor_3:Cr,box_predictor_4:ba,box_predictor_5:hn}}return{extractMobilenetV1Params:I,extractPredictionLayerParams:S}}function fD(r){const l=[],{extractWeights:u,getRemainingWeights:p}=Hn(r),{extractMobilenetV1Params:y,extractPredictionLayerParams:g}=_Q(u,l),I=y(),S=g(),T=yi.tensor3d(u(5118*4),[1,5118,4]),C={extra_dim:T};if(l.push({paramPath:"output_layer/extra_dim"}),p().length!==0)throw new Error(`weights remaing after extract: ${p().length}`);return{params:{mobilenetv1:I,prediction_layer:S,output_layer:C},paramMappings:l}}function WQ(r,l){const u=gs(r,l);function p(C,D,_){const A=u(`${C}/Conv2d_${D}_pointwise/weights`,4,`${_}/filters`),B=u(`${C}/Conv2d_${D}_pointwise/convolution_bn_offset`,1,`${_}/batch_norm_offset`);return{filters:A,batch_norm_offset:B}}function y(C){const 
D=`mobilenetv1/conv_${C}`,_=`MobilenetV1/Conv2d_${C}_depthwise`,A=`${D}/depthwise_conv`,B=`${D}/pointwise_conv`,ne=u(`${_}/depthwise_weights`,4,`${A}/filters`),te=u(`${_}/BatchNorm/gamma`,1,`${A}/batch_norm_scale`),P=u(`${_}/BatchNorm/beta`,1,`${A}/batch_norm_offset`),ge=u(`${_}/BatchNorm/moving_mean`,1,`${A}/batch_norm_mean`),ae=u(`${_}/BatchNorm/moving_variance`,1,`${A}/batch_norm_variance`);return{depthwise_conv:{filters:ne,batch_norm_scale:te,batch_norm_offset:P,batch_norm_mean:ge,batch_norm_variance:ae},pointwise_conv:p("MobilenetV1",C,B)}}function g(){return{conv_0:p("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:y(1),conv_2:y(2),conv_3:y(3),conv_4:y(4),conv_5:y(5),conv_6:y(6),conv_7:y(7),conv_8:y(8),conv_9:y(9),conv_10:y(10),conv_11:y(11),conv_12:y(12),conv_13:y(13)}}function I(C,D){const _=u(`${C}/weights`,4,`${D}/filters`),A=u(`${C}/biases`,1,`${D}/bias`);return{filters:_,bias:A}}function S(C){const D=I(`Prediction/BoxPredictor_${C}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${C}/box_encoding_predictor`),_=I(`Prediction/BoxPredictor_${C}/ClassPredictor`,`prediction_layer/box_predictor_${C}/class_predictor`);return{box_encoding_predictor:D,class_predictor:_}}function T(){return{conv_0:p("Prediction",0,"prediction_layer/conv_0"),conv_1:p("Prediction",1,"prediction_layer/conv_1"),conv_2:p("Prediction",2,"prediction_layer/conv_2"),conv_3:p("Prediction",3,"prediction_layer/conv_3"),conv_4:p("Prediction",4,"prediction_layer/conv_4"),conv_5:p("Prediction",5,"prediction_layer/conv_5"),conv_6:p("Prediction",6,"prediction_layer/conv_6"),conv_7:p("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:S(0),box_predictor_1:S(1),box_predictor_2:S(2),box_predictor_3:S(3),box_predictor_4:S(4),box_predictor_5:S(5)}}return{extractMobilenetV1Params:g,extractPredictionLayerParams:T}}function gD(r){const l=[],{extractMobilenetV1Params:u,extractPredictionLayerParams:p}=WQ(r,l),y=r["Output/extra_dim"];if(l.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!gr(y))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${y}`);const g={mobilenetv1:u(),prediction_layer:p(),output_layer:{extra_dim:y}};return Yn(r,l),{params:g,paramMappings:l}}const fo=Je(Ze());function qs(r,l,u){return fo.tidy(()=>{let p=fo.conv2d(r,l.filters,u,"same");return p=fo.add(p,l.batch_norm_offset),fo.clipByValue(p,0,6)})}const xr=Je(Ze()),$Q=.0010000000474974513;function UQ(r,l,u){return xr.tidy(()=>{let p=xr.depthwiseConv2d(r,l.filters,u,"same");return p=xr.batchNorm(p,l.batch_norm_mean,l.batch_norm_variance,l.batch_norm_offset,l.batch_norm_scale,$Q),xr.clipByValue(p,0,6)})}function BQ(r){return[2,4,6,12].some(l=>l===r)?[2,2]:[1,1]}function yD(r,l){return xr.tidy(()=>{let u,p=qs(r,l.conv_0,[2,2]);const y=[l.conv_1,l.conv_2,l.conv_3,l.conv_4,l.conv_5,l.conv_6,l.conv_7,l.conv_8,l.conv_9,l.conv_10,l.conv_11,l.conv_12,l.conv_13];if(y.forEach((g,I)=>{const S=I+1,T=BQ(S);p=UQ(p,g.depthwise_conv,T),p=qs(p,g.pointwise_conv,[1,1]),S===11&&(u=p)}),u===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:p,conv11:u}})}function bD(r,l,u,p,y){const g=r.shape[0],I=Math.min(u,g),S=l.map((D,_)=>({score:D,boxIndex:_})).filter(D=>D.score>y).sort((D,_)=>_.score-D.score),T=D=>D<=p?1:0,C=[];return S.forEach(D=>{if(C.length>=I)return;const _=D.score;for(let A=C.length-1;A>=0;--A){const B=MQ(r,D.boxIndex,C[A]);if(B===0)continue;if(D.score*=T(B),D.score<=y)break}_===D.score&&C.push(D.boxIndex)}),C}function MQ(r,l,u){const 
p=r.arraySync(),y=Math.min(p[l][0],p[l][2]),g=Math.min(p[l][1],p[l][3]),I=Math.max(p[l][0],p[l][2]),S=Math.max(p[l][1],p[l][3]),T=Math.min(p[u][0],p[u][2]),C=Math.min(p[u][1],p[u][3]),D=Math.max(p[u][0],p[u][2]),_=Math.max(p[u][1],p[u][3]),A=(I-y)*(S-g),B=(D-T)*(_-C);if(A<=0||B<=0)return 0;const ne=Math.max(y,T),te=Math.max(g,C),P=Math.min(I,D),ge=Math.min(S,_),ae=Math.max(P-ne,0)*Math.max(ge-te,0);return ae/(A+B-ae)}const ke=Je(Ze());function PQ(r){const l=ke.unstack(ke.transpose(r,[1,0])),u=[ke.sub(l[2],l[0]),ke.sub(l[3],l[1])],p=[ke.add(l[0],ke.div(u[0],ke.scalar(2))),ke.add(l[1],ke.div(u[1],ke.scalar(2)))];return{sizes:u,centers:p}}function zQ(r,l){const{sizes:u,centers:p}=PQ(r),y=ke.unstack(ke.transpose(l,[1,0])),g=ke.div(ke.mul(ke.exp(ke.div(y[2],ke.scalar(5))),u[0]),ke.scalar(2)),I=ke.add(ke.mul(ke.div(y[0],ke.scalar(10)),u[0]),p[0]),S=ke.div(ke.mul(ke.exp(ke.div(y[3],ke.scalar(5))),u[1]),ke.scalar(2)),T=ke.add(ke.mul(ke.div(y[1],ke.scalar(10)),u[1]),p[1]);return ke.transpose(ke.stack([ke.sub(I,g),ke.sub(T,S),ke.add(I,g),ke.add(T,S)]),[1,0])}function wD(r,l,u){return ke.tidy(()=>{const p=r.shape[0];let y=zQ(ke.reshape(ke.tile(u.extra_dim,[p,1,1]),[-1,4]),ke.reshape(r,[-1,4]));y=ke.reshape(y,[p,y.shape[0]/p,4]);const g=ke.sigmoid(ke.slice(l,[0,0,1],[-1,-1,-1]));let I=ke.slice(g,[0,0,0],[-1,-1,1]);I=ke.reshape(I,[p,I.shape[1]]);const S=ke.unstack(y),T=ke.unstack(I);return{boxes:S,scores:T}})}const Gu=Je(Ze());function ma(r,l){return Gu.tidy(()=>{const u=r.shape[0],p=Gu.reshape(ua(r,l.box_encoding_predictor),[u,-1,1,4]),y=Gu.reshape(ua(r,l.class_predictor),[u,-1,3]);return{boxPredictionEncoding:p,classPrediction:y}})}const Yu=Je(Ze());function LD(r,l,u){return Yu.tidy(()=>{const p=qs(r,u.conv_0,[1,1]),y=qs(p,u.conv_1,[2,2]),g=qs(y,u.conv_2,[1,1]),I=qs(g,u.conv_3,[2,2]),S=qs(I,u.conv_4,[1,1]),T=qs(S,u.conv_5,[2,2]),C=qs(T,u.conv_6,[1,1]),D=qs(C,u.conv_7,[2,2]),_=ma(l,u.box_predictor_0),A=ma(r,u.box_predictor_1),B=ma(y,u.box_predictor_2),ne=ma(I,u.box_predictor_3),te=ma(T,u.box_predictor_4),P=ma(D,u.box_predictor_5),ge=Yu.concat([_.boxPredictionEncoding,A.boxPredictionEncoding,B.boxPredictionEncoding,ne.boxPredictionEncoding,te.boxPredictionEncoding,P.boxPredictionEncoding],1),ae=Yu.concat([_.classPrediction,A.classPrediction,B.classPrediction,ne.classPrediction,te.classPrediction,P.classPrediction],1);return{boxPredictions:ge,classPredictions:ae}})}class bi{constructor({minConfidence:r,maxResults:l}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=r||.5,this._maxResults=l||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}}const wi=Je(Ze());class Zc extends Wn{constructor(){super("SsdMobilenetv1")}forwardInput(r){const{params:l}=this;if(!l)throw new Error("SsdMobilenetv1 - load model before inference");return wi.tidy(()=>{const u=wi.cast(r.toBatchTensor(512,!1),"float32"),p=wi.sub(wi.mul(u,wi.scalar(.007843137718737125)),wi.scalar(1)),y=yD(p,l.mobilenetv1),{boxPredictions:g,classPredictions:I}=LD(y.out,y.conv11,l.prediction_layer);return wD(g,I,l.output_layer)})}async forward(r){return this.forwardInput(await Wt(r))}async locateFaces(r,l={}){const{maxResults:u,minConfidence:p}=new bi(l),y=await 
Wt(r),{boxes:g,scores:I}=this.forwardInput(y),S=g[0],T=I[0];for(let ae=1;ae{const[Le,ve]=[Math.max(0,P[ae][0]),Math.min(1,P[ae][2])].map(pt=>pt*te),[Ve,at]=[Math.max(0,P[ae][1]),Math.min(1,P[ae][3])].map(pt=>pt*ne);return new Jt(C[ae],new bu(Ve,Le,at-Ve,ve-Le),{height:y.getInputHeight(0),width:y.getInputWidth(0)})});return S.dispose(),T.dispose(),ge}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(r){return gD(r)}extractParams(r){return fD(r)}}function SD(r){const l=new Zc;return l.extractWeights(r),l}function VQ(r){return SD(r)}class GQ extends Zc{}const ID=.4,xD=[new Qe(.738768,.874946),new Qe(2.42204,2.65704),new Qe(4.30971,7.04493),new Qe(10.246,4.59428),new Qe(12.6868,11.8741)],TD=[new Qe(1.603231,2.094468),new Qe(6.041143,7.080126),new Qe(2.882459,3.518061),new Qe(4.266906,5.178857),new Qe(9.041765,10.66308)],AD=[117.001,114.697,97.404],vD="tiny_yolov2_model",ND="tiny_yolov2_separable_conv_model";const Wg=r=>typeof r=="number";function hx(r){if(!r)throw new Error(`invalid config: ${r}`);if(typeof r.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${r.withSeparableConvs}`);if(!Wg(r.iouThreshold)||r.iouThreshold<0||r.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${r.iouThreshold}`);if(!Array.isArray(r.classes)||!r.classes.length||!r.classes.every(l=>typeof l=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(r.classes)}`);if(!Array.isArray(r.anchors)||!r.anchors.length||!r.anchors.map(l=>l||{}).every(l=>Wg(l.x)&&Wg(l.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(r.anchors)}`);if(r.meanRgb&&(!Array.isArray(r.meanRgb)||r.meanRgb.length!==3||!r.meanRgb.every(Wg)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(r.meanRgb)}`)}const js=Je(Ze());function Qc(r){return js.tidy(()=>{const l=js.mul(r,js.scalar(.10000000149011612));return js.add(js.relu(js.sub(r,l)),l)})}const Ks=Je(Ze());function Tr(r,l){return Ks.tidy(()=>{let u=Ks.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=Ks.conv2d(u,l.conv.filters,[1,1],"valid"),u=Ks.sub(u,l.bn.sub),u=Ks.mul(u,l.bn.truediv),u=Ks.add(u,l.conv.bias),Qc(u)})}const go=Je(Ze());function Ar(r,l){return go.tidy(()=>{let u=go.pad(r,[[0,0],[1,1],[1,1],[0,0]]);return u=go.separableConv2d(u,l.depthwise_filter,l.pointwise_filter,[1,1],"valid"),u=go.add(u,l.bias),Qc(u)})}const ux=Je(Ze());function YQ(r,l){const u=Yc(r,l);function p(I,S){const T=ux.tensor1d(r(I)),C=ux.tensor1d(r(I));return l.push({paramPath:`${S}/sub`},{paramPath:`${S}/truediv`}),{sub:T,truediv:C}}function y(I,S,T){const C=u(I,S,3,`${T}/conv`),D=p(S,`${T}/bn`);return{conv:C,bn:D}}const g=Hc(r,l);return{extractConvParams:u,extractConvWithBatchNormParams:y,extractSeparableConvParams:g}}function CD(r,l,u,p){const{extractWeights:y,getRemainingWeights:g}=Hn(r),I=[],{extractConvParams:S,extractConvWithBatchNormParams:T,extractSeparableConvParams:C}=YQ(y,I);let D;if(l.withSeparableConvs){const[_,A,B,ne,te,P,ge,ae,Le]=p,ve=l.isFirstLayerConv2d?S(_,A,3,"conv0"):C(_,A,"conv0"),Ve=C(A,B,"conv1"),at=C(B,ne,"conv2"),pt=C(ne,te,"conv3"),$t=C(te,P,"conv4"),Vt=C(P,ge,"conv5"),qe=ae?C(ge,ae,"conv6"):void 0,ln=Le?C(ae,Le,"conv7"):void 
0,bt=S(Le||ae||ge,5*u,1,"conv8");D={conv0:ve,conv1:Ve,conv2:at,conv3:pt,conv4:$t,conv5:Vt,conv6:qe,conv7:ln,conv8:bt}}else{const[_,A,B,ne,te,P,ge,ae,Le]=p,ve=T(_,A,"conv0"),Ve=T(A,B,"conv1"),at=T(B,ne,"conv2"),pt=T(ne,te,"conv3"),$t=T(te,P,"conv4"),Vt=T(P,ge,"conv5"),qe=T(ge,ae,"conv6"),ln=T(ae,Le,"conv7"),bt=S(Le,5*u,1,"conv8");D={conv0:ve,conv1:Ve,conv2:at,conv3:pt,conv4:$t,conv5:Vt,conv6:qe,conv7:ln,conv8:bt}}if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{params:D,paramMappings:I}}function HQ(r,l){const u=gs(r,l);function p(S){const T=u(`${S}/sub`,1),C=u(`${S}/truediv`,1);return{sub:T,truediv:C}}function y(S){const T=u(`${S}/filters`,4),C=u(`${S}/bias`,1);return{filters:T,bias:C}}function g(S){const T=y(`${S}/conv`),C=p(`${S}/bn`);return{conv:T,bn:C}}const I=qc(u);return{extractConvParams:y,extractConvWithBatchNormParams:g,extractSeparableConvParams:I}}function RD(r,l){const u=[],{extractConvParams:p,extractConvWithBatchNormParams:y,extractSeparableConvParams:g}=HQ(r,u);let I;if(l.withSeparableConvs){const S=l.filterSizes&&l.filterSizes.length||9;I={conv0:l.isFirstLayerConv2d?p("conv0"):g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:S>7?g("conv6"):void 0,conv7:S>8?g("conv7"):void 0,conv8:p("conv8")}}else I={conv0:y("conv0"),conv1:y("conv1"),conv2:y("conv2"),conv3:y("conv3"),conv4:y("conv4"),conv5:y("conv5"),conv6:y("conv6"),conv7:y("conv7"),conv8:p("conv8")};return Yn(r,u),{params:I,paramMappings:u}}var dx;(function(r){r[r.XS=224]="XS",r[r.SM=320]="SM",r[r.MD=416]="MD",r[r.LG=608]="LG"})(dx||(dx={}));class vr{constructor({inputSize:r,scoreThreshold:l}={}){this._name="TinyYolov2Options";if(this._inputSize=r||416,this._scoreThreshold=l||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}}const Mt=Je(Ze());class el extends Wn{constructor(r){super("TinyYolov2");hx(r),this._config=r}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(r,l){let u=Tr(r,l.conv0);return u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv1),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv2),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv3),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv4),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Tr(u,l.conv5),u=Mt.maxPool(u,[2,2],[1,1],"same"),u=Tr(u,l.conv6),u=Tr(u,l.conv7),ua(u,l.conv8,"valid",!1)}runMobilenet(r,l){let u=this.config.isFirstLayerConv2d?Qc(ua(r,l.conv0,"valid",!1)):Ar(r,l.conv0);return u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv1),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv2),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv3),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv4),u=Mt.maxPool(u,[2,2],[2,2],"same"),u=Ar(u,l.conv5),u=Mt.maxPool(u,[2,2],[1,1],"same"),u=l.conv6?Ar(u,l.conv6):u,u=l.conv7?Ar(u,l.conv7):u,ua(u,l.conv8,"valid",!1)}forwardInput(r,l){const{params:u}=this;if(!u)throw new Error("TinyYolov2 - load model before inference");return Mt.tidy(()=>{let p=Mt.cast(r.toBatchTensor(l,!1),"float32");return 
p=this.config.meanRgb?di(p,this.config.meanRgb):p,p=p.div(Mt.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(p,u):this.runTinyYolov2(p,u)})}async forward(r,l){return await this.forwardInput(await Wt(r),l)}async detect(r,l={}){const{inputSize:u,scoreThreshold:p}=new vr(l),y=await Wt(r),g=await this.forwardInput(y,u),I=Mt.tidy(()=>Mt.unstack(g)[0].expandDims()),S={width:y.getInputWidth(0),height:y.getInputHeight(0)},T=await this.extractBoxes(I,y.getReshapedInputDimensions(0),p);g.dispose(),I.dispose();const C=T.map(te=>te.box),D=T.map(te=>te.score),_=T.map(te=>te.classScore),A=T.map(te=>this.config.classes[te.label]),B=US(C.map(te=>te.rescale(u)),D,this.config.iouThreshold,!0),ne=B.map(te=>new Nc(D[te],_[te],A[te],C[te],S));return ne}getDefaultModelName(){return""}extractParamsFromWeigthMap(r){return RD(r,this.config)}extractParams(r){const l=this.config.filterSizes||el.DEFAULT_FILTER_SIZES,u=l?l.length:void 0;if(u!==7&&u!==8&&u!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${u} filterSizes in config`);return CD(r,this.config,this.boxEncodingSize,l)}async extractBoxes(r,l,u){const{width:p,height:y}=l,g=Math.max(p,y),I=g/p,S=g/y,T=r.shape[1],C=this.config.anchors.length,[D,_,A]=Mt.tidy(()=>{const P=r.reshape([T,T,C,this.boxEncodingSize]),ge=P.slice([0,0,0,0],[T,T,C,4]),ae=P.slice([0,0,0,4],[T,T,C,1]),Le=this.withClassScores?Mt.softmax(P.slice([0,0,0,5],[T,T,C,this.config.classes.length]),3):Mt.scalar(0);return[ge,ae,Le]}),B=[],ne=await _.array(),te=await D.array();for(let P=0;Pu){const ve=(ge+yu(te[P][ge][ae][0]))/T*I,Ve=(P+yu(te[P][ge][ae][1]))/T*S,at=Math.exp(te[P][ge][ae][2])*this.config.anchors[ae].x/T*I,pt=Math.exp(te[P][ge][ae][3])*this.config.anchors[ae].y/T*S,$t=ve-at/2,Vt=Ve-pt/2,qe={row:P,col:ge,anchor:ae},{classScore:ln,label:bt}=this.withClassScores?await this.extractPredictedClass(A,qe):{classScore:1,label:0};B.push({box:new gu($t,Vt,$t+at,Vt+pt),score:Le,classScore:Le*ln,label:bt,...qe})}}return D.dispose(),_.dispose(),A.dispose(),B}async extractPredictedClass(r,l){const{row:u,col:p,anchor:y}=l,g=await r.array();return Array(this.config.classes.length).fill(0).map((I,S)=>g[u][p][y][S]).map((I,S)=>({classScore:I,label:S})).reduce((I,S)=>I.classScore>S.classScore?I:S)}}el.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];class Hu extends el{constructor(r=!0){const l=Object.assign({},{withSeparableConvs:r,iouThreshold:ID,classes:["face"]},r?{anchors:TD,meanRgb:AD}:{anchors:xD,withClassScores:!0});super(l)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Jt(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?ND:vD}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}function qQ(r,l=!0){const u=new Hu(l);return u.extractWeights(r),u}class px extends vr{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}}class Li{async then(r){return r(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}}const mx=Je(Ze());async function fa(r,l,u,p,y=({alignedRect:g})=>g){const g=r.map(T=>pa(T)?y(T):T.detection),I=p||(l instanceof mx.Tensor?await Vc(l,g):await zc(l,g)),S=await u(I);return I.forEach(T=>T instanceof mx.Tensor&&T.dispose()),S}async function tl(r,l,u,p,y){return fa([r],l,async g=>u(g[0]),p,y)}const OD=.4,ED=[new Qe(1.603231,2.094468),new 
Qe(6.041143,7.080126),new Qe(2.882459,3.518061),new Qe(4.266906,5.178857),new Qe(9.041765,10.66308)],DD=[117.001,114.697,97.404];class qu extends el{constructor(){const r={withSeparableConvs:!0,iouThreshold:OD,classes:["face"],anchors:ED,meanRgb:DD,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(r)}get anchors(){return this.config.anchors}async locateFaces(r,l){const u=await this.detect(r,l);return u.map(p=>new Jt(p.score,p.relativeBox,{width:p.imageWidth,height:p.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(r){return super.extractParamsFromWeigthMap(r)}}const yt={ssdMobilenetv1:new Zc,tinyFaceDetector:new qu,tinyYolov2:new Hu,faceLandmark68Net:new Pu,faceLandmark68TinyNet:new ax,faceRecognitionNet:new Vu,faceExpressionNet:new nx,ageGenderNet:new ox},kD=(r,l)=>yt.ssdMobilenetv1.locateFaces(r,l),jQ=(r,l)=>yt.tinyFaceDetector.locateFaces(r,l),KQ=(r,l)=>yt.tinyYolov2.locateFaces(r,l),FD=r=>yt.faceLandmark68Net.detectLandmarks(r),XQ=r=>yt.faceLandmark68TinyNet.detectLandmarks(r),JQ=r=>yt.faceRecognitionNet.computeFaceDescriptor(r),ZQ=r=>yt.faceExpressionNet.predictExpressions(r),QQ=r=>yt.ageGenderNet.predictAgeAndGender(r),_D=r=>yt.ssdMobilenetv1.load(r),eee=r=>yt.tinyFaceDetector.load(r),tee=r=>yt.tinyYolov2.load(r),nee=r=>yt.faceLandmark68Net.load(r),see=r=>yt.faceLandmark68TinyNet.load(r),iee=r=>yt.faceRecognitionNet.load(r),ree=r=>yt.faceExpressionNet.load(r),oee=r=>yt.ageGenderNet.load(r),aee=_D,cee=kD,lee=FD;class WD extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class Xu extends WD{async run(){const r=await this.parentTask,l=await fa(r,this.input,async u=>await Promise.all(u.map(p=>yt.faceExpressionNet.predictExpressions(p))),this.extractedFaces);return r.map((u,p)=>Rg(u,l[p]))}withAgeAndGender(){return new ju(this,this.input)}}class Ju extends WD{async run(){const r=await this.parentTask;if(!r)return;const l=await tl(r,this.input,u=>yt.faceExpressionNet.predictExpressions(u),this.extractedFaces);return Rg(r,l)}withAgeAndGender(){return new Ku(this,this.input)}}class il extends Xu{withAgeAndGender(){return new nl(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class rl extends Ju{withAgeAndGender(){return new sl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class $D extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.extractedFaces=u}}class ju extends $D{async run(){const r=await this.parentTask,l=await fa(r,this.input,async u=>await Promise.all(u.map(p=>yt.ageGenderNet.predictAgeAndGender(p))),this.extractedFaces);return r.map((u,p)=>{const{age:y,gender:g,genderProbability:I}=l[p];return Fg(_g(u,g,I),y)})}withFaceExpressions(){return new Xu(this,this.input)}}class Ku extends $D{async run(){const r=await this.parentTask;if(!r)return;const{age:l,gender:u,genderProbability:p}=await tl(r,this.input,y=>yt.ageGenderNet.predictAgeAndGender(y),this.extractedFaces);return Fg(_g(r,u,p),l)}withFaceExpressions(){return new Ju(this,this.input)}}class nl extends ju{withFaceExpressions(){return new il(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class sl extends Ku{withFaceExpressions(){return new rl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class fx extends Li{constructor(r,l){super();this.parentTask=r;this.input=l}}class ga extends fx{async run(){const r=await this.parentTask,l=await 
fa(r,this.input,u=>Promise.all(u.map(p=>yt.faceRecognitionNet.computeFaceDescriptor(p))),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return l.map((u,p)=>kg(r[p],u))}withFaceExpressions(){return new il(this,this.input)}withAgeAndGender(){return new nl(this,this.input)}}class ya extends fx{async run(){const r=await this.parentTask;if(!r)return;const l=await tl(r,this.input,u=>yt.faceRecognitionNet.computeFaceDescriptor(u),null,u=>u.landmarks.align(null,{useDlibAlignment:!0}));return kg(r,l)}withFaceExpressions(){return new rl(this,this.input)}withAgeAndGender(){return new sl(this,this.input)}}const Zu=Je(Ze());class gx extends Li{constructor(r,l,u){super();this.parentTask=r;this.input=l;this.useTinyLandmarkNet=u}get landmarkNet(){return this.useTinyLandmarkNet?yt.faceLandmark68TinyNet:yt.faceLandmark68Net}}class yx extends gx{async run(){const r=await this.parentTask,l=r.map(y=>y.detection),u=this.input instanceof Zu.Tensor?await Vc(this.input,l):await zc(this.input,l),p=await Promise.all(u.map(y=>this.landmarkNet.detectLandmarks(y)));return u.forEach(y=>y instanceof Zu.Tensor&&y.dispose()),r.map((y,g)=>Xc(y,p[g]))}withFaceExpressions(){return new il(this,this.input)}withAgeAndGender(){return new nl(this,this.input)}withFaceDescriptors(){return new ga(this,this.input)}}class bx extends gx{async run(){const r=await this.parentTask;if(!r)return;const{detection:l}=r,u=this.input instanceof Zu.Tensor?await Vc(this.input,[l]):await zc(this.input,[l]),p=await this.landmarkNet.detectLandmarks(u[0]);return u.forEach(y=>y instanceof Zu.Tensor&&y.dispose()),Xc(r,p)}withFaceExpressions(){return new rl(this,this.input)}withAgeAndGender(){return new sl(this,this.input)}withFaceDescriptor(){return new ya(this,this.input)}}class wx extends Li{constructor(r,l=new bi){super();this.input=r;this.options=l}}class $g extends wx{async run(){const{input:r,options:l}=this,u=l instanceof px?p=>yt.tinyFaceDetector.locateFaces(p,l):l instanceof bi?p=>yt.ssdMobilenetv1.locateFaces(p,l):l instanceof vr?p=>yt.tinyYolov2.locateFaces(p,l):null;if(!u)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return u(r)}runAndExtendWithFaceDetections(){return new Promise(async r=>{const l=await this.run();return r(l.map(u=>Zo({},u)))})}withFaceLandmarks(r=!1){return new yx(this.runAndExtendWithFaceDetections(),this.input,r)}withFaceExpressions(){return new Xu(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new ju(this.runAndExtendWithFaceDetections(),this.input)}}class Lx extends wx{async run(){const r=await new $g(this.input,this.options);let l=r[0];return r.forEach(u=>{u.score>l.score&&(l=u)}),l}runAndExtendWithFaceDetection(){return new Promise(async r=>{const l=await this.run();return r(l?Zo({},l):void 0)})}withFaceLandmarks(r=!1){return new bx(this.runAndExtendWithFaceDetection(),this.input,r)}withFaceExpressions(){return new Ju(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new Ku(this.runAndExtendWithFaceDetection(),this.input)}}function hee(r,l=new bi){return new Lx(r,l)}function Ug(r,l=new bi){return new $g(r,l)}async function UD(r,l){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await Ug(r,new bi(l?{minConfidence:l}:{})).withFaceLandmarks().withFaceDescriptors()}async function uee(r,l={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level 
api instead"),await Ug(r,new vr(l)).withFaceLandmarks().withFaceDescriptors()}const dee=UD;function Sx(r,l){if(r.length!==l.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");const u=Array.from(r),p=Array.from(l);return Math.sqrt(u.map((y,g)=>y-p[g]).reduce((y,g)=>y+Math.pow(g,2),0))}class BD{constructor(r,l=.6){this._distanceThreshold=l;const u=Array.isArray(r)?r:[r];if(!u.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let p=1;const y=()=>`person ${p++}`;this._labeledDescriptors=u.map(g=>{if(g instanceof Jo)return g;if(g instanceof Float32Array)return new Jo(y(),[g]);if(g.descriptor&&g.descriptor instanceof Float32Array)return new Jo(y(),[g.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(r,l){return l.map(u=>Sx(u,r)).reduce((u,p)=>u+p,0)/(l.length||1)}matchDescriptor(r){return this.labeledDescriptors.map(({descriptors:l,label:u})=>new Ym(u,this.computeMeanDistance(r,l))).reduce((l,u)=>l.distancer.toJSON())}}static fromJSON(r){const l=r.labeledDescriptors.map(u=>Jo.fromJSON(u));return new BD(l,r.distanceThreshold)}}function pee(r){const l=new qu;return l.extractWeights(r),l}function MD(r,l){const{width:u,height:p}=new ms(l.width,l.height);if(u<=0||p<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:u,height:p})}`);if(Array.isArray(r))return r.map(y=>MD(y,{width:u,height:p}));if(pa(r)){const y=r.detection.forSize(u,p),g=r.unshiftedLandmarks.forSize(y.box.width,y.box.height);return Xc(Zo(r,y),g)}return $i(r)?Zo(r,r.detection.forSize(u,p)):r instanceof Gs||r instanceof Jt?r.forSize(u,p):r}var PD="0.8.6";const mee=Je(Ze()),fee=typeof process!="undefined",gee=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",yee={faceapi:PD,node:fee,browser:gee};export{ox as AgeGenderNet,gu as BoundingBox,_t as Box,Li as ComposableTask,ga as ComputeAllFaceDescriptorsTask,fx as ComputeFaceDescriptorsTaskBase,ya as ComputeSingleFaceDescriptorTask,yx as DetectAllFaceLandmarksTask,$g as DetectAllFacesTask,gx as DetectFaceLandmarksTaskBase,wx as DetectFacesTaskBase,bx as DetectSingleFaceLandmarksTask,Lx as DetectSingleFaceTask,ms as Dimensions,tx as FACE_EXPRESSION_LABELS,Jt as FaceDetection,GQ as FaceDetectionNet,nx as FaceExpressionNet,da as FaceExpressions,Pu as FaceLandmark68Net,ax as FaceLandmark68TinyNet,RQ as FaceLandmarkNet,Gs as FaceLandmarks,EJ as FaceLandmarks5,wu as FaceLandmarks68,Ym as FaceMatch,BD as FaceMatcher,Vu as FaceRecognitionNet,Ir as Gender,Hm as LabeledBox,Jo as LabeledFaceDescriptors,ho as NetInput,Wn as NeuralNetwork,Nc as ObjectDetection,Qe as Point,DJ as PredictedBox,bu as Rect,Zc as SsdMobilenetv1,bi as SsdMobilenetv1Options,qu as TinyFaceDetector,px as TinyFaceDetectorOptions,Hu as TinyYolov2,vr as TinyYolov2Options,dx as TinyYolov2SizeType,dee as allFaces,UD as allFacesSsdMobilenetv1,uee as allFacesTinyYolov2,qS as awaitMediaLoaded,jS as bufferToImage,JQ as computeFaceDescriptor,Rc as createCanvas,Su as createCanvasFromMedia,VQ as createFaceDetectionNet,DQ as createFaceRecognitionNet,SD as createSsdMobilenetv1,pee as createTinyFaceDetector,qQ as createTinyYolov2,Ug as detectAllFaces,FD as detectFaceLandmarks,XQ as detectFaceLandmarksTiny,lee as detectLandmarks,hee as detectSingleFace,ix as draw,St as env,Sx as 
euclideanDistance,Fg as extendWithAge,kg as extendWithFaceDescriptor,Zo as extendWithFaceDetection,Rg as extendWithFaceExpressions,Xc as extendWithFaceLandmarks,_g as extendWithGender,Vc as extractFaceTensors,zc as extractFaces,SQ as fetchImage,ZI as fetchJson,IQ as fetchNetWeights,ha as fetchOrThrow,is as getContext2dOrThrow,ea as getMediaDimensions,KS as imageTensorToCanvas,JI as imageToSquare,NJ as inverseSigmoid,WS as iou,Xm as isMediaElement,Lu as isMediaLoaded,kQ as isWithAge,$i as isWithFaceDetection,sx as isWithFaceExpressions,pa as isWithFaceLandmarks,FQ as isWithGender,oee as loadAgeGenderModel,aee as loadFaceDetectionModel,ree as loadFaceExpressionModel,nee as loadFaceLandmarkModel,see as loadFaceLandmarkTinyModel,iee as loadFaceRecognitionModel,_D as loadSsdMobilenetv1Model,eee as loadTinyFaceDetectorModel,tee as loadTinyYolov2Model,QI as loadWeightMap,cee as locateFaces,xQ as matchDimensions,$S as minBbox,yt as nets,US as nonMaxSuppression,di as normalize,BS as padToSquare,QQ as predictAgeAndGender,ZQ as recognizeFaceExpressions,MD as resizeResults,Qo as resolveInput,vJ as shuffleArray,yu as sigmoid,kD as ssdMobilenetv1,mee as tf,jQ as tinyFaceDetector,KQ as tinyYolov2,Wt as toNetInput,DS as utils,hx as validateConfig,yee as version}; +`,h9=Vm(p9),f9={kernelName:hc,backendName:"webgl",kernelFunc:h9};let d9="return x * x;",m9=Vm(d9),g9={kernelName:Df,backendName:"webgl",kernelFunc:m9};let yE="return (a - b) * (a - b);",y9=ul({opSnippet:yE,packedOpSnippet:yE}),b9={kernelName:fc,backendName:"webgl",kernelFunc:y9};let bE="return a - b;",x9=ul({opSnippet:bE,packedOpSnippet:bE,supportsComplex:!0,cpuKernelImpl:fX}),w9={kernelName:dc,backendName:"webgl",kernelFunc:x9};let v9="return tan(x);",T9=Vm(v9),k9={kernelName:mc,backendName:"webgl",kernelFunc:T9};let N9={kernelName:Wu,backendName:"webgl",kernelFunc:({inputs:n,attrs:t,backend:e})=>{let{x:r}=n,{perm:o}=t,s=e,c=r.shape.length,l=new Array(c);for(let f=0;f{jm(H9,{isNodejs:()=>j9});function j9(){return typeof global=="object"&&!0&&typeof CE!="undefined"&&typeof process!="undefined"&&!!process.version}}),lD=Hm(()=>{}),aot=te(ee()),Fk={};jm(Fk,{AnchorPosition:()=>Zo,DrawBox:()=>Zm,DrawBoxOptions:()=>W1,DrawFaceLandmarks:()=>Ak,DrawFaceLandmarksOptions:()=>Dk,DrawTextField:()=>Ii,DrawTextFieldOptions:()=>hh,drawContour:()=>Es,drawDetections:()=>Y9,drawFaceExpressions:()=>xrt,drawFaceLandmarks:()=>wrt});function Es(i,a,u=!1){if(i.beginPath(),a.slice(1).forEach(({x:h,y:d},g)=>{let x=a[g];i.moveTo(x.x,x.y),i.lineTo(h,d)}),u){let h=a[a.length-1],d=a[0];if(!h||!d)return;i.moveTo(h.x,h.y),i.lineTo(d.x,d.y)}i.stroke()}var S1={};jm(S1,{computeReshapedDimensions:()=>E1,getCenterPoint:()=>Na,isDimensions:()=>Ym,isEven:()=>Xm,isFloat:()=>I1,isTensor:()=>Ta,isTensor1D:()=>z9,isTensor2D:()=>$1,isTensor3D:()=>Ds,isTensor4D:()=>zr,isValidNumber:()=>$o,isValidProbablitiy:()=>ml,range:()=>Xo,round:()=>ka});var kE=te(ee()),Zn=class{constructor(a,u){if(!$o(a)||!$o(u))throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width:a,height:u})}`);this._width=a,this._height=u}get width(){return this._width}get height(){return this._height}reverse(){return new Zn(1/this.width,1/this.height)}};function Ta(i,a){return i instanceof kE.Tensor&&i.shape.length===a}function z9(i){return Ta(i,1)}function $1(i){return Ta(i,2)}function Ds(i){return Ta(i,3)}function zr(i){return Ta(i,4)}function I1(i){return i%1!==0}function Xm(i){return i%2===0}function ka(i,a=2){let u=Math.pow(10,a);return 
Math.floor(i*u)/u}function Ym(i){return i&&i.width&&i.height}function E1({width:i,height:a},u){let h=u/Math.max(a,i);return new Zn(Math.round(i*h),Math.round(a*h))}function Na(i){return i.reduce((a,u)=>a.add(u),new Jt(0,0)).div(new Jt(i.length,i.length))}function Xo(i,a,u){return Array(i).fill(0).map((h,d)=>a+d*u)}function $o(i){return!!i&&i!==Infinity&&i!==-Infinity&&!isNaN(i)||i===0}function ml(i){return $o(i)&&0<=i&&i<=1}var Jt=class{constructor(a,u){this._x=a,this._y=u}get x(){return this._x}get y(){return this._y}add(a){return new Jt(this.x+a.x,this.y+a.y)}sub(a){return new Jt(this.x-a.x,this.y-a.y)}mul(a){return new Jt(this.x*a.x,this.y*a.y)}div(a){return new Jt(this.x/a.x,this.y/a.y)}abs(){return new Jt(Math.abs(this.x),Math.abs(this.y))}magnitude(){return Math.sqrt(Math.pow(this.x,2)+Math.pow(this.y,2))}floor(){return new Jt(Math.floor(this.x),Math.floor(this.y))}},Fe=class{static isRect(a){return!!a&&[a.x,a.y,a.width,a.height].every($o)}static assertIsValidBox(a,u,h=!1){if(!Fe.isRect(a))throw new Error(`${u} - invalid box: ${JSON.stringify(a)}, expected object with properties x, y, width, height`);if(!h&&(a.width<0||a.height<0))throw new Error(`${u} - width (${a.width}) and height (${a.height}) must be positive numbers`)}constructor(a,u=!0){let h=a||{},d=[h.left,h.top,h.right,h.bottom].every($o),g=[h.x,h.y,h.width,h.height].every($o);if(!g&&!d)throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(h)}`);let[x,w,k,C]=g?[h.x,h.y,h.width,h.height]:[h.left,h.top,h.right-h.left,h.bottom-h.top];Fe.assertIsValidBox({x,y:w,width:k,height:C},"Box.constructor",u),this._x=x,this._y=w,this._width=k,this._height=C}get x(){return this._x}get y(){return this._y}get width(){return this._width}get height(){return this._height}get left(){return this.x}get top(){return this.y}get right(){return this.x+this.width}get bottom(){return this.y+this.height}get area(){return this.width*this.height}get topLeft(){return new Jt(this.left,this.top)}get topRight(){return new Jt(this.right,this.top)}get bottomLeft(){return new Jt(this.left,this.bottom)}get bottomRight(){return new Jt(this.right,this.bottom)}round(){let[a,u,h,d]=[this.x,this.y,this.width,this.height].map(g=>Math.round(g));return new Fe({x:a,y:u,width:h,height:d})}floor(){let[a,u,h,d]=[this.x,this.y,this.width,this.height].map(g=>Math.floor(g));return new Fe({x:a,y:u,width:h,height:d})}toSquare(){let{x:a,y:u,width:h,height:d}=this,g=Math.abs(h-d);return hu&&(w=-F+u+h,F=u),_>a&&(k=-_+a+d,_=a),C<1&&(k=2-C,C=1),$<1&&(k=2-$,$=1),{dy:x,edy:k,dx:g,edx:w,y:$,ey:_,x:C,ex:F,w:h,h:d}}calibrate(a){return new Fe({left:this.left+a.left*this.width,top:this.top+a.top*this.height,right:this.right+a.right*this.width,bottom:this.bottom+a.bottom*this.height}).toSquare().round()}},gl=class extends Fe{constructor(a,u,h,d,g=!1){super({left:a,top:u,right:h,bottom:d},g)}};var Si=class{constructor(a,u,h,d,g){this._imageDims=new Zn(g.width,g.height),this._score=a,this._classScore=u,this._className=h,this._box=new Fe(d).rescale(this._imageDims)}get score(){return this._score}get classScore(){return this._classScore}get className(){return this._className}get box(){return this._box}get imageDims(){return this._imageDims}get imageWidth(){return this.imageDims.width}get imageHeight(){return this.imageDims.height}get relativeBox(){return new Fe(this._box).rescale(this.imageDims.reverse())}forSize(a,u){return new Si(this.score,this.classScore,this.className,this.relativeBox,{width:a,height:u})}},Ue=class extends 
Si{constructor(a,u,h){super(a,a,"",u,h)}forSize(a,u){let{score:h,relativeBox:d,imageDims:g}=super.forSize(a,u);return new Ue(h,d,g)}};function D1(i,a,u=!0){let h=Math.max(0,Math.min(i.right,a.right)-Math.max(i.left,a.left)),d=Math.max(0,Math.min(i.bottom,a.bottom)-Math.max(i.top,a.top)),g=h*d;return u?g/(i.area+a.area-g):g/Math.min(i.area,a.area)}function A1(i){let a=i.map(w=>w.x),u=i.map(w=>w.y),h=a.reduce((w,k)=>kkww({score:x,boxIndex:w})).sort((x,w)=>x.score-w.score).map(x=>x.boxIndex),g=[];for(;d.length>0;){let x=d.pop();g.push(x);let w=d,k=[];for(let C=0;Ck[$]<=u)}return g}var Yo=te(ee());function Io(i,a){return Yo.tidy(()=>{let[u,h,d]=a,g=Yo.fill([...i.shape.slice(0,3),1],u,"float32"),x=Yo.fill([...i.shape.slice(0,3),1],h,"float32"),w=Yo.fill([...i.shape.slice(0,3),1],d,"float32"),k=Yo.concat([g,x,w],3);return Yo.sub(i,k)})}var $i=te(ee());function R1(i,a=!1){return $i.tidy(()=>{let[u,h]=i.shape.slice(1);if(u===h)return i;let d=Math.abs(u-h),g=Math.round(d*(a?.5:1)),x=u>h?2:1,w=_=>{let W=i.shape.slice();return W[x]=_,$i.fill(W,0,"float32")},k=w(g),C=d-k.shape[x],$=a&&C?w(C):null,F=[$,i,k].filter(_=>!!_).map(_=>$i.cast(_,"float32"));return $i.concat(F,x)})}function W9(i){let a=i.slice();for(let u=a.length-1;u>0;u--){let h=Math.floor(Math.random()*(u+1)),d=a[u];a[u]=a[h],a[h]=d}return a}function lh(i){return 1/(1+Math.exp(-i))}function V9(i){return Math.log(i/(1-i))}var yl=class extends Fe{constructor(a,u,h,d,g=!1){super({x:a,y:u,width:h,height:d},g)}},G9=.5,U9=.43,q9=.45,Wr=class{constructor(a,u,h=new Jt(0,0)){let{width:d,height:g}=u;this._imgDims=new Zn(d,g),this._shift=h,this._positions=a.map(x=>x.mul(new Jt(d,g)).add(h))}get shift(){return new Jt(this._shift.x,this._shift.y)}get imageWidth(){return this._imgDims.width}get imageHeight(){return this._imgDims.height}get positions(){return this._positions}get relativePositions(){return this._positions.map(a=>a.sub(this._shift).div(new Jt(this.imageWidth,this.imageHeight)))}forSize(a,u){return new this.constructor(this.relativePositions,{width:a,height:u})}shiftBy(a,u){return new this.constructor(this.relativePositions,this._imgDims,new Jt(a,u))}shiftByPoint(a){return this.shiftBy(a.x,a.y)}align(a,u={}){if(a){let g=a instanceof Ue?a.box.floor():new Fe(a);return this.shiftBy(g.x,g.y).align(null,u)}let{useDlibAlignment:h,minBoxPadding:d}=Object.assign({},{useDlibAlignment:!1,minBoxPadding:.2},u);return h?this.alignDlib():this.alignMinBbox(d)}alignDlib(){let a=this.getRefPointsForAlignment(),[u,h,d]=a,g=F=>d.sub(F).magnitude(),x=(g(u)+g(h))/2,w=Math.floor(x/q9),k=Na(a),C=Math.floor(Math.max(0,k.x-G9*w)),$=Math.floor(Math.max(0,k.y-U9*w));return new yl(C,$,Math.min(w,this.imageWidth+C),Math.min(w,this.imageHeight+$))}alignMinBbox(a){let u=A1(this.positions);return u.pad(u.width*a,u.height*a)}getRefPointsForAlignment(){throw new Error("getRefPointsForAlignment not implemented by base class")}};var NE=class extends Wr{getRefPointsForAlignment(){let a=this.positions;return[a[0],a[1],Na([a[3],a[4]])]}};var bl=class extends Wr{getJawOutline(){return this.positions.slice(0,17)}getLeftEyeBrow(){return this.positions.slice(17,22)}getRightEyeBrow(){return this.positions.slice(22,27)}getNose(){return this.positions.slice(27,36)}getLeftEye(){return this.positions.slice(36,42)}getRightEye(){return this.positions.slice(42,48)}getMouth(){return this.positions.slice(48,68)}getRefPointsForAlignment(){return[this.getLeftEye(),this.getRightEye(),this.getMouth()].map(Na)}};var uh=class{constructor(a,u){this._label=a,this._distance=u}get label(){return 
this._label}get distance(){return this._distance}toString(a=!0){return`${this.label}${a?` (${ka(this.distance)})`:""}`}};var ph=class extends Fe{static assertIsValidLabeledBox(a,u){if(Fe.assertIsValidBox(a,u),!$o(a.label))throw new Error(`${u} - expected property label (${a.label}) to be a number`)}constructor(a,u){super(a);this._label=u}get label(){return this._label}};var As=class{constructor(a,u){if(!(typeof a=="string"))throw new Error("LabeledFaceDescriptors - constructor expected label to be a string");if(!Array.isArray(u)||u.some(h=>!(h instanceof Float32Array)))throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array");this._label=a,this._descriptors=u}get label(){return this._label}get descriptors(){return this._descriptors}toJSON(){return{label:this.label,descriptors:this.descriptors.map(a=>Array.from(a))}}static fromJSON(a){let u=a.descriptors.map(h=>new Float32Array(h));return new As(a.label,u)}};var _E=class extends ph{static assertIsValidPredictedBox(a,u){if(ph.assertIsValidLabeledBox(a,u),!ml(a.score)||!ml(a.classScore))throw new Error(`${u} - expected properties score (${a.score}) and (${a.classScore}) to be a number between [0, 1]`)}constructor(a,u,h,d){super(a,u);this._score=h,this._classScore=d}get score(){return this._score}get classScore(){return this._classScore}};function Jo(i){return i.detection instanceof Ue}function _a(i,a){let u={detection:a};return Object.assign({},i,u)}function P1(){let i=window.fetch||function(){throw new Error("fetch - missing fetch implementation for browser environment")},a=function(){throw new Error("readFile - filesystem not available for browser environment")};return{Canvas:HTMLCanvasElement,CanvasRenderingContext2D,Image:HTMLImageElement,ImageData,Video:HTMLVideoElement,createCanvasElement:()=>document.createElement("canvas"),createImageElement:()=>document.createElement("img"),fetch:i,readFile:a}}function Jm(i){let a="";if(!i)try{i=require("fs")}catch(h){a=h.toString()}let u=i?function(h){return new Promise((d,g)=>{i.readFile(h,function(x,w){return x?g(x):d(w)})})}:function(){throw new Error(`readFile - failed to require fs in nodejs environment with error: ${a}`)};return{readFile:u}}function O1(){let i=global.Canvas||global.HTMLCanvasElement,a=global.Image||global.HTMLImageElement,u=function(){if(i)return new i;throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment")},h=function(){if(a)return new a;throw new Error("createImageElement - missing Image implementation for nodejs environment")},d=global.fetch||function(){throw new Error("fetch - missing fetch implementation for nodejs environment")},g=Jm();return{Canvas:i||class{},CanvasRenderingContext2D:global.CanvasRenderingContext2D||class{},Image:a||class{},ImageData:global.ImageData||class{},Video:global.HTMLVideoElement||class{},createCanvasElement:u,createImageElement:h,fetch:d,...g}}function L1(){return typeof window=="object"&&typeof document!="undefined"&&typeof HTMLImageElement!="undefined"&&typeof HTMLCanvasElement!="undefined"&&typeof HTMLVideoElement!="undefined"&&typeof ImageData!="undefined"&&typeof CanvasRenderingContext2D!="undefined"}var M1=te(SE()),Dn;function K9(){if(!Dn)throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()");return Dn}function B1(i){Dn=i}function z1(){if(L1())return B1(P1());if(M1.isNodejs())return B1(O1())}function X9(i){if(Dn||z1(),!Dn)throw new Error("monkeyPatch - environment is not defined, check isNodejs() and 
isBrowser()");let{Canvas:a=Dn.Canvas,Image:u=Dn.Image}=i;Dn.Canvas=a,Dn.Image=u,Dn.createCanvasElement=i.createCanvasElement||(()=>new a),Dn.createImageElement=i.createImageElement||(()=>new u),Dn.ImageData=i.ImageData||Dn.ImageData,Dn.Video=i.Video||Dn.Video,Dn.fetch=i.fetch||Dn.fetch,Dn.readFile=i.readFile||Dn.readFile}var $e={getEnv:K9,setEnv:B1,initialize:z1,createBrowserEnv:P1,createFileSystem:Jm,createNodejsEnv:O1,monkeyPatch:X9,isBrowser:L1,isNodejs:M1.isNodejs};z1();function Ca(i){return!$e.isNodejs()&&typeof i=="string"?document.getElementById(i):i}function dr(i){let{Canvas:a,CanvasRenderingContext2D:u}=$e.getEnv();if(i instanceof u)return i;let h=Ca(i);if(!(h instanceof a))throw new Error("resolveContext2d - expected canvas to be of instance of Canvas");let d=h.getContext("2d");if(!d)throw new Error("resolveContext2d - canvas 2d context is null");return d}var Zo;(function(i){i.TOP_LEFT="TOP_LEFT",i.TOP_RIGHT="TOP_RIGHT",i.BOTTOM_LEFT="BOTTOM_LEFT",i.BOTTOM_RIGHT="BOTTOM_RIGHT"})(Zo||(Zo={}));var hh=class{constructor(a={}){let{anchorPosition:u,backgroundColor:h,fontColor:d,fontSize:g,fontStyle:x,padding:w}=a;this.anchorPosition=u||Zo.TOP_LEFT,this.backgroundColor=h||"rgba(0, 0, 0, 0.5)",this.fontColor=d||"rgba(255, 255, 255, 1)",this.fontSize=g||14,this.fontStyle=x||"Georgia",this.padding=w||4}},Ii=class{constructor(a,u,h={}){this.text=typeof a=="string"?[a]:a instanceof Ii?a.text:a,this.anchor=u,this.options=new hh(h)}measureWidth(a){let{padding:u}=this.options;return this.text.map(h=>a.measureText(h).width).reduce((h,d)=>h{let et=k+F.x,tt=k+F.y+(W+1)*x;h.fillText(_,et,tt)})}},W1=class{constructor(a={}){let{boxColor:u,lineWidth:h,label:d,drawLabelOptions:g}=a;this.boxColor=u||"rgba(0, 0, 255, 1)",this.lineWidth=h||2,this.label=d;let x={anchorPosition:Zo.BOTTOM_LEFT,backgroundColor:this.boxColor};this.drawLabelOptions=new hh(Object.assign({},x,g))}},Zm=class{constructor(a,u={}){this.box=new Fe(a),this.options=new W1(u)}draw(a){let u=dr(a),{boxColor:h,lineWidth:d}=this.options,{x:g,y:x,width:w,height:k}=this.box;u.strokeStyle=h,u.lineWidth=d,u.strokeRect(g,x,w,k);let{label:C}=this.options;C&&new Ii([C],{x:g-d/2,y:x},this.options.drawLabelOptions).draw(a)}};function Y9(i,a){let u=Array.isArray(a)?a:[a];u.forEach(h=>{let d=h instanceof Ue?h.score:Jo(h)?h.detection.score:void 0,g=h instanceof Ue?h.box:Jo(h)?h.detection.box:new Fe(h),x=d?`${ka(d)}`:void 0;new Zm(g,{label:x}).draw(i)})}var Wl=te(ee());function fh(i){let{Image:a,Video:u}=$e.getEnv();return i instanceof a&&i.complete||i instanceof u&&i.readyState>=3}function V1(i){return new Promise((a,u)=>{if(i instanceof $e.getEnv().Canvas||fh(i))return a(null);function h(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",h),g.currentTarget.removeEventListener("error",d),a(g)}function d(g){if(!g.currentTarget)return;g.currentTarget.removeEventListener("load",h),g.currentTarget.removeEventListener("error",d),u(g)}i.addEventListener("load",h),i.addEventListener("error",d)})}function G1(i){return new Promise((a,u)=>{if(!(i instanceof Blob))return u("bufferToImage - expected buf to be of type: Blob");let h=new FileReader;h.onload=()=>{if(typeof h.result!="string")return u("bufferToImage - expected reader.result to be a string, in onload");let d=$e.getEnv().createImageElement();d.onload=()=>a(d),d.onerror=u,d.src=h.result},h.onerror=u,h.readAsDataURL(i)})}function Sa(i){let{Image:a,Video:u}=$e.getEnv();return i instanceof a?new Zn(i.naturalWidth,i.naturalHeight):i instanceof u?new Zn(i.videoWidth,i.videoHeight):new 
Zn(i.width,i.height)}function xl({width:i,height:a}){let{createCanvasElement:u}=$e.getEnv(),h=u();return h.width=i,h.height=a,h}function dh(i,a){let{ImageData:u}=$e.getEnv();if(!(i instanceof u)&&!fh(i))throw new Error("createCanvasFromMedia - media has not finished loading yet");let{width:h,height:d}=a||Sa(i),g=xl({width:h,height:d});return i instanceof u?dr(g).putImageData(i,0,0):dr(g).drawImage(i,0,0,h,d),g}var Qm=te(ee());async function U1(i,a){let u=a||$e.getEnv().createCanvasElement(),[h,d,g]=i.shape.slice(zr(i)?1:0),x=Qm.tidy(()=>i.as3D(h,d,g).toInt());return await Qm.browser.toPixels(x,u),x.dispose(),u}function tg(i){let{Image:a,Canvas:u,Video:h}=$e.getEnv();return i instanceof a||i instanceof u||i instanceof h}var J9=1e-7,Z9=1e-4,q1=class{time(a){return st("time")}read(a){return st("read")}readSync(a){return st("readSync")}numDataIds(){return st("numDataIds")}disposeData(a){return st("disposeData")}write(a,u,h){return st("write")}move(a,u,h,d){return st("move")}memory(){return st("memory")}floatPrecision(){return st("floatPrecision")}epsilon(){return this.floatPrecision()===32?J9:Z9}batchMatMul(a,u,h,d){return st("batchMatMul")}fusedBatchMatMul({a,b:u,transposeA:h,transposeB:d,bias:g,activation:x,preluActivationWeights:w}){return st("fusedBatchMatMul")}slice(a,u,h){return st("slice")}stridedSlice(a,u,h,d){return st("stridedSlice")}unstack(a,u){return st("unstack")}reverse(a,u){return st("reverse")}concat(a,u){return st("concat")}neg(a){return st("neg")}add(a,u){return st("add")}addN(a){return st("addN")}subtract(a,u){return st("subtract")}multiply(a,u){return st("multiply")}realDivide(a,u){return st("realDivide")}floorDiv(a,u){return st("floorDiv")}sum(a,u){return st("sum")}prod(a,u){return st("prod")}unsortedSegmentSum(a,u,h){return st("unsortedSegmentSum")}argMin(a,u){return st("argMin")}argMax(a,u){return st("argMax")}equal(a,u){return st("equal")}notEqual(a,u){return st("notEqual")}less(a,u){return st("less")}lessEqual(a,u){return st("lessEqual")}greater(a,u){return st("greater")}greaterEqual(a,u){return st("greaterEqual")}logicalNot(a){return st("logicalNot")}logicalAnd(a,u){return st("logicalAnd")}logicalOr(a,u){return st("logicalOr")}where(a){return st("where")}select(a,u,h){return st("select")}topk(a,u,h){return st("topk")}min(a,u){return st("min")}minimum(a,u){return st("minimum")}mod(a,u){return st("mod")}max(a,u){return st("max")}maximum(a,u){return st("maximum")}all(a,u){return st("all")}any(a,u){return st("any")}squaredDifference(a,u){return st("squaredDifference")}ceil(a){return st("ceil")}floor(a){return st("floor")}round(a){return st("round")}sign(a){return st("sign")}isNaN(a){return st("isNaN")}isInf(a){return st("isInf")}isFinite(a){return st("isFinite")}pow(a,u){return st("pow")}exp(a){return st("exp")}expm1(a){return st("expm1")}softmax(a,u){return st("softmax")}log(a){return st("log")}log1p(a){return st("log1p")}sqrt(a){return st("sqrt")}rsqrt(a){return st("rsqrt")}square(a){return st("square")}reciprocal(a){return st("reciprocal")}relu(a){return st("relu")}relu6(a){return st("relu6")}prelu(a,u){return st("prelu")}elu(a){return st("elu")}eluDer(a,u){return st("eluDer")}selu(a){return st("selu")}int(a){return st("int")}clip(a,u,h){return st("clip")}abs(a){return st("abs")}complexAbs(a){return st("complexAbs")}sigmoid(a){return st("sigmoid")}softplus(a){return st("softplus")}sin(a){return st("sin")}cos(a){return st("cos")}tan(a){return st("tan")}asin(a){return st("asin")}acos(a){return st("acos")}atan(a){return st("atan")}atan2(a,u){return 
st("atan2")}sinh(a){return st("sinh")}cosh(a){return st("cosh")}tanh(a){return st("tanh")}asinh(a){return st("asinh")}acosh(a){return st("acosh")}atanh(a){return st("atanh")}erf(a){return st("erf")}step(a,u){return st("step")}fusedConv2d({input:a,filter:u,convInfo:h,bias:d,activation:g,preluActivationWeights:x}){return st("fusedConv2d")}conv2d(a,u,h){return st("conv2d")}conv2dDerInput(a,u,h){return st("conv2dDerInput")}conv2dDerFilter(a,u,h){return st("conv2dDerFilter")}fusedDepthwiseConv2D({input:a,filter:u,convInfo:h,bias:d,activation:g,preluActivationWeights:x}){return st("fusedDepthwiseConv2D")}depthwiseConv2D(a,u,h){return st("depthwiseConv2D")}depthwiseConv2DDerInput(a,u,h){return st("depthwiseConv2DDerInput")}depthwiseConv2DDerFilter(a,u,h){return st("depthwiseConv2DDerFilter")}conv3d(a,u,h){return st("conv3d")}conv3dDerInput(a,u,h){return st("conv3dDerInput")}conv3dDerFilter(a,u,h){return st("conv3dDerFilter")}maxPool(a,u){return st("maxPool")}maxPoolBackprop(a,u,h,d){return st("maxPoolBackprop")}avgPool(a,u){return st("avgPool")}avgPoolBackprop(a,u,h){return st("avgPoolBackprop")}avgPool3d(a,u){return st("avgPool3d")}avgPool3dBackprop(a,u,h){return st("avgPool3dBackprop")}maxPool3d(a,u){return st("maxPool3d")}maxPool3dBackprop(a,u,h,d){return st("maxPool3dBackprop")}reshape(a,u){return st("reshape")}cast(a,u){return st("cast")}tile(a,u){return st("tile")}pad(a,u,h){return st("pad")}transpose(a,u){return st("transpose")}gather(a,u,h){return st("gather")}gatherND(a,u){return st("gatherND")}scatterND(a,u,h){return st("scatterND")}batchToSpaceND(a,u,h){return st("batchToSpaceND")}spaceToBatchND(a,u,h){return st("spaceToBatchND")}resizeBilinear(a,u,h,d){return st("resizeBilinear")}resizeBilinearBackprop(a,u,h){return st("resizeBilinearBackprop")}resizeNearestNeighbor(a,u,h,d){return st("resizeNearestNeighbor")}resizeNearestNeighborBackprop(a,u,h){return st("resizeNearestNeighborBackprop")}batchNorm(a,u,h,d,g,x){return st("batchNorm")}localResponseNormalization4D(a,u,h,d,g){return st("localResponseNormalization4D")}LRNGrad(a,u,h,d,g,x,w){return st("LRNGrad")}multinomial(a,u,h,d){return st("multinomial")}oneHot(a,u,h,d){return st("oneHot")}cumsum(a,u,h,d){return st("cumsum")}nonMaxSuppression(a,u,h,d,g){return st("nonMaxSuppression")}fft(a){return st("fft")}ifft(a){return st("ifft")}complex(a,u){return st("complex")}real(a){return st("real")}imag(a){return st("imag")}cropAndResize(a,u,h,d,g,x){return st("cropAndResize")}depthToSpace(a,u,h){return st("depthToSpace")}split(a,u,h){return st("split")}sparseToDense(a,u,h,d){return st("sparseToDense")}diag(a){return st("diag")}fill(a,u,h){return st("fill")}onesLike(a){return st("onesLike")}zerosLike(a){return st("zerosLike")}linspace(a,u,h){return st("linspace")}dispose(){return st("dispose")}};function st(i){throw new Error(`'${i}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`)}function mh(i,a,u){return Math.max(i,Math.min(a,u))}function U(i,a){if(!i)throw new Error(typeof a=="string"?a:a())}function ae(i,a,u=""){U(Fs(i,a),()=>u+` Shapes ${i} and ${a} must match`)}function wl(i){U(i!=null,()=>"The input to the tensor constructor must be a non-null value.")}function $a(i,a=[],u=!1){if(a==null&&(a=[]),Array.isArray(i)||Vr(i)&&!u)for(let h=0;h=0)u*=i[g];else if(i[g]===-1){if(h!==-1)throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${h} and dim ${g}`);h=g}else if(i[g]<0)throw Error(`Shapes can not be < 0. 
Found ${i[g]} at dim ${g}`);if(h===-1){if(a>0&&a!==u)throw Error(`Size(${a}) must match the product of shape ${i}`);return i}if(u===0)throw Error(`Cannot infer the missing size in [${i}] when there are 0 elements`);if(a%u!==0)throw Error(`The implicit shape can't be a fractional number. Got ${a} / ${u}`);let d=i.slice();return d[h]=a/u,d}function Zt(i,a){let u=a.length;return i=i==null?a.map((h,d)=>d):[].concat(i),U(i.every(h=>h>=-u&&h`All values in axis param must be in range [-${u}, ${u}) but got axis ${i}`),U(i.every(h=>Te(h)),()=>`All values in axis param must be integers but got axis ${i}`),i.map(h=>h<0?u+h:h)}function IE(i,a){let u=[],h=[],d=a!=null&&Array.isArray(a)&&a.length===0,g=a==null||d?null:Zt(a,i).sort(),x=0;for(let w=0;ww)&&i[w]===1&&(u.push(i[w]),h.push(w)),g[x]<=w&&x++}i[w]!==1&&(u.push(i[w]),h.push(w))}return{newShape:u,keptDims:h}}function EE(i,a){let u=null;if(i==null||i==="float32")u=new Float32Array(a);else if(i==="int32")u=new Int32Array(a);else if(i==="bool")u=new Uint8Array(a);else if(i==="string")u=new Array(a);else throw new Error(`Unknown data type ${i}`);return u}function DE(i,a){for(let u=0;ua+=u.length),a}function gh(i){return typeof i=="string"||i instanceof String}function Q9(i){return typeof i=="boolean"}function tQ(i){return typeof i=="number"}function yh(i){return Array.isArray(i)?yh(i[0]):i instanceof Float32Array?"float32":i instanceof Int32Array||i instanceof Uint8Array?"int32":tQ(i)?"float32":gh(i)?"string":Q9(i)?"bool":"float32"}function H1(i){return!!(i&&i.constructor&&i.call&&i.apply)}function bh(i){let a=i.length;if(a<2)return[];let u=new Array(a-1);u[a-2]=i[a-1];for(let h=a-3;h>=0;--h)u[h]=u[h+1]*i[h+1];return u}function PE(i,a,u){let h=new Array;if(a.length===1){let d=a[0];for(let g=0;gw*k);for(let w=0;wh*d);if(u===0)return[];if(u!==a.length)throw new Error(`[${i}] does not match the input size ${a.length}.`);return PE(0,i,a)}function eg(i,a){let u=Ia(i,a);for(let h=0;h{U(Number.isInteger(a)&&a>=0,()=>`Tensor must have a shape comprised of positive integers but got shape [${i}].`)})}function rg(i){return i&&i.then&&typeof i.then=="function"}var OE="tfjsflags",K1=class{constructor(a){this.global=a,this.flags={},this.flagRegistry={},this.urlFlags={},this.populateURLFlags()}setPlatform(a,u){this.platform!=null&&console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${u}.`),this.platformName=a,this.platform=u}registerFlag(a,u,h){if(this.flagRegistry[a]={evaluationFn:u,setHook:h},this.urlFlags[a]!=null){let d=this.urlFlags[a];console.warn(`Setting feature override from URL ${a}: ${d}.`),this.set(a,d)}}async getAsync(a){return a in this.flags?this.flags[a]:(this.flags[a]=await this.evaluateFlag(a),this.flags[a])}get(a){if(a in this.flags)return this.flags[a];let u=this.evaluateFlag(a);if(rg(u))throw new Error(`Flag ${a} cannot be synchronously evaluated. 
Please use getAsync() instead.`);return this.flags[a]=u,this.flags[a]}getNumber(a){return this.get(a)}getBool(a){return this.get(a)}getFlags(){return this.flags}get features(){return this.flags}set(a,u){if(this.flagRegistry[a]==null)throw new Error(`Cannot set flag ${a} as it has not been registered.`);this.flags[a]=u,this.flagRegistry[a].setHook!=null&&this.flagRegistry[a].setHook(u)}evaluateFlag(a){if(this.flagRegistry[a]==null)throw new Error(`Cannot evaluate flag '${a}': no evaluation function found.`);return this.flagRegistry[a].evaluationFn()}setFlags(a){this.flags=Object.assign({},a)}reset(){this.flags={},this.urlFlags={},this.populateURLFlags()}populateURLFlags(){if(typeof this.global=="undefined"||typeof this.global.location=="undefined"||typeof this.global.location.search=="undefined")return;let a=eQ(this.global.location.search);if(OE in a){let u=a[OE].split(",");u.forEach(h=>{let[d,g]=h.split(":");this.urlFlags[d]=nQ(d,g)})}}};function eQ(i){let a={};return i.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g,(u,...h)=>(rQ(a,h[0],h[1]),h.join("="))),a}function rQ(i,a,u){i[decodeURIComponent(a)]=decodeURIComponent(u||"")}function nQ(i,a){if(a=a.toLowerCase(),a==="true"||a==="false")return a==="true";if(`${+a}`===a)return+a;throw new Error(`Could not parse value flag value ${a} for flag ${i}.`)}function Me(){return LE}var LE=null;function ME(i){LE=i}var X1;function Y1(){if(X1==null){let i;if(typeof window!="undefined")i=window;else if(typeof global!="undefined")i=global;else if(typeof process!="undefined")i=process;else if(typeof self!="undefined")i=self;else throw new Error("Could not find a global object");X1=i}return X1}function oQ(){let i=Y1();return i._tfGlobals==null&&(i._tfGlobals=new Map),i._tfGlobals}function J1(i,a){let u=oQ();if(u.has(i))return u.get(i);{let h=a();return u.set(i,h),u.get(i)}}var 
og="Abs",sg="Acos",ig="Acosh",Tl="Add",BE="AddN",zE="All",WE="Any",ag="ArgMax",cg="ArgMin",lg="Asin",ug="Asinh",pg="Atan",hg="Atanh",fg="Atan2",dg="AvgPool",VE="AvgPoolBackprop",GE="AvgPool3D",UE="AvgPool3DBackprop",mg="BatchMatMul",gg="BatchToSpaceND",yg="BroadcastTo",kl="Cast",bg="Ceil",xg="ClipByValue",qE="Complex",wg="Concat",vg="Conv2D",HE="Conv2DBackpropFilter",Tg="Conv2DBackpropInput",jE="Conv3D",KE="Conv3DBackpropFilterV2",XE="Conv3DBackpropInputV2",kg="Cos",Ng="Cosh",_g="Cumsum",YE="CropAndResize",JE="DepthToSpace",Cg="DepthwiseConv2dNative",ZE="DepthwiseConv2dNativeBackpropFilter",QE="DepthwiseConv2dNativeBackpropInput",Sg="Dilation2D",t2="Dilation2DBackpropInput",e2="Dilation2DBackpropFilter",$g="Div",Ig="Elu",n2="EluGrad",Eg="Erf",r2="Equal",Dg="Exp",Ag="Expm1",o2="FFT",s2="Fill",i2="FlipLeftRight",Fg="Floor",Rg="FloorDiv",Pg="FusedBatchNorm",Og="GatherV2",a2="Greater",Lg="GreaterEqual",Mg="Identity",c2="IFFT",l2="Imag",Bg="IsFinite",zg="IsInf",Wg="IsNan",u2="Less",p2="LessEqual",Vg="Log",Gg="Log1p",h2="LogicalAnd",f2="LogicalNot",d2="LogicalOr",Ug="LogSoftmax",qg="LRN",m2="LRNBackprop",Hg="Max",jg="Maximum",Kg="MaxPool",g2="MaxPoolBackprop",y2="MaxPool3D",b2="MaxPool3DBackprop",x2="Mean",Xg="Min",Yg="Minimum",Jg="MirrorPad",Zg="Mod",Qg="Multiply",ty="Negate",w2="NotEqual",v2="NonMaxSuppressionV3",T2="NonMaxSuppressionV4",k2="NonMaxSuppressionV5",ey="OnesLike",ny="OneHot",ry="PadV2",oy="Pow",sy="Prelu",N2="Prod",_2="Range",C2="Real",iy="Reciprocal",ay="Relu",cy="Reshape",ly="ResizeNearestNeighbor",S2="ResizeNearestNeighborGrad",uy="ResizeBilinear",$2="ResizeBilinearGrad",py="Relu6",hy="Reverse",fy="Round",dy="Rsqrt",my="SelectV2",gy="Selu",yy="Slice",by="Sin",xy="Sinh",wy="Sign",vy="Sigmoid",Ty="Softplus",ky="Sqrt",Ny="Sum",_y="SpaceToBatchND",Cy="SplitV",Sy="Softmax",$y="SquaredDifference",I2="Square",Iy="Sub",E2="StridedSlice",Ey="Tan",Dy="Tanh",Ay="Tile",D2="TopK",Fy="Transpose",A2="Unique",Ry="Unpack",Py="UnsortedSegmentSum",Oy="ZerosLike",Ly="Step",Z1="FromPixels",F2="RotateWithOffset";var R2=J1("kernelRegistry",()=>new Map),Q1=J1("gradRegistry",()=>new Map);function My(i,a){let u=sQ(i,a);return R2.get(u)}function tT(i){return Q1.get(i)}function eT(i){let a=R2.entries(),u=[];for(;;){let{done:h,value:d}=a.next();if(h)break;let[g,x]=d,[w]=g.split("_");w===i&&u.push(x)}return u}function P2(i){let{kernelName:a}=i;Q1.has(a)&&(Me().getBool("DEBUG")&&console.warn(`Overriding the gradient for '${a}'`)),Q1.set(a,i)}function sQ(i,a){return`${a}_${i}`}function iQ(i,a){return i instanceof Float32Array&&a==="float32"||i instanceof Int32Array&&a==="int32"||i instanceof Uint8Array&&a==="bool"}function By(i,a){if(a==="string")throw new Error("Cannot convert a string[] to a TypedArray");if(Array.isArray(i)&&(i=$a(i)),Me().getBool("DEBUG")&&DE(i,a),iQ(i,a))return i;if(a==null||a==="float32"||a==="complex64")return new Float32Array(i);if(a==="int32")return new Int32Array(i);if(a==="bool"){let u=new Uint8Array(i.length);for(let h=0;h{d=h()},x=this.backendTimer.time(g);for(let k=0;k{aQ($,C.dtype,a)})}let w={kernelName:a,outputs:d,inputs:u,timeMs:x.then(k=>k.kernelMs),extraInfo:x.then(k=>k.getExtraProfileInfo!=null?k.getExtraProfileInfo():"")};return w}logKernelProfile(a){let{kernelName:u,outputs:h,timeMs:d,inputs:g,extraInfo:x}=a;h.forEach(w=>{Promise.all([w.data(),d,x]).then(k=>{this.logger.logKernelProfile(u,w,k[0],k[1],g,k[2])})})}};function aQ(i,a,u){if(a!=="float32")return!1;for(let h=0;h0?tt:""} `}}console.log(`%c${k} %c${w} %c${C}D ${F} %c${$} %c${_} 
%c${x}`,"font-weight:bold","color:red","color:blue","color: orange","color: green","color: steelblue")}};function M2(i,a,u){let h={},d={};for(let k=0;kh[tt.id]=!0),W=!0,d[C.id]=!0;break}if(W)break}}let g={};g[u.id]=!0;let x={};for(let k=i.length-1;k>=0;k--){let C=i[k],$=C.inputs;for(let F=0;F=0;d--){let g=a[d],x=[];if(g.outputs.forEach(k=>{let C=i[k.id];C!=null?x.push(C):x.push(null)}),g.gradient==null)throw new Error(`Cannot compute gradient: gradient function not found for ${g.kernelName}.`);let w=g.gradient(x);for(let k in g.inputs){if(!(k in w))throw new Error(`Cannot backprop through input ${k}. Available gradients found: ${Object.keys(w)}.`);let C=u(()=>w[k]());if(C.dtype!=="float32")throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input ${k} must have 'float32' dtype, but has '${C.dtype}'`);let $=g.inputs[k];if(!Fs(C.shape,$.shape))throw new Error(`Error in gradient for op ${g.kernelName}. The gradient of input '${k}' has shape '${C.shape}', which does not match the shape of the input '${$.shape}'`);if(i[$.id]==null)i[$.id]=C;else{let F=i[$.id];i[$.id]=h(F,C),F.dispose()}}}}var z2=20,xh=3,sT=7;function W2(i,a,u,h){let d=bh(a),g=cQ(i,a,u,d),x=a.length,w=zy(i,a,u,d,g),k=["Tensor"];return h&&(k.push(` dtype: ${u}`),k.push(` rank: ${x}`),k.push(` shape: [${a}]`),k.push(" values:")),k.push(w.map(C=>" "+C).join(` +`)),k.join(` +`)}function cQ(i,a,u,h){let d=an(a),g=h[h.length-1],x=new Array(g).fill(0),w=a.length,k=u==="complex64"?vh(i):i;if(w>1)for(let C=0;Cz2){let G=xh*x,mt=Array.from(i.slice(0,G)),lt=Array.from(i.slice((w-xh)*x,w*x));return u==="complex64"&&(mt=vh(mt),lt=vh(lt)),["["+mt.map((gt,_t)=>wh(gt,d[_t],u)).join(", ")+", ..., "+lt.map((gt,_t)=>wh(gt,d[w-xh+_t],u)).join(", ")+"]"]}let tt=u==="complex64"?vh(i):Array.from(i);return["["+tt.map((G,mt)=>wh(G,d[mt],u)).join(", ")+"]"]}let C=a.slice(1),$=h.slice(1),F=h[0]*x,_=[];if(w>z2){for(let tt=0;tt`Length of values '${d}' does not match the size inferred by the shape '${this.size}'.`)}if(u==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=h||EE(u,this.size),this.strides=bh(a)}set(a,...u){u.length===0&&(u=[0]),U(u.length===this.rank,()=>`The number of provided coordinates (${u.length}) must match the rank (${this.rank})`);let h=this.locToIndex(u);this.values[h]=a}get(...a){a.length===0&&(a=[0]);let u=0;for(let d of a){if(d<0||d>=this.shape[u]){let g=`Requested out of range element at ${a}. Buffer shape=${this.shape}`;throw new Error(g)}u++}let h=a[a.length-1];for(let d=0;drT(h))}catch(h){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return a}dataSync(){this.throwIfDisposed();let a=Qo().readSync(this.dataId);if(this.dtype==="string")try{return a.map(u=>rT(u))}catch(u){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return a}async bytes(){this.throwIfDisposed();let a=await Qo().read(this.dataId);return this.dtype==="string"?a:new Uint8Array(a.buffer)}dispose(){if(this.isDisposed)return;Qo().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(a=!1){return Nl.print(this,a)}clone(){return this.throwIfDisposed(),Nl.clone(this)}toString(a=!1){let u=this.dataSync();return W2(u,this.shape,this.dtype,a)}cast(a){return this.throwIfDisposed(),Nl.cast(this,a)}variable(a=!0,u,h){return this.throwIfDisposed(),Qo().makeVariable(this,a,u,h)}};Object.defineProperty(z,Symbol.hasInstance,{value:i=>!!i&&i.data!=null&&i.dataSync!=null&&i.throwIfDisposed!=null});var Th=class extends z{constructor(a,u,h,d){super(a.shape,a.dtype,a.dataId,d);this.trainable=u,this.name=h}assign(a){if(a.dtype!==this.dtype)throw new Error(`dtype of the new value (${a.dtype}) and previous value (${this.dtype}) must match`);if(!Fs(a.shape,this.shape))throw new Error(`shape of the new value (${a.shape}) and previous value (${this.shape}) must match`);Qo().disposeTensor(this),this.dataId=a.dataId,Qo().incRef(this,null)}dispose(){Qo().disposeVariable(this),this.isDisposedInternal=!0}};Object.defineProperty(Th,Symbol.hasInstance,{value:i=>i instanceof z&&i.assign!=null&&i.assign instanceof Function});var H2;(function(i){i.R0="R0",i.R1="R1",i.R2="R2",i.R3="R3",i.R4="R4",i.R5="R5",i.R6="R6"})(H2||(H2={}));var aT;(function(i){i.float32="float32",i.int32="int32",i.bool="int32",i.complex64="complex64"})(aT||(aT={}));var cT;(function(i){i.float32="float32",i.int32="int32",i.bool="bool",i.complex64="complex64"})(cT||(cT={}));var lT;(function(i){i.float32="float32",i.int32="float32",i.bool="float32",i.complex64="complex64"})(lT||(lT={}));var uT;(function(i){i.float32="complex64",i.int32="complex64",i.bool="complex64",i.complex64="complex64"})(uT||(uT={}));var uQ={float32:lT,int32:aT,bool:cT,complex64:uT};function j2(i,a){if(i==="string"||a==="string"){if(i==="string"&&a==="string")return"string";throw new Error(`Can not upcast ${i} with ${a}`)}return uQ[i][a]}function he(i,a){if(i.dtype===a.dtype)return[i,a];let u=j2(i.dtype,a.dtype);return[i.cast(u),a.cast(u)]}function Wy(i){let a=[],u=new Set;return K2(i,a,u),a}function K2(i,a,u){if(i==null)return;if(i instanceof z){a.push(i);return}if(!pQ(i))return;let h=i;for(let d in h){let g=h[d];u.has(g)||(u.add(g),K2(g,a,u))}}function pQ(i){return Array.isArray(i)||typeof i=="object"}var pT=class{constructor(){this.registeredVariables={},this.nextTapeNodeId=0,this.numBytes=0,this.numTensors=0,this.numStringTensors=0,this.numDataBuffers=0,this.gradientDepth=0,this.kernelDepth=0,this.scopeStack=[],this.numDataMovesStack=[],this.nextScopeId=0,this.tensorInfo=new WeakMap,this.profiling=!1,this.activeProfile={newBytes:0,newTensors:0,peakBytes:0,kernels:[],result:null}}dispose(){for(let a in this.registeredVariables)this.registeredVariables[a].dispose()}},Ea=class{constructor(a){this.ENV=a,this.registry={},this.registryFactory={},this.pendingBackendInitId=0,this.state=new pT}async ready(){if(this.pendingBackendInit!=null)return this.pendingBackendInit.then(()=>{});if(this.backendInstance!=null)return;let a=this.getSortedBackends();for(let u=0;u{u.setupFunc!=null&&u.setupFunc(this.backendInstance)})}disposeRegisteredKernels(a){let u=eT(a);u.forEach(h=>{h.disposeFunc!=null&&h.disposeFunc(this.registry[a])})}initializeBackend(a){let 
u=this.registryFactory[a];if(u==null)throw new Error(`Cannot initialize backend ${a}, no registration found.`);try{let h=u.factory();if(h&&!(h instanceof q1)&&typeof h.then=="function"){let d=++this.pendingBackendInitId,g=h.then(x=>d(dthis.registryFactory[u].priority-this.registryFactory[a].priority)}initializeBackendsAndReturnBest(){let a=this.getSortedBackends();for(let u=0;uthis.startScope(h),()=>this.endScope(d),()=>(d=u(),d instanceof Promise&&console.error("Cannot return a Promise inside of tidy."),d))}scopedRun(a,u,h){a();try{let d=h();return u(),d}catch(d){throw u(),d}}nextTensorId(){return Ea.nextTensorId++}nextVariableId(){return Ea.nextVariableId++}clone(a){let u=this.makeTensorFromDataId(a.dataId,a.shape,a.dtype),h={x:a},d=x=>({x:()=>{let w="float32",k={x},C={dtype:w};return V.runKernelFunc($=>$.cast(x,w),k,null,kl,C)}}),g=[];return this.addTapeNode(this.state.activeScope.name,h,[u],d,g,{}),u}runKernel(a,u,h,d,g){let x=null,w=null;return this.runKernelFunc(x,u,w,a,h,d,g)}shouldCheckForMemLeaks(){return this.ENV.getBool("IS_TEST")}checkKernelForMemLeak(a,u,h){let d=this.backend.numDataIds(),g=0;h.forEach(k=>{g+=k.dtype==="complex64"?3:1});let x=this.state.numDataMovesStack[this.state.numDataMovesStack.length-1],w=d-u-g-x;if(w>0)throw new Error(`Backend '${this.backendName}' has an internal memory leak (${w} data ids) after running '${a}'`)}runKernelFunc(a,u,h,d,g,x,w){let k,C=[],$=this.isTapeOn();d==null&&(d=this.state.activeScope!=null?this.state.activeScope.name:"");let F=this.state.numBytes,_=this.state.numTensors;this.shouldCheckForMemLeaks()&&this.state.numDataMovesStack.push(0);let W,et=My(d,this.backendName),tt;if(et!=null)W=()=>{let mt=this.backend.numDataIds();tt=et.kernelFunc({inputs:u,attrs:g,backend:this.backend});let lt=Array.isArray(tt)?tt:[tt];this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,mt,lt);let gt=lt.map(({dataId:_t,shape:Gt,dtype:se})=>this.makeTensorFromDataId(_t,Gt,se));if($){let _t=this.getTensorsForGradient(d,u,gt);if(_t==null){w==null&&(w=[]);let Gt=gt.filter((se,fe)=>w[fe]);_t=(x||[]).slice().concat(Gt)}C=this.saveTensorsForBackwardMode(_t)}return gt};else{let mt=lt=>{if(!$)return;C=lt.map(gt=>this.keep(this.clone(gt)))};W=()=>{let lt=this.backend.numDataIds();tt=this.tidy(()=>a(this.backend,mt));let gt=Array.isArray(tt)?tt:[tt];return this.shouldCheckForMemLeaks()&&this.checkKernelForMemLeak(d,lt,gt),gt}}let G;return this.scopedRun(()=>this.state.kernelDepth++,()=>this.state.kernelDepth--,()=>{!this.ENV.getBool("DEBUG")&&!this.state.profiling?k=W():(G=this.profiler.profileKernel(d,u,()=>W()),this.ENV.getBool("DEBUG")&&this.profiler.logKernelProfile(G),k=G.outputs)}),$&&this.addTapeNode(d,u,k,h,C,g),this.state.profiling&&this.state.activeProfile.kernels.push({name:d,bytesAdded:this.state.numBytes-F,totalBytesSnapshot:this.state.numBytes,tensorsAdded:this.state.numTensors-_,totalTensorsSnapshot:this.state.numTensors,inputShapes:Object.keys(u).map(mt=>u[mt]!=null?u[mt].shape:null),outputShapes:k.map(mt=>mt.shape),kernelTimeMs:G.timeMs,extraInfo:G.extraInfo}),Array.isArray(tt)?k:k[0]}saveTensorsForBackwardMode(a){let u=a.map(h=>this.keep(this.clone(h)));return u}getTensorsForGradient(a,u,h){let d=tT(a);if(d!=null){let g=d.inputsToSave||[],x=d.outputsToSave||[],w;d.saveAllInputs?(U(Array.isArray(u),()=>"saveAllInputs is true, expected inputs to be an array."),w=Object.keys(u).map(C=>u[C])):w=g.map(C=>u[C]);let k=h.filter((C,$)=>x[$]);return w.concat(k)}return null}makeTensor(a,u,h,d){if(a==null)throw new Error("Values passed to 
engine.makeTensor() are null");h=h||"float32",d=d||this.backend;let g=a;h==="string"&&gh(a[0])&&(g=a.map(k=>O2(k)));let x=d.write(g,u,h),w=new z(u,h,x,this.nextTensorId());if(this.incRef(w,d),h==="string"){let k=this.state.tensorInfo.get(x),C=RE(g);this.state.numBytes+=C-k.bytes,k.bytes=C}return w}makeTensorFromDataId(a,u,h,d){h=h||"float32";let g=new z(u,h,a,this.nextTensorId());return this.incRef(g,d),g}makeVariable(a,u=!0,h,d){h=h||this.nextVariableId().toString(),d!=null&&d!==a.dtype&&(a=a.cast(d));let g=new Th(a,u,h,this.nextTensorId());if(this.state.registeredVariables[g.name]!=null)throw new Error(`Variable with name ${g.name} was already registered`);return this.state.registeredVariables[g.name]=g,this.incRef(g,this.backend),g}incRef(a,u){let h=this.state.tensorInfo.has(a.dataId)?this.state.tensorInfo.get(a.dataId).refCount:0;if(this.state.numTensors++,a.dtype==="string"&&this.state.numStringTensors++,h===0){this.state.numDataBuffers++;let d=0;a.dtype!=="complex64"&&a.dtype!=="string"&&(d=a.size*FE(a.dtype)),this.state.tensorInfo.set(a.dataId,{backend:u||this.backend,dtype:a.dtype,shape:a.shape,bytes:d,refCount:0}),this.state.numBytes+=d}this.state.tensorInfo.get(a.dataId).refCount++,a instanceof Th||this.track(a)}disposeTensor(a){if(!this.state.tensorInfo.has(a.dataId))return;this.state.numTensors--,a.dtype==="string"&&this.state.numStringTensors--;let u=this.state.tensorInfo.get(a.dataId),h=u.refCount;h<=1?(a.dtype!=="complex64"&&(this.state.numBytes-=u.bytes),this.state.numDataBuffers--,u.backend.disposeData(a.dataId),this.state.tensorInfo.delete(a.dataId)):this.state.tensorInfo.get(a.dataId).refCount--}disposeVariables(){for(let a in this.state.registeredVariables){let u=this.state.registeredVariables[a];this.disposeVariable(u)}}disposeVariable(a){this.disposeTensor(a),this.state.registeredVariables[a.name]!=null&&delete this.state.registeredVariables[a.name]}memory(){let a=this.backend.memory();return a.numTensors=this.state.numTensors,a.numDataBuffers=this.state.numDataBuffers,a.numBytes=this.state.numBytes,this.state.numStringTensors>0&&(a.unreliable=!0,a.reasons==null&&(a.reasons=[]),a.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")),a}async profile(a){this.state.profiling=!0;let u=this.state.numBytes,h=this.state.numTensors;this.state.activeProfile.kernels=[],this.state.activeProfile.result=await a(),this.state.profiling=!1,this.state.activeProfile.peakBytes=Math.max(...this.state.activeProfile.kernels.map(d=>d.totalBytesSnapshot)),this.state.activeProfile.newBytes=this.state.numBytes-u,this.state.activeProfile.newTensors=this.state.numTensors-h;for(let d of this.state.activeProfile.kernels)d.kernelTimeMs=await d.kernelTimeMs,d.extraInfo=await d.extraInfo;return this.state.activeProfile}isTapeOn(){return this.state.gradientDepth>0&&this.state.kernelDepth===0}addTapeNode(a,u,h,d,g,x){let w={id:this.state.nextTapeNodeId++,kernelName:a,inputs:u,outputs:h,saved:g},k=tT(a);k!=null&&(d=k.gradFunc),d!=null&&(w.gradient=C=>(C=C.map(($,F)=>{if($==null){let _=h[F],W=Ia(_.size,_.dtype);return this.makeTensor(W,_.shape,_.dtype)}return $}),d(C.length>1?C:C[0],g,x))),this.state.activeTape.push(w)}keep(a){return a.kept=!0,a}startTape(){this.state.gradientDepth===0&&(this.state.activeTape=[]),this.state.gradientDepth++}endTape(){this.state.gradientDepth--}startScope(a){let u={track:[],name:"unnamed scope",id:this.state.nextScopeId++};a&&(u.name=a),this.state.scopeStack.push(u),this.state.activeScope=u}endScope(a){let u=Wy(a),h=new 
Set(u.map(g=>g.id));for(let g=0;g{!g.kept&&g.scopeId===d.id&&this.track(g)})}gradients(a,u,h,d=!1){if(U(u.length>0,()=>"gradients() received an empty list of xs."),h!=null&&h.dtype!=="float32")throw new Error(`dy must have 'float32' dtype, but has '${h.dtype}'`);let g=this.scopedRun(()=>this.startTape(),()=>this.endTape(),()=>this.tidy("forward",a));U(g instanceof z,()=>"The result y returned by f() must be a tensor.");let x=M2(this.state.activeTape,u,g);if(!d&&x.length===0&&u.length>0)throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");return this.tidy("backward",()=>{let w={};w[g.id]=h==null?hQ(g.shape):h,B2(w,x,C=>this.tidy(C),fQ);let k=u.map(C=>w[C.id]);return this.state.gradientDepth===0&&(this.state.activeTape.forEach(C=>{for(let $ of C.saved)$.dispose()}),this.state.activeTape=null),{value:g,grads:k}})}customGrad(a){return U(H1(a),()=>"The f passed in customGrad(f) must be a function."),(...u)=>{U(u.every(g=>g instanceof z),()=>"The args passed in customGrad(f)(x1, x2,...) must all be tensors");let h,d={};return u.forEach((g,x)=>{d[x]=g}),this.runKernelFunc((g,x)=>(h=a(...u,x),U(h.value instanceof z,()=>"The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"),U(H1(h.gradFunc),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."),h.value),d,(g,x)=>{let w=h.gradFunc(g,x),k=Array.isArray(w)?w:[w];U(k.length===u.length,()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."),U(k.every($=>$ instanceof z),()=>"The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");let C={};return k.forEach(($,F)=>{C[F]=()=>$}),C})}}readSync(a){let u=this.state.tensorInfo.get(a);return u.backend.readSync(a)}read(a){let u=this.state.tensorInfo.get(a);return u.backend.read(a)}async time(a){let u=nT(),h=await this.backend.time(a);return h.wallMs=nT()-u,h}track(a){return this.state.activeScope!=null&&(a.scopeId=this.state.activeScope.id,this.state.activeScope.track.push(a)),a}get registeredVariables(){return this.state.registeredVariables}reset(){this.pendingBackendInitId++,this.state.dispose(),this.ENV.reset(),this.state=new pT;for(let a in this.registry)this.disposeRegisteredKernels(a),this.registry[a].dispose(),delete this.registry[a];this.backendName=null,this.backendInstance=null,this.pendingBackendInit=null}};Ea.nextTensorId=0;Ea.nextVariableId=0;function hQ(i){let a=eg(an(i),"float32");return V.makeTensor(a,i,"float32")}function hT(){let i=Y1();if(i._tfengine==null){let a=new K1(i);i._tfengine=new Ea(a)}return ME(i._tfengine.ENV),G2(()=>i._tfengine),i._tfengine}var V=hT();function fQ(i,a){let u={a:i,b:a};return V.runKernelFunc((h,d)=>{let g=h.add(i,a);return d([i,a]),g},u,null,Tl)}function X2(){return typeof window!="undefined"&&window.document!=null||typeof WorkerGlobalScope!="undefined"}var Rs=Me();Rs.registerFlag("DEBUG",()=>!1,i=>{i&&console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance.")});Rs.registerFlag("IS_BROWSER",()=>X2());Rs.registerFlag("IS_NODE",()=>typeof process!="undefined"&&typeof process.versions!="undefined"&&typeof process.versions.node!="undefined");Rs.registerFlag("IS_CHROME",()=>typeof navigator!="undefined"&&navigator!=null&&navigator.userAgent!=null&&/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor));Rs.registerFlag("PROD",()=>!1);Rs.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY",()=>Rs.getBool("DEBUG"));Rs.registerFlag("DEPRECATION_WARNINGS_ENABLED",()=>!0);Rs.registerFlag("IS_TEST",()=>!1);function Ps(i,a){let u=i;if(Vr(i))return a==="string"?[]:[i.length];if(!Array.isArray(i))return[];let h=[];for(;Array.isArray(u)||Vr(u)&&a!=="string";)h.push(u.length),u=u[0];return Array.isArray(i)&&Me().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")&&Y2(i,h,[]),h}function Y2(i,a,u){if(u=u||[],!Array.isArray(i)&&!Vr(i)){U(a.length===0,()=>`Element arr[${u.join("][")}] is a primitive, but should be an array/TypedArray of ${a[0]} elements`);return}U(a.length>0,()=>`Element arr[${u.join("][")}] should be a primitive, but is an array of ${i.length} elements`),U(i.length===a[0],()=>`Element arr[${u.join("][")}] should have ${a[0]} elements, but has ${i.length} elements`);let h=a.slice(1);for(let d=0;d=0&&(d=h),J2(h,d,a,u),i==null||!Vr(i)&&!Array.isArray(i)&&typeof i!="number"&&typeof i!="boolean"&&typeof i!="string"){let k=i==null?"null":i.constructor.name;throw new Error(`Argument '${a}' passed to '${u}' must be a Tensor or TensorLike, but got '${k}'`)}let g=Ps(i,d);!Vr(i)&&!Array.isArray(i)&&(i=[i]);let x=!0,w=d!=="string"?By(i,d):$a(i,[],x);return V.makeTensor(w,g,d)}function Vy(i,a,u,h="numeric"){if(!Array.isArray(i))throw new Error(`Argument ${a} passed to ${u} must be a \`Tensor[]\` or \`TensorLike[]\``);let d=i;return d.map((g,x)=>R(g,`${a}[${x}]`,u),h)}var Z2="__op";function O(i){let a=Object.keys(i);if(a.length!==1)throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${a.length} keys.`);let u=a[0],h=i[u];u.endsWith("_")&&(u=u.substring(0,u.length-1)),u=u+Z2;let d=(...g)=>{V.startScope(u);try{let x=h(...g);return rg(x)&&console.error("Cannot return a Promise inside of tidy."),V.endScope(x),x}catch(x){throw V.endScope(null),x}};return Object.defineProperty(d,"name",{value:u,configurable:!0}),d}function dQ(i,a){let u=R(i,"real","complex"),h=R(a,"imag","complex");ae(u.shape,h.shape,`real and imag shapes, ${u.shape} and ${h.shape}, must match in call to tf.complex().`);let d=x=>x.complex(u,h),g={real:u,imag:h};return V.runKernelFunc(d,g,null,qE)}var no=O({complex_:dQ});function ts(i,a,u,h){if(h==null&&(h=yh(i)),h==="complex64")throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");if(!Vr(i)&&!Array.isArray(i)&&typeof i!="number"&&typeof i!="boolean"&&typeof i!="string")throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");if(a!=null){ng(a);let d=an(a),g=an(u);U(d===g,()=>`Based on the provided shape, [${a}], the tensor should have ${d} values but has ${g}`);for(let x=0;x`Error creating a new Tensor. Inferred shape (${u}) does not match the provided shape (${a}). 
`)}}return!Vr(i)&&!Array.isArray(i)&&(i=[i]),a=a||u,i=h!=="string"?By(i,h):$a(i,[],!0),V.makeTensor(i,a,h)}function fT(i,a,u){let h=Ps(i,u);return ts(i,a,h,u)}var dT=typeof Buffer!="undefined"&&(typeof Blob=="undefined"||typeof atob=="undefined"||typeof btoa=="undefined");function Q2(i){return dT?Buffer.byteLength(i):new Blob([i]).size}function tD(i){if(dT)return Buffer.from(i).toString("base64");let a=new Uint8Array(i),u="";for(let h=0,d=a.length;h{let w=x(a,h);w!==null&&d.push(w)}),d}};var mT="tensorflowjs",gT=1,Da="models_store",Ei="model_info_store";function nD(){if(!Me().getBool("IS_BROWSER"))throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");let i=typeof window=="undefined"?self:window,a=i.indexedDB||i.mozIndexedDB||i.webkitIndexedDB||i.msIndexedDB||i.shimIndexedDB;if(a==null)throw new Error("The current browser does not appear to support IndexedDB.");return a}function yT(i){let a=i.result;a.createObjectStore(Da,{keyPath:"modelPath"}),a.createObjectStore(Ei,{keyPath:"modelPath"})}var Os=class{constructor(a){if(this.indexedDB=nD(),a==null||!a)throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");this.modelPath=a}async save(a){if(a.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");return this.databaseAction(this.modelPath,a)}async load(){return this.databaseAction(this.modelPath)}databaseAction(a,u){return new Promise((h,d)=>{let g=this.indexedDB.open(mT,gT);g.onupgradeneeded=()=>yT(g),g.onsuccess=()=>{let x=g.result;if(u==null){let w=x.transaction(Da,"readonly"),k=w.objectStore(Da),C=k.get(this.modelPath);C.onsuccess=()=>{if(C.result==null)return x.close(),d(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));h(C.result.modelArtifacts)},C.onerror=$=>(x.close(),d(C.error)),w.oncomplete=()=>x.close()}else{let w=Gy(u),k=x.transaction(Ei,"readwrite"),C=k.objectStore(Ei),$=C.put({modelPath:this.modelPath,modelArtifactsInfo:w}),F;$.onsuccess=()=>{F=x.transaction(Da,"readwrite");let _=F.objectStore(Da),W=_.put({modelPath:this.modelPath,modelArtifacts:u,modelArtifactsInfo:w});W.onsuccess=()=>h({modelArtifactsInfo:w}),W.onerror=et=>{C=k.objectStore(Ei);let tt=C.delete(this.modelPath);tt.onsuccess=()=>(x.close(),d(W.error)),tt.onerror=G=>(x.close(),d(W.error))}},$.onerror=_=>(x.close(),d($.error)),k.oncomplete=()=>{F==null?x.close():F.oncomplete=()=>x.close()}}},g.onerror=x=>d(g.error)})}};Os.URL_SCHEME="indexeddb://";var rD=i=>Me().getBool("IS_BROWSER")&&(!Array.isArray(i)&&i.startsWith(Os.URL_SCHEME))?mQ(i.slice(Os.URL_SCHEME.length)):null;An.registerSaveRouter(rD);An.registerLoadRouter(rD);function mQ(i){return new Os(i)}function gQ(i){return i.startsWith(Os.URL_SCHEME)?i.slice(Os.URL_SCHEME.length):i}var bT=class{constructor(){this.indexedDB=nD()}async listModels(){return new Promise((a,u)=>{let h=this.indexedDB.open(mT,gT);h.onupgradeneeded=()=>yT(h),h.onsuccess=()=>{let d=h.result,g=d.transaction(Ei,"readonly"),x=g.objectStore(Ei),w=x.getAll();w.onsuccess=()=>{let k={};for(let C of w.result)k[C.modelPath]=C.modelArtifactsInfo;a(k)},w.onerror=k=>(d.close(),u(w.error)),g.oncomplete=()=>d.close()},h.onerror=d=>u(h.error)})}async removeModel(a){return a=gQ(a),new Promise((u,h)=>{let d=this.indexedDB.open(mT,gT);d.onupgradeneeded=()=>yT(d),d.onsuccess=()=>{let g=d.result,x=g.transaction(Ei,"readwrite"),w=x.objectStore(Ei),k=w.get(a),C;k.onsuccess=()=>{if(k.result==null)return g.close(),h(new 
Error(`Cannot find model with path '${a}' in IndexedDB.`));{let $=w.delete(a),F=()=>{C=g.transaction(Da,"readwrite");let _=C.objectStore(Da),W=_.delete(a);W.onsuccess=()=>u(k.result.modelArtifactsInfo),W.onerror=et=>h(k.error)};$.onsuccess=F,$.onerror=_=>(F(),g.close(),h(k.error))}},k.onerror=$=>(g.close(),h(k.error)),x.oncomplete=()=>{C==null?g.close():C.oncomplete=()=>g.close()}},d.onerror=g=>h(d.error)})}};var Ls="/",_l="tensorflowjs_models",oD="info",yQ="model_topology",bQ="weight_specs",xQ="weight_data",wQ="model_metadata";function sD(i){return{info:[_l,i,oD].join(Ls),topology:[_l,i,yQ].join(Ls),weightSpecs:[_l,i,bQ].join(Ls),weightData:[_l,i,xQ].join(Ls),modelMetadata:[_l,i,wQ].join(Ls)}}function vQ(i){let a=i.split(Ls);if(a.length<3)throw new Error(`Invalid key format: ${i}`);return a.slice(1,a.length-1).join(Ls)}function TQ(i){return i.startsWith(Ms.URL_SCHEME)?i.slice(Ms.URL_SCHEME.length):i}var Ms=class{constructor(a){if(!Me().getBool("IS_BROWSER")||typeof window=="undefined"||typeof window.localStorage=="undefined")throw new Error("The current environment does not support local storage.");if(this.LS=window.localStorage,a==null||!a)throw new Error("For local storage, modelPath must not be null, undefined or empty.");this.modelPath=a,this.keys=sD(this.modelPath)}async save(a){if(a.modelTopology instanceof ArrayBuffer)throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");{let u=JSON.stringify(a.modelTopology),h=JSON.stringify(a.weightSpecs),d=Gy(a);try{return this.LS.setItem(this.keys.info,JSON.stringify(d)),this.LS.setItem(this.keys.topology,u),this.LS.setItem(this.keys.weightSpecs,h),this.LS.setItem(this.keys.weightData,tD(a.weightData)),this.LS.setItem(this.keys.modelMetadata,JSON.stringify({format:a.format,generatedBy:a.generatedBy,convertedBy:a.convertedBy,userDefinedMetadata:a.userDefinedMetadata})),{modelArtifactsInfo:d}}catch(g){throw this.LS.removeItem(this.keys.info),this.LS.removeItem(this.keys.topology),this.LS.removeItem(this.keys.weightSpecs),this.LS.removeItem(this.keys.weightData),this.LS.removeItem(this.keys.modelMetadata),new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${d.modelTopologyBytes}, weightSpecsBytes=${d.weightSpecsBytes}, weightDataBytes=${d.weightDataBytes}.`)}}}async load(){let a=JSON.parse(this.LS.getItem(this.keys.info));if(a==null)throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);if(a.modelTopologyType!=="JSON")throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");let u={},h=JSON.parse(this.LS.getItem(this.keys.topology));if(h==null)throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);u.modelTopology=h;let d=JSON.parse(this.LS.getItem(this.keys.weightSpecs));if(d==null)throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);u.weightSpecs=d;let g=this.LS.getItem(this.keys.modelMetadata);if(g!=null){let w=JSON.parse(g);u.format=w.format,u.generatedBy=w.generatedBy,u.convertedBy=w.convertedBy,u.userDefinedMetadata=w.userDefinedMetadata}let x=this.LS.getItem(this.keys.weightData);if(x==null)throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);return u.weightData=eD(x),u}};Ms.URL_SCHEME="localstorage://";var 
iD=i=>Me().getBool("IS_BROWSER")&&(!Array.isArray(i)&&i.startsWith(Ms.URL_SCHEME))?kQ(i.slice(Ms.URL_SCHEME.length)):null;An.registerSaveRouter(iD);An.registerLoadRouter(iD);function kQ(i){return new Ms(i)}var xT=class{constructor(){U(Me().getBool("IS_BROWSER"),()=>"Current environment is not a web browser"),U(typeof window=="undefined"||typeof window.localStorage!="undefined",()=>"Current browser does not appear to support localStorage"),this.LS=window.localStorage}async listModels(){let a={},u=_l+Ls,h=Ls+oD;for(let d=0;d"scheme must not be undefined or null."),a.endsWith(aD)&&(a=a.slice(0,a.indexOf(aD))),U(a.length>0,()=>"scheme must not be an empty string.");let h=es.getInstance();U(h.managers[a]==null,()=>`A model store manager is already registered for scheme '${a}'.`),h.managers[a]=u}static getManager(a){let u=this.getInstance().managers[a];if(u==null)throw new Error(`Cannot find model manager for scheme '${a}'`);return u}static getSchemes(){return Object.keys(this.getInstance().managers)}};var cD=class{fetch(a,u){return fetch(a,u)}now(){return performance.now()}encode(a,u){if(u!=="utf-8"&&u!=="utf8")throw new Error(`Browser's encoder only supports utf-8, but got ${u}`);return this.textEncoder==null&&(this.textEncoder=new TextEncoder),this.textEncoder.encode(a)}decode(a,u){return new TextDecoder(u).decode(a)}};if(Me().get("IS_BROWSER")){Me().setPlatform("browser",new cD);try{es.registerManager(Ms.URL_SCHEME,new xT)}catch(i){}try{es.registerManager(Os.URL_SCHEME,new bT)}catch(i){}}var NQ={importFetch:()=>lD()},wT,uD=class{constructor(){this.util=require("util"),this.textEncoder=new this.util.TextEncoder}fetch(a,u){return Me().global.fetch!=null?Me().global.fetch(a,u):(wT==null&&(wT=NQ.importFetch()),wT(a,u))}now(){let a=process.hrtime();return a[0]*1e3+a[1]/1e6}encode(a,u){if(u!=="utf-8"&&u!=="utf8")throw new Error(`Node built-in encoder only supports utf-8, but got ${u}`);return this.textEncoder.encode(a)}decode(a,u){return a.length===0?"":new this.util.TextDecoder(u).decode(a)}};Me().get("IS_NODE")&&Me().setPlatform("node",new uD);function kh(i,a="float32",u){return a=a||"float32",ng(i),new iT(i,a,u)}function _Q(i,a){let u=R(i,"x","cast");if(!AE(a))throw new Error(`Failed to cast to unknown dtype ${a}`);if(a==="string"&&u.dtype!=="string"||a!=="string"&&u.dtype==="string")throw new Error("Only strings can be casted to strings");let h={x:u},d={dtype:a};return V.runKernelFunc(g=>g.cast(u,a),h,null,kl,d)}var wt=O({cast_:_Q});function CQ(i){let a=R(i,"x","clone",null),u=()=>V.makeTensorFromDataId(a.dataId,a.shape,a.dtype),h={x:a};return V.runKernelFunc(u,h,null,Mg)}var Eo=O({clone_:CQ});function vT(i,a=!1){console.log(i.toString(a))}hT();var SQ={buffer:kh,cast:wt,clone:Eo,print:vT};U2(SQ);function $Q(i,a){let u=R(i,"x","reshape",null),h={x:u},d={shape:a},g=(x,w)=>(a=$E(a,u.size),U(u.size===an(a),()=>"new shape and old shape must have the same number of elements."),w([u]),x.reshape(u,a));return V.runKernelFunc(g,h,null,cy,d)}var K=O({reshape_:$Q});function IQ(i,a,u=!1,h=!1){let d=R(i,"a","matMul"),g=R(a,"b","matMul");[d,g]=he(d,g);let x=(C,$)=>{$([d,g]);let F=u?d.shape[d.rank-2]:d.shape[d.rank-1],_=h?g.shape[g.rank-1]:g.shape[g.rank-2],W=u?d.shape[d.rank-1]:d.shape[d.rank-2],et=h?g.shape[g.rank-2]:g.shape[g.rank-1],tt=d.shape.slice(0,-2),G=g.shape.slice(0,-2),mt=an(tt),lt=an(G),gt=mt===lt||mt===1||lt===1;U(d.rank>=2&&g.rank>=2&>,()=>`Error in matMul: the input batch dimensions must either be the same or at least one input batch dimension must be 1. 
Got input batch dimensions of (${tt}) and (${G}).`),U(F===_,()=>`Error in matMul: inner shapes (${F}) and (${_}) of Tensors with shapes ${d.shape} and ${g.shape} and transposeA=${u} and transposeB=${h} must match.`);let _t=mt>lt?tt:G,Gt=_t.concat([W,et]),se=u?K(d,[mt,F,W]):K(d,[mt,W,F]),fe=h?K(g,[lt,et,_]):K(g,[lt,_,et]),_e=C.batchMatMul(se,fe,u,h);return K(_e,Gt)},w={a:d,b:g},k={transposeA:u,transposeB:h};return V.runKernelFunc(x,w,null,mg,k)}var We=O({matMul_:IQ});function EQ(i,a,u=1,h=0){if(a<2)throw new Error(`Error in oneHot: depth must be >=2, but it is ${a}`);let d=R(i,"indices","oneHot","int32"),g=[...d.shape,a],x=(C,$)=>($([d]),K(C.oneHot(K(d,[d.size]),a,u,h),g)),w={indices:d},k={depth:a,onValue:u,offValue:h};return V.runKernelFunc(x,w,null,ny,k)}var TT=O({oneHot_:EQ});function DQ(i,a){let u=R(i,"x","transpose");if(a==null&&(a=u.shape.map((g,x)=>x).reverse()),U(u.rank===a.length,()=>`Error in transpose: rank of input ${u.rank} must match length of perm ${a}.`),a.forEach(g=>{U(g>=0&&g`All entries in 'perm' must be between 0 and ${u.rank-1} but got ${a}`)}),u.rank<=1)return u.clone();let h={x:u},d={perm:a};return V.runKernelFunc(g=>g.transpose(u,a),h,null,Fy,d)}var Ie=O({transpose_:DQ}),NT={};jm(NT,{fromPixels:()=>RQ,toPixels:()=>FQ});function kT(i,a,u){if(wl(i),a!=null&&a.length!==3)throw new Error("tensor3d() requires shape to have three numbers");let h=Ps(i,u);if(h.length!==3&&h.length!==1)throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray");if(h.length===1&&a==null)throw new Error("tensor3d() requires shape to be provided when `values` are a flat array");return ts(i,a,h,u)}var Cl;function AQ(i,a=3){if(a>4)throw new Error("Cannot construct Tensor with more than 4 channels from pixels.");if(i==null)throw new Error("pixels passed to tf.browser.fromPixels() can not be null");let u=!1,h=!1,d=!1,g=!1,x=!1;if(i.data instanceof Uint8Array)u=!0;else if(typeof ImageData!="undefined"&&i instanceof ImageData)h=!0;else if(typeof HTMLVideoElement!="undefined"&&i instanceof HTMLVideoElement)d=!0;else if(typeof HTMLImageElement!="undefined"&&i instanceof HTMLImageElement)g=!0;else if(i.getContext!=null)x=!0;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${i.constructor.name}`);if(d){let W=2;if(d&&i.readyState element.")}let w=My(Z1,V.backendName);if(w!=null){let W={pixels:i},et={numChannels:a};return V.runKernel(Z1,W,et)}let[k,C]=d?[i.videoWidth,i.videoHeight]:[i.width,i.height],$;x?$=i.getContext("2d").getImageData(0,0,k,C).data:h||u?$=i.data:(g||d)&&(Cl==null&&(Cl=document.createElement("canvas").getContext("2d")),Cl.canvas.width=k,Cl.canvas.height=C,Cl.drawImage(i,0,0,k,C),$=Cl.getImageData(0,0,k,C).data);let F;if(a===4)F=new Int32Array($);else{let W=k*C;F=new Int32Array(W*a);for(let et=0;et4||g===2)throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${g}`);if(u.dtype!=="float32"&&u.dtype!=="int32")throw new Error(`Unsupported type for toPixels: ${u.dtype}. 
Please use float32 or int32 tensors.`);let x=await u.data(),w=u.dtype==="float32"?255:1,k=new Uint8ClampedArray(d*h*4);for(let C=0;C1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${W}.`)}else if(u.dtype==="int32"&&(W<0||W>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${W}.`);g===1?($[0]=W*w,$[1]=W*w,$[2]=W*w):$[_]=W*w}let F=C*4;k[F+0]=Math.round($[0]),k[F+1]=Math.round($[1]),k[F+2]=Math.round($[2]),k[F+3]=Math.round($[3])}if(a!=null){a.width=d,a.height=h;let C=a.getContext("2d"),$=new ImageData(k,d,h);C.putImageData($,0,0)}return u!==i&&u.dispose(),k}var RQ=O({fromPixels_:AQ});function pD(i,a,u){let h=i.shape.length;U(h===a.length,()=>`Error in slice${h}D: Length of begin ${a} must match the rank of the array (${h}).`),U(h===u.length,()=>`Error in slice${h}D: Length of size ${u} must match the rank of the array (${h}).`);for(let d=0;d`Error in slice${h}D: begin[${d}] + size[${d}] (${a[d]+u[d]}) would overflow input.shape[${d}] (${i.shape[d]})`)}function Uy(i){let a=[],u=0;for(;i>0;)i&1&&a.push(u),i/=2,u++;return a}function hD(i,a,u){let h=[];for(let d=0;d0){let W=a[0],et=u+1;$=OQ(x,W,et,h,i),F=LQ(w,W,et,d,i),_=PQ(g,W,et,i)}else for(let W=0;W-1)g[w]=0;else{let k=fD(a,u,w),C=h[k];i&1<-1)g[w]=Number.MAX_SAFE_INTEGER;else{let k=fD(a,u,w),C=h[k];i&1<0?x=Number.MIN_SAFE_INTEGER:x=Number.MAX_SAFE_INTEGER);let k=h[d];return x<0&&(x+=k),x=mh(0,x,k-1),x}function zQ(i,a,u,h,d,g){let x=a[d],w=u[d]||1;(i&1<0?x=Number.MAX_SAFE_INTEGER:x=Number.MIN_SAFE_INTEGER);let k=h[d];return x<0&&(x+=k),w>0?x=mh(0,x,k):x=mh(-1,x,k-1),x}function qy(i,a,u){let h,d=i.shape.length;typeof a=="number"?h=[a,...new Array(d-1).fill(0)]:a.length{U(x!==-1,()=>"slice() does not support negative begin indexing.")});let g;return u==null?g=new Array(d).fill(-1):typeof u=="number"?g=[u,...new Array(d-1).fill(-1)]:u.lengthx>=0?x:(U(x===-1,()=>`Negative size values should be exactly -1 but got ${x} for the slice() size at index ${w}.`),i.shape[w]-h[w])),[h,g]}function kn(i){Me().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(i+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}q2(kn);function gD(i,a){return V.tidy(i,a)}function yD(i){let a=Wy(i);a.forEach(u=>u.dispose())}function WQ(i,a){let u=R(i,"a","add"),h=R(a,"b","add");[u,h]=he(u,h);let d=(x,w)=>{let k=x.add(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Tl)}var ke=O({add_:WQ});function VQ(i,a){let u=R(i,"a","floorDiv"),h=R(a,"b","floorDiv");[u,h]=he(u,h);let d=(x,w)=>{let k=x.floorDiv(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Rg)}var Nh=O({floorDiv_:VQ});function GQ(i,a){let u=R(i,"a","div"),h=R(a,"b","div");if([u,h]=he(u,h),u.dtype==="int32"&&h.dtype==="int32")return Nh(u,h);let d=(w,k)=>{let C=w.realDivide(u,h);return k([u,h]),C},g={a:u,b:h},x={};return V.runKernelFunc(d,g,null,$g,x)}var Wt=O({div_:GQ});function UQ(i,a){let u=R(i,"a","mul"),h=R(a,"b","mul");[u,h]=he(u,h);let d=(x,w)=>{let k=x.multiply(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Qg)}var at=O({mul_:UQ});function qQ(i){let a=R(i,"x","abs"),u={x:a};return V.runKernelFunc((h,d)=>(d([a]),a.dtype==="complex64"?h.complexAbs(a):h.abs(a)),u,null,og)}var Bn=O({abs_:qQ});function HQ(i){let a=R(i,"x","acos"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.acos(a);return d([a]),g},u,null,sg)}var _T=O({acos_:HQ});function jQ(i){let a=R(i,"x","acosh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.acosh(a);return 
d([a]),g},u,null,ig)}var CT=O({acosh_:jQ});function KQ(i,a){for(let u=0;ui[g]);return[u,d]}function dn(i,a){let u=a.map(h=>1);return XQ(i,u,a)}function Nn(i,a){if(KQ(i,a))return null;let u=[];for(let h=0;hu.push(h)),u}function Sl(i){return i.map((a,u)=>[u,a]).sort((a,u)=>a[1]-u[1]).map(a=>a[0])}function zn(i,a){let u=[];for(let h=a-i;h{let k=Zt(a,h.shape),C=k,$=Nn(C,h.rank);$!=null&&(h=Ie(h,$),C=zn(C.length,h.rank));let F=w.all(h,C);if(u){let _=dn(F.shape,k);return K(F,_)}return F},g={x:h},x={axis:a,keepDims:u};return V.runKernelFunc(d,g,null,zE,x)}var ST=O({all_:YQ});function JQ(i,a=null,u=!1){let h=R(i,"x","any","bool"),d=w=>{let k=Zt(a,h.shape),C=k,$=Nn(C,h.rank);$!=null&&(h=Ie(h,$),C=zn(C.length,h.rank));let F=w.any(h,C);if(u){let _=dn(F.shape,k);return K(F,_)}return F},g={x:h},x={axis:a,keepDims:u};return V.runKernelFunc(d,g,null,WE,x)}var $T=O({any_:JQ});function ZQ(i,a=0){let u=R(i,"x","argMax"),h=(x,w)=>{w([u]);let k=Zt(a,u.shape),C=Nn(k,u.rank);return C!=null&&(u=Ie(u,C),k=zn(k.length,u.rank)),x.argMax(u,k[0])},d={x:u},g={axis:a};return V.runKernelFunc(h,d,null,ag,g)}var IT=O({argMax_:ZQ});function QQ(i,a=0){let u=R(i,"x","argMin"),h=(x,w)=>{w([u]),a==null&&(a=0);let k=Zt(a,u.shape),C=Nn(k,u.rank);return C!=null&&(u=Ie(u,C),k=zn(k.length,u.rank)),x.argMin(u,k[0])},d={x:u},g={axis:a};return V.runKernelFunc(h,d,null,cg,g)}var ET=O({argMin_:QQ});function ttt(i){let a=R(i,"x","asin"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.asin(a);return d([a]),g},u,null,lg)}var DT=O({asin_:ttt});function ett(i){let a=R(i,"x","asinh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.asinh(a);return d([a]),g},u,null,ug)}var AT=O({asinh_:ett});function ntt(i){let a=R(i,"x","atan"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.atan(a);return d([a]),g},u,null,pg)}var FT=O({atan_:ntt});function rtt(i,a){let u=R(i,"a","atan2"),h=R(a,"b","atan2");[u,h]=he(u,h);let d=(x,w)=>{let k=x.atan2(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,fg)}var RT=O({atan2_:rtt});function ott(i){let a=R(i,"x","atanh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.atanh(a);return d([a]),g},u,null,hg)}var PT=O({atanh_:ott});function ns(i,a,u,h,d,g,x="channelsLast"){let[w,k]=Hy(a),C;if(x==="channelsLast")C=[w,k,i[3],i[3]];else if(x==="channelsFirst")C=[w,k,i[1],i[1]];else throw new Error(`Unknown dataFormat ${x}`);return Gr(i,C,u,h,d,g,!1,x)}function jy(i,a,u,h,d,g,x="NDHWC"){let[w,k,C]=OT(a),$,F;if(x==="NDHWC")F="channelsLast",$=[w,k,C,i[4],i[4]];else if(x==="NCDHW")F="channelsFirst",$=[w,k,C,i[1],i[1]];else throw new Error(`Unknown dataFormat ${x}`);return _h(i,$,u,h,d,!1,F,g)}function Gr(i,a,u,h,d,g,x=!1,w="channelsLast"){let[k,C,$,F]=[-1,-1,-1,-1];if(w==="channelsLast")[k,C,$,F]=i;else if(w==="channelsFirst")[k,F,C,$]=i;else throw new Error(`Unknown dataFormat ${w}`);let[_,W,,et]=a,[tt,G]=Hy(u),[mt,lt]=Hy(h),gt=$l(_,mt),_t=$l(W,lt),{padInfo:Gt,outHeight:se,outWidth:fe}=stt(d,C,$,tt,G,gt,_t,g,w),_e=x?et*F:et,Ge;return w==="channelsFirst"?Ge=[k,_e,se,fe]:w==="channelsLast"&&(Ge=[k,se,fe,_e]),{batchSize:k,dataFormat:w,inHeight:C,inWidth:$,inChannels:F,outHeight:se,outWidth:fe,outChannels:_e,padInfo:Gt,strideHeight:tt,strideWidth:G,filterHeight:_,filterWidth:W,effectiveFilterHeight:gt,effectiveFilterWidth:_t,dilationHeight:mt,dilationWidth:lt,inShape:i,outShape:Ge,filterShape:a}}function _h(i,a,u,h,d,g=!1,x="channelsLast",w){let[k,C,$,F,_]=[-1,-1,-1,-1,-1];if(x==="channelsLast")[k,C,$,F,_]=i;else if(x==="channelsFirst")[k,_,C,$,F]=i;else throw new Error(`Unknown dataFormat 
${x}`);let[W,et,tt,,G]=a,[mt,lt,gt]=OT(u),[_t,Gt,se]=OT(h),fe=$l(W,_t),_e=$l(et,Gt),Ge=$l(tt,se),{padInfo:Vt,outDepth:ln,outHeight:Ce,outWidth:rr}=itt(d,C,$,F,mt,lt,gt,fe,_e,Ge,w),Ys=g?G*_:G,Js;return x==="channelsFirst"?Js=[k,Ys,ln,Ce,rr]:x==="channelsLast"&&(Js=[k,ln,Ce,rr,Ys]),{batchSize:k,dataFormat:x,inDepth:C,inHeight:$,inWidth:F,inChannels:_,outDepth:ln,outHeight:Ce,outWidth:rr,outChannels:Ys,padInfo:Vt,strideDepth:mt,strideHeight:lt,strideWidth:gt,filterDepth:W,filterHeight:et,filterWidth:tt,effectiveFilterDepth:fe,effectiveFilterHeight:_e,effectiveFilterWidth:Ge,dilationDepth:_t,dilationHeight:Gt,dilationWidth:se,inShape:i,outShape:Js,filterShape:a}}function att(i,a,u,h,d){h==null&&(h=xD(i,a,u));let g=i[0],x=i[1],w=Aa((g-a+2*h)/u+1,d);U(Te(w),()=>`The output # of rows (${w}) must be an integer. Change the stride and/or zero pad parameters`);let k=Aa((x-a+2*h)/u+1,d);return U(Te(k),()=>`The output # of columns (${k}) must be an integer. Change the stride and/or zero pad parameters`),[w,k]}function ctt(i,a,u,h,d,g){d==null&&(d=xD(i,a,h));let x=i[0],w=i[1],k=i[2],C=Aa((x-a+2*d)/h+1,g);U(Te(C),()=>`The output # of depths (${C}) must be an integer. Change the stride and/or zero pad parameters`);let $=Aa((w-a+2*d)/h+1,g);U(Te($),()=>`The output # of rows (${$}) must be an integer. Change the stride and/or zero pad parameters`);let F=Aa((k-a+2*d)/h+1,g);return U(Te(F),()=>`The output # of columns (${F}) must be an integer. Change the stride and/or zero pad parameters`),[C,$,F,u]}function xD(i,a,u,h=1){let d=$l(a,h);return Math.floor((i[0]*(u-1)-u+d)/2)}function Hy(i){return typeof i=="number"?[i,i,i]:i.length===2?[i[0],i[1],1]:i}function OT(i){return typeof i=="number"?[i,i,i]:i}function $l(i,a){return a<=1?i:i+(i-1)*(a-1)}function stt(i,a,u,h,d,g,x,w,k){let C,$,F;if(typeof i=="number"){let _=i===0?"VALID":"NUMBER";C={top:i,bottom:i,left:i,right:i,type:_};let W=att([a,u],g,h,i,w);$=W[0],F=W[1]}else if(i==="same"){$=Math.ceil(a/h),F=Math.ceil(u/d);let _=Math.max(0,($-1)*h+g-a),W=Math.max(0,(F-1)*d+x-u),et=Math.floor(_/2),tt=_-et,G=Math.floor(W/2),mt=W-G;C={top:et,bottom:tt,left:G,right:mt,type:"SAME"}}else if(i==="valid")C={top:0,bottom:0,left:0,right:0,type:"VALID"},$=Math.ceil((a-g+1)/h),F=Math.ceil((u-x+1)/d);else if(typeof i=="object"){let _=k==="channelsLast"?i[1][0]:i[2][0],W=k==="channelsLast"?i[1][1]:i[2][1],et=k==="channelsLast"?i[2][0]:i[3][0],tt=k==="channelsLast"?i[2][1]:i[3][1],G=_===0&&W===0&&et===0&&tt===0?"VALID":"EXPLICIT";C={top:_,bottom:W,left:et,right:tt,type:G},$=Aa((a-g+_+W)/h+1,w),F=Aa((u-x+et+tt)/d+1,w)}else throw Error(`Unknown padding parameter: ${i}`);return{padInfo:C,outHeight:$,outWidth:F}}function itt(i,a,u,h,d,g,x,w,k,C,$){let F,_,W,et;if(typeof i=="number"){let tt=i===0?"VALID":"NUMBER";F={top:i,bottom:i,left:i,right:i,front:i,back:i,type:tt};let G=ctt([a,u,h,1],w,1,d,i,$);_=G[0],W=G[1],et=G[2]}else if(i==="same"){_=Math.ceil(a/d),W=Math.ceil(u/g),et=Math.ceil(h/x);let tt=(_-1)*d+w-a,G=(W-1)*g+k-u,mt=(et-1)*x+C-h,lt=Math.floor(tt/2),gt=tt-lt,_t=Math.floor(G/2),Gt=G-_t,se=Math.floor(mt/2),fe=mt-se;F={top:_t,bottom:Gt,left:se,right:fe,front:lt,back:gt,type:"SAME"}}else if(i==="valid")F={top:0,bottom:0,left:0,right:0,front:0,back:0,type:"VALID"},_=Math.ceil((a-w+1)/d),W=Math.ceil((u-k+1)/g),et=Math.ceil((h-C+1)/x);else throw Error(`Unknown padding parameter: ${i}`);return{padInfo:F,outDepth:_,outHeight:W,outWidth:et}}function Aa(i,a){if(!a)return i;switch(a){case"round":return Math.round(i);case"ceil":return Math.ceil(i);case"floor":return 
Math.floor(i);default:throw new Error(`Unknown roundingMode ${a}`)}}function Di(i){let[a,u,h]=Hy(i);return a===1&&u===1&&h===1}function Wn(i,a){return Di(i)||Di(a)}function Il(i){if(i==="NHWC")return"channelsLast";if(i==="NCHW")return"channelsFirst";throw new Error(`Unknown dataFormat ${i}`)}function ltt(i,a,u,h,d){let g=R(i,"x","avgPool","float32"),x=1;U(Wn(u,x),()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${u} and dilations '${x}'`);let w=g,k=!1;g.rank===3&&(k=!0,w=K(g,[1,g.shape[0],g.shape[1],g.shape[2]])),U(w.rank===4,()=>`Error in avgPool: x must be rank 4 but got rank ${w.rank}.`),d!=null&&U(Te(h),()=>`Error in avgPool: pad must be an integer when using, dimRoundingMode ${d} but got pad ${h}.`);let C=(W,et)=>{let tt=ns(w.shape,a,u,1,h,d);return et([w]),tt.filterWidth===1&&tt.filterHeight===1&&Fs(tt.inShape,tt.outShape)?w.clone():W.avgPool(w,tt)},$={x:w},F={filterSize:a,strides:u,pad:h,dimRoundingMode:d},_=V.runKernelFunc(C,$,null,dg,F);return _=wt(_,g.dtype),k?K(_,[_.shape[1],_.shape[2],_.shape[3]]):_}var Ch=O({avgPool_:ltt});function wD(i,a){let u=i[0].length;i.forEach((d,g)=>{U(d.length===u,()=>`Error in concat${u}D: rank of tensors[${g}] must be the same as the rank of the rest (${u})`)}),U(a>=0&&a`Error in concat${u}D: axis must be between 0 and ${u-1}.`);let h=i[0];i.forEach((d,g)=>{for(let x=0;x`Error in concat${u}D: Shape of tensors[${g}] (${d}) does not match the shape of the rest (${h}) along the non-concatenated axis ${g}.`)})}function vD(i,a){let u=i[0].slice();for(let h=1;h=1,()=>"Pass at least one tensor to concat");let u=Vy(i,"tensors","concat");u[0].dtype==="complex64"&&u.forEach(x=>{if(x.dtype!=="complex64")throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${x.dtype}. 
`)});let h=(x,w)=>{let k=Zt(a,u[0].shape)[0],C=vD(u.map(_=>_.shape),k);if(an(C)===0)return fT([],C);if(u=u.filter(_=>_.size>0),u.length===1)return u[0];let $=u.map(_=>_.shape);wD($,k);let F=x.concat(u,k);return w(u),F},d=u,g={axis:a};return V.runKernelFunc(h,d,null,wg,g)}var mn=O({concat_:utt});function ptt(i){let a=R(i,"x","sigmoid"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.sigmoid(a);return d([g]),g},u,null,vy)}var Fa=O({sigmoid_:ptt});function htt(i,a,u){let h=R(i,"x","slice");if(h.rank===0)throw new Error("Slicing scalar is not possible");let d=(w,k)=>{let[C,$]=qy(h,a,u);return pD(h,C,$),k([h]),w.slice(h,C,$)},g={x:h},x={begin:a,size:u};return V.runKernelFunc(d,g,null,yy,x)}var ve=O({slice_:htt});function ftt(i){let a=R(i,"x","tanh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.tanh(a);return d([g]),g},u,null,Dy)}var LT=O({tanh_:ftt});function dtt(i,a,u){let h=R(i,"x","batchToSpaceND"),d=a.reduce((k,C)=>k*C);U(h.rank>=1+a.length,()=>`input rank is ${h.rank} but should be > than blockShape.length ${a.length}`),U(u.length===a.length,()=>`crops.length is ${u.length} but should be equal to blockShape.length ${a.length}`),U(h.shape[0]%d===0,()=>`input tensor batch is ${h.shape[0]} but is not divisible by the product of the elements of blockShape ${a.join(" * ")} === ${d}`);let g=k=>k.batchToSpaceND(h,a,u),x={x:h},w={blockShape:a,crops:u};return V.runKernelFunc(g,x,null,gg,w)}var Ra=O({batchToSpaceND_:dtt});function TD(i){let a;return i.rank===0||i.rank===1?a=K(i,[1,1,1,i.size]):i.rank===2?a=K(i,[1,1,i.shape[0],i.shape[1]]):i.rank===3?a=K(i,[1,i.shape[0],i.shape[1],i.shape[2]]):a=i,a}function mtt(i,a,u,h,d,g){g==null&&(g=.001);let x=R(i,"x","batchNorm"),w=R(a,"mean","batchNorm"),k=R(u,"variance","batchNorm"),C;d!=null&&(C=R(d,"scale","batchNorm"));let $;h!=null&&($=R(h,"offset","batchNorm")),U(w.rank===k.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),U($==null||w.rank===$.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),U(C==null||w.rank===C.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");let F=TD(x),_=(G,mt)=>(mt([F,w,k,C]),G.batchNorm(F,Ky(w),Ky(k),Ky($),Ky(C),g)),W={x:F,scale:C,offset:$,mean:w,variance:k},et={varianceEpsilon:g},tt=V.runKernelFunc(_,W,null,Pg,et);return K(tt,x.shape)}function Ky(i){return i==null?null:i.rank===0?K(i,[i.size]):i.rank===1?i:i.rank===2?K(i,[1,1,i.shape[0],i.shape[1]]):i.rank===3?K(i,[1,i.shape[0],i.shape[1],i.shape[2]]):i}var MT=O({batchNorm_:mtt});function gtt(i,a){let u=R(i,"broadcastTo","x"),h=u.shape;if(a.some($=>!($>0)||$%1!==0))throw new Error(`broadcastTo(): Invalid broadcast shape [${a}].`);if(a.lengthu.rank){let $=u.shape.slice();for(;$.length=0;$--)if(d[$]===a[$])g[$]=1;else if(u.shape[$]!==1)throw new Error(`broadcastTo(): [${h}] cannot be broadcast to [${a}].`);let x=g.map(($,F)=>$>1?F:-1).filter($=>$>=0);if(x.length===0)return Eo(u);let w=$=>$.tile(u,g),k={x:u},C={shape:a,inputShape:d};return V.runKernelFunc(w,k,null,yg,C)}var El=O({broadcastTo_:gtt});function ytt(i){let a=R(i,"x","ceil"),u={x:a};return V.runKernelFunc(h=>h.ceil(a),u,null,bg)}var BT=O({ceil_:ytt});function btt(i,a,u){let h=R(i,"x","clipByValue");U(a<=u,()=>`Error in clip: min (${a}) must be less than or equal to max (${u}).`);let d={x:h},g={clipValueMin:a,clipValueMax:u};return V.runKernelFunc((x,w)=>{let k=x.clip(h,a,u);return w([h]),k},d,null,xg,g)}var zT=O({clipByValue_:btt});function xtt(i,a,u,h,d="NHWC",g=[1,1],x){let 
w=R(i,"x","conv2d"),k=R(a,"filter","conv2d"),C=w,$=!1;w.rank===3&&($=!0,C=K(w,[1,w.shape[0],w.shape[1],w.shape[2]])),U(C.rank===4,()=>`Error in conv2d: input must be rank 4, but got rank ${C.rank}.`),U(k.rank===4,()=>`Error in conv2d: filter must be rank 4, but got rank ${k.rank}.`),x!=null&&U(Te(h),()=>`Error in conv2d: pad must be an integer when using, dimRoundingMode ${x} but got pad ${h}.`);let F=d==="NHWC"?C.shape[3]:C.shape[1];U(F===k.shape[2],()=>`Error in conv2d: depth of input (${F}) must match input depth for filter ${k.shape[2]}.`),U(Wn(u,g),()=>`Error in conv2D: Either strides or dilations must be 1. Got strides ${u} and dilations '${g}'`);let _=(G,mt)=>{let lt=Il(d),gt=Gr(C.shape,k.shape,u,g,h,x,!1,lt),_t=G.conv2d(C,k,gt);return mt([C,k]),_t},W={x:C,filter:k},et={strides:u,pad:h,dataFormat:d,dilations:g,dimRoundingMode:x},tt=V.runKernelFunc(_,W,null,vg,et);return $?K(tt,[tt.shape[1],tt.shape[2],tt.shape[3]]):tt}var Bs=O({conv2d_:xtt});function wtt(i,a,u,h,d="NWC",g=1,x){let w=R(i,"x","conv1d"),k=R(a,"filter","conv1d"),C=w,$=!1;w.rank===2&&($=!0,C=K(w,[1,w.shape[0],w.shape[1]])),U(C.rank===3,()=>`Error in conv1d: input must be rank 3, but got rank ${C.rank}.`),U(k.rank===3,()=>`Error in conv1d: filter must be rank 3, but got rank ${k.rank}.`),x!=null&&U(Te(h),()=>`Error in conv1d: pad must be an integer when using, dimRoundingMode ${x} but got pad ${h}.`),U(C.shape[2]===k.shape[1],()=>`Error in conv1d: depth of input (${C.shape[2]}) must match input depth for filter ${k.shape[1]}.`),U(Wn(u,g),()=>`Error in conv1D: Either stride or dilation must be 1. Got stride ${u} and dilation '${g}'`),U(d==="NWC",()=>`Error in conv1d: got dataFormat of ${d} but only NWC is currently supported.`);let F=K(k,[1,k.shape[0],k.shape[1],k.shape[2]]),_=K(C,[C.shape[0],1,C.shape[1],C.shape[2]]),W=[1,u],et=[1,g],tt="NHWC",G=Bs(_,F,W,h,tt,et,x);return $?K(G,[G.shape[2],G.shape[3]]):K(G,[G.shape[0],G.shape[2],G.shape[3]])}var WT=O({conv1d_:wtt});function vtt(i,a,u,h,d,g="NHWC",x){U(i.length===a.rank,()=>`Length of inShape (${i.length}) and rank of dy (${a.rank}) must match`);let w=i,k=a,C=!1;a.rank===3&&(C=!0,k=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]),w=[1,i[0],i[1],i[2]]),U(w.length===4,()=>`Error in conv2dDerInput: inShape must be length 4, but got length ${w.length}.`),U(k.rank===4,()=>`Error in conv2dDerInput: dy must be rank 4, but got rank ${k.rank}`),U(u.rank===4,()=>`Error in conv2dDerInput: filter must be rank 4, but got rank ${u.rank}`);let $=g==="NHWC"?w[3]:w[1],F=g==="NHWC"?k.shape[3]:k.shape[1];U($===u.shape[2],()=>`Error in conv2dDerInput: depth of input (${$}) must match input depth for filter ${u.shape[2]}.`),U(F===u.shape[3],()=>`Error in conv2dDerInput: depth of output (${F}) must match output depth for filter ${u.shape[3]}.`),x!=null&&U(Te(d),()=>`Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${x} but got pad ${d}.`);let _=(G,mt)=>{let lt=1,gt=Il(g),_t=Gr(w,u.shape,h,lt,d,x,!1,gt),Gt=G.conv2dDerInput(k,u,_t);return mt([k,u]),Gt},W={dy:k,filter:u},et={strides:h,pad:d,dataFormat:g,dimRoundingMode:x,inputShape:w},tt=V.runKernelFunc(_,W,null,Tg,et);return C?K(tt,[tt.shape[1],tt.shape[2],tt.shape[3]]):tt}var Xy=O({conv2DBackpropInput_:vtt});function Ttt(i,a,u,h,d,g){let x=R(i,"x","conv2dTranspose"),w=R(a,"filter","conv2dTranspose");return Xy(u,x,w,h,d,"NHWC",g)}var VT=O({conv2dTranspose_:Ttt});function ktt(i,a,u,h,d){U(i.length===a.rank,()=>`Length of inShape (${i.length}) and rank of dy (${a.rank}) must match`);let 
g=i,x=a,w=!1;a.rank===4&&(w=!0,x=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]]),g=[1,i[0],i[1],i[2],i[3]]);let k=g[4],C=x.shape[4];U(g.length===5,()=>`Error in conv3dDerInput: inShape must be length 5, but got length ${g.length}.`),U(x.rank===5,()=>`Error in conv3dDerInput: dy must be rank 5, but got rank ${x.rank}`),U(u.rank===5,()=>`Error in conv3dDerInput: filter must be rank 5, but got rank ${u.rank}`),U(k===u.shape[3],()=>`Error in conv3dDerInput: depth of input (${k}) must match input depth for filter ${u.shape[3]}.`),U(C===u.shape[4],()=>`Error in conv3dDerInput: depth of output (${C}) must match output depth for filter ${u.shape[4]}.`);let $=et=>{let tt=1,G=_h(g,u.shape,h,tt,d);return et.conv3dDerInput(x,u,G)},F={dy:x,filter:u},_={pad:d,strides:h,inputShape:g},W=V.runKernelFunc($,F,null,XE,_);return w?K(W,[W.shape[1],W.shape[2],W.shape[3],W.shape[4]]):W}var kD=O({conv3DBackpropInput_:ktt});function Ntt(i){let a=R(i,"x","cos"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.cos(a);return d([a]),g},u,null,kg)}var Pa=O({cos_:Ntt});function _tt(i){let a=R(i,"x","cosh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.cosh(a);return d([a]),g},u,null,Ng)}var Sh=O({cosh_:_tt});function Ctt(i,a=0,u=!1,h=!1){let d=R(i,"x","cumsum"),g=(k,C)=>{let $=Nn([a],d.rank),F=d;$!=null&&(F=Ie(d,$));let _=zn(1,d.rank)[0],W=k.cumsum(F,_,u,h);if(C([d]),$!=null){let et=Sl($);W=Ie(W,et)}return W},x={x:d},w={axis:a,exclusive:u,reverse:h};return V.runKernelFunc(g,x,null,_g,w)}var $h=O({cumsum_:Ctt});function Stt(i,a,u="NHWC"){let h=R(i,"x","depthToSpace"),d=u==="NHWC"?h.shape[1]:h.shape[2],g=u==="NHWC"?h.shape[2]:h.shape[3],x=u==="NHWC"?h.shape[3]:h.shape[1];U(d*a>=0,()=>`Negative dimension size caused by overflow when multiplying + ${d} and ${a} for depthToSpace with input shape + ${h.shape}`),U(g*a>=0,()=>`Negative dimension size caused by overflow when multiplying + ${g} and ${a} for depthToSpace with input shape + ${h.shape}`),U(x%(a*a)===0,()=>`Dimension size must be evenly divisible by ${a*a} but is ${x} for depthToSpace with input shape ${h.shape}`);let w=$=>$.depthToSpace(h,a,u),k={x:h},C={blockSize:a,dataFormat:u};return V.runKernelFunc(w,k,null,JE,C)}var GT=O({depthToSpace_:Stt});function $tt(i,a,u,h,d="NHWC",g=[1,1],x){let w=R(i,"x","depthwiseConv2d"),k=R(a,"filter","depthwiseConv2d"),C=w,$=!1;w.rank===3&&($=!0,C=K(w,[1,w.shape[0],w.shape[1],w.shape[2]])),U(C.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${C.rank}.`),U(k.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${k.rank}.`),U(C.shape[3]===k.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${C.shape[3]}) must match the inChannels dimension in filter ${k.shape[2]}.`),x!=null&&U(Te(h),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${x} but got pad ${h}.`);let F=(tt,G)=>{g==null&&(g=[1,1]),U(Wn(u,g),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${u} and dilations '${g}'`);let mt=Gr(C.shape,k.shape,u,g,h,x,!0),lt=tt.depthwiseConv2D(C,k,mt);return G([C,k]),lt},_={x:C,filter:k},W={strides:u,pad:h,dataFormat:d,dilations:g,dimRoundingMode:x},et=V.runKernelFunc(F,_,null,Cg,W);return $?K(et,[et.shape[1],et.shape[2],et.shape[3]]):et}var Oa=O({depthwiseConv2d_:$tt});function Itt(i,a,u,h,d=[1,1],g="NHWC"){let x=R(i,"x","dilation2d"),w=R(a,"filter","dilation2d");U(x.rank===3||x.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${x.rank}.`),U(w.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${w.rank}.`),U(g==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${g}`);let k=x,C=!1;x.rank===3&&(k=K(x,[1,x.shape[0],x.shape[1],x.shape[2]]),C=!0);let $={x:k,filter:w},F={strides:u,pad:h,dilations:d},_=V.runKernel(Sg,$,F);return C?K(_,[_.shape[1],_.shape[2],_.shape[3]]):_}var UT=O({dilation2d_:Itt});function Be(i,a){let u=[];for(let h=0;h1)&&u.unshift(g)}return u}function ne(i,a){let u=[],h=Math.max(i.length,a.length);for(let d=0;dx.equal(u,h),g={a:u,b:h};return V.runKernelFunc(d,g,null,r2)}var zs=O({equal_:Ett});function Dtt(i,a,u){let h=R(a,"a","where"),d=R(u,"b","where"),g=R(i,"condition","where","bool"),x=ne(h.shape,d.shape),w=El(h,x),k=El(d,x);g.rank===1&&U(g.shape[0]===h.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),g.rank!==1&&ae(g.shape,k.shape,"Error in where: ");let C=(F,_)=>{let W=F.select(g,w,k);return _([g]),W},$={condition:g,t:w,e:k};return V.runKernelFunc(C,$,null,my)}var _n=O({where_:Dtt});function Att(i){let a=R(i,"x","zerosLike"),u={x:a};return V.runKernelFunc(h=>h.zerosLike(a),u,null,Oy)}var jt=O({zerosLike_:Att});function Ftt(i,a){let u=R(i,"a","div"),h=R(a,"b","div");[u,h]=he(u,h);let d=Wt(u,h),g=jt(d),x=zs(h,g);return _n(x,g,d)}var qT=O({divNoNan_:Ftt});function Rtt(i,a){let u=R(i,"t1","dot"),h=R(a,"t2","dot");U((u.rank===1||u.rank===2)&&(h.rank===1||h.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${u.rank} and ${h.rank}.`);let d=u.rank===1?u.size:u.shape[1],g=h.rank===1?h.size:h.shape[0];if(U(d===g,()=>`Error in dot: inner dimensions of inputs must match, but got ${d} and ${g}.`),u.rank===1&&h.rank===1){let x=K(u,[1,-1]),w=K(h,[-1,1]),k=We(x,w);return K(k,[])}else if(u.rank===1&&h.rank===2){let x=K(u,[1,-1]),w=K(h,[h.shape[0],h.shape[1]]),k=We(x,w);return K(k,[k.size])}else if(u.rank===2&&h.rank===1){let x=K(h,[-1,1]),w=We(u,x);return K(w,[w.size])}else{let x=K(h,[h.shape[0],h.shape[1]]),w=We(u,x);return w}}var HT=O({dot_:Rtt});function Ptt(i){let a=R(i,"x","elu"),u=(d,g)=>{let x=d.elu(a);return g([x]),x},h={x:a};return V.runKernelFunc(u,h,null,Ig)}var jT=O({elu_:Ptt});function Ott(i){let a=R(i,"x","erf");U(a.dtype==="int32"||a.dtype==="float32",()=>"Input dtype must be `int32` or `float32`."),a.dtype==="int32"&&(a=wt(a,"float32"));let u={x:a};return V.runKernelFunc((h,d)=>{let g=h.erf(a);return d([a]),g},u,null,Eg)}var KT=O({erf_:Ott});function Ltt(i){let a=R(i,"x","exp"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.exp(a);return d([g]),g},u,null,Dg)}var Cn=O({exp_:Ltt});function Mtt(i,a=0){let u=null,h=R(i,"x","expandDims",u);U(a<=h.rank,()=>"Axis must be <= rank of the tensor");let d=h.shape.slice();return a<0&&(U(-(h.rank+1)<=a,()=>`Axis must be in the interval [${-(h.rank+1)}, ${h.rank}]`),a=h.rank+a+1),d.splice(a,0,1),K(h,d)}var mr=O({expandDims_:Mtt});function Btt(i){let a=R(i,"x","expm1"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.expm1(a);return 
d([a]),g},u,null,Ag)}var XT=O({expm1_:Btt});function ztt(i,a){let u=null,h=R(i,"x","tile",u);U(h.rank===a.length,()=>`Error in transpose: rank of input ${h.rank} must match length of reps ${a}.`);let d=(k,C)=>{let $=k.tile(h,a);return C([h]),$},g=[h],x={x:h},w={reps:a};return V.runKernelFunc(d,x,null,Ay,w,g)}var Ws=O({tile_:ztt});function Wtt(i,a,u,h="float32"){a==null&&(a=i);let d=kh([i,a],h),g=i<=a?i:a;for(let w=0;wd.fill(i,a,u),{},null,s2,h)}function Vtt(i){let a=R(i,"x","floor"),u={x:a};return V.runKernelFunc(h=>h.floor(a),u,null,Fg)}var Ih=O({floor_:Vtt});function ND(i,a,u){let h=i.shape[u],d=[],g=1,x=1;for(let w=0;w{let $=Zt(u,h.shape)[0],F=ND(h,d,$),_=k.gather(h,K(d,[d.size]),$);return C([h,d]),K(_,F.outputShape)};return V.runKernelFunc(w,g,null,Og,x)}var Eh=O({gather_:Gtt});function Utt(i,a){let u=R(i,"a","greater"),h=R(a,"b","greater");[u,h]=he(u,h),ne(u.shape,h.shape);let d=x=>x.greater(u,h),g={a:u,b:h};return V.runKernelFunc(d,g,null,a2)}var gr=O({greater_:Utt});function qtt(i,a){let u=R(i,"a","greaterEqual"),h=R(a,"b","greaterEqual");[u,h]=he(u,h),ne(u.shape,h.shape);let d=(x,w)=>{let k=x.greaterEqual(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Lg)}var Ur=O({greaterEqual_:qtt});function Htt(i){let a=R(i,"input","imag"),u=d=>d.imag(a),h={input:a};return V.runKernelFunc(u,h,null,l2)}var La=O({imag_:Htt});function jtt(i){let a=R(i,"x","isFinite"),u={x:a};return V.runKernelFunc(h=>h.isFinite(a),u,null,Bg)}var ZT=O({isFinite_:jtt});function Ktt(i){let a=R(i,"x","isInf"),u={x:a};return V.runKernelFunc(h=>h.isInf(a),u,null,zg)}var QT=O({isInf_:Ktt});function Xtt(i){let a=R(i,"x","isNaN"),u={x:a};return V.runKernelFunc(h=>h.isNaN(a),u,null,Wg)}var tk=O({isNaN_:Xtt});function Ytt(i,a){let u=R(i,"a","maximum"),h=R(a,"b","maximum");[u,h]=he(u,h),u.dtype==="bool"&&(u=wt(u,"int32"),h=wt(h,"int32")),ne(u.shape,h.shape);let d=(x,w)=>{let k=x.maximum(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,jg)}var Vs=O({maximum_:Ytt});function Ot(i,a){if((Vr(i)&&a!=="string"||Array.isArray(i))&&a!=="complex64")throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");if(a==="string"&&Vr(i)&&!(i instanceof Uint8Array))throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");let u=[],h=[];return ts(i,u,h,a)}function Jtt(i,a=.2){let u=R(i,"x","leakyRelu");return Vs(at(Ot(a),u),u)}var ek=O({leakyRelu_:Jtt});function Ztt(i,a){let u=R(i,"a","less"),h=R(a,"b","less");[u,h]=he(u,h),ne(u.shape,h.shape);let d=x=>x.less(u,h),g={a:u,b:h};return V.runKernelFunc(d,g,null,u2)}var Ma=O({less_:Ztt});function Qtt(i,a){let u=R(i,"a","lessEqual"),h=R(a,"b","lessEqual");[u,h]=he(u,h),ne(u.shape,h.shape);let d=(x,w)=>{let k=x.lessEqual(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,p2)}var qr=O({lessEqual_:Qtt});function tet(i,a=5,u=1,h=1,d=.5){let g=R(i,"x","localResponseNormalization");U(g.rank===4||g.rank===3,()=>`Error in localResponseNormalization: x must be rank 3 or 4 but got + rank ${g.rank}.`),U(Te(a),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${a}.`);let x=g,w=!1;g.rank===3&&(w=!0,x=K(g,[1,g.shape[0],g.shape[1],g.shape[2]]));let k=(_,W)=>{let et=_.localResponseNormalization4D(x,a,u,h,d);return W([x,et]),et},C={x},$={depthRadius:a,bias:u,alpha:h,beta:d},F=V.runKernelFunc(k,C,null,qg,$);return w?K(F,[F.shape[1],F.shape[2],F.shape[3]]):F}var nk=O({localResponseNormalization_:tet});function eet(i){let 
a=R(i,"x","log"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.log(a);return d([a]),g},u,null,Vg)}var ro=O({log_:eet});function net(i){let a=R(i,"x","log1p"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.log1p(a);return d([a]),g},u,null,Gg)}var Dh=O({log1p_:net});function Dl(i){return V.customGrad(i)}function ret(i){let a=R(i,"x","neg"),u={x:a};return V.runKernelFunc(h=>h.neg(a),u,null,ty)}var me=O({neg_:ret});function oet(i){let a=R(i,"x","softplus"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.softplus(a);return d([a]),g},u,null,Ty)}var Ah=O({softplus_:oet});function set(i){let a=R(i,"x","logSigmoid"),u=Dl(h=>{let d=me(Ah(me(h))),g=x=>{let w=at(x,Fa(me(h)));return w};return{value:d,gradFunc:g}});return u(a)}var rk=O({logSigmoid_:set});function iet(i,a=null,u=!1){let h=R(i,"x","max"),d=(w,k)=>{let C=Zt(a,h.shape),$=C,F=Nn($,h.rank),_=h;F!=null&&(_=Ie(h,F),$=zn($.length,_.rank));let W=w.max(_,$);F!=null&&_.dispose();let et=W;if(u){let tt=dn(et.shape,Zt(a,h.shape));et=K(et,tt),W.dispose()}return k([h,et]),et},g={x:h},x={reductionIndices:a,keepDims:u};return V.runKernelFunc(d,g,null,Hg,x)}var Do=O({max_:iet});function aet(i,a){let u=R(i,"a","sub"),h=R(a,"b","sub");[u,h]=he(u,h);let d=(x,w)=>{let k=x.subtract(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Iy)}var Pt=O({sub_:aet});function cet(i,a=null,u=!1){let h=R(i,"x","sum");h.dtype==="bool"&&(h=wt(h,"int32"));let d=(w,k)=>{k([h]);let C=Zt(a,h.shape),$=Nn(C,h.rank),F=C,_=h;$!=null&&(_=Ie(h,$),F=zn(F.length,h.rank));let W=w.sum(_,F);if(u){let et=dn(W.shape,C);W=K(W,et)}return W},g={x:h},x={axis:a,keepDims:u};return V.runKernelFunc(d,g,null,Ny,x)}var Ft=O({sum_:cet});function uet(i,a=-1){let u=R(i,"logits","logSoftmax");if(a===-1&&(a=u.rank-1),a!==u.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${u.rank} and axis was ${a}`);let h=(x,w)=>{let k=!0,C=Do(i,a,!0),$=Pt(i,C),F=Pt(wt($,"float32"),ro(Ft(Cn($),a,k)));return w([F]),F},d={logits:u},g={axis:a};return V.runKernelFunc(h,d,null,Ug,g)}var ok=O({logSoftmax_:uet});function pet(i,a=null,u=!1){let h=R(i,"x","logSumExp"),d=Zt(a,h.shape),g=Do(h,d,!0),x=Pt(h,g),w=Cn(x),k=Ft(w,d),C=ro(k),$=ke(K(g,C.shape),C);if(u){let F=dn($.shape,d);return K($,F)}return $}var Fh=O({logSumExp_:pet});function het(i,a){let u=R(i,"a","logicalAnd","bool"),h=R(a,"b","logicalAnd","bool");ne(u.shape,h.shape);let d={a:u,b:h};return V.runKernelFunc(g=>g.logicalAnd(u,h),d,null,h2)}var oo=O({logicalAnd_:het});function fet(i){let a=R(i,"x","logicalNot","bool"),u={x:a};return V.runKernelFunc(h=>h.logicalNot(a),u,null,f2)}var Ba=O({logicalNot_:fet});function det(i,a){let u=R(i,"a","logicalOr","bool"),h=R(a,"b","logicalOr","bool");ne(u.shape,h.shape);let d={a:u,b:h};return V.runKernelFunc(g=>g.logicalOr(u,h),d,null,d2)}var Rh=O({logicalOr_:det});function met(i,a){let u=R(i,"a","logicalXor","bool"),h=R(a,"b","logicalXor","bool");return ne(u.shape,h.shape),oo(Rh(i,a),Ba(oo(i,a)))}var sk=O({logicalXor_:met});function get(i,a,u,h,d){let g=R(i,"x","maxPool"),x=1,w=g,k=!1;g.rank===3&&(k=!0,w=K(g,[1,g.shape[0],g.shape[1],g.shape[2]])),U(w.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${w.rank}.`),U(Wn(u,x),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${u} and dilations '${x}'`),d!=null&&U(Te(h),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${d} but got pad ${h}.`);let C=(W,et)=>{let tt=ns(w.shape,a,u,1,h,d),G;return tt.filterWidth===1&&tt.filterHeight===1&&Fs(tt.inShape,tt.outShape)?G=w.clone():G=W.maxPool(w,tt),et([w,G]),G},$={x:w},F={filterSize:a,strides:u,pad:h,dimRoundingMode:d},_=V.runKernelFunc(C,$,null,Kg,F);return k?K(_,[_.shape[1],_.shape[2],_.shape[3]]):_}var Ph=O({maxPool_:get});function Hr(i,a="float32"){if(a==="complex64"){let h=Hr(i,"float32"),d=Hr(i,"float32");return no(h,d)}let u=Ia(an(i),a);return V.makeTensor(u,i,a)}function rs(i,a="float32"){if(a==="complex64"){let h=rs(i,"float32"),d=Hr(i,"float32");return no(h,d)}let u=eg(an(i),a);return V.makeTensor(u,i,a)}function yet(i,a=null,u=!1){let h=R(i,"x","mean"),d=Zt(a,h.shape),g=bD(h.shape,d),x=g[1],w=an(x),k={x:h},C={axis:a,keepDims:u},$=()=>{let _=Ot(w),W=_.dtype===h.dtype?h:wt(h,_.dtype),et=Wt(W,_);return Ft(et,a,u)},F=Dl(_=>{let W=V.runKernelFunc($,k,null,x2,C),et=tt=>{let G=_.shape.slice();d.forEach(gt=>{G[gt]=1});let mt=K(tt,G),lt=Wt(at(mt,rs(_.shape,"float32")),w);return lt};return{value:W,gradFunc:et}});return F(h)}var Oh=O({mean_:yet});function bet(i,a=null,u=!1){let h=R(i,"x","min"),d=(w,k)=>{let C=Zt(a,h.shape),$=C,F=Nn($,h.rank),_=h;F!=null&&(_=Ie(h,F),$=zn($.length,h.rank));let W=w.min(_,$);F!=null&&_.dispose();let et=W;if(u){let tt=dn(et.shape,C);et=K(W,tt),W.dispose()}return k([h,et]),et},g={x:h},x={axis:a,keepDims:u};return V.runKernelFunc(d,g,null,Xg,x)}var Al=O({min_:bet});function xet(i,a){let u=R(i,"a","minimum"),h=R(a,"b","minimum");[u,h]=he(u,h),u.dtype==="bool"&&(u=wt(u,"int32"),h=wt(h,"int32")),ne(u.shape,h.shape);let d=(x,w)=>{let k=x.minimum(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Yg)}var za=O({minimum_:xet});function wet(i,a,u){U(u==="reflect"||u==="symmetric",()=>`Invalid mode. Mode must be either reflect or symmetric. Got ${u}.`);let h=R(i,"x","mirrorPad");if(h.rank===0)throw new Error("mirrorPad(scalar) is not defined. Pass non-scalar to mirrorPad");U(a.length===h.rank,()=>`Padding doesn't match input. Must be ${h.rank}. Got ${a.length}.`);let d=u==="reflect"?1:0;for(let w=0;w"Invalid number of paddings. 
Must be length of 2 each."),U(a[w][0]>=0&&a[w][0]<=h.shape[w]-d&&a[w][1]>=0&&a[w][1]<=h.shape[w]-d,()=>`Padding in dimension ${w} cannot be greater than or equal to ${h.shape[w]-d} or less than 0 for input of shape ${h.shape}`);let g={paddings:a,mode:u},x={x:h};return V.runKernel(Jg,x,g)}var ik=O({mirrorPad_:wet});function vet(i,a){let u=R(i,"a","mod"),h=R(a,"b","mod");[u,h]=he(u,h);let d=(x,w)=>{let k=x.mod(u,h);return w([u,h]),k},g={a:u,b:h};return V.runKernelFunc(d,g,null,Zg)}var Lh=O({mod_:vet});function Tet(i){let a=R(i,"x","square"),u={},h=[a],d=[];return V.runKernelFunc((g,x)=>(x([a]),g.square(a)),{x:a},null,"Square",u,h,d)}var be=O({square_:Tet});function ket(i,a){let u=R(i,"a","notEqual"),h=R(a,"b","notEqual");[u,h]=he(u,h),ne(u.shape,h.shape);let d=x=>x.notEqual(u,h),g={a:u,b:h};return V.runKernelFunc(d,g,null,w2)}var Wa=O({notEqual_:ket});function Net(i){let a=R(i,"input","real"),u=d=>d.real(a),h={input:a};return V.runKernelFunc(u,h,null,C2)}var Ai=O({real_:Net});function _et(i){let a=R(i,"x","onesLike"),u=(d,g)=>{if(a.dtype==="complex64"){let x=Yy(Ai(a)),w=jt(La(a));return no(x,w)}return d.onesLike(a)},h={x:a};return V.runKernelFunc(u,h,null,ey)}var Yy=O({onesLike_:_et});function Cet(i,a,u=0){let h=R(i,"x","pad");if(h.rank===0)throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");let d=(w,k)=>(k([h]),w.pad(h,a,u)),g={paddings:a,constantValue:u},x={x:h};return V.runKernelFunc(d,x,null,ry,g)}var Mh=O({pad_:Cet});function $et(i,a,u){let h=R(i,"x","spaceToBatchND");U(h.rank>=1+a.length,()=>`input rank ${h.rank} should be > than [blockShape] ${a.length}`),U(u.length===a.length,()=>`paddings.shape[0] ${u.length} must be equal to [blockShape] ${a.length}`),U(h.shape.reduce((w,k,C)=>C>0&&C<=a.length?w&&(k+u[C-1][0]+u[C-1][1])%a[C-1]===0:w,!0),()=>`input spatial dimensions ${h.shape.slice(1)} with paddings ${u.toString()} must be divisible by blockShapes ${a.toString()}`);let d=w=>w.spaceToBatchND(h,a,u),g={x:h},x={blockShape:a,paddings:u};return V.runKernelFunc(d,g,null,_y,x)}var Va=O({spaceToBatchND_:$et});function Det(i,a,u,h,d,g){d==null&&(d=[1,1]),g==null&&(g=1),h===0&&(h="valid");let x=R(i,"x","maxPool"),w=x,k=!1;x.rank===3&&(k=!0,w=K(x,[1,x.shape[0],x.shape[1],x.shape[2]])),U(Wn(g,d),()=>`Error in pool: Either strides or dilations must be 1. 
Got strides ${g} and dilations '${d}'`);let C=ns(w.shape,a,g,d,h),$=[C.dilationHeight,C.dilationWidth],F;h==="same"?F=Eet([C.filterHeight,C.filterWidth],$):F=[[0,0],[0,0]];let _=$[0]===1&&$[1]===1,[W,et]=Iet([C.inHeight,C.inWidth],$,F),tt=_?h:"valid",G=_?w:Va(w,$,W),mt=u==="avg"?()=>Ch(G,a,g,tt):()=>Ph(G,a,g,tt),lt=mt(),gt=_?lt:Ra(lt,$,et);return k?K(gt,[gt.shape[1],gt.shape[2],gt.shape[3]]):gt}function Iet(i,a,u){let h=u.map($=>$[0]),d=u.map($=>$[1]),g=i.concat(h,d),x=a.map(($,F)=>($-g[F]%$)%$),w=d.map(($,F)=>$+x[F]),k=a.map(($,F)=>[h[F],w[F]]),C=a.map(($,F)=>[0,x[F]]);return[k,C]}function Eet(i,a){let u=i.map((x,w)=>x+(x-1)*(a[w]-1)),h=u.map(x=>x-1),d=h.map(x=>Math.floor(x/2)),g=h.map((x,w)=>x-d[w]);return h.map((x,w)=>[d[w],g[w]])}var ak=O({pool_:Det});function Aet(i,a){let u=R(i,"base","pow"),h=R(a,"exp","pow");[u,h]=he(u,h);let d={a:u,b:h},g=(x,w)=>{let k=x.pow(u,h);return w([u,h,k]),k};return V.runKernelFunc(g,d,null,oy)}var Ao=O({pow_:Aet});function Fet(i,a){let u=R(i,"x","prelu"),h=R(a,"alpha","prelu"),d=(x,w)=>{let k=x.prelu(u,h);return w([u,h]),k},g={x:u,alpha:h};return V.runKernelFunc(d,g,null,sy)}var ck=O({prelu_:Fet});function Ret(i,a=null,u=!1){let h=R(i,"x","prod");h.dtype==="bool"&&(h=wt(h,"int32"));let d=w=>{let k=Zt(a,h.shape),C=Nn(k,h.rank),$=k,F=h;C!=null&&(F=Ie(h,C),$=zn($.length,h.rank));let _=w.prod(F,$);if(u){let W=dn(_.shape,k);_=K(_,W)}return _},g={x:h},x={axis:a,keepDims:u};return V.runKernelFunc(d,g,null,N2,x)}var lk=O({prod_:Ret});function Fi(i,a){wl(i);let u=Ps(i,a);if(u.length!==1)throw new Error("tensor1d() requires values to be a flat/TypedArray");let h=null;return ts(i,h,u,a)}function Jy(i,a,u=1,h="float32"){if(u===0)throw new Error("Cannot have a step of zero");let d=()=>{let x=i===a,w=i1;if(x||w||k)return Hr([0],h);let C=Math.abs(Math.ceil((a-i)/u)),$=Ia(C,h);a{let g=h.reciprocal(a);return d([a]),g},u,null,iy)}var uk=O({reciprocal_:Pet});function Oet(i){let a=R(i,"x","relu"),u=(d,g)=>(g([a]),a.dtype==="bool"?wt(a,"int32"):d.relu(a)),h={x:a};return V.runKernelFunc(u,h,null,ay)}var Ga=O({relu_:Oet});function Let(i){let a=R(i,"x","relu6"),u=(d,g)=>(g([a]),a.dtype==="bool"?wt(a,"int32"):d.relu6(a)),h={x:a};return V.runKernelFunc(u,h,null,py)}var pk=O({relu6_:Let});function Met(i,a){let u=R(i,"x","reverse"),h=x=>{let w=Zt(a,u.shape);if(u.rank===0)return Eo(u);let k=x.reverse(u,w);return K(k,u.shape)},d={x:u},g={dims:a};return V.runKernelFunc(h,d,null,hy,g)}var Ri=O({reverse_:Met});function Bet(i){let a=R(i,"x","round"),u={x:a};return V.runKernelFunc(h=>h.round(a),u,null,fy)}var hk=O({round_:Bet});function zet(i){let a=R(i,"x","rsqrt"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.rsqrt(a);return d([a]),g},u,null,dy)}var Bh=O({rsqrt_:zet});function Wet(i){let a=R(i,"x","selu"),u=(d,g)=>{let x=d.selu(a);return g([a]),x},h={x:a};return V.runKernelFunc(u,h,null,gy)}var fk=O({selu_:Wet});function Vet(i,a,u,h,d,g=[1,1],x="NHWC"){let w=R(i,"x","separableConv2d"),k=R(a,"depthwiseFilter","separableConv2d"),C=R(u,"pointwiseFilter","separableConv2d"),$=w,F=!1;if(w.rank===3&&(F=!0,$=K(w,[1,w.shape[0],w.shape[1],w.shape[2]])),x==="NCHW")throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported");U($.rank===4,()=>`Error in separableConv2d: input must be rank 4, but got rank ${$.rank}.`),U(k.rank===4,()=>`Error in separableConv2d: depthwise filter must be rank 4, but got rank ${k.rank}.`),U(C.rank===4,()=>`Error in separableConv2d: pointwise filter must be rank 4, but got rank ${k.rank}.`),U(C.shape[0]===1,()=>`Error in 
separableConv2d: the first dimension of pointwise filter must be 1, but got ${C.shape[0]}.`),U(C.shape[1]===1,()=>`Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${C.shape[1]}.`);let _=k.shape[2],W=k.shape[3];U(C.shape[2]===_*W,()=>`Error in separableConv2d: the third dimension of pointwise filter must be ${_*W}, but got ${C.shape[2]}.`);let et=Oa($,k,h,d,x,g),tt=1,G=Bs(et,C,tt,"valid",x);return F?K(G,[G.shape[1],G.shape[2],G.shape[3]]):G}var dk=O({separableConv2d_:Vet});function Get(i){let a=R(i,"x","sign"),u={x:a};return V.runKernelFunc(h=>h.sign(a),u,null,wy)}var mk=O({sign_:Get});function Uet(i){let a=R(i,"x","sin"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.sin(a);return d([a]),g},u,null,by)}var zh=O({sin_:Uet});function qet(i){let a=R(i,"x","sinh"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.sinh(a);return d([a]),g},u,null,xy)}var Wh=O({sinh_:qet});function Het(i,a=-1){let u=R(i,"logits","softmax","float32");if(a===-1&&(a=u.rank-1),a!==u.rank-1)throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${u.rank} and dim was ${a}`);let h={logits:u},d={dim:a};return V.runKernelFunc((g,x)=>{let w=g.softmax(u,a);return x([w]),w},h,null,Sy,d)}var gk=O({softmax_:Het});function jet(i){U(i.dtype==="complex64",()=>`The dtype for tf.spectral.fft() must be complex64 but got ${i.dtype}.`);let a={input:i};return V.runKernelFunc(u=>{let h=i.shape[i.shape.length-1],d=i.size/h,g=i.as2D(d,h),x=u.fft(g);return x.reshape(i.shape)},a,null,o2)}var Ua=O({fft_:jet});function Ket(i){U(i.dtype==="complex64",()=>`The dtype for tf.spectral.ifft() must be complex64 but got ${i.dtype}.`);let a={input:i};return V.runKernelFunc(u=>{let h=i.shape[i.shape.length-1],d=i.size/h,g=K(i,[d,h]),x=u.ifft(g);return K(x,i.shape)},a,null,c2)}var Pi=O({ifft_:Ket});function Xet(i){let a=i.shape[i.shape.length-1],u=i.size/a,h;if(a<=2){let d=K(i,[u,a]);h=Pi(d)}else{let d=[u,2*(a-1)],g=K(Ai(i),[u,a]),x=K(La(i),[u,a]),w=Ri(ve(g,[0,1],[u,a-2]),1),k=at(Ri(ve(x,[0,1],[u,a-2]),1),Ot(-1)),C=mn([g,w],1),$=mn([x,k],1),F=K(no(C,$),[d[0],d[1]]);h=Pi(F)}if(h=Ai(h),i.rank===3&&i.shape[0]!==0){let d=h,g=i.shape[0];h=K(h,[g,h.shape[0]/g,h.shape[1]]),d.dispose()}return h}var Vh=O({irfft_:Xet});function _D(i,a,u=0){let h=[];if(typeof a=="number")U(i.shape[u]%a===0,()=>"Number of splits must evenly divide the axis."),h=new Array(a).fill(i.shape[u]/a);else{let d=a.reduce((x,w)=>(w===-1&&(x+=1),x),0);U(d<=1,()=>"There should be only one negative value in split array.");let g=a.indexOf(-1);if(g!==-1){let x=a.reduce((w,k)=>k>0?w+k:w);a[g]=i.shape[u]-x}U(i.shape[u]===a.reduce((x,w)=>x+w),()=>"The sum of sizes must match the size of the axis dimension."),h=a}return h}function Yet(i,a,u=0){let h=R(i,"x","split"),d=(w,k)=>{let C=Zt(u,h.shape)[0],$=_D(h,a,C);return w.split(h,$,C)},g={x:h},x={numOrSizeSplits:a,axis:u};return V.runKernelFunc(d,g,null,Cy,x)}var os=O({split_:Yet});function Jet(i,a){U(i.dtype==="float32",()=>`The dtype for rfft() must be real value but got ${i.dtype}`);let u=i.shape[i.shape.length-1],h=i.size/u,d;if(a!=null&&a0),tt=i.shape.map(G=>G);tt[i.shape.length-1]=a,d=ve(i,et,tt),u=a}else if(a!=null&&a>u){let et=i.shape.map(tt=>tt);et[i.shape.length-1]=a-u,d=mn([i,Hr(et)],i.shape.length-1),u=a}else d=i;let g=jt(d),x=K(no(d,g),[h,u]),w=Ua(x),k=Math.floor(u/2)+1,C=Ai(w),$=La(w),F=os(C,[k,u-k],C.shape.length-1),_=os($,[k,u-k],$.shape.length-1),W=d.shape.slice();return W[d.shape.length-1]=k,K(no(F[0],_[0]),W)}var qa=O({rfft_:Jet});function Zet(i){let 
a=R(i,"x","sqrt"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.sqrt(a);return d([a]),g},u,null,ky)}var Qn=O({sqrt_:Zet});function Qet(i,a){let u=R(i,"a","squaredDifference"),h=R(a,"b","squaredDifference");[u,h]=he(u,h),ne(u.shape,h.shape);let d=(w,k)=>{let C=w.squaredDifference(u,h);return k([u,h]),C},g={a:u,b:h},x={};return V.runKernelFunc(d,g,null,$y,x)}var Ha=O({squaredDifference_:Qet});function tnt(i,a){let u=R(i,"x","squeeze");return K(u,IE(u.shape,a).newShape)}var Gh=O({squeeze_:tnt});function ent(i,a=0){let u=Vy(i,"tensors","stack");if(U(u.length>=1,()=>"Pass at least one tensor to tf.stack"),u.length===1)return mr(u[0],a);let h=u[0].rank,d=u[0].shape,g=u[0].dtype;U(a<=h,()=>"Axis must be <= rank of the tensor"),u.forEach(w=>{ae(d,w.shape,"All tensors passed to stack must have matching shapes"),U(g===w.dtype,()=>"All tensors passed to stack must have matching dtypes")});let x=u.map(w=>mr(w,a));return mn(x,a)}var Cr=O({stack_:ent});function nnt(i,a=0){let u=R(i,"x","step"),h={x:u},d={alpha:a};return V.runKernelFunc(g=>g.step(u,a),h,null,Ly,d)}var Gs=O({step_:nnt});function rnt(i,a,u,h,d=0,g=0,x=0,w=0,k=0){let C=R(i,"x","stridedSlice"),$=W=>{h==null&&(h=new Array(a.length));let et=Uy(x);if(et.length>1)throw new Error("Multiple ellipses in slice is not allowed.");if(x!==0&&w!==0)throw new Error("Using both ellipsisMask and newAxisMask is not yet supported.");if(x!==0&&k!==0)throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported.");let tt=C.rank-a.length,G=Uy(w),mt=C.shape.slice();G.forEach(Vt=>{a[Vt]=0,u[Vt]=1,mt.splice(Vt,0,1)}),C=K(C,mt);let{begin:lt,end:gt,strides:_t}=mD(C.shape,et,tt,a,u,h,d,g,x);a=lt,u=gt,h=_t;let Gt=Uy(k);Gt.forEach(Vt=>{u[Vt]=a[Vt]+1,h[Vt]=1});let se=hD(a,u,h),fe=se.filter((Vt,ln)=>Gt.indexOf(ln)===-1),_e=h.every(Vt=>Vt===1);if(_e)return K(ve(C,a,se),fe);let Ge=W.stridedSlice(C,a,u,h);return K(Ge,fe)},F={x:C},_={begin:a,end:u,strides:h,beginMask:d,endMask:g,ellipsisMask:x,newAxisMask:w,shrinkAxisMask:k};return V.runKernelFunc($,F,null,E2,_)}var yk=O({stridedSlice_:rnt});function ont(i){let a=R(i,"x","tan"),u={x:a};return V.runKernelFunc((h,d)=>{let g=h.tan(a);return d([a]),g},u,null,Ey)}var bk=O({tan_:ont});function ja(i,a,u){if(wl(i),a!=null&&a.length!==2)throw new Error("tensor2d() requires shape to have two numbers");let h=Ps(i,u);if(h.length!==2&&h.length!==1)throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray");if(h.length===1&&a==null)throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray");return ts(i,a,h,u)}function snt(i,a=1,u=!0){let h=R(i,"x","topk");if(h.rank===0)throw new Error("topk() expects the input to be of rank 1 or higher");let d=h.shape[h.shape.length-1];if(a>d)throw new Error(`'k' passed to topk() must be <= the last dimension (${d}) but got ${a}`);let g={x:h},x={k:a,sorted:u},[w,k]=V.runKernelFunc(C=>C.topk(h,a,u),g,null,D2,x);return{values:w,indices:k}}var xk=O({topk_:snt});function int(i,a=0){let u=R(i,"x","unique",null);U(u.rank>0,()=>"The input tensor must be at least 1D");let h={x:u},d={axis:a},[g,x]=V.runKernel(A2,h,d);return{values:g,indices:x}}var wk=O({unique_:int});function ant(i,a,u){let h=R(i,"x","unsortedSegmentSum"),d=R(a,"segmentIds","unsortedSegmentSum","int32");U(Te(u),()=>"numSegments must be of dtype int");let g={x:h,segmentIds:d},x={numSegments:u},w=(k,C)=>{let $=k.unsortedSegmentSum(h,d,u);return C([d]),$};return V.runKernelFunc(w,g,null,Py,x)}var Uh=O({unsortedSegmentSum_:ant});function cnt(i,a=0){let 
u=R(i,"x","unstack");U(a>=-u.shape.length&&a`Axis = ${a} is not in [-${u.shape.length}, ${u.shape.length})`),a<0&&(a+=u.shape.length);let h={value:u},d={axis:a},g=x=>x.unstack(u,a);return V.runKernelFunc(g,h,null,Ry,d)}var Ka=O({unstack_:cnt});function lnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","notEqualStrict"),h=R(a,"b","notEqualStrict");return ae(u.shape,h.shape,"Error in notEqualStrict: "),Wa(u,h)}function unt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","lessStrict"),h=R(a,"b","lessStrict");return ae(u.shape,h.shape,"Error in lessStrict: "),Ma(u,h)}function pnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","equalStrict"),h=R(a,"b","equalStrict");return ae(u.shape,h.shape,"Error in equalStrict: "),zs(u,h)}function hnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","lessEqualStrict"),h=R(a,"b","lessEqualStrict");return ae(u.shape,h.shape,"Error in lessEqualStrict: "),qr(u,h)}function fnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","greaterStrict"),h=R(a,"b","greaterStrict");return ae(u.shape,h.shape,"Error in greaterStrict: "),gr(u,h)}function dnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","greaterEqualStrict"),h=R(a,"b","greaterEqualStrict");return ae(u.shape,h.shape,"Error in greaterEqualStrict: "),Ur(u,h)}var CD=O({equalStrict_:pnt}),SD=O({greaterEqualStrict_:dnt}),$D=O({greaterStrict_:fnt}),ID=O({lessEqualStrict_:hnt}),ED=O({lessStrict_:unt}),DD=O({notEqualStrict_:lnt});function mnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","addStrict"),h=R(a,"b","addStrict");return ae(u.shape,h.shape,"Error in addStrict: "),ke(u,h)}function gnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","subStrict"),h=R(a,"b","subStrict");return ae(u.shape,h.shape,"Error in subStrict: "),Pt(u,h)}function ynt(i,a){return kn("strict variants of ops have been deprecated and will be removed in future"),ae(i.shape,a.shape,"Error in powStrict: "),Ao(i,a)}function bnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","mul"),h=R(a,"b","mul");return ae(u.shape,h.shape,"Error in multiplyStrict: "),at(u,h)}function xnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","div"),h=R(a,"b","div");return ae(u.shape,h.shape,"Error in divideStrict: "),Wt(u,h)}function wnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","modStrict"),h=R(a,"b","modStrict");return ae(u.shape,h.shape,"Error in modStrict: "),Lh(u,h)}function vnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","minimumStrict"),h=R(a,"b","minimumStrict");return ae(u.shape,h.shape,"Error in minimumStrict: "),za(u,h)}function Tnt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","maximumStrict"),h=R(a,"b","maximumStrict");return ae(u.shape,h.shape,"Error in maximumStrict: "),Vs(u,h)}function knt(i,a){kn("strict variants of ops have been deprecated and will be removed in future");let u=R(i,"a","squaredDifferenceStrict"),h=R(a,"b","squaredDifferenceStrict");return ae(u.shape,h.shape,"Error in 
squaredDifferenceStrict: "),Ha(u,h)}var AD=O({addStrict_:mnt}),FD=O({divStrict_:xnt}),RD=O({maximumStrict_:Tnt}),PD=O({minimumStrict_:vnt}),OD=O({modStrict_:wnt}),LD=O({mulStrict_:bnt}),MD=O({powStrict_:ynt}),BD=O({squaredDifferenceStrict_:knt}),zD=O({subStrict_:gnt});function Nnt(i,a="euclidean",u=null,h=!1){i=R(i,"x","norm");let d=WD(i,a,u),g=d.shape;if(h){let x=Zt(u,i.shape);g=dn(d.shape,x)}return K(d,g)}function WD(i,a,u=null){if(i.rank===0)return Bn(i);if(i.rank!==1&&u===null)return WD(K(i,[-1]),a,u);if(i.rank===1||typeof u=="number"||Array.isArray(u)&&u.length===1){if(a===1)return Ft(Bn(i),u);if(a===Infinity)return Do(Bn(i),u);if(a===-Infinity)return Al(Bn(i),u);if(a==="euclidean"||a===2)return Qn(Ft(Ao(Bn(i),Ot(2,"int32")),u));throw new Error(`Error in norm: invalid ord value: ${a}`)}if(Array.isArray(u)&&u.length===2){if(a===1)return Do(Ft(Bn(i),u[0]),u[1]-1);if(a===Infinity)return Do(Ft(Bn(i),u[1]),u[0]);if(a===-Infinity)return Al(Ft(Bn(i),u[1]),u[0]);if(a==="fro"||a==="euclidean")return Qn(Ft(be(i),u));throw new Error(`Error in norm: invalid ord value: ${a}`)}throw new Error(`Error in norm: invalid axis: ${u}`)}var Fl=O({norm_:Nnt});function VD(i){return Math.floor(Math.pow(2,Math.ceil(Math.log(i)/Math.log(2))))}function Zy(i,a,u){let h=1-i%2,d=new Float32Array(i);for(let g=0;g`Error in conv2dDerFilter: input must be rank 4, but got shape ${w.shape}.`),U(k.rank===4,()=>`Error in conv2dDerFilter: dy must be rank 4, but got shape ${k.shape}.`),U(u.length===4,()=>`Error in conv2dDerFilter: filterShape must be length 4, but got ${u}.`);let C=g==="NHWC"?w.shape[3]:w.shape[1],$=g==="NHWC"?k.shape[3]:k.shape[1];U(C===u[2],()=>`Error in conv2dDerFilter: depth of input ${C}) must match input depth in filter (${u[2]}.`),U($===u[3],()=>`Error in conv2dDerFilter: depth of dy (${$}) must match output depth for filter (${u[3]}).`),x!=null&&U(Te(d),()=>`Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${x} but got pad ${d}.`);let F=et=>{let tt=1,G=Il(g),mt=Gr(w.shape,u,h,tt,d,x,!1,G);return et.conv2dDerFilter(w,k,mt)},_={x:w,dy:k},W={strides:h,pad:d,dataFormat:g,dimRoundingMode:x,filterShape:u};return V.runKernelFunc(F,_,null,HE,W)}var Qy=O({conv2DBackpropFilter_:_nt});function Cnt(i,a,u,h,d,g=[1,1],x){let w=i;i.rank===3&&(w=K(i,[1,i.shape[0],i.shape[1],i.shape[2]]));let k=a;k.rank===3&&(k=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]));let C=_=>{let W=Gr(i.shape,u,h,g,d,x,!0);return _.depthwiseConv2DDerFilter(w,k,W)},$={x:w,dy:k},F={strides:h,pad:d,dimRoundingMode:x,dilations:g,filterShape:u};return V.runKernelFunc(C,$,null,ZE,F)}var GD=O({depthwiseConv2dNativeBackpropFilter_:Cnt});function Snt(i,a,u,h,d,g=[1,1],x){let w=a,k=!1;a.rank===3&&(k=!0,w=K(a,[1,a.shape[0],a.shape[1],a.shape[2]]));let C=W=>{let et=Gr(i,u.shape,h,g,d,x,!0);return W.depthwiseConv2DDerInput(w,u,et)},$={dy:w,filter:u},F={strides:h,pad:d,dimRoundingMode:x,dilations:g,inputShape:i},_=V.runKernelFunc(C,$,null,QE,F);return k?K(_,[_.shape[1],_.shape[2],_.shape[3]]):_}var UD=O({depthwiseConv2dNativeBackpropInput_:Snt});function $nt(i){return Zy(i,.54,.46)}var qD=O({hammingWindow_:$nt});function Int(i){return Zy(i,.5,.5)}var tb=O({hannWindow_:Int});function Ent(i,a,u,h=!1,d=0){let g=0,x=[];for(;g+a<=i.size;)x.push(ve(i,g,a)),g+=u;if(h)for(;g`Error in cropAndResize: image must be rank 4,but got rank ${x.rank}.`),U(w.rank===2&&w.shape[1]===4,()=>`Error in cropAndResize: boxes must be have size [${C},4] but had shape ${w.shape}.`),U(k.rank===1&&k.shape[0]===C,()=>`Error in cropAndResize: boxInd must be have 
size [${C}] but had shape ${w.shape}.`),U(h.length===2,()=>`Error in cropAndResize: cropSize must be of length 2, but got length ${h.length}.`),U(h[0]>=1&&h[1]>=1,()=>`cropSize must be atleast [1,1], but was ${h}`),U(d==="bilinear"||d==="nearest",()=>`method must be bilinear or nearest, but was ${d}`);let $=et=>et.cropAndResize(x,w,k,h,d,g),F={image:x,boxes:w,boxInd:k},_={method:d,extrapolationValue:g,cropSize:h},W=V.runKernelFunc($,F,null,YE,_);return W}var jD=O({cropAndResize_:Ant});function Fnt(i){let a=R(i,"image","flipLeftRight","float32");U(a.rank===4,()=>`Error in flipLeftRight: image must be rank 4,but got rank ${a.rank}.`);let u={image:a},h=V.runKernel(i2,u,{});return h}var KD=O({flipLeftRight_:Fnt});function Rnt(i,a,u=0,h=.5){let d=R(i,"image","rotateWithOffset","float32");U(d.rank===4,()=>`Error in rotateWithOffset: image must be rank 4,but got rank ${d.rank}.`);let g={image:d},x={radians:a,fillValue:u,center:h},w=V.runKernel(F2,g,x);return w}var XD=O({rotateWithOffset_:Rnt});function so(i,a,u,h,d,g){h==null&&(h=.5),d==null&&(d=Number.NEGATIVE_INFINITY),g==null&&(g=0);let x=i.shape[0];return u=Math.min(u,x),U(0<=h&&h<=1,()=>`iouThreshold must be in [0, 1], but was '${h}'`),U(i.rank===2,()=>`boxes must be a 2D tensor, but was of rank '${i.rank}'`),U(i.shape[1]===4,()=>`boxes must have 4 columns, but 2nd dimension was ${i.shape[1]}`),U(a.rank===1,()=>"scores must be a 1D tensor"),U(a.shape[0]===x,()=>`scores has incompatible shape with boxes. Expected ${x}, but was ${a.shape[0]}`),U(0<=g&&g<=1,()=>`softNmsSigma must be in [0, 1], but was '${g}'`),{maxOutputSize:u,iouThreshold:h,scoreThreshold:d,softNmsSigma:g}}function Pnt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY){let g=R(i,"boxes","nonMaxSuppression"),x=R(a,"scores","nonMaxSuppression"),w=so(g,x,u,h,d);u=w.maxOutputSize,h=w.iouThreshold,d=w.scoreThreshold;let k={maxOutputSize:u,iouThreshold:h,scoreThreshold:d};return V.runKernelFunc(C=>C.nonMaxSuppression(g,x,u,h,d),{boxes:g,scores:x},null,v2,k)}var YD=O({nonMaxSuppression_:Pnt});function JD(i,a,u){let h=Ont(i,a,u),d=h<0?-(h+1):h;i.splice(d,0,a)}function Ont(i,a,u){return Mnt(i,a,u||Lnt)}function Lnt(i,a){return i>a?1:i>>1);let w=u(a,i[g]);w>0?h=g+1:(d=g,x=!w)}return x?h:-h-1}function ZD(i,a,u,h,d){return vk(i,a,u,h,d,0).selectedIndices}function QD(i,a,u,h,d,g){return vk(i,a,u,h,d,0,!1,g,!0)}function tA(i,a,u,h,d,g){return vk(i,a,u,h,d,g,!0)}function vk(i,a,u,h,d,g,x=!1,w=!1,k=!1){let C=[];for(let G=0;Gd&&C.push({score:a[G],boxIndex:G,suppressBeginIndex:0});C.sort(eA);let $=g>0?-.5/g:0,F=[],_=[];for(;F.length0;){let G=C.pop(),{score:mt,boxIndex:lt,suppressBeginIndex:gt}=G;if(mt=gt;--Gt){let se=Bnt(i,lt,F[Gt]);if(se>=h){_t=!0;break}if(G.score=G.score*znt(h,$,se),G.score<=d)break}G.suppressBeginIndex=F.length,_t||(G.score===mt?(F.push(lt),_.push(G.score)):G.score>d&&JD(C,G,eA))}let W=F.length,et=u-W;w&&et>0&&(F.push(...new Array(et).fill(0)),_.push(...new Array(et).fill(0)));let tt={selectedIndices:Fi(F,"int32")};return x&&(tt.selectedScores=Fi(_,"float32")),k&&(tt.validOutputs=Ot(W,"int32")),tt}function Bnt(i,a,u){let h=i.subarray(a*4,a*4+4),d=i.subarray(u*4,u*4+4),g=Math.min(h[0],h[2]),x=Math.min(h[1],h[3]),w=Math.max(h[0],h[2]),k=Math.max(h[1],h[3]),C=Math.min(d[0],d[2]),$=Math.min(d[1],d[3]),F=Math.max(d[0],d[2]),_=Math.max(d[1],d[3]),W=(w-g)*(k-x),et=(F-C)*(_-$);if(W<=0||et<=0)return 0;let tt=Math.max(g,C),G=Math.max(x,$),mt=Math.min(w,F),lt=Math.min(k,_),gt=Math.max(mt-tt,0)*Math.max(lt-G,0);return gt/(W+et-gt)}function znt(i,a,u){let h=Math.exp(a*u*u);return u<=i?h:0}function 
eA(i,a){return i.score-a.score||i.score===a.score&&a.boxIndex-i.boxIndex}async function Wnt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY){let g=R(i,"boxes","nonMaxSuppressionAsync"),x=R(a,"scores","nonMaxSuppressionAsync"),w=so(g,x,u,h,d);u=w.maxOutputSize,h=w.iouThreshold,d=w.scoreThreshold;let k=await Promise.all([g.data(),x.data()]),C=k[0],$=k[1],F=ZD(C,$,u,h,d);return g!==i&&g.dispose(),x!==a&&x.dispose(),F}var nA=Wnt;function Vnt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY,g=0){let x=R(i,"boxes","nonMaxSuppression"),w=R(a,"scores","nonMaxSuppression"),k=so(x,w,u,h,d,g);u=k.maxOutputSize,h=k.iouThreshold,d=k.scoreThreshold,g=k.softNmsSigma;let C={boxes:x,scores:w},$={maxOutputSize:u,iouThreshold:h,scoreThreshold:d,softNmsSigma:g},F=V.runKernel(k2,C,$);return{selectedIndices:F[0],selectedScores:F[1]}}var rA=O({nonMaxSuppressionWithScore_:Vnt});async function Gnt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY,g=0){let x=R(i,"boxes","nonMaxSuppressionAsync"),w=R(a,"scores","nonMaxSuppressionAsync"),k=so(x,w,u,h,d,g);u=k.maxOutputSize,h=k.iouThreshold,d=k.scoreThreshold,g=k.softNmsSigma;let C=await Promise.all([x.data(),w.data()]),$=C[0],F=C[1],_=tA($,F,u,h,d,g);return x!==i&&x.dispose(),w!==a&&w.dispose(),_}var oA=Gnt;function Unt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY,g=!1){let x=R(i,"boxes","nonMaxSuppression"),w=R(a,"scores","nonMaxSuppression"),k=so(x,w,u,h,d,null),C=k.maxOutputSize,$=k.iouThreshold,F=k.scoreThreshold,_={boxes:x,scores:w},W={maxOutputSize:C,iouThreshold:$,scoreThreshold:F,padToMaxOutputSize:g},et=V.runKernel(T2,_,W);return{selectedIndices:et[0],validOutputs:et[1]}}var sA=O({nonMaxSuppressionPadded_:Unt});async function qnt(i,a,u,h=.5,d=Number.NEGATIVE_INFINITY,g=!1){let x=R(i,"boxes","nonMaxSuppressionAsync"),w=R(a,"scores","nonMaxSuppressionAsync"),k=so(x,w,u,h,d,null),C=k.maxOutputSize,$=k.iouThreshold,F=k.scoreThreshold,[_,W]=await Promise.all([x.data(),w.data()]),et=QD(_,W,C,$,F,g);return x!==i&&x.dispose(),w!==a&&w.dispose(),et}var iA=qnt;function Hnt(i,a,u=!1){let h=R(i,"images","resizeBilinear");U(h.rank===3||h.rank===4,()=>`Error in resizeBilinear: x must be rank 3 or 4, but got rank ${h.rank}.`),U(a.length===2,()=>`Error in resizeBilinear: new shape must 2D, but got shape ${a}.`);let d=h,g=!1;h.rank===3&&(g=!0,d=K(h,[1,h.shape[0],h.shape[1],h.shape[2]]));let[x,w]=a,k=(_,W)=>(W([d]),_.resizeBilinear(d,x,w,u)),C={images:d},$={alignCorners:u,size:a},F=V.runKernelFunc(k,C,null,uy,$);return g?K(F,[F.shape[1],F.shape[2],F.shape[3]]):F}var nb=O({resizeBilinear_:Hnt});function jnt(i,a,u=!1){let h=R(i,"images","resizeNearestNeighbor");U(h.rank===3||h.rank===4,()=>`Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${h.rank}.`),U(a.length===2,()=>`Error in resizeNearestNeighbor: new shape must 2D, but got shape ${a}.`),U(h.dtype==="float32"||h.dtype==="int32",()=>"`images` must have `int32` or `float32` as dtype");let d=h,g=!1;h.rank===3&&(g=!0,d=K(h,[1,h.shape[0],h.shape[1],h.shape[2]]));let[x,w]=a,k={images:d},C={alignCorners:u,size:a},$=(_,W)=>(W([d]),_.resizeNearestNeighbor(d,x,w,u)),F=V.runKernelFunc($,k,null,ly,C);return g?K(F,[F.shape[1],F.shape[2],F.shape[3]]):F}var rb=O({resizeNearestNeighbor_:jnt});function Knt(i,a,u){U(a%1===0,()=>`bandPart(): numLower must be an integer, got ${a}.`),U(u%1===0,()=>`bandPart(): numUpper must be an integer, got ${u}.`);let h=R(i,"a","bandPart");U(h.rank>=2,()=>`bandPart(): Rank must be at least 2, got ${h.rank}.`);let d=h.shape,[g,x]=h.shape.slice(-2);if(!(a<=g))throw new Error(`bandPart(): numLower (${a}) must not be greater than 
the number of rows (${g}).`);if(!(u<=x))throw new Error(`bandPart(): numUpper (${u}) must not be greater than the number of columns (${x}).`);a<0&&(a=g),u<0&&(u=x);let w=K(Jy(0,g,1,"int32"),[-1,1]),k=Jy(0,x,1,"int32"),C=Pt(w,k),$=oo(qr(C,Ot(+a,"int32")),Ur(C,Ot(-u,"int32"))),F=Hr([g,x],h.dtype);return K(Cr(Ka(K(h,[-1,g,x])).map(_=>_n($,_,F))),d)}var aA=O({bandPart_:Knt});function Xnt(i){let a;if(Array.isArray(i)){a=!1,U(i!=null&&i.length>0,()=>"Gram-Schmidt process: input must not be null, undefined, or empty");let d=i[0].shape[0];for(let g=1;g`Gram-Schmidt: Non-unique lengths found in the input vectors: (${i[g].shape[0]} vs. ${d})`)}else a=!0,i=os(i,i.shape[0],0).map(d=>Gh(d,[0]));U(i.length<=i[0].shape[0],()=>`Gram-Schmidt: Number of vectors (${i.length}) exceeds number of dimensions (${i[0].shape[0]}).`);let u=[],h=i;for(let d=0;d{let g=h[d];if(d>0)for(let x=0;x=2,()=>`qr() requires input tensor to have a rank >= 2, but got rank ${i.rank}`),i.rank===2)return lA(i,a);{let u=i.shape.slice(0,i.shape.length-2).reduce((k,C)=>k*C),h=Ka(K(i,[u,i.shape[i.shape.length-2],i.shape[i.shape.length-1]]),0),d=[],g=[];h.forEach(k=>{let[C,$]=lA(k,a);d.push(C),g.push($)});let x=K(Cr(d,0),i.shape),w=K(Cr(g,0),i.shape);return[x,w]}}function lA(i,a=!1){return V.tidy(()=>{U(i.shape.length===2,()=>`qr2d() requires a 2D Tensor, but got a ${i.shape.length}D Tensor.`);let u=i.shape[0],h=i.shape[1],d=YT(u),g=Eo(i),x=ja([[1]],[1,1]),w=Eo(x),k=u>=h?h:u;for(let C=0;C{let W=ve(g,[C,C],[u-C,1]),et=Fl(W),tt=ve(g,[C,C],[1,1]),G=_n(gr(tt,0),ja([[-1]]),ja([[1]])),mt=Pt(tt,at(G,et)),lt=Wt(W,mt);lt.shape[0]===1?w=Eo(x):w=mn([x,ve(lt,[1,0],[lt.shape[0]-1,lt.shape[1]])],0);let gt=me(Wt(We(G,mt),et)),_t=ve(g,[C,0],[u-C,h]),Gt=at(gt,w),se=Ie(w);if(C===0)g=Pt(_t,We(Gt,We(se,_t)));else{let Ge=Pt(_t,We(Gt,We(se,_t)));g=mn([ve(g,[0,0],[C,h]),Ge],0)}let fe=Ie(Gt),_e=ve(d,[0,C],[u,d.shape[1]-C]);if(C===0)d=Pt(_e,We(We(_e,w),fe));else{let Ge=Pt(_e,We(We(_e,w),fe));d=mn([ve(d,[0,0],[u,C]),Ge],1)}return[w,g,d]}),yD([$,F,_])}return!a&&u>h&&(d=ve(d,[0,0],[u,h]),g=ve(g,[0,0],[h,h])),[d,g]})}var uA=O({qr_:Ynt});var cn;(function(i){i[i.NONE=0]="NONE",i[i.MEAN=1]="MEAN",i[i.SUM=2]="SUM",i[i.SUM_BY_NONZERO_WEIGHTS=3]="SUM_BY_NONZERO_WEIGHTS"})(cn||(cn={}));function Jnt(i,a,u=cn.SUM_BY_NONZERO_WEIGHTS){let h=R(i,"losses","computeWeightedLoss"),d=null;a!=null&&(d=R(a,"weights","computeWeightedLoss"));let g=d==null?h:at(h,d);if(u===cn.NONE)return g;if(u===cn.SUM)return Ft(g);if(u===cn.MEAN){if(d==null)return Oh(g);{let x=h.size/d.size,w=Wt(Ft(g),Ft(d));return x>1?Wt(w,Ot(x)):w}}if(u===cn.SUM_BY_NONZERO_WEIGHTS){if(d==null)return Wt(Ft(g),Ot(h.size));{let x=at(d,rs(h.shape)),w=wt(Ft(Wa(x,Ot(0))),"float32");return Wt(Ft(g),w)}}throw Error(`Unknown reduction: ${u}`)}var Vn=O({computeWeightedLoss_:Jnt});function Znt(i,a,u,h=cn.SUM_BY_NONZERO_WEIGHTS){let d=R(i,"labels","absoluteDifference"),g=R(a,"predictions","absoluteDifference"),x=null;u!=null&&(x=R(u,"weights","absoluteDifference")),ae(d.shape,g.shape,"Error in absoluteDifference: ");let w=Bn(Pt(d,g));return Vn(w,x,h)}var pA=O({absoluteDifference_:Znt});function Qnt(i,a,u,h,d=cn.SUM_BY_NONZERO_WEIGHTS){let g=R(i,"labels","cosineDistance"),x=R(a,"predictions","cosineDistance"),w=null;h!=null&&(w=R(h,"weights","cosineDistance")),ae(g.shape,x.shape,"Error in cosineDistance: ");let k=Ot(1),C=Pt(k,Ft(at(g,x),u,!0));return Vn(C,w,d)}var hA=O({cosineDistance_:Qnt});function trt(i,a,u,h=cn.SUM_BY_NONZERO_WEIGHTS){let 
d=R(i,"labels","hingeLoss"),g=R(a,"predictions","hingeLoss"),x=null;u!=null&&(x=R(u,"weights","hingeLoss")),ae(d.shape,g.shape,"Error in hingeLoss: ");let w=Ot(1);d=Pt(at(Ot(2),d),w);let k=Ga(Pt(w,at(d,g)));return Vn(k,x,h)}var fA=O({hingeLoss_:trt});function ert(i,a,u,h=1,d=cn.SUM_BY_NONZERO_WEIGHTS){let g=R(i,"labels","huberLoss"),x=R(a,"predictions","huberLoss"),w=null;u!=null&&(w=R(u,"weights","huberLoss")),ae(g.shape,x.shape,"Error in huberLoss: ");let k=Ot(h),C=Bn(Pt(x,g)),$=za(C,k),F=Pt(C,$),_=ke(at(Ot(.5),be($)),at(k,F));return Vn(_,w,d)}var dA=O({huberLoss_:ert});function nrt(i,a,u,h=1e-7,d=cn.SUM_BY_NONZERO_WEIGHTS){let g=R(i,"labels","logLoss"),x=R(a,"predictions","logLoss"),w=null;u!=null&&(w=R(u,"weights","logLoss")),ae(g.shape,x.shape,"Error in logLoss: ");let k=Ot(1),C=Ot(h),$=me(at(g,ro(ke(x,C)))),F=at(Pt(k,g),ro(ke(Pt(k,x),C))),_=Pt($,F);return Vn(_,w,d)}var mA=O({logLoss_:nrt});function rrt(i,a,u,h=cn.SUM_BY_NONZERO_WEIGHTS){let d=R(i,"labels","meanSquaredError"),g=R(a,"predictions","meanSquaredError"),x=null;u!=null&&(x=R(u,"weights","meanSquaredError")),ae(d.shape,g.shape,"Error in meanSquaredError: ");let w=Ha(d,g);return Vn(w,x,h)}var gA=O({meanSquaredError_:rrt});function ort(i,a){let u=R(i,"labels","sigmoidCrossEntropyWithLogits"),h=R(a,"logits","sigmoidCrossEntropyWithLogits");ae(u.shape,h.shape,"Error in sigmoidCrossEntropyWithLogits: ");let d=Ga(h),g=at(h,u),x=Dh(Cn(me(Bn(h))));return ke(Pt(d,g),x)}function srt(i,a,u,h=0,d=cn.SUM_BY_NONZERO_WEIGHTS){let g=R(i,"multiClassLabels","sigmoidCrossEntropy"),x=R(a,"logits","sigmoidCrossEntropy"),w=null;if(u!=null&&(w=R(u,"weights","sigmoidCrossEntropy")),ae(g.shape,x.shape,"Error in sigmoidCrossEntropy: "),h>0){let C=Ot(h),$=Ot(1),F=Ot(.5);g=ke(at(g,Pt($,C)),at(F,C))}let k=ort(g,x);return Vn(k,w,d)}var yA=O({sigmoidCrossEntropy_:srt});function irt(i,a,u=-1){if(u===-1&&(u=a.rank-1),u!==a.rank-1)throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. 
Labels / logits was rank ${a.rank} and dim was ${u}`);let h=Dl((d,g,x)=>{let w=!0,k=Fh(g,[u],w),C=Pt(wt(g,"float32"),k);x([d,C]);let $=me(at(C,d)),F=Ft($,[u]),_=(W,et)=>{let[tt,G]=et,mt=dn(W.shape,[u]);return[at(K(W,mt),Pt(wt(tt,"float32"),Cn(G))),at(K(W,mt),Pt(Cn(G),wt(tt,"float32")))]};return{value:F,gradFunc:_}});return h(i,a)}function art(i,a,u,h=0,d=cn.SUM_BY_NONZERO_WEIGHTS){let g=R(i,"onehotLabels","softmaxCrossEntropy"),x=R(a,"logits","softmaxCrossEntropy"),w=null;if(u!=null&&(w=R(u,"weights","softmaxCrossEntropy")),ae(g.shape,x.shape,"Error in softmaxCrossEntropy: "),h>0){let C=Ot(h),$=Ot(1),F=Ot(g.shape[1]);g=ke(at(g,Pt($,C)),Wt(C,F))}let k=irt(g,x);return Vn(k,w,d)}var bA=O({softmaxCrossEntropy_:art});var TRt={fft:Ua,ifft:Pi,rfft:qa,irfft:Vh},kRt={hammingWindow:qD,hannWindow:tb,frame:eb,stft:HD},xA={flipLeftRight:KD,resizeNearestNeighbor:rb,resizeBilinear:nb,rotateWithOffset:XD,cropAndResize:jD,nonMaxSuppression:YD,nonMaxSuppressionAsync:nA,nonMaxSuppressionWithScore:rA,nonMaxSuppressionWithScoreAsync:oA,nonMaxSuppressionPadded:sA,nonMaxSuppressionPaddedAsync:iA},NRt={bandPart:aA,gramSchmidt:cA,qr:uA},_Rt={absoluteDifference:pA,computeWeightedLoss:Vn,cosineDistance:hA,hingeLoss:fA,huberLoss:dA,logLoss:mA,meanSquaredError:gA,sigmoidCrossEntropy:yA,softmaxCrossEntropy:bA};var wA=1.7580993408473768,vA=1.0507009873554805;var TA={kernelName:og,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,Gs(wt(u,"float32"),-1))}}};var kA={kernelName:sg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>{let h=be(wt(u,"float32")),d=Qn(Pt(Ot(1),h));return me(Wt(i,d))}}}};var NA={kernelName:ig,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>{let h=Qn(Pt(be(wt(u,"float32")),1));return Wt(i,h)}}}};var _A={kernelName:Tl,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=i,k=Be(u.shape,d);return k.length>0&&(w=Ft(w,k)),K(w,u.shape)},x=()=>{let w=i,k=Be(h.shape,d);return k.length>0&&(w=Ft(w,k)),K(w,h.shape)};return{a:g,b:x}}};var CA={kernelName:BE,saveAllInputs:!0,gradFunc:(i,a)=>{let u={};return a.forEach((h,d)=>{u[d]=()=>i.clone()}),u}};var SA={kernelName:ag,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>jt(u)}}};var $A={kernelName:cg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>jt(u)}}};var IA={kernelName:lg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,Qn(Pt(Ot(1),be(wt(u,"float32")))))}}};var EA={kernelName:ug,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>{let h=Qn(ke(Ot(1),be(wt(u,"float32"))));return Wt(i,h)}}}};var DA={kernelName:fg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=ke(be(u),be(h)),k=at(i,Wt(h,w)),C=Be(u.shape,d);return C.length>0&&(k=Ft(k,C)),K(k,u.shape)},x=()=>{let w=ke(be(u),be(h)),k=me(at(i,Wt(u,w))),C=Be(h.shape,d);return C.length>0&&(k=Ft(k,C)),K(k,h.shape)};return{a:g,b:x}}};var AA={kernelName:pg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,ke(be(wt(u,"float32")),1))}}};var FA={kernelName:hg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,Pt(Ot(1),be(wt(u,"float32"))))}}};function crt(i,a,u,h,d=[1,1,1],g,x){let w=R(i,"dy","avgPool3dBackprop"),k=R(a,"input","avgPool3dBackprop"),C=w,$=k,F=!1;k.rank===4&&(F=!0,C=K(w,[1,w.shape[0],w.shape[1],w.shape[2],w.shape[3]]),$=K(k,[1,k.shape[0],k.shape[1],k.shape[2],k.shape[3]])),U(C.rank===5,()=>`Error in avgPool3dBackprop: dy must be rank 5 but got rank ${C.rank}.`),U($.rank===5,()=>`Error in avgPool3dBackprop: input must be rank 5 but got rank 
${$.rank}.`),U(Wn(h,d),()=>`Error in avgPool3dBackprop: Either strides or dilations must be 1. Got strides ${h} and dilations '${d}'`),x!=null&&U(Te(g),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${x} but got pad ${g}.`);let _=G=>{let mt=jy($.shape,u,h,d,g,x);return G.avgPool3dBackprop(C,$,mt)},W={dy:C,input:$},et={filterSize:u,strides:h,dilations:d,pad:g,dimRoundingMode:x},tt=V.runKernelFunc(_,W,null,UE,et);return F?K(tt,[tt.shape[1],tt.shape[2],tt.shape[3],tt.shape[4]]):tt}var RA=O({avgPool3dBackprop_:crt});var PA={kernelName:GE,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{filterSize:d,strides:g,dilations:x,pad:w,dimRoundingMode:k}=u,C=x==null?[1,1,1]:x;return{x:()=>RA(i,h,d,g,C,w,k)}}};function lrt(i,a,u,h,d){let g=R(i,"dy","avgPoolBackprop"),x=R(a,"input","avgPoolBackprop");U(x.rank===g.rank,()=>`Rank of input (${x.rank}) does not match rank of dy (${g.rank})`);let w=x,k=g,C=!1;x.rank===3&&(C=!0,w=K(x,[1,x.shape[0],x.shape[1],x.shape[2]]),k=K(g,[1,g.shape[0],g.shape[1],g.shape[2]])),U(k.rank===4,()=>`Error in avgPoolBackprop: dy must be rank 4 but got rank ${k.rank}.`),U(w.rank===4,()=>`Error in avgPoolBackprop: input must be rank 4 but got rank ${w.rank}.`);let $=et=>{let tt=ns(w.shape,u,h,1,d);return et.avgPoolBackprop(k,w,tt)},F={dy:k,input:w},_={filterSize:u,strides:h,pad:d},W=V.runKernelFunc($,F,null,VE,_);return C?K(W,[W.shape[1],W.shape[2],W.shape[3]]):W}var OA=O({avgPoolBackprop_:lrt});var LA={kernelName:dg,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{filterSize:d,strides:g,pad:x}=u;return{x:()=>OA(i,h,d,g,x)}}};var MA={kernelName:mg,inputsToSave:["a","b"],gradFunc:(i,a,u)=>{let[h,d]=a,{transposeA:g,transposeB:x}=u;return!g&&!x?{a:()=>We(i,d,!1,!0),b:()=>We(h,i,!0,!1)}:!g&&x?{a:()=>We(i,d,!1,!1),b:()=>We(i,h,!0,!1)}:g&&!x?{a:()=>We(d,i,!1,!0),b:()=>We(h,i,!1,!1)}:{a:()=>We(d,i,!0,!0),b:()=>We(i,h,!0,!0)}}};var BA={kernelName:gg,gradFunc:(i,a,u)=>{let{blockShape:h,crops:d}=u;return{x:()=>Va(i,h,d)}}};var zA={kernelName:yg,gradFunc:(i,a,u)=>{let h=u,d=h.inputShape,g=h.shape,x=Array.from(g);for(let k=d.length-1;k>=0;k--)if(d[k]===g[k])x[k]=1;else if(d[k]!==1)throw new Error(`broadcastTo(): [${d}] cannot be broadcast to [${g}].`);let w=[];for(let k=0;k1&&w.push(k);return{x:()=>Ft(i,w,!0)}}};var WA={kernelName:kl,gradFunc:i=>({x:()=>i.clone()})};var VA={kernelName:bg,gradFunc:i=>({x:()=>jt(i)})};var GA={kernelName:xg,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{clipValueMin:d,clipValueMax:g}=u;return{x:()=>_n(oo(Ur(h,d),qr(h,g)),i,jt(i))}}};var UA={kernelName:wg,saveAllInputs:!0,gradFunc:(i,a,u)=>{let h=a.map(k=>k.shape),{axis:d}=u,g=Zt(d,a[0].shape)[0],x=h.map(k=>k[g]),w=os(i,x,g);return w.map(k=>()=>k)}};var qA={kernelName:vg,inputsToSave:["x","filter"],gradFunc:(i,a,u)=>{let[h,d]=a,{dilations:g,strides:x,pad:w,dataFormat:k}=u;return U(Di(g),()=>`Error in gradient of conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${g}'`),{x:()=>Xy(h.shape,i,d,x,w,k),filter:()=>Qy(h,i,d.shape,x,w,k)}}};var HA={kernelName:Tg,inputsToSave:["dy","filter"],gradFunc:(i,a,u)=>{let[h,d]=a,{strides:g,pad:x,dataFormat:w,dimRoundingMode:k}=u;return{dy:()=>Bs(i,d,g,x,w,1,k),filter:()=>Qy(i,h,d.shape,g,x,w,k)}}};function urt(i,a,u,h,d){let g=i;i.rank===4&&(g=K(i,[1,i.shape[0],i.shape[1],i.shape[2],i.shape[3]]));let x=a;x.rank===4&&(x=K(a,[1,a.shape[0],a.shape[1],a.shape[2],a.shape[3]])),U(g.rank===5,()=>`Error in conv3dDerFilter: input must be rank 5, but got shape ${g.shape}.`),U(x.rank===5,()=>`Error in conv3dDerFilter: dy must be rank 5, but got shape ${x.shape}.`),U(u.length===5,()=>`Error in conv3dDerFilter: filterShape must be length 5, but got ${u}.`),U(g.shape[4]===u[3],()=>`Error in conv3dDerFilter: depth of input ${g.shape[4]}) must match input depth in filter (${u[3]}.`),U(x.shape[4]===u[4],()=>`Error in conv3dDerFilter: depth of dy (${x.shape[4]}) must match output depth for filter (${u[4]}).`);let w=$=>{let F=1,_=_h(g.shape,u,h,F,d);return $.conv3dDerFilter(g,x,_)},k={x:g,dy:x},C={strides:h,pad:d,filterShape:u};return V.runKernelFunc(w,k,null,KE,C)}var jA=O({conv3DBackpropFilter_:urt});var KA={kernelName:jE,inputsToSave:["x","filter"],gradFunc:(i,a,u)=>{let{dilations:h,strides:d,pad:g}=u;U(Di(h),()=>`Error in gradient of conv3D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${h}'`);let[x,w]=a;return{x:()=>kD(x.shape,i,w,d,g),filter:()=>jA(x,i,w.shape,d,g)}}};var XA={kernelName:kg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(me(zh(wt(u,"float32"))),i)}}};var YA={kernelName:Ng,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(Wh(wt(u,"float32")),i)}}};var JA={kernelName:_g,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{axis:d,exclusive:g,reverse:x}=u;return{x:()=>{let w=Nn([d],h.rank),k=$h(i,d,g,!x);return w!=null&&(k=Ie(k,w)),k}}}};var ZA={kernelName:Cg,inputsToSave:["x","filter"],gradFunc:(i,a,u)=>{let{dilations:h,strides:d,pad:g,dimRoundingMode:x}=u,w=h==null?[1,1]:h;U(Di(w),()=>`Error in gradient of depthwiseConv2dNative: dilation rates greater than 1 are not yet supported. Got dilations '${w}'`);let[k,C]=a;return U(k.rank===4,()=>`Error in gradient of depthwiseConv2dNative: input must be rank 4, but got rank ${k.rank}.`),U(C.rank===4,()=>`Error in gradient of depthwiseConv2dNative: filter must be rank 4, but got rank ${C.rank}.`),U(k.shape[3]===C.shape[2],()=>`Error in gradient of depthwiseConv2d: number of input channels (${k.shape[3]}) must match the inChannels dimension in filter ${C.shape[2]}.`),U(Wn(d,w),()=>`Error in gradient of depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${d} and dilations '${w}'.`),x!=null&&U(Te(g),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${x} but got pad ${g}.`),{x:()=>UD(k.shape,i,C,d,g,h,x),filter:()=>GD(k,i,C.shape,d,g,h,x)}}};var QA={kernelName:Sg,inputsToSave:["x","filter"],gradFunc:(i,a,u)=>{let[h,d]=a,g={x:h,filter:d,dy:i},x={x:h,filter:d,dy:i};return{x:()=>V.runKernel(t2,g,u),filter:()=>V.runKernel(e2,x,u)}}};var tF={kernelName:$g,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=Wt(i,wt(h,"float32")),k=Be(u.shape,d);return k.length>0?K(Ft(w,k),u.shape):w},x=()=>{let w=at(i,wt(u,"float32")),k=Be(h.shape,d);k.length>0&&(w=K(Ft(w,k),h.shape));let C=be(h);return me(Wt(w,wt(C,"float32")))};return{a:g,b:x}}};var eF={kernelName:Ig,outputsToSave:[!0],gradFunc:(i,a)=>{let[u]=a,h=g=>g.eluDer(i,u),d={dy:i,y:u};return{x:()=>V.runKernelFunc(h,d,null,n2)}}};var nF={kernelName:Eg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a,h=at(Cn(me(be(u))),2/Math.sqrt(Math.PI));return{x:()=>at(i,h)}}};var rF={kernelName:Dg,outputsToSave:[!0],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,u)}}};var oF={kernelName:Ag,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,Cn(u))}}};var sF={kernelName:Fg,gradFunc:i=>({x:()=>jt(i)})};var iF={kernelName:Rg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=Wt(i,wt(h,"float32")),k=Be(u.shape,d);return k.length>0?K(Ft(w,k),u.shape):w},x=()=>{let w=at(i,wt(u,"float32")),k=Be(h.shape,d);k.length>0&&(w=K(Ft(w,k),h.shape));let C=be(h);return me(Wt(w,wt(C,"float32")))};return{a:g,b:x}}};var aF={kernelName:Pg,inputsToSave:["x","mean","variance","scale"],gradFunc:(i,a,u)=>{let{varianceEpsilon:h}=u,[d,g,x,w]=a,k=w==null?Ot(1):w,C=Be(g.shape,d.shape),$=[];if(g.rank===1){for(let _t=0;_tg.rank===1?K(at(at(i,Ws(K(W,[1,1,1,g.shape[0]]),$)),k),d.shape):K(at(at(i,W),k),d.shape),G=()=>{let _t=at(at(W,Ot(-1)),_);return g.rank===1&&(_t=Ft(_t,C)),K(_t,g.shape)},mt=()=>{let _t=at(at(et,F),_);return g.rank===1&&(_t=Ft(_t,C)),K(_t,g.shape)},lt=()=>{let _t=at(F,W),Gt=at(i,_t);return g.rank===1&&(Gt=Ft(Gt,C)),K(Gt,g.shape)},gt=()=>{let _t=i;return g.rank===1&&(_t=Ft(_t,C)),K(_t,g.shape)};return{x:tt,mean:G,variance:mt,scale:lt,offset:gt}}};var uF={kernelName:Og,inputsToSave:["x","indices"],gradFunc:(i,a,u)=>{let[h,d]=a,{axis:g}=u,x=Zt(g,h.shape)[0],w=()=>{let k=h.shape,C=d.size,$=k.slice(0,x),F=$.length,_=k.slice(g,k.length).slice(1),W=_.length,et=cF(0,F),tt=cF(F+1,F+1+W),G=lF([$,[C],_]),mt=K(i,G),lt=K(d,[C]),gt=lF([[F],et,tt]),_t=Ie(mt,gt),Gt=Uh(_t,lt,h.shape[x]),se=Sl(gt);return Gt=Ie(Gt,se),Gt};return{x:w,indices:()=>d}}};function cF(i,a){let u=[];for(let h=i;h{let[u,h]=a;return{a:()=>jt(u),b:()=>jt(h)}}};var hF={kernelName:Mg,gradFunc:i=>({x:()=>wt(i,"float32")})};var fF={kernelName:Bg,gradFunc:i=>({x:()=>jt(i)})};var dF={kernelName:zg,gradFunc:i=>({x:()=>jt(i)})};var mF={kernelName:Wg,gradFunc:i=>({x:()=>jt(i)})};var gF={kernelName:Gg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,ke(u,1))}}};var yF={kernelName:Vg,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,wt(u,"float32"))}}};var bF={kernelName:Ug,inputsToSave:[],outputsToSave:[!0],gradFunc:(i,a,u)=>{let[h]=a,{axis:d}=u;return{logits:()=>{let g=!0,x=Cn(h);return Pt(i,at(Ft(i,d,g),x))}}}};function prt(i,a,u,h=5,d=1,g=1,x=.5){let w=$=>$.LRNGrad(u,i,a,h,d,g,x),k={x:i,y:a,dy:u},C={depthRadius:h,bias:d,alpha:g,beta:x};return V.runKernelFunc(w,k,null,m2,C)}var xF=O({localResponseNormalizationBackprop_:prt});var 
wF={kernelName:qg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(i,a,u)=>{let[h,d]=a,{depthRadius:g,bias:x,alpha:w,beta:k}=u;return{x:()=>xF(h,d,i,g,x,w,k)}}};function ob(i,a,u,h){return a.rank{let d=at(i,wt(zs(u,a),i.dtype));return d}}}var Tk={kernelName:Hg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(i,a,u)=>{let h=u,{reductionIndices:d}=h,g=a[0],x=a[1],w=Zt(d,g.shape),k=ob(i,x,g,w);return{x:()=>k.x()}}};var vF={kernelName:jg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=()=>at(i,wt(Ur(u,h),"float32")),g=()=>at(i,wt(Ma(u,h),"float32"));return{a:d,b:g}}};function hrt(i,a,u,h,d,g=[1,1,1],x,w){let k=R(i,"dy","maxPool3dBackprop"),C=R(a,"input","maxPool3dBackprop"),$=R(u,"output","maxPool3dBackprop"),F=k,_=C,W=$,et=!1;C.rank===4&&(et=!0,F=K(k,[1,k.shape[0],k.shape[1],k.shape[2],k.shape[3]]),_=K(C,[1,C.shape[0],C.shape[1],C.shape[2],C.shape[3]]),W=K($,[1,$.shape[0],$.shape[1],$.shape[2],$.shape[3]])),U(F.rank===5,()=>`Error in maxPool3dBackprop: dy must be rank 5 but got rank ${F.rank}.`),U(_.rank===5,()=>`Error in maxPool3dBackprop: input must be rank 5 but got rank ${_.rank}.`),U(W.rank===5,()=>`Error in maxPool3dBackprop: output must be rank 5 but got rank ${W.rank}.`),U(Wn(d,g),()=>`Error in maxPool3dBackprop: Either strides or dilations must be 1. Got strides ${d} and dilations '${g}'`),w!=null&&U(Te(x),()=>`Error in maxPool3dBackprop: pad must be an integer when using, dimRoundingMode ${w} but got pad ${x}.`);let tt=gt=>{let _t=jy(_.shape,h,d,g,x,w);return gt.maxPool3dBackprop(F,_,W,_t)},G={dy:F,input:_,output:W},mt={filterSize:h,strides:d,dilations:g,pad:x,dimRoundingMode:w},lt=V.runKernelFunc(tt,G,null,b2,mt);return et?K(lt,[lt.shape[1],lt.shape[2],lt.shape[3],lt.shape[4]]):lt}var TF=O({maxPool3dBackprop_:hrt});var kF={kernelName:y2,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(i,a,u)=>{let[h,d]=a,{filterSize:g,strides:x,dilations:w,pad:k,dimRoundingMode:C}=u,$=w==null?[1,1,1]:w;return{x:()=>TF(i,h,d,g,x,$,k,C)}}};function frt(i,a,u,h,d,g,x){let w=R(i,"dy","maxPoolBackprop"),k=R(a,"input","maxPoolBackprop"),C=R(u,"output","maxPoolBackprop");U(k.rank===w.rank,()=>`Rank of input (${k.rank}) does not match rank of dy (${w.rank})`),U(w.rank===4,()=>`Error in maxPoolBackprop: dy must be rank 4 but got rank ${w.rank}.`),U(k.rank===4,()=>`Error in maxPoolBackprop: input must be rank 4 but got rank ${k.rank}.`),x!=null&&U(Te(g),()=>`Error in maxPoolBackprop: pad must be an integer when using, dimRoundingMode ${x} but got pad ${g}.`);let $=W=>{let et=ns(k.shape,h,d,1,g,x);return W.maxPoolBackprop(w,k,C,et)},F={dy:w,input:k,output:C},_={filterSize:h,strides:d,pad:g,dimRoundingMode:x};return V.runKernelFunc($,F,null,g2,_)}var NF=O({maxPoolBackprop_:frt});var _F={kernelName:Kg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(i,a,u)=>{let[h,d]=a,{filterSize:g,strides:x,pad:w}=u;return{x:()=>NF(i,h,d,g,x,w)}}};var CF={kernelName:Xg,inputsToSave:["x"],outputsToSave:[!0],gradFunc:(i,a,u)=>{let h=u,{axis:d}=h,[g,x]=a,w=Zt(d,g.shape),k=ob(i,x,g,w);return{x:()=>k.x()}}};var SF={kernelName:Yg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=()=>at(i,wt(qr(u,h),"float32")),g=()=>at(i,wt(gr(u,h),"float32"));return{a:d,b:g}}};var $F={kernelName:Jg,inputsToSave:["x"],gradFunc:(i,a,u)=>{let h=a[0],{paddings:d}=u,g=d.map(x=>x[0]);return{x:()=>ve(i,g,h.shape)}}};var IF={kernelName:Zg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=Be(u.shape,d);return w.length>0?K(Ft(i,w),u.shape):i},x=()=>{let w=at(i,me(Ih(Wt(u,h)))),k=Be(h.shape,d);return 
k.length>0?K(Ft(w,k),h.shape):w};return{a:g,b:x}}};var EF={kernelName:Qg,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=at(i,wt(h,"float32")),k=Be(u.shape,d);return k.length>0?K(Ft(w,k),u.shape):w},x=()=>{let w=at(i,wt(u,"float32")),k=Be(h.shape,d);return k.length>0?K(Ft(w,k),h.shape):w};return{a:g,b:x}}};var DF={kernelName:ty,gradFunc:i=>({x:()=>me(i)})};var AF={kernelName:ny,inputsToSave:["indices"],gradFunc:(i,a)=>{let u=a[0];return{indices:()=>Hr(u.shape,"float32")}}};var FF={kernelName:ey,gradFunc:i=>({x:()=>jt(i)})};var kk={kernelName:ry,inputsToSave:["x"],gradFunc:(i,a,u)=>{let h=a[0],{paddings:d}=u,g=d.map(x=>x[0]);return{x:()=>ve(i,g,h.shape)}}};var RF={kernelName:oy,inputsToSave:["a","b"],outputsToSave:[!0],gradFunc:(i,a)=>{let[u,h,d]=a,g=u,x=h,w=ne(g.shape,x.shape),k=()=>{let $=wt(x,"float32"),F=at(i,at($,Ao(g,Pt($,Ot(1))))),_=Be(g.shape,w);return _.length>0&&(F=Ft(F,_)),K(F,g.shape)},C=()=>{let $=gr(g,0),F=_n($,ro(g),jt(g)),_=at(i,at(d,F)),W=Be(x.shape,w);return W.length>0&&(_=Ft(_,W)),K(_,x.shape)};return{a:k,b:C}}};var PF={kernelName:sy,inputsToSave:["x","alpha"],gradFunc:(i,a)=>{let[u,h]=a,d=gr(u,0);return{x:()=>_n(d,i,at(i,h)),alpha:()=>{let g=_n(d,jt(i),at(i,u)),x=Be(h.shape,i.shape);return x.length>0&&(g=Ft(g,x)),K(g,h.shape)}}}};var OF={kernelName:iy,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,me(be(u)))}}};var LF={kernelName:py,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a,h=at(qr(u,6),Gs(u));return{x:()=>at(i,wt(h,"float32"))}}};var MF={kernelName:ay,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,wt(Gs(u),"float32"))}}};var BF={kernelName:cy,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>K(i,u.shape)}}};var zF={kernelName:uy,inputsToSave:["images"],gradFunc:(i,a,u)=>{let[h]=a,d=w=>{let{alignCorners:k}=u;return w.resizeBilinearBackprop(i,h,k)},g={images:h},x=()=>V.runKernelFunc(d,g,null,$2,u);return{images:x}}};var WF={kernelName:ly,inputsToSave:["images"],gradFunc:(i,a,u)=>{let[h]=a,d=w=>{let{alignCorners:k}=u;return w.resizeNearestNeighborBackprop(i,h,k)},g={images:h},x=()=>V.runKernelFunc(d,g,null,S2,u);return{images:x}}};var VF={kernelName:hy,gradFunc:(i,a,u)=>{let{dims:h}=u,d=Zt(h,i.shape);return{x:()=>Ri(i,d)}}};var GF={kernelName:fy,gradFunc:i=>({x:()=>jt(i)})};var UF={kernelName:dy,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>me(Wt(i,at(Ao(u,1.5),2)))}}};var qF={kernelName:my,inputsToSave:["condition"],gradFunc:(i,a)=>{let[u]=a;return{condition:()=>wt(jt(u),"float32"),t:()=>at(i,wt(u,i.dtype)),e:()=>at(i,wt(Ba(u),i.dtype))}}};var HF={kernelName:gy,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>{let h=gr(u,Ot(0)),d=Ot(wA),g=Ot(vA),x=at(i,g),w=at(at(i,d),Cn(wt(u,"float32")));return _n(h,x,w)}}}};var jF={kernelName:vy,outputsToSave:[!0],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,at(u,Pt(Ot(1),u)))}}};var KF={kernelName:wy,gradFunc:i=>({x:()=>jt(i)})};var XF={kernelName:by,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(Pa(wt(u,"float32")),i)}}};var YF={kernelName:xy,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(Sh(wt(u,"float32")),i)}}};var JF={kernelName:yy,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{begin:d,size:g}=u,x=h.shape,[w,k]=qy(h,d,g),C=[];for(let $=0;$Mh(i,C)}}};var ZF={kernelName:Sy,outputsToSave:[!0],gradFunc:(i,a,u)=>{let[h]=a,{dim:d}=u,g=!0,x=at(i,h);return{logits:()=>Pt(x,at(Ft(x,[d],g),h))}}};var QF={kernelName:Ty,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,Fa(u))}}};var 
Nk={kernelName:_y,gradFunc:(i,a,u)=>{let{blockShape:h,paddings:d}=u;return{x:()=>Ra(i,h,d)}}};var _k={kernelName:Cy,gradFunc:(i,a,u)=>{let{axis:h}=u;return{x:()=>mn(i,h)}}};var tR={kernelName:ky,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,at(Qn(wt(u,"float32")),2))}}};var eR={kernelName:I2,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(i,at(wt(u,"float32"),2))}}};var nR={kernelName:$y,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=Ot(2),g=()=>at(i,at(d,Pt(u,h))),x=()=>at(i,at(d,Pt(h,u)));return{a:g,b:x}}};var rR={kernelName:Ly,gradFunc:i=>({x:()=>jt(i)})};var oR={kernelName:Iy,inputsToSave:["a","b"],gradFunc:(i,a)=>{let[u,h]=a,d=ne(u.shape,h.shape),g=()=>{let w=i,k=Be(u.shape,d);return k.length>0&&(w=Ft(w,k)),K(w,u.shape)},x=()=>{let w=i,k=Be(h.shape,d);return k.length>0&&(w=Ft(w,k)),K(me(w),h.shape)};return{a:g,b:x}}};var sR={kernelName:Ny,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,d=h.shape.slice(),{axis:g}=u,x=Zt(g,h.shape);x.forEach(C=>{d[C]=1});let w=K(i,d),k=at(w,rs(h.shape,"float32"));return{x:()=>k}}};var iR={kernelName:Ey,inputsToSave:["x"],gradFunc:(i,a)=>{let[u]=a;return{x:()=>Wt(i,be(Pa(u)))}}};var aR={kernelName:Dy,outputsToSave:[!0],gradFunc:(i,a)=>{let[u]=a;return{x:()=>at(Pt(Ot(1),be(u)),i)}}};var cR={kernelName:Ay,inputsToSave:["x"],gradFunc:(i,a,u)=>{let[h]=a,{reps:d}=u,g=()=>{let x=jt(h);if(h.rank===1)for(let w=0;w{let h=u,{perm:d}=h,g=Sl(d);return{x:()=>Ie(i,g)}}};var uR={kernelName:Ry,gradFunc:(i,a,u)=>{let h=u,{axis:d}=h;return{value:()=>Cr(i,d)}}};var pR={kernelName:Py,inputsToSave:["segmentIds"],gradFunc:(i,a)=>{let[u]=a,h=()=>drt(i,u);return{x:h}}};function drt(i,a){let u=Vs(a,jt(a)),h=Eh(i,u),d=Ur(a,Ot(0,"int32")),g=h.rank-d.rank;for(let w=0;w({x:()=>jt(i)})};var mrt=[TA,kA,NA,_A,CA,SA,$A,IA,EA,DA,AA,FA,PA,LA,MA,BA,zA,WA,VA,GA,UA,HA,qA,KA,XA,YA,JA,ZA,QA,tF,eF,nF,rF,oF,iF,sF,aF,uF,pF,hF,fF,dF,mF,gF,yF,bF,wF,Tk,Tk,vF,kF,_F,CF,SF,$F,IF,EF,DF,AF,FF,kk,kk,RF,PF,OF,LF,MF,BF,zF,WF,VF,GF,UF,qF,HF,jF,KF,XF,YF,JF,ZF,QF,Nk,Nk,_k,_k,tR,nR,eR,rR,oR,sR,iR,aR,cR,lR,uR,pR,hR];for(let i of mrt)P2(i);z.prototype.abs=function(){return this.throwIfDisposed(),Bn(this)};z.prototype.acos=function(){return this.throwIfDisposed(),_T(this)};z.prototype.acosh=function(){return this.throwIfDisposed(),CT(this)};z.prototype.addStrict=function(i){return this.throwIfDisposed(),AD(this,i)};z.prototype.add=function(i){return this.throwIfDisposed(),ke(this,i)};z.prototype.all=function(i,a){return this.throwIfDisposed(),ST(this,i,a)};z.prototype.any=function(i,a){return this.throwIfDisposed(),$T(this,i,a)};z.prototype.argMax=function(i){return this.throwIfDisposed(),IT(this,i)};z.prototype.argMin=function(i){return this.throwIfDisposed(),ET(this,i)};z.prototype.asScalar=function(){return this.throwIfDisposed(),U(this.size===1,()=>"The array must have only 1 element."),K(this,[])};z.prototype.asType=function(i){return this.throwIfDisposed(),wt(this,i)};z.prototype.as1D=function(){return this.throwIfDisposed(),K(this,[this.size])};z.prototype.as2D=function(i,a){return this.throwIfDisposed(),K(this,[i,a])};z.prototype.as3D=function(i,a,u){return this.throwIfDisposed(),K(this,[i,a,u])};z.prototype.as4D=function(i,a,u,h){return this.throwIfDisposed(),K(this,[i,a,u,h])};z.prototype.as5D=function(i,a,u,h,d){return this.throwIfDisposed(),K(this,[i,a,u,h,d])};z.prototype.asin=function(){return this.throwIfDisposed(),DT(this)};z.prototype.asinh=function(){return this.throwIfDisposed(),AT(this)};z.prototype.atan=function(){return 
this.throwIfDisposed(),FT(this)};z.prototype.atan2=function(i){return this.throwIfDisposed(),RT(this,i)};z.prototype.atanh=function(){return this.throwIfDisposed(),PT(this)};z.prototype.avgPool=function(i,a,u,h){return this.throwIfDisposed(),Ch(this,i,a,u,h)};z.prototype.batchToSpaceND=function(i,a){return this.throwIfDisposed(),Ra(this,i,a)};z.prototype.batchNorm=function(i,a,u,h,d){return this.throwIfDisposed(),MT(this,i,a,u,h,d)};z.prototype.broadcastTo=function(i){return this.throwIfDisposed(),El(this,i)};z.prototype.cast=function(i){return this.throwIfDisposed(),wt(this,i)};z.prototype.ceil=function(){return this.throwIfDisposed(),BT(this)};z.prototype.clipByValue=function(i,a){return this.throwIfDisposed(),zT(this,i,a)};z.prototype.concat=function(i,a){return this.throwIfDisposed(),i instanceof z&&(i=[i]),mn([this,...i],a)};z.prototype.conv1d=function(i,a,u,h,d,g){return this.throwIfDisposed(),WT(this,i,a,u,h,d,g)};z.prototype.conv2dTranspose=function(i,a,u,h,d){return this.throwIfDisposed(),VT(this,i,a,u,h,d)};z.prototype.conv2d=function(i,a,u,h,d,g){return this.throwIfDisposed(),Bs(this,i,a,u,h,d,g)};z.prototype.cos=function(){return this.throwIfDisposed(),Pa(this)};z.prototype.cosh=function(){return this.throwIfDisposed(),Sh(this)};z.prototype.cumsum=function(i,a,u){return this.throwIfDisposed(),$h(this,i,a,u)};z.prototype.depthToSpace=function(i,a){return this.throwIfDisposed(),GT(this,i,a)};z.prototype.depthwiseConv2D=function(i,a,u,h,d,g){return kn("depthwiseConv2D is deprecated, use depthwiseConv2d instead"),this.throwIfDisposed(),Oa(this,i,a,u,h,d,g)};z.prototype.depthwiseConv2d=function(i,a,u,h,d,g){return this.throwIfDisposed(),Oa(this,i,a,u,h,d,g)};z.prototype.dilation2d=function(i,a,u,h,d){return this.throwIfDisposed(),UT(this,i,a,u,h,d)};z.prototype.divNoNan=function(i){return this.throwIfDisposed(),qT(this,i)};z.prototype.divStrict=function(i){return this.throwIfDisposed(),FD(this,i)};z.prototype.div=function(i){return this.throwIfDisposed(),Wt(this,i)};z.prototype.dot=function(i){return this.throwIfDisposed(),HT(this,i)};z.prototype.elu=function(){return this.throwIfDisposed(),jT(this)};z.prototype.equalStrict=function(i){return this.throwIfDisposed(),CD(this,i)};z.prototype.equal=function(i){return this.throwIfDisposed(),zs(this,i)};z.prototype.erf=function(){return this.throwIfDisposed(),KT(this)};z.prototype.exp=function(){return this.throwIfDisposed(),Cn(this)};z.prototype.expandDims=function(i){return this.throwIfDisposed(),mr(this,i)};z.prototype.expm1=function(){return this.throwIfDisposed(),XT(this)};z.prototype.fft=function(){return this.throwIfDisposed(),Ua(this)};z.prototype.flatten=function(){return this.throwIfDisposed(),K(this,[this.size])};z.prototype.floor=function(){return this.throwIfDisposed(),Ih(this)};z.prototype.floorDiv=function(i){return this.throwIfDisposed(),Nh(this,i)};z.prototype.gather=function(i,a){return this.throwIfDisposed(),Eh(this,i,a)};z.prototype.greaterEqualStrict=function(i){return this.throwIfDisposed(),SD(this,i)};z.prototype.greaterEqual=function(i){return this.throwIfDisposed(),Ur(this,i)};z.prototype.greaterStrict=function(i){return this.throwIfDisposed(),$D(this,i)};z.prototype.greater=function(i){return this.throwIfDisposed(),gr(this,i)};z.prototype.ifft=function(){return this.throwIfDisposed(),Pi(this)};z.prototype.irfft=function(){return this.throwIfDisposed(),Vh(this)};z.prototype.isFinite=function(){return this.throwIfDisposed(),ZT(this)};z.prototype.isInf=function(){return 
this.throwIfDisposed(),QT(this)};z.prototype.isNaN=function(){return this.throwIfDisposed(),tk(this)};z.prototype.leakyRelu=function(i){return this.throwIfDisposed(),ek(this,i)};z.prototype.lessEqualStrict=function(i){return this.throwIfDisposed(),ID(this,i)};z.prototype.lessEqual=function(i){return this.throwIfDisposed(),qr(this,i)};z.prototype.lessStrict=function(i){return this.throwIfDisposed(),ED(this,i)};z.prototype.less=function(i){return this.throwIfDisposed(),Ma(this,i)};z.prototype.localResponseNormalization=function(i,a,u,h){return this.throwIfDisposed(),nk(this,i,a,u,h)};z.prototype.logSigmoid=function(){return this.throwIfDisposed(),rk(this)};z.prototype.logSoftmax=function(i){return this.throwIfDisposed(),ok(this,i)};z.prototype.logSumExp=function(i,a){return this.throwIfDisposed(),Fh(this,i,a)};z.prototype.log=function(){return this.throwIfDisposed(),ro(this)};z.prototype.log1p=function(){return this.throwIfDisposed(),Dh(this)};z.prototype.logicalAnd=function(i){return this.throwIfDisposed(),oo(this,i)};z.prototype.logicalNot=function(){return this.throwIfDisposed(),Ba(this)};z.prototype.logicalOr=function(i){return this.throwIfDisposed(),Rh(this,i)};z.prototype.logicalXor=function(i){return this.throwIfDisposed(),sk(this,i)};z.prototype.matMul=function(i,a,u){return this.throwIfDisposed(),We(this,i,a,u)};z.prototype.maxPool=function(i,a,u,h){return this.throwIfDisposed(),Ph(this,i,a,u,h)};z.prototype.max=function(i,a){return this.throwIfDisposed(),Do(this,i,a)};z.prototype.maximumStrict=function(i){return this.throwIfDisposed(),RD(this,i)};z.prototype.maximum=function(i){return this.throwIfDisposed(),Vs(this,i)};z.prototype.mean=function(i,a){return this.throwIfDisposed(),Oh(this,i,a)};z.prototype.min=function(i,a){return this.throwIfDisposed(),Al(this,i,a)};z.prototype.minimumStrict=function(i){return this.throwIfDisposed(),PD(this,i)};z.prototype.minimum=function(i){return this.throwIfDisposed(),za(this,i)};z.prototype.mirrorPad=function(i,a){return this.throwIfDisposed(),ik(this,i,a)};z.prototype.modStrict=function(i){return this.throwIfDisposed(),OD(this,i)};z.prototype.mod=function(i){return this.throwIfDisposed(),Lh(this,i)};z.prototype.mulStrict=function(i){return this.throwIfDisposed(),LD(this,i)};z.prototype.mul=function(i){return this.throwIfDisposed(),at(this,i)};z.prototype.neg=function(){return this.throwIfDisposed(),me(this)};z.prototype.norm=function(i,a,u){return this.throwIfDisposed(),Fl(this,i,a,u)};z.prototype.notEqualStrict=function(i){return this.throwIfDisposed(),DD(this,i)};z.prototype.notEqual=function(i){return this.throwIfDisposed(),Wa(this,i)};z.prototype.oneHot=function(i,a=1,u=0){return this.throwIfDisposed(),TT(this,i,a,u)};z.prototype.onesLike=function(){return this.throwIfDisposed(),Yy(this)};z.prototype.pad=function(i,a){return this.throwIfDisposed(),Mh(this,i,a)};z.prototype.pool=function(i,a,u,h,d){return this.throwIfDisposed(),ak(this,i,a,u,h,d)};z.prototype.powStrict=function(i){return this.throwIfDisposed(),MD(this,i)};z.prototype.pow=function(i){return this.throwIfDisposed(),Ao(this,i)};z.prototype.prelu=function(i){return this.throwIfDisposed(),ck(this,i)};z.prototype.prod=function(i,a){return this.throwIfDisposed(),lk(this,i,a)};z.prototype.reciprocal=function(){return this.throwIfDisposed(),uk(this)};z.prototype.relu=function(){return this.throwIfDisposed(),Ga(this)};z.prototype.relu6=function(){return this.throwIfDisposed(),pk(this)};z.prototype.reshapeAs=function(i){return 
this.throwIfDisposed(),K(this,i.shape)};z.prototype.reshape=function(i){return this.throwIfDisposed(),K(this,i)};z.prototype.resizeBilinear=function(i,a){return this.throwIfDisposed(),nb(this,i,a)};z.prototype.resizeNearestNeighbor=function(i,a){return this.throwIfDisposed(),rb(this,i,a)};z.prototype.reverse=function(i){return this.throwIfDisposed(),Ri(this,i)};z.prototype.rfft=function(){return this.throwIfDisposed(),qa(this)};z.prototype.round=function(){return this.throwIfDisposed(),hk(this)};z.prototype.rsqrt=function(){return this.throwIfDisposed(),Bh(this)};z.prototype.selu=function(){return this.throwIfDisposed(),fk(this)};z.prototype.separableConv2d=function(i,a,u,h,d,g){return this.throwIfDisposed(),dk(this,i,a,u,h,d,g)};z.prototype.sigmoid=function(){return this.throwIfDisposed(),Fa(this)};z.prototype.sign=function(){return this.throwIfDisposed(),mk(this)};z.prototype.sin=function(){return this.throwIfDisposed(),zh(this)};z.prototype.sinh=function(){return this.throwIfDisposed(),Wh(this)};z.prototype.slice=function(i,a){return this.throwIfDisposed(),ve(this,i,a)};z.prototype.softmax=function(i){return this.throwIfDisposed(),gk(this,i)};z.prototype.softplus=function(){return this.throwIfDisposed(),Ah(this)};z.prototype.spaceToBatchND=function(i,a){return this.throwIfDisposed(),Va(this,i,a)};z.prototype.split=function(i,a){return this.throwIfDisposed(),os(this,i,a)};z.prototype.sqrt=function(){return this.throwIfDisposed(),Qn(this)};z.prototype.square=function(){return this.throwIfDisposed(),be(this)};z.prototype.squaredDifference=function(i){return this.throwIfDisposed(),Ha(this,i)};z.prototype.squaredDifferenceStrict=function(i){return this.throwIfDisposed(),BD(this,i)};z.prototype.squeeze=function(i){return this.throwIfDisposed(),Gh(this,i)};z.prototype.stack=function(i,a){this.throwIfDisposed();let u=i instanceof z?[this,i]:[this,...i];return Cr(u,a)};z.prototype.step=function(i){return this.throwIfDisposed(),Gs(this,i)};z.prototype.stridedSlice=function(i,a,u,h,d,g,x,w){return this.throwIfDisposed(),yk(this,i,a,u,h,d,g,x,w)};z.prototype.subStrict=function(i){return this.throwIfDisposed(),zD(this,i)};z.prototype.sub=function(i){return this.throwIfDisposed(),Pt(this,i)};z.prototype.sum=function(i,a){return this.throwIfDisposed(),Ft(this,i,a)};z.prototype.tan=function(){return this.throwIfDisposed(),bk(this)};z.prototype.tanh=function(){return this.throwIfDisposed(),LT(this)};z.prototype.tile=function(i){return this.throwIfDisposed(),Ws(this,i)};z.prototype.toBool=function(){return this.throwIfDisposed(),wt(this,"bool")};z.prototype.toFloat=function(){return this.throwIfDisposed(),wt(this,"float32")};z.prototype.toInt=function(){return this.throwIfDisposed(),wt(this,"int32")};z.prototype.topk=function(i,a){return this.throwIfDisposed(),xk(this,i,a)};z.prototype.transpose=function(i){return this.throwIfDisposed(),Ie(this,i)};z.prototype.unique=function(i){return this.throwIfDisposed(),wk(this,i)};z.prototype.unsortedSegmentSum=function(i,a){return this.throwIfDisposed(),Uh(this,i,a)};z.prototype.unstack=function(i){return this.throwIfDisposed(),Ka(this,i)};z.prototype.where=function(i,a){return this.throwIfDisposed(),_n(i,this,a)};z.prototype.zerosLike=function(){return this.throwIfDisposed(),jt(this)};function Ck(i,a,u=!1){let{Image:h,Canvas:d}=$e.getEnv();if(!(i instanceof h||i instanceof d))throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement");let g=Sa(i),x=a/Math.max(g.height,g.width),w=x*g.width,k=x*g.height,C=xl({width:a,height:a}),$=i 
instanceof d?i:dh(i),F=Math.abs(w-k)/2,_=u&&w{if(Ds(h)){this._imageTensors[d]=h,this._inputDimensions[d]=h.shape;return}if(zr(h)){let x=h.shape[0];if(x!==1)throw new Error(`NetInput - tf.Tensor4D with batchSize ${x} passed, but not supported in input array`);this._imageTensors[d]=h,this._inputDimensions[d]=h.shape.slice(1);return}let g=h instanceof $e.getEnv().Canvas?h:dh(h);this._canvases[d]=g,this._inputDimensions[d]=[g.height,g.width,3]})}get imageTensors(){return this._imageTensors}get canvases(){return this._canvases}get isBatchInput(){return this.batchSize>1||this._treatAsBatchInput}get batchSize(){return this._batchSize}get inputDimensions(){return this._inputDimensions}get inputSize(){return this._inputSize}get reshapedInputDimensions(){return Xo(this.batchSize,0,1).map((a,u)=>this.getReshapedInputDimensions(u))}getInput(a){return this.canvases[a]||this.imageTensors[a]}getInputDimensions(a){return this._inputDimensions[a]}getInputHeight(a){return this._inputDimensions[a][0]}getInputWidth(a){return this._inputDimensions[a][1]}getReshapedInputDimensions(a){if(typeof this.inputSize!="number")throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet");let u=this.getInputWidth(a),h=this.getInputHeight(a);return E1({width:u,height:h},this.inputSize)}toBatchTensor(a,u=!0){return this._inputSize=a,gD(()=>{let h=Xo(this.batchSize,0,1).map(g=>{let x=this.getInput(g);if(x instanceof z){let w=zr(x)?x:x.expandDims();return w=R1(w,u),(w.shape[1]!==a||w.shape[2]!==a)&&(w=xA.resizeBilinear(w,[a,a])),w.as3D(a,a,3)}if(x instanceof $e.getEnv().Canvas)return NT.fromPixels(Ck(x,a,u));throw new Error(`toBatchTensor - at batchIdx ${g}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${x}`)}),d=Cr(h.map(g=>wt(g,"float32"))).as4D(this.batchSize,a,a,3);return d})}};async function Ve(i){if(i instanceof Us)return i;let a=Array.isArray(i)?i:[i];if(!a.length)throw new Error("toNetInput - empty array passed as input");let u=d=>Array.isArray(i)?` at input index ${d}:`:"",h=a.map(Ca);return h.forEach((d,g)=>{if(!tg(d)&&!Ds(d)&&!zr(d))throw typeof a[g]=="string"?new Error(`toNetInput -${u(g)} string passed, but could not resolve HTMLElement for element id ${a[g]}`):new Error(`toNetInput -${u(g)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`);if(zr(d)){let x=d.shape[0];if(x!==1)throw new Error(`toNetInput -${u(g)} tf.Tensor4D with batchSize ${x} passed, but not supported in input array`)}}),await Promise.all(h.map(d=>tg(d)&&V1(d))),new Us(h,Array.isArray(i))}async function Rl(i,a){let{Canvas:u}=$e.getEnv(),h=i;if(!(i instanceof u)){let x=await Ve(i);if(x.batchSize>1)throw new Error("extractFaces - batchSize > 1 not supported");let w=x.getInput(0);h=w instanceof u?w:await U1(w)}let d=dr(h),g=a.map(x=>x instanceof Ue?x.forSize(h.width,h.height).box.floor():x).map(x=>x.clipAtImageBorders(h.width,h.height));return g.map(({x,y:w,width:k,height:C})=>{let $=xl({width:k,height:C});return dr($).putImageData(d.getImageData(x,w,k,C),0,0),$})}var sb=te(ee());async function Pl(i,a){if(!Ds(i)&&!zr(i))throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D");if(zr(i)&&i.shape[0]>1)throw new Error("extractFaceTensors - batchSize > 1 not supported");return sb.tidy(()=>{let[u,h,d]=i.shape.slice(zr(i)?1:0),g=a.map(w=>w instanceof 
Ue?w.forSize(h,u).box:w).map(w=>w.clipAtImageBorders(h,u)),x=g.map(({x:w,y:k,width:C,height:$})=>sb.slice3d(i.as3D(u,h,d),[k,w,0],[$,C,d]));return x})}async function Xa(i,a){let u=$e.getEnv().fetch,h=await u(i,a);if(!(h.status<400))throw new Error(`failed to fetch: (${h.status}) ${h.statusText}, from url: ${h.url}`);return h}async function grt(i){let a=await Xa(i),u=await a.blob();if(!u.type.startsWith("image/"))throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${u.type}, for url: ${a.url}`);return G1(u)}async function Sk(i){return(await Xa(i)).json()}async function yrt(i){return new Float32Array(await(await Xa(i)).arrayBuffer())}var fR=te(ee());function ib(i,a){let u=`${a}-weights_manifest.json`;if(!i)return{modelBaseUri:"",manifestUri:u};if(i==="/")return{modelBaseUri:"/",manifestUri:`/${u}`};let h=i.startsWith("http://")?"http://":i.startsWith("https://")?"https://":"";i=i.replace(h,"");let d=i.split("/").filter(w=>w),g=i.endsWith(".json")?d[d.length-1]:u,x=h+(i.endsWith(".json")?d.slice(0,d.length-1):d).join("/");return x=i.startsWith("/")?`/${x}`:x,{modelBaseUri:x,manifestUri:x==="/"?`/${g}`:`${x}/${g}`}}async function $k(i,a){let{manifestUri:u,modelBaseUri:h}=ib(i,a),d=await Sk(u);return fR.io.loadWeights(d,h)}function brt(i,a,u=!1){let{width:h,height:d}=u?Sa(a):a;return i.width=h,i.height=d,{width:h,height:d}}var Li=te(ee()),qs=te(ee()),Gn=class{constructor(a){this._name=a;this._params=void 0;this._paramMappings=[]}get params(){return this._params}get paramMappings(){return this._paramMappings}get isLoaded(){return!!this.params}getParamFromPath(a){let{obj:u,objProp:h}=this.traversePropertyPath(a);return u[h]}reassignParamFromPath(a,u){let{obj:h,objProp:d}=this.traversePropertyPath(a);h[d].dispose(),h[d]=u}getParamList(){return this._paramMappings.map(({paramPath:a})=>({path:a,tensor:this.getParamFromPath(a)}))}getTrainableParams(){return this.getParamList().filter(a=>a.tensor instanceof qs.Variable)}getFrozenParams(){return this.getParamList().filter(a=>!(a.tensor instanceof qs.Variable))}variable(){this.getFrozenParams().forEach(({path:a,tensor:u})=>{this.reassignParamFromPath(a,u.variable())})}freeze(){this.getTrainableParams().forEach(({path:a,tensor:u})=>{let h=qs.tensor(u.dataSync());u.dispose(),this.reassignParamFromPath(a,h)})}dispose(a=!0){this.getParamList().forEach(u=>{if(a&&u.tensor.isDisposed)throw new Error(`param tensor has already been disposed for path ${u.path}`);u.tensor.dispose()}),this._params=void 0}serializeParams(){return new Float32Array(this.getParamList().map(({tensor:a})=>Array.from(a.dataSync())).reduce((a,u)=>a.concat(u)))}async load(a){if(a instanceof Float32Array){this.extractWeights(a);return}await this.loadFromUri(a)}async loadFromUri(a){if(a&&typeof a!="string")throw new Error(`${this._name}.loadFromUri - expected model uri`);let u=await $k(a,this.getDefaultModelName());this.loadFromWeightMap(u)}async loadFromDisk(a){if(a&&typeof a!="string")throw new Error(`${this._name}.loadFromDisk - expected model file path`);let{readFile:u}=$e.getEnv(),{manifestUri:h,modelBaseUri:d}=ib(a,this.getDefaultModelName()),g=C=>Promise.all(C.map($=>u($).then(F=>F.buffer))),x=qs.io.weightsLoaderFactory(g),w=JSON.parse((await u(h)).toString()),k=await 
x(w,d);this.loadFromWeightMap(k)}loadFromWeightMap(a){let{paramMappings:u,params:h}=this.extractParamsFromWeigthMap(a);this._paramMappings=u,this._params=h}extractWeights(a){let{paramMappings:u,params:h}=this.extractParams(a);this._paramMappings=u,this._params=h}traversePropertyPath(a){if(!this.params)throw new Error("traversePropertyPath - model has no loaded params");let u=a.split("/").reduce((g,x)=>{if(!g.nextObj.hasOwnProperty(x))throw new Error(`traversePropertyPath - object does not have property ${x}, for path ${a}`);return{obj:g.nextObj,objProp:x,nextObj:g.nextObj[x]}},{nextObj:this.params}),{obj:h,objProp:d}=u;if(!h||!d||!(h[d]instanceof qs.Tensor))throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${a}`);return{obj:h,objProp:d}}},qe=te(ee()),Ol=te(ee());function yr(i,a,u){return Ol.tidy(()=>{let h=Ol.separableConv2d(i,a.depthwise_filter,a.pointwise_filter,u,"same");return h=Ol.add(h,a.bias),h})}function ab(i,a,u=!1){return qe.tidy(()=>{let h=qe.relu(u?qe.add(qe.conv2d(i,a.conv0.filters,[2,2],"same"),a.conv0.bias):yr(i,a.conv0,[2,2])),d=yr(h,a.conv1,[1,1]),g=qe.relu(qe.add(h,d)),x=yr(g,a.conv2,[1,1]);return qe.relu(qe.add(h,qe.add(d,x)))})}function qh(i,a,u=!1,h=!0){return qe.tidy(()=>{let d=qe.relu(u?qe.add(qe.conv2d(i,a.conv0.filters,h?[2,2]:[1,1],"same"),a.conv0.bias):yr(i,a.conv0,h?[2,2]:[1,1])),g=yr(d,a.conv1,[1,1]),x=qe.relu(qe.add(d,g)),w=yr(x,a.conv2,[1,1]),k=qe.relu(qe.add(d,qe.add(g,w))),C=yr(k,a.conv3,[1,1]);return qe.relu(qe.add(d,qe.add(g,qe.add(w,C))))})}var Oi=te(ee());function Ya(i,a,u="same",h=!1){return Oi.tidy(()=>{let d=Oi.add(Oi.conv2d(i,a.filters,[1,1],u),a.bias);return h?Oi.relu(d):d})}function tr(i,a){Object.keys(i).forEach(u=>{a.some(h=>h.originalPath===u)||i[u].dispose()})}var cb=te(ee());function Ll(i,a){return function(u,h,d,g){let x=cb.tensor4d(i(u*h*d*d),[d,d,u,h]),w=cb.tensor1d(i(h));return a.push({paramPath:`${g}/filters`},{paramPath:`${g}/bias`}),{filters:x,bias:w}}}var lb=te(ee());function ub(i,a){return function(u,h,d){let g=lb.tensor2d(i(u*h),[u,h]),x=lb.tensor1d(i(h));return a.push({paramPath:`${d}/weights`},{paramPath:`${d}/bias`}),{weights:g,bias:x}}}var Hh=te(ee()),pb=class{constructor(a,u,h){this.depthwise_filter=a;this.pointwise_filter=u;this.bias=h}};function Ml(i,a){return function(u,h,d){let g=Hh.tensor4d(i(3*3*u),[3,3,u,1]),x=Hh.tensor4d(i(u*h),[1,1,u,h]),w=Hh.tensor1d(i(h));return a.push({paramPath:`${d}/depthwise_filter`},{paramPath:`${d}/pointwise_filter`},{paramPath:`${d}/bias`}),new pb(g,x,w)}}function Bl(i){return function(a){let u=i(`${a}/depthwise_filter`,4),h=i(`${a}/pointwise_filter`,4),d=i(`${a}/bias`,1);return new pb(u,h,d)}}function Sr(i,a){return function(u,h,d){let g=i[u];if(!Ta(g,h))throw new Error(`expected weightMap[${u}] to be a Tensor${h}D, instead have ${g}`);return a.push({originalPath:u,paramPath:d||u}),g}}function er(i){let a=i;function u(d){let g=a.slice(0,d);return a=a.slice(d),g}function h(){return a}return{extractWeights:u,getRemainingWeights:h}}function hb(i,a){let u=Ll(i,a),h=Ml(i,a);function d(x,w,k,C=!1){let $=C?u(x,w,3,`${k}/conv0`):h(x,w,`${k}/conv0`),F=h(w,w,`${k}/conv1`),_=h(w,w,`${k}/conv2`);return{conv0:$,conv1:F,conv2:_}}function g(x,w,k,C=!1){let{conv0:$,conv1:F,conv2:_}=d(x,w,k,C),W=h(w,w,`${k}/conv3`);return{conv0:$,conv1:F,conv2:_,conv3:W}}return{extractDenseBlock3Params:d,extractDenseBlock4Params:g}}function dR(i){let 
a=[],{extractWeights:u,getRemainingWeights:h}=er(i),{extractDenseBlock4Params:d}=hb(u,a),g=d(3,32,"dense0",!0),x=d(32,64,"dense1"),w=d(64,128,"dense2"),k=d(128,256,"dense3");if(h().length!==0)throw new Error(`weights remaing after extract: ${h().length}`);return{paramMappings:a,params:{dense0:g,dense1:x,dense2:w,dense3:k}}}function fb(i){return function(a){let u=i(`${a}/filters`,4),h=i(`${a}/bias`,1);return{filters:u,bias:h}}}function db(i,a){let u=Sr(i,a),h=fb(u),d=Bl(u);function g(w,k=!1){let C=k?h(`${w}/conv0`):d(`${w}/conv0`),$=d(`${w}/conv1`),F=d(`${w}/conv2`);return{conv0:C,conv1:$,conv2:F}}function x(w,k=!1){let C=k?h(`${w}/conv0`):d(`${w}/conv0`),$=d(`${w}/conv1`),F=d(`${w}/conv2`),_=d(`${w}/conv3`);return{conv0:C,conv1:$,conv2:F,conv3:_}}return{extractDenseBlock3Params:g,extractDenseBlock4Params:x}}function mR(i){let a=[],{extractDenseBlock4Params:u}=db(i,a),h={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2"),dense3:u("dense3")};return tr(i,a),{params:h,paramMappings:a}}var jh=class extends Gn{constructor(){super("FaceFeatureExtractor")}forwardInput(a){let{params:u}=this;if(!u)throw new Error("FaceFeatureExtractor - load model before inference");return Li.tidy(()=>{let h=Li.cast(a.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],g=Io(h,d).div(Li.scalar(255)),x=qh(g,u.dense0,!0);return x=qh(x,u.dense1),x=qh(x,u.dense2),x=qh(x,u.dense3),x=Li.avgPool(x,[7,7],[2,2],"valid"),x})}async forward(a){return this.forwardInput(await Ve(a))}getDefaultModelName(){return"face_feature_extractor_model"}extractParamsFromWeigthMap(a){return mR(a)}extractParams(a){return dR(a)}},bR=te(ee()),zl=te(ee());function Kh(i,a){return zl.tidy(()=>zl.add(zl.matMul(i,a.weights),a.bias))}function gR(i,a,u){let h=[],{extractWeights:d,getRemainingWeights:g}=er(i),x=ub(d,h),w=x(a,u,"fc");if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{paramMappings:h,params:{fc:w}}}function yR(i){let a=[],u=Sr(i,a);function h(g){let x=u(`${g}/weights`,2),w=u(`${g}/bias`,1);return{weights:x,bias:w}}let d={fc:h("fc")};return tr(i,a),{params:d,paramMappings:a}}function mb(i){let a={},u={};return Object.keys(i).forEach(h=>{let d=h.startsWith("fc")?u:a;d[h]=i[h]}),{featureExtractorMap:a,classifierMap:u}}var Xh=class extends Gn{constructor(a,u){super(a);this._faceFeatureExtractor=u}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(a){let{params:u}=this;if(!u)throw new Error(`${this._name} - load model before inference`);return bR.tidy(()=>{let h=a instanceof Us?this.faceFeatureExtractor.forwardInput(a):a;return Kh(h.as2D(h.shape[0],-1),u.fc)})}dispose(a=!0){this.faceFeatureExtractor.dispose(a),super.dispose(a)}loadClassifierParams(a){let{params:u,paramMappings:h}=this.extractClassifierParams(a);this._params=u,this._paramMappings=h}extractClassifierParams(a){return gR(a,this.getClassifierChannelsIn(),this.getClassifierChannelsOut())}extractParamsFromWeigthMap(a){let{featureExtractorMap:u,classifierMap:h}=mb(a);return this.faceFeatureExtractor.loadFromWeightMap(u),yR(h)}extractParams(a){let u=this.getClassifierChannelsIn(),h=this.getClassifierChannelsOut(),d=h*u+h,g=a.slice(0,a.length-d),x=a.slice(a.length-d);return this.faceFeatureExtractor.extractWeights(g),this.extractClassifierParams(x)}},Ik=["neutral","happy","sad","angry","fearful","disgusted","surprised"],Mi=class{constructor(a){if(a.length!==7)throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: 
${a.length}`);Ik.forEach((u,h)=>{this[u]=a[h]})}asSortedArray(){return Ik.map(a=>({expression:a,probability:this[a]})).sort((a,u)=>u.probability-a.probability)}},gb=class extends Xh{constructor(a=new jh){super("FaceExpressionNet",a)}forwardInput(a){return Wl.tidy(()=>Wl.softmax(this.runNet(a)))}async forward(a){return this.forwardInput(await Ve(a))}async predictExpressions(a){let u=await Ve(a),h=await this.forwardInput(u),d=await Promise.all(Wl.unstack(h).map(async x=>{let w=await x.data();return x.dispose(),w}));h.dispose();let g=d.map(x=>new Mi(x));return u.isBatchInput?g:g[0]}getDefaultModelName(){return"face_expression_model"}getClassifierChannelsIn(){return 256}getClassifierChannelsOut(){return 7}};function Ek(i){return i.expressions instanceof Mi}function yb(i,a){let u={expressions:a};return Object.assign({},i,u)}function xrt(i,a,u=.1,h){let d=Array.isArray(a)?a:[a];d.forEach(g=>{let x=g instanceof Mi?g:Ek(g)?g.expressions:void 0;if(!x)throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof");let w=x.asSortedArray(),k=w.filter(F=>F.probability>u),C=Jo(g)?g.detection.box.bottomLeft:h||new Jt(0,0),$=new Ii(k.map(F=>`${F.expression} (${ka(F.probability)})`),C);$.draw(i)})}function Ja(i){return Jo(i)&&i.landmarks instanceof Wr&&i.unshiftedLandmarks instanceof Wr&&i.alignedRect instanceof Ue}function Vl(i,a){let{box:u}=i.detection,h=a.shiftBy(u.x,u.y),d=h.align(),{imageDims:g}=i.detection,x=new Ue(i.detection.score,d.rescale(g.reverse()),g),w={landmarks:h,unshiftedLandmarks:a,alignedRect:x};return Object.assign({},i,w)}var Dk=class{constructor(a={}){let{drawLines:u=!0,drawPoints:h=!0,lineWidth:d,lineColor:g,pointSize:x,pointColor:w}=a;this.drawLines=u,this.drawPoints=h,this.lineWidth=d||1,this.pointSize=x||2,this.lineColor=g||"rgba(0, 255, 255, 1)",this.pointColor=w||"rgba(255, 0, 255, 1)"}},Ak=class{constructor(a,u={}){this.faceLandmarks=a,this.options=new Dk(u)}draw(a){let u=dr(a),{drawLines:h,drawPoints:d,lineWidth:g,lineColor:x,pointSize:w,pointColor:k}=this.options;if(h&&this.faceLandmarks instanceof bl&&(u.strokeStyle=x,u.lineWidth=g,Es(u,this.faceLandmarks.getJawOutline()),Es(u,this.faceLandmarks.getLeftEyeBrow()),Es(u,this.faceLandmarks.getRightEyeBrow()),Es(u,this.faceLandmarks.getNose()),Es(u,this.faceLandmarks.getLeftEye(),!0),Es(u,this.faceLandmarks.getRightEye(),!0),Es(u,this.faceLandmarks.getMouth(),!0)),d){u.strokeStyle=k,u.fillStyle=k;let C=$=>{u.beginPath(),u.arc($.x,$.y,w,0,2*Math.PI),u.fill()};this.faceLandmarks.positions.forEach(C)}}};function wrt(i,a){let u=Array.isArray(a)?a:[a];u.forEach(h=>{let d=h instanceof Wr?h:Ja(h)?h.landmarks:void 0;if(!d)throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof");new Ak(d).draw(i)})}var ss=te(ee()),pn=te(ee());function vrt(i,a){let u=Ll(i,a),h=Ml(i,a);function d(x,w,k){let C=h(x,w,`${k}/separable_conv0`),$=h(w,w,`${k}/separable_conv1`),F=u(x,w,1,`${k}/expansion_conv`);return{separable_conv0:C,separable_conv1:$,expansion_conv:F}}function g(x,w){let k=h(x,x,`${w}/separable_conv0`),C=h(x,x,`${w}/separable_conv1`),$=h(x,x,`${w}/separable_conv2`);return{separable_conv0:k,separable_conv1:C,separable_conv2:$}}return{extractConvParams:u,extractSeparableConvParams:h,extractReductionBlockParams:d,extractMainBlockParams:g}}function xR(i,a){let 
u=[],{extractWeights:h,getRemainingWeights:d}=er(i),{extractConvParams:g,extractSeparableConvParams:x,extractReductionBlockParams:w,extractMainBlockParams:k}=vrt(h,u),C=g(3,32,3,"entry_flow/conv_in"),$=w(32,64,"entry_flow/reduction_block_0"),F=w(64,128,"entry_flow/reduction_block_1"),_={conv_in:C,reduction_block_0:$,reduction_block_1:F},W={};Xo(a,0,1).forEach(mt=>{W[`main_block_${mt}`]=k(128,`middle_flow/main_block_${mt}`)});let et=w(128,256,"exit_flow/reduction_block"),tt=x(256,512,"exit_flow/separable_conv"),G={reduction_block:et,separable_conv:tt};if(d().length!==0)throw new Error(`weights remaing after extract: ${d().length}`);return{paramMappings:u,params:{entry_flow:_,middle_flow:W,exit_flow:G}}}function Trt(i,a){let u=Sr(i,a),h=fb(u),d=Bl(u);function g(w){let k=d(`${w}/separable_conv0`),C=d(`${w}/separable_conv1`),$=h(`${w}/expansion_conv`);return{separable_conv0:k,separable_conv1:C,expansion_conv:$}}function x(w){let k=d(`${w}/separable_conv0`),C=d(`${w}/separable_conv1`),$=d(`${w}/separable_conv2`);return{separable_conv0:k,separable_conv1:C,separable_conv2:$}}return{extractConvParams:h,extractSeparableConvParams:d,extractReductionBlockParams:g,extractMainBlockParams:x}}function wR(i,a){let u=[],{extractConvParams:h,extractSeparableConvParams:d,extractReductionBlockParams:g,extractMainBlockParams:x}=Trt(i,u),w=h("entry_flow/conv_in"),k=g("entry_flow/reduction_block_0"),C=g("entry_flow/reduction_block_1"),$={conv_in:w,reduction_block_0:k,reduction_block_1:C},F={};Xo(a,0,1).forEach(tt=>{F[`main_block_${tt}`]=x(`middle_flow/main_block_${tt}`)});let _=g("exit_flow/reduction_block"),W=d("exit_flow/separable_conv"),et={reduction_block:_,separable_conv:W};return tr(i,u),{params:{entry_flow:$,middle_flow:F,exit_flow:et},paramMappings:u}}function vR(i,a,u){return pn.add(pn.conv2d(i,a.filters,u,"same"),a.bias)}function Rk(i,a,u=!0){let h=u?pn.relu(i):i;return h=yr(h,a.separable_conv0,[1,1]),h=yr(pn.relu(h),a.separable_conv1,[1,1]),h=pn.maxPool(h,[3,3],[2,2],"same"),h=pn.add(h,vR(i,a.expansion_conv,[2,2])),h}function krt(i,a){let u=yr(pn.relu(i),a.separable_conv0,[1,1]);return u=yr(pn.relu(u),a.separable_conv1,[1,1]),u=yr(pn.relu(u),a.separable_conv2,[1,1]),u=pn.add(u,i),u}var Pk=class extends Gn{constructor(a){super("TinyXception");this._numMainBlocks=a}forwardInput(a){let{params:u}=this;if(!u)throw new Error("TinyXception - load model before inference");return pn.tidy(()=>{let h=pn.cast(a.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],g=Io(h,d).div(pn.scalar(256)),x=pn.relu(vR(g,u.entry_flow.conv_in,[2,2]));return x=Rk(x,u.entry_flow.reduction_block_0,!1),x=Rk(x,u.entry_flow.reduction_block_1),Xo(this._numMainBlocks,0,1).forEach(w=>{x=krt(x,u.middle_flow[`main_block_${w}`])}),x=Rk(x,u.exit_flow.reduction_block),x=pn.relu(yr(x,u.exit_flow.separable_conv,[1,1])),x})}async forward(a){return this.forwardInput(await Ve(a))}getDefaultModelName(){return"tiny_xception_model"}extractParamsFromWeigthMap(a){return wR(a,this._numMainBlocks)}extractParams(a){return xR(a,this._numMainBlocks)}};function TR(i){let a=[],{extractWeights:u,getRemainingWeights:h}=er(i),d=ub(u,a),g=d(512,1,"fc/age"),x=d(512,2,"fc/gender");if(h().length!==0)throw new Error(`weights remaing after extract: ${h().length}`);return{paramMappings:a,params:{fc:{age:g,gender:x}}}}function kR(i){let a=[],u=Sr(i,a);function h(g){let x=u(`${g}/weights`,2),w=u(`${g}/bias`,1);return{weights:x,bias:w}}let d={fc:{age:h("fc/age"),gender:h("fc/gender")}};return tr(i,a),{params:d,paramMappings:a}}var 
Hs;(function(i){i.FEMALE="female",i.MALE="male"})(Hs||(Hs={}));var bb=class extends Gn{constructor(a=new Pk(2)){super("AgeGenderNet");this._faceFeatureExtractor=a}get faceFeatureExtractor(){return this._faceFeatureExtractor}runNet(a){let{params:u}=this;if(!u)throw new Error(`${this._name} - load model before inference`);return ss.tidy(()=>{let h=a instanceof Us?this.faceFeatureExtractor.forwardInput(a):a,d=ss.avgPool(h,[7,7],[2,2],"valid").as2D(h.shape[0],-1),g=Kh(d,u.fc.age).as1D(),x=Kh(d,u.fc.gender);return{age:g,gender:x}})}forwardInput(a){return ss.tidy(()=>{let{age:u,gender:h}=this.runNet(a);return{age:u,gender:ss.softmax(h)}})}async forward(a){return this.forwardInput(await Ve(a))}async predictAgeAndGender(a){let u=await Ve(a),h=await this.forwardInput(u),d=ss.unstack(h.age),g=ss.unstack(h.gender),x=d.map((k,C)=>({ageTensor:k,genderTensor:g[C]})),w=await Promise.all(x.map(async({ageTensor:k,genderTensor:C})=>{let $=(await k.data())[0],F=(await C.data())[0],_=F>.5,W=_?Hs.MALE:Hs.FEMALE,et=_?F:1-F;return k.dispose(),C.dispose(),{age:$,gender:W,genderProbability:et}}));return h.age.dispose(),h.gender.dispose(),u.isBatchInput?w:w[0]}getDefaultModelName(){return"age_gender_model"}dispose(a=!0){this.faceFeatureExtractor.dispose(a),super.dispose(a)}loadClassifierParams(a){let{params:u,paramMappings:h}=this.extractClassifierParams(a);this._params=u,this._paramMappings=h}extractClassifierParams(a){return TR(a)}extractParamsFromWeigthMap(a){let{featureExtractorMap:u,classifierMap:h}=mb(a);return this.faceFeatureExtractor.loadFromWeightMap(u),kR(h)}extractParams(a){let u=512*1+1+(512*2+2),h=a.slice(0,a.length-u),d=a.slice(a.length-u);return this.faceFeatureExtractor.extractWeights(h),this.extractClassifierParams(d)}};var $r=te(ee()),Yh=class extends Xh{postProcess(a,u,h){let d=h.map(({width:x,height:w})=>{let k=u/Math.max(w,x);return{width:x*k,height:w*k}}),g=d.length;return $r.tidy(()=>{let x=(F,_)=>$r.stack([$r.fill([68],F,"float32"),$r.fill([68],_,"float32")],1).as2D(1,136).as1D(),w=(F,_)=>{let{width:W,height:et}=d[F];return _(W,et)?Math.abs(W-et)/2:0},k=F=>w(F,(_,W)=>_w(F,(_,W)=>W<_),$=a.mul($r.fill([g,136],u,"float32")).sub($r.stack(Array.from(Array(g),(F,_)=>x(k(_),C(_))))).div($r.stack(Array.from(Array(g),(F,_)=>x(d[_].width,d[_].height))));return $})}forwardInput(a){return $r.tidy(()=>{let u=this.runNet(a);return this.postProcess(u,a.inputSize,a.inputDimensions.map(([h,d])=>({height:h,width:d})))})}async forward(a){return this.forwardInput(await Ve(a))}async detectLandmarks(a){let u=await Ve(a),h=$r.tidy(()=>$r.unstack(this.forwardInput(u))),d=await Promise.all(h.map(async(g,x)=>{let w=Array.from(await g.data()),k=w.filter(($,F)=>Xm(F)),C=w.filter(($,F)=>!Xm(F));return new bl(Array(68).fill(0).map(($,F)=>new Jt(k[F],C[F])),{height:u.getInputHeight(x),width:u.getInputWidth(x)})}));return h.forEach(g=>g.dispose()),u.isBatchInput?d:d[0]}getClassifierChannelsOut(){return 136}},Gl=class extends Yh{constructor(a=new jh){super("FaceLandmark68Net",a)}getDefaultModelName(){return"face_landmark_68_model"}getClassifierChannelsIn(){return 256}};var Bi=te(ee());function NR(i){let a=[],{extractDenseBlock3Params:u}=db(i,a),h={dense0:u("dense0",!0),dense1:u("dense1"),dense2:u("dense2")};return tr(i,a),{params:h,paramMappings:a}}function _R(i){let a=[],{extractWeights:u,getRemainingWeights:h}=er(i),{extractDenseBlock3Params:d}=hb(u,a),g=d(3,32,"dense0",!0),x=d(32,64,"dense1"),w=d(64,128,"dense2");if(h().length!==0)throw new Error(`weights remaing after extract: 
${h().length}`);return{paramMappings:a,params:{dense0:g,dense1:x,dense2:w}}}var Ok=class extends Gn{constructor(){super("TinyFaceFeatureExtractor")}forwardInput(a){let{params:u}=this;if(!u)throw new Error("TinyFaceFeatureExtractor - load model before inference");return Bi.tidy(()=>{let h=Bi.cast(a.toBatchTensor(112,!0),"float32"),d=[122.782,117.001,104.298],g=Io(h,d).div(Bi.scalar(255)),x=ab(g,u.dense0,!0);return x=ab(x,u.dense1),x=ab(x,u.dense2),x=Bi.avgPool(x,[14,14],[2,2],"valid"),x})}async forward(a){return this.forwardInput(await Ve(a))}getDefaultModelName(){return"face_feature_extractor_tiny_model"}extractParamsFromWeigthMap(a){return NR(a)}extractParams(a){return _R(a)}},xb=class extends Yh{constructor(a=new Ok){super("FaceLandmark68TinyNet",a)}getDefaultModelName(){return"face_landmark_68_tiny_model"}getClassifierChannelsIn(){return 128}},CR=class extends Gl{};var jr=te(ee()),Ul=te(ee()),wb=te(ee());function SR(i,a){return wb.add(wb.mul(i,a.weights),a.biases)}function Lk(i,a,u,h,d="same"){let{filters:g,bias:x}=a.conv,w=Ul.conv2d(i,g,u,d);return w=Ul.add(w,x),w=SR(w,a.scale),h?Ul.relu(w):w}function $R(i,a){return Lk(i,a,[1,1],!0)}function Mk(i,a){return Lk(i,a,[1,1],!1)}function vb(i,a){return Lk(i,a,[2,2],!0,"valid")}var Ir=te(ee());function Nrt(i,a){function u(w,k,C){let $=i(w),F=$.length/(k*C*C);if(I1(F))throw new Error(`depth has to be an integer: ${F}, weights.length: ${$.length}, numFilters: ${k}, filterSize: ${C}`);return Ir.tidy(()=>Ir.transpose(Ir.tensor4d($,[k,F,C,C]),[2,3,1,0]))}function h(w,k,C,$){let F=u(w,k,C),_=Ir.tensor1d(i(k));return a.push({paramPath:`${$}/filters`},{paramPath:`${$}/bias`}),{filters:F,bias:_}}function d(w,k){let C=Ir.tensor1d(i(w)),$=Ir.tensor1d(i(w));return a.push({paramPath:`${k}/weights`},{paramPath:`${k}/biases`}),{weights:C,biases:$}}function g(w,k,C,$){let F=h(w,k,C,`${$}/conv`),_=d(k,`${$}/scale`);return{conv:F,scale:_}}function x(w,k,C,$,F=!1){let _=g((F?.5:1)*w,k,C,`${$}/conv1`),W=g(w,k,C,`${$}/conv2`);return{conv1:_,conv2:W}}return{extractConvLayerParams:g,extractResidualLayerParams:x}}function IR(i){let{extractWeights:a,getRemainingWeights:u}=er(i),h=[],{extractConvLayerParams:d,extractResidualLayerParams:g}=Nrt(a,h),x=d(4704,32,7,"conv32_down"),w=g(9216,32,3,"conv32_1"),k=g(9216,32,3,"conv32_2"),C=g(9216,32,3,"conv32_3"),$=g(36864,64,3,"conv64_down",!0),F=g(36864,64,3,"conv64_1"),_=g(36864,64,3,"conv64_2"),W=g(36864,64,3,"conv64_3"),et=g(147456,128,3,"conv128_down",!0),tt=g(147456,128,3,"conv128_1"),G=g(147456,128,3,"conv128_2"),mt=g(589824,256,3,"conv256_down",!0),lt=g(589824,256,3,"conv256_1"),gt=g(589824,256,3,"conv256_2"),_t=g(589824,256,3,"conv256_down_out"),Gt=Ir.tidy(()=>Ir.transpose(Ir.tensor2d(a(256*128),[128,256]),[1,0]));if(h.push({paramPath:"fc"}),u().length!==0)throw new Error(`weights remaing after extract: ${u().length}`);let se={conv32_down:x,conv32_1:w,conv32_2:k,conv32_3:C,conv64_down:$,conv64_1:F,conv64_2:_,conv64_3:W,conv128_down:et,conv128_1:tt,conv128_2:G,conv256_down:mt,conv256_1:lt,conv256_2:gt,conv256_down_out:_t,fc:Gt};return{params:se,paramMappings:h}}function _rt(i,a){let u=Sr(i,a);function h(x){let w=u(`${x}/scale/weights`,1),k=u(`${x}/scale/biases`,1);return{weights:w,biases:k}}function d(x){let w=u(`${x}/conv/filters`,4),k=u(`${x}/conv/bias`,1),C=h(x);return{conv:{filters:w,bias:k},scale:C}}function g(x){return{conv1:d(`${x}/conv1`),conv2:d(`${x}/conv2`)}}return{extractConvLayerParams:d,extractResidualLayerParams:g}}function ER(i){let 
a=[],{extractConvLayerParams:u,extractResidualLayerParams:h}=_rt(i,a),d=u("conv32_down"),g=h("conv32_1"),x=h("conv32_2"),w=h("conv32_3"),k=h("conv64_down"),C=h("conv64_1"),$=h("conv64_2"),F=h("conv64_3"),_=h("conv128_down"),W=h("conv128_1"),et=h("conv128_2"),tt=h("conv256_down"),G=h("conv256_1"),mt=h("conv256_2"),lt=h("conv256_down_out"),gt=i.fc;if(a.push({originalPath:"fc",paramPath:"fc"}),!$1(gt))throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${gt}`);let _t={conv32_down:d,conv32_1:g,conv32_2:x,conv32_3:w,conv64_down:k,conv64_1:C,conv64_2:$,conv64_3:F,conv128_down:_,conv128_1:W,conv128_2:et,conv256_down:tt,conv256_1:G,conv256_2:mt,conv256_down_out:lt,fc:gt};return tr(i,a),{params:_t,paramMappings:a}}var nr=te(ee());function Fo(i,a){let u=$R(i,a.conv1);return u=Mk(u,a.conv2),u=nr.add(u,i),u=nr.relu(u),u}function Jh(i,a){let u=vb(i,a.conv1);u=Mk(u,a.conv2);let h=nr.avgPool(i,2,2,"valid"),d=nr.zeros(h.shape),g=h.shape[3]!==u.shape[3],x=h.shape[1]!==u.shape[1]||h.shape[2]!==u.shape[2];if(x){let w=[...u.shape];w[1]=1;let k=nr.zeros(w);u=nr.concat([u,k],1);let C=[...u.shape];C[2]=1;let $=nr.zeros(C);u=nr.concat([u,$],2)}return h=g?nr.concat([h,d],3):h,u=nr.add(h,u),u=nr.relu(u),u}var ql=class extends Gn{constructor(){super("FaceRecognitionNet")}forwardInput(a){let{params:u}=this;if(!u)throw new Error("FaceRecognitionNet - load model before inference");return jr.tidy(()=>{let h=jr.cast(a.toBatchTensor(150,!0),"float32"),d=[122.782,117.001,104.298],g=Io(h,d).div(jr.scalar(256)),x=vb(g,u.conv32_down);x=jr.maxPool(x,3,2,"valid"),x=Fo(x,u.conv32_1),x=Fo(x,u.conv32_2),x=Fo(x,u.conv32_3),x=Jh(x,u.conv64_down),x=Fo(x,u.conv64_1),x=Fo(x,u.conv64_2),x=Fo(x,u.conv64_3),x=Jh(x,u.conv128_down),x=Fo(x,u.conv128_1),x=Fo(x,u.conv128_2),x=Jh(x,u.conv256_down),x=Fo(x,u.conv256_1),x=Fo(x,u.conv256_2),x=Jh(x,u.conv256_down_out);let w=x.mean([1,2]),k=jr.matMul(w,u.fc);return k})}async forward(a){return this.forwardInput(await Ve(a))}async computeFaceDescriptor(a){let u=await Ve(a),h=jr.tidy(()=>jr.unstack(this.forwardInput(u))),d=await Promise.all(h.map(g=>g.data()));return h.forEach(g=>g.dispose()),u.isBatchInput?d:d[0]}getDefaultModelName(){return"face_recognition_model"}extractParamsFromWeigthMap(a){return ER(a)}extractParams(a){return IR(a)}};function Crt(i){let a=new ql;return a.extractWeights(i),a}function Tb(i,a){let u={descriptor:a};return Object.assign({},i,u)}function Srt(i){return typeof i.age=="number"}function kb(i,a){let u={age:a};return Object.assign({},i,u)}function $rt(i){return(i.gender===Hs.MALE||i.gender===Hs.FEMALE)&&ml(i.genderProbability)}function Nb(i,a,u){let h={gender:a,genderProbability:u};return Object.assign({},i,h)}var Po=te(ee()),Ro=te(ee());function Irt(i,a){function u(k,C){let $=Ro.tensor4d(i(3*3*k),[3,3,k,1]),F=Ro.tensor1d(i(k)),_=Ro.tensor1d(i(k)),W=Ro.tensor1d(i(k)),et=Ro.tensor1d(i(k));return a.push({paramPath:`${C}/filters`},{paramPath:`${C}/batch_norm_scale`},{paramPath:`${C}/batch_norm_offset`},{paramPath:`${C}/batch_norm_mean`},{paramPath:`${C}/batch_norm_variance`}),{filters:$,batch_norm_scale:F,batch_norm_offset:_,batch_norm_mean:W,batch_norm_variance:et}}function h(k,C,$,F,_){let W=Ro.tensor4d(i(k*C*$*$),[$,$,k,C]),et=Ro.tensor1d(i(C));return a.push({paramPath:`${F}/filters`},{paramPath:`${F}/${_?"batch_norm_offset":"bias"}`}),{filters:W,bias:et}}function d(k,C,$,F){let{filters:_,bias:W}=h(k,C,$,F,!0);return{filters:_,batch_norm_offset:W}}function g(k,C,$){let 
F=u(k,`${$}/depthwise_conv`),_=d(k,C,1,`${$}/pointwise_conv`);return{depthwise_conv:F,pointwise_conv:_}}function x(){let k=d(3,32,3,"mobilenetv1/conv_0"),C=g(32,64,"mobilenetv1/conv_1"),$=g(64,128,"mobilenetv1/conv_2"),F=g(128,128,"mobilenetv1/conv_3"),_=g(128,256,"mobilenetv1/conv_4"),W=g(256,256,"mobilenetv1/conv_5"),et=g(256,512,"mobilenetv1/conv_6"),tt=g(512,512,"mobilenetv1/conv_7"),G=g(512,512,"mobilenetv1/conv_8"),mt=g(512,512,"mobilenetv1/conv_9"),lt=g(512,512,"mobilenetv1/conv_10"),gt=g(512,512,"mobilenetv1/conv_11"),_t=g(512,1024,"mobilenetv1/conv_12"),Gt=g(1024,1024,"mobilenetv1/conv_13");return{conv_0:k,conv_1:C,conv_2:$,conv_3:F,conv_4:_,conv_5:W,conv_6:et,conv_7:tt,conv_8:G,conv_9:mt,conv_10:lt,conv_11:gt,conv_12:_t,conv_13:Gt}}function w(){let k=d(1024,256,1,"prediction_layer/conv_0"),C=d(256,512,3,"prediction_layer/conv_1"),$=d(512,128,1,"prediction_layer/conv_2"),F=d(128,256,3,"prediction_layer/conv_3"),_=d(256,128,1,"prediction_layer/conv_4"),W=d(128,256,3,"prediction_layer/conv_5"),et=d(256,64,1,"prediction_layer/conv_6"),tt=d(64,128,3,"prediction_layer/conv_7"),G=h(512,12,1,"prediction_layer/box_predictor_0/box_encoding_predictor"),mt=h(512,9,1,"prediction_layer/box_predictor_0/class_predictor"),lt=h(1024,24,1,"prediction_layer/box_predictor_1/box_encoding_predictor"),gt=h(1024,18,1,"prediction_layer/box_predictor_1/class_predictor"),_t=h(512,24,1,"prediction_layer/box_predictor_2/box_encoding_predictor"),Gt=h(512,18,1,"prediction_layer/box_predictor_2/class_predictor"),se=h(256,24,1,"prediction_layer/box_predictor_3/box_encoding_predictor"),fe=h(256,18,1,"prediction_layer/box_predictor_3/class_predictor"),_e=h(256,24,1,"prediction_layer/box_predictor_4/box_encoding_predictor"),Ge=h(256,18,1,"prediction_layer/box_predictor_4/class_predictor"),Vt=h(128,24,1,"prediction_layer/box_predictor_5/box_encoding_predictor"),ln=h(128,18,1,"prediction_layer/box_predictor_5/class_predictor"),Ce={box_encoding_predictor:G,class_predictor:mt},rr={box_encoding_predictor:lt,class_predictor:gt},Ys={box_encoding_predictor:_t,class_predictor:Gt},Js={box_encoding_predictor:se,class_predictor:fe},sc={box_encoding_predictor:_e,class_predictor:Ge},gn={box_encoding_predictor:Vt,class_predictor:ln};return{conv_0:k,conv_1:C,conv_2:$,conv_3:F,conv_4:_,conv_5:W,conv_6:et,conv_7:tt,box_predictor_0:Ce,box_predictor_1:rr,box_predictor_2:Ys,box_predictor_3:Js,box_predictor_4:sc,box_predictor_5:gn}}return{extractMobilenetV1Params:x,extractPredictionLayerParams:w}}function DR(i){let a=[],{extractWeights:u,getRemainingWeights:h}=er(i),{extractMobilenetV1Params:d,extractPredictionLayerParams:g}=Irt(u,a),x=d(),w=g(),k=Ro.tensor3d(u(5118*4),[1,5118,4]),C={extra_dim:k};if(a.push({paramPath:"output_layer/extra_dim"}),h().length!==0)throw new Error(`weights remaing after extract: ${h().length}`);return{params:{mobilenetv1:x,prediction_layer:w,output_layer:C},paramMappings:a}}function Ert(i,a){let u=Sr(i,a);function h(C,$,F){let _=u(`${C}/Conv2d_${$}_pointwise/weights`,4,`${F}/filters`),W=u(`${C}/Conv2d_${$}_pointwise/convolution_bn_offset`,1,`${F}/batch_norm_offset`);return{filters:_,batch_norm_offset:W}}function d(C){let 
$=`mobilenetv1/conv_${C}`,F=`MobilenetV1/Conv2d_${C}_depthwise`,_=`${$}/depthwise_conv`,W=`${$}/pointwise_conv`,et=u(`${F}/depthwise_weights`,4,`${_}/filters`),tt=u(`${F}/BatchNorm/gamma`,1,`${_}/batch_norm_scale`),G=u(`${F}/BatchNorm/beta`,1,`${_}/batch_norm_offset`),mt=u(`${F}/BatchNorm/moving_mean`,1,`${_}/batch_norm_mean`),lt=u(`${F}/BatchNorm/moving_variance`,1,`${_}/batch_norm_variance`);return{depthwise_conv:{filters:et,batch_norm_scale:tt,batch_norm_offset:G,batch_norm_mean:mt,batch_norm_variance:lt},pointwise_conv:h("MobilenetV1",C,W)}}function g(){return{conv_0:h("MobilenetV1",0,"mobilenetv1/conv_0"),conv_1:d(1),conv_2:d(2),conv_3:d(3),conv_4:d(4),conv_5:d(5),conv_6:d(6),conv_7:d(7),conv_8:d(8),conv_9:d(9),conv_10:d(10),conv_11:d(11),conv_12:d(12),conv_13:d(13)}}function x(C,$){let F=u(`${C}/weights`,4,`${$}/filters`),_=u(`${C}/biases`,1,`${$}/bias`);return{filters:F,bias:_}}function w(C){let $=x(`Prediction/BoxPredictor_${C}/BoxEncodingPredictor`,`prediction_layer/box_predictor_${C}/box_encoding_predictor`),F=x(`Prediction/BoxPredictor_${C}/ClassPredictor`,`prediction_layer/box_predictor_${C}/class_predictor`);return{box_encoding_predictor:$,class_predictor:F}}function k(){return{conv_0:h("Prediction",0,"prediction_layer/conv_0"),conv_1:h("Prediction",1,"prediction_layer/conv_1"),conv_2:h("Prediction",2,"prediction_layer/conv_2"),conv_3:h("Prediction",3,"prediction_layer/conv_3"),conv_4:h("Prediction",4,"prediction_layer/conv_4"),conv_5:h("Prediction",5,"prediction_layer/conv_5"),conv_6:h("Prediction",6,"prediction_layer/conv_6"),conv_7:h("Prediction",7,"prediction_layer/conv_7"),box_predictor_0:w(0),box_predictor_1:w(1),box_predictor_2:w(2),box_predictor_3:w(3),box_predictor_4:w(4),box_predictor_5:w(5)}}return{extractMobilenetV1Params:g,extractPredictionLayerParams:k}}function AR(i){let a=[],{extractMobilenetV1Params:u,extractPredictionLayerParams:h}=Ert(i,a),d=i["Output/extra_dim"];if(a.push({originalPath:"Output/extra_dim",paramPath:"output_layer/extra_dim"}),!Ds(d))throw new Error(`expected weightMap['Output/extra_dim'] to be a Tensor3D, instead have ${d}`);let g={mobilenetv1:u(),prediction_layer:h(),output_layer:{extra_dim:d}};return tr(i,a),{params:g,paramMappings:a}}var js=te(ee()),zi=te(ee());function io(i,a,u){return zi.tidy(()=>{let h=zi.conv2d(i,a.filters,u,"same");return h=zi.add(h,a.batch_norm_offset),zi.clipByValue(h,0,6)})}var Drt=.0010000000474974513;function Art(i,a,u){return js.tidy(()=>{let h=js.depthwiseConv2d(i,a.filters,u,"same");return h=js.batchNorm(h,a.batch_norm_mean,a.batch_norm_variance,a.batch_norm_offset,a.batch_norm_scale,Drt),js.clipByValue(h,0,6)})}function Frt(i){return[2,4,6,12].some(a=>a===i)?[2,2]:[1,1]}function FR(i,a){return js.tidy(()=>{let u,h=io(i,a.conv_0,[2,2]),d=[a.conv_1,a.conv_2,a.conv_3,a.conv_4,a.conv_5,a.conv_6,a.conv_7,a.conv_8,a.conv_9,a.conv_10,a.conv_11,a.conv_12,a.conv_13];if(d.forEach((g,x)=>{let w=x+1,k=Frt(w);h=Art(h,g.depthwise_conv,k),h=io(h,g.pointwise_conv,[1,1]),w===11&&(u=h)}),u===null)throw new Error("mobileNetV1 - output of conv layer 11 is null");return{out:h,conv11:u}})}function RR(i,a,u,h,d){let g=i.shape[0],x=Math.min(u,g),w=a.map(($,F)=>({score:$,boxIndex:F})).filter($=>$.score>d).sort(($,F)=>F.score-$.score),k=$=>$<=h?1:0,C=[];return w.forEach($=>{if(C.length>=x)return;let F=$.score;for(let _=C.length-1;_>=0;--_){let W=Rrt(i,$.boxIndex,C[_]);if(W===0)continue;if($.score*=k(W),$.score<=d)break}F===$.score&&C.push($.boxIndex)}),C}function Rrt(i,a,u){let 
h=i.arraySync(),d=Math.min(h[a][0],h[a][2]),g=Math.min(h[a][1],h[a][3]),x=Math.max(h[a][0],h[a][2]),w=Math.max(h[a][1],h[a][3]),k=Math.min(h[u][0],h[u][2]),C=Math.min(h[u][1],h[u][3]),$=Math.max(h[u][0],h[u][2]),F=Math.max(h[u][1],h[u][3]),_=(x-d)*(w-g),W=($-k)*(F-C);if(_<=0||W<=0)return 0;let et=Math.max(d,k),tt=Math.max(g,C),G=Math.min(x,$),mt=Math.min(w,F),lt=Math.max(G-et,0)*Math.max(mt-tt,0);return lt/(_+W-lt)}var Mt=te(ee());function Prt(i){let a=Mt.unstack(Mt.transpose(i,[1,0])),u=[Mt.sub(a[2],a[0]),Mt.sub(a[3],a[1])],h=[Mt.add(a[0],Mt.div(u[0],Mt.scalar(2))),Mt.add(a[1],Mt.div(u[1],Mt.scalar(2)))];return{sizes:u,centers:h}}function Ort(i,a){let{sizes:u,centers:h}=Prt(i),d=Mt.unstack(Mt.transpose(a,[1,0])),g=Mt.div(Mt.mul(Mt.exp(Mt.div(d[2],Mt.scalar(5))),u[0]),Mt.scalar(2)),x=Mt.add(Mt.mul(Mt.div(d[0],Mt.scalar(10)),u[0]),h[0]),w=Mt.div(Mt.mul(Mt.exp(Mt.div(d[3],Mt.scalar(5))),u[1]),Mt.scalar(2)),k=Mt.add(Mt.mul(Mt.div(d[1],Mt.scalar(10)),u[1]),h[1]);return Mt.transpose(Mt.stack([Mt.sub(x,g),Mt.sub(k,w),Mt.add(x,g),Mt.add(k,w)]),[1,0])}function PR(i,a,u){return Mt.tidy(()=>{let h=i.shape[0],d=Ort(Mt.reshape(Mt.tile(u.extra_dim,[h,1,1]),[-1,4]),Mt.reshape(i,[-1,4]));d=Mt.reshape(d,[h,d.shape[0]/h,4]);let g=Mt.sigmoid(Mt.slice(a,[0,0,1],[-1,-1,-1])),x=Mt.slice(g,[0,0,0],[-1,-1,1]);x=Mt.reshape(x,[h,x.shape[1]]);let w=Mt.unstack(d),k=Mt.unstack(x);return{boxes:w,scores:k}})}var Qh=te(ee()),Zh=te(ee());function Za(i,a){return Zh.tidy(()=>{let u=i.shape[0],h=Zh.reshape(Ya(i,a.box_encoding_predictor),[u,-1,1,4]),d=Zh.reshape(Ya(i,a.class_predictor),[u,-1,3]);return{boxPredictionEncoding:h,classPrediction:d}})}function OR(i,a,u){return Qh.tidy(()=>{let h=io(i,u.conv_0,[1,1]),d=io(h,u.conv_1,[2,2]),g=io(d,u.conv_2,[1,1]),x=io(g,u.conv_3,[2,2]),w=io(x,u.conv_4,[1,1]),k=io(w,u.conv_5,[2,2]),C=io(k,u.conv_6,[1,1]),$=io(C,u.conv_7,[2,2]),F=Za(a,u.box_predictor_0),_=Za(i,u.box_predictor_1),W=Za(d,u.box_predictor_2),et=Za(x,u.box_predictor_3),tt=Za(k,u.box_predictor_4),G=Za($,u.box_predictor_5),mt=Qh.concat([F.boxPredictionEncoding,_.boxPredictionEncoding,W.boxPredictionEncoding,et.boxPredictionEncoding,tt.boxPredictionEncoding,G.boxPredictionEncoding],1),lt=Qh.concat([F.classPrediction,_.classPrediction,W.classPrediction,et.classPrediction,tt.classPrediction,G.classPrediction],1);return{boxPredictions:mt,classPredictions:lt}})}var ao=class{constructor({minConfidence:a,maxResults:u}={}){this._name="SsdMobilenetv1Options";if(this._minConfidence=a||.5,this._maxResults=u||100,typeof this._minConfidence!="number"||this._minConfidence<=0||this._minConfidence>=1)throw new Error(`${this._name} - expected minConfidence to be a number between 0 and 1`);if(typeof this._maxResults!="number")throw new Error(`${this._name} - expected maxResults to be a number`)}get minConfidence(){return this._minConfidence}get maxResults(){return this._maxResults}},Qa=class extends Gn{constructor(){super("SsdMobilenetv1")}forwardInput(a){let{params:u}=this;if(!u)throw new Error("SsdMobilenetv1 - load model before inference");return Po.tidy(()=>{let h=Po.cast(a.toBatchTensor(512,!1),"float32"),d=Po.sub(Po.mul(h,Po.scalar(.007843137718737125)),Po.scalar(1)),g=FR(d,u.mobilenetv1),{boxPredictions:x,classPredictions:w}=OR(g.out,g.conv11,u.prediction_layer);return PR(x,w,u.output_layer)})}async forward(a){return this.forwardInput(await Ve(a))}async locateFaces(a,u={}){let{maxResults:h,minConfidence:d}=new ao(u),g=await Ve(a),{boxes:x,scores:w}=this.forwardInput(g),k=x[0],C=w[0];for(let 
gt=1;gt{let[_t,Gt]=[Math.max(0,mt[gt][0]),Math.min(1,mt[gt][2])].map(_e=>_e*G),[se,fe]=[Math.max(0,mt[gt][1]),Math.min(1,mt[gt][3])].map(_e=>_e*tt);return new Ue($[gt],new yl(se,_t,fe-se,Gt-_t),{height:g.getInputHeight(0),width:g.getInputWidth(0)})});return k.dispose(),C.dispose(),lt}getDefaultModelName(){return"ssd_mobilenetv1_model"}extractParamsFromWeigthMap(a){return AR(a)}extractParams(a){return DR(a)}};function LR(i){let a=new Qa;return a.extractWeights(i),a}function Lrt(i){return LR(i)}var MR=class extends Qa{},BR=.4,zR=[new Jt(.738768,.874946),new Jt(2.42204,2.65704),new Jt(4.30971,7.04493),new Jt(10.246,4.59428),new Jt(12.6868,11.8741)],WR=[new Jt(1.603231,2.094468),new Jt(6.041143,7.080126),new Jt(2.882459,3.518061),new Jt(4.266906,5.178857),new Jt(9.041765,10.66308)],VR=[117.001,114.697,97.404],GR="tiny_yolov2_model",UR="tiny_yolov2_separable_conv_model",He=te(ee()),_b=i=>typeof i=="number";function Bk(i){if(!i)throw new Error(`invalid config: ${i}`);if(typeof i.withSeparableConvs!="boolean")throw new Error(`config.withSeparableConvs has to be a boolean, have: ${i.withSeparableConvs}`);if(!_b(i.iouThreshold)||i.iouThreshold<0||i.iouThreshold>1)throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${i.iouThreshold}`);if(!Array.isArray(i.classes)||!i.classes.length||!i.classes.every(a=>typeof a=="string"))throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(i.classes)}`);if(!Array.isArray(i.anchors)||!i.anchors.length||!i.anchors.map(a=>a||{}).every(a=>_b(a.x)&&_b(a.y)))throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(i.anchors)}`);if(i.meanRgb&&(!Array.isArray(i.meanRgb)||i.meanRgb.length!==3||!i.meanRgb.every(_b)))throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(i.meanRgb)}`)}var lo=te(ee()),co=te(ee());function Hl(i){return co.tidy(()=>{let a=co.mul(i,co.scalar(.10000000149011612));return co.add(co.relu(co.sub(i,a)),a)})}function Ks(i,a){return lo.tidy(()=>{let u=lo.pad(i,[[0,0],[1,1],[1,1],[0,0]]);return u=lo.conv2d(u,a.conv.filters,[1,1],"valid"),u=lo.sub(u,a.bn.sub),u=lo.mul(u,a.bn.truediv),u=lo.add(u,a.conv.bias),Hl(u)})}var Wi=te(ee());function Xs(i,a){return Wi.tidy(()=>{let u=Wi.pad(i,[[0,0],[1,1],[1,1],[0,0]]);return u=Wi.separableConv2d(u,a.depthwise_filter,a.pointwise_filter,[1,1],"valid"),u=Wi.add(u,a.bias),Hl(u)})}var zk=te(ee());function Mrt(i,a){let u=Ll(i,a);function h(x,w){let k=zk.tensor1d(i(x)),C=zk.tensor1d(i(x));return a.push({paramPath:`${w}/sub`},{paramPath:`${w}/truediv`}),{sub:k,truediv:C}}function d(x,w,k){let C=u(x,w,3,`${k}/conv`),$=h(w,`${k}/bn`);return{conv:C,bn:$}}let g=Ml(i,a);return{extractConvParams:u,extractConvWithBatchNormParams:d,extractSeparableConvParams:g}}function qR(i,a,u,h){let{extractWeights:d,getRemainingWeights:g}=er(i),x=[],{extractConvParams:w,extractConvWithBatchNormParams:k,extractSeparableConvParams:C}=Mrt(d,x),$;if(a.withSeparableConvs){let[F,_,W,et,tt,G,mt,lt,gt]=h,_t=a.isFirstLayerConv2d?w(F,_,3,"conv0"):C(F,_,"conv0"),Gt=C(_,W,"conv1"),se=C(W,et,"conv2"),fe=C(et,tt,"conv3"),_e=C(tt,G,"conv4"),Ge=C(G,mt,"conv5"),Vt=lt?C(mt,lt,"conv6"):void 0,ln=gt?C(lt,gt,"conv7"):void 
0,Ce=w(gt||lt||mt,5*u,1,"conv8");$={conv0:_t,conv1:Gt,conv2:se,conv3:fe,conv4:_e,conv5:Ge,conv6:Vt,conv7:ln,conv8:Ce}}else{let[F,_,W,et,tt,G,mt,lt,gt]=h,_t=k(F,_,"conv0"),Gt=k(_,W,"conv1"),se=k(W,et,"conv2"),fe=k(et,tt,"conv3"),_e=k(tt,G,"conv4"),Ge=k(G,mt,"conv5"),Vt=k(mt,lt,"conv6"),ln=k(lt,gt,"conv7"),Ce=w(gt,5*u,1,"conv8");$={conv0:_t,conv1:Gt,conv2:se,conv3:fe,conv4:_e,conv5:Ge,conv6:Vt,conv7:ln,conv8:Ce}}if(g().length!==0)throw new Error(`weights remaing after extract: ${g().length}`);return{params:$,paramMappings:x}}function Brt(i,a){let u=Sr(i,a);function h(w){let k=u(`${w}/sub`,1),C=u(`${w}/truediv`,1);return{sub:k,truediv:C}}function d(w){let k=u(`${w}/filters`,4),C=u(`${w}/bias`,1);return{filters:k,bias:C}}function g(w){let k=d(`${w}/conv`),C=h(`${w}/bn`);return{conv:k,bn:C}}let x=Bl(u);return{extractConvParams:d,extractConvWithBatchNormParams:g,extractSeparableConvParams:x}}function HR(i,a){let u=[],{extractConvParams:h,extractConvWithBatchNormParams:d,extractSeparableConvParams:g}=Brt(i,u),x;if(a.withSeparableConvs){let w=a.filterSizes&&a.filterSizes.length||9;x={conv0:a.isFirstLayerConv2d?h("conv0"):g("conv0"),conv1:g("conv1"),conv2:g("conv2"),conv3:g("conv3"),conv4:g("conv4"),conv5:g("conv5"),conv6:w>7?g("conv6"):void 0,conv7:w>8?g("conv7"):void 0,conv8:h("conv8")}}else x={conv0:d("conv0"),conv1:d("conv1"),conv2:d("conv2"),conv3:d("conv3"),conv4:d("conv4"),conv5:d("conv5"),conv6:d("conv6"),conv7:d("conv7"),conv8:h("conv8")};return tr(i,u),{params:x,paramMappings:u}}var Wk;(function(i){i[i.XS=224]="XS",i[i.SM=320]="SM",i[i.MD=416]="MD",i[i.LG=608]="LG"})(Wk||(Wk={}));var is=class{constructor({inputSize:a,scoreThreshold:u}={}){this._name="TinyYolov2Options";if(this._inputSize=a||416,this._scoreThreshold=u||.5,typeof this._inputSize!="number"||this._inputSize%32!==0)throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`);if(typeof this._scoreThreshold!="number"||this._scoreThreshold<=0||this._scoreThreshold>=1)throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`)}get inputSize(){return this._inputSize}get scoreThreshold(){return this._scoreThreshold}},Vk=class extends Gn{constructor(a){super("TinyYolov2");Bk(a),this._config=a}get config(){return this._config}get withClassScores(){return this.config.withClassScores||this.config.classes.length>1}get boxEncodingSize(){return 5+(this.withClassScores?this.config.classes.length:0)}runTinyYolov2(a,u){let h=Ks(a,u.conv0);return h=He.maxPool(h,[2,2],[2,2],"same"),h=Ks(h,u.conv1),h=He.maxPool(h,[2,2],[2,2],"same"),h=Ks(h,u.conv2),h=He.maxPool(h,[2,2],[2,2],"same"),h=Ks(h,u.conv3),h=He.maxPool(h,[2,2],[2,2],"same"),h=Ks(h,u.conv4),h=He.maxPool(h,[2,2],[2,2],"same"),h=Ks(h,u.conv5),h=He.maxPool(h,[2,2],[1,1],"same"),h=Ks(h,u.conv6),h=Ks(h,u.conv7),Ya(h,u.conv8,"valid",!1)}runMobilenet(a,u){let h=this.config.isFirstLayerConv2d?Hl(Ya(a,u.conv0,"valid",!1)):Xs(a,u.conv0);return h=He.maxPool(h,[2,2],[2,2],"same"),h=Xs(h,u.conv1),h=He.maxPool(h,[2,2],[2,2],"same"),h=Xs(h,u.conv2),h=He.maxPool(h,[2,2],[2,2],"same"),h=Xs(h,u.conv3),h=He.maxPool(h,[2,2],[2,2],"same"),h=Xs(h,u.conv4),h=He.maxPool(h,[2,2],[2,2],"same"),h=Xs(h,u.conv5),h=He.maxPool(h,[2,2],[1,1],"same"),h=u.conv6?Xs(h,u.conv6):h,h=u.conv7?Xs(h,u.conv7):h,Ya(h,u.conv8,"valid",!1)}forwardInput(a,u){let{params:h}=this;if(!h)throw new Error("TinyYolov2 - load model before inference");return He.tidy(()=>{let d=He.cast(a.toBatchTensor(u,!1),"float32");return 
d=this.config.meanRgb?Io(d,this.config.meanRgb):d,d=d.div(He.scalar(256)),this.config.withSeparableConvs?this.runMobilenet(d,h):this.runTinyYolov2(d,h)})}async forward(a,u){return await this.forwardInput(await Ve(a),u)}async detect(a,u={}){let{inputSize:h,scoreThreshold:d}=new is(u),g=await Ve(a),x=await this.forwardInput(g,h),w=He.tidy(()=>He.unstack(x)[0].expandDims()),k={width:g.getInputWidth(0),height:g.getInputHeight(0)},C=await this.extractBoxes(w,g.getReshapedInputDimensions(0),d);x.dispose(),w.dispose();let $=C.map(G=>G.box),F=C.map(G=>G.score),_=C.map(G=>G.classScore),W=C.map(G=>this.config.classes[G.label]),et=F1($.map(G=>G.rescale(h)),F,this.config.iouThreshold,!0),tt=et.map(G=>new Si(F[G],_[G],W[G],$[G],k));return tt}getDefaultModelName(){return""}extractParamsFromWeigthMap(a){return HR(a,this.config)}extractParams(a){let u=this.config.filterSizes||Vk.DEFAULT_FILTER_SIZES,h=u?u.length:void 0;if(h!==7&&h!==8&&h!==9)throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${h} filterSizes in config`);return qR(a,this.config,this.boxEncodingSize,u)}async extractBoxes(a,u,h){let{width:d,height:g}=u,x=Math.max(d,g),w=x/d,k=x/g,C=a.shape[1],$=this.config.anchors.length,[F,_,W]=He.tidy(()=>{let mt=a.reshape([C,C,$,this.boxEncodingSize]),lt=mt.slice([0,0,0,0],[C,C,$,4]),gt=mt.slice([0,0,0,4],[C,C,$,1]),_t=this.withClassScores?He.softmax(mt.slice([0,0,0,5],[C,C,$,this.config.classes.length]),3):He.scalar(0);return[lt,gt,_t]}),et=[],tt=await _.array(),G=await F.array();for(let mt=0;mth){let Gt=(lt+lh(G[mt][lt][gt][0]))/C*w,se=(mt+lh(G[mt][lt][gt][1]))/C*k,fe=Math.exp(G[mt][lt][gt][2])*this.config.anchors[gt].x/C*w,_e=Math.exp(G[mt][lt][gt][3])*this.config.anchors[gt].y/C*k,Ge=Gt-fe/2,Vt=se-_e/2,ln={row:mt,col:lt,anchor:gt},{classScore:Ce,label:rr}=this.withClassScores?await this.extractPredictedClass(W,ln):{classScore:1,label:0};et.push({box:new gl(Ge,Vt,Ge+fe,Vt+_e),score:_t,classScore:_t*Ce,label:rr,...ln})}}return F.dispose(),_.dispose(),W.dispose(),et}async extractPredictedClass(a,u){let{row:h,col:d,anchor:g}=u,x=await a.array();return Array(this.config.classes.length).fill(0).map((w,k)=>x[h][d][g][k]).map((w,k)=>({classScore:w,label:k})).reduce((w,k)=>w.classScore>k.classScore?w:k)}},jl=Vk;jl.DEFAULT_FILTER_SIZES=[3,16,32,64,128,256,512,1024,1024];var Kl=class extends jl{constructor(a=!0){let u=Object.assign({},{withSeparableConvs:a,iouThreshold:BR,classes:["face"]},a?{anchors:WR,meanRgb:VR}:{anchors:zR,withClassScores:!0});super(u)}get withSeparableConvs(){return this.config.withSeparableConvs}get anchors(){return this.config.anchors}async locateFaces(a,u){let h=await this.detect(a,u);return h.map(d=>new Ue(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return this.withSeparableConvs?UR:GR}extractParamsFromWeigthMap(a){return super.extractParamsFromWeigthMap(a)}};function zrt(i,a=!0){let u=new Kl(a);return u.extractWeights(i),u}var Cb=class extends is{constructor(){super(...arguments);this._name="TinyFaceDetectorOptions"}},uo=class{async then(a){return a(await this.run())}async run(){throw new Error("ComposableTask - run is not implemented")}},tf=te(ee()),Gk=te(ee());async function tc(i,a,u,h,d=({alignedRect:g})=>g){let g=i.map(k=>Ja(k)?d(k):k.detection),x=h||(a instanceof Gk.Tensor?await Pl(a,g):await Rl(a,g)),w=await u(x);return x.forEach(k=>k instanceof Gk.Tensor&&k.dispose()),w}async function Xl(i,a,u,h,d){return tc([i],a,async g=>u(g[0]),h,d)}var jR=.4,KR=[new Jt(1.603231,2.094468),new Jt(6.041143,7.080126),new 
Jt(2.882459,3.518061),new Jt(4.266906,5.178857),new Jt(9.041765,10.66308)],XR=[117.001,114.697,97.404],Yl=class extends jl{constructor(){let a={withSeparableConvs:!0,iouThreshold:jR,classes:["face"],anchors:KR,meanRgb:XR,isFirstLayerConv2d:!0,filterSizes:[3,16,32,64,128,256,512]};super(a)}get anchors(){return this.config.anchors}async locateFaces(a,u){let h=await this.detect(a,u);return h.map(d=>new Ue(d.score,d.relativeBox,{width:d.imageWidth,height:d.imageHeight}))}getDefaultModelName(){return"tiny_face_detector_model"}extractParamsFromWeigthMap(a){return super.extractParamsFromWeigthMap(a)}},Ne={ssdMobilenetv1:new Qa,tinyFaceDetector:new Yl,tinyYolov2:new Kl,faceLandmark68Net:new Gl,faceLandmark68TinyNet:new xb,faceRecognitionNet:new ql,faceExpressionNet:new gb,ageGenderNet:new bb},YR=(i,a)=>Ne.ssdMobilenetv1.locateFaces(i,a),Wrt=(i,a)=>Ne.tinyFaceDetector.locateFaces(i,a),Vrt=(i,a)=>Ne.tinyYolov2.locateFaces(i,a),JR=i=>Ne.faceLandmark68Net.detectLandmarks(i),Grt=i=>Ne.faceLandmark68TinyNet.detectLandmarks(i),Urt=i=>Ne.faceRecognitionNet.computeFaceDescriptor(i),qrt=i=>Ne.faceExpressionNet.predictExpressions(i),Hrt=i=>Ne.ageGenderNet.predictAgeAndGender(i),ZR=i=>Ne.ssdMobilenetv1.load(i),jrt=i=>Ne.tinyFaceDetector.load(i),Krt=i=>Ne.tinyYolov2.load(i),Xrt=i=>Ne.faceLandmark68Net.load(i),Yrt=i=>Ne.faceLandmark68TinyNet.load(i),Jrt=i=>Ne.faceRecognitionNet.load(i),Zrt=i=>Ne.faceExpressionNet.load(i),Qrt=i=>Ne.ageGenderNet.load(i),tot=ZR,eot=YR,not=JR,Uk=class extends uo{constructor(a,u,h){super();this.parentTask=a;this.input=u;this.extractedFaces=h}},Ql=class extends Uk{async run(){let a=await this.parentTask,u=await tc(a,this.input,async h=>await Promise.all(h.map(d=>Ne.faceExpressionNet.predictExpressions(d))),this.extractedFaces);return a.map((h,d)=>yb(h,u[d]))}withAgeAndGender(){return new Jl(this,this.input)}},tu=class extends Uk{async run(){let a=await this.parentTask;if(!a)return;let u=await Xl(a,this.input,h=>Ne.faceExpressionNet.predictExpressions(h),this.extractedFaces);return yb(a,u)}withAgeAndGender(){return new Zl(this,this.input)}},rc=class extends Ql{withAgeAndGender(){return new ec(this,this.input)}withFaceDescriptors(){return new Vi(this,this.input)}},oc=class extends tu{withAgeAndGender(){return new nc(this,this.input)}withFaceDescriptor(){return new Gi(this,this.input)}},qk=class extends uo{constructor(a,u,h){super();this.parentTask=a;this.input=u;this.extractedFaces=h}},Jl=class extends qk{async run(){let a=await this.parentTask,u=await tc(a,this.input,async h=>await Promise.all(h.map(d=>Ne.ageGenderNet.predictAgeAndGender(d))),this.extractedFaces);return a.map((h,d)=>{let{age:g,gender:x,genderProbability:w}=u[d];return kb(Nb(h,x,w),g)})}withFaceExpressions(){return new Ql(this,this.input)}},Zl=class extends qk{async run(){let a=await this.parentTask;if(!a)return;let{age:u,gender:h,genderProbability:d}=await Xl(a,this.input,g=>Ne.ageGenderNet.predictAgeAndGender(g),this.extractedFaces);return kb(Nb(a,h,d),u)}withFaceExpressions(){return new tu(this,this.input)}},ec=class extends Jl{withFaceExpressions(){return new rc(this,this.input)}withFaceDescriptors(){return new Vi(this,this.input)}},nc=class extends Zl{withFaceExpressions(){return new oc(this,this.input)}withFaceDescriptor(){return new Gi(this,this.input)}},Sb=class extends uo{constructor(a,u){super();this.parentTask=a;this.input=u}},Vi=class extends Sb{async run(){let a=await this.parentTask,u=await 
tc(a,this.input,h=>Promise.all(h.map(d=>Ne.faceRecognitionNet.computeFaceDescriptor(d))),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return u.map((h,d)=>Tb(a[d],h))}withFaceExpressions(){return new rc(this,this.input)}withAgeAndGender(){return new ec(this,this.input)}},Gi=class extends Sb{async run(){let a=await this.parentTask;if(!a)return;let u=await Xl(a,this.input,h=>Ne.faceRecognitionNet.computeFaceDescriptor(h),null,h=>h.landmarks.align(null,{useDlibAlignment:!0}));return Tb(a,u)}withFaceExpressions(){return new oc(this,this.input)}withAgeAndGender(){return new nc(this,this.input)}},$b=class extends uo{constructor(a,u,h){super();this.parentTask=a;this.input=u;this.useTinyLandmarkNet=h}get landmarkNet(){return this.useTinyLandmarkNet?Ne.faceLandmark68TinyNet:Ne.faceLandmark68Net}},Ib=class extends $b{async run(){let a=await this.parentTask,u=a.map(g=>g.detection),h=this.input instanceof tf.Tensor?await Pl(this.input,u):await Rl(this.input,u),d=await Promise.all(h.map(g=>this.landmarkNet.detectLandmarks(g)));return h.forEach(g=>g instanceof tf.Tensor&&g.dispose()),a.map((g,x)=>Vl(g,d[x]))}withFaceExpressions(){return new rc(this,this.input)}withAgeAndGender(){return new ec(this,this.input)}withFaceDescriptors(){return new Vi(this,this.input)}},Eb=class extends $b{async run(){let a=await this.parentTask;if(!a)return;let{detection:u}=a,h=this.input instanceof tf.Tensor?await Pl(this.input,[u]):await Rl(this.input,[u]),d=await this.landmarkNet.detectLandmarks(h[0]);return h.forEach(g=>g instanceof tf.Tensor&&g.dispose()),Vl(a,d)}withFaceExpressions(){return new oc(this,this.input)}withAgeAndGender(){return new nc(this,this.input)}withFaceDescriptor(){return new Gi(this,this.input)}},Db=class extends uo{constructor(a,u=new ao){super();this.input=a;this.options=u}},ef=class extends Db{async run(){let{input:a,options:u}=this,h=u instanceof Cb?d=>Ne.tinyFaceDetector.locateFaces(d,u):u instanceof ao?d=>Ne.ssdMobilenetv1.locateFaces(d,u):u instanceof is?d=>Ne.tinyYolov2.locateFaces(d,u):null;if(!h)throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options");return h(a)}runAndExtendWithFaceDetections(){return new Promise(async a=>{let u=await this.run();return a(u.map(h=>_a({},h)))})}withFaceLandmarks(a=!1){return new Ib(this.runAndExtendWithFaceDetections(),this.input,a)}withFaceExpressions(){return new Ql(this.runAndExtendWithFaceDetections(),this.input)}withAgeAndGender(){return new Jl(this.runAndExtendWithFaceDetections(),this.input)}},Ab=class extends Db{async run(){let a=await new ef(this.input,this.options),u=a[0];return a.forEach(h=>{h.score>u.score&&(u=h)}),u}runAndExtendWithFaceDetection(){return new Promise(async a=>{let u=await this.run();return a(u?_a({},u):void 0)})}withFaceLandmarks(a=!1){return new Eb(this.runAndExtendWithFaceDetection(),this.input,a)}withFaceExpressions(){return new tu(this.runAndExtendWithFaceDetection(),this.input)}withAgeAndGender(){return new Zl(this.runAndExtendWithFaceDetection(),this.input)}};function rot(i,a=new ao){return new Ab(i,a)}function Fb(i,a=new ao){return new ef(i,a)}async function QR(i,a){return console.warn("allFacesSsdMobilenetv1 is deprecated and will be removed soon, use the high level api instead"),await Fb(i,new ao(a?{minConfidence:a}:{})).withFaceLandmarks().withFaceDescriptors()}async function oot(i,a={}){return console.warn("allFacesTinyYolov2 is deprecated and will be removed soon, use the high level api instead"),await Fb(i,new 
is(a)).withFaceLandmarks().withFaceDescriptors()}var sot=QR;function Hk(i,a){if(i.length!==a.length)throw new Error("euclideanDistance: arr1.length !== arr2.length");let u=Array.from(i),h=Array.from(a);return Math.sqrt(u.map((d,g)=>d-h[g]).reduce((d,g)=>d+Math.pow(g,2),0))}var Rb=class{constructor(a,u=.6){this._distanceThreshold=u;let h=Array.isArray(a)?a:[a];if(!h.length)throw new Error("FaceRecognizer.constructor - expected atleast one input");let d=1,g=()=>`person ${d++}`;this._labeledDescriptors=h.map(x=>{if(x instanceof As)return x;if(x instanceof Float32Array)return new As(g(),[x]);if(x.descriptor&&x.descriptor instanceof Float32Array)return new As(g(),[x.descriptor]);throw new Error("FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>")})}get labeledDescriptors(){return this._labeledDescriptors}get distanceThreshold(){return this._distanceThreshold}computeMeanDistance(a,u){return u.map(h=>Hk(h,a)).reduce((h,d)=>h+d,0)/(u.length||1)}matchDescriptor(a){return this.labeledDescriptors.map(({descriptors:u,label:h})=>new uh(h,this.computeMeanDistance(a,u))).reduce((u,h)=>u.distancea.toJSON())}}static fromJSON(a){let u=a.labeledDescriptors.map(h=>As.fromJSON(h));return new Rb(u,a.distanceThreshold)}};function iot(i){let a=new Yl;return a.extractWeights(i),a}function tP(i,a){let{width:u,height:h}=new Zn(a.width,a.height);if(u<=0||h<=0)throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width:u,height:h})}`);if(Array.isArray(i))return i.map(d=>tP(d,{width:u,height:h}));if(Ja(i)){let d=i.detection.forSize(u,h),g=i.unshiftedLandmarks.forSize(d.box.width,d.box.height);return Vl(_a(i,d),g)}return Jo(i)?_a(i,i.detection.forSize(u,h)):i instanceof Wr||i instanceof Ue?i.forSize(u,h):i}var eP="0.8.8",cot=typeof process!="undefined",lot=typeof navigator!="undefined"&&typeof navigator.userAgent!="undefined",uot={faceapi:eP,node:cot,browser:lot};export{bb as AgeGenderNet,gl as BoundingBox,Fe as Box,uo as ComposableTask,Vi as ComputeAllFaceDescriptorsTask,Sb as ComputeFaceDescriptorsTaskBase,Gi as ComputeSingleFaceDescriptorTask,Ib as DetectAllFaceLandmarksTask,ef as DetectAllFacesTask,$b as DetectFaceLandmarksTaskBase,Db as DetectFacesTaskBase,Eb as DetectSingleFaceLandmarksTask,Ab as DetectSingleFaceTask,Zn as Dimensions,Ik as FACE_EXPRESSION_LABELS,Ue as FaceDetection,MR as FaceDetectionNet,gb as FaceExpressionNet,Mi as FaceExpressions,Gl as FaceLandmark68Net,xb as FaceLandmark68TinyNet,CR as FaceLandmarkNet,Wr as FaceLandmarks,NE as FaceLandmarks5,bl as FaceLandmarks68,uh as FaceMatch,Rb as FaceMatcher,ql as FaceRecognitionNet,Hs as Gender,ph as LabeledBox,As as LabeledFaceDescriptors,Us as NetInput,Gn as NeuralNetwork,Si as ObjectDetection,Jt as Point,_E as PredictedBox,yl as Rect,Qa as SsdMobilenetv1,ao as SsdMobilenetv1Options,Yl as TinyFaceDetector,Cb as TinyFaceDetectorOptions,Kl as TinyYolov2,is as TinyYolov2Options,Wk as TinyYolov2SizeType,sot as allFaces,QR as allFacesSsdMobilenetv1,oot as allFacesTinyYolov2,V1 as awaitMediaLoaded,G1 as bufferToImage,Urt as computeFaceDescriptor,xl as createCanvas,dh as createCanvasFromMedia,Lrt as createFaceDetectionNet,Crt as createFaceRecognitionNet,LR as createSsdMobilenetv1,iot as createTinyFaceDetector,zrt as createTinyYolov2,Fb as detectAllFaces,JR as detectFaceLandmarks,Grt as detectFaceLandmarksTiny,not as detectLandmarks,rot as detectSingleFace,Fk as draw,$e as env,Hk as euclideanDistance,kb as extendWithAge,Tb as extendWithFaceDescriptor,_a 
as extendWithFaceDetection,yb as extendWithFaceExpressions,Vl as extendWithFaceLandmarks,Nb as extendWithGender,Pl as extractFaceTensors,Rl as extractFaces,grt as fetchImage,Sk as fetchJson,yrt as fetchNetWeights,Xa as fetchOrThrow,dr as getContext2dOrThrow,Sa as getMediaDimensions,U1 as imageTensorToCanvas,Ck as imageToSquare,V9 as inverseSigmoid,D1 as iou,tg as isMediaElement,fh as isMediaLoaded,Srt as isWithAge,Jo as isWithFaceDetection,Ek as isWithFaceExpressions,Ja as isWithFaceLandmarks,$rt as isWithGender,Qrt as loadAgeGenderModel,tot as loadFaceDetectionModel,Zrt as loadFaceExpressionModel,Xrt as loadFaceLandmarkModel,Yrt as loadFaceLandmarkTinyModel,Jrt as loadFaceRecognitionModel,ZR as loadSsdMobilenetv1Model,jrt as loadTinyFaceDetectorModel,Krt as loadTinyYolov2Model,$k as loadWeightMap,eot as locateFaces,brt as matchDimensions,A1 as minBbox,Ne as nets,F1 as nonMaxSuppression,Io as normalize,R1 as padToSquare,Hrt as predictAgeAndGender,qrt as recognizeFaceExpressions,tP as resizeResults,Ca as resolveInput,W9 as shuffleArray,lh as sigmoid,YR as ssdMobilenetv1,aot as tf,Wrt as tinyFaceDetector,Vrt as tinyYolov2,Ve as toNetInput,S1 as utils,Bk as validateConfig,uot as version}; /** * @license * Copyright 2017 Google LLC. All Rights Reserved. diff --git a/dist/face-api.esm.js.map b/dist/face-api.esm.js.map index 2ae77d2..8b5a8aa 100644 --- a/dist/face-api.esm.js.map +++ b/dist/face-api.esm.js.map @@ -1,7 +1,7 @@ { "version": 3, - "sources": ["node_modules/node-fetch/browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/util_base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/environment.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/global_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_names.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/profiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_format.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/engine.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/device_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/flags.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/operation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/types.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/model_management.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/print.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/progress.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/http.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/io/io.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/browser.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/test_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/globals.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/any.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/diag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/eye.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fill.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mirror_pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/mod.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moments.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/prod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/alea.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor128.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorwow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xorshift7.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/xor4096.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/lib/tychei.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/seedrandom.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/seedrandom/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/range.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/spectral/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unique.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unsorted_segment_sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/variable.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/compare.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/binary_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hamming_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/hann_window.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/frame.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/signal/stft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/crop_and_resize.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/flip_left_right.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/rotate_with_offset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/array_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded_async.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/band_part.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/gram_schmidt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/linalg/qr.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/compute_weighted_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/absolute_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/cosine_distance.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/hinge_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/huber_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/log_loss.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/mean_squared_error.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/sigmoid_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/losses/softmax_cross_entropy.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adadelta_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adagrad_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adam_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/adamax_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/sgd_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/momentum_optimizer.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/rmsprop_optimizer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer_constructors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/train.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/browser_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/rotate_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/array_ops_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/erf_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/complex_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/split_shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/tile_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/topk_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Abs_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Acosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Add_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AddN_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ArgMin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asin_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Asinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Atanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/AvgPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchMatMul_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BatchToSpaceND_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/BroadcastTo_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cast_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Ceil_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ClipByValue_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Concat_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv2DBackpropInput_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_filter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Conv3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cos_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cosh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Cumsum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/DepthwiseConv2dNative_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Dilation2D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Div_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Elu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Erf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Exp_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Expm1_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Floor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FloorDiv_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/FusedBatchNorm_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GatherV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/GreaterEqual_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Identity_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsFinite_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsInf_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/IsNan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log1p_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Log_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LogSoftmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization_backprop.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/LRN_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/min_max_grad_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Max_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Maximum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool3D_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_backprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MaxPool_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Min_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Minimum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/MirrorPad_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Mod_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Multiply_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Negate_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OneHot_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/OnesLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/PadV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Pow_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Prelu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reciprocal_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu6_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Relu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reshape_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeBilinear_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ResizeNearestNeighbor_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Reverse_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Round_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Rsqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SelectV2_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Selu_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sigmoid_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sign_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sin_grad.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sinh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Slice_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softmax_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Softplus_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SpaceToBatchND_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SplitV_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sqrt_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Square_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/SquaredDifference_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Step_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sub_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Sum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tan_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tanh_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Tile_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Transpose_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/Unpack_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/UnsortedSegmentSum_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/gradients/ZerosLike_grad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/register_all_gradients.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/all.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/any.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/arg_min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_scalar.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as_type.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as1d.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as3d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as4d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/as5d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/avg_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batch_to_space_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/batchnorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/broadcast_to.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/clip_by_value.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv1d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d_transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/cumsum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depth_to_space.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2D_deprecated.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/depthwise_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dilation2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_no_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/dot.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expand_dims.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/fft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/flatten.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/floorDiv.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/gather.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/greater.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ifft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/irfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_finite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_inf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/is_nan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/leaky_relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/less.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/local_response_normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log_sum_exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_and.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_not.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_or.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/logical_xor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mat_mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max_pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/maximum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/min.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/minimum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mirror_pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mod.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/mul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/neg.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/norm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/not_equal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/one_hot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/ones_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/pow.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/prod.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape_as.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_bilinear.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/resize_nearest_neighbor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/reverse.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rfft.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/separable_conv2d.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/space_to_batch_nd.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/split.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squared_difference_strict.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/squeeze.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/stack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/strided_slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub_strict.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/tile.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_bool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_float.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/to_int.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/topk.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unsorted_segment_sum.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/unstack.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/where.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/zeros_like.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/public/chained_ops/register_all_chained_ops.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/errors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/generic_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_constraints.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/common.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/math_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/tfjs_backend.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/keras_format/initializer_config.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_initializers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/backend/state.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/types_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/variable_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/variables.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/topology.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/input_layer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/logs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/base_callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/serialization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/losses.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/optimizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/user_defined_metadata.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/layer_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/serialization_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/container.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training_tensors.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/engine/training.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/advanced_activations.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/utils/conv_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/convolutional_recurrent.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/core.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/embeddings.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/merge.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/noise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/padding.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/pooling.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/layers/wrappers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_layers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_metrics.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_models.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/exports_regularizers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/callbacks.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-layers/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/data/compiled_api.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/register.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/arithmetic.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/basic_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/control.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/convolution.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/creation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/dynamic.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/evaluation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/graph.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/hash_table.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/image.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/logical.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/matrices.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/normalization.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/reduction.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/slice_join.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/spectral.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/op_list/transformation.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_mapper.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/node_value_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-core/dist/ops/ops_for_converter.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/arithmetic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/basic_math_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_array.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_list.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/control_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/convolution_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/creation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/dynamic_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/evaluation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/graph_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/hash_table.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/hash_table_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/image_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/logical_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/matrices_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/normalization_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/reduction_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/slice_join_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/spectral_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/executors/transformation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/operations/operation_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/execution_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/model_analysis.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_executor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/resource_manager.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/executor/graph_model.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-converter/dist/index.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/text_line_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasets/csv_dataset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/microphone_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/webcam_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/datasource.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/string_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/byte_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/file_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/iterators/url_chunk_iterator.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/util/source_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/file_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/sources/url_data_source.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/readers.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-data/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/cpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/backend_cpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Abs.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/binary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/kernel_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/unary_utils.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Ceil.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Exp.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Expm1.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Floor.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Multiply.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NotEqual.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Rsqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Slice.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Elu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Prelu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Relu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Relu6.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fused_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchMatMul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/_FusedMatMul.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Acosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Asinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Atanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/pool_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPool.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Clip.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2DBackpropFilter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv2DBackpropInput.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3DBackpropFilterV2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Conv3DBackpropInputV2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cos.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Cosh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNative.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNativeBackpropFilter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/DepthwiseConv2dNativeBackpropInput.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropFilter.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Dilation2DBackpropInput.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Erf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/fft_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Fill.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FlipLeftRight.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FusedConv2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/FusedDepthwiseConv2D.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IFFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsFinite.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsInf.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/IsNaN.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Log1p.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/LogicalNot.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/MirrorPad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/NonMaxSuppressionV5.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/PadV2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Reciprocal.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Round.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Selu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sigmoid.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sign.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sinh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Softplus.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/SpaceToBatchND.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Sqrt.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Step.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Tanh.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-cpu/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/canvas_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tex_util.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flags_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/shared.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/addn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/packing_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/glsl_version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/shader_compiler.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/argminmax_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/avg_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/clip_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/complex_abs_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_backprop_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/conv_packed_gpu_depthwise.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/crop_and_resize_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/cumsum_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/decode_matrix_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/depth_to_space_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/diag_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_float_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/encode_matrix_packed_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fill_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gather_nd_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_util.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_context.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/gpgpu_math.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/im2col_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_grad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/lrn_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/max_pool_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mulmat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/multinomial_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/onehot_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pad_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/pool_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reduce_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reshape_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_bilinear_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_backprop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/resize_nearest_neighbor_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/reverse_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/scatter_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/segment_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/select_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/slice_packed_gpu.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/strided_slice_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/texture_manager.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/tile_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unaryop_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/unpack_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/backend_webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/version.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/webgl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/base.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Identity.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Complex.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/kernel_funcs_utils.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Add.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Atan2.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/AvgPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/batchnorm_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/BatchNorm.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NotEqual.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Real.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/int.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cast.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/concat_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Imag.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Reshape.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Concat_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Concat.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Cos.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Div.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/fft_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FFT_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/flip_left_right_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FlipLeftRight.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels_utils/from_pixels_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/FromPixels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/IFFT.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mean_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernel_utils/reduce.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/transpose_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Max.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPool.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolBackprop.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MaxPoolWithArgmax.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Mean_impl.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Mean.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mirror_pad_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/mirror_pad_packed_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/MirrorPad.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/binaryop_complex_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Multiply.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV3.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV4.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/NonMaxSuppressionV5.js", 
"node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/rotate_gpu.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/RotateWithOffset.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sin.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Square.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/SquaredDifference.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Sub.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Tan.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Transpose.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/kernels/Unique.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/register_all_kernels.js", "node_modules/@tensorflow/tfjs/dist/../node_modules/@tensorflow/tfjs-backend-webgl/dist/index.js", "node_modules/@tensorflow/tfjs/dist/../src/version.ts", "node_modules/@tensorflow/tfjs/dist/../src/index.ts", "src/env/isNodejs.ts", "src/draw/drawContour.ts", "src/classes/Dimensions.ts", "src/utils/index.ts", "src/classes/Point.ts", "src/classes/Box.ts", "src/classes/BoundingBox.ts", "src/classes/ObjectDetection.ts", "src/classes/FaceDetection.ts", "src/ops/iou.ts", "src/ops/minBbox.ts", "src/ops/nonMaxSuppression.ts", "src/ops/normalize.ts", "src/ops/padToSquare.ts", "src/ops/shuffleArray.ts", "src/ops/index.ts", "src/classes/Rect.ts", "src/classes/FaceLandmarks.ts", "src/classes/FaceLandmarks5.ts", "src/classes/FaceLandmarks68.ts", "src/classes/FaceMatch.ts", "src/classes/LabeledBox.ts", "src/classes/LabeledFaceDescriptors.ts", "src/classes/PredictedBox.ts", "src/factories/WithFaceDetection.ts", "src/env/createBrowserEnv.ts", "src/env/createFileSystem.ts", "src/env/createNodejsEnv.ts", "src/env/isBrowser.ts", "src/env/index.ts", "src/dom/resolveInput.ts", "src/dom/getContext2dOrThrow.ts", "src/draw/DrawTextField.ts", "src/draw/DrawBox.ts", "src/draw/drawDetections.ts", "src/dom/isMediaLoaded.ts", "src/dom/awaitMediaLoaded.ts", "src/dom/bufferToImage.ts", "src/dom/getMediaDimensions.ts", "src/dom/createCanvas.ts", "src/dom/imageTensorToCanvas.ts", "src/dom/isMediaElement.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/backend.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/util_base.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/environment.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/global_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_names.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/kernel_registry.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/profiler.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tape.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_format.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/types.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/engine.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/device_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/flags.ts", 
"node_modules/@tensorflow/tfjs-core/dist/../src/tensor_util_env.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/operation.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/complex.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/buffer.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cast.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/clone.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/print.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/base_side_effects.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reshape.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mat_mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/transpose.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor3d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/browser.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice_util.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/globals.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/add.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floorDiv.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/div.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mul.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/abs.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/axis_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/concat.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sigmoid.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/slice.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/batch_to_space_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_to.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cos.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cosh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/cumsum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/broadcast_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/where.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros_like.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/expand_dims.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tile.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/eye.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/fill.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/floor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/segment_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/gather.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/greater_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/imag.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/maximum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/scalar.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/less_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log1p.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/neg.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sub.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/log_sum_exp.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_and.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/logical_not.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/zeros.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ones.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/mean.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/min.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/minimum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/square.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/not_equal.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/real.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/space_to_batch_nd.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/pow.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor1d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/range.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/relu.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/reverse.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/rsqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sin.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sinh.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/fft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/ifft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/irfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/split.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/spectral/../../../src/ops/spectral/rfft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/sqrt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squared_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/squeeze.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/stack.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/step.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/tensor2d.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unsorted_segment_sum.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/unstack.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/norm.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/signal_ops_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv2d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/depthwise_conv2d_native_backprop_input.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hamming_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/hann_window.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/frame.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/signal/../../../src/ops/signal/stft.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/crop_and_resize.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/flip_left_right.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/rotate_with_offset.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/nonmax_util.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/array_util.ts", "node_modules/@tensorflow/tfjs-core/dist/backends/../../src/backends/non_max_suppression_impl.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_with_score_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/non_max_suppression_padded_async.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_bilinear.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/image/../../../src/ops/image/resize_nearest_neighbor.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/band_part.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/gram_schmidt.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/linalg/../../../src/ops/linalg/qr.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/loss_ops_utils.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/compute_weighted_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/absolute_difference.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/cosine_distance.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/hinge_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/huber_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/log_loss.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/mean_squared_error.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/sigmoid_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/losses/../../../src/ops/losses/softmax_cross_entropy.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/ops.ts", 
"node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/selu_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Abs_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acos_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Acosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Add_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AddN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ArgMin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Asinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Atanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/avg_pool_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/AvgPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchMatMul_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BatchToSpaceND_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/BroadcastTo_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cast_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Ceil_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ClipByValue_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Concat_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv2DBackpropInput_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/conv3d_backprop_filter.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Conv3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cos_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cosh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Cumsum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/DepthwiseConv2dNative_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Dilation2D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Div_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Elu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Erf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Exp_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Expm1_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Floor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FloorDiv_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/FusedBatchNorm_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GatherV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/GreaterEqual_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Identity_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsFinite_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsInf_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/IsNan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log1p_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Log_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LogSoftmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/local_response_normalization_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/LRN_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/min_max_grad_util.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Max_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Maximum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_3d_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool3D_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/ops/../../src/ops/max_pool_backprop.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MaxPool_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Min_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Minimum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/MirrorPad_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Mod_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Multiply_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Negate_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OneHot_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/OnesLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/PadV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Pow_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Prelu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reciprocal_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu6_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Relu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reshape_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeBilinear_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ResizeNearestNeighbor_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Reverse_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Round_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Rsqrt_grad.ts", 
"node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SelectV2_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Selu_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sigmoid_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sign_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sin_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sinh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Slice_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softmax_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Softplus_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SpaceToBatchND_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SplitV_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sqrt_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Square_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/SquaredDifference_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Step_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sub_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Sum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tan_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tanh_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Tile_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Transpose_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/Unpack_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/UnsortedSegmentSum_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/gradients/../../src/gradients/ZerosLike_grad.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/register_all_gradients.ts", "node_modules/@tensorflow/tfjs-core/dist/../src/index.ts", "src/dom/imageToSquare.ts", "src/dom/NetInput.ts", "src/dom/toNetInput.ts", "src/dom/extractFaces.ts", "src/dom/extractFaceTensors.ts", "src/dom/fetchOrThrow.ts", "src/dom/fetchImage.ts", "src/dom/fetchJson.ts", "src/dom/fetchNetWeights.ts", "src/common/getModelUris.ts", "src/dom/loadWeightMap.ts", "src/dom/matchDimensions.ts", "src/NeuralNetwork.ts", "src/common/depthwiseSeparableConv.ts", "src/faceFeatureExtractor/denseBlock.ts", "src/common/convLayer.ts", "src/common/disposeUnusedWeightTensors.ts", "src/common/extractConvParamsFactory.ts", "src/common/extractFCParamsFactory.ts", "src/common/types.ts", "src/common/extractSeparableConvParamsFactory.ts", "src/common/extractWeightEntryFactory.ts", "src/common/extractWeightsFactory.ts", "src/faceFeatureExtractor/extractorsFactory.ts", "src/faceFeatureExtractor/extractParams.ts", "src/common/loadConvParamsFactory.ts", "src/faceFeatureExtractor/loadParamsFactory.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMap.ts", "src/faceFeatureExtractor/FaceFeatureExtractor.ts", "src/common/fullyConnectedLayer.ts", "src/faceProcessor/extractParams.ts", "src/faceProcessor/extractParamsFromWeigthMap.ts", "src/faceProcessor/util.ts", "src/faceProcessor/FaceProcessor.ts", "src/faceExpressionNet/FaceExpressions.ts", 
"src/faceExpressionNet/FaceExpressionNet.ts", "src/factories/WithFaceExpressions.ts", "src/draw/drawFaceExpressions.ts", "src/factories/WithFaceLandmarks.ts", "src/draw/DrawFaceLandmarks.ts", "src/draw/index.ts", "src/xception/extractParams.ts", "src/xception/extractParamsFromWeigthMap.ts", "src/xception/TinyXception.ts", "src/ageGenderNet/extractParams.ts", "src/ageGenderNet/extractParamsFromWeigthMap.ts", "src/ageGenderNet/types.ts", "src/ageGenderNet/AgeGenderNet.ts", "src/faceLandmarkNet/FaceLandmark68NetBase.ts", "src/faceLandmarkNet/FaceLandmark68Net.ts", "src/faceFeatureExtractor/extractParamsFromWeigthMapTiny.ts", "src/faceFeatureExtractor/extractParamsTiny.ts", "src/faceFeatureExtractor/TinyFaceFeatureExtractor.ts", "src/faceLandmarkNet/FaceLandmark68TinyNet.ts", "src/faceLandmarkNet/index.ts", "src/faceRecognitionNet/scaleLayer.ts", "src/faceRecognitionNet/convLayer.ts", "src/faceRecognitionNet/extractParams.ts", "src/faceRecognitionNet/extractParamsFromWeigthMap.ts", "src/faceRecognitionNet/residualLayer.ts", "src/faceRecognitionNet/FaceRecognitionNet.ts", "src/faceRecognitionNet/index.ts", "src/factories/WithFaceDescriptor.ts", "src/factories/WithAge.ts", "src/factories/WithGender.ts", "src/ssdMobilenetv1/extractParams.ts", "src/ssdMobilenetv1/extractParamsFromWeigthMap.ts", "src/ssdMobilenetv1/pointwiseConvLayer.ts", "src/ssdMobilenetv1/mobileNetV1.ts", "src/ssdMobilenetv1/nonMaxSuppression.ts", "src/ssdMobilenetv1/outputLayer.ts", "src/ssdMobilenetv1/boxPredictionLayer.ts", "src/ssdMobilenetv1/predictionLayer.ts", "src/ssdMobilenetv1/SsdMobilenetv1Options.ts", "src/ssdMobilenetv1/SsdMobilenetv1.ts", "src/ssdMobilenetv1/index.ts", "src/tinyYolov2/const.ts", "src/tinyYolov2/config.ts", "src/tinyYolov2/leaky.ts", "src/tinyYolov2/convWithBatchNorm.ts", "src/tinyYolov2/depthwiseSeparableConv.ts", "src/tinyYolov2/extractParams.ts", "src/tinyYolov2/extractParamsFromWeigthMap.ts", "src/tinyYolov2/TinyYolov2Options.ts", "src/tinyYolov2/TinyYolov2Base.ts", "src/tinyYolov2/TinyYolov2.ts", "src/tinyYolov2/index.ts", "src/tinyFaceDetector/TinyFaceDetectorOptions.ts", "src/globalApi/ComposableTask.ts", "src/globalApi/extractFacesAndComputeResults.ts", "src/tinyFaceDetector/const.ts", "src/tinyFaceDetector/TinyFaceDetector.ts", "src/globalApi/nets.ts", "src/globalApi/PredictFaceExpressionsTask.ts", "src/globalApi/PredictAgeAndGenderTask.ts", "src/globalApi/ComputeFaceDescriptorsTasks.ts", "src/globalApi/DetectFaceLandmarksTasks.ts", "src/globalApi/DetectFacesTasks.ts", "src/globalApi/detectFaces.ts", "src/globalApi/allFaces.ts", "src/euclideanDistance.ts", "src/globalApi/FaceMatcher.ts", "src/tinyFaceDetector/index.ts", "src/resizeResults.ts", "src/index.ts"], - "sourcesContent": ["\"use strict\";\n\n// ref: https://github.com/tc39/proposal-global\nvar getGlobal = function () {\n\t// the only reliable means to get the global object is\n\t// `Function('return this')()`\n\t// However, this causes CSP violations in Chrome apps.\n\tif (typeof self !== 'undefined') { return self; }\n\tif (typeof window !== 'undefined') { return window; }\n\tif (typeof global !== 'undefined') { return global; }\n\tthrow new Error('unable to locate global object');\n}\n\nvar global = getGlobal();\n\nmodule.exports = exports = global.fetch;\n\n// Needed for TypeScript and Webpack.\nif (global.fetch) {\n\texports.default = global.fetch.bind(global);\n}\n\nexports.Headers = global.Headers;\nexports.Request = global.Request;\nexports.Response = global.Response;", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n/** Convenient class for storing tensor-related data. */\nexport class DataStorage {\n constructor(backend, dataMover) {\n this.backend = backend;\n this.dataMover = dataMover;\n this.data = new WeakMap();\n this.dataIdsCount = 0;\n }\n get(dataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n set(dataId, value) {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n has(dataId) {\n return this.data.has(dataId);\n }\n delete(dataId) {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n numDataIds() {\n return this.dataIdsCount;\n }\n}\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend {\n time(f) {\n return notYetImplemented('time');\n }\n read(dataId) {\n return notYetImplemented('read');\n }\n readSync(dataId) {\n return notYetImplemented('readSync');\n }\n numDataIds() {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId) {\n return notYetImplemented('disposeData');\n }\n write(values, shape, dtype) {\n return notYetImplemented('write');\n }\n move(dataId, values, shape, dtype) {\n return notYetImplemented('move');\n }\n memory() {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision() {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon() {\n return this.floatPrecision() === 32 ? 
EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n batchMatMul(a, b, transposeA, transposeB) {\n return notYetImplemented('batchMatMul');\n }\n fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedBatchMatMul');\n }\n slice(x, begin, size) {\n return notYetImplemented('slice');\n }\n stridedSlice(x, begin, end, strides) {\n return notYetImplemented('stridedSlice');\n }\n unstack(x, axis) {\n return notYetImplemented('unstack');\n }\n reverse(a, axis) {\n return notYetImplemented('reverse');\n }\n concat(tensors, axis) {\n return notYetImplemented('concat');\n }\n neg(a) {\n return notYetImplemented('neg');\n }\n add(a, b) {\n return notYetImplemented('add');\n }\n addN(tensors) {\n return notYetImplemented('addN');\n }\n subtract(a, b) {\n return notYetImplemented('subtract');\n }\n multiply(a, b) {\n return notYetImplemented('multiply');\n }\n realDivide(a, b) {\n return notYetImplemented('realDivide');\n }\n floorDiv(a, b) {\n return notYetImplemented('floorDiv');\n }\n sum(x, axes) {\n return notYetImplemented('sum');\n }\n prod(x, axes) {\n return notYetImplemented('prod');\n }\n unsortedSegmentSum(x, segmentIds, numSegments) {\n return notYetImplemented('unsortedSegmentSum');\n }\n argMin(x, axis) {\n return notYetImplemented('argMin');\n }\n argMax(x, axis) {\n return notYetImplemented('argMax');\n }\n equal(a, b) {\n return notYetImplemented('equal');\n }\n notEqual(a, b) {\n return notYetImplemented('notEqual');\n }\n less(a, b) {\n return notYetImplemented('less');\n }\n lessEqual(a, b) {\n return notYetImplemented('lessEqual');\n }\n greater(a, b) {\n return notYetImplemented('greater');\n }\n greaterEqual(a, b) {\n return notYetImplemented('greaterEqual');\n }\n logicalNot(a) {\n return notYetImplemented('logicalNot');\n }\n logicalAnd(a, b) {\n return notYetImplemented('logicalAnd');\n }\n logicalOr(a, b) {\n return notYetImplemented('logicalOr');\n }\n where(condition) {\n return notYetImplemented('where');\n }\n select(condition, a, b) {\n return notYetImplemented('select');\n }\n topk(x, k, sorted) {\n return notYetImplemented('topk');\n }\n min(x, axes) {\n return notYetImplemented('min');\n }\n minimum(a, b) {\n return notYetImplemented('minimum');\n }\n mod(a, b) {\n return notYetImplemented('mod');\n }\n max(x, axes) {\n return notYetImplemented('max');\n }\n maximum(a, b) {\n return notYetImplemented('maximum');\n }\n all(x, axes) {\n return notYetImplemented('all');\n }\n any(x, axes) {\n return notYetImplemented('any');\n }\n squaredDifference(a, b) {\n return notYetImplemented('squaredDifference');\n }\n ceil(x) {\n return notYetImplemented('ceil');\n }\n floor(x) {\n return notYetImplemented('floor');\n }\n round(x) {\n return notYetImplemented('round');\n }\n sign(x) {\n return notYetImplemented('sign');\n }\n isNaN(x) {\n return notYetImplemented('isNaN');\n }\n isInf(x) {\n return notYetImplemented('isInf');\n }\n isFinite(x) {\n return notYetImplemented('isFinite');\n }\n pow(a, b) {\n return notYetImplemented('pow');\n }\n exp(x) {\n return notYetImplemented('exp');\n }\n expm1(x) {\n return notYetImplemented('expm1');\n }\n softmax(x, dim) {\n return notYetImplemented('softmax');\n }\n log(x) {\n return notYetImplemented('log');\n }\n log1p(x) {\n return notYetImplemented('log1p');\n }\n sqrt(x) {\n return notYetImplemented('sqrt');\n }\n rsqrt(x) {\n return notYetImplemented('rsqrt');\n }\n square(x) {\n return notYetImplemented('square');\n }\n reciprocal(x) {\n return 
notYetImplemented('reciprocal');\n }\n relu(x) {\n return notYetImplemented('relu');\n }\n relu6(x) {\n return notYetImplemented('relu6');\n }\n prelu(x, a) {\n return notYetImplemented('prelu');\n }\n elu(x) {\n return notYetImplemented('elu');\n }\n eluDer(dy, y) {\n return notYetImplemented('eluDer');\n }\n selu(x) {\n return notYetImplemented('selu');\n }\n int(x) {\n return notYetImplemented('int');\n }\n clip(x, min, max) {\n return notYetImplemented('clip');\n }\n abs(x) {\n return notYetImplemented('abs');\n }\n complexAbs(x) {\n return notYetImplemented('complexAbs');\n }\n sigmoid(x) {\n return notYetImplemented('sigmoid');\n }\n softplus(x) {\n return notYetImplemented('softplus');\n }\n sin(x) {\n return notYetImplemented('sin');\n }\n cos(x) {\n return notYetImplemented('cos');\n }\n tan(x) {\n return notYetImplemented('tan');\n }\n asin(x) {\n return notYetImplemented('asin');\n }\n acos(x) {\n return notYetImplemented('acos');\n }\n atan(x) {\n return notYetImplemented('atan');\n }\n atan2(a, b) {\n return notYetImplemented('atan2');\n }\n sinh(x) {\n return notYetImplemented('sinh');\n }\n cosh(x) {\n return notYetImplemented('cosh');\n }\n tanh(x) {\n return notYetImplemented('tanh');\n }\n asinh(x) {\n return notYetImplemented('asinh');\n }\n acosh(x) {\n return notYetImplemented('acosh');\n }\n atanh(x) {\n return notYetImplemented('atanh');\n }\n erf(x) {\n return notYetImplemented('erf');\n }\n step(x, alpha) {\n return notYetImplemented('step');\n }\n fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedConv2d');\n }\n conv2d(x, filter, convInfo) {\n return notYetImplemented('conv2d');\n }\n conv2dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv2dDerInput');\n }\n conv2dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv2dDerFilter');\n }\n fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }) {\n return notYetImplemented('fusedDepthwiseConv2D');\n }\n depthwiseConv2D(input, filter, convInfo) {\n return notYetImplemented('depthwiseConv2D');\n }\n depthwiseConv2DDerInput(dy, filter, convInfo) {\n return notYetImplemented('depthwiseConv2DDerInput');\n }\n depthwiseConv2DDerFilter(x, dY, convInfo) {\n return notYetImplemented('depthwiseConv2DDerFilter');\n }\n conv3d(x, filter, convInfo) {\n return notYetImplemented('conv3d');\n }\n conv3dDerInput(dy, filter, convInfo) {\n return notYetImplemented('conv3dDerInput');\n }\n conv3dDerFilter(x, dY, convInfo) {\n return notYetImplemented('conv3dDerFilter');\n }\n maxPool(x, convInfo) {\n return notYetImplemented('maxPool');\n }\n maxPoolBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPoolBackprop');\n }\n avgPool(x, convInfo) {\n return notYetImplemented('avgPool');\n }\n avgPoolBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPoolBackprop');\n }\n avgPool3d(x, convInfo) {\n return notYetImplemented('avgPool3d');\n }\n avgPool3dBackprop(dy, x, convInfo) {\n return notYetImplemented('avgPool3dBackprop');\n }\n maxPool3d(x, convInfo) {\n return notYetImplemented('maxPool3d');\n }\n maxPool3dBackprop(dy, x, y, convInfo) {\n return notYetImplemented('maxPool3dBackprop');\n }\n reshape(x, shape) {\n return notYetImplemented('reshape');\n }\n cast(x, dtype) {\n return notYetImplemented('cast');\n }\n tile(x, reps) {\n return notYetImplemented('tile');\n }\n pad(x, paddings, constantValue) {\n return notYetImplemented('pad');\n }\n transpose(x, perm) {\n return 
notYetImplemented('transpose');\n }\n gather(x, indices, axis) {\n return notYetImplemented('gather');\n }\n gatherND(x, indices) {\n return notYetImplemented('gatherND');\n }\n scatterND(indices, updates, shape) {\n return notYetImplemented('scatterND');\n }\n batchToSpaceND(x, blockShape, crops) {\n return notYetImplemented('batchToSpaceND');\n }\n spaceToBatchND(x, blockShape, paddings) {\n return notYetImplemented('spaceToBatchND');\n }\n resizeBilinear(x, newHeight, newWidth, alignCorners) {\n return notYetImplemented('resizeBilinear');\n }\n resizeBilinearBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeBilinearBackprop');\n }\n resizeNearestNeighbor(x, newHEight, newWidth, alignCorners) {\n return notYetImplemented('resizeNearestNeighbor');\n }\n resizeNearestNeighborBackprop(dy, x, alignCorners) {\n return notYetImplemented('resizeNearestNeighborBackprop');\n }\n batchNorm(x, mean, variance, offset, scale, varianceEpsilon) {\n return notYetImplemented('batchNorm');\n }\n localResponseNormalization4D(x, radius, bias, alpha, beta) {\n return notYetImplemented('localResponseNormalization4D');\n }\n LRNGrad(dy, inputImage, outputImage, radius, bias, alpha, beta) {\n return notYetImplemented('LRNGrad');\n }\n multinomial(logits, normalized, numSamples, seed) {\n return notYetImplemented('multinomial');\n }\n oneHot(indices, depth, onValue, offValue) {\n return notYetImplemented('oneHot');\n }\n cumsum(x, axis, exclusive, reverse) {\n return notYetImplemented('cumsum');\n }\n nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {\n return notYetImplemented('nonMaxSuppression');\n }\n fft(x) {\n return notYetImplemented('fft');\n }\n ifft(x) {\n return notYetImplemented('ifft');\n }\n complex(real, imag) {\n return notYetImplemented('complex');\n }\n real(input) {\n return notYetImplemented('real');\n }\n imag(input) {\n return notYetImplemented('imag');\n }\n cropAndResize(image, boxes, boxIndex, cropSize, method, extrapolationValue) {\n return notYetImplemented('cropAndResize');\n }\n depthToSpace(x, blockSize, dataFormat) {\n return notYetImplemented('depthToSpace');\n }\n // Aligns with the \"SplitV\" kernel in TensorFlow.\n split(value, sizeSplits, axis) {\n return notYetImplemented('split');\n }\n sparseToDense(sparseIndices, sparseValues, outputShape, defaultValue) {\n return notYetImplemented('sparseToDense');\n }\n diag(x) {\n return notYetImplemented('diag');\n }\n fill(shape, value, dtype) {\n return notYetImplemented('fill');\n }\n onesLike(x) {\n return notYetImplemented('onesLike');\n }\n zerosLike(x) {\n return notYetImplemented('zerosLike');\n }\n linspace(start, stop, num) {\n return notYetImplemented('linspace');\n }\n dispose() {\n return notYetImplemented('dispose');\n }\n}\nfunction notYetImplemented(kernelName) {\n throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n//# sourceMappingURL=backend.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array) {\n let counter = array.length;\n let temp = 0;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n temp = array[counter];\n array[counter] = array[index];\n array[index] = temp;\n }\n}\n/** Clamps a value to a specified range. */\nexport function clamp(min, x, max) {\n return Math.max(min, Math.min(x, max));\n}\nexport function nearestLargerEven(val) {\n return val % 2 === 0 ? val : val + 1;\n}\nexport function sum(arr) {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a, b) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a, b) {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr, msg) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? 
msg : msg());\n }\n}\nexport function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {\n assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\nexport function assertNonNull(a) {\n assert(a != null, () => `The input to the tensor constructor must be a non-null value.`);\n}\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function flatten(arr, result = [], skipTypedArray = false) {\n if (result == null) {\n result = [];\n }\n if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n }\n else {\n result.push(arr);\n }\n return result;\n}\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape) {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\nexport function isScalarShape(shape) {\n return shape.length === 0;\n}\nexport function arraysEqual(n1, n2) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\nexport function isInt(a) {\n return a % 1 === 0;\n}\nexport function tanh(x) {\n // tslint:disable-next-line:no-any\n if (Math.tanh != null) {\n // tslint:disable-next-line:no-any\n return Math.tanh(x);\n }\n if (x === Infinity) {\n return 1;\n }\n else if (x === -Infinity) {\n return -1;\n }\n else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\nexport function sizeToSquarishShape(size) {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n/**\n * Creates a new array with randomized indicies to a given quantity.\n *\n * ```js\n * const randomTen = tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indicies to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n) {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\nexport function rightPad(a, size) {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\nexport function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n tryCount++;\n const nextBackoff = 
delayFn(tryCount);\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n setTimeout(tryFn, nextBackoff);\n };\n tryFn();\n });\n}\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(shape, size) {\n let shapeProd = 1;\n let implicitIdx = -1;\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n }\n else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(`Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n }\n else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n if (shapeProd === 0) {\n throw Error(`Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(`The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\nexport function parseAxisParam(axis, shape) {\n const rank = shape.length;\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n // Check for valid range\n assert(axis.every(ax => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n // Check for only integers\n assert(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape, axis) {\n const newShape = [];\n const keptDims = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return { newShape, keptDims };\n}\nexport function getTypedArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function getArrayFromDType(dtype, size) {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(size);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(size);\n }\n else if (dtype === 'string') {\n values = new Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values;\n}\nexport function checkConversionForErrors(vals, dtype) {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype) {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType, newType) {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\nexport function isTypedArray(a) {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array;\n}\nexport function bytesPerElement(dtype) {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n }\n else if (dtype === 'complex64') {\n return 8;\n }\n else if (dtype === 'bool') {\n return 1;\n }\n else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. Computing the exact bytes for a native string in JS is\n * not possible since it depends on the encoding of the html page that serves\n * the website.\n */\nexport function bytesFromStringArray(arr) {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n/** Returns true if the value is a string. 
*/\nexport function isString(value) {\n return typeof value === 'string' || value instanceof String;\n}\nexport function isBoolean(value) {\n return typeof value === 'boolean';\n}\nexport function isNumber(value) {\n return typeof value === 'number';\n}\nexport function inferDtype(values) {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n }\n else if (values instanceof Int32Array || values instanceof Uint8Array) {\n return 'int32';\n }\n else if (isNumber(values)) {\n return 'float32';\n }\n else if (isString(values)) {\n return 'string';\n }\n else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\nexport function isFunction(f) {\n return !!(f && f.constructor && f.call && f.apply);\n}\nexport function nearestDivisor(size, start) {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\nexport function computeStrides(shape) {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\nfunction createNestedArray(offset, shape, a) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0];\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n }\n else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a);\n }\n }\n return ret;\n}\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(shape, a) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}.`);\n }\n return createNestedArray(0, shape, a);\n}\nexport function makeOnesTypedArray(size, dtype) {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\nexport function makeZerosTypedArray(size, dtype) {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size);\n }\n else if (dtype === 'int32') {\n return new Int32Array(size);\n }\n else if (dtype === 'bool') {\n return new Uint8Array(size);\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(shape, dtype) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n }\n else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n }\n else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\nexport function assertNonNegativeIntegerDimensions(shape) {\n shape.forEach(dimSize => {\n assert(Number.isInteger(dimSize) && dimSize >= 0, 
() => `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(locs, rank, strides) {\n if (rank === 0) {\n return 0;\n }\n else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n/**\n * Computes the location (multidimensional index) in a tensor/multidimentional\n * array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(index, rank, strides) {\n if (rank === 0) {\n return [];\n }\n else if (rank === 1) {\n return [index];\n }\n const locs = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n/**\n * This method asserts whether an object is a Promise instance.\n * @param object\n */\n// tslint:disable-next-line: no-any\nexport function isPromise(object) {\n // We chose to not use 'obj instanceOf Promise' for two reasons:\n // 1. It only reliably works for es6 Promise, not other Promise\n // implementations.\n // 2. It doesn't work with framework that uses zone.js. zone.js monkey patch\n // the async calls, so it is possible the obj (patched) is comparing to a\n // pre-patched Promise.\n return object && object.then && typeof object.then === 'function';\n}\n//# sourceMappingURL=util_base.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { isPromise } from './util_base';\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n // tslint:disable-next-line: no-any\n constructor(global) {\n this.global = global;\n this.flags = {};\n this.flagRegistry = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n setPlatform(platformName, platform) {\n if (this.platform != null) {\n console.warn(`Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platform}.`);\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n registerFlag(flagName, evaluationFn, setHook) {\n this.flagRegistry[flagName] = { evaluationFn, setHook };\n // Override the flag value from the URL. 
This has to happen here because the\n // environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`);\n this.set(flagName, flagValue);\n }\n }\n async getAsync(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n get(flagName) {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n const flagValue = this.evaluateFlag(flagName);\n if (isPromise(flagValue)) {\n throw new Error(`Flag ${flagName} cannot be synchronously evaluated. ` +\n `Please use getAsync() instead.`);\n }\n this.flags[flagName] = flagValue;\n return this.flags[flagName];\n }\n getNumber(flagName) {\n return this.get(flagName);\n }\n getBool(flagName) {\n return this.get(flagName);\n }\n getFlags() {\n return this.flags;\n }\n // For backwards compatibility.\n get features() {\n return this.flags;\n }\n set(flagName, value) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n evaluateFlag(flagName) {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n setFlags(flags) {\n this.flags = Object.assign({}, flags);\n }\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n populateURLFlags() {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n const urlParams = getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':');\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\nexport function getQueryParams(queryString) {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\nfunction decodeParam(params, name, value) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\nfunction parseValue(flagName, value) {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n }\n else if (`${+value}` === value) {\n return +value;\n }\n throw new Error(`Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\nexport let ENV = null;\nexport function setEnvironmentGlobal(environment) {\n ENV = environment;\n}\n//# sourceMappingURL=environment.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace;\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace() {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns;\n if (typeof (window) !== 'undefined') {\n ns = window;\n }\n else if (typeof (global) !== 'undefined') {\n ns = global;\n }\n else if (typeof (process) !== 'undefined') {\n ns = process;\n }\n else if (typeof (self) !== 'undefined') {\n ns = self;\n }\n else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n// tslint:disable-next-line:no-any\nfunction getGlobalMap() {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key, init) {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n }\n else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n//# sourceMappingURL=global_util.js.map", "export const Abs = 'Abs';\nexport const Acos = 'Acos';\nexport const Acosh = 'Acosh';\nexport const Add = 'Add';\nexport const AddN = 'AddN';\nexport const All = 'All';\nexport const Any = 'Any';\nexport const ArgMax = 'ArgMax';\nexport const ArgMin = 'ArgMin';\nexport const Asin = 'Asin';\nexport const Asinh = 'Asinh';\nexport const Atan = 'Atan';\nexport const Atanh = 'Atanh';\nexport const Atan2 = 'Atan2';\nexport const AvgPool = 'AvgPool';\nexport const AvgPoolBackprop = 'AvgPoolBackprop';\nexport const AvgPool3D = 'AvgPool3D';\nexport const AvgPool3DBackprop = 'AvgPool3DBackprop';\nexport const BatchMatMul = 'BatchMatMul';\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport const BroadcastTo = 'BroadcastTo';\nexport const Cast = 'Cast';\nexport const Ceil = 'Ceil';\nexport const ClipByValue = 'ClipByValue';\nexport const Complex = 'Complex';\nexport const Concat = 'Concat';\nexport const Conv2D = 'Conv2D';\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport const Conv3D = 'Conv3D';\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport const Cos = 'Cos';\nexport const Cosh = 'Cosh';\nexport const Cumsum = 'Cumsum';\nexport const CropAndResize = 'CropAndResize';\nexport const DepthToSpace = 'DepthToSpace';\nexport const DepthwiseConv2dNative = 
'DepthwiseConv2dNative';\nexport const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';\nexport const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';\nexport const Diag = 'Diag';\nexport const Dilation2D = 'Dilation2D';\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport const Div = 'Div';\nexport const Elu = 'Elu';\nexport const EluGrad = 'EluGrad';\nexport const Erf = 'Erf';\nexport const Equal = 'Equal';\nexport const Exp = 'Exp';\nexport const Expm1 = 'Expm1';\nexport const FFT = 'FFT';\nexport const Fill = 'Fill';\nexport const FlipLeftRight = 'FlipLeftRight';\nexport const Floor = 'Floor';\nexport const FloorDiv = 'FloorDiv';\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport const GatherV2 = 'GatherV2';\nexport const GatherNd = 'GatherNd';\nexport const Greater = 'Greater';\nexport const GreaterEqual = 'GreaterEqual';\nexport const Identity = 'Identity';\nexport const IFFT = 'IFFT';\nexport const Imag = 'Imag';\nexport const IsFinite = 'IsFinite';\nexport const IsInf = 'IsInf';\nexport const IsNan = 'IsNan';\nexport const Less = 'Less';\nexport const LessEqual = 'LessEqual';\nexport const LinSpace = 'LinSpace';\nexport const Log = 'Log';\nexport const Log1p = 'Log1p';\nexport const LogicalAnd = 'LogicalAnd';\nexport const LogicalNot = 'LogicalNot';\nexport const LogicalOr = 'LogicalOr';\nexport const LogSoftmax = 'LogSoftmax';\nexport const LRN = 'LRN';\nexport const LRNBackprop = 'LRNBackprop';\nexport const Max = 'Max';\nexport const Maximum = 'Maximum';\nexport const MaxPool = 'MaxPool';\nexport const MaxPoolBackprop = 'MaxPoolBackprop';\nexport const MaxPool3D = 'MaxPool3D';\nexport const MaxPool3DBackprop = 'MaxPool3DBackprop';\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport const Mean = 'Mean';\nexport const Min = 'Min';\nexport const Minimum = 'Minimum';\nexport const MirrorPad = 'MirrorPad';\nexport const Mod = 'Mod';\nexport const Multiply = 'Multiply';\nexport const Negate = 'Negate';\nexport const NotEqual = 'NotEqual';\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport const OnesLike = 'OnesLike';\nexport const OneHot = 'OneHot';\nexport const PadV2 = 'PadV2';\nexport const Pool = 'Pool';\nexport const Pow = 'Pow';\nexport const Prelu = 'Prelu';\nexport const Prod = 'Prod';\nexport const Range = 'Range';\nexport const Real = 'Real';\nexport const Reciprocal = 'Reciprocal';\nexport const Relu = 'Relu';\nexport const Reshape = 'Reshape';\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport const ResizeBilinear = 'ResizeBilinear';\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport const Relu6 = 'Relu6';\nexport const Reverse = 'Reverse';\nexport const Round = 'Round';\nexport const Rsqrt = 'Rsqrt';\nexport const ScatterNd = 'ScatterNd';\nexport const SelectV2 = 'SelectV2';\nexport const Selu = 'Selu';\nexport const Slice = 'Slice';\nexport const Sin = 'Sin';\nexport const Sinh = 'Sinh';\nexport const Sign = 'Sign';\nexport const Sigmoid = 'Sigmoid';\nexport const Softplus = 'Softplus';\nexport const Sqrt = 'Sqrt';\nexport const Sum = 'Sum';\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport const SplitV = 'SplitV';\nexport const Softmax = 'Softmax';\nexport const 
SquaredDifference = 'SquaredDifference';\nexport const Square = 'Square';\nexport const Sub = 'Sub';\nexport const SparseToDense = 'SparseToDense';\nexport const StridedSlice = 'StridedSlice';\nexport const Tan = 'Tan';\nexport const Tanh = 'Tanh';\nexport const Tile = 'Tile';\nexport const TopK = 'TopK';\nexport const Transpose = 'Transpose';\nexport const Unique = 'Unique';\nexport const Unpack = 'Unpack';\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport const ZerosLike = 'ZerosLike';\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport const FromPixels = 'FromPixels';\nexport const RotateWithOffset = 'RotateWithOffset';\nexport const _FusedMatMul = '_FusedMatMul';\nexport const FusedConv2D = 'FusedConv2D';\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\n//# sourceMappingURL=kernel_names.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport { getGlobal } from './global_util';\nconst kernelRegistry = getGlobal('kernelRegistry', () => new Map());\nconst gradRegistry = getGlobal('gradRegistry', () => new Map());\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName) {\n return gradRegistry.get(kernelName);\n}\nexport function getKernelsForBackend(backendName) {\n const it = kernelRegistry.entries();\n const result = [];\n while (true) {\n const { done, value } = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend,] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. 
Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config) {\n const { kernelName, backendName } = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n console.warn(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config) {\n const { kernelName } = config;\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n console.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(kernelName, backendName) {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(`The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n/** Removes the registered gradient from the global registry. */\nexport function unregisterGradient(kernelName) {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(`The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(registeredBackendName, newBackendName) {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName });\n registerKernel(newKernelConfig);\n });\n}\nfunction makeKey(kernelName, backendName) {\n return `${backendName}_${kernelName}`;\n}\n//# sourceMappingURL=kernel_registry.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from './environment';\nimport * as base from './util_base';\nexport * from './util_base';\n/**\n * Create typed array for scalar value. 
Used for storing in `DataStorage`.\n */\nexport function createScalarValue(value, dtype) {\n if (dtype === 'string') {\n return encodeString(value);\n }\n return toTypedArray([value], dtype);\n}\nfunction noConversionNeeded(a, dtype) {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\nexport function toTypedArray(a, dtype) {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = base.flatten(a);\n }\n if (env().getBool('DEBUG')) {\n base.checkConversionForErrors(a, dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a);\n }\n else if (dtype === 'int32') {\n return new Int32Array(a);\n }\n else if (dtype === 'bool') {\n const bool = new Uint8Array(a.length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round(a[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n }\n else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now() {\n return env().platform.now();\n}\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(path, requestInits) {\n return env().platform.fetch(path, requestInits);\n}\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes, encoding = 'utf-8') {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n//# sourceMappingURL=util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\nexport class Profiler {\n constructor(backendTimer, logger) {\n this.backendTimer = backendTimer;\n this.logger = logger;\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n profileKernel(kernelName, inputs, f) {\n let outputs;\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n const timer = this.backendTimer.time(holdResultWrapperFn);\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n logKernelProfile(kernelProfile) {\n const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile;\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);\n });\n });\n }\n}\nexport function checkComputationForErrors(vals, dtype, kernelName) {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i];\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\nexport class Logger {\n logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) {\n const time = typeof timeMs === 'number' ? util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n console.log(`%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${inputShapesDescription}\\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');\n }\n}\n//# sourceMappingURL=profiler.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as util from './util';\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(tape, xs, y) {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX = {};\n const nodesFromX = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n if (anyInputFromX) {\n break;\n }\n }\n }\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY = {};\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n // Return the paths that come from x and lead to y.\n const filteredTape = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n prunedNode.outputs = node.outputs;\n filteredTape.push(prunedNode);\n }\n }\n return filteredTape;\n}\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. 
This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n const dys = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n }\n else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n if (node.gradient == null) {\n throw new Error(`Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(`Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n }\n else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n//# sourceMappingURL=tape.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { computeStrides, isString, rightPad, sizeFromShape } from './util';\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\nexport function tensorToString(vals, shape, dtype, verbose) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\nfunction computeMaxSizePerColumn(vals, shape, dtype, strides) {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\nfunction valToString(val, pad, dtype) {\n let valStr;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n }\n else if (isString(val)) {\n valStr = `'${val}'`;\n }\n else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n }\n else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n return rightPad(valStr, pad);\n}\nfunction boolNumToString(v) {\n return v === 0 ? 'false' : 'true';\n}\nfunction subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) {\n const storagePerElement = dtype === 'complex64' ? 
2 : 1;\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0])];\n }\n return [vals[0].toString()];\n }\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n let firstVals = Array.from(vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals = dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + lines[0] + sep;\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\nfunction createComplexTuples(vals) {\n const complexTuples = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]]);\n }\n return complexTuples;\n}\n//# sourceMappingURL=tensor_format.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { tensorToString } from './tensor_format';\nimport * as util from './util';\nimport { computeStrides, toNestedArray } from './util';\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n constructor(shape, dtype, values) {\n this.dtype = dtype;\n this.shape = shape.slice();\n this.size = util.sizeFromShape(shape);\n if (values != null) {\n const n = values.length;\n util.assert(n === this.size, () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value, ...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n const index = this.locToIndex(locs);\n this.values[index] = value;\n }\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs) {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. 
` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index];\n }\n locToIndex(locs) {\n if (this.rank === 0) {\n return 0;\n }\n else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n indexToLoc(index) {\n if (this.rank === 0) {\n return [];\n }\n else if (this.rank === 1) {\n return [index];\n }\n const locs = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor() {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype);\n }\n}\n// For tracking tensor creation and disposal.\nlet trackerFn = null;\n// Used by chaining methods to call into ops.\nlet opHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn) {\n trackerFn = fn;\n}\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler) {\n opHandler = handler;\n}\n/**\n * Sets the deprecation warning function to be used by this file. This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn) {\n deprecationWarningFn = fn;\n}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor {\n constructor(shape, dtype, dataId, id) {\n /** Whether this tensor has been globally kept. */\n this.kept = false;\n this.isDisposedInternal = false;\n this.shape = shape.slice();\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');\n }\n get rank() {\n return this.shape.length;\n }\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer() {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype, vals);\n }\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync() {\n return opHandler.buffer(this.shape, this.dtype, this.dataSync());\n }\n /**\n * Returns the tensor data as a nested array. 
The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array() {\n const vals = await this.data();\n return toNestedArray(this.shape, vals);\n }\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync() {\n return toNestedArray(this.shape, this.dataSync());\n }\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data() {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data;\n try {\n return bytes.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /**\n * Synchronously downloads the values from the `tf.Tensor`. This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync() {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return data.map(b => util.decodeString(b));\n }\n catch (_a) {\n throw new Error('Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data;\n }\n /** Returns the underlying bytes of the tensor's data. */\n async bytes() {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data;\n }\n else {\n return new Uint8Array(data.buffer);\n }\n }\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose() {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n get isDisposed() {\n return this.isDisposedInternal;\n }\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false) {\n return opHandler.print(this, verbose);\n }\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone() {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n /**\n * Returns a human-readable description of the tensor. 
Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false) {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n cast(dtype) {\n this.throwIfDisposed();\n return opHandler.cast(this, dtype);\n }\n variable(trainable = true, name, dtype) {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype);\n }\n}\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n constructor(initialValue, trainable, name, tensorId) {\n super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.trainable = trainable;\n this.name = name;\n }\n /**\n * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue) {\n if (newValue.dtype !== this.dtype) {\n throw new Error(`dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(`shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n dispose() {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport var Rank;\n(function (Rank) {\n Rank[\"R0\"] = \"R0\";\n Rank[\"R1\"] = \"R1\";\n Rank[\"R2\"] = \"R2\";\n Rank[\"R3\"] = \"R3\";\n Rank[\"R4\"] = \"R4\";\n Rank[\"R5\"] = \"R5\";\n Rank[\"R6\"] = \"R6\";\n})(Rank || (Rank = {}));\n// Looks for upcasting types. 
Used, for example, in operations with mixed dtype\n// inputs.\nvar UpcastInt32AndMap;\n(function (UpcastInt32AndMap) {\n UpcastInt32AndMap[\"float32\"] = \"float32\";\n UpcastInt32AndMap[\"int32\"] = \"int32\";\n UpcastInt32AndMap[\"bool\"] = \"int32\";\n UpcastInt32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastInt32AndMap || (UpcastInt32AndMap = {}));\nvar UpcastBoolAndMap;\n(function (UpcastBoolAndMap) {\n UpcastBoolAndMap[\"float32\"] = \"float32\";\n UpcastBoolAndMap[\"int32\"] = \"int32\";\n UpcastBoolAndMap[\"bool\"] = \"bool\";\n UpcastBoolAndMap[\"complex64\"] = \"complex64\";\n})(UpcastBoolAndMap || (UpcastBoolAndMap = {}));\nvar UpcastFloat32AndMap;\n(function (UpcastFloat32AndMap) {\n UpcastFloat32AndMap[\"float32\"] = \"float32\";\n UpcastFloat32AndMap[\"int32\"] = \"float32\";\n UpcastFloat32AndMap[\"bool\"] = \"float32\";\n UpcastFloat32AndMap[\"complex64\"] = \"complex64\";\n})(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));\nvar UpcastComplex64AndMap;\n(function (UpcastComplex64AndMap) {\n UpcastComplex64AndMap[\"float32\"] = \"complex64\";\n UpcastComplex64AndMap[\"int32\"] = \"complex64\";\n UpcastComplex64AndMap[\"bool\"] = \"complex64\";\n UpcastComplex64AndMap[\"complex64\"] = \"complex64\";\n})(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\nexport function upcastType(typeA, typeB) {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n/** Returns the output type after summation. */\nexport function sumOutType(type) {\n return upcastType(type, 'int32');\n}\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { Tensor } from './tensor';\nimport { upcastType } from './types';\nimport { assert } from './util';\nexport function makeTypesMatch(a, b) {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\nexport function assertTypesMatch(a, b) {\n assert(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\nexport function isTensorInList(tensor, tensorList) {\n return tensorList.some(x => x.id === tensor.id);\n}\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. 
If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result) {\n const list = [];\n const seen = new Set();\n walkTensorContainer(result, list, seen);\n return list;\n}\nfunction walkTensorContainer(container, list, seen) {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n// tslint:disable-next-line:no-any\nfunction isIterable(obj) {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n//# sourceMappingURL=tensor_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { KernelBackend } from './backends/backend';\nimport { Environment, setEnvironmentGlobal } from './environment';\nimport { getGlobalNamespace } from './global_util';\nimport { Add, Cast } from './kernel_names';\nimport { getGradient, getKernel, getKernelsForBackend } from './kernel_registry';\nimport { Profiler } from './profiler';\nimport { backpropagateGradients, getFilteredNodesXToY } from './tape';\nimport { setTensorTracker, Tensor, Variable } from './tensor';\nimport { getTensorsInContainer } from './tensor_util';\nimport * as util from './util';\nimport { bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape } from './util';\nclass EngineState {\n constructor() {\n // Public since optimizers will use it.\n this.registeredVariables = {};\n this.nextTapeNodeId = 0;\n this.numBytes = 0;\n this.numTensors = 0;\n this.numStringTensors = 0;\n this.numDataBuffers = 0;\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n this.gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n this.kernelDepth = 0;\n this.scopeStack = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. 
We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n this.numDataMovesStack = [];\n this.nextScopeId = 0;\n this.tensorInfo = new WeakMap();\n this.profiling = false;\n this.activeProfile = { newBytes: 0, newTensors: 0, peakBytes: 0, kernels: [], result: null };\n }\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\nexport class Engine {\n constructor(ENV) {\n this.ENV = ENV;\n this.registry = {};\n this.registryFactory = {};\n this.pendingBackendInitId = 0;\n this.state = new EngineState();\n }\n async ready() {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => { });\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n get backend() {\n if (this.pendingBackendInit != null) {\n throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const { name, asyncInit } = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(`The highest priority backend '${name}' has not yet been ` +\n `initialized. Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n backendNames() {\n return Object.keys(this.registryFactory);\n }\n findBackend(backendName) {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const { asyncInit } = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n }\n else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n findBackendFactory(backendName) {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n registerBackend(backendName, factory, priority = 1) {\n if (backendName in this.registryFactory) {\n console.warn(`${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = { factory, priority };\n return true;\n }\n async setBackend(backendName) {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const { success, asyncInit } = this.initializeBackend(backendName);\n const result = asyncInit ? 
await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n return true;\n }\n setupRegisteredKernels() {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n disposeRegisteredKernels(backendName) {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n initializeBackend(backendName) {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(`Cannot initialize backend ${backendName}, no registration found.`);\n }\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend)\n && typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success = backend\n .then(backendInstance => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return { success, asyncInit: true };\n }\n else {\n this.registry[backendName] = backend;\n return { success: true, asyncInit: false };\n }\n }\n catch (err) {\n console.warn(`Initialization of backend ${backendName} failed`);\n console.warn(err.stack || err.message);\n return { success: false, asyncInit: false };\n }\n }\n removeBackend(backendName) {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. 
Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n delete this.registryFactory[backendName];\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n getSortedBackends() {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a, b) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n initializeBackendsAndReturnBest() {\n const sortedBackends = this.getSortedBackends();\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const { success, asyncInit } = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return { name: backendName, asyncInit };\n }\n }\n throw new Error(`Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n moveData(backend, dataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n tidy(nameOrFn, fn) {\n let name = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n }\n else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error('When calling with two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error('When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result;\n return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n scopedRun(start, end, f) {\n start();\n try {\n const res = f();\n end();\n return res;\n }\n catch (ex) {\n end();\n throw ex;\n }\n }\n nextTensorId() {\n return Engine.nextTensorId++;\n }\n nextVariableId() {\n return Engine.nextVariableId++;\n }\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. 
It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n *\n * This method will go away once all kernels are modularized since we won't\n * need to turn off the tape inside runKernel().\n */\n clone(x) {\n const y = this.makeTensorFromDataId(x.dataId, x.shape, x.dtype);\n const inputs = { x };\n const grad = (dy) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = { x: dy };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast(dy, dtype), gradInputs, null /* grad */, Cast, attrs);\n }\n });\n const saved = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(kernelName, inputs, attrs, inputsToSave, outputsToSave) {\n const forwardFunc = null;\n const backwardsFunc = null;\n // Call runKernel as a stop-gap until we modularize all kernels.\n // Once we modularize all kernels, we will remove the existing\n // `runKernelFunc`.\n return this.runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave);\n }\n shouldCheckForMemLeaks() {\n return this.ENV.getBool('IS_TEST');\n }\n checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) {\n const numDataIdsAfter = this.backend.numDataIds();\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(`Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n /**\n * @deprecated Use `runKernel` for newly added kernels. Keep using this method\n * only for kernels that are not yet fully modularized.\n */\n runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave) {\n let outputs;\n let saved = [];\n const isTapeOn = this.isTapeOn();\n if (kernelName == null) {\n kernelName =\n this.state.activeScope != null ? 
this.state.activeScope.name : '';\n }\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n let kernelFunc;\n const kernel = getKernel(kernelName, this.backendName);\n let out;\n if (kernel != null) {\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({ inputs, attrs, backend: this.backend });\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n const outTensors = outInfos.map(({ dataId, shape, dtype }) => this.makeTensorFromDataId(dataId, shape, dtype));\n // Save the inputs and outputs.\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (isTapeOn) {\n let tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors);\n if (tensorsToSave == null) {\n // Fallback for ops that call runKernelFunc and pass in\n // inputsToSave and outputsToSave. Currently this is the set of ops\n // with kernel support in the WASM backend. Once those ops and\n // respective gradients are modularised we can remove this path.\n if (outputsToSave == null) {\n outputsToSave = [];\n }\n const outsToSave = outTensors.filter((_, i) => outputsToSave[i]);\n tensorsToSave = (inputsToSave || []).slice().concat(outsToSave);\n }\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n }\n else {\n const saveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]);\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n // Stop recording to a tape when running a kernel.\n let kernelProfile;\n this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n }\n else {\n kernelProfile = this.profiler.profileKernel(kernelName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n if (isTapeOn) {\n this.addTapeNode(kernelName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? 
outputs : outputs[0]);\n }\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n saveTensorsForBackwardMode(tensors) {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * Returns undefined if their is no registered gradient for this kernel in the\n * gradient registry.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n getTensorsForGradient(kernelName, inputs, outputs) {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave = gradConfig.inputsToSave || [];\n const outputsToSave = gradConfig.outputsToSave || [];\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave;\n if (gradConfig.saveAllInputs) {\n util.assert(Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.');\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n }\n else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]);\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // TODO(yassogba) throw exception here once all runkernelFunc calls with\n // inputsToSave/outputsToSave are removed\n return null;\n }\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(values, shape, dtype, backend) {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = values.map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. 
It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n */\n makeTensorFromDataId(dataId, shape, dtype, backend) {\n dtype = dtype || 'float32';\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.incRef(t, backend);\n return t;\n }\n makeVariable(initialValue, trainable = true, name, dtype) {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n incRef(a, backend) {\n const refCount = this.state.tensorInfo.has(a.dataId) ?\n this.state.tensorInfo.get(a.dataId).refCount :\n 0;\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n if (refCount === 0) {\n this.state.numDataBuffers++;\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes,\n refCount: 0\n });\n this.state.numBytes += bytes;\n }\n this.state.tensorInfo.get(a.dataId).refCount++;\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n disposeTensor(a) {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n const refCount = info.refCount;\n if (refCount <= 1) {\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64') {\n this.state.numBytes -= info.bytes;\n }\n this.state.numDataBuffers--;\n info.backend.disposeData(a.dataId);\n this.state.tensorInfo.delete(a.dataId);\n }\n else {\n this.state.tensorInfo.get(a.dataId).refCount--;\n }\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. 
Creating a stack trace is too expensive\n // to do unconditionally.\n }\n disposeVariables() {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n disposeVariable(v) {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n memory() {\n const info = this.backend.memory();\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push('Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n async profile(query) {\n this.state.profiling = true;\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n this.state.profiling = false;\n this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n isTapeOn() {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) {\n const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved };\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n keep(result) {\n result.kept = true;\n return result;\n }\n startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n endTape() {\n this.state.gradientDepth--;\n }\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name) {\n const scopeInfo = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n /**\n * End a scope. 
Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id));\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(f, xs, dy, allowNoGradients = false) {\n util.assert(xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f));\n util.assert(y instanceof Tensor, () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n return this.tidy('backward', () => {\n const accumulatedGradientMap = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(accumulatedGradientMap, filteredTape, \n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f), \n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return { value: y, grads };\n });\n }\n customGrad(f) {\n util.assert(util.isFunction(f), () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs) => {\n util.assert(inputs.every(t => t instanceof Tensor), () => 'The args passed in customGrad(f)(x1, x2,...) 
must all be ' +\n 'tensors');\n let res;\n const inputMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n return this.runKernelFunc((_, save) => {\n res = f(...[...inputs, save]);\n util.assert(res.value instanceof Tensor, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(util.isFunction(res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n }, inputMap, (dy, saved) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads = Array.isArray(gradRes) ? gradRes : [gradRes];\n util.assert(grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(grads.every(t => t instanceof Tensor), () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n });\n };\n }\n readSync(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId) {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n async time(query) {\n const start = now();\n const timingInfo = await this.backend.time(query);\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n track(result) {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n return result;\n }\n get registeredVariables() {\n return this.state.registeredVariables;\n }\n /**\n * Resets the engine state. 
Removes all backends but does not remove\n * registered backend factories.\n */\n reset() {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\nEngine.nextTensorId = 0;\nEngine.nextVariableId = 0;\nfunction ones(shape) {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\nexport function getOrMakeEngine() {\n const ns = getGlobalNamespace();\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\nexport const ENGINE = getOrMakeEngine();\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a, b) {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = { a, b };\n return ENGINE.runKernelFunc((backend, save) => {\n const res = backend.add(a, b);\n save([a, b]);\n return res;\n }, inputs, null /* gradient */, Add);\n}\n//# sourceMappingURL=engine.js.map", "/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined() {\n return typeof navigator !== 'undefined' && navigator != null;\n}\nexport function isMobile() {\n if (_isNavigatorDefined()) {\n // tslint:disable-next-line:no-any\n const a = navigator.userAgent || navigator.vendor || window.opera;\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s 
)|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\nexport function isBrowser() {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n//# sourceMappingURL=device_util.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\nimport * as device_util from './device_util';\nimport { env } from './environment';\nconst ENV = env();\n/**\n * This file contains environment-related flag registrations.\n */\n/** Whether to enable debug mode. */\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn('Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_NODE', () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n/** Whether this browser is Chrome. 
*/\nENV.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n//# sourceMappingURL=flags.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from './engine';\nimport { env } from './environment';\nimport { Tensor } from './tensor';\nimport { assert, flatten, inferDtype, isTypedArray, toTypedArray } from './util';\nexport function inferShape(val, dtype) {\n let firstElem = val;\n if (isTypedArray(val)) {\n return dtype === 'string' ? 
[] : [val.length];\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape = [];\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n return shape;\n}\nfunction deepAssertShapeConsistency(val, shape, indices) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\nfunction assertDtype(expectedDtype, actualDType, argName, functionName) {\n if (expectedDtype == null) {\n return;\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(`Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\nexport function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 'null' : x.constructor.name;\n throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype) :\n flatten(x, [], skipTypedArray);\n return ENGINE.makeTensor(values, inferredShape, inferredDtype);\n}\nexport function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') {\n if (!Array.isArray(arg)) {\n throw new Error(`Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg;\n return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName), parseAsDtype);\n}\n//# sourceMappingURL=tensor_util_env.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { isPromise } from '../util';\nexport const OP_SCOPE_SUFFIX = '__op';\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f) {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(`Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n let opName = keys[0];\n const fn = f[opName];\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n // tslint:disable-next-line:no-any\n const f2 = (...args) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (isPromise(result)) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n }\n catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', { value: opName, configurable: true });\n // tslint:disable-next-line:no-any\n return f2;\n}\n//# sourceMappingURL=operation.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Complex } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real, imag) {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n const forward = (backend) => {\n return backend.complex($real, $imag);\n };\n const inputs = { real: $real, imag: $imag };\n return ENGINE.runKernelFunc(forward, inputs, null /* gradient */, Complex);\n}\nexport const complex = op({ complex_ });\n//# sourceMappingURL=complex.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray } from '../util';\n/** This is shared code across all tensor creation methods. */\nexport function makeTensor(values, shape, inferredShape, dtype) {\n if (dtype == null) {\n dtype = inferDtype(values);\n }\n if (dtype === 'complex64') {\n throw new Error(`Cannot construct a complex64 tensor directly. 
` +\n `Please use tf.complex(real, imag).`);\n }\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error('values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values];\n }\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values, [], true);\n return ENGINE.makeTensor(values, shape, dtype);\n}\n//# sourceMappingURL=tensor_ops_util.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`. If the values are strings,\n * they will be encoded as utf-8 and kept as `Uint8Array[]`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(values, shape, dtype) {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/* Type definitions for exporting and importing of models. */\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n//# sourceMappingURL=types.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { complex } from '../ops/complex';\nimport { tensor } from '../ops/tensor';\nimport { sizeFromShape } from '../util';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(tensors, group) {\n // TODO(adarob, cais): Support quantization.\n const specs = [];\n const dataPromises = [];\n const names = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec = { name, shape: t.shape, dtype: t.dtype };\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async (resolve) => {\n const vals = await t.bytes();\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n }\n else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n const tensorValues = await Promise.all(dataPromises);\n return { data: concatenateTypedArrays(tensorValues), specs };\n}\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(buffer, specs) {\n // TODO(adarob, cais): Support quantization.\n const out = {};\n let float16Decode;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values;\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n }\n else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n }\n else {\n throw new Error(`Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n }\n else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray);\n }\n else {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n }\n else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(`Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n }\n else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n values.push(bytes);\n offset += byteLength;\n }\n }\n else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n }\n else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n }\n else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n }\n else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n realTensor.dispose();\n imageTensor.dispose();\n }\n else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs) {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n let totalByteLength = 0;\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. 
`normliazedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfy this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs = [];\n xs.forEach((x) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :\n new x.constructor(x));\n if (!(x instanceof Float32Array || x instanceof Int32Array ||\n x instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n return y.buffer;\n}\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str) {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer) {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str) {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers) {\n if (buffers.length === 1) {\n return buffers[0];\n }\n let totalByteLength = 0;\n buffers.forEach((buffer) => {\n totalByteLength += buffer.byteLength;\n });\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path) {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = path.split(SEPARATOR);\n return items[items.length - 1];\n}\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function 
getModelArtifactsInfoForJSON(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n/**\n * Computes mantisa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable() {\n const convertMantissa = (i) => {\n let m = i << 13;\n let e = 0;\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n return m | e;\n };\n const mantisaTable = new Uint32Array(2048);\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n return mantisaTable;\n}\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable() {\n const exponentTable = new Uint32Array(64);\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n return exponentTable;\n}\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 6d offset values.\n */\nfunction computeFloat16OffsetTable() {\n const offsetTable = new Uint32Array(64);\n for (let i = 0; i < 64; i++) {\n offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n return offsetTable;\n}\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder() {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n return (quantizedArray) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n//# sourceMappingURL=io_utils.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nexport class IORouterRegistry {\n constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n static getInstance() {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url) {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url, loadOptions) {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n static getHandlers(url, handlerType, loadOptions) {\n const validHandlers = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\nexport const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions);\n//# sourceMappingURL=router_registry.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase() {\n const idbFactory = getIndexedDBFactory();\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\nfunction getIndexedDBFactory() {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('Failed to obtain IndexedDB factory because the current environment' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow = typeof window === 'undefined' ? 
self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error('The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\nfunction setUpDatabase(openRequest) {\n const db = openRequest.result;\n db.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' });\n db.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' });\n}\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB {\n constructor(modelPath) {\n this.indexedDB = getIndexedDBFactory();\n if (modelPath == null || !modelPath) {\n throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n async save(modelArtifacts) {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n return this.databaseAction(this.modelPath, modelArtifacts);\n }\n async load() {\n return this.databaseAction(this.modelPath);\n }\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n databaseAction(modelPath, modelArtifacts) {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n }\n else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n }\n else {\n // Put model into object store.\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo });\n let modelTx;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => 
resolve({ modelArtifactsInfo });\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\nBrowserIndexedDB.URL_SCHEME = 'indexeddb://';\nexport const indexedDBRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel'));\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath) {\n return new BrowserIndexedDB(modelPath);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\nexport class BrowserIndexedDBManager {\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n async listModels() {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = store.getAll();\n getAllInfoRequest.onsuccess = () => {\n const out = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n 
openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const getInfoRequest = infoStore.get(path);\n let modelTx;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(`Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n }\n else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error => reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n }\n else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n//# sourceMappingURL=indexed_db.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts() {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error('purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if 
(key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\nfunction getModelKeys(path) {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\nfunction maybeStripScheme(key) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage {\n constructor(modelPath) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error('The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n if (modelPath == null || !modelPath) {\n throw new Error('For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts);\n try {\n this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(modelArtifacts.weightData));\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify({\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata\n }));\n return { modelArtifactsInfo };\n }\n catch (err) {\n // If saving failed, clean up all items saved so far.\n this.LS.removeItem(this.keys.info);\n this.LS.removeItem(this.keys.topology);\n this.LS.removeItem(this.keys.weightSpecs);\n this.LS.removeItem(this.keys.weightData);\n 
this.LS.removeItem(this.keys.modelMetadata);\n throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load() {\n const info = JSON.parse(this.LS.getItem(this.keys.info));\n if (info == null) {\n throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);\n }\n if (info.modelTopologyType !== 'JSON') {\n throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n const out = {};\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(`In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString);\n out.format = metadata['format'];\n out.generatedBy = metadata['generatedBy'];\n out.convertedBy = metadata['convertedBy'];\n out.userDefinedMetadata = metadata['userDefinedMetadata'];\n }\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(`In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n return out;\n }\n}\nBrowserLocalStorage.URL_SCHEME = 'localstorage://';\nexport const localStorageRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. 
For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath) {\n return new BrowserLocalStorage(modelPath);\n}\nexport class BrowserLocalStorageManager {\n constructor() {\n assert(env().getBool('IS_BROWSER'), () => 'Current environment is not a web browser');\n assert(typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n async listModels() {\n const out = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key));\n }\n }\n return out;\n }\n async removeModel(path) {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info));\n this.LS.removeItem(keys.info);\n this.LS.removeItem(keys.topology);\n this.LS.removeItem(keys.weightSpecs);\n this.LS.removeItem(keys.weightData);\n return info;\n }\n}\n//# sourceMappingURL=local_storage.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\nimport { assert } from '../util';\nimport { IORouterRegistry } from './router_registry';\nconst URL_SCHEME_SUFFIX = '://';\nexport class ModelStoreManagerRegistry {\n constructor() {\n this.managers = {};\n }\n static getInstance() {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme, manager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`);\n registry.managers[scheme] = manager;\n }\n static getManager(scheme) {\n const manager = this.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n static getSchemes() {\n return Object.keys(this.getInstance().managers);\n }\n}\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url) {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(`The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\nasync function cloneModelInternal(sourceURL, destURL, deleteSource = false) {\n assert(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`);\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`);\n assert(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(saveHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(sourceURL).scheme;\n const modelArtifacts = await loadHandler.load();\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n const saveResult = await saveHandler.save(modelArtifacts);\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n return saveResult.modelArtifactsInfo;\n}\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels() {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out = {};\n for (const scheme of schemes) {\n const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n/**\n * Remove a model specified by URL from a reigstered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url) {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(sourceURL, destURL) {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(sourceURL, destURL) {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\nexport { moveModel, copyModel, removeModel, listModels };\n//# sourceMappingURL=model_management.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { BrowserIndexedDB, BrowserIndexedDBManager } from '../io/indexed_db';\nimport { BrowserLocalStorage, BrowserLocalStorageManager } from '../io/local_storage';\nimport { ModelStoreManagerRegistry } from '../io/model_management';\nexport class PlatformBrowser {\n fetch(path, init) {\n return fetch(path, init);\n }\n now() {\n return performance.now();\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n return new TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager());\n }\n catch (err) {\n }\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n }\n catch (err) {\n }\n}\n//# sourceMappingURL=platform_browser.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\nlet systemFetch;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch() {\n return systemFetch;\n}\nexport class PlatformNode {\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n fetch(path, requestInits) {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n now() {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n encode(text, encoding) {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes, encoding) {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n}\nif (env().get('IS_NODE')) {\n env().setPlatform('node', new PlatformNode());\n}\n//# sourceMappingURL=platform_node.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorBuffer } from '../tensor';\nimport * as util from '../util';\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. 
Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(shape, dtype = 'float32', values) {\n dtype = dtype || 'float32';\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n//# sourceMappingURL=buffer.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Cast } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x, dtype) {\n const $x = convertToTensor(x, 'x', 'cast');\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n const inputs = { x: $x };\n const attrs = { dtype };\n return ENGINE.runKernelFunc(backend => backend.cast($x, dtype), inputs, null /* grad */, Cast, attrs);\n}\nexport const cast = op({ cast_ });\n//# sourceMappingURL=cast.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Identity } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x) {\n const $x = convertToTensor(x, 'x', 'clone', null);\n const forward = () => ENGINE.makeTensorFromDataId($x.dataId, $x.shape, $x.dtype);\n const inputs = { x: $x };\n // Note this op is called tf.identity in python. Hence the kernel name used\n // here.\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Identity);\n}\nexport const clone = op({ clone_ });\n//# sourceMappingURL=clone.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x, verbose = false) {\n console.log(x.toString(verbose));\n}\n//# sourceMappingURL=print.js.map", "/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Required side effectful code for tfjs-core\n// Set up Engine and ENV\nimport { getOrMakeEngine } from './engine';\ngetOrMakeEngine();\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n// Set up OpHandler\nimport { buffer } from './ops/buffer';\nimport { cast } from './ops/cast';\nimport { clone } from './ops/clone';\nimport { print } from './ops/print';\nimport { setOpHandler } from './tensor';\nconst opHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n//# sourceMappingURL=base_side_effects.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\nimport '../flags';\nimport { env } from '../environment';\nimport { basename, concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\nfunction defer(f) {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\nexport class BrowserDownloads {\n constructor(fileNamePrefix) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error('browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n async save(modelArtifacts) {\n if (typeof (document) === 'undefined') {\n throw new Error('Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const 
weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n else {\n const weightsManifest = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n weightsManifest\n };\n const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: 'application/json' }));\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :\n this.jsonAnchor;\n jsonAnchor.download = this.modelTopologyFileName;\n jsonAnchor.href = modelTopologyAndWeightManifestURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) };\n }\n }\n}\nBrowserDownloads.URL_SCHEME = 'downloads://';\nclass BrowserFiles {\n constructor(files) {\n if (files == null || files.length < 1) {\n throw new Error(`When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.files = files;\n }\n async load() {\n const jsonFile = this.files[0];\n const weightFiles = this.files.slice(1);\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse(event.target.result);\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));\n return;\n }\n if (weightFiles.length === 0) {\n resolve({ modelTopology });\n }\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${jsonFile.name}`));\n return;\n }\n let pathToFile;\n try {\n pathToFile =\n this.checkManifestAndWeightFiles(weightsManifest, weightFiles);\n }\n catch (err) {\n reject(err);\n return;\n }\n const weightSpecs = [];\n const paths = [];\n const perFileBuffers = [];\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n paths.push(path);\n perFileBuffers.push(null);\n });\n weightSpecs.push(...weightsGroup.weights);\n });\n weightsManifest.forEach(weightsGroup => {\n weightsGroup.paths.forEach(path => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event) => {\n // tslint:disable-next-line:no-any\n const weightData = event.target.result;\n const index = paths.indexOf(path);\n 
perFileBuffers[index] = weightData;\n if (perFileBuffers.indexOf(null) === -1) {\n resolve({\n modelTopology,\n weightSpecs,\n weightData: concatenateArrayBuffers(perFileBuffers),\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy,\n userDefinedMetadata: modelJSON.userDefinedMetadata\n });\n }\n };\n weightFileReader.onerror = error => reject(`Failed to weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(pathToFile[path]);\n });\n });\n };\n jsonReader.onerror = error => reject(`Failed to read model topology and weights manifest JSON ` +\n `from file '${jsonFile.name}'. BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(jsonFile);\n });\n }\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n checkManifestAndWeightFiles(manifest, files) {\n const basenames = [];\n const fileNames = files.map(file => basename(file.name));\n const pathToFile = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(`Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);\n }\n else {\n pathToFile[path] = files[fileNames.indexOf(pathBasename)];\n }\n });\n }\n if (basenames.length !== files.length) {\n throw new Error(`Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${files.length}).`);\n }\n return pathToFile;\n }\n}\nexport const browserDownloadsRouter = (url) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n }\n else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n }\n else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used as model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. `null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. 
A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model') {\n return new BrowserDownloads(fileNamePrefix);\n}\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (``)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, One or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files) {\n return new BrowserFiles(files);\n}\n//# sourceMappingURL=browser_files.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { assert } from '../util';\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolved.\n * @param startFraction Optional fraction start. Default to 0.\n * @param endFraction Optional fraction end. 
Default to 1.\n */\nexport function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n const registerMonitor = (promise) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n function checkPromises(promises) {\n assert(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a none empty array');\n }\n function checkFraction(startFraction, endFraction) {\n assert(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n return Promise.all(promises.map(registerMonitor));\n}\n//# sourceMappingURL=progress.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { env } from '../environment';\nimport * as util from '../util';\nimport { decodeWeights } from './io_utils';\nimport { monitorPromisesProgress } from './progress';\nimport { DTYPE_VALUE_SIZE_MAP } from './types';\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n const fetchFunc = loadOptions.fetchFunc == null ? 
env().platform.fetch :\n loadOptions.fetchFunc;\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }));\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction);\n const bufferPromises = responses.map(response => response.arrayBuffer());\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction);\n return buffers;\n}\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });\n const loadWeights = weightsLoaderFactory(fetchWeights);\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(fetchWeightsFunction) {\n return async (manifest, filePathPrefix = '', weightNames) => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch = {};\n const weightsFound = weightNames != null ? 
weightNames.map(() => false) : [];\n const allManifestWeightNames = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n }\n else {\n enqueueWeightsForFetchingFn();\n }\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(`Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. \\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n const fetchUrls = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n const weightsTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n bufferIndexOffset += numBuffers;\n });\n return weightsTensorMap;\n };\n}\n//# sourceMappingURL=weights_loader.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\nimport { env } from '../environment';\nimport { assert } from '../util';\nimport { concatenateArrayBuffers, getModelArtifactsInfoForJSON } from './io_utils';\nimport { IORouterRegistry } from './router_registry';\nimport { loadWeightsAsArrayBuffer } from './weights_loader';\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest {\n constructor(path, loadOptions) {\n this.DEFAULT_METHOD = 'POST';\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n if (loadOptions.fetchFunc != null) {\n assert(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n }\n else {\n this.fetch = env().platform.fetch;\n }\n assert(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n if (Array.isArray(path)) {\n assert(path.length === 2, () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error('requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n async save(modelArtifacts) {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);\n init.body = new FormData();\n const weightsManifest = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const modelTopologyAndWeightManifest = {\n modelTopology: modelArtifacts.modelTopology,\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata,\n weightsManifest\n };\n init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');\n if (modelArtifacts.weightData != null) {\n init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');\n }\n const response = await this.fetch(this.path, init);\n if (response.ok) {\n return {\n modelArtifactsInfo: 
getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n }\n else {\n throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load() {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n if (!modelConfigRequest.ok) {\n throw new Error(`Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelConfig;\n try {\n modelConfig = await modelConfigRequest.json();\n }\n catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models have been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your.pb models with the \\'pb2json\\'' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n }\n else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n const modelTopology = modelConfig.modelTopology;\n const weightsManifest = modelConfig.weightsManifest;\n const generatedBy = modelConfig.generatedBy;\n const convertedBy = modelConfig.convertedBy;\n const format = modelConfig.format;\n const userDefinedMetadata = modelConfig.userDefinedMetadata;\n // We do not allow both modelTopology and weightsManifest to be missing.\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` +\n `topology or manifest for weights.`);\n }\n let weightSpecs;\n let weightData;\n if (weightsManifest != null) {\n const results = await this.loadWeights(weightsManifest);\n [weightSpecs, weightData] = results;\n }\n const artifacts = {\n modelTopology,\n weightSpecs,\n weightData,\n userDefinedMetadata,\n generatedBy,\n convertedBy,\n format\n };\n const initializer = modelConfig.modelInitializer;\n if (initializer) {\n artifacts.modelInitializer = initializer;\n }\n return artifacts;\n }\n async loadWeights(weightsManifest) {\n const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n const weightSpecs = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n const fetchURLs = [];\n const urlPromises = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n }\n else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\nHTTPRequest.URL_SCHEME_REGEX = /^https?:\\/\\//;\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url) {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\nexport function isHTTPScheme(url) {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\nexport const httpRouter = (url, loadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`, if one wants to use it in\n // an environment that is not the browser or node they have to setup a\n // global fetch polyfill.\n return null;\n }\n else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n }\n else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n};\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n/**\n * Creates an IOHandler subtype that sends model artifacts to HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the requst,\n * this particular server reconsistutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload)') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending\n * HTTP request to server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path, loadOptions) {\n return new HTTPRequest(path, loadOptions);\n}\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(path, loadOptions) {\n return http(path, loadOptions);\n}\n//# sourceMappingURL=http.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nclass PassthroughLoader {\n constructor(modelArtifacts) {\n this.modelArtifacts = modelArtifacts;\n }\n async load() {\n return this.modelArtifacts;\n }\n}\nclass PassthroughSaver {\n constructor(saveHandler) {\n this.saveHandler = saveHandler;\n }\n async save(modelArtifacts) {\n return this.saveHandler(modelArtifacts);\n }\n}\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {\n if (arguments.length === 1) {\n const isModelArtifacts = modelArtifacts.modelTopology != null ||\n modelArtifacts.weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts);\n }\n else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({ modelTopology: modelArtifacts });\n }\n }\n else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn('Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts,\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... 
do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandler(saveHandler) {\n return new PassthroughSaver(saveHandler);\n}\n//# sourceMappingURL=passthrough.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Importing local_storage and indexed_db is necessary for the routers to be\n// registered.\nimport './indexed_db';\nimport './local_storage';\nimport { browserFiles } from './browser_files';\nimport { browserHTTPRequest, http, isHTTPScheme } from './http';\nimport { concatenateArrayBuffers, decodeWeights, encodeWeights, getModelArtifactsInfoForJSON } from './io_utils';\nimport { fromMemory, withSaveHandler } from './passthrough';\nimport { getLoadHandlers, getSaveHandlers, registerLoadRouter, registerSaveRouter } from './router_registry';\nimport { loadWeights, weightsLoaderFactory } from './weights_loader';\nexport { copyModel, listModels, moveModel, removeModel } from './model_management';\nexport { browserFiles, browserHTTPRequest, concatenateArrayBuffers, decodeWeights, encodeWeights, fromMemory, getLoadHandlers, getModelArtifactsInfoForJSON, getSaveHandlers, http, isHTTPScheme, loadWeights, registerLoadRouter, registerSaveRouter, weightsLoaderFactory, withSaveHandler };\n//# sourceMappingURL=io.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Reshape } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Reshapes a `tf.Tensor` to a given shape.\n *\n * Given an input tensor, returns a new tensor with the same values as the\n * input tensor with shape `shape`.\n *\n * If one component of shape is the special value -1, the size of that\n * dimension is computed so that the total size remains constant. In\n * particular, a shape of [-1] flattens into 1-D. 
At most one component of\n * shape can be -1.\n *\n * If shape is 1-D or higher, then the operation returns a tensor with shape\n * shape filled with the values of tensor. In this case, the number of\n * elements implied by shape must be the same as the number of elements in\n * tensor.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.reshape([2, 2]).print();\n * ```\n *\n * @param x The input tensor to be reshaped.\n * @param shape An array of integers defining the output tensor shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction reshape_(x, shape) {\n const $x = convertToTensor(x, 'x', 'reshape', null);\n const inputs = { x: $x };\n const attrs = { shape };\n const forward = (backend, save) => {\n shape = util.inferFromImplicitShape(shape, $x.size);\n util.assert($x.size === util.sizeFromShape(shape), () => 'new shape and old shape must have the same number of elements.');\n save([$x]);\n return backend.reshape($x, shape);\n };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, Reshape, attrs);\n}\nexport const reshape = op({ reshape_ });\n//# sourceMappingURL=reshape.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { BatchMatMul } from '../kernel_names';\nimport { makeTypesMatch } from '../tensor_util';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(a, b, transposeA = false, transposeB = false) {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n const forward = (backend, save) => {\n save([$a, $b]);\n const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];\n const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];\n const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];\n const outerShapeB = transposeB ? 
$b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];\n const outerDimsA = $a.shape.slice(0, -2);\n const outerDimsB = $b.shape.slice(0, -2);\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n const batchDimsCompatible = batchDimA === batchDimB || batchDimA === 1 || batchDimB === 1;\n util.assert($a.rank >= 2 && $b.rank >= 2 && batchDimsCompatible, () => `Error in matMul: the input batch dimensions must either be the ` +\n `same or at least one input batch dimension must be 1. Got input ` +\n `batch dimensions of (${outerDimsA}) and (${outerDimsB}).`);\n util.assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n const outShapeOuterDims = batchDimA > batchDimB ? outerDimsA : outerDimsB;\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n const a3D = transposeA ?\n reshape($a, [batchDimA, innerShapeA, outerShapeA]) :\n reshape($a, [batchDimA, outerShapeA, innerShapeA]);\n const b3D = transposeB ?\n reshape($b, [batchDimB, outerShapeB, innerShapeB]) :\n reshape($b, [batchDimB, innerShapeB, outerShapeB]);\n const res3d = backend.batchMatMul(a3D, b3D, transposeA, transposeB);\n return reshape(res3d, outShape);\n };\n const inputs = { a: $a, b: $b };\n const attrs = { transposeA, transposeB };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, BatchMatMul, attrs);\n}\nexport const matMul = op({ matMul_ });\n//# sourceMappingURL=mat_mul.js.map", "/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { OneHot } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport { op } from './operation';\nimport { reshape } from './reshape';\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). 
If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(indices, depth, onValue = 1, offValue = 0) {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n const outShape = [...$indices.shape, depth];\n const forward = (backend, save) => {\n save([$indices]);\n return reshape(backend.oneHot(reshape($indices, [$indices.size]), depth, onValue, offValue), outShape);\n };\n const inputs = { indices: $indices };\n const attrs = { depth, onValue, offValue };\n return ENGINE.runKernelFunc(forward, inputs, null /* grad */, OneHot, attrs);\n}\nexport const oneHot = op({ oneHot_ });\n//# sourceMappingURL=one_hot.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { Transpose } from '../kernel_names';\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { op } from './operation';\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. 
Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(x, perm) {\n const $x = convertToTensor(x, 'x', 'transpose');\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n if ($x.rank <= 1) {\n return $x.clone();\n }\n const inputs = { x: $x };\n const attrs = { perm };\n return ENGINE.runKernelFunc(backend => backend.transpose($x, perm), inputs, null /* gradient */, Transpose, attrs);\n}\nexport const transpose = op({ transpose_ });\n//# sourceMappingURL=transpose.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { convertToTensor } from '../tensor_util_env';\nimport * as util from '../util';\nimport { cast } from './cast';\nimport { matMul } from './mat_mul';\nimport { oneHot } from './one_hot';\nimport { op } from './operation';\nimport { transpose } from './transpose';\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. 
The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(labels, predictions, numClasses) {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions = convertToTensor(predictions, 'predictions', 'confusionMatrix');\n util.assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses);\n const oneHotPredictions = oneHot(cast($predictions, 'int32'), numClasses);\n const oneHotLabelsT = transpose(oneHotLabels);\n const product = matMul(oneHotLabelsT, oneHotPredictions);\n return cast(product, 'int32');\n}\nexport const confusionMatrix = op({ confusionMatrix_ });\n//# sourceMappingURL=confusion_matrix.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * Exports under the tf.math.* namespace.\n */\nimport { confusionMatrix } from './ops/confusion_matrix';\nexport { confusionMatrix };\n//# sourceMappingURL=math.js.map", "/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { inferShape } from '../tensor_util_env';\nimport { assertNonNull } from '../util';\nimport { makeTensor } from './tensor_ops_util';\n/**\n * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor3d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor3d(values, shape, dtype) {\n assertNonNull(values);\n if (shape != null && shape.length !== 3) {\n throw new Error('tensor3d() requires shape to have three numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 3 && inferredShape.length !== 1) {\n throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error('tensor3d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype);\n}\n//# sourceMappingURL=tensor3d.js.map", "/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { ENGINE } from '../engine';\nimport { FromPixels } from '../kernel_names';\nimport { getKernel } from '../kernel_registry';\nimport { Tensor } from '../tensor';\nimport { convertToTensor } from '../tensor_util_env';\nimport { cast } from './cast';\nimport { op } from './operation';\nimport { tensor3d } from './tensor3d';\nlet fromPixels2DContext;\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(pixels, numChannels = 3) {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n if (pixels.data instanceof Uint8Array) {\n isPixelData = true;\n }\n else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n }\n else if (typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n }\n else if (typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n }\n else if (pixels.getContext != null) {\n isCanvasLike = true;\n }\n else {\n throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${pixels.constructor.name}`);\n }\n if (isVideo) {\n const HAVE_CURRENT_DATA_READY_STATE = 2;\n if (isVideo &&\n pixels.readyState <\n HAVE_CURRENT_DATA_READY_STATE) {\n throw new Error('The video element has not loaded data yet. Please wait for ' +\n '`loadeddata` event on the