face-api/dist/face-api.min.js


var faceapi=(()=>{var Ts=Object.defineProperty,Nb=Object.prototype.hasOwnProperty,As=Object.assign,St=(e,t)=>()=>(t||(t={exports:{}},e(t.exports,t)),t.exports),ql=e=>Ts(e,"__esModule",{value:!0}),_e=(e,t)=>{ql(e);for(var n in t)Ts(e,n,{get:t[n],enumerable:!0})},Rb=(e,t)=>{if(ql(e),typeof t=="object"||typeof t=="function")for(let n in t)!Nb.call(e,n)&&n!=="default"&&Ts(e,n,{get:()=>t[n],enumerable:!0});return e},zl=e=>e&&e.__esModule?e:Rb(Ts({},"default",{value:e,enumerable:!0}),e);var Yl=St((Hl,uc)=>{(function(e,t,n){function r(s){var c=this,p=a();c.next=function(){var l=2091639*c.s0+c.c*23283064365386963e-26;return c.s0=c.s1,c.s1=c.s2,c.s2=l-(c.c=l|0)},c.c=1,c.s0=p(" "),c.s1=p(" "),c.s2=p(" "),c.s0-=p(s),c.s0<0&&(c.s0+=1),c.s1-=p(s),c.s1<0&&(c.s1+=1),c.s2-=p(s),c.s2<0&&(c.s2+=1),p=null}function o(s,c){return c.c=s.c,c.s0=s.s0,c.s1=s.s1,c.s2=s.s2,c}function i(s,c){var p=new r(s),l=c&&c.state,h=p.next;return h.int32=function(){return p.next()*4294967296|0},h.double=function(){return h()+(h()*2097152|0)*11102230246251565e-32},h.quick=h,l&&(typeof l=="object"&&o(l,p),h.state=function(){return o(p,{})}),h}function a(){var s=4022871197,c=function(p){p=p.toString();for(var l=0;l<p.length;l++){s+=p.charCodeAt(l);var h=.02519603282416938*s;s=h>>>0,h-=s,h*=s,s=h>>>0,h-=s,s+=h*4294967296}return(s>>>0)*23283064365386963e-26};return c}t&&t.exports?t.exports=i:n&&n.amd?n(function(){return i}):this.alea=i})(Hl,typeof uc=="object"&&uc,typeof define=="function"&&define)});var Kl=St((Vl,dc)=>{(function(e,t,n){function r(a){var s=this,c="";s.x=0,s.y=0,s.z=0,s.w=0,s.next=function(){var l=s.x^s.x<<11;return s.x=s.y,s.y=s.z,s.z=s.w,s.w^=s.w>>>19^l^l>>>8},a===(a|0)?s.x=a:c+=a;for(var p=0;p<c.length+64;p++)s.x^=c.charCodeAt(p)|0,s.next()}function o(a,s){return s.x=a.x,s.y=a.y,s.z=a.z,s.w=a.w,s}function i(a,s){var c=new r(a),p=s&&s.state,l=function(){return(c.next()>>>0)/4294967296};return l.double=function(){do var h=c.next()>>>11,d=(c.next()>>>0)/4294967296,g=(h+d)/(1<<21);while(g===0);return g},l.int32=c.next,l.quick=l,p&&(typeof p=="object"&&o(p,c),l.state=function(){return o(c,{})}),l}t&&t.exports?t.exports=i:n&&n.amd?n(function(){return i}):this.xor128=i})(Vl,typeof dc=="object"&&dc,typeof define=="function"&&define)});var Xl=St((Jl,mc)=>{(function(e,t,n){function r(a){var s=this,c="";s.next=function(){var l=s.x^s.x>>>2;return s.x=s.y,s.y=s.z,s.z=s.w,s.w=s.v,(s.d=s.d+362437|0)+(s.v=s.v^s.v<<4^(l^l<<1))|0},s.x=0,s.y=0,s.z=0,s.w=0,s.v=0,a===(a|0)?s.x=a:c+=a;for(var p=0;p<c.length+64;p++)s.x^=c.charCodeAt(p)|0,p==c.length&&(s.d=s.x<<10^s.x>>>4),s.next()}function o(a,s){return s.x=a.x,s.y=a.y,s.z=a.z,s.w=a.w,s.v=a.v,s.d=a.d,s}function i(a,s){var c=new r(a),p=s&&s.state,l=function(){return(c.next()>>>0)/4294967296};return l.double=function(){do var h=c.next()>>>11,d=(c.next()>>>0)/4294967296,g=(h+d)/(1<<21);while(g===0);return g},l.int32=c.next,l.quick=l,p&&(typeof p=="object"&&o(p,c),l.state=function(){return o(c,{})}),l}t&&t.exports?t.exports=i:n&&n.amd?n(function(){return i}):this.xorwow=i})(Jl,typeof mc=="object"&&mc,typeof define=="function"&&define)});var Ql=St((Zl,fc)=>{(function(e,t,n){function r(a){var s=this;s.next=function(){var p=s.x,l=s.i,h,d,g;return h=p[l],h^=h>>>7,d=h^h<<24,h=p[l+1&7],d^=h^h>>>10,h=p[l+3&7],d^=h^h>>>3,h=p[l+4&7],d^=h^h<<7,h=p[l+7&7],h=h^h<<13,d^=h^h<<9,p[l]=d,s.i=l+1&7,d};function c(p,l){var h,d,g=[];if(l===(l|0))d=g[0]=l;else 
for(l=""+l,h=0;h<l.length;++h)g[h&7]=g[h&7]<<15^l.charCodeAt(h)+g[h+1&7]<<13;for(;g.length<8;)g.push(0);for(h=0;h<8&&g[h]===0;++h);for(h==8?d=g[7]=-1:d=g[h],p.x=g,p.i=0,h=256;h>0;--h)p.next()}c(s,a)}function o(a,s){return s.x=a.x.slice(),s.i=a.i,s}function i(a,s){a==null&&(a=+new Date);var c=new r(a),p=s&&s.state,l=function(){return(c.next()>>>0)/4294967296};return l.double=function(){do var h=c.next()>>>11,d=(c.next()>>>0)/4294967296,g=(h+d)/(1<<21);while(g===0);return g},l.int32=c.next,l.quick=l,p&&(p.x&&o(p,c),l.state=function(){return o(c,{})}),l}t&&t.exports?t.exports=i:n&&n.amd?n(function(){return i}):this.xorshift7=i})(Zl,typeof fc=="object"&&fc,typeof define=="function"&&define)}
`)),c.join(`
`)}function hw(e,t,n,r){const o=ee(t),i=r[r.length-1],a=new Array(i).fill(0),s=t.length,c=n==="complex64"?ns(e):e;if(s>1)for(let p=0;p<o/i;p++){const l=p*i;for(let h=0;h<i;h++)a[h]=Math.max(a[h],ts(c[l+h],0,n).length)}return a}function ts(e,t,n){let r;return Array.isArray(e)?r=`${parseFloat(e[0].toFixed(Up))} + ${parseFloat(e[1].toFixed(Up))}j`:Tt(e)?r=`'${e}'`:n==="bool"?r=su(e):r=parseFloat(e.toFixed(Up)).toString(),On(r,t)}function su(e){return e===0?"false":"true"}function la(e,t,n,r,o,i=!0){const a=n==="complex64"?2:1,s=t[0],c=t.length;if(c===0){if(n==="complex64"){const w=ns(e);return[ts(w[0],0,n)]}return n==="bool"?[su(e[0])]:[e[0].toString()]}if(c===1){if(s>ou){const L=es*a;let S=Array.from(e.slice(0,L)),I=Array.from(e.slice((s-es)*a,s*a));return n==="complex64"&&(S=ns(S),I=ns(I)),["["+S.map((R,A)=>ts(R,o[A],n)).join(", ")+", ..., "+I.map((R,A)=>ts(R,o[s-es+A],n)).join(", ")+"]"]}const w=n==="complex64"?ns(e):Array.from(e);return["["+w.map((L,S)=>ts(L,o[S],n)).join(", ")+"]"]}const p=t.slice(1),l=r.slice(1),h=r[0]*a,d=[];if(s>ou){for(let w=0;w<es;w++){const L=w*h,S=L+h;d.push(...la(e.slice(L,S),p,n,l,o,!1))}d.push("...");for(let w=s-es;w<s;w++){const L=w*h,S=L+h;d.push(...la(e.slice(L,S),p,n,l,o,w===s-1))}}else for(let w=0;w<s;w++){const L=w*h,S=L+h;d.push(...la(e.slice(L,S),p,n,l,o,w===s-1))}const g=c===2?",":"";d[0]="["+d[0]+g;for(let w=1;w<d.length-1;w++)d[w]=" "+d[w]+g;let x=`,
`;for(let w=2;w<c;w++)x+=`
`;return d[d.length-1]=" "+d[d.length-1]+"]"+(i?"":x),d}function ns(e){const t=[];for(let n=0;n<e.length;n+=2)t.push([e[n],e[n+1]]);return t}class br{constructor(e,t,n){if(this.dtype=t,this.shape=e.slice(),this.size=ee(e),n!=null){const r=n.length;f(r===this.size,()=>`Length of values '${r}' does not match the size inferred by the shape '${this.size}'.`)}if(t==="complex64")throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");this.values=n||Ep(t,this.size),this.strides=Ut(e)}set(e,...t){t.length===0&&(t=[0]),f(t.length===this.rank,()=>`The number of provided coordinates (${t.length}) must match the rank (${this.rank})`);const n=this.locToIndex(t);this.values[n]=e}get(...e){e.length===0&&(e=[0]);let t=0;for(const r of e){if(r<0||r>=this.shape[t]){const o=`Requested out of range element at ${e}. Buffer shape=${this.shape}`;throw new Error(o)}t++}let n=e[e.length-1];for(let r=0;r<e.length-1;++r)n+=this.strides[r]*e[r];return this.values[n]}locToIndex(e){if(this.rank===0)return 0;if(this.rank===1)return e[0];let t=e[e.length-1];for(let n=0;n<e.length-1;++n)t+=this.strides[n]*e[n];return t}indexToLoc(e){if(this.rank===0)return[];if(this.rank===1)return[e];const t=new Array(this.shape.length);for(let n=0;n<t.length-1;++n)t[n]=Math.floor(e/this.strides[n]),e-=t[n]*this.strides[n];return t[t.length-1]=e,t}get rank(){return this.shape.length}toTensor(){return Bt().makeTensor(this.values,this.shape,this.dtype)}}let Bt=null,wr=null,uw=null;function au(e){Bt=e}function cu(e){wr=e}function pu(e){uw=e}class te{constructor(e,t,n,r){this.kept=!1,this.isDisposedInternal=!1,this.shape=e.slice(),this.dtype=t||"float32",this.size=ee(e),this.strides=Ut(e),this.dataId=n,this.id=r,this.rankType=this.rank<5?this.rank.toString():"higher"}get rank(){return this.shape.length}async buffer(){const e=await this.data();return wr.buffer(this.shape,this.dtype,e)}bufferSync(){return wr.buffer(this.shape,this.dtype,this.dataSync())}async array(){const e=await this.data();return kn(this.shape,e)}arraySync(){return kn(this.shape,this.dataSync())}async data(){this.throwIfDisposed();const e=Bt().read(this.dataId);if(this.dtype==="string"){const t=await e;try{return t.map(n=>pa(n))}catch(n){throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().")}}return e}dataSync(){this.throwIfDisposed();const e=Bt().readSync(this.dataId);if(this.dtype==="string")try{return e.map(t=>pa(t))}catch(t){throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes().")}return e}async bytes(){this.throwIfDisposed();const e=await Bt().read(this.dataId);return this.dtype==="string"?e:new Uint8Array(e.buffer)}dispose(){if(this.isDisposed)return;Bt().disposeTensor(this),this.isDisposedInternal=!0}get isDisposed(){return this.isDisposedInternal}throwIfDisposed(){if(this.isDisposed)throw new Error("Tensor is disposed.")}print(e=!1){return wr.print(this,e)}clone(){return this.throwIfDisposed(),wr.clone(this)}toString(e=!1){const t=this.dataSync();return iu(t,this.shape,this.dtype,e)}cast(e){return this.throwIfDisposed(),wr.cast(this,e)}variable(e=!0,t,n){return this.throwIfDisposed(),Bt().makeVariable(this,e,t,n)}}Object.defineProperty(te,Symbol.hasInstance,{value:e=>!!e&&e.data!=null&&e.dataSync!=null&&e.throwIfDisposed!=null});class $t extends te{constructor(e,t,n,r){super(e.shape,e.dtype,e.dataId,r);this.trainable=t,this.name=n}assign(e){if(e.dtype!==this.dtype)throw new Error(`dtype of the new value (${e.dtype}) and previous value (${this.dtype}) must match`);if(!Se(e.shape,this.shape))throw new Error(`shape of the new value (${e.shape}) and previous value (${this.shape}) must match`);Bt().disposeTensor(this),this.dataId=e.dataId,Bt().incRef(this,null)}dispose(){Bt().disposeVariable(this),this.isDisposedInternal=!0}}Object.defineProperty($t,Symbol.hasInstance,{value:e=>e instanceof te&&e.assign!=null&&e.assign instanceof Function});var Wp;(function(e){e.R0="R0",e
Manifest JSON has weights with names: ${s.join(", ")}.`)}const c=o.reduce((g,x,w)=>(x&&g.push(w),g),[]),p=[];c.forEach(g=>{t[g].paths.forEach(x=>{const w=n+(n.endsWith("/")?"":"/")+x;p.push(w)})});const l=await e(p),h={};let d=0;return c.forEach(g=>{const x=t[g].paths.length;let w=0;for(let A=0;A<x;A++)w+=l[d+A].byteLength;const L=new ArrayBuffer(w),S=new Uint8Array(L);let I=0;for(let A=0;A<x;A++){const E=new Uint8Array(l[d+A]);S.set(E,I),I+=E.byteLength}const R=i[g];R.forEach(A=>{const E=L.slice(A.groupOffset,A.groupOffset+A.sizeBytes),F=Yp(E,[A.manifestEntry]);for(const M in F)h[M]=F[M]}),d+=x}),h}}const qw="application/octet-stream",zw="application/json";class Xp{constructor(e,t){if(this.DEFAULT_METHOD="POST",t==null&&(t={}),this.weightPathPrefix=t.weightPathPrefix,this.onProgress=t.onProgress,this.weightUrlConverter=t.weightUrlConverter,t.fetchFunc!=null?(f(typeof t.fetchFunc=="function",()=>"Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"),this.fetch=t.fetchFunc):this.fetch=pe().platform.fetch,f(e!=null&&e.length>0,()=>"URL path for http must not be null, undefined or empty."),Array.isArray(e)&&f(e.length===2,()=>`URL paths for http must have a length of 2, (actual length is ${e.length}).`),this.path=e,t.requestInit!=null&&t.requestInit.body!=null)throw new Error("requestInit is expected to have no pre-existing body, but has one.");this.requestInit=t.requestInit||{}}async save(e){if(e.modelTopology instanceof ArrayBuffer)throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet.");const t=Object.assign({method:this.DEFAULT_METHOD},this.requestInit);t.body=new FormData;const n=[{paths:["./model.weights.bin"],weights:e.weightSpecs}],r={modelTopology:e.modelTopology,format:e.format,generatedBy:e.generatedBy,convertedBy:e.convertedBy,userDefinedMetadata:e.userDefinedMetadata,weightsManifest:n};t.body.append("model.json",new Blob([JSON.stringify(r)],{type:zw}),"model.json"),e.weightData!=null&&t.body.append("model.weights.bin",new Blob([e.weightData],{type:qw}),"model.weights.bin");const o=await this.fetch(this.path,t);if(o.ok)return{modelArtifactsInfo:as(e),responses:[o]};throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${o.status}.`)}async load(){const e=await this.fetch(this.path,this.requestInit);if(!e.ok)throw new Error(`Request to ${this.path} failed with status code ${e.status}. Please verify this URL points to the model JSON of the model to load.`);let t;try{t=await e.json()}catch(l){let h=`Failed to parse model JSON of response from ${this.path}.`;throw this.path.endsWith(".pb")?h+=" Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository.":h+=" Please make sure the server is serving valid JSON for this request.",new Error(h)}const n=t.modelTopology,r=t.weightsManifest,o=t.generatedBy,i=t.convertedBy,a=t.format,s=t.userDefinedMetadata;if(n==null&&r==null)throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`);let c,p;if(r!=null){const l=await this.loadWeights(r);[c,p]=l}return{modelTopology:n,weightSpecs:c,weightData:p,userDefinedMetadata:s,generatedBy:o,convertedBy:i,format:a}}async loadWeights(e){const t=Array.isArray(this.path)?this.path[1]:this.path,[n,r]=Hw(t),o=this.weightPathPrefix||n,i=[];for(const p of e)i.push(...p.weights);const a=[],s=[];for(const p of e)for(const l of p.paths)this.weightUrlConverter!=null?s.push(this.weightUrlConverter(l)):a.push(o+l+r);this.weightUrlConverter&&a.push(...await Promise.all(s));const c=await Jp(a,{requestInit:this.requestInit,fetchFunc:this.fetch,onProgress:this.onProgress});return[i,ss(c)]}}Xp.URL_SCHEME_REGEX=/^https?:\/\//;function Hw(e){const t=e.lastIndexOf("/"),n=e.lastIndexOf("?"),r=e.
Actual: ${o}.
Expected: ${i}.`);for(let a=0;a<i.length;++a){const s=o[a],c=i[a];if(!n(s,c))throw new Error(`Arrays differ: actual[${a}] = ${s}, expected[${a}] = ${c}.
Actual: ${o}.
Expected: ${i}.`)}}function lx(e,t){e().then(()=>t.fail(),()=>t())}function hx(e,t){const n=typeof t=="string"||typeof t=="number"||typeof t=="boolean"?[t]:t;return Tt(e)||Tt(e[0])||Tt(t)||Tt(t[0])?cl(e,n,(r,o)=>r==o):cl(e,t,(r,o)=>pl(r,o,0))}function ux(e,t,n){if(n==null&&(n=al()),!pl(e,t,n))throw new Error(`Numbers differ: actual === ${e}, expected === ${t}`)}function pl(e,t,n){return!isFinite(e)&&!isFinite(t)?!0:!(isNaN(e)||isNaN(t)||Math.abs(e-t)>n)}function dx(e,t,n){for(let r=0;r<e.length;r++)if(e[r]<t||e[r]>n)throw new Error(`Value out of range:${e[r]} low: ${t}, high: ${n}`)}function mx(e,t){expect(new Float32Array(e)).toEqual(new Float32Array(t))}const xc="2.4.0";function fx(){pe().set("PROD",!0)}function gx(){pe().set("DEBUG",!0)}function bx(){pe().set("DEPRECATION_WARNINGS_ENABLED",!1),console.warn("TensorFlow.js deprecation warnings have been disabled.")}function ye(e){pe().getBool("DEPRECATION_WARNINGS_ENABLED")&&console.warn(e+" You can disable deprecation warnings with tf.disableDeprecationWarnings().")}pu(ye);function wx(){b.disposeVariables()}function xx(){return b}function yx(){return b.memory()}function Lx(e){return b.profile(e)}function O(e,t){return b.tidy(e,t)}function Le(e){const t=rs(e);t.forEach(n=>n.dispose())}function ll(e){return b.keep(e)}function vx(e){return b.time(e)}function Sx(e){return b.setBackend(e)}function Ix(){return b.ready()}function wc(){return b.backendName}function Tx(e){b.removeBackend(e)}function Ax(e){return b.findBackend(e)}function Nx(e){return b.findBackendFactory(e)}function Rx(e,t,n=1){return b.registerBackend(e,t,n)}function _x(){return b.backend}function Cx(e,t){pe().setPlatform(e,t)}function Ex(e,t){let n=u(e,"a","add"),r=u(t,"b","add");[n,r]=Y(n,r);const o=(a,s)=>{const c=a.add(n,r);return s([n,r]),c},i={a:n,b:r};return b.runKernelFunc(o,i,null,Cn)}const N=m({add_:Ex});function Ox(e,t){let n=u(e,"a","floorDiv"),r=u(t,"b","floorDiv");[n,r]=Y(n,r);const o=(a,s)=>{const c=a.floorDiv(n,r);return s([n,r]),c},i={a:n,b:r};return b.runKernelFunc(o,i,null,zo)}const fa=m({floorDiv_:Ox});function kx(e,t){let n=u(e,"a","div"),r=u(t,"b","div");if([n,r]=Y(n,r),n.dtype==="int32"&&r.dtype==="int32")return fa(n,r);const o=(s,c)=>{const p=s.realDivide(n,r);return c([n,r]),p},i={a:n,b:r},a={};return b.runKernelFunc(o,i,null,Bo,a)}const D=m({div_:kx});function Dx(e,t){let n=u(e,"a","mul"),r=u(t,"b","mul");[n,r]=Y(n,r);const o=(a,s)=>{const c=a.multiply(n,r);return s([n,r]),c},i={a:n,b:r};return b.runKernelFunc(o,i,null,li)}const v=m({mul_:Dx});function Fx(e){const t=u(e,"x","abs"),n={x:t};return b.runKernelFunc((r,o)=>(o([t]),t.dtype==="complex64"?r.complexAbs(t):r.abs(t)),n,null,ho)}const Oe=m({abs_:Fx});function Mx(e){const t=u(e,"x","acos"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.acos(t);return o([t]),i},n,null,uo)}const ju=m({acos_:Mx});function Ux(e){const t=u(e,"x","acosh"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.acosh(t);return o([t]),i},n,null,mo)}const Gu=m({acosh_:Ux});function Wx(e){f(Array.isArray(e),()=>"The argument passed to tf.addN() must be a list of tensors"),f(e.length>=1,()=>`Must pass at least one tensor to tf.addN(), but got ${e.length}`);const t=e.map((i,a)=>u(i,`tensors${a}`,"addN")),n=t[0];t.forEach(i=>{if(i.dtype!==n.dtype)throw new Error("All tensors passed to tf.addN() must have the same dtype")}),t.forEach(i=>{if(!Se(i.shape,n.shape))throw new Error("All tensors passed to tf.addN() must have the same shape")});const r=(i,a)=>{const s=i.addN(t);return a(t),s},o=t;return b.runKernelFunc(r,o,null,fo)}const 
Pu=m({addN_:Wx});function hl(e,t){for(let n=0;n<e.length;++n)if(e[e.length-n-1]!==t-1-n)return!1;return!0}function qu(e,t,n){const r=e.length+t.length,o=[];let i=0,a=0;for(let s=0;s<r;s++)n.indexOf(s)===-1?o.push(e[i++]):o.push(t[a++]);return o}function ul(e,t){const n=[],r=e.length;for(let i=0;i<r;i++)t.indexOf(i)===-1&&n.push(e[i]);const o=t.map(i=>e[i]);return[n,o]}function we(e,t){const n=t.map(r=>1);return qu(e,n,t)}function Bx(e,t,n){f(hl(t,n),()=>`${e} supports only inner-most axes for now. Got axes ${t} and rank-${n} input.`)}func
with dtype ${p.dtype}. `)});const r=H(t,n[0].shape)[0],o=gl(n.map(p=>p.shape),r);if(ee(o)===0)return Xe([],o);if(n=n.filter(p=>p.size>0),n.length===1)return n[0];const i=n.map(p=>p.shape);fl(i,r);const a=(p,l)=>{const h=p.concat(n,r);return l(n),h},s=n,c={axis:t};return b.runKernelFunc(a,s,null,Co,c)}const Z=m({concat_:ny});function ry(e){const t=u(e,"x","sigmoid"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.sigmoid(t);return o([i]),i},n,null,Oi)}const ft=m({sigmoid_:ry});function oy(e,t,n){const r=u(e,"x","slice");if(r.rank===0)throw new Error("Slicing scalar is not possible");const[o,i]=ls(r,t,n);rl(r,o,i);const a=(p,l)=>(l([r]),p.slice(r,o,i)),s={x:r},c={begin:t,size:n};return b.runKernelFunc(a,s,null,Ri,c)}const P=m({slice_:oy});function iy(e){const t=u(e,"x","tanh"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.tanh(t);return o([i]),i},n,null,Gi)}const hs=m({tanh_:iy});function sy(e,t,n,r,o,i){const a=u(e,"forgetBias","basicLSTMCell"),s=u(t,"lstmKernel","basicLSTMCell"),c=u(n,"lstmBias","basicLSTMCell"),p=u(r,"data","basicLSTMCell"),l=u(o,"c","basicLSTMCell"),h=u(i,"h","basicLSTMCell"),d=Z([p,h],1),g=V(d,s),x=N(g,c),w=x.shape[0],L=x.shape[1]/4,S=[w,L],I=P(x,[0,0],S),R=P(x,[0,L],S),A=P(x,[0,L*2],S),E=P(x,[0,L*3],S),F=N(v(ft(I),hs(R)),v(l,ft(N(a,A)))),M=v(hs(F),ft(E));return[F,M]}const td=m({basicLSTMCell_:sy});function ay(e,t,n){const r=u(e,"x","batchToSpaceND"),o=t.reduce((c,p)=>c*p);f(r.rank>=1+t.length,()=>`input rank is ${r.rank} but should be > than blockShape.length ${t.length}`),f(n.length===t.length,()=>`crops.length is ${n.length} but should be equal to blockShape.length ${t.length}`),f(r.shape[0]%o===0,()=>`input tensor batch is ${r.shape[0]} but is not divisible by the product of the elements of blockShape ${t.join(" * ")} === ${o}`);const i=c=>c.batchToSpaceND(r,t,n),a={x:r},s={blockShape:t,crops:n};return b.runKernelFunc(i,a,null,Ao,s)}const Nr=m({batchToSpaceND_:ay});function nd(e){let t;return e.rank===0||e.rank===1?t=y(e,[1,1,1,e.size]):e.rank===2?t=y(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?t=y(e,[1,e.shape[0],e.shape[1],e.shape[2]]):t=e,t}function cy(e,t,n,r,o,i){i==null&&(i=.001);const a=u(e,"x","batchNorm"),s=u(t,"mean","batchNorm"),c=u(n,"variance","batchNorm");let p;o!=null&&(p=u(o,"scale","batchNorm"));let l;r!=null&&(l=u(r,"offset","batchNorm")),f(s.rank===c.rank,()=>"Batch normalization gradient requires mean and variance to have equal ranks."),f(l==null||s.rank===l.rank,()=>"Batch normalization gradient requires mean and offset to have equal ranks."),f(p==null||s.rank===p.rank,()=>"Batch normalization gradient requires mean and scale to have equal ranks.");const h=nd(a),d=(L,S)=>(S([h,s,c,p]),L.batchNorm(h,ba(s),ba(c),ba(l),ba(p),i)),g={x:h,scale:p,offset:l,mean:s,variance:c},x={varianceEpsilon:i},w=b.runKernelFunc(d,g,null,Ho,x);return y(w,a.shape)}function ba(e){return e==null?null:e.rank===0?y(e,[e.size]):e.rank===1?e:e.rank===2?y(e,[1,1,e.shape[0],e.shape[1]]):e.rank===3?y(e,[1,e.shape[0],e.shape[1],e.shape[2]]):e}const jt=m({batchNorm_:cy});function py(e,t,n,r,o,i){const a=u(e,"x","batchNorm"),s=u(t,"mean","batchNorm"),c=u(n,"variance","batchNorm");let p;o!=null&&(p=u(o,"scale","batchNorm"));let l;return r!=null&&(l=u(r,"offset","batchNorm")),f(a.rank===2,()=>`Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`),f(s.rank===2||s.rank===1,()=>`Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${s.rank}.`),f(c.rank===2||c.rank===1,()=>`Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank 
${c.rank}.`),p!=null&&f(p.rank===2||p.rank===1,()=>`Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${p.rank}.`),l!=null&&f(l.rank===2||l.rank===1,()=>`Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${l.rank}.`),jt(a,s,c,l,p,i)}const rd=m({batchNorm2d_:py});function ly(e,t,n,r,o,i){const a=u(e,"x","batchNorm"),s=u(t,"mean","batchNorm"),c=u(n,"variance","batchNorm");let p;o!=null&&(p=u(o,"scale","batchNorm"));let l;return r!=null&&(l=u(r,"offset","batchNorm")),f(a.rank===3,()=>`Error in batchNorm3D: x must
${o} and ${t} for depthToSpace with input shape
${r.shape}`),f(i*t>=0,()=>`Negative dimension size caused by overflow when multiplying
${i} and ${t} for depthToSpace with input shape
${r.shape}`),f(a%(t*t)===0,()=>`Dimension size must be evenly divisible by ${t*t} but is ${a} for depthToSpace with input shape ${r.shape}`);const s=l=>l.depthToSpace(r,t,n),c={x:r},p={blockSize:t,dataFormat:n};return b.runKernelFunc(s,c,null,jc,p)}const fd=m({depthToSpace_:_y});function Cy(e,t,n,r,o="NHWC",i=[1,1],a){const s=u(e,"x","depthwiseConv2d"),c=u(t,"filter","depthwiseConv2d");let p=s,l=!1;s.rank===3&&(l=!0,p=y(s,[1,s.shape[0],s.shape[1],s.shape[2]])),f(p.rank===4,()=>`Error in depthwiseConv2d: input must be rank 4, but got rank ${p.rank}.`),f(c.rank===4,()=>`Error in depthwiseConv2d: filter must be rank 4, but got rank ${c.rank}.`),f(p.shape[3]===c.shape[2],()=>`Error in depthwiseConv2d: number of input channels (${p.shape[3]}) must match the inChannels dimension in filter ${c.shape[2]}.`),a!=null&&f(X(r),()=>`Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${a} but got pad ${r}.`);const h=(w,L)=>{i==null&&(i=[1,1]),f(le(n,i),()=>`Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${n} and dilations '${i}'`);const S=je(p.shape,c.shape,n,i,r,a,!0),I=w.depthwiseConv2D(p,c,S);return L([p,c]),I},d={x:p,filter:c},g={strides:n,pad:r,dataFormat:o,dilations:i,dimRoundingMode:a},x=b.runKernelFunc(h,d,null,Uo,g);return l?y(x,[x.shape[1],x.shape[2],x.shape[3]]):x}const wn=m({depthwiseConv2d_:Cy});function Ey(e){const t=u(e,"x","diag"),n=o=>{const i=y(t,[t.size]),a=o.diag(i),s=[...e.shape,...e.shape];return y(a,s)},r={x:t};return b.runKernelFunc(n,r,null,qc)}const gd=m({diag_:Ey});function Oy(e,t,n,r,o=[1,1],i="NHWC"){const a=u(e,"x","dilation2d"),s=u(t,"filter","dilation2d");f(a.rank===3||a.rank===4,()=>`Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`),f(s.rank===3,()=>`Error in dilation2d: filter must be rank 3, but got rank ${s.rank}.`),f(i==="NHWC",()=>`Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${i}`);let c=a,p=!1;a.rank===3&&(c=y(a,[1,a.shape[0],a.shape[1],a.shape[2]]),p=!0);const l={x:c,filter:s},h={strides:n,pad:r,dilations:o},d=b.runKernel(Wo,l,h);return p?y(d,[d.shape[1],d.shape[2],d.shape[3]]):d}const bd=m({dilation2d_:Oy});function ky(e,t){const n=e.length,r=[];for(let o=0;o<n;o++){const i=n-1-o,a=e[i]||1,s=t[t.length-1-o]||1;s>1&&a===1&&r.unshift(i)}return r}function ae(e,t){const n=[];for(let r=0;r<t.length;r++){const o=e[e.length-r-1],i=t.length-r-1,a=t[i];(o==null||o===1&&a>1)&&n.unshift(i)}return n}function q(e,t){const n=[],r=Math.max(e.length,t.length);for(let o=0;o<r;o++){let i=e[e.length-o-1];i==null&&(i=1);let a=t[t.length-o-1];if(a==null&&(a=1),i===1)n.unshift(a);else if(a===1)n.unshift(i);else if(i!==a){const s=`Operands could not be broadcast together with shapes ${e} and ${t}.`;throw Error(s)}else n.unshift(i)}return n}function Dy(e,t){let n=u(e,"a","equal"),r=u(t,"b","equal");[n,r]=Y(n,r),q(n.shape,r.shape);const o=a=>a.equal(n,r),i={a:n,b:r};return b.runKernelFunc(o,i,null,Vc)}const xn=m({equal_:Dy});function Fy(e,t,n){const r=u(t,"a","where"),o=u(n,"b","where"),i=u(e,"condition","where","bool"),a=q(r.shape,o.shape),s=us(r,a),c=us(o,a);i.rank===1&&f(i.shape[0]===r.shape[0],()=>"The first dimension of `a` must match the size of `condition`."),i.rank!==1&&G(i.shape,c.shape,"Error in where: ");const p=(h,d)=>{const g=h.select(i,s,c);return d([i]),g},l={condition:i,t:s,e:c};return b.runKernelFunc(p,l,null,Ai)}const De=m({where_:Fy});function My(e){const t=u(e,"x","zerosLike"),n={x:t};return b.runKernelFunc(r=>r.zerosLike(t),n,null,Yi)}const 
B=m({zerosLike_:My});function Uy(e,t){let n=u(e,"a","div"),r=u(t,"b","div");[n,r]=Y(n,r);const o=D(n,r),i=B(o),a=xn(r,i);return De(a,i,o)}const wd=m({divNoNan_:Uy});function Wy(e,t){const n=u(e,"t1","dot"),r=u(t,"t2","dot");f((n.rank===1||n.rank===2)&&(r.rank===1||r.rank===2),()=>`Error in dot: inputs must all be rank 1 or 2, but got ranks ${n.rank} and ${r.rank}.`);const o=n.rank===1?n.size:n.shape[1],i=r.rank===1?r.size:r.shape[0];if(f(o===i,()=>`Error in dot: inner dimensions of inputs must match, but got ${o} and ${i}.`),n.rank===1&&r.ran
rank ${i.rank}.`),f(X(t),()=>`Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t}.`);let a=i,s=!1;i.rank===3&&(s=!0,a=y(i,[1,i.shape[0],i.shape[1],i.shape[2]]));const c=(d,g)=>{const x=d.localResponseNormalization4D(a,t,n,r,o);return g([a,x]),x},p={x:a},l={depthRadius:t,bias:n,alpha:r,beta:o},h=b.runKernelFunc(c,p,null,ni,l);return s?y(h,[h.shape[1],h.shape[2],h.shape[3]]):h}const Rd=m({localResponseNormalization_:aL});function cL(e){const t=u(e,"x","log"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.log(t);return o([t]),i},n,null,Qo)}const Rt=m({log_:cL});function pL(e){const t=u(e,"x","log1p"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.log1p(t);return o([t]),i},n,null,ei)}const Ia=m({log1p_:pL});function lL(e){return f(Mt(e),()=>"The f passed in grad(f) must be a function"),(t,n)=>{const r=u(t,"x","tf.grad",null),o=n!=null?u(n,"dy","tf.grad"):null;return b.tidy(()=>{const{value:i,grads:a}=b.gradients(()=>e(r),[r],o);return o!=null&&G(i.shape,o.shape,"The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"),Ta(a),a[0]})}}function hL(e){return f(Mt(e),()=>"The f passed in grads(f) must be a function"),(t,n)=>{f(Array.isArray(t),()=>"The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");const r=Jt(t,"args","tf.grads",null),o=n!=null?u(n,"dy","tf.grads"):null;return b.tidy(()=>{const{value:i,grads:a}=b.gradients(()=>e(...r),r,o);return o!=null&&G(i.shape,o.shape,"The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Ta(a),a})}}function uL(e){return f(Mt(e),()=>"The f passed in valueAndGrad(f) must be a function"),(t,n)=>{f(t instanceof te,()=>"The x passed in valueAndGrad(f)(x) must be a tensor"),f(n==null||n instanceof te,()=>"The dy passed in valueAndGrad(f)(x, dy) must be a tensor");const{grads:r,value:o}=b.gradients(()=>e(t),[t],n);return Ta(r),{grad:r[0],value:o}}}function dL(e){return f(Mt(e),()=>"The f passed in valueAndGrads(f) must be a function"),(t,n)=>{f(Array.isArray(t)&&t.every(o=>o instanceof te),()=>"The args passed in valueAndGrads(f)(args) must be array of tensors"),f(n==null||n instanceof te,()=>"The dy passed in valueAndGrads(f)(args, dy) must be a tensor");const r=b.gradients(()=>e(...t),t,n);return n!=null&&G(r.value.shape,n.shape,"The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"),Ta(r.grads),r}}function wl(e,t){f(Mt(e),()=>"The f passed in variableGrads(f) must be a function"),f(t==null||Array.isArray(t)&&t.every(p=>p instanceof $t),()=>"The varList passed in variableGrads(f, varList) must be an array of variables");const n=t!=null;if(!n){t=[];for(const p in b.registeredVariables)t.push(b.registeredVariables[p])}const r=n?t.filter(p=>!p.trainable):null,o=t.length;t=t.filter(p=>p.trainable),f(t.length>0,()=>`variableGrads() expects at least one of the input variables to be trainable, but none of the ${o} variables is trainable.`);const i=!0,{value:a,grads:s}=b.gradients(e,t,null,i);f(s.some(p=>p!=null),()=>"Cannot find a connection between any variable and the result of the loss function y=f(x). 
Please make sure the operations that use variables are inside the function f passed to minimize()."),f(a.rank===0,()=>`The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);const c={};return t.forEach((p,l)=>{s[l]!=null&&(c[p.name]=s[l])}),r!=null&&r.forEach(p=>c[p.name]=null),{value:a,grads:c}}function ze(e){return b.customGrad(e)}function Ta(e){const t=e.filter(n=>n==null).length;if(t>0)throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that
the f you passed encloses all operations that lead from x to y.`)}function mL(e){const t=u(e,"x","neg"),n={x:t};return b.runKernelFunc(r=>r.neg(t),n,null,hi)}const re=m({neg_:mL});function fL(e){const t=u(e,"x","softplus"),n={x:t};return b.runKernelFunc((r,o)=>{const i=r.softplus(t);return o([t]),i},n,null,ki)}const Aa=m({softplus_:fL});function gL(e){const t=u(e,"x","logSigmoid"),n=ze(r=>{const o=re(Aa(re(r))),i=a=>{const s=v(a,ft(re(r)));return s};return{value:o,gradFunc:i}});return n(t)}const _d=m({logSigmoid_:gL});function bL(e,t=null,n=!1){const r=u(e,"x","max"),o=(s,c)=>{const p=H(t,r.shape);let l=p;const h=fe(l,r.rank);let d=r;h!=null&&(d=K(r,h),l=ke(l.length,d.rank));const g=s.max(d,l);h!=null&&d.dispose();let x=g;if(n){const w=we(x.shape,H(t,r.shape));x=y(x,w),g.dispose()}return c([r,x]),x},i={x:r},a={reductionIndices:t,keepDims:n};return b.runKernelFunc(o,i,null,ri,a)}const qt=m({max_:bL});function wL(e,t){let n=u(e,"a","sub"),r=u(t,"b","sub");[n,r]=Y(n,r);const o=(a,s)=>{const c=a.subtract(n,r);return s([n,r]),c},i={a:n,b:r};return b.runKernelFunc(o,i,null,$i)}const k=m({sub_:wL});function xL(e,t=null,n=!1){let r=u(e,"x","sum");r.dtype==="bool"&&(r=_(r,"int32"));const o=(s,c)=>{c([r]);const p=H(t,r.shape),l=fe(p,r.rank);let h=p,d=r;l!=null&&(d=K(r,l),h=ke(h.length,r.rank));let g=s.sum(d,h);if(n){const x=we(g.shape,p);g=y(g,x)}return g},i={x:r},a={axis:t,keepDims:n};return b.runKernelFunc(o,i,null,Fi,a)}const W=m({sum_:xL});function yL(e,t=-1){const n=u(e,"logits","logSoftmax");if(t===-1&&(t=n.rank-1),t!==n.rank-1)throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${n.rank} and axis was ${t}`);const r=(a,s)=>{const c=!0,p=qt(e,t,!0),l=k(e,p),h=k(_(l,"float32"),Rt(W(Te(l),t,c)));return s([h]),h},o={logits:n},i={axis:t};return b.runKernelFunc(r,o,null,ti,i)}const Cd=m({logSoftmax_:yL});function LL(e,t=null,n=!1){const r=u(e,"x","logSumExp"),o=H(t,r.shape),i=qt(r,o,!0),a=k(r,i),s=Te(a),c=W(s,o),p=Rt(c),l=N(y(i,p.shape),p);if(n){const h=we(l.shape,o);return y(l,h)}return l}const Na=m({logSumExp_:LL});function vL(e,t){const n=u(e,"a","logicalAnd","bool"),r=u(t,"b","logicalAnd","bool");q(n.shape,r.shape);const o={a:n,b:r};return b.runKernelFunc(i=>i.logicalAnd(n,r),o,null,ip)}const _t=m({logicalAnd_:vL});function SL(e){const t=u(e,"x","logicalNot","bool"),n={x:t};return b.runKernelFunc(r=>r.logicalNot(t),n,null,sp)}const Dr=m({logicalNot_:SL});function IL(e,t){const n=u(e,"a","logicalOr","bool"),r=u(t,"b","logicalOr","bool");q(n.shape,r.shape);const o={a:n,b:r};return b.runKernelFunc(i=>i.logicalOr(n,r),o,null,ap)}const Ra=m({logicalOr_:IL});function TL(e,t){const n=u(e,"a","logicalXor","bool"),r=u(t,"b","logicalXor","bool");return q(n.shape,r.shape),_t(Ra(e,t),Dr(_t(e,t)))}const Ed=m({logicalXor_:TL});function AL(e,t,n,r,o){const i=u(e,"x","maxPool"),a=1;let s=i,c=!1;i.rank===3&&(c=!0,s=y(i,[1,i.shape[0],i.shape[1],i.shape[2]])),f(s.rank===4,()=>`Error in maxPool: input must be rank 4 but got rank ${s.rank}.`),f(le(n,a),()=>`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${n} and dilations '${a}'`),o!=null&&f(X(r),()=>`Error in maxPool: pad must be an integer when using, dimRoundingMode ${o} but got pad ${r}.`);const p=(g,x)=>{const w=Nt(s.shape,t,n,1,r,o);let L;return w.filterWidth===1&&w.filterHeight===1&&Se(w.inShape,w.outShape)?L=s.clone():L=g.maxPool(s,w),x([s,L]),L},l={x:s},h={filterSize:t,strides:n,pad:r,dimRoundingMode:o},d=b.runKernelFunc(p,l,null,ii,h);return c?y(d,[d.shape[1],d.shape[2],d.shape[3]]):d}const Re=m({maxPool_:AL});function NL(e,t=[1,1,1],n,r,o,i="NDHWC",a){a==null?a=[1,1,1]:ye("dilations is deprecated, this field will be gone in v3.0.0.");const s=u(e,"x","maxPool3d");let c=s,p=!1;s.rank===4&&(p=!0,c=y(s,[1,s.shape[0],s.shape[1],s.shape[2],s.shape[3]])),f(c.rank===5,()=>`Error in maxPool3d: x must be rank 5 but got rank ${c.rank}.`),f(i==="NDHWC",()=>`Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${i}`),f(le(n,a),()=>`Error in maxPool3d: Either strides or dilations must be 1. Got strides ${n} and d
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @license See the LICENSE file. */
//# sourceMappingURL=face-api.min.js.map