var faceapi = (() => { var __defineProperty = Object.defineProperty; var __hasOwnProperty = Object.prototype.hasOwnProperty; var __commonJS = (callback, module) => () => { if (!module) { module = {exports: {}}; callback(module.exports, module); } return module.exports; }; var __markAsModule = (target) => { return __defineProperty(target, "__esModule", {value: true}); }; var __export = (target, all3) => { __markAsModule(target); for (var name in all3) __defineProperty(target, name, {get: all3[name], enumerable: true}); }; var __exportStar = (target, module) => { __markAsModule(target); if (typeof module === "object" || typeof module === "function") { for (let key in module) if (!__hasOwnProperty.call(target, key) && key !== "default") __defineProperty(target, key, {get: () => module[key], enumerable: true}); } return target; }; var __toModule = (module) => { if (module && module.__esModule) return module; return __exportStar(__defineProperty({}, "default", {value: module, enumerable: true}), module); }; // node_modules/tslib/tslib.es6.js var require_tslib_es6 = __commonJS((exports) => { __export(exports, { __assign: () => __assign, __asyncDelegator: () => __asyncDelegator, __asyncGenerator: () => __asyncGenerator, __asyncValues: () => __asyncValues, __await: () => __await, __awaiter: () => __awaiter, __classPrivateFieldGet: () => __classPrivateFieldGet, __classPrivateFieldSet: () => __classPrivateFieldSet, __createBinding: () => __createBinding, __decorate: () => __decorate, __exportStar: () => __exportStar2, __extends: () => __extends, __generator: () => __generator, __importDefault: () => __importDefault, __importStar: () => __importStar, __makeTemplateObject: () => __makeTemplateObject, __metadata: () => __metadata, __param: () => __param, __read: () => __read, __rest: () => __rest, __spread: () => __spread, __spreadArrays: () => __spreadArrays, __values: () => __values }); /*! ***************************************************************************** Copyright (c) Microsoft Corporation. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ***************************************************************************** */ var extendStatics = function(d, b) { extendStatics = Object.setPrototypeOf || {__proto__: []} instanceof Array && function(d2, b2) { d2.__proto__ = b2; } || function(d2, b2) { for (var p in b2) if (Object.prototype.hasOwnProperty.call(b2, p)) d2[p] = b2[p]; }; return extendStatics(d, b); }; function __extends(d, b) { extendStatics(d, b); function __() { this.constructor = d; } d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); } var __assign = function() { __assign = Object.assign || function __assign2(t) { for (var s, i = 1, n = arguments.length; i < n; i++) { s = arguments[i]; for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; } return t; }; return __assign.apply(this, arguments); }; function __rest(s, e) { var t = {}; for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0) t[p] = s[p]; if (s != null && typeof Object.getOwnPropertySymbols === "function") for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) { if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i])) t[p[i]] = s[p[i]]; } return t; } function __decorate(decorators, target, key, desc) { var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; return c > 3 && r && Object.defineProperty(target, key, r), r; } function __param(paramIndex, decorator) { return function(target, key) { decorator(target, key, paramIndex); }; } function __metadata(metadataKey, metadataValue) { if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(metadataKey, metadataValue); } function __awaiter(thisArg, _arguments, P, generator) { function adopt(value) { return value instanceof P ? value : new P(function(resolve) { resolve(value); }); } return new (P || (P = Promise))(function(resolve, reject) { function fulfilled(value) { try { step7(generator.next(value)); } catch (e) { reject(e); } } function rejected(value) { try { step7(generator["throw"](value)); } catch (e) { reject(e); } } function step7(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } step7((generator = generator.apply(thisArg, _arguments || [])).next()); }); } function __generator(thisArg, body) { var _ = {label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: []}, f, y, t, g; return g = {next: verb(0), throw: verb(1), return: verb(2)}, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; function verb(n) { return function(v) { return step7([n, v]); }; } function step7(op2) { if (f) throw new TypeError("Generator is already executing."); while (_) try { if (f = 1, y && (t = op2[0] & 2 ? y["return"] : op2[0] ? 
y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op2[1])).done) return t; if (y = 0, t) op2 = [op2[0] & 2, t.value]; switch (op2[0]) { case 0: case 1: t = op2; break; case 4: _.label++; return {value: op2[1], done: false}; case 5: _.label++; y = op2[1]; op2 = [0]; continue; case 7: op2 = _.ops.pop(); _.trys.pop(); continue; default: if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op2[0] === 6 || op2[0] === 2)) { _ = 0; continue; } if (op2[0] === 3 && (!t || op2[1] > t[0] && op2[1] < t[3])) { _.label = op2[1]; break; } if (op2[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op2; break; } if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op2); break; } if (t[2]) _.ops.pop(); _.trys.pop(); continue; } op2 = body.call(thisArg, _); } catch (e) { op2 = [6, e]; y = 0; } finally { f = t = 0; } if (op2[0] & 5) throw op2[1]; return {value: op2[0] ? op2[1] : void 0, done: true}; } } var __createBinding = Object.create ? function(o, m, k, k2) { if (k2 === void 0) k2 = k; Object.defineProperty(o, k2, {enumerable: true, get: function() { return m[k]; }}); } : function(o, m, k, k2) { if (k2 === void 0) k2 = k; o[k2] = m[k]; }; function __exportStar2(m, o) { for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p); } function __values(o) { var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0; if (m) return m.call(o); if (o && typeof o.length === "number") return { next: function() { if (o && i >= o.length) o = void 0; return {value: o && o[i++], done: !o}; } }; throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined."); } function __read(o, n) { var m = typeof Symbol === "function" && o[Symbol.iterator]; if (!m) return o; var i = m.call(o), r, ar = [], e; try { while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); } catch (error) { e = {error}; } finally { try { if (r && !r.done && (m = i["return"])) m.call(i); } finally { if (e) throw e.error; } } return ar; } function __spread() { for (var ar = [], i = 0; i < arguments.length; i++) ar = ar.concat(__read(arguments[i])); return ar; } function __spreadArrays() { for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length; for (var r = Array(s), k = 0, i = 0; i < il; i++) for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++) r[k] = a[j]; return r; } function __await(v) { return this instanceof __await ? (this.v = v, this) : new __await(v); } function __asyncGenerator(thisArg, _arguments, generator) { if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); var g = generator.apply(thisArg, _arguments || []), i, q = []; return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function() { return this; }, i; function verb(n) { if (g[n]) i[n] = function(v) { return new Promise(function(a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; } function resume(n, v) { try { step7(g[n](v)); } catch (e) { settle(q[0][3], e); } } function step7(r) { r.value instanceof __await ? 
Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); } function fulfill(value) { resume("next", value); } function reject(value) { resume("throw", value); } function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); } } function __asyncDelegator(o) { var i, p; return i = {}, verb("next"), verb("throw", function(e) { throw e; }), verb("return"), i[Symbol.iterator] = function() { return this; }, i; function verb(n, f) { i[n] = o[n] ? function(v) { return (p = !p) ? {value: __await(o[n](v)), done: n === "return"} : f ? f(v) : v; } : f; } } function __asyncValues(o) { if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); var m = o[Symbol.asyncIterator], i; return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function() { return this; }, i); function verb(n) { i[n] = o[n] && function(v) { return new Promise(function(resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; } function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v2) { resolve({value: v2, done: d}); }, reject); } } function __makeTemplateObject(cooked, raw) { if (Object.defineProperty) { Object.defineProperty(cooked, "raw", {value: raw}); } else { cooked.raw = raw; } return cooked; } var __setModuleDefault = Object.create ? function(o, v) { Object.defineProperty(o, "default", {enumerable: true, value: v}); } : function(o, v) { o["default"] = v; }; function __importStar(mod4) { if (mod4 && mod4.__esModule) return mod4; var result = {}; if (mod4 != null) { for (var k in mod4) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod4, k)) __createBinding(result, mod4, k); } __setModuleDefault(result, mod4); return result; } function __importDefault(mod4) { return mod4 && mod4.__esModule ? 
mod4 : {default: mod4}; } function __classPrivateFieldGet(receiver, privateMap) { if (!privateMap.has(receiver)) { throw new TypeError("attempted to get private field on non-instance"); } return privateMap.get(receiver); } function __classPrivateFieldSet(receiver, privateMap, value) { if (!privateMap.has(receiver)) { throw new TypeError("attempted to set private field on non-instance"); } privateMap.set(receiver, value); return value; } }); // node_modules/seedrandom/lib/alea.js var require_alea = __commonJS((exports, module) => { (function(global2, module2, define2) { function Alea(seed) { var me = this, mash = Mash(); me.next = function() { var t = 2091639 * me.s0 + me.c * 23283064365386963e-26; me.s0 = me.s1; me.s1 = me.s2; return me.s2 = t - (me.c = t | 0); }; me.c = 1; me.s0 = mash(" "); me.s1 = mash(" "); me.s2 = mash(" "); me.s0 -= mash(seed); if (me.s0 < 0) { me.s0 += 1; } me.s1 -= mash(seed); if (me.s1 < 0) { me.s1 += 1; } me.s2 -= mash(seed); if (me.s2 < 0) { me.s2 += 1; } mash = null; } function copy(f, t) { t.c = f.c; t.s0 = f.s0; t.s1 = f.s1; t.s2 = f.s2; return t; } function impl(seed, opts) { var xg = new Alea(seed), state = opts && opts.state, prng = xg.next; prng.int32 = function() { return xg.next() * 4294967296 | 0; }; prng.double = function() { return prng() + (prng() * 2097152 | 0) * 11102230246251565e-32; }; prng.quick = prng; if (state) { if (typeof state == "object") copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } function Mash() { var n = 4022871197; var mash = function(data) { data = data.toString(); for (var i = 0; i < data.length; i++) { n += data.charCodeAt(i); var h = 0.02519603282416938 * n; n = h >>> 0; h -= n; h *= n; n = h >>> 0; h -= n; n += h * 4294967296; } return (n >>> 0) * 23283064365386963e-26; }; return mash; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.alea = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // node_modules/seedrandom/lib/xor128.js var require_xor128 = __commonJS((exports, module) => { (function(global2, module2, define2) { function XorGen(seed) { var me = this, strseed = ""; me.x = 0; me.y = 0; me.z = 0; me.w = 0; me.next = function() { var t = me.x ^ me.x << 11; me.x = me.y; me.y = me.z; me.z = me.w; return me.w ^= me.w >>> 19 ^ t ^ t >>> 8; }; if (seed === (seed | 0)) { me.x = seed; } else { strseed += seed; } for (var k = 0; k < strseed.length + 64; k++) { me.x ^= strseed.charCodeAt(k) | 0; me.next(); } } function copy(f, t) { t.x = f.x; t.y = f.y; t.z = f.z; t.w = f.w; return t; } function impl(seed, opts) { var xg = new XorGen(seed), state = opts && opts.state, prng = function() { return (xg.next() >>> 0) / 4294967296; }; prng.double = function() { do { var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 4294967296, result = (top + bot) / (1 << 21); } while (result === 0); return result; }; prng.int32 = xg.next; prng.quick = prng; if (state) { if (typeof state == "object") copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.xor128 = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // node_modules/seedrandom/lib/xorwow.js var require_xorwow = __commonJS((exports, module) => { (function(global2, module2, 
define2) { function XorGen(seed) { var me = this, strseed = ""; me.next = function() { var t = me.x ^ me.x >>> 2; me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v; return (me.d = me.d + 362437 | 0) + (me.v = me.v ^ me.v << 4 ^ (t ^ t << 1)) | 0; }; me.x = 0; me.y = 0; me.z = 0; me.w = 0; me.v = 0; if (seed === (seed | 0)) { me.x = seed; } else { strseed += seed; } for (var k = 0; k < strseed.length + 64; k++) { me.x ^= strseed.charCodeAt(k) | 0; if (k == strseed.length) { me.d = me.x << 10 ^ me.x >>> 4; } me.next(); } } function copy(f, t) { t.x = f.x; t.y = f.y; t.z = f.z; t.w = f.w; t.v = f.v; t.d = f.d; return t; } function impl(seed, opts) { var xg = new XorGen(seed), state = opts && opts.state, prng = function() { return (xg.next() >>> 0) / 4294967296; }; prng.double = function() { do { var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 4294967296, result = (top + bot) / (1 << 21); } while (result === 0); return result; }; prng.int32 = xg.next; prng.quick = prng; if (state) { if (typeof state == "object") copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.xorwow = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // node_modules/seedrandom/lib/xorshift7.js var require_xorshift7 = __commonJS((exports, module) => { (function(global2, module2, define2) { function XorGen(seed) { var me = this; me.next = function() { var X = me.x, i = me.i, t, v, w; t = X[i]; t ^= t >>> 7; v = t ^ t << 24; t = X[i + 1 & 7]; v ^= t ^ t >>> 10; t = X[i + 3 & 7]; v ^= t ^ t >>> 3; t = X[i + 4 & 7]; v ^= t ^ t << 7; t = X[i + 7 & 7]; t = t ^ t << 13; v ^= t ^ t << 9; X[i] = v; me.i = i + 1 & 7; return v; }; function init(me2, seed2) { var j, w, X = []; if (seed2 === (seed2 | 0)) { w = X[0] = seed2; } else { seed2 = "" + seed2; for (j = 0; j < seed2.length; ++j) { X[j & 7] = X[j & 7] << 15 ^ seed2.charCodeAt(j) + X[j + 1 & 7] << 13; } } while (X.length < 8) X.push(0); for (j = 0; j < 8 && X[j] === 0; ++j) ; if (j == 8) w = X[7] = -1; else w = X[j]; me2.x = X; me2.i = 0; for (j = 256; j > 0; --j) { me2.next(); } } init(me, seed); } function copy(f, t) { t.x = f.x.slice(); t.i = f.i; return t; } function impl(seed, opts) { if (seed == null) seed = +new Date(); var xg = new XorGen(seed), state = opts && opts.state, prng = function() { return (xg.next() >>> 0) / 4294967296; }; prng.double = function() { do { var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 4294967296, result = (top + bot) / (1 << 21); } while (result === 0); return result; }; prng.int32 = xg.next; prng.quick = prng; if (state) { if (state.x) copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.xorshift7 = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // node_modules/seedrandom/lib/xor4096.js var require_xor4096 = __commonJS((exports, module) => { (function(global2, module2, define2) { function XorGen(seed) { var me = this; me.next = function() { var w = me.w, X = me.X, i = me.i, t, v; me.w = w = w + 1640531527 | 0; v = X[i + 34 & 127]; t = X[i = i + 1 & 127]; v ^= v << 13; t ^= t << 17; v ^= v >>> 15; t ^= t >>> 12; v = X[i] = v ^ t; me.i = i; return v + (w ^ w >>> 16) | 0; }; 
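// init(me2, seed2): seeds the xor4096 generator state. An integer seed is used directly,
// a string seed is folded in character by character; the 128-entry state array X is then
// mixed with 4 * 128 warm-up iterations before the generator produces its first output.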
function init(me2, seed2) { var t, v, i, j, w, X = [], limit = 128; if (seed2 === (seed2 | 0)) { v = seed2; seed2 = null; } else { seed2 = seed2 + "\0"; v = 0; limit = Math.max(limit, seed2.length); } for (i = 0, j = -32; j < limit; ++j) { if (seed2) v ^= seed2.charCodeAt((j + 32) % seed2.length); if (j === 0) w = v; v ^= v << 10; v ^= v >>> 15; v ^= v << 4; v ^= v >>> 13; if (j >= 0) { w = w + 1640531527 | 0; t = X[j & 127] ^= v + w; i = t == 0 ? i + 1 : 0; } } if (i >= 128) { X[(seed2 && seed2.length || 0) & 127] = -1; } i = 127; for (j = 4 * 128; j > 0; --j) { v = X[i + 34 & 127]; t = X[i = i + 1 & 127]; v ^= v << 13; t ^= t << 17; v ^= v >>> 15; t ^= t >>> 12; X[i] = v ^ t; } me2.w = w; me2.X = X; me2.i = i; } init(me, seed); } function copy(f, t) { t.i = f.i; t.w = f.w; t.X = f.X.slice(); return t; } ; function impl(seed, opts) { if (seed == null) seed = +new Date(); var xg = new XorGen(seed), state = opts && opts.state, prng = function() { return (xg.next() >>> 0) / 4294967296; }; prng.double = function() { do { var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 4294967296, result = (top + bot) / (1 << 21); } while (result === 0); return result; }; prng.int32 = xg.next; prng.quick = prng; if (state) { if (state.X) copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.xor4096 = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // node_modules/seedrandom/lib/tychei.js var require_tychei = __commonJS((exports, module) => { (function(global2, module2, define2) { function XorGen(seed) { var me = this, strseed = ""; me.next = function() { var b = me.b, c = me.c, d = me.d, a = me.a; b = b << 25 ^ b >>> 7 ^ c; c = c - d | 0; d = d << 24 ^ d >>> 8 ^ a; a = a - b | 0; me.b = b = b << 20 ^ b >>> 12 ^ c; me.c = c = c - d | 0; me.d = d << 16 ^ c >>> 16 ^ a; return me.a = a - b | 0; }; me.a = 0; me.b = 0; me.c = 2654435769 | 0; me.d = 1367130551; if (seed === Math.floor(seed)) { me.a = seed / 4294967296 | 0; me.b = seed | 0; } else { strseed += seed; } for (var k = 0; k < strseed.length + 20; k++) { me.b ^= strseed.charCodeAt(k) | 0; me.next(); } } function copy(f, t) { t.a = f.a; t.b = f.b; t.c = f.c; t.d = f.d; return t; } ; function impl(seed, opts) { var xg = new XorGen(seed), state = opts && opts.state, prng = function() { return (xg.next() >>> 0) / 4294967296; }; prng.double = function() { do { var top = xg.next() >>> 11, bot = (xg.next() >>> 0) / 4294967296, result = (top + bot) / (1 << 21); } while (result === 0); return result; }; prng.int32 = xg.next; prng.quick = prng; if (state) { if (typeof state == "object") copy(state, xg); prng.state = function() { return copy(xg, {}); }; } return prng; } if (module2 && module2.exports) { module2.exports = impl; } else if (define2 && define2.amd) { define2(function() { return impl; }); } else { this.tychei = impl; } })(exports, typeof module == "object" && module, typeof define == "function" && define); }); // disabled:crypto var require_disabled_crypto = __commonJS(() => { }); // node_modules/seedrandom/seedrandom.js var require_seedrandom = __commonJS((exports, module) => { (function(pool3, math) { var global2 = this, width = 256, chunks = 6, digits = 52, rngname = "random", startdenom = math.pow(width, chunks), significance = math.pow(2, digits), overflow = significance * 2, mask = width - 1, nodecrypto; function 
seedrandom2(seed, options, callback) { var key = []; options = options == true ? {entropy: true} : options || {}; var shortseed = mixkey(flatten2(options.entropy ? [seed, tostring(pool3)] : seed == null ? autoseed() : seed, 3), key); var arc4 = new ARC4(key); var prng = function() { var n = arc4.g(chunks), d = startdenom, x = 0; while (n < significance) { n = (n + x) * width; d *= width; x = arc4.g(1); } while (n >= overflow) { n /= 2; d /= 2; x >>>= 1; } return (n + x) / d; }; prng.int32 = function() { return arc4.g(4) | 0; }; prng.quick = function() { return arc4.g(4) / 4294967296; }; prng.double = prng; mixkey(tostring(arc4.S), pool3); return (options.pass || callback || function(prng2, seed2, is_math_call, state) { if (state) { if (state.S) { copy(state, arc4); } prng2.state = function() { return copy(arc4, {}); }; } if (is_math_call) { math[rngname] = prng2; return seed2; } else return prng2; })(prng, shortseed, "global" in options ? options.global : this == math, options.state); } math["seed" + rngname] = seedrandom2; function ARC4(key) { var t, keylen = key.length, me = this, i = 0, j = me.i = me.j = 0, s = me.S = []; if (!keylen) { key = [keylen++]; } while (i < width) { s[i] = i++; } for (i = 0; i < width; i++) { s[i] = s[j = mask & j + key[i % keylen] + (t = s[i])]; s[j] = t; } (me.g = function(count) { var t2, r = 0, i2 = me.i, j2 = me.j, s2 = me.S; while (count--) { t2 = s2[i2 = mask & i2 + 1]; r = r * width + s2[mask & (s2[i2] = s2[j2 = mask & j2 + t2]) + (s2[j2] = t2)]; } me.i = i2; me.j = j2; return r; })(width); } function copy(f, t) { t.i = f.i; t.j = f.j; t.S = f.S.slice(); return t; } ; function flatten2(obj, depth) { var result = [], typ = typeof obj, prop; if (depth && typ == "object") { for (prop in obj) { try { result.push(flatten2(obj[prop], depth - 1)); } catch (e) { } } } return result.length ? result : typ == "string" ? 
obj : obj + "\0"; } function mixkey(seed, key) { var stringseed = seed + "", smear, j = 0; while (j < stringseed.length) { key[mask & j] = mask & (smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++); } return tostring(key); } function autoseed() { try { var out; if (nodecrypto && (out = nodecrypto.randomBytes)) { out = out(width); } else { out = new Uint8Array(width); (global2.crypto || global2.msCrypto).getRandomValues(out); } return tostring(out); } catch (e) { var browser = global2.navigator, plugins = browser && browser.plugins; return [+new Date(), global2, plugins, global2.screen, tostring(pool3)]; } } function tostring(a) { return String.fromCharCode.apply(0, a); } mixkey(math.random(), pool3); if (typeof module == "object" && module.exports) { module.exports = seedrandom2; try { nodecrypto = require_disabled_crypto(); } catch (ex) { } } else if (typeof define == "function" && define.amd) { define(function() { return seedrandom2; }); } })([], Math); }); // node_modules/seedrandom/index.js var require_seedrandom2 = __commonJS((exports, module) => { var alea2 = require_alea(); var xor128 = require_xor128(); var xorwow = require_xorwow(); var xorshift7 = require_xorshift7(); var xor4096 = require_xor4096(); var tychei = require_tychei(); var sr = require_seedrandom(); sr.alea = alea2; sr.xor128 = xor128; sr.xorwow = xorwow; sr.xorshift7 = xorshift7; sr.xor4096 = xor4096; sr.tychei = tychei; module.exports = sr; }); // node_modules/@tensorflow/tfjs-core/dist/index.js var require_dist = __commonJS((exports) => { __export(exports, { Abs: () => Abs, Acos: () => Acos, Acosh: () => Acosh, AdadeltaOptimizer: () => AdadeltaOptimizer, AdagradOptimizer: () => AdagradOptimizer, AdamOptimizer: () => AdamOptimizer, AdamaxOptimizer: () => AdamaxOptimizer, Add: () => Add, AddN: () => AddN, All: () => All, Any: () => Any, ArgMax: () => ArgMax, ArgMin: () => ArgMin, Asin: () => Asin, Asinh: () => Asinh, Atan: () => Atan, Atan2: () => Atan2, Atanh: () => Atanh, AvgPool: () => AvgPool, AvgPool3D: () => AvgPool3D, AvgPool3DBackprop: () => AvgPool3DBackprop, AvgPoolBackprop: () => AvgPoolBackprop, BatchMatMul: () => BatchMatMul, BatchToSpaceND: () => BatchToSpaceND, BroadcastTo: () => BroadcastTo, Cast: () => Cast, Ceil: () => Ceil, ClipByValue: () => ClipByValue, Complex: () => Complex, Concat: () => Concat, Conv2D: () => Conv2D, Conv2DBackpropFilter: () => Conv2DBackpropFilter, Conv2DBackpropInput: () => Conv2DBackpropInput, Conv3D: () => Conv3D, Conv3DBackpropFilterV2: () => Conv3DBackpropFilterV2, Conv3DBackpropInputV2: () => Conv3DBackpropInputV2, Cos: () => Cos, Cosh: () => Cosh, CropAndResize: () => CropAndResize, Cumsum: () => Cumsum, DataStorage: () => DataStorage, DepthToSpace: () => DepthToSpace, DepthwiseConv2dNative: () => DepthwiseConv2dNative, DepthwiseConv2dNativeBackpropFilter: () => DepthwiseConv2dNativeBackpropFilter, DepthwiseConv2dNativeBackpropInput: () => DepthwiseConv2dNativeBackpropInput, Diag: () => Diag, Dilation2D: () => Dilation2D, Dilation2DBackpropFilter: () => Dilation2DBackpropFilter, Dilation2DBackpropInput: () => Dilation2DBackpropInput, Div: () => Div, ENV: () => ENV, Elu: () => Elu, EluGrad: () => EluGrad, Environment: () => Environment, Equal: () => Equal, Erf: () => Erf, Exp: () => Exp, Expm1: () => Expm1, FFT: () => FFT, Fill: () => Fill, FlipLeftRight: () => FlipLeftRight, Floor: () => Floor, FloorDiv: () => FloorDiv, FromPixels: () => FromPixels, FusedBatchNorm: () => FusedBatchNorm, FusedConv2D: () => FusedConv2D, FusedDepthwiseConv2D: () => 
FusedDepthwiseConv2D, GatherNd: () => GatherNd, GatherV2: () => GatherV2, Greater: () => Greater, GreaterEqual: () => GreaterEqual, IFFT: () => IFFT, Identity: () => Identity, Imag: () => Imag, IsFinite: () => IsFinite, IsInf: () => IsInf, IsNan: () => IsNan, KernelBackend: () => KernelBackend, LRN: () => LRN, LRNBackprop: () => LRNBackprop, Less: () => Less, LessEqual: () => LessEqual, LinSpace: () => LinSpace, Log: () => Log, Log1p: () => Log1p, LogSoftmax: () => LogSoftmax, LogicalAnd: () => LogicalAnd, LogicalNot: () => LogicalNot, LogicalOr: () => LogicalOr, Max: () => Max, MaxPool: () => MaxPool, MaxPool3D: () => MaxPool3D, MaxPool3DBackprop: () => MaxPool3DBackprop, MaxPoolBackprop: () => MaxPoolBackprop, MaxPoolWithArgmax: () => MaxPoolWithArgmax, Maximum: () => Maximum, Mean: () => Mean, Min: () => Min, Minimum: () => Minimum, Mod: () => Mod, MomentumOptimizer: () => MomentumOptimizer, Multiply: () => Multiply, Negate: () => Negate, NonMaxSuppressionV3: () => NonMaxSuppressionV3, NonMaxSuppressionV4: () => NonMaxSuppressionV4, NonMaxSuppressionV5: () => NonMaxSuppressionV5, NotEqual: () => NotEqual, OneHot: () => OneHot, OnesLike: () => OnesLike, Optimizer: () => Optimizer, PadV2: () => PadV2, Pool: () => Pool, Pow: () => Pow, Prelu: () => Prelu, Prod: () => Prod, RMSPropOptimizer: () => RMSPropOptimizer, Range: () => Range, Rank: () => Rank, Real: () => Real, Reciprocal: () => Reciprocal, Reduction: () => Reduction, Relu: () => Relu, Relu6: () => Relu6, Reshape: () => Reshape, ResizeBilinear: () => ResizeBilinear, ResizeBilinearGrad: () => ResizeBilinearGrad, ResizeNearestNeighbor: () => ResizeNearestNeighbor, ResizeNearestNeighborGrad: () => ResizeNearestNeighborGrad, Reverse: () => Reverse, RotateWithOffset: () => RotateWithOffset, Round: () => Round, Rsqrt: () => Rsqrt, SGDOptimizer: () => SGDOptimizer, ScatterNd: () => ScatterNd, SelectV2: () => SelectV2, Selu: () => Selu, Sigmoid: () => Sigmoid, Sign: () => Sign, Sin: () => Sin, Sinh: () => Sinh, Slice: () => Slice, Softmax: () => Softmax, Softplus: () => Softplus, SpaceToBatchND: () => SpaceToBatchND, SparseToDense: () => SparseToDense, SplitV: () => SplitV, Sqrt: () => Sqrt, Square: () => Square, SquaredDifference: () => SquaredDifference, Step: () => Step, StridedSlice: () => StridedSlice, Sub: () => Sub, Sum: () => Sum, Tan: () => Tan, Tanh: () => Tanh, Tensor: () => Tensor, TensorBuffer: () => TensorBuffer, Tile: () => Tile, TopK: () => TopK, Transpose: () => Transpose, Unpack: () => Unpack, UnsortedSegmentSum: () => UnsortedSegmentSum, Variable: () => Variable, ZerosLike: () => ZerosLike, _FusedMatMul: () => _FusedMatMul, abs: () => abs, acos: () => acos, acosh: () => acosh, add: () => add2, addN: () => addN, addStrict: () => addStrict, all: () => all, any: () => any, argMax: () => argMax, argMin: () => argMin, asin: () => asin, asinh: () => asinh, atan: () => atan, atan2: () => atan2, atanh: () => atanh, avgPool: () => avgPool, avgPool3d: () => avgPool3d, backend: () => backend, backend_util: () => backend_util_exports, basicLSTMCell: () => basicLSTMCell, batchNorm: () => batchNorm, batchNorm2d: () => batchNorm2d, batchNorm3d: () => batchNorm3d, batchNorm4d: () => batchNorm4d, batchToSpaceND: () => batchToSpaceND, booleanMaskAsync: () => booleanMaskAsync, broadcastTo: () => broadcastTo, browser: () => browser_exports, buffer: () => buffer, cast: () => cast, ceil: () => ceil, clipByValue: () => clipByValue, clone: () => clone, complex: () => complex, concat: () => concat, concat1d: () => concat1d, concat2d: () => 
concat2d, concat3d: () => concat3d, concat4d: () => concat4d, conv1d: () => conv1d, conv2d: () => conv2d, conv2dTranspose: () => conv2dTranspose, conv3d: () => conv3d, conv3dTranspose: () => conv3dTranspose, cos: () => cos, cosh: () => cosh, cosineWindow: () => cosineWindow, cumsum: () => cumsum, customGrad: () => customGrad, deprecationWarn: () => deprecationWarn, depthToSpace: () => depthToSpace, depthwiseConv2d: () => depthwiseConv2d, device_util: () => device_util_exports, diag: () => diag, dilation2d: () => dilation2d, disableDeprecationWarnings: () => disableDeprecationWarnings, dispose: () => dispose, disposeVariables: () => disposeVariables, div: () => div, divNoNan: () => divNoNan, divStrict: () => divStrict, dot: () => dot, dropout: () => dropout, elu: () => elu, enableDebugMode: () => enableDebugMode, enableProdMode: () => enableProdMode, enclosingPowerOfTwo: () => enclosingPowerOfTwo, engine: () => engine22, env: () => env, equal: () => equal, equalStrict: () => equalStrict, erf: () => erf, exp: () => exp, expandDims: () => expandDims, expm1: () => expm1, eye: () => eye, fft: () => fft, fill: () => fill, findBackend: () => findBackend, findBackendFactory: () => findBackendFactory, floor: () => floor, floorDiv: () => floorDiv, fused: () => fused_ops_exports, gather: () => gather, gatherND: () => gatherND, gather_util: () => gather_nd_util_exports, getBackend: () => getBackend, getGradient: () => getGradient, getKernel: () => getKernel, getKernelsForBackend: () => getKernelsForBackend, grad: () => grad, grads: () => grads, greater: () => greater, greaterEqual: () => greaterEqual, greaterEqualStrict: () => greaterEqualStrict, greaterStrict: () => greaterStrict, ifft: () => ifft, imag: () => imag, image: () => image, inTopKAsync: () => inTopKAsync, io: () => io_exports, irfft: () => irfft, isFinite: () => isFinite2, isInf: () => isInf, isNaN: () => isNaN2, keep: () => keep, kernel_impls: () => kernel_impls_exports, leakyRelu: () => leakyRelu, less: () => less, lessEqual: () => lessEqual, lessEqualStrict: () => lessEqualStrict, lessStrict: () => lessStrict, linalg: () => linalg, linspace: () => linspace, localResponseNormalization: () => localResponseNormalization, log: () => log, log1p: () => log1p, logSigmoid: () => logSigmoid, logSoftmax: () => logSoftmax, logSumExp: () => logSumExp, logicalAnd: () => logicalAnd, logicalNot: () => logicalNot, logicalOr: () => logicalOr, logicalXor: () => logicalXor, losses: () => losses, matMul: () => matMul, math: () => math_exports, max: () => max, maxPool: () => maxPool, maxPool3d: () => maxPool3d, maxPoolWithArgmax: () => maxPoolWithArgmax, maximum: () => maximum, maximumStrict: () => maximumStrict, mean: () => mean, memory: () => memory, min: () => min, minimum: () => minimum, minimumStrict: () => minimumStrict, mod: () => mod, modStrict: () => modStrict, moments: () => moments, movingAverage: () => movingAverage, mul: () => mul, mulStrict: () => mulStrict, multiRNNCell: () => multiRNNCell, multinomial: () => multinomial, neg: () => neg, nextFrame: () => nextFrame, norm: () => norm, notEqual: () => notEqual, notEqualStrict: () => notEqualStrict, oneHot: () => oneHot, ones: () => ones2, onesLike: () => onesLike, op: () => op, outerProduct: () => outerProduct, pad: () => pad, pad1d: () => pad1d, pad2d: () => pad2d, pad3d: () => pad3d, pad4d: () => pad4d, pool: () => pool, pow: () => pow, powStrict: () => powStrict, prelu: () => prelu, print: () => print, prod: () => prod, profile: () => profile, rand: () => rand, randomGamma: () => 
randomGamma, randomNormal: () => randomNormal, randomUniform: () => randomUniform, range: () => range, ready: () => ready, real: () => real, reciprocal: () => reciprocal, registerBackend: () => registerBackend, registerGradient: () => registerGradient, registerKernel: () => registerKernel, relu: () => relu, relu6: () => relu6, removeBackend: () => removeBackend, reshape: () => reshape, reverse: () => reverse, reverse1d: () => reverse1d, reverse2d: () => reverse2d, reverse3d: () => reverse3d, reverse4d: () => reverse4d, rfft: () => rfft, round: () => round, rsqrt: () => rsqrt, scalar: () => scalar, scatterND: () => scatterND, scatter_util: () => scatter_nd_util_exports, selu: () => selu, separableConv2d: () => separableConv2d, serialization: () => serialization_exports, setBackend: () => setBackend, setPlatform: () => setPlatform, setdiff1dAsync: () => setdiff1dAsync, sigmoid: () => sigmoid, sign: () => sign, signal: () => signal, sin: () => sin, sinh: () => sinh, slice: () => slice, slice1d: () => slice1d, slice2d: () => slice2d, slice3d: () => slice3d, slice4d: () => slice4d, slice_util: () => slice_util_exports, softmax: () => softmax, softplus: () => softplus, spaceToBatchND: () => spaceToBatchND, sparseToDense: () => sparseToDense, spectral: () => spectral, split: () => split, sqrt: () => sqrt, square: () => square, squaredDifference: () => squaredDifference, squaredDifferenceStrict: () => squaredDifferenceStrict, squeeze: () => squeeze, stack: () => stack, step: () => step, stridedSlice: () => stridedSlice, sub: () => sub, subStrict: () => subStrict, sum: () => sum2, sumOutType: () => sumOutType, tan: () => tan, tanh: () => tanh2, tensor: () => tensor5, tensor1d: () => tensor1d, tensor2d: () => tensor2d, tensor3d: () => tensor3d, tensor4d: () => tensor4d, tensor5d: () => tensor5d, tensor6d: () => tensor6d, tensor_util: () => tensor_util_exports, test_util: () => test_util_exports, tidy: () => tidy, tile: () => tile, time: () => time, topk: () => topk, train: () => train, transpose: () => transpose, truncatedNormal: () => truncatedNormal, unregisterGradient: () => unregisterGradient, unregisterKernel: () => unregisterKernel, unsortedSegmentSum: () => unsortedSegmentSum, unstack: () => unstack, upcastType: () => upcastType, util: () => util_exports, valueAndGrad: () => valueAndGrad, valueAndGrads: () => valueAndGrads, variable: () => variable, variableGrads: () => variableGrads, version_core: () => version, where: () => where, whereAsync: () => whereAsync, zeros: () => zeros, zerosLike: () => zerosLike }); /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ }); // build/draw/drawContour.js var require_drawContour = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.drawContour = void 0; function drawContour(ctx, points, isClosed = false) { ctx.beginPath(); points.slice(1).forEach(({x, y}, prevIdx) => { const from = points[prevIdx]; ctx.moveTo(from.x, from.y); ctx.lineTo(x, y); }); if (isClosed) { const from = points[points.length - 1]; const to = points[0]; if (!from || !to) { return; } ctx.moveTo(from.x, from.y); ctx.lineTo(to.x, to.y); } ctx.stroke(); } exports.drawContour = drawContour; }); // build/classes/Dimensions.js var require_Dimensions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.Dimensions = void 0; const utils_1 = require_utils(); class Dimensions { constructor(width, height) { if (!utils_1.isValidNumber(width) || !utils_1.isValidNumber(height)) { throw new Error(`Dimensions.constructor - expected width and height to be valid numbers, instead have ${JSON.stringify({width, height})}`); } this._width = width; this._height = height; } get width() { return this._width; } get height() { return this._height; } reverse() { return new Dimensions(1 / this.width, 1 / this.height); } } exports.Dimensions = Dimensions; }); // build/utils/index.js var require_utils = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.isValidProbablitiy = exports.isValidNumber = exports.range = exports.getCenterPoint = exports.computeReshapedDimensions = exports.isDimensions = exports.round = exports.isEven = exports.isFloat = exports.isTensor4D = exports.isTensor3D = exports.isTensor2D = exports.isTensor1D = exports.isTensor = void 0; const tf = require_dist(); const classes_1 = require_classes(); const Dimensions_1 = require_Dimensions(); function isTensor(tensor17, dim) { return tensor17 instanceof tf.Tensor && tensor17.shape.length === dim; } exports.isTensor = isTensor; function isTensor1D(tensor17) { return isTensor(tensor17, 1); } exports.isTensor1D = isTensor1D; function isTensor2D(tensor17) { return isTensor(tensor17, 2); } exports.isTensor2D = isTensor2D; function isTensor3D(tensor17) { return isTensor(tensor17, 3); } exports.isTensor3D = isTensor3D; function isTensor4D(tensor17) { return isTensor(tensor17, 4); } exports.isTensor4D = isTensor4D; function isFloat(num) { return num % 1 !== 0; } exports.isFloat = isFloat; function isEven(num) { return num % 2 === 0; } exports.isEven = isEven; function round3(num, prec = 2) { const f = Math.pow(10, prec); return Math.floor(num * f) / f; } exports.round = round3; function isDimensions(obj) { return obj && obj.width && obj.height; } exports.isDimensions = isDimensions; function computeReshapedDimensions({width, height}, inputSize) { const scale = inputSize / Math.max(height, width); return new Dimensions_1.Dimensions(Math.round(width * scale), Math.round(height * scale)); } exports.computeReshapedDimensions = computeReshapedDimensions; function getCenterPoint(pts) { return pts.reduce((sum26, pt) => sum26.add(pt), new classes_1.Point(0, 0)).div(new classes_1.Point(pts.length, pts.length)); } exports.getCenterPoint = getCenterPoint; function range4(num, start, step7) { return Array(num).fill(0).map((_, i) => start + i * step7); } exports.range = range4; function isValidNumber(num) { return !!num && num !== Infinity && num !== -Infinity && 
!isNaN(num) || num === 0; } exports.isValidNumber = isValidNumber; function isValidProbablitiy(num) { return isValidNumber(num) && 0 <= num && num <= 1; } exports.isValidProbablitiy = isValidProbablitiy; }); // build/classes/Point.js var require_Point = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.Point = void 0; class Point { constructor(x, y) { this._x = x; this._y = y; } get x() { return this._x; } get y() { return this._y; } add(pt) { return new Point(this.x + pt.x, this.y + pt.y); } sub(pt) { return new Point(this.x - pt.x, this.y - pt.y); } mul(pt) { return new Point(this.x * pt.x, this.y * pt.y); } div(pt) { return new Point(this.x / pt.x, this.y / pt.y); } abs() { return new Point(Math.abs(this.x), Math.abs(this.y)); } magnitude() { return Math.sqrt(Math.pow(this.x, 2) + Math.pow(this.y, 2)); } floor() { return new Point(Math.floor(this.x), Math.floor(this.y)); } } exports.Point = Point; }); // build/classes/Box.js var require_Box = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.Box = void 0; const utils_1 = require_utils(); const Point_1 = require_Point(); class Box { constructor(_box, allowNegativeDimensions = true) { const box = _box || {}; const isBbox = [box.left, box.top, box.right, box.bottom].every(utils_1.isValidNumber); const isRect = [box.x, box.y, box.width, box.height].every(utils_1.isValidNumber); if (!isRect && !isBbox) { throw new Error(`Box.constructor - expected box to be IBoundingBox | IRect, instead have ${JSON.stringify(box)}`); } const [x, y, width, height] = isRect ? [box.x, box.y, box.width, box.height] : [box.left, box.top, box.right - box.left, box.bottom - box.top]; Box.assertIsValidBox({x, y, width, height}, "Box.constructor", allowNegativeDimensions); this._x = x; this._y = y; this._width = width; this._height = height; } static isRect(rect) { return !!rect && [rect.x, rect.y, rect.width, rect.height].every(utils_1.isValidNumber); } static assertIsValidBox(box, callee, allowNegativeDimensions = false) { if (!Box.isRect(box)) { throw new Error(`${callee} - invalid box: ${JSON.stringify(box)}, expected object with properties x, y, width, height`); } if (!allowNegativeDimensions && (box.width < 0 || box.height < 0)) { throw new Error(`${callee} - width (${box.width}) and height (${box.height}) must be positive numbers`); } } get x() { return this._x; } get y() { return this._y; } get width() { return this._width; } get height() { return this._height; } get left() { return this.x; } get top() { return this.y; } get right() { return this.x + this.width; } get bottom() { return this.y + this.height; } get area() { return this.width * this.height; } get topLeft() { return new Point_1.Point(this.left, this.top); } get topRight() { return new Point_1.Point(this.right, this.top); } get bottomLeft() { return new Point_1.Point(this.left, this.bottom); } get bottomRight() { return new Point_1.Point(this.right, this.bottom); } round() { const [x, y, width, height] = [this.x, this.y, this.width, this.height].map((val) => Math.round(val)); return new Box({x, y, width, height}); } floor() { const [x, y, width, height] = [this.x, this.y, this.width, this.height].map((val) => Math.floor(val)); return new Box({x, y, width, height}); } toSquare() { let {x, y, width, height} = this; const diff = Math.abs(width - height); if (width < height) { x -= diff / 2; width += diff; } if (height < width) { y -= diff / 2; height += diff; } return new Box({x, y, 
width, height}); } rescale(s) { const scaleX = utils_1.isDimensions(s) ? s.width : s; const scaleY = utils_1.isDimensions(s) ? s.height : s; return new Box({ x: this.x * scaleX, y: this.y * scaleY, width: this.width * scaleX, height: this.height * scaleY }); } pad(padX, padY) { let [x, y, width, height] = [ this.x - padX / 2, this.y - padY / 2, this.width + padX, this.height + padY ]; return new Box({x, y, width, height}); } clipAtImageBorders(imgWidth, imgHeight) { const {x, y, right, bottom} = this; const clippedX = Math.max(x, 0); const clippedY = Math.max(y, 0); const newWidth = right - clippedX; const newHeight = bottom - clippedY; const clippedWidth = Math.min(newWidth, imgWidth - clippedX); const clippedHeight = Math.min(newHeight, imgHeight - clippedY); return new Box({x: clippedX, y: clippedY, width: clippedWidth, height: clippedHeight}).floor(); } shift(sx, sy) { const {width, height} = this; const x = this.x + sx; const y = this.y + sy; return new Box({x, y, width, height}); } padAtBorders(imageHeight, imageWidth) { const w = this.width + 1; const h = this.height + 1; let dx = 1; let dy = 1; let edx = w; let edy = h; let x = this.left; let y = this.top; let ex = this.right; let ey = this.bottom; if (ex > imageWidth) { edx = -ex + imageWidth + w; ex = imageWidth; } if (ey > imageHeight) { edy = -ey + imageHeight + h; ey = imageHeight; } if (x < 1) { edy = 2 - x; x = 1; } if (y < 1) { edy = 2 - y; y = 1; } return {dy, edy, dx, edx, y, ey, x, ex, w, h}; } calibrate(region) { return new Box({ left: this.left + region.left * this.width, top: this.top + region.top * this.height, right: this.right + region.right * this.width, bottom: this.bottom + region.bottom * this.height }).toSquare().round(); } } exports.Box = Box; }); // build/classes/BoundingBox.js var require_BoundingBox = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.BoundingBox = void 0; const Box_1 = require_Box(); class BoundingBox extends Box_1.Box { constructor(left, top, right, bottom, allowNegativeDimensions = false) { super({left, top, right, bottom}, allowNegativeDimensions); } } exports.BoundingBox = BoundingBox; }); // build/classes/ObjectDetection.js var require_ObjectDetection = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.ObjectDetection = void 0; const Box_1 = require_Box(); const Dimensions_1 = require_Dimensions(); class ObjectDetection { constructor(score, classScore, className, relativeBox, imageDims) { this._imageDims = new Dimensions_1.Dimensions(imageDims.width, imageDims.height); this._score = score; this._classScore = classScore; this._className = className; this._box = new Box_1.Box(relativeBox).rescale(this._imageDims); } get score() { return this._score; } get classScore() { return this._classScore; } get className() { return this._className; } get box() { return this._box; } get imageDims() { return this._imageDims; } get imageWidth() { return this.imageDims.width; } get imageHeight() { return this.imageDims.height; } get relativeBox() { return new Box_1.Box(this._box).rescale(this.imageDims.reverse()); } forSize(width, height) { return new ObjectDetection(this.score, this.classScore, this.className, this.relativeBox, {width, height}); } } exports.ObjectDetection = ObjectDetection; }); // build/classes/FaceDetection.js var require_FaceDetection = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceDetection = void 
0; const ObjectDetection_1 = require_ObjectDetection(); class FaceDetection extends ObjectDetection_1.ObjectDetection { constructor(score, relativeBox, imageDims) { super(score, score, "", relativeBox, imageDims); } forSize(width, height) { const {score, relativeBox, imageDims} = super.forSize(width, height); return new FaceDetection(score, relativeBox, imageDims); } } exports.FaceDetection = FaceDetection; }); // build/ops/iou.js var require_iou = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.iou = void 0; function iou(box1, box2, isIOU = true) { const width = Math.max(0, Math.min(box1.right, box2.right) - Math.max(box1.left, box2.left)); const height = Math.max(0, Math.min(box1.bottom, box2.bottom) - Math.max(box1.top, box2.top)); const interSection = width * height; return isIOU ? interSection / (box1.area + box2.area - interSection) : interSection / Math.min(box1.area, box2.area); } exports.iou = iou; }); // build/ops/minBbox.js var require_minBbox = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.minBbox = void 0; const classes_1 = require_classes(); function minBbox(pts) { const xs = pts.map((pt) => pt.x); const ys = pts.map((pt) => pt.y); const minX = xs.reduce((min5, x) => x < min5 ? x : min5, Infinity); const minY = ys.reduce((min5, y) => y < min5 ? y : min5, Infinity); const maxX = xs.reduce((max7, x) => max7 < x ? x : max7, 0); const maxY = ys.reduce((max7, y) => max7 < y ? y : max7, 0); return new classes_1.BoundingBox(minX, minY, maxX, maxY); } exports.minBbox = minBbox; }); // build/ops/nonMaxSuppression.js var require_nonMaxSuppression = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.nonMaxSuppression = void 0; const iou_1 = require_iou(); function nonMaxSuppression2(boxes, scores, iouThreshold, isIOU = true) { let indicesSortedByScore = scores.map((score, boxIndex) => ({score, boxIndex})).sort((c1, c2) => c1.score - c2.score).map((c) => c.boxIndex); const pick = []; while (indicesSortedByScore.length > 0) { const curr = indicesSortedByScore.pop(); pick.push(curr); const indices = indicesSortedByScore; const outputs = []; for (let i = 0; i < indices.length; i++) { const idx = indices[i]; const currBox = boxes[curr]; const idxBox = boxes[idx]; outputs.push(iou_1.iou(currBox, idxBox, isIOU)); } indicesSortedByScore = indicesSortedByScore.filter((_, j) => outputs[j] <= iouThreshold); } return pick; } exports.nonMaxSuppression = nonMaxSuppression2; }); // build/ops/normalize.js var require_normalize = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.normalize = void 0; const tf = require_dist(); function normalize(x, meanRgb) { return tf.tidy(() => { const [r, g, b] = meanRgb; const avg_r = tf.fill([...x.shape.slice(0, 3), 1], r); const avg_g = tf.fill([...x.shape.slice(0, 3), 1], g); const avg_b = tf.fill([...x.shape.slice(0, 3), 1], b); const avg_rgb = tf.concat([avg_r, avg_g, avg_b], 3); return tf.sub(x, avg_rgb); }); } exports.normalize = normalize; }); // build/ops/padToSquare.js var require_padToSquare = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.padToSquare = void 0; const tf = require_dist(); function padToSquare(imgTensor, isCenterImage = false) { return tf.tidy(() => { const [height, width] = imgTensor.shape.slice(1); if (height === width) { return imgTensor; } 
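// Pad the shorter image dimension with zero-filled tensors so the result is square:
// by default all padding is appended after the image along that axis; when isCenterImage
// is true roughly half is appended and the remainder is prepended, keeping the image centered.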
const dimDiff = Math.abs(height - width); const paddingAmount = Math.round(dimDiff * (isCenterImage ? 0.5 : 1)); const paddingAxis = height > width ? 2 : 1; const createPaddingTensor = (paddingAmount2) => { const paddingTensorShape = imgTensor.shape.slice(); paddingTensorShape[paddingAxis] = paddingAmount2; return tf.fill(paddingTensorShape, 0); }; const paddingTensorAppend = createPaddingTensor(paddingAmount); const remainingPaddingAmount = dimDiff - paddingTensorAppend.shape[paddingAxis]; const paddingTensorPrepend = isCenterImage && remainingPaddingAmount ? createPaddingTensor(remainingPaddingAmount) : null; const tensorsToStack = [ paddingTensorPrepend, imgTensor, paddingTensorAppend ].filter((t) => !!t).map((t) => t.toFloat()); return tf.concat(tensorsToStack, paddingAxis); }); } exports.padToSquare = padToSquare; }); // build/ops/shuffleArray.js var require_shuffleArray = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.shuffleArray = void 0; function shuffleArray(inputArray) { const array = inputArray.slice(); for (let i = array.length - 1; i > 0; i--) { const j = Math.floor(Math.random() * (i + 1)); const x = array[i]; array[i] = array[j]; array[j] = x; } return array; } exports.shuffleArray = shuffleArray; }); // build/ops/index.js var require_ops = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.inverseSigmoid = exports.sigmoid = void 0; const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_iou(), exports); tslib_1.__exportStar(require_minBbox(), exports); tslib_1.__exportStar(require_nonMaxSuppression(), exports); tslib_1.__exportStar(require_normalize(), exports); tslib_1.__exportStar(require_padToSquare(), exports); tslib_1.__exportStar(require_shuffleArray(), exports); function sigmoid6(x) { return 1 / (1 + Math.exp(-x)); } exports.sigmoid = sigmoid6; function inverseSigmoid(x) { return Math.log(x / (1 - x)); } exports.inverseSigmoid = inverseSigmoid; }); // build/classes/Rect.js var require_Rect = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.Rect = void 0; const Box_1 = require_Box(); class Rect extends Box_1.Box { constructor(x, y, width, height, allowNegativeDimensions = false) { super({x, y, width, height}, allowNegativeDimensions); } } exports.Rect = Rect; }); // build/classes/FaceLandmarks.js var require_FaceLandmarks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmarks = void 0; const ops_1 = require_ops(); const utils_1 = require_utils(); const Box_1 = require_Box(); const Dimensions_1 = require_Dimensions(); const FaceDetection_1 = require_FaceDetection(); const Point_1 = require_Point(); const Rect_1 = require_Rect(); const relX = 0.5; const relY = 0.43; const relScale = 0.45; class FaceLandmarks { constructor(relativeFaceLandmarkPositions, imgDims, shift = new Point_1.Point(0, 0)) { const {width, height} = imgDims; this._imgDims = new Dimensions_1.Dimensions(width, height); this._shift = shift; this._positions = relativeFaceLandmarkPositions.map((pt) => pt.mul(new Point_1.Point(width, height)).add(shift)); } get shift() { return new Point_1.Point(this._shift.x, this._shift.y); } get imageWidth() { return this._imgDims.width; } get imageHeight() { return this._imgDims.height; } get positions() { return this._positions; } get relativePositions() { return this._positions.map((pt) => 
pt.sub(this._shift).div(new Point_1.Point(this.imageWidth, this.imageHeight))); } forSize(width, height) { return new this.constructor(this.relativePositions, {width, height}); } shiftBy(x, y) { return new this.constructor(this.relativePositions, this._imgDims, new Point_1.Point(x, y)); } shiftByPoint(pt) { return this.shiftBy(pt.x, pt.y); } align(detection, options = {}) { if (detection) { const box = detection instanceof FaceDetection_1.FaceDetection ? detection.box.floor() : new Box_1.Box(detection); return this.shiftBy(box.x, box.y).align(null, options); } const {useDlibAlignment, minBoxPadding} = Object.assign({}, {useDlibAlignment: false, minBoxPadding: 0.2}, options); if (useDlibAlignment) { return this.alignDlib(); } return this.alignMinBbox(minBoxPadding); } alignDlib() { const centers = this.getRefPointsForAlignment(); const [leftEyeCenter, rightEyeCenter, mouthCenter] = centers; const distToMouth = (pt) => mouthCenter.sub(pt).magnitude(); const eyeToMouthDist = (distToMouth(leftEyeCenter) + distToMouth(rightEyeCenter)) / 2; const size = Math.floor(eyeToMouthDist / relScale); const refPoint = utils_1.getCenterPoint(centers); const x = Math.floor(Math.max(0, refPoint.x - relX * size)); const y = Math.floor(Math.max(0, refPoint.y - relY * size)); return new Rect_1.Rect(x, y, Math.min(size, this.imageWidth + x), Math.min(size, this.imageHeight + y)); } alignMinBbox(padding) { const box = ops_1.minBbox(this.positions); return box.pad(box.width * padding, box.height * padding); } getRefPointsForAlignment() { throw new Error("getRefPointsForAlignment not implemented by base class"); } } exports.FaceLandmarks = FaceLandmarks; }); // build/classes/FaceLandmarks5.js var require_FaceLandmarks5 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmarks5 = void 0; const utils_1 = require_utils(); const FaceLandmarks_1 = require_FaceLandmarks(); class FaceLandmarks5 extends FaceLandmarks_1.FaceLandmarks { getRefPointsForAlignment() { const pts = this.positions; return [ pts[0], pts[1], utils_1.getCenterPoint([pts[3], pts[4]]) ]; } } exports.FaceLandmarks5 = FaceLandmarks5; }); // build/classes/FaceLandmarks68.js var require_FaceLandmarks68 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmarks68 = void 0; const utils_1 = require_utils(); const FaceLandmarks_1 = require_FaceLandmarks(); class FaceLandmarks68 extends FaceLandmarks_1.FaceLandmarks { getJawOutline() { return this.positions.slice(0, 17); } getLeftEyeBrow() { return this.positions.slice(17, 22); } getRightEyeBrow() { return this.positions.slice(22, 27); } getNose() { return this.positions.slice(27, 36); } getLeftEye() { return this.positions.slice(36, 42); } getRightEye() { return this.positions.slice(42, 48); } getMouth() { return this.positions.slice(48, 68); } getRefPointsForAlignment() { return [ this.getLeftEye(), this.getRightEye(), this.getMouth() ].map(utils_1.getCenterPoint); } } exports.FaceLandmarks68 = FaceLandmarks68; }); // build/classes/FaceMatch.js var require_FaceMatch = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceMatch = void 0; const utils_1 = require_utils(); class FaceMatch { constructor(label, distance) { this._label = label; this._distance = distance; } get label() { return this._label; } get distance() { return this._distance; } toString(withDistance = true) { return `${this.label}${withDistance ? 
` (${utils_1.round(this.distance)})` : ""}`; } } exports.FaceMatch = FaceMatch; }); // build/classes/LabeledBox.js var require_LabeledBox = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.LabeledBox = void 0; const utils_1 = require_utils(); const Box_1 = require_Box(); class LabeledBox extends Box_1.Box { constructor(box, label) { super(box); this._label = label; } static assertIsValidLabeledBox(box, callee) { Box_1.Box.assertIsValidBox(box, callee); if (!utils_1.isValidNumber(box.label)) { throw new Error(`${callee} - expected property label (${box.label}) to be a number`); } } get label() { return this._label; } } exports.LabeledBox = LabeledBox; }); // build/classes/LabeledFaceDescriptors.js var require_LabeledFaceDescriptors = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.LabeledFaceDescriptors = void 0; class LabeledFaceDescriptors { constructor(label, descriptors) { if (!(typeof label === "string")) { throw new Error("LabeledFaceDescriptors - constructor expected label to be a string"); } if (!Array.isArray(descriptors) || descriptors.some((desc) => !(desc instanceof Float32Array))) { throw new Error("LabeledFaceDescriptors - constructor expected descriptors to be an array of Float32Array"); } this._label = label; this._descriptors = descriptors; } get label() { return this._label; } get descriptors() { return this._descriptors; } toJSON() { return { label: this.label, descriptors: this.descriptors.map((d) => Array.from(d)) }; } static fromJSON(json) { const descriptors = json.descriptors.map((d) => { return new Float32Array(d); }); return new LabeledFaceDescriptors(json.label, descriptors); } } exports.LabeledFaceDescriptors = LabeledFaceDescriptors; }); // build/classes/PredictedBox.js var require_PredictedBox = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.PredictedBox = void 0; const utils_1 = require_utils(); const LabeledBox_1 = require_LabeledBox(); class PredictedBox extends LabeledBox_1.LabeledBox { constructor(box, label, score, classScore) { super(box, label); this._score = score; this._classScore = classScore; } static assertIsValidPredictedBox(box, callee) { LabeledBox_1.LabeledBox.assertIsValidLabeledBox(box, callee); if (!utils_1.isValidProbablitiy(box.score) || !utils_1.isValidProbablitiy(box.classScore)) { throw new Error(`${callee} - expected properties score (${box.score}) and (${box.classScore}) to be a number between [0, 1]`); } } get score() { return this._score; } get classScore() { return this._classScore; } } exports.PredictedBox = PredictedBox; }); // build/classes/index.js var require_classes = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_BoundingBox(), exports); tslib_1.__exportStar(require_Box(), exports); tslib_1.__exportStar(require_Dimensions(), exports); tslib_1.__exportStar(require_FaceDetection(), exports); tslib_1.__exportStar(require_FaceLandmarks(), exports); tslib_1.__exportStar(require_FaceLandmarks5(), exports); tslib_1.__exportStar(require_FaceLandmarks68(), exports); tslib_1.__exportStar(require_FaceMatch(), exports); tslib_1.__exportStar(require_LabeledBox(), exports); tslib_1.__exportStar(require_LabeledFaceDescriptors(), exports); tslib_1.__exportStar(require_ObjectDetection(), exports); tslib_1.__exportStar(require_Point(), exports); 
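// Usage sketch (illustrative comment, not part of the library code): LabeledFaceDescriptors defined
// above supports a JSON round trip, which is convenient for persisting reference descriptors.
// The label and the zero-filled 128-float descriptor below are placeholder values; real descriptors
// come from a recognition net.
//
//   const ref = new faceapi.LabeledFaceDescriptors("person_1", [new Float32Array(128)]);
//   const serialized = JSON.stringify(ref.toJSON());
//   const restored = faceapi.LabeledFaceDescriptors.fromJSON(JSON.parse(serialized));
//   // restored.label === "person_1", restored.descriptors[0] instanceof Float32Array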
tslib_1.__exportStar(require_PredictedBox(), exports); tslib_1.__exportStar(require_Rect(), exports); }); // build/factories/WithFaceDetection.js var require_WithFaceDetection = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithFaceDetection = exports.isWithFaceDetection = void 0; const FaceDetection_1 = require_FaceDetection(); function isWithFaceDetection(obj) { return obj["detection"] instanceof FaceDetection_1.FaceDetection; } exports.isWithFaceDetection = isWithFaceDetection; function extendWithFaceDetection(sourceObj, detection) { const extension = {detection}; return Object.assign({}, sourceObj, extension); } exports.extendWithFaceDetection = extendWithFaceDetection; }); // build/env/createBrowserEnv.js var require_createBrowserEnv = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createBrowserEnv = void 0; function createBrowserEnv() { const fetch3 = window["fetch"] || function() { throw new Error("fetch - missing fetch implementation for browser environment"); }; const readFile = function() { throw new Error("readFile - filesystem not available for browser environment"); }; return { Canvas: HTMLCanvasElement, CanvasRenderingContext2D, Image: HTMLImageElement, ImageData, Video: HTMLVideoElement, createCanvasElement: () => document.createElement("canvas"), createImageElement: () => document.createElement("img"), fetch: fetch3, readFile }; } exports.createBrowserEnv = createBrowserEnv; }); // build/env/createFileSystem.js var require_createFileSystem = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createFileSystem = void 0; function createFileSystem(fs) { let requireFsError = ""; if (!fs) { try { fs = require("fs"); } catch (err) { requireFsError = err.toString(); } } const readFile = fs ? function(filePath) { return new Promise((res, rej) => { fs.readFile(filePath, function(err, buffer10) { return err ? 
rej(err) : res(buffer10); }); }); } : function() { throw new Error(`readFile - failed to require fs in nodejs environment with error: ${requireFsError}`); }; return { readFile }; } exports.createFileSystem = createFileSystem; }); // build/env/createNodejsEnv.js var require_createNodejsEnv = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createNodejsEnv = void 0; const createFileSystem_1 = require_createFileSystem(); function createNodejsEnv() { const Canvas = global["Canvas"] || global["HTMLCanvasElement"]; const Image = global["Image"] || global["HTMLImageElement"]; const createCanvasElement = function() { if (Canvas) { return new Canvas(); } throw new Error("createCanvasElement - missing Canvas implementation for nodejs environment"); }; const createImageElement = function() { if (Image) { return new Image(); } throw new Error("createImageElement - missing Image implementation for nodejs environment"); }; const fetch3 = global["fetch"] || function() { throw new Error("fetch - missing fetch implementation for nodejs environment"); }; const fileSystem = createFileSystem_1.createFileSystem(); return { Canvas: Canvas || class { }, CanvasRenderingContext2D: global["CanvasRenderingContext2D"] || class { }, Image: Image || class { }, ImageData: global["ImageData"] || class { }, Video: global["HTMLVideoElement"] || class { }, createCanvasElement, createImageElement, fetch: fetch3, ...fileSystem }; } exports.createNodejsEnv = createNodejsEnv; }); // build/env/isBrowser.js var require_isBrowser = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.isBrowser = void 0; function isBrowser2() { return typeof window === "object" && typeof document !== "undefined" && typeof HTMLImageElement !== "undefined" && typeof HTMLCanvasElement !== "undefined" && typeof HTMLVideoElement !== "undefined" && typeof ImageData !== "undefined" && typeof CanvasRenderingContext2D !== "undefined"; } exports.isBrowser = isBrowser2; }); // build/env/isNodejs.js var require_isNodejs = __commonJS((exports, module) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.isNodejs = void 0; function isNodejs() { return typeof global === "object" && true && typeof module !== "undefined" && typeof process !== "undefined" && !!process.version; } exports.isNodejs = isNodejs; }); // build/env/types.js var require_types = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); }); // build/env/index.js var require_env = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.env = void 0; const tslib_1 = require_tslib_es6(); const createBrowserEnv_1 = require_createBrowserEnv(); const createFileSystem_1 = require_createFileSystem(); const createNodejsEnv_1 = require_createNodejsEnv(); const isBrowser_1 = require_isBrowser(); const isNodejs_1 = require_isNodejs(); let environment11; function getEnv() { if (!environment11) { throw new Error("getEnv - environment is not defined, check isNodejs() and isBrowser()"); } return environment11; } function setEnv(env2) { environment11 = env2; } function initialize() { if (isBrowser_1.isBrowser()) { return setEnv(createBrowserEnv_1.createBrowserEnv()); } if (isNodejs_1.isNodejs()) { return setEnv(createNodejsEnv_1.createNodejsEnv()); } } function monkeyPatch(env2) { if (!environment11) { initialize(); } if (!environment11) { throw new Error("monkeyPatch 
- environment is not defined, check isNodejs() and isBrowser()"); } const {Canvas = environment11.Canvas, Image = environment11.Image} = env2; environment11.Canvas = Canvas; environment11.Image = Image; environment11.createCanvasElement = env2.createCanvasElement || (() => new Canvas()); environment11.createImageElement = env2.createImageElement || (() => new Image()); environment11.ImageData = env2.ImageData || environment11.ImageData; environment11.Video = env2.Video || environment11.Video; environment11.fetch = env2.fetch || environment11.fetch; environment11.readFile = env2.readFile || environment11.readFile; } exports.env = { getEnv, setEnv, initialize, createBrowserEnv: createBrowserEnv_1.createBrowserEnv, createFileSystem: createFileSystem_1.createFileSystem, createNodejsEnv: createNodejsEnv_1.createNodejsEnv, monkeyPatch, isBrowser: isBrowser_1.isBrowser, isNodejs: isNodejs_1.isNodejs }; initialize(); tslib_1.__exportStar(require_types(), exports); }); // build/dom/resolveInput.js var require_resolveInput = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.resolveInput = void 0; const env_1 = require_env(); function resolveInput(arg) { if (!env_1.env.isNodejs() && typeof arg === "string") { return document.getElementById(arg); } return arg; } exports.resolveInput = resolveInput; }); // build/dom/getContext2dOrThrow.js var require_getContext2dOrThrow = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.getContext2dOrThrow = void 0; const env_1 = require_env(); const resolveInput_1 = require_resolveInput(); function getContext2dOrThrow(canvasArg) { const {Canvas, CanvasRenderingContext2D: CanvasRenderingContext2D2} = env_1.env.getEnv(); if (canvasArg instanceof CanvasRenderingContext2D2) { return canvasArg; } const canvas = resolveInput_1.resolveInput(canvasArg); if (!(canvas instanceof Canvas)) { throw new Error("resolveContext2d - expected canvas to be of instance of Canvas"); } const ctx = canvas.getContext("2d"); if (!ctx) { throw new Error("resolveContext2d - canvas 2d context is null"); } return ctx; } exports.getContext2dOrThrow = getContext2dOrThrow; }); // build/draw/DrawTextField.js var require_DrawTextField = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.DrawTextField = exports.DrawTextFieldOptions = exports.AnchorPosition = void 0; const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const resolveInput_1 = require_resolveInput(); var AnchorPosition; (function(AnchorPosition2) { AnchorPosition2["TOP_LEFT"] = "TOP_LEFT"; AnchorPosition2["TOP_RIGHT"] = "TOP_RIGHT"; AnchorPosition2["BOTTOM_LEFT"] = "BOTTOM_LEFT"; AnchorPosition2["BOTTOM_RIGHT"] = "BOTTOM_RIGHT"; })(AnchorPosition = exports.AnchorPosition || (exports.AnchorPosition = {})); class DrawTextFieldOptions { constructor(options = {}) { const {anchorPosition, backgroundColor, fontColor, fontSize, fontStyle, padding} = options; this.anchorPosition = anchorPosition || AnchorPosition.TOP_LEFT; this.backgroundColor = backgroundColor || "rgba(0, 0, 0, 0.5)"; this.fontColor = fontColor || "rgba(255, 255, 255, 1)"; this.fontSize = fontSize || 14; this.fontStyle = fontStyle || "Georgia"; this.padding = padding || 4; } } exports.DrawTextFieldOptions = DrawTextFieldOptions; class DrawTextField { constructor(text, anchor, options = {}) { this.text = typeof text === "string" ? [text] : text instanceof DrawTextField ? 
text.text : text; this.anchor = anchor; this.options = new DrawTextFieldOptions(options); } measureWidth(ctx) { const {padding} = this.options; return this.text.map((l) => ctx.measureText(l).width).reduce((w0, w1) => w0 < w1 ? w1 : w0, 0) + 2 * padding; } measureHeight() { const {fontSize, padding} = this.options; return this.text.length * fontSize + 2 * padding; } getUpperLeft(ctx, canvasDims) { const {anchorPosition} = this.options; const isShiftLeft = anchorPosition === AnchorPosition.BOTTOM_RIGHT || anchorPosition === AnchorPosition.TOP_RIGHT; const isShiftTop = anchorPosition === AnchorPosition.BOTTOM_LEFT || anchorPosition === AnchorPosition.BOTTOM_RIGHT; const textFieldWidth = this.measureWidth(ctx); const textFieldHeight = this.measureHeight(); const x = isShiftLeft ? this.anchor.x - textFieldWidth : this.anchor.x; const y = isShiftTop ? this.anchor.y - textFieldHeight : this.anchor.y; if (canvasDims) { const {width, height} = canvasDims; const newX = Math.max(Math.min(x, width - textFieldWidth), 0); const newY = Math.max(Math.min(y, height - textFieldHeight), 0); return {x: newX, y: newY}; } return {x, y}; } draw(canvasArg) { const canvas = resolveInput_1.resolveInput(canvasArg); const ctx = getContext2dOrThrow_1.getContext2dOrThrow(canvas); const {backgroundColor, fontColor, fontSize, fontStyle, padding} = this.options; ctx.font = `${fontSize}px ${fontStyle}`; const maxTextWidth = this.measureWidth(ctx); const textHeight = this.measureHeight(); ctx.fillStyle = backgroundColor; const upperLeft = this.getUpperLeft(ctx, canvas); ctx.fillRect(upperLeft.x, upperLeft.y, maxTextWidth, textHeight); ctx.fillStyle = fontColor; this.text.forEach((textLine, i) => { const x = padding + upperLeft.x; const y = padding + upperLeft.y + (i + 1) * fontSize; ctx.fillText(textLine, x, y); }); } } exports.DrawTextField = DrawTextField; }); // build/draw/DrawBox.js var require_DrawBox = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.DrawBox = exports.DrawBoxOptions = void 0; const classes_1 = require_classes(); const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const DrawTextField_1 = require_DrawTextField(); class DrawBoxOptions { constructor(options = {}) { const {boxColor, lineWidth, label, drawLabelOptions} = options; this.boxColor = boxColor || "rgba(0, 0, 255, 1)"; this.lineWidth = lineWidth || 2; this.label = label; const defaultDrawLabelOptions = { anchorPosition: DrawTextField_1.AnchorPosition.BOTTOM_LEFT, backgroundColor: this.boxColor }; this.drawLabelOptions = new DrawTextField_1.DrawTextFieldOptions(Object.assign({}, defaultDrawLabelOptions, drawLabelOptions)); } } exports.DrawBoxOptions = DrawBoxOptions; class DrawBox { constructor(box, options = {}) { this.box = new classes_1.Box(box); this.options = new DrawBoxOptions(options); } draw(canvasArg) { const ctx = getContext2dOrThrow_1.getContext2dOrThrow(canvasArg); const {boxColor, lineWidth} = this.options; const {x, y, width, height} = this.box; ctx.strokeStyle = boxColor; ctx.lineWidth = lineWidth; ctx.strokeRect(x, y, width, height); const {label} = this.options; if (label) { new DrawTextField_1.DrawTextField([label], {x: x - lineWidth / 2, y}, this.options.drawLabelOptions).draw(canvasArg); } } } exports.DrawBox = DrawBox; }); // build/draw/drawDetections.js var require_drawDetections = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.drawDetections = void 0; const classes_1 = require_classes(); const 
FaceDetection_1 = require_FaceDetection(); const WithFaceDetection_1 = require_WithFaceDetection(); const utils_1 = require_utils(); const DrawBox_1 = require_DrawBox(); function drawDetections(canvasArg, detections) { const detectionsArray = Array.isArray(detections) ? detections : [detections]; detectionsArray.forEach((det) => { const score = det instanceof FaceDetection_1.FaceDetection ? det.score : WithFaceDetection_1.isWithFaceDetection(det) ? det.detection.score : void 0; const box = det instanceof FaceDetection_1.FaceDetection ? det.box : WithFaceDetection_1.isWithFaceDetection(det) ? det.detection.box : new classes_1.Box(det); const label = score ? `${utils_1.round(score)}` : void 0; new DrawBox_1.DrawBox(box, {label}).draw(canvasArg); }); } exports.drawDetections = drawDetections; }); // build/dom/isMediaLoaded.js var require_isMediaLoaded = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.isMediaLoaded = void 0; const env_1 = require_env(); function isMediaLoaded(media) { const {Image, Video} = env_1.env.getEnv(); return media instanceof Image && media.complete || media instanceof Video && media.readyState >= 3; } exports.isMediaLoaded = isMediaLoaded; }); // build/dom/awaitMediaLoaded.js var require_awaitMediaLoaded = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.awaitMediaLoaded = void 0; const env_1 = require_env(); const isMediaLoaded_1 = require_isMediaLoaded(); function awaitMediaLoaded(media) { return new Promise((resolve, reject) => { if (media instanceof env_1.env.getEnv().Canvas || isMediaLoaded_1.isMediaLoaded(media)) { return resolve(); } function onLoad(e) { if (!e.currentTarget) return; e.currentTarget.removeEventListener("load", onLoad); e.currentTarget.removeEventListener("error", onError); resolve(e); } function onError(e) { if (!e.currentTarget) return; e.currentTarget.removeEventListener("load", onLoad); e.currentTarget.removeEventListener("error", onError); reject(e); } media.addEventListener("load", onLoad); media.addEventListener("error", onError); }); } exports.awaitMediaLoaded = awaitMediaLoaded; }); // build/dom/bufferToImage.js var require_bufferToImage = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.bufferToImage = void 0; const env_1 = require_env(); function bufferToImage(buf) { return new Promise((resolve, reject) => { if (!(buf instanceof Blob)) { return reject("bufferToImage - expected buf to be of type: Blob"); } const reader = new FileReader(); reader.onload = () => { if (typeof reader.result !== "string") { return reject("bufferToImage - expected reader.result to be a string, in onload"); } const img = env_1.env.getEnv().createImageElement(); img.onload = () => resolve(img); img.onerror = reject; img.src = reader.result; }; reader.onerror = reject; reader.readAsDataURL(buf); }); } exports.bufferToImage = bufferToImage; }); // build/dom/getMediaDimensions.js var require_getMediaDimensions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.getMediaDimensions = void 0; const Dimensions_1 = require_Dimensions(); const env_1 = require_env(); function getMediaDimensions(input) { const {Image, Video} = env_1.env.getEnv(); if (input instanceof Image) { return new Dimensions_1.Dimensions(input.naturalWidth, input.naturalHeight); } if (input instanceof Video) { return new Dimensions_1.Dimensions(input.videoWidth, 
input.videoHeight); } return new Dimensions_1.Dimensions(input.width, input.height); } exports.getMediaDimensions = getMediaDimensions; }); // build/dom/createCanvas.js var require_createCanvas = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createCanvasFromMedia = exports.createCanvas = void 0; const env_1 = require_env(); const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const getMediaDimensions_1 = require_getMediaDimensions(); const isMediaLoaded_1 = require_isMediaLoaded(); function createCanvas({width, height}) { const {createCanvasElement} = env_1.env.getEnv(); const canvas = createCanvasElement(); canvas.width = width; canvas.height = height; return canvas; } exports.createCanvas = createCanvas; function createCanvasFromMedia(media, dims) { const {ImageData: ImageData2} = env_1.env.getEnv(); if (!(media instanceof ImageData2) && !isMediaLoaded_1.isMediaLoaded(media)) { throw new Error("createCanvasFromMedia - media has not finished loading yet"); } const {width, height} = dims || getMediaDimensions_1.getMediaDimensions(media); const canvas = createCanvas({width, height}); if (media instanceof ImageData2) { getContext2dOrThrow_1.getContext2dOrThrow(canvas).putImageData(media, 0, 0); } else { getContext2dOrThrow_1.getContext2dOrThrow(canvas).drawImage(media, 0, 0, width, height); } return canvas; } exports.createCanvasFromMedia = createCanvasFromMedia; }); // build/dom/imageTensorToCanvas.js var require_imageTensorToCanvas = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.imageTensorToCanvas = void 0; const tf = require_dist(); const env_1 = require_env(); const utils_1 = require_utils(); async function imageTensorToCanvas(imgTensor, canvas) { const targetCanvas = canvas || env_1.env.getEnv().createCanvasElement(); const [height, width, numChannels] = imgTensor.shape.slice(utils_1.isTensor4D(imgTensor) ? 
1 : 0); const imgTensor3D = tf.tidy(() => imgTensor.as3D(height, width, numChannels).toInt()); await tf.browser.toPixels(imgTensor3D, targetCanvas); imgTensor3D.dispose(); return targetCanvas; } exports.imageTensorToCanvas = imageTensorToCanvas; }); // build/dom/isMediaElement.js var require_isMediaElement = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.isMediaElement = void 0; const env_1 = require_env(); function isMediaElement(input) { const {Image, Canvas, Video} = env_1.env.getEnv(); return input instanceof Image || input instanceof Canvas || input instanceof Video; } exports.isMediaElement = isMediaElement; }); // build/dom/imageToSquare.js var require_imageToSquare = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.imageToSquare = void 0; const env_1 = require_env(); const createCanvas_1 = require_createCanvas(); const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const getMediaDimensions_1 = require_getMediaDimensions(); function imageToSquare(input, inputSize, centerImage = false) { const {Image, Canvas} = env_1.env.getEnv(); if (!(input instanceof Image || input instanceof Canvas)) { throw new Error("imageToSquare - expected arg0 to be HTMLImageElement | HTMLCanvasElement"); } const dims = getMediaDimensions_1.getMediaDimensions(input); const scale = inputSize / Math.max(dims.height, dims.width); const width = scale * dims.width; const height = scale * dims.height; const targetCanvas = createCanvas_1.createCanvas({width: inputSize, height: inputSize}); const inputCanvas = input instanceof Canvas ? input : createCanvas_1.createCanvasFromMedia(input); const offset = Math.abs(width - height) / 2; const dx = centerImage && width < height ? offset : 0; const dy = centerImage && height < width ? offset : 0; getContext2dOrThrow_1.getContext2dOrThrow(targetCanvas).drawImage(inputCanvas, dx, dy, width, height); return targetCanvas; } exports.imageToSquare = imageToSquare; }); // build/dom/NetInput.js var require_NetInput = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.NetInput = void 0; const tf = require_dist(); const env_1 = require_env(); const padToSquare_1 = require_padToSquare(); const utils_1 = require_utils(); const createCanvas_1 = require_createCanvas(); const imageToSquare_1 = require_imageToSquare(); class NetInput { constructor(inputs, treatAsBatchInput = false) { this._imageTensors = []; this._canvases = []; this._treatAsBatchInput = false; this._inputDimensions = []; if (!Array.isArray(inputs)) { throw new Error(`NetInput.constructor - expected inputs to be an Array of TResolvedNetInput or to be instanceof tf.Tensor4D, instead have ${inputs}`); } this._treatAsBatchInput = treatAsBatchInput; this._batchSize = inputs.length; inputs.forEach((input, idx) => { if (utils_1.isTensor3D(input)) { this._imageTensors[idx] = input; this._inputDimensions[idx] = input.shape; return; } if (utils_1.isTensor4D(input)) { const batchSize = input.shape[0]; if (batchSize !== 1) { throw new Error(`NetInput - tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`); } this._imageTensors[idx] = input; this._inputDimensions[idx] = input.shape.slice(1); return; } const canvas = input instanceof env_1.env.getEnv().Canvas ? 
input : createCanvas_1.createCanvasFromMedia(input); this._canvases[idx] = canvas; this._inputDimensions[idx] = [canvas.height, canvas.width, 3]; }); } get imageTensors() { return this._imageTensors; } get canvases() { return this._canvases; } get isBatchInput() { return this.batchSize > 1 || this._treatAsBatchInput; } get batchSize() { return this._batchSize; } get inputDimensions() { return this._inputDimensions; } get inputSize() { return this._inputSize; } get reshapedInputDimensions() { return utils_1.range(this.batchSize, 0, 1).map((_, batchIdx) => this.getReshapedInputDimensions(batchIdx)); } getInput(batchIdx) { return this.canvases[batchIdx] || this.imageTensors[batchIdx]; } getInputDimensions(batchIdx) { return this._inputDimensions[batchIdx]; } getInputHeight(batchIdx) { return this._inputDimensions[batchIdx][0]; } getInputWidth(batchIdx) { return this._inputDimensions[batchIdx][1]; } getReshapedInputDimensions(batchIdx) { if (typeof this.inputSize !== "number") { throw new Error("getReshapedInputDimensions - inputSize not set, toBatchTensor has not been called yet"); } const width = this.getInputWidth(batchIdx); const height = this.getInputHeight(batchIdx); return utils_1.computeReshapedDimensions({width, height}, this.inputSize); } toBatchTensor(inputSize, isCenterInputs = true) { this._inputSize = inputSize; return tf.tidy(() => { const inputTensors = utils_1.range(this.batchSize, 0, 1).map((batchIdx) => { const input = this.getInput(batchIdx); if (input instanceof tf.Tensor) { let imgTensor = utils_1.isTensor4D(input) ? input : input.expandDims(); imgTensor = padToSquare_1.padToSquare(imgTensor, isCenterInputs); if (imgTensor.shape[1] !== inputSize || imgTensor.shape[2] !== inputSize) { imgTensor = tf.image.resizeBilinear(imgTensor, [inputSize, inputSize]); } return imgTensor.as3D(inputSize, inputSize, 3); } if (input instanceof env_1.env.getEnv().Canvas) { return tf.browser.fromPixels(imageToSquare_1.imageToSquare(input, inputSize, isCenterInputs)); } throw new Error(`toBatchTensor - at batchIdx ${batchIdx}, expected input to be instanceof tf.Tensor or instanceof HTMLCanvasElement, instead have ${input}`); }); const batchTensor = tf.stack(inputTensors.map((t) => t.toFloat())).as4D(this.batchSize, inputSize, inputSize, 3); return batchTensor; }); } } exports.NetInput = NetInput; }); // build/dom/toNetInput.js var require_toNetInput = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.toNetInput = void 0; const utils_1 = require_utils(); const awaitMediaLoaded_1 = require_awaitMediaLoaded(); const isMediaElement_1 = require_isMediaElement(); const NetInput_1 = require_NetInput(); const resolveInput_1 = require_resolveInput(); async function toNetInput(inputs) { if (inputs instanceof NetInput_1.NetInput) { return inputs; } let inputArgArray = Array.isArray(inputs) ? inputs : [inputs]; if (!inputArgArray.length) { throw new Error("toNetInput - empty array passed as input"); } const getIdxHint = (idx) => Array.isArray(inputs) ? 
` at input index ${idx}:` : ""; const inputArray = inputArgArray.map(resolveInput_1.resolveInput); inputArray.forEach((input, i) => { if (!isMediaElement_1.isMediaElement(input) && !utils_1.isTensor3D(input) && !utils_1.isTensor4D(input)) { if (typeof inputArgArray[i] === "string") { throw new Error(`toNetInput -${getIdxHint(i)} string passed, but could not resolve HTMLElement for element id ${inputArgArray[i]}`); } throw new Error(`toNetInput -${getIdxHint(i)} expected media to be of type HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | tf.Tensor3D, or to be an element id`); } if (utils_1.isTensor4D(input)) { const batchSize = input.shape[0]; if (batchSize !== 1) { throw new Error(`toNetInput -${getIdxHint(i)} tf.Tensor4D with batchSize ${batchSize} passed, but not supported in input array`); } } }); await Promise.all(inputArray.map((input) => isMediaElement_1.isMediaElement(input) && awaitMediaLoaded_1.awaitMediaLoaded(input))); return new NetInput_1.NetInput(inputArray, Array.isArray(inputs)); } exports.toNetInput = toNetInput; }); // build/dom/extractFaces.js var require_extractFaces = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractFaces = void 0; const FaceDetection_1 = require_FaceDetection(); const env_1 = require_env(); const createCanvas_1 = require_createCanvas(); const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const imageTensorToCanvas_1 = require_imageTensorToCanvas(); const toNetInput_1 = require_toNetInput(); async function extractFaces(input, detections) { const {Canvas} = env_1.env.getEnv(); let canvas = input; if (!(input instanceof Canvas)) { const netInput = await toNetInput_1.toNetInput(input); if (netInput.batchSize > 1) { throw new Error("extractFaces - batchSize > 1 not supported"); } const tensorOrCanvas = netInput.getInput(0); canvas = tensorOrCanvas instanceof Canvas ? tensorOrCanvas : await imageTensorToCanvas_1.imageTensorToCanvas(tensorOrCanvas); } const ctx = getContext2dOrThrow_1.getContext2dOrThrow(canvas); const boxes = detections.map((det) => det instanceof FaceDetection_1.FaceDetection ? det.forSize(canvas.width, canvas.height).box.floor() : det).map((box) => box.clipAtImageBorders(canvas.width, canvas.height)); return boxes.map(({x, y, width, height}) => { const faceImg = createCanvas_1.createCanvas({width, height}); getContext2dOrThrow_1.getContext2dOrThrow(faceImg).putImageData(ctx.getImageData(x, y, width, height), 0, 0); return faceImg; }); } exports.extractFaces = extractFaces; }); // build/dom/extractFaceTensors.js var require_extractFaceTensors = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractFaceTensors = void 0; const tf = require_dist(); const FaceDetection_1 = require_FaceDetection(); const utils_1 = require_utils(); async function extractFaceTensors(imageTensor, detections) { if (!utils_1.isTensor3D(imageTensor) && !utils_1.isTensor4D(imageTensor)) { throw new Error("extractFaceTensors - expected image tensor to be 3D or 4D"); } if (utils_1.isTensor4D(imageTensor) && imageTensor.shape[0] > 1) { throw new Error("extractFaceTensors - batchSize > 1 not supported"); } return tf.tidy(() => { const [imgHeight, imgWidth, numChannels] = imageTensor.shape.slice(utils_1.isTensor4D(imageTensor) ? 1 : 0); const boxes = detections.map((det) => det instanceof FaceDetection_1.FaceDetection ? 
det.forSize(imgWidth, imgHeight).box : det).map((box) => box.clipAtImageBorders(imgWidth, imgHeight)); const faceTensors = boxes.map(({x, y, width, height}) => tf.slice3d(imageTensor.as3D(imgHeight, imgWidth, numChannels), [y, x, 0], [height, width, numChannels])); return faceTensors; }); } exports.extractFaceTensors = extractFaceTensors; }); // build/dom/fetchOrThrow.js var require_fetchOrThrow = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.fetchOrThrow = void 0; const env_1 = require_env(); async function fetchOrThrow(url, init) { const fetch3 = env_1.env.getEnv().fetch; const res = await fetch3(url, init); if (!(res.status < 400)) { throw new Error(`failed to fetch: (${res.status}) ${res.statusText}, from url: ${res.url}`); } return res; } exports.fetchOrThrow = fetchOrThrow; }); // build/dom/fetchImage.js var require_fetchImage = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.fetchImage = void 0; const bufferToImage_1 = require_bufferToImage(); const fetchOrThrow_1 = require_fetchOrThrow(); async function fetchImage(uri) { const res = await fetchOrThrow_1.fetchOrThrow(uri); const blob = await res.blob(); if (!blob.type.startsWith("image/")) { throw new Error(`fetchImage - expected blob type to be of type image/*, instead have: ${blob.type}, for url: ${res.url}`); } return bufferToImage_1.bufferToImage(blob); } exports.fetchImage = fetchImage; }); // build/dom/fetchJson.js var require_fetchJson = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.fetchJson = void 0; const fetchOrThrow_1 = require_fetchOrThrow(); async function fetchJson(uri) { return (await fetchOrThrow_1.fetchOrThrow(uri)).json(); } exports.fetchJson = fetchJson; }); // build/dom/fetchNetWeights.js var require_fetchNetWeights = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.fetchNetWeights = void 0; const fetchOrThrow_1 = require_fetchOrThrow(); async function fetchNetWeights(uri) { return new Float32Array(await (await fetchOrThrow_1.fetchOrThrow(uri)).arrayBuffer()); } exports.fetchNetWeights = fetchNetWeights; }); // build/common/getModelUris.js var require_getModelUris = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.getModelUris = void 0; function getModelUris(uri, defaultModelName) { const defaultManifestFilename = `${defaultModelName}-weights_manifest.json`; if (!uri) { return { modelBaseUri: "", manifestUri: defaultManifestFilename }; } if (uri === "/") { return { modelBaseUri: "/", manifestUri: `/${defaultManifestFilename}` }; } const protocol = uri.startsWith("http://") ? "http://" : uri.startsWith("https://") ? "https://" : ""; uri = uri.replace(protocol, ""); const parts = uri.split("/").filter((s) => s); const manifestFile = uri.endsWith(".json") ? parts[parts.length - 1] : defaultManifestFilename; let modelBaseUri = protocol + (uri.endsWith(".json") ? parts.slice(0, parts.length - 1) : parts).join("/"); modelBaseUri = uri.startsWith("/") ? `/${modelBaseUri}` : modelBaseUri; return { modelBaseUri, manifestUri: modelBaseUri === "/" ? 
`/${manifestFile}` : `${modelBaseUri}/${manifestFile}` }; } exports.getModelUris = getModelUris; }); // build/dom/loadWeightMap.js var require_loadWeightMap = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.loadWeightMap = void 0; const tf = require_dist(); const getModelUris_1 = require_getModelUris(); const fetchJson_1 = require_fetchJson(); async function loadWeightMap(uri, defaultModelName) { const {manifestUri, modelBaseUri} = getModelUris_1.getModelUris(uri, defaultModelName); const manifest = await fetchJson_1.fetchJson(manifestUri); return tf.io.loadWeights(manifest, modelBaseUri); } exports.loadWeightMap = loadWeightMap; }); // build/dom/matchDimensions.js var require_matchDimensions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.matchDimensions = void 0; const getMediaDimensions_1 = require_getMediaDimensions(); function matchDimensions(input, reference, useMediaDimensions = false) { const {width, height} = useMediaDimensions ? getMediaDimensions_1.getMediaDimensions(reference) : reference; input.width = width; input.height = height; return {width, height}; } exports.matchDimensions = matchDimensions; }); // build/dom/types.js var require_types2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); }); // build/dom/index.js var require_dom = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_awaitMediaLoaded(), exports); tslib_1.__exportStar(require_bufferToImage(), exports); tslib_1.__exportStar(require_createCanvas(), exports); tslib_1.__exportStar(require_extractFaces(), exports); tslib_1.__exportStar(require_extractFaceTensors(), exports); tslib_1.__exportStar(require_fetchImage(), exports); tslib_1.__exportStar(require_fetchJson(), exports); tslib_1.__exportStar(require_fetchNetWeights(), exports); tslib_1.__exportStar(require_fetchOrThrow(), exports); tslib_1.__exportStar(require_getContext2dOrThrow(), exports); tslib_1.__exportStar(require_getMediaDimensions(), exports); tslib_1.__exportStar(require_imageTensorToCanvas(), exports); tslib_1.__exportStar(require_imageToSquare(), exports); tslib_1.__exportStar(require_isMediaElement(), exports); tslib_1.__exportStar(require_isMediaLoaded(), exports); tslib_1.__exportStar(require_loadWeightMap(), exports); tslib_1.__exportStar(require_matchDimensions(), exports); tslib_1.__exportStar(require_NetInput(), exports); tslib_1.__exportStar(require_resolveInput(), exports); tslib_1.__exportStar(require_toNetInput(), exports); tslib_1.__exportStar(require_types2(), exports); }); // build/NeuralNetwork.js var require_NeuralNetwork = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.NeuralNetwork = void 0; const tf = require_dist(); const getModelUris_1 = require_getModelUris(); const dom_1 = require_dom(); const env_1 = require_env(); class NeuralNetwork { constructor(_name) { this._name = _name; this._params = void 0; this._paramMappings = []; } get params() { return this._params; } get paramMappings() { return this._paramMappings; } get isLoaded() { return !!this.params; } getParamFromPath(paramPath) { const {obj, objProp} = this.traversePropertyPath(paramPath); return obj[objProp]; } reassignParamFromPath(paramPath, tensor17) { const {obj, objProp} = this.traversePropertyPath(paramPath); 
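// Dispose the tensor currently stored at this param path before swapping in the replacement,
// so repeated reassignment (e.g. the variable()/freeze() round trips below) does not leak memory.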
obj[objProp].dispose(); obj[objProp] = tensor17; } getParamList() { return this._paramMappings.map(({paramPath}) => ({ path: paramPath, tensor: this.getParamFromPath(paramPath) })); } getTrainableParams() { return this.getParamList().filter((param) => param.tensor instanceof tf.Variable); } getFrozenParams() { return this.getParamList().filter((param) => !(param.tensor instanceof tf.Variable)); } variable() { this.getFrozenParams().forEach(({path, tensor: tensor17}) => { this.reassignParamFromPath(path, tensor17.variable()); }); } freeze() { this.getTrainableParams().forEach(({path, tensor: variable3}) => { const tensor17 = tf.tensor(variable3.dataSync()); variable3.dispose(); this.reassignParamFromPath(path, tensor17); }); } dispose(throwOnRedispose = true) { this.getParamList().forEach((param) => { if (throwOnRedispose && param.tensor.isDisposed) { throw new Error(`param tensor has already been disposed for path ${param.path}`); } param.tensor.dispose(); }); this._params = void 0; } serializeParams() { return new Float32Array(this.getParamList().map(({tensor: tensor17}) => Array.from(tensor17.dataSync())).reduce((flat, arr) => flat.concat(arr))); } async load(weightsOrUrl) { if (weightsOrUrl instanceof Float32Array) { this.extractWeights(weightsOrUrl); return; } await this.loadFromUri(weightsOrUrl); } async loadFromUri(uri) { if (uri && typeof uri !== "string") { throw new Error(`${this._name}.loadFromUri - expected model uri`); } const weightMap = await dom_1.loadWeightMap(uri, this.getDefaultModelName()); this.loadFromWeightMap(weightMap); } async loadFromDisk(filePath) { if (filePath && typeof filePath !== "string") { throw new Error(`${this._name}.loadFromDisk - expected model file path`); } const {readFile} = env_1.env.getEnv(); const {manifestUri, modelBaseUri} = getModelUris_1.getModelUris(filePath, this.getDefaultModelName()); const fetchWeightsFromDisk = (filePaths) => Promise.all(filePaths.map((filePath2) => readFile(filePath2).then((buf) => buf.buffer))); const loadWeights2 = tf.io.weightsLoaderFactory(fetchWeightsFromDisk); const manifest = JSON.parse((await readFile(manifestUri)).toString()); const weightMap = await loadWeights2(manifest, modelBaseUri); this.loadFromWeightMap(weightMap); } loadFromWeightMap(weightMap) { const {paramMappings, params} = this.extractParamsFromWeigthMap(weightMap); this._paramMappings = paramMappings; this._params = params; } extractWeights(weights) { const {paramMappings, params} = this.extractParams(weights); this._paramMappings = paramMappings; this._params = params; } traversePropertyPath(paramPath) { if (!this.params) { throw new Error(`traversePropertyPath - model has no loaded params`); } const result = paramPath.split("/").reduce((res, objProp2) => { if (!res.nextObj.hasOwnProperty(objProp2)) { throw new Error(`traversePropertyPath - object does not have property ${objProp2}, for path ${paramPath}`); } return {obj: res.nextObj, objProp: objProp2, nextObj: res.nextObj[objProp2]}; }, {nextObj: this.params}); const {obj, objProp} = result; if (!obj || !objProp || !(obj[objProp] instanceof tf.Tensor)) { throw new Error(`traversePropertyPath - parameter is not a tensor, for path ${paramPath}`); } return {obj, objProp}; } } exports.NeuralNetwork = NeuralNetwork; }); // build/common/depthwiseSeparableConv.js var require_depthwiseSeparableConv = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.depthwiseSeparableConv = void 0; const tf = require_dist(); function depthwiseSeparableConv(x, 
params, stride) { return tf.tidy(() => { let out = tf.separableConv2d(x, params.depthwise_filter, params.pointwise_filter, stride, "same"); out = tf.add(out, params.bias); return out; }); } exports.depthwiseSeparableConv = depthwiseSeparableConv; }); // build/faceFeatureExtractor/denseBlock.js var require_denseBlock = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.denseBlock4 = exports.denseBlock3 = void 0; const tf = require_dist(); const depthwiseSeparableConv_1 = require_depthwiseSeparableConv(); function denseBlock3(x, denseBlockParams, isFirstLayer = false) { return tf.tidy(() => { const out1 = tf.relu(isFirstLayer ? tf.add(tf.conv2d(x, denseBlockParams.conv0.filters, [2, 2], "same"), denseBlockParams.conv0.bias) : depthwiseSeparableConv_1.depthwiseSeparableConv(x, denseBlockParams.conv0, [2, 2])); const out2 = depthwiseSeparableConv_1.depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]); const in3 = tf.relu(tf.add(out1, out2)); const out3 = depthwiseSeparableConv_1.depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]); return tf.relu(tf.add(out1, tf.add(out2, out3))); }); } exports.denseBlock3 = denseBlock3; function denseBlock4(x, denseBlockParams, isFirstLayer = false, isScaleDown = true) { return tf.tidy(() => { const out1 = tf.relu(isFirstLayer ? tf.add(tf.conv2d(x, denseBlockParams.conv0.filters, isScaleDown ? [2, 2] : [1, 1], "same"), denseBlockParams.conv0.bias) : depthwiseSeparableConv_1.depthwiseSeparableConv(x, denseBlockParams.conv0, isScaleDown ? [2, 2] : [1, 1])); const out2 = depthwiseSeparableConv_1.depthwiseSeparableConv(out1, denseBlockParams.conv1, [1, 1]); const in3 = tf.relu(tf.add(out1, out2)); const out3 = depthwiseSeparableConv_1.depthwiseSeparableConv(in3, denseBlockParams.conv2, [1, 1]); const in4 = tf.relu(tf.add(out1, tf.add(out2, out3))); const out4 = depthwiseSeparableConv_1.depthwiseSeparableConv(in4, denseBlockParams.conv3, [1, 1]); return tf.relu(tf.add(out1, tf.add(out2, tf.add(out3, out4)))); }); } exports.denseBlock4 = denseBlock4; }); // build/common/convLayer.js var require_convLayer = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.convLayer = void 0; const tf = require_dist(); function convLayer(x, params, padding = "same", withRelu = false) { return tf.tidy(() => { const out = tf.add(tf.conv2d(x, params.filters, [1, 1], padding), params.bias); return withRelu ? 
tf.relu(out) : out; }); } exports.convLayer = convLayer; }); // build/common/disposeUnusedWeightTensors.js var require_disposeUnusedWeightTensors = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.disposeUnusedWeightTensors = void 0; function disposeUnusedWeightTensors(weightMap, paramMappings) { Object.keys(weightMap).forEach((path) => { if (!paramMappings.some((pm) => pm.originalPath === path)) { weightMap[path].dispose(); } }); } exports.disposeUnusedWeightTensors = disposeUnusedWeightTensors; }); // build/common/extractConvParamsFactory.js var require_extractConvParamsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractConvParamsFactory = void 0; const tf = require_dist(); function extractConvParamsFactory(extractWeights, paramMappings) { return function(channelsIn, channelsOut, filterSize, mappedPrefix) { const filters = tf.tensor4d(extractWeights(channelsIn * channelsOut * filterSize * filterSize), [filterSize, filterSize, channelsIn, channelsOut]); const bias = tf.tensor1d(extractWeights(channelsOut)); paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); return {filters, bias}; }; } exports.extractConvParamsFactory = extractConvParamsFactory; }); // build/common/extractFCParamsFactory.js var require_extractFCParamsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractFCParamsFactory = void 0; const tf = require_dist(); function extractFCParamsFactory(extractWeights, paramMappings) { return function(channelsIn, channelsOut, mappedPrefix) { const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut]); const fc_bias = tf.tensor1d(extractWeights(channelsOut)); paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/bias`}); return { weights: fc_weights, bias: fc_bias }; }; } exports.extractFCParamsFactory = extractFCParamsFactory; }); // build/common/types.js var require_types3 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.SeparableConvParams = void 0; class SeparableConvParams { constructor(depthwise_filter, pointwise_filter, bias) { this.depthwise_filter = depthwise_filter; this.pointwise_filter = pointwise_filter; this.bias = bias; } } exports.SeparableConvParams = SeparableConvParams; }); // build/common/extractSeparableConvParamsFactory.js var require_extractSeparableConvParamsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.loadSeparableConvParamsFactory = exports.extractSeparableConvParamsFactory = void 0; const tf = require_dist(); const types_1 = require_types3(); function extractSeparableConvParamsFactory(extractWeights, paramMappings) { return function(channelsIn, channelsOut, mappedPrefix) { const depthwise_filter = tf.tensor4d(extractWeights(3 * 3 * channelsIn), [3, 3, channelsIn, 1]); const pointwise_filter = tf.tensor4d(extractWeights(channelsIn * channelsOut), [1, 1, channelsIn, channelsOut]); const bias = tf.tensor1d(extractWeights(channelsOut)); paramMappings.push({paramPath: `${mappedPrefix}/depthwise_filter`}, {paramPath: `${mappedPrefix}/pointwise_filter`}, {paramPath: `${mappedPrefix}/bias`}); return new types_1.SeparableConvParams(depthwise_filter, pointwise_filter, bias); }; } 
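// Weight-budget note (illustrative): the factory above consumes 3*3*channelsIn floats for the
// depthwise filter, channelsIn*channelsOut for the pointwise filter and channelsOut for the bias.
// For example, with channelsIn = 32 and channelsOut = 64 (as in the "dense1" block extracted
// further down), a single separable conv takes 288 + 2048 + 64 = 2400 values from the flat array.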
exports.extractSeparableConvParamsFactory = extractSeparableConvParamsFactory; function loadSeparableConvParamsFactory(extractWeightEntry) { return function(prefix) { const depthwise_filter = extractWeightEntry(`${prefix}/depthwise_filter`, 4); const pointwise_filter = extractWeightEntry(`${prefix}/pointwise_filter`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); return new types_1.SeparableConvParams(depthwise_filter, pointwise_filter, bias); }; } exports.loadSeparableConvParamsFactory = loadSeparableConvParamsFactory; }); // build/common/extractWeightEntryFactory.js var require_extractWeightEntryFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractWeightEntryFactory = void 0; const utils_1 = require_utils(); function extractWeightEntryFactory(weightMap, paramMappings) { return function(originalPath, paramRank, mappedPath) { const tensor17 = weightMap[originalPath]; if (!utils_1.isTensor(tensor17, paramRank)) { throw new Error(`expected weightMap[${originalPath}] to be a Tensor${paramRank}D, instead have ${tensor17}`); } paramMappings.push({originalPath, paramPath: mappedPath || originalPath}); return tensor17; }; } exports.extractWeightEntryFactory = extractWeightEntryFactory; }); // build/common/extractWeightsFactory.js var require_extractWeightsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractWeightsFactory = void 0; function extractWeightsFactory(weights) { let remainingWeights = weights; function extractWeights(numWeights) { const ret = remainingWeights.slice(0, numWeights); remainingWeights = remainingWeights.slice(numWeights); return ret; } function getRemainingWeights() { return remainingWeights; } return { extractWeights, getRemainingWeights }; } exports.extractWeightsFactory = extractWeightsFactory; }); // build/common/index.js var require_common = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_convLayer(), exports); tslib_1.__exportStar(require_depthwiseSeparableConv(), exports); tslib_1.__exportStar(require_disposeUnusedWeightTensors(), exports); tslib_1.__exportStar(require_extractConvParamsFactory(), exports); tslib_1.__exportStar(require_extractFCParamsFactory(), exports); tslib_1.__exportStar(require_extractSeparableConvParamsFactory(), exports); tslib_1.__exportStar(require_extractWeightEntryFactory(), exports); tslib_1.__exportStar(require_extractWeightsFactory(), exports); tslib_1.__exportStar(require_getModelUris(), exports); tslib_1.__exportStar(require_types3(), exports); }); // build/faceFeatureExtractor/extractorsFactory.js var require_extractorsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractorsFactory = void 0; const common_1 = require_common(); function extractorsFactory(extractWeights, paramMappings) { const extractConvParams = common_1.extractConvParamsFactory(extractWeights, paramMappings); const extractSeparableConvParams = common_1.extractSeparableConvParamsFactory(extractWeights, paramMappings); function extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer = false) { const conv0 = isFirstLayer ? 
extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv0`) : extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/conv0`); const conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv1`); const conv2 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv2`); return {conv0, conv1, conv2}; } function extractDenseBlock4Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer = false) { const {conv0, conv1, conv2} = extractDenseBlock3Params(channelsIn, channelsOut, mappedPrefix, isFirstLayer); const conv3 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/conv3`); return {conv0, conv1, conv2, conv3}; } return { extractDenseBlock3Params, extractDenseBlock4Params }; } exports.extractorsFactory = extractorsFactory; }); // build/faceFeatureExtractor/extractParams.js var require_extractParams = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParams = void 0; const common_1 = require_common(); const extractorsFactory_1 = require_extractorsFactory(); function extractParams(weights) { const paramMappings = []; const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const {extractDenseBlock4Params} = extractorsFactory_1.extractorsFactory(extractWeights, paramMappings); const dense0 = extractDenseBlock4Params(3, 32, "dense0", true); const dense1 = extractDenseBlock4Params(32, 64, "dense1"); const dense2 = extractDenseBlock4Params(64, 128, "dense2"); const dense3 = extractDenseBlock4Params(128, 256, "dense3"); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return { paramMappings, params: {dense0, dense1, dense2, dense3} }; } exports.extractParams = extractParams; }); // build/common/loadConvParamsFactory.js var require_loadConvParamsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.loadConvParamsFactory = void 0; function loadConvParamsFactory(extractWeightEntry) { return function(prefix) { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); return {filters, bias}; }; } exports.loadConvParamsFactory = loadConvParamsFactory; }); // build/faceFeatureExtractor/loadParamsFactory.js var require_loadParamsFactory = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.loadParamsFactory = void 0; const common_1 = require_common(); const loadConvParamsFactory_1 = require_loadConvParamsFactory(); function loadParamsFactory(weightMap, paramMappings) { const extractWeightEntry = common_1.extractWeightEntryFactory(weightMap, paramMappings); const extractConvParams = loadConvParamsFactory_1.loadConvParamsFactory(extractWeightEntry); const extractSeparableConvParams = common_1.loadSeparableConvParamsFactory(extractWeightEntry); function extractDenseBlock3Params(prefix, isFirstLayer = false) { const conv0 = isFirstLayer ? extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); const conv1 = extractSeparableConvParams(`${prefix}/conv1`); const conv2 = extractSeparableConvParams(`${prefix}/conv2`); return {conv0, conv1, conv2}; } function extractDenseBlock4Params(prefix, isFirstLayer = false) { const conv0 = isFirstLayer ? 
extractConvParams(`${prefix}/conv0`) : extractSeparableConvParams(`${prefix}/conv0`); const conv1 = extractSeparableConvParams(`${prefix}/conv1`); const conv2 = extractSeparableConvParams(`${prefix}/conv2`); const conv3 = extractSeparableConvParams(`${prefix}/conv3`); return {conv0, conv1, conv2, conv3}; } return { extractDenseBlock3Params, extractDenseBlock4Params }; } exports.loadParamsFactory = loadParamsFactory; }); // build/faceFeatureExtractor/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const common_1 = require_common(); const loadParamsFactory_1 = require_loadParamsFactory(); function extractParamsFromWeigthMap(weightMap) { const paramMappings = []; const {extractDenseBlock4Params} = loadParamsFactory_1.loadParamsFactory(weightMap, paramMappings); const params = { dense0: extractDenseBlock4Params("dense0", true), dense1: extractDenseBlock4Params("dense1"), dense2: extractDenseBlock4Params("dense2"), dense3: extractDenseBlock4Params("dense3") }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/faceFeatureExtractor/FaceFeatureExtractor.js var require_FaceFeatureExtractor = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceFeatureExtractor = void 0; const tf = require_dist(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const ops_1 = require_ops(); const denseBlock_1 = require_denseBlock(); const extractParams_1 = require_extractParams(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap(); class FaceFeatureExtractor extends NeuralNetwork_1.NeuralNetwork { constructor() { super("FaceFeatureExtractor"); } forwardInput(input) { const {params} = this; if (!params) { throw new Error("FaceFeatureExtractor - load model before inference"); } return tf.tidy(() => { const batchTensor = input.toBatchTensor(112, true); const meanRgb = [122.782, 117.001, 104.298]; const normalized = ops_1.normalize(batchTensor, meanRgb).div(tf.scalar(255)); let out = denseBlock_1.denseBlock4(normalized, params.dense0, true); out = denseBlock_1.denseBlock4(out, params.dense1); out = denseBlock_1.denseBlock4(out, params.dense2); out = denseBlock_1.denseBlock4(out, params.dense3); out = tf.avgPool(out, [7, 7], [2, 2], "valid"); return out; }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } getDefaultModelName() { return "face_feature_extractor_model"; } extractParamsFromWeigthMap(weightMap) { return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(weightMap); } extractParams(weights) { return extractParams_1.extractParams(weights); } } exports.FaceFeatureExtractor = FaceFeatureExtractor; }); // build/common/fullyConnectedLayer.js var require_fullyConnectedLayer = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.fullyConnectedLayer = void 0; const tf = require_dist(); function fullyConnectedLayer(x, params) { return tf.tidy(() => tf.add(tf.matMul(x, params.weights), params.bias)); } exports.fullyConnectedLayer = fullyConnectedLayer; }); // build/faceProcessor/extractParams.js var require_extractParams2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); 
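// The classifier head extracted in this module is a single fully-connected layer: it pulls
// channelsIn * channelsOut weights plus channelsOut bias values from the flat weight array.
// As a rough illustration (assuming the 256-channel output of the feature extractor above and
// the 7 expression classes listed further down), FaceExpressionNet's head would account for
// 256 * 7 + 7 = 1799 floats.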
exports.extractParams = void 0; const common_1 = require_common(); function extractParams(weights, channelsIn, channelsOut) { const paramMappings = []; const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const extractFCParams = common_1.extractFCParamsFactory(extractWeights, paramMappings); const fc = extractFCParams(channelsIn, channelsOut, "fc"); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return { paramMappings, params: {fc} }; } exports.extractParams = extractParams; }); // build/faceProcessor/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const common_1 = require_common(); function extractParamsFromWeigthMap(weightMap) { const paramMappings = []; const extractWeightEntry = common_1.extractWeightEntryFactory(weightMap, paramMappings); function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); return {weights, bias}; } const params = { fc: extractFcParams("fc") }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/faceProcessor/util.js var require_util = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.seperateWeightMaps = void 0; function seperateWeightMaps(weightMap) { const featureExtractorMap = {}; const classifierMap = {}; Object.keys(weightMap).forEach((key) => { const map = key.startsWith("fc") ? classifierMap : featureExtractorMap; map[key] = weightMap[key]; }); return {featureExtractorMap, classifierMap}; } exports.seperateWeightMaps = seperateWeightMaps; }); // build/faceProcessor/FaceProcessor.js var require_FaceProcessor = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceProcessor = void 0; const tf = require_dist(); const fullyConnectedLayer_1 = require_fullyConnectedLayer(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const extractParams_1 = require_extractParams2(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap2(); const util_1 = require_util(); class FaceProcessor extends NeuralNetwork_1.NeuralNetwork { constructor(_name, faceFeatureExtractor) { super(_name); this._faceFeatureExtractor = faceFeatureExtractor; } get faceFeatureExtractor() { return this._faceFeatureExtractor; } runNet(input) { const {params} = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } return tf.tidy(() => { const bottleneckFeatures = input instanceof dom_1.NetInput ? 
this.faceFeatureExtractor.forwardInput(input) : input; return fullyConnectedLayer_1.fullyConnectedLayer(bottleneckFeatures.as2D(bottleneckFeatures.shape[0], -1), params.fc); }); } dispose(throwOnRedispose = true) { this.faceFeatureExtractor.dispose(throwOnRedispose); super.dispose(throwOnRedispose); } loadClassifierParams(weights) { const {params, paramMappings} = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } extractClassifierParams(weights) { return extractParams_1.extractParams(weights, this.getClassifierChannelsIn(), this.getClassifierChannelsOut()); } extractParamsFromWeigthMap(weightMap) { const {featureExtractorMap, classifierMap} = util_1.seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(classifierMap); } extractParams(weights) { const cIn = this.getClassifierChannelsIn(); const cOut = this.getClassifierChannelsOut(); const classifierWeightSize = cOut * cIn + cOut; const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize); const classifierWeights = weights.slice(weights.length - classifierWeightSize); this.faceFeatureExtractor.extractWeights(featureExtractorWeights); return this.extractClassifierParams(classifierWeights); } } exports.FaceProcessor = FaceProcessor; }); // build/faceExpressionNet/FaceExpressions.js var require_FaceExpressions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceExpressions = exports.FACE_EXPRESSION_LABELS = void 0; exports.FACE_EXPRESSION_LABELS = ["neutral", "happy", "sad", "angry", "fearful", "disgusted", "surprised"]; class FaceExpressions { constructor(probabilities) { if (probabilities.length !== 7) { throw new Error(`FaceExpressions.constructor - expected probabilities.length to be 7, have: ${probabilities.length}`); } exports.FACE_EXPRESSION_LABELS.forEach((expression, idx) => { this[expression] = probabilities[idx]; }); } asSortedArray() { return exports.FACE_EXPRESSION_LABELS.map((expression) => ({expression, probability: this[expression]})).sort((e0, e1) => e1.probability - e0.probability); } } exports.FaceExpressions = FaceExpressions; }); // build/faceExpressionNet/FaceExpressionNet.js var require_FaceExpressionNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceExpressionNet = void 0; const tf = require_dist(); const dom_1 = require_dom(); const FaceFeatureExtractor_1 = require_FaceFeatureExtractor(); const FaceProcessor_1 = require_FaceProcessor(); const FaceExpressions_1 = require_FaceExpressions(); class FaceExpressionNet extends FaceProcessor_1.FaceProcessor { constructor(faceFeatureExtractor = new FaceFeatureExtractor_1.FaceFeatureExtractor()) { super("FaceExpressionNet", faceFeatureExtractor); } forwardInput(input) { return tf.tidy(() => tf.softmax(this.runNet(input))); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } async predictExpressions(input) { const netInput = await dom_1.toNetInput(input); const out = await this.forwardInput(netInput); const probabilitesByBatch = await Promise.all(tf.unstack(out).map(async (t) => { const data = await t.data(); t.dispose(); return data; })); out.dispose(); const predictionsByBatch = probabilitesByBatch.map((probabilites) => new FaceExpressions_1.FaceExpressions(probabilites)); return netInput.isBatchInput ? 
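// FaceExpressionNet applies softmax to the FaceProcessor output over the 7 FACE_EXPRESSION_LABELS;
// predictExpressions resolves to one FaceExpressions object per batch item, or a single one for non-batch input.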
predictionsByBatch : predictionsByBatch[0]; } getDefaultModelName() { return "face_expression_model"; } getClassifierChannelsIn() { return 256; } getClassifierChannelsOut() { return 7; } } exports.FaceExpressionNet = FaceExpressionNet; }); // build/faceExpressionNet/index.js var require_faceExpressionNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_FaceExpressionNet(), exports); tslib_1.__exportStar(require_FaceExpressions(), exports); }); // build/factories/WithFaceExpressions.js var require_WithFaceExpressions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithFaceExpressions = exports.isWithFaceExpressions = void 0; const FaceExpressions_1 = require_FaceExpressions(); function isWithFaceExpressions(obj) { return obj["expressions"] instanceof FaceExpressions_1.FaceExpressions; } exports.isWithFaceExpressions = isWithFaceExpressions; function extendWithFaceExpressions(sourceObj, expressions) { const extension = {expressions}; return Object.assign({}, sourceObj, extension); } exports.extendWithFaceExpressions = extendWithFaceExpressions; }); // build/draw/drawFaceExpressions.js var require_drawFaceExpressions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.drawFaceExpressions = void 0; const classes_1 = require_classes(); const faceExpressionNet_1 = require_faceExpressionNet(); const WithFaceDetection_1 = require_WithFaceDetection(); const WithFaceExpressions_1 = require_WithFaceExpressions(); const utils_1 = require_utils(); const DrawTextField_1 = require_DrawTextField(); function drawFaceExpressions(canvasArg, faceExpressions, minConfidence = 0.1, textFieldAnchor) { const faceExpressionsArray = Array.isArray(faceExpressions) ? faceExpressions : [faceExpressions]; faceExpressionsArray.forEach((e) => { const expr = e instanceof faceExpressionNet_1.FaceExpressions ? e : WithFaceExpressions_1.isWithFaceExpressions(e) ? e.expressions : void 0; if (!expr) { throw new Error("drawFaceExpressions - expected faceExpressions to be FaceExpressions | WithFaceExpressions<{}> or array thereof"); } const sorted = expr.asSortedArray(); const resultsToDisplay = sorted.filter((expr2) => expr2.probability > minConfidence); const anchor = WithFaceDetection_1.isWithFaceDetection(e) ? 
e.detection.box.bottomLeft : textFieldAnchor || new classes_1.Point(0, 0); const drawTextField = new DrawTextField_1.DrawTextField(resultsToDisplay.map((expr2) => `${expr2.expression} (${utils_1.round(expr2.probability)})`), anchor); drawTextField.draw(canvasArg); }); } exports.drawFaceExpressions = drawFaceExpressions; }); // build/factories/WithFaceLandmarks.js var require_WithFaceLandmarks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithFaceLandmarks = exports.isWithFaceLandmarks = void 0; const FaceDetection_1 = require_FaceDetection(); const FaceLandmarks_1 = require_FaceLandmarks(); const WithFaceDetection_1 = require_WithFaceDetection(); function isWithFaceLandmarks(obj) { return WithFaceDetection_1.isWithFaceDetection(obj) && obj["landmarks"] instanceof FaceLandmarks_1.FaceLandmarks && obj["unshiftedLandmarks"] instanceof FaceLandmarks_1.FaceLandmarks && obj["alignedRect"] instanceof FaceDetection_1.FaceDetection; } exports.isWithFaceLandmarks = isWithFaceLandmarks; function extendWithFaceLandmarks(sourceObj, unshiftedLandmarks) { const {box: shift} = sourceObj.detection; const landmarks = unshiftedLandmarks.shiftBy(shift.x, shift.y); const rect = landmarks.align(); const {imageDims} = sourceObj.detection; const alignedRect = new FaceDetection_1.FaceDetection(sourceObj.detection.score, rect.rescale(imageDims.reverse()), imageDims); const extension = { landmarks, unshiftedLandmarks, alignedRect }; return Object.assign({}, sourceObj, extension); } exports.extendWithFaceLandmarks = extendWithFaceLandmarks; }); // build/draw/DrawFaceLandmarks.js var require_DrawFaceLandmarks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.drawFaceLandmarks = exports.DrawFaceLandmarks = exports.DrawFaceLandmarksOptions = void 0; const FaceLandmarks_1 = require_FaceLandmarks(); const FaceLandmarks68_1 = require_FaceLandmarks68(); const getContext2dOrThrow_1 = require_getContext2dOrThrow(); const WithFaceLandmarks_1 = require_WithFaceLandmarks(); const drawContour_1 = require_drawContour(); class DrawFaceLandmarksOptions { constructor(options = {}) { const {drawLines = true, drawPoints = true, lineWidth, lineColor, pointSize, pointColor} = options; this.drawLines = drawLines; this.drawPoints = drawPoints; this.lineWidth = lineWidth || 1; this.pointSize = pointSize || 2; this.lineColor = lineColor || "rgba(0, 255, 255, 1)"; this.pointColor = pointColor || "rgba(255, 0, 255, 1)"; } } exports.DrawFaceLandmarksOptions = DrawFaceLandmarksOptions; class DrawFaceLandmarks { constructor(faceLandmarks, options = {}) { this.faceLandmarks = faceLandmarks; this.options = new DrawFaceLandmarksOptions(options); } draw(canvasArg) { const ctx = getContext2dOrThrow_1.getContext2dOrThrow(canvasArg); const {drawLines, drawPoints, lineWidth, lineColor, pointSize, pointColor} = this.options; if (drawLines && this.faceLandmarks instanceof FaceLandmarks68_1.FaceLandmarks68) { ctx.strokeStyle = lineColor; ctx.lineWidth = lineWidth; drawContour_1.drawContour(ctx, this.faceLandmarks.getJawOutline()); drawContour_1.drawContour(ctx, this.faceLandmarks.getLeftEyeBrow()); drawContour_1.drawContour(ctx, this.faceLandmarks.getRightEyeBrow()); drawContour_1.drawContour(ctx, this.faceLandmarks.getNose()); drawContour_1.drawContour(ctx, this.faceLandmarks.getLeftEye(), true); drawContour_1.drawContour(ctx, this.faceLandmarks.getRightEye(), true); drawContour_1.drawContour(ctx, 
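// DrawFaceLandmarks.draw: for 68-point landmarks the jaw outline, eyebrows and nose are drawn as open
// contours, while the eyes and mouth pass a trailing `true`, which presumably closes those contours;
// point markers are drawn separately when drawPoints is enabled.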
this.faceLandmarks.getMouth(), true); } if (drawPoints) { ctx.strokeStyle = pointColor; ctx.fillStyle = pointColor; const drawPoint = (pt) => { ctx.beginPath(); ctx.arc(pt.x, pt.y, pointSize, 0, 2 * Math.PI); ctx.fill(); }; this.faceLandmarks.positions.forEach(drawPoint); } } } exports.DrawFaceLandmarks = DrawFaceLandmarks; function drawFaceLandmarks(canvasArg, faceLandmarks) { const faceLandmarksArray = Array.isArray(faceLandmarks) ? faceLandmarks : [faceLandmarks]; faceLandmarksArray.forEach((f) => { const landmarks = f instanceof FaceLandmarks_1.FaceLandmarks ? f : WithFaceLandmarks_1.isWithFaceLandmarks(f) ? f.landmarks : void 0; if (!landmarks) { throw new Error("drawFaceLandmarks - expected faceExpressions to be FaceLandmarks | WithFaceLandmarks> or array thereof"); } new DrawFaceLandmarks(landmarks).draw(canvasArg); }); } exports.drawFaceLandmarks = drawFaceLandmarks; }); // build/draw/index.js var require_draw = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_drawContour(), exports); tslib_1.__exportStar(require_drawDetections(), exports); tslib_1.__exportStar(require_drawFaceExpressions(), exports); tslib_1.__exportStar(require_DrawBox(), exports); tslib_1.__exportStar(require_DrawFaceLandmarks(), exports); tslib_1.__exportStar(require_DrawTextField(), exports); }); // build/xception/extractParams.js var require_extractParams3 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParams = void 0; const common_1 = require_common(); const utils_1 = require_utils(); function extractorsFactory(extractWeights, paramMappings) { const extractConvParams = common_1.extractConvParamsFactory(extractWeights, paramMappings); const extractSeparableConvParams = common_1.extractSeparableConvParamsFactory(extractWeights, paramMappings); function extractReductionBlockParams(channelsIn, channelsOut, mappedPrefix) { const separable_conv0 = extractSeparableConvParams(channelsIn, channelsOut, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channelsOut, channelsOut, `${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(channelsIn, channelsOut, 1, `${mappedPrefix}/expansion_conv`); return {separable_conv0, separable_conv1, expansion_conv}; } function extractMainBlockParams(channels, mappedPrefix) { const separable_conv0 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(channels, channels, `${mappedPrefix}/separable_conv2`); return {separable_conv0, separable_conv1, separable_conv2}; } return { extractConvParams, extractSeparableConvParams, extractReductionBlockParams, extractMainBlockParams }; } function extractParams(weights, numMainBlocks) { const paramMappings = []; const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const {extractConvParams, extractSeparableConvParams, extractReductionBlockParams, extractMainBlockParams} = extractorsFactory(extractWeights, paramMappings); const entry_flow_conv_in = extractConvParams(3, 32, 3, "entry_flow/conv_in"); const entry_flow_reduction_block_0 = extractReductionBlockParams(32, 64, "entry_flow/reduction_block_0"); const entry_flow_reduction_block_1 = extractReductionBlockParams(64, 128, 
"entry_flow/reduction_block_1"); const entry_flow = { conv_in: entry_flow_conv_in, reduction_block_0: entry_flow_reduction_block_0, reduction_block_1: entry_flow_reduction_block_1 }; const middle_flow = {}; utils_1.range(numMainBlocks, 0, 1).forEach((idx) => { middle_flow[`main_block_${idx}`] = extractMainBlockParams(128, `middle_flow/main_block_${idx}`); }); const exit_flow_reduction_block = extractReductionBlockParams(128, 256, "exit_flow/reduction_block"); const exit_flow_separable_conv = extractSeparableConvParams(256, 512, "exit_flow/separable_conv"); const exit_flow = { reduction_block: exit_flow_reduction_block, separable_conv: exit_flow_separable_conv }; if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return { paramMappings, params: {entry_flow, middle_flow, exit_flow} }; } exports.extractParams = extractParams; }); // build/xception/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap3 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const common_1 = require_common(); const loadConvParamsFactory_1 = require_loadConvParamsFactory(); const utils_1 = require_utils(); function loadParamsFactory(weightMap, paramMappings) { const extractWeightEntry = common_1.extractWeightEntryFactory(weightMap, paramMappings); const extractConvParams = loadConvParamsFactory_1.loadConvParamsFactory(extractWeightEntry); const extractSeparableConvParams = common_1.loadSeparableConvParamsFactory(extractWeightEntry); function extractReductionBlockParams(mappedPrefix) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const expansion_conv = extractConvParams(`${mappedPrefix}/expansion_conv`); return {separable_conv0, separable_conv1, expansion_conv}; } function extractMainBlockParams(mappedPrefix) { const separable_conv0 = extractSeparableConvParams(`${mappedPrefix}/separable_conv0`); const separable_conv1 = extractSeparableConvParams(`${mappedPrefix}/separable_conv1`); const separable_conv2 = extractSeparableConvParams(`${mappedPrefix}/separable_conv2`); return {separable_conv0, separable_conv1, separable_conv2}; } return { extractConvParams, extractSeparableConvParams, extractReductionBlockParams, extractMainBlockParams }; } function extractParamsFromWeigthMap(weightMap, numMainBlocks) { const paramMappings = []; const {extractConvParams, extractSeparableConvParams, extractReductionBlockParams, extractMainBlockParams} = loadParamsFactory(weightMap, paramMappings); const entry_flow_conv_in = extractConvParams("entry_flow/conv_in"); const entry_flow_reduction_block_0 = extractReductionBlockParams("entry_flow/reduction_block_0"); const entry_flow_reduction_block_1 = extractReductionBlockParams("entry_flow/reduction_block_1"); const entry_flow = { conv_in: entry_flow_conv_in, reduction_block_0: entry_flow_reduction_block_0, reduction_block_1: entry_flow_reduction_block_1 }; const middle_flow = {}; utils_1.range(numMainBlocks, 0, 1).forEach((idx) => { middle_flow[`main_block_${idx}`] = extractMainBlockParams(`middle_flow/main_block_${idx}`); }); const exit_flow_reduction_block = extractReductionBlockParams("exit_flow/reduction_block"); const exit_flow_separable_conv = extractSeparableConvParams("exit_flow/separable_conv"); const exit_flow = { reduction_block: exit_flow_reduction_block, 
separable_conv: exit_flow_separable_conv }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params: {entry_flow, middle_flow, exit_flow}, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/xception/TinyXception.js var require_TinyXception = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyXception = void 0; const tf = require_dist(); const common_1 = require_common(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const ops_1 = require_ops(); const utils_1 = require_utils(); const extractParams_1 = require_extractParams3(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap3(); function conv(x, params, stride) { return tf.add(tf.conv2d(x, params.filters, stride, "same"), params.bias); } function reductionBlock(x, params, isActivateInput = true) { let out = isActivateInput ? tf.relu(x) : x; out = common_1.depthwiseSeparableConv(out, params.separable_conv0, [1, 1]); out = common_1.depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]); out = tf.maxPool(out, [3, 3], [2, 2], "same"); out = tf.add(out, conv(x, params.expansion_conv, [2, 2])); return out; } function mainBlock(x, params) { let out = common_1.depthwiseSeparableConv(tf.relu(x), params.separable_conv0, [1, 1]); out = common_1.depthwiseSeparableConv(tf.relu(out), params.separable_conv1, [1, 1]); out = common_1.depthwiseSeparableConv(tf.relu(out), params.separable_conv2, [1, 1]); out = tf.add(out, x); return out; } class TinyXception extends NeuralNetwork_1.NeuralNetwork { constructor(numMainBlocks) { super("TinyXception"); this._numMainBlocks = numMainBlocks; } forwardInput(input) { const {params} = this; if (!params) { throw new Error("TinyXception - load model before inference"); } return tf.tidy(() => { const batchTensor = input.toBatchTensor(112, true); const meanRgb = [122.782, 117.001, 104.298]; const normalized = ops_1.normalize(batchTensor, meanRgb).div(tf.scalar(256)); let out = tf.relu(conv(normalized, params.entry_flow.conv_in, [2, 2])); out = reductionBlock(out, params.entry_flow.reduction_block_0, false); out = reductionBlock(out, params.entry_flow.reduction_block_1); utils_1.range(this._numMainBlocks, 0, 1).forEach((idx) => { out = mainBlock(out, params.middle_flow[`main_block_${idx}`]); }); out = reductionBlock(out, params.exit_flow.reduction_block); out = tf.relu(common_1.depthwiseSeparableConv(out, params.exit_flow.separable_conv, [1, 1])); return out; }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } getDefaultModelName() { return "tiny_xception_model"; } extractParamsFromWeigthMap(weightMap) { return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(weightMap, this._numMainBlocks); } extractParams(weights) { return extractParams_1.extractParams(weights, this._numMainBlocks); } } exports.TinyXception = TinyXception; }); // build/ageGenderNet/extractParams.js var require_extractParams4 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParams = void 0; const common_1 = require_common(); function extractParams(weights) { const paramMappings = []; const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const extractFCParams = common_1.extractFCParamsFactory(extractWeights, paramMappings); const age = extractFCParams(512, 1, "fc/age"); const gender = extractFCParams(512, 2, 
"fc/gender"); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return { paramMappings, params: {fc: {age, gender}} }; } exports.extractParams = extractParams; }); // build/ageGenderNet/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap4 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const common_1 = require_common(); function extractParamsFromWeigthMap(weightMap) { const paramMappings = []; const extractWeightEntry = common_1.extractWeightEntryFactory(weightMap, paramMappings); function extractFcParams(prefix) { const weights = extractWeightEntry(`${prefix}/weights`, 2); const bias = extractWeightEntry(`${prefix}/bias`, 1); return {weights, bias}; } const params = { fc: { age: extractFcParams("fc/age"), gender: extractFcParams("fc/gender") } }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/ageGenderNet/types.js var require_types4 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.Gender = void 0; var Gender; (function(Gender2) { Gender2["FEMALE"] = "female"; Gender2["MALE"] = "male"; })(Gender = exports.Gender || (exports.Gender = {})); }); // build/ageGenderNet/AgeGenderNet.js var require_AgeGenderNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.AgeGenderNet = void 0; const tf = require_dist(); const fullyConnectedLayer_1 = require_fullyConnectedLayer(); const util_1 = require_util(); const TinyXception_1 = require_TinyXception(); const extractParams_1 = require_extractParams4(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap4(); const types_1 = require_types4(); const NeuralNetwork_1 = require_NeuralNetwork(); const dom_1 = require_dom(); class AgeGenderNet extends NeuralNetwork_1.NeuralNetwork { constructor(faceFeatureExtractor = new TinyXception_1.TinyXception(2)) { super("AgeGenderNet"); this._faceFeatureExtractor = faceFeatureExtractor; } get faceFeatureExtractor() { return this._faceFeatureExtractor; } runNet(input) { const {params} = this; if (!params) { throw new Error(`${this._name} - load model before inference`); } return tf.tidy(() => { const bottleneckFeatures = input instanceof dom_1.NetInput ? 
this.faceFeatureExtractor.forwardInput(input) : input; const pooled = tf.avgPool(bottleneckFeatures, [7, 7], [2, 2], "valid").as2D(bottleneckFeatures.shape[0], -1); const age = fullyConnectedLayer_1.fullyConnectedLayer(pooled, params.fc.age).as1D(); const gender = fullyConnectedLayer_1.fullyConnectedLayer(pooled, params.fc.gender); return {age, gender}; }); } forwardInput(input) { return tf.tidy(() => { const {age, gender} = this.runNet(input); return {age, gender: tf.softmax(gender)}; }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } async predictAgeAndGender(input) { const netInput = await dom_1.toNetInput(input); const out = await this.forwardInput(netInput); const ages = tf.unstack(out.age); const genders = tf.unstack(out.gender); const ageAndGenderTensors = ages.map((ageTensor, i) => ({ ageTensor, genderTensor: genders[i] })); const predictionsByBatch = await Promise.all(ageAndGenderTensors.map(async ({ageTensor, genderTensor}) => { const age = (await ageTensor.data())[0]; const probMale = (await genderTensor.data())[0]; const isMale = probMale > 0.5; const gender = isMale ? types_1.Gender.MALE : types_1.Gender.FEMALE; const genderProbability = isMale ? probMale : 1 - probMale; ageTensor.dispose(); genderTensor.dispose(); return {age, gender, genderProbability}; })); out.age.dispose(); out.gender.dispose(); return netInput.isBatchInput ? predictionsByBatch : predictionsByBatch[0]; } getDefaultModelName() { return "age_gender_model"; } dispose(throwOnRedispose = true) { this.faceFeatureExtractor.dispose(throwOnRedispose); super.dispose(throwOnRedispose); } loadClassifierParams(weights) { const {params, paramMappings} = this.extractClassifierParams(weights); this._params = params; this._paramMappings = paramMappings; } extractClassifierParams(weights) { return extractParams_1.extractParams(weights); } extractParamsFromWeigthMap(weightMap) { const {featureExtractorMap, classifierMap} = util_1.seperateWeightMaps(weightMap); this.faceFeatureExtractor.loadFromWeightMap(featureExtractorMap); return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(classifierMap); } extractParams(weights) { const classifierWeightSize = 512 * 1 + 1 + (512 * 2 + 2); const featureExtractorWeights = weights.slice(0, weights.length - classifierWeightSize); const classifierWeights = weights.slice(weights.length - classifierWeightSize); this.faceFeatureExtractor.extractWeights(featureExtractorWeights); return this.extractClassifierParams(classifierWeights); } } exports.AgeGenderNet = AgeGenderNet; }); // build/ageGenderNet/index.js var require_ageGenderNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_AgeGenderNet(), exports); tslib_1.__exportStar(require_types4(), exports); }); // build/faceLandmarkNet/FaceLandmark68NetBase.js var require_FaceLandmark68NetBase = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmark68NetBase = void 0; const tf = require_dist(); const classes_1 = require_classes(); const FaceLandmarks68_1 = require_FaceLandmarks68(); const dom_1 = require_dom(); const FaceProcessor_1 = require_FaceProcessor(); const utils_1 = require_utils(); class FaceLandmark68NetBase extends FaceProcessor_1.FaceProcessor { postProcess(output, inputSize, originalDimensions) { const inputDimensions = originalDimensions.map(({width, height}) => { const scale = inputSize / 
Math.max(height, width); return { width: width * scale, height: height * scale }; }); const batchSize = inputDimensions.length; return tf.tidy(() => { const createInterleavedTensor = (fillX, fillY) => tf.stack([ tf.fill([68], fillX), tf.fill([68], fillY) ], 1).as2D(1, 136).as1D(); const getPadding = (batchIdx, cond) => { const {width, height} = inputDimensions[batchIdx]; return cond(width, height) ? Math.abs(width - height) / 2 : 0; }; const getPaddingX = (batchIdx) => getPadding(batchIdx, (w, h) => w < h); const getPaddingY = (batchIdx) => getPadding(batchIdx, (w, h) => h < w); const landmarkTensors = output.mul(tf.fill([batchSize, 136], inputSize)).sub(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(getPaddingX(batchIdx), getPaddingY(batchIdx))))).div(tf.stack(Array.from(Array(batchSize), (_, batchIdx) => createInterleavedTensor(inputDimensions[batchIdx].width, inputDimensions[batchIdx].height)))); return landmarkTensors; }); } forwardInput(input) { return tf.tidy(() => { const out = this.runNet(input); return this.postProcess(out, input.inputSize, input.inputDimensions.map(([height, width]) => ({height, width}))); }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } async detectLandmarks(input) { const netInput = await dom_1.toNetInput(input); const landmarkTensors = tf.tidy(() => tf.unstack(this.forwardInput(netInput))); const landmarksForBatch = await Promise.all(landmarkTensors.map(async (landmarkTensor, batchIdx) => { const landmarksArray = Array.from(await landmarkTensor.data()); const xCoords = landmarksArray.filter((_, i) => utils_1.isEven(i)); const yCoords = landmarksArray.filter((_, i) => !utils_1.isEven(i)); return new FaceLandmarks68_1.FaceLandmarks68(Array(68).fill(0).map((_, i) => new classes_1.Point(xCoords[i], yCoords[i])), { height: netInput.getInputHeight(batchIdx), width: netInput.getInputWidth(batchIdx) }); })); landmarkTensors.forEach((t) => t.dispose()); return netInput.isBatchInput ? 
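// FaceLandmark68NetBase.postProcess maps the raw network output back to relative coordinates:
// multiply by inputSize, subtract the letterbox padding, then divide by the rescaled input dimensions;
// detectLandmarks then wraps the 68 (x, y) pairs into FaceLandmarks68 instances, one per batch item.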
landmarksForBatch : landmarksForBatch[0]; } getClassifierChannelsOut() { return 136; } } exports.FaceLandmark68NetBase = FaceLandmark68NetBase; }); // build/faceLandmarkNet/FaceLandmark68Net.js var require_FaceLandmark68Net = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmark68Net = void 0; const FaceFeatureExtractor_1 = require_FaceFeatureExtractor(); const FaceLandmark68NetBase_1 = require_FaceLandmark68NetBase(); class FaceLandmark68Net extends FaceLandmark68NetBase_1.FaceLandmark68NetBase { constructor(faceFeatureExtractor = new FaceFeatureExtractor_1.FaceFeatureExtractor()) { super("FaceLandmark68Net", faceFeatureExtractor); } getDefaultModelName() { return "face_landmark_68_model"; } getClassifierChannelsIn() { return 256; } } exports.FaceLandmark68Net = FaceLandmark68Net; }); // build/faceFeatureExtractor/extractParamsFromWeigthMapTiny.js var require_extractParamsFromWeigthMapTiny = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMapTiny = void 0; const common_1 = require_common(); const loadParamsFactory_1 = require_loadParamsFactory(); function extractParamsFromWeigthMapTiny(weightMap) { const paramMappings = []; const {extractDenseBlock3Params} = loadParamsFactory_1.loadParamsFactory(weightMap, paramMappings); const params = { dense0: extractDenseBlock3Params("dense0", true), dense1: extractDenseBlock3Params("dense1"), dense2: extractDenseBlock3Params("dense2") }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMapTiny = extractParamsFromWeigthMapTiny; }); // build/faceFeatureExtractor/extractParamsTiny.js var require_extractParamsTiny = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsTiny = void 0; const common_1 = require_common(); const extractorsFactory_1 = require_extractorsFactory(); function extractParamsTiny(weights) { const paramMappings = []; const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const {extractDenseBlock3Params} = extractorsFactory_1.extractorsFactory(extractWeights, paramMappings); const dense0 = extractDenseBlock3Params(3, 32, "dense0", true); const dense1 = extractDenseBlock3Params(32, 64, "dense1"); const dense2 = extractDenseBlock3Params(64, 128, "dense2"); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return { paramMappings, params: {dense0, dense1, dense2} }; } exports.extractParamsTiny = extractParamsTiny; }); // build/faceFeatureExtractor/TinyFaceFeatureExtractor.js var require_TinyFaceFeatureExtractor = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyFaceFeatureExtractor = void 0; const tf = require_dist(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const ops_1 = require_ops(); const denseBlock_1 = require_denseBlock(); const extractParamsFromWeigthMapTiny_1 = require_extractParamsFromWeigthMapTiny(); const extractParamsTiny_1 = require_extractParamsTiny(); class TinyFaceFeatureExtractor extends NeuralNetwork_1.NeuralNetwork { constructor() { super("TinyFaceFeatureExtractor"); } forwardInput(input) { const {params} = this; if (!params) { throw new Error("TinyFaceFeatureExtractor - load model before inference"); } return tf.tidy(() 
=> { const batchTensor = input.toBatchTensor(112, true); const meanRgb = [122.782, 117.001, 104.298]; const normalized = ops_1.normalize(batchTensor, meanRgb).div(tf.scalar(255)); let out = denseBlock_1.denseBlock3(normalized, params.dense0, true); out = denseBlock_1.denseBlock3(out, params.dense1); out = denseBlock_1.denseBlock3(out, params.dense2); out = tf.avgPool(out, [14, 14], [2, 2], "valid"); return out; }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } getDefaultModelName() { return "face_feature_extractor_tiny_model"; } extractParamsFromWeigthMap(weightMap) { return extractParamsFromWeigthMapTiny_1.extractParamsFromWeigthMapTiny(weightMap); } extractParams(weights) { return extractParamsTiny_1.extractParamsTiny(weights); } } exports.TinyFaceFeatureExtractor = TinyFaceFeatureExtractor; }); // build/faceLandmarkNet/FaceLandmark68TinyNet.js var require_FaceLandmark68TinyNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmark68TinyNet = void 0; const TinyFaceFeatureExtractor_1 = require_TinyFaceFeatureExtractor(); const FaceLandmark68NetBase_1 = require_FaceLandmark68NetBase(); class FaceLandmark68TinyNet extends FaceLandmark68NetBase_1.FaceLandmark68NetBase { constructor(faceFeatureExtractor = new TinyFaceFeatureExtractor_1.TinyFaceFeatureExtractor()) { super("FaceLandmark68TinyNet", faceFeatureExtractor); } getDefaultModelName() { return "face_landmark_68_tiny_model"; } getClassifierChannelsIn() { return 128; } } exports.FaceLandmark68TinyNet = FaceLandmark68TinyNet; }); // build/faceLandmarkNet/index.js var require_faceLandmarkNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceLandmarkNet = void 0; const tslib_1 = require_tslib_es6(); const FaceLandmark68Net_1 = require_FaceLandmark68Net(); tslib_1.__exportStar(require_FaceLandmark68Net(), exports); tslib_1.__exportStar(require_FaceLandmark68TinyNet(), exports); class FaceLandmarkNet extends FaceLandmark68Net_1.FaceLandmark68Net { } exports.FaceLandmarkNet = FaceLandmarkNet; }); // build/faceRecognitionNet/scaleLayer.js var require_scaleLayer = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.scale = void 0; const tf = require_dist(); function scale(x, params) { return tf.add(tf.mul(x, params.weights), params.biases); } exports.scale = scale; }); // build/faceRecognitionNet/convLayer.js var require_convLayer2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.convDown = exports.convNoRelu = exports.conv = void 0; const tf = require_dist(); const scaleLayer_1 = require_scaleLayer(); function convLayer(x, params, strides, withRelu, padding = "same") { const {filters, bias} = params.conv; let out = tf.conv2d(x, filters, strides, padding); out = tf.add(out, bias); out = scaleLayer_1.scale(out, params.scale); return withRelu ? 
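// faceRecognitionNet conv layers: conv2d plus bias followed by a per-channel scale layer and an
// optional ReLU; conv/convNoRelu use stride 1 with "same" padding, convDown uses stride 2 with "valid".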
tf.relu(out) : out; } function conv(x, params) { return convLayer(x, params, [1, 1], true); } exports.conv = conv; function convNoRelu(x, params) { return convLayer(x, params, [1, 1], false); } exports.convNoRelu = convNoRelu; function convDown(x, params) { return convLayer(x, params, [2, 2], true, "valid"); } exports.convDown = convDown; }); // build/faceRecognitionNet/extractParams.js var require_extractParams5 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParams = void 0; const tf = require_dist(); const common_1 = require_common(); const utils_1 = require_utils(); function extractorsFactory(extractWeights, paramMappings) { function extractFilterValues(numFilterValues, numFilters, filterSize) { const weights = extractWeights(numFilterValues); const depth = weights.length / (numFilters * filterSize * filterSize); if (utils_1.isFloat(depth)) { throw new Error(`depth has to be an integer: ${depth}, weights.length: ${weights.length}, numFilters: ${numFilters}, filterSize: ${filterSize}`); } return tf.tidy(() => tf.transpose(tf.tensor4d(weights, [numFilters, depth, filterSize, filterSize]), [2, 3, 1, 0])); } function extractConvParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const filters = extractFilterValues(numFilterValues, numFilters, filterSize); const bias = tf.tensor1d(extractWeights(numFilters)); paramMappings.push({paramPath: `${mappedPrefix}/filters`}, {paramPath: `${mappedPrefix}/bias`}); return {filters, bias}; } function extractScaleLayerParams(numWeights, mappedPrefix) { const weights = tf.tensor1d(extractWeights(numWeights)); const biases = tf.tensor1d(extractWeights(numWeights)); paramMappings.push({paramPath: `${mappedPrefix}/weights`}, {paramPath: `${mappedPrefix}/biases`}); return { weights, biases }; } function extractConvLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix) { const conv = extractConvParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv`); const scale = extractScaleLayerParams(numFilters, `${mappedPrefix}/scale`); return {conv, scale}; } function extractResidualLayerParams(numFilterValues, numFilters, filterSize, mappedPrefix, isDown = false) { const conv1 = extractConvLayerParams((isDown ? 
0.5 : 1) * numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv1`); const conv2 = extractConvLayerParams(numFilterValues, numFilters, filterSize, `${mappedPrefix}/conv2`); return {conv1, conv2}; } return { extractConvLayerParams, extractResidualLayerParams }; } function extractParams(weights) { const {extractWeights, getRemainingWeights} = common_1.extractWeightsFactory(weights); const paramMappings = []; const {extractConvLayerParams, extractResidualLayerParams} = extractorsFactory(extractWeights, paramMappings); const conv32_down = extractConvLayerParams(4704, 32, 7, "conv32_down"); const conv32_1 = extractResidualLayerParams(9216, 32, 3, "conv32_1"); const conv32_2 = extractResidualLayerParams(9216, 32, 3, "conv32_2"); const conv32_3 = extractResidualLayerParams(9216, 32, 3, "conv32_3"); const conv64_down = extractResidualLayerParams(36864, 64, 3, "conv64_down", true); const conv64_1 = extractResidualLayerParams(36864, 64, 3, "conv64_1"); const conv64_2 = extractResidualLayerParams(36864, 64, 3, "conv64_2"); const conv64_3 = extractResidualLayerParams(36864, 64, 3, "conv64_3"); const conv128_down = extractResidualLayerParams(147456, 128, 3, "conv128_down", true); const conv128_1 = extractResidualLayerParams(147456, 128, 3, "conv128_1"); const conv128_2 = extractResidualLayerParams(147456, 128, 3, "conv128_2"); const conv256_down = extractResidualLayerParams(589824, 256, 3, "conv256_down", true); const conv256_1 = extractResidualLayerParams(589824, 256, 3, "conv256_1"); const conv256_2 = extractResidualLayerParams(589824, 256, 3, "conv256_2"); const conv256_down_out = extractResidualLayerParams(589824, 256, 3, "conv256_down_out"); const fc = tf.tidy(() => tf.transpose(tf.tensor2d(extractWeights(256 * 128), [128, 256]), [1, 0])); paramMappings.push({paramPath: `fc`}); if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } const params = { conv32_down, conv32_1, conv32_2, conv32_3, conv64_down, conv64_1, conv64_2, conv64_3, conv128_down, conv128_1, conv128_2, conv256_down, conv256_1, conv256_2, conv256_down_out, fc }; return {params, paramMappings}; } exports.extractParams = extractParams; }); // build/faceRecognitionNet/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap5 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const common_1 = require_common(); const utils_1 = require_utils(); function extractorsFactory(weightMap, paramMappings) { const extractWeightEntry = common_1.extractWeightEntryFactory(weightMap, paramMappings); function extractScaleLayerParams(prefix) { const weights = extractWeightEntry(`${prefix}/scale/weights`, 1); const biases = extractWeightEntry(`${prefix}/scale/biases`, 1); return {weights, biases}; } function extractConvLayerParams(prefix) { const filters = extractWeightEntry(`${prefix}/conv/filters`, 4); const bias = extractWeightEntry(`${prefix}/conv/bias`, 1); const scale = extractScaleLayerParams(prefix); return {conv: {filters, bias}, scale}; } function extractResidualLayerParams(prefix) { return { conv1: extractConvLayerParams(`${prefix}/conv1`), conv2: extractConvLayerParams(`${prefix}/conv2`) }; } return { extractConvLayerParams, extractResidualLayerParams }; } function extractParamsFromWeigthMap(weightMap) { const paramMappings = []; const {extractConvLayerParams, extractResidualLayerParams} = extractorsFactory(weightMap, paramMappings); const conv32_down = 
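// FaceRecognitionNet parameters: a ResNet-style stack (conv32_*, conv64_*, conv128_*, conv256_*)
// plus a 256 x 128 fc matrix that projects the pooled features down to the 128-D face descriptor.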
extractConvLayerParams("conv32_down"); const conv32_1 = extractResidualLayerParams("conv32_1"); const conv32_2 = extractResidualLayerParams("conv32_2"); const conv32_3 = extractResidualLayerParams("conv32_3"); const conv64_down = extractResidualLayerParams("conv64_down"); const conv64_1 = extractResidualLayerParams("conv64_1"); const conv64_2 = extractResidualLayerParams("conv64_2"); const conv64_3 = extractResidualLayerParams("conv64_3"); const conv128_down = extractResidualLayerParams("conv128_down"); const conv128_1 = extractResidualLayerParams("conv128_1"); const conv128_2 = extractResidualLayerParams("conv128_2"); const conv256_down = extractResidualLayerParams("conv256_down"); const conv256_1 = extractResidualLayerParams("conv256_1"); const conv256_2 = extractResidualLayerParams("conv256_2"); const conv256_down_out = extractResidualLayerParams("conv256_down_out"); const fc = weightMap["fc"]; paramMappings.push({originalPath: "fc", paramPath: "fc"}); if (!utils_1.isTensor2D(fc)) { throw new Error(`expected weightMap[fc] to be a Tensor2D, instead have ${fc}`); } const params = { conv32_down, conv32_1, conv32_2, conv32_3, conv64_down, conv64_1, conv64_2, conv64_3, conv128_down, conv128_1, conv128_2, conv256_down, conv256_1, conv256_2, conv256_down_out, fc }; common_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/faceRecognitionNet/residualLayer.js var require_residualLayer = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.residualDown = exports.residual = void 0; const tf = require_dist(); const convLayer_1 = require_convLayer2(); function residual(x, params) { let out = convLayer_1.conv(x, params.conv1); out = convLayer_1.convNoRelu(out, params.conv2); out = tf.add(out, x); out = tf.relu(out); return out; } exports.residual = residual; function residualDown(x, params) { let out = convLayer_1.convDown(x, params.conv1); out = convLayer_1.convNoRelu(out, params.conv2); let pooled = tf.avgPool(x, 2, 2, "valid"); const zeros9 = tf.zeros(pooled.shape); const isPad = pooled.shape[3] !== out.shape[3]; const isAdjustShape = pooled.shape[1] !== out.shape[1] || pooled.shape[2] !== out.shape[2]; if (isAdjustShape) { const padShapeX = [...out.shape]; padShapeX[1] = 1; const zerosW = tf.zeros(padShapeX); out = tf.concat([out, zerosW], 1); const padShapeY = [...out.shape]; padShapeY[2] = 1; const zerosH = tf.zeros(padShapeY); out = tf.concat([out, zerosH], 2); } pooled = isPad ? 
tf.concat([pooled, zeros9], 3) : pooled; out = tf.add(pooled, out); out = tf.relu(out); return out; } exports.residualDown = residualDown; }); // build/faceRecognitionNet/FaceRecognitionNet.js var require_FaceRecognitionNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceRecognitionNet = void 0; const tf = require_dist(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const ops_1 = require_ops(); const convLayer_1 = require_convLayer2(); const extractParams_1 = require_extractParams5(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap5(); const residualLayer_1 = require_residualLayer(); class FaceRecognitionNet extends NeuralNetwork_1.NeuralNetwork { constructor() { super("FaceRecognitionNet"); } forwardInput(input) { const {params} = this; if (!params) { throw new Error("FaceRecognitionNet - load model before inference"); } return tf.tidy(() => { const batchTensor = input.toBatchTensor(150, true).toFloat(); const meanRgb = [122.782, 117.001, 104.298]; const normalized = ops_1.normalize(batchTensor, meanRgb).div(tf.scalar(256)); let out = convLayer_1.convDown(normalized, params.conv32_down); out = tf.maxPool(out, 3, 2, "valid"); out = residualLayer_1.residual(out, params.conv32_1); out = residualLayer_1.residual(out, params.conv32_2); out = residualLayer_1.residual(out, params.conv32_3); out = residualLayer_1.residualDown(out, params.conv64_down); out = residualLayer_1.residual(out, params.conv64_1); out = residualLayer_1.residual(out, params.conv64_2); out = residualLayer_1.residual(out, params.conv64_3); out = residualLayer_1.residualDown(out, params.conv128_down); out = residualLayer_1.residual(out, params.conv128_1); out = residualLayer_1.residual(out, params.conv128_2); out = residualLayer_1.residualDown(out, params.conv256_down); out = residualLayer_1.residual(out, params.conv256_1); out = residualLayer_1.residual(out, params.conv256_2); out = residualLayer_1.residualDown(out, params.conv256_down_out); const globalAvg = out.mean([1, 2]); const fullyConnected = tf.matMul(globalAvg, params.fc); return fullyConnected; }); } async forward(input) { return this.forwardInput(await dom_1.toNetInput(input)); } async computeFaceDescriptor(input) { const netInput = await dom_1.toNetInput(input); const faceDescriptorTensors = tf.tidy(() => tf.unstack(this.forwardInput(netInput))); const faceDescriptorsForBatch = await Promise.all(faceDescriptorTensors.map((t) => t.data())); faceDescriptorTensors.forEach((t) => t.dispose()); return netInput.isBatchInput ? 
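// FaceRecognitionNet.forwardInput: 150 x 150 input, mean-RGB normalization divided by 256, the
// residual stack above, global average pooling, then matMul with fc; computeFaceDescriptor returns
// the resulting 128-D descriptor(s) as typed arrays.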
faceDescriptorsForBatch : faceDescriptorsForBatch[0]; } getDefaultModelName() { return "face_recognition_model"; } extractParamsFromWeigthMap(weightMap) { return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(weightMap); } extractParams(weights) { return extractParams_1.extractParams(weights); } } exports.FaceRecognitionNet = FaceRecognitionNet; }); // build/faceRecognitionNet/index.js var require_faceRecognitionNet = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createFaceRecognitionNet = void 0; const tslib_1 = require_tslib_es6(); const FaceRecognitionNet_1 = require_FaceRecognitionNet(); tslib_1.__exportStar(require_FaceRecognitionNet(), exports); function createFaceRecognitionNet(weights) { const net = new FaceRecognitionNet_1.FaceRecognitionNet(); net.extractWeights(weights); return net; } exports.createFaceRecognitionNet = createFaceRecognitionNet; }); // build/factories/WithFaceDescriptor.js var require_WithFaceDescriptor = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithFaceDescriptor = void 0; function extendWithFaceDescriptor(sourceObj, descriptor) { const extension = {descriptor}; return Object.assign({}, sourceObj, extension); } exports.extendWithFaceDescriptor = extendWithFaceDescriptor; }); // build/factories/WithAge.js var require_WithAge = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithAge = exports.isWithAge = void 0; function isWithAge(obj) { return typeof obj["age"] === "number"; } exports.isWithAge = isWithAge; function extendWithAge(sourceObj, age) { const extension = {age}; return Object.assign({}, sourceObj, extension); } exports.extendWithAge = extendWithAge; }); // build/factories/WithGender.js var require_WithGender = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extendWithGender = exports.isWithGender = void 0; const types_1 = require_types4(); const utils_1 = require_utils(); function isWithGender(obj) { return (obj["gender"] === types_1.Gender.MALE || obj["gender"] === types_1.Gender.FEMALE) && utils_1.isValidProbablitiy(obj["genderProbability"]); } exports.isWithGender = isWithGender; function extendWithGender(sourceObj, gender, genderProbability) { const extension = {gender, genderProbability}; return Object.assign({}, sourceObj, extension); } exports.extendWithGender = extendWithGender; }); // build/factories/index.js var require_factories = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_WithFaceDescriptor(), exports); tslib_1.__exportStar(require_WithFaceDetection(), exports); tslib_1.__exportStar(require_WithFaceExpressions(), exports); tslib_1.__exportStar(require_WithFaceLandmarks(), exports); tslib_1.__exportStar(require_WithAge(), exports); tslib_1.__exportStar(require_WithGender(), exports); }); // build/globalApi/ComposableTask.js var require_ComposableTask = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.ComposableTask = void 0; class ComposableTask { async then(onfulfilled) { return onfulfilled(await this.run()); } async run() { throw new Error("ComposableTask - run is not implemented"); } } exports.ComposableTask = ComposableTask; }); // build/globalApi/extractFacesAndComputeResults.js var 
require_extractFacesAndComputeResults = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractSingleFaceAndComputeResult = exports.extractAllFacesAndComputeResults = void 0; const tf = require_dist(); const dom_1 = require_dom(); const WithFaceLandmarks_1 = require_WithFaceLandmarks(); async function extractAllFacesAndComputeResults(parentResults, input, computeResults, extractedFaces, getRectForAlignment = ({alignedRect}) => alignedRect) { const faceBoxes = parentResults.map((parentResult) => WithFaceLandmarks_1.isWithFaceLandmarks(parentResult) ? getRectForAlignment(parentResult) : parentResult.detection); const faces = extractedFaces || (input instanceof tf.Tensor ? await dom_1.extractFaceTensors(input, faceBoxes) : await dom_1.extractFaces(input, faceBoxes)); const results = await computeResults(faces); faces.forEach((f) => f instanceof tf.Tensor && f.dispose()); return results; } exports.extractAllFacesAndComputeResults = extractAllFacesAndComputeResults; async function extractSingleFaceAndComputeResult(parentResult, input, computeResult, extractedFaces, getRectForAlignment) { return extractAllFacesAndComputeResults([parentResult], input, async (faces) => computeResult(faces[0]), extractedFaces, getRectForAlignment); } exports.extractSingleFaceAndComputeResult = extractSingleFaceAndComputeResult; }); // build/tinyYolov2/config.js var require_config = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.validateConfig = void 0; const isNumber2 = (arg) => typeof arg === "number"; function validateConfig(config) { if (!config) { throw new Error(`invalid config: ${config}`); } if (typeof config.withSeparableConvs !== "boolean") { throw new Error(`config.withSeparableConvs has to be a boolean, have: ${config.withSeparableConvs}`); } if (!isNumber2(config.iouThreshold) || config.iouThreshold < 0 || config.iouThreshold > 1) { throw new Error(`config.iouThreshold has to be a number between [0, 1], have: ${config.iouThreshold}`); } if (!Array.isArray(config.classes) || !config.classes.length || !config.classes.every((c) => typeof c === "string")) { throw new Error(`config.classes has to be an array class names: string[], have: ${JSON.stringify(config.classes)}`); } if (!Array.isArray(config.anchors) || !config.anchors.length || !config.anchors.map((a) => a || {}).every((a) => isNumber2(a.x) && isNumber2(a.y))) { throw new Error(`config.anchors has to be an array of { x: number, y: number }, have: ${JSON.stringify(config.anchors)}`); } if (config.meanRgb && (!Array.isArray(config.meanRgb) || config.meanRgb.length !== 3 || !config.meanRgb.every(isNumber2))) { throw new Error(`config.meanRgb has to be an array of shape [number, number, number], have: ${JSON.stringify(config.meanRgb)}`); } } exports.validateConfig = validateConfig; }); // build/tinyYolov2/leaky.js var require_leaky = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.leaky = void 0; const tf = require_dist(); function leaky(x) { return tf.tidy(() => { const min5 = tf.mul(x, tf.scalar(0.10000000149011612)); return tf.add(tf.relu(tf.sub(x, min5)), min5); }); } exports.leaky = leaky; }); // build/tinyYolov2/convWithBatchNorm.js var require_convWithBatchNorm = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.convWithBatchNorm = void 0; const tf = require_dist(); const leaky_1 = require_leaky(); function 
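// tinyYolov2 convWithBatchNorm: pad by 1 pixel, 3x3 "valid" convolution, folded batch norm
// (subtract bn.sub, multiply by bn.truediv), add the bias, then leaky ReLU.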
convWithBatchNorm(x, params) { return tf.tidy(() => { let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]); out = tf.conv2d(out, params.conv.filters, [1, 1], "valid"); out = tf.sub(out, params.bn.sub); out = tf.mul(out, params.bn.truediv); out = tf.add(out, params.conv.bias); return leaky_1.leaky(out); }); } exports.convWithBatchNorm = convWithBatchNorm; }); // build/tinyYolov2/depthwiseSeparableConv.js var require_depthwiseSeparableConv2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.depthwiseSeparableConv = void 0; const tf = require_dist(); const leaky_1 = require_leaky(); function depthwiseSeparableConv(x, params) { return tf.tidy(() => { let out = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]]); out = tf.separableConv2d(out, params.depthwise_filter, params.pointwise_filter, [1, 1], "valid"); out = tf.add(out, params.bias); return leaky_1.leaky(out); }); } exports.depthwiseSeparableConv = depthwiseSeparableConv; }); // build/tinyYolov2/extractParams.js var require_extractParams6 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParams = void 0; const tf = require_dist(); const common_1 = require_common(); const extractSeparableConvParamsFactory_1 = require_extractSeparableConvParamsFactory(); const extractWeightsFactory_1 = require_extractWeightsFactory(); function extractorsFactory(extractWeights, paramMappings) { const extractConvParams = common_1.extractConvParamsFactory(extractWeights, paramMappings); function extractBatchNormParams(size, mappedPrefix) { const sub32 = tf.tensor1d(extractWeights(size)); const truediv = tf.tensor1d(extractWeights(size)); paramMappings.push({paramPath: `${mappedPrefix}/sub`}, {paramPath: `${mappedPrefix}/truediv`}); return {sub: sub32, truediv}; } function extractConvWithBatchNormParams(channelsIn, channelsOut, mappedPrefix) { const conv = extractConvParams(channelsIn, channelsOut, 3, `${mappedPrefix}/conv`); const bn = extractBatchNormParams(channelsOut, `${mappedPrefix}/bn`); return {conv, bn}; } const extractSeparableConvParams = extractSeparableConvParamsFactory_1.extractSeparableConvParamsFactory(extractWeights, paramMappings); return { extractConvParams, extractConvWithBatchNormParams, extractSeparableConvParams }; } function extractParams(weights, config, boxEncodingSize, filterSizes) { const {extractWeights, getRemainingWeights} = extractWeightsFactory_1.extractWeightsFactory(weights); const paramMappings = []; const {extractConvParams, extractConvWithBatchNormParams, extractSeparableConvParams} = extractorsFactory(extractWeights, paramMappings); let params; if (config.withSeparableConvs) { const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes; const conv0 = config.isFirstLayerConv2d ? extractConvParams(s0, s1, 3, "conv0") : extractSeparableConvParams(s0, s1, "conv0"); const conv1 = extractSeparableConvParams(s1, s2, "conv1"); const conv2 = extractSeparableConvParams(s2, s3, "conv2"); const conv3 = extractSeparableConvParams(s3, s4, "conv3"); const conv4 = extractSeparableConvParams(s4, s5, "conv4"); const conv5 = extractSeparableConvParams(s5, s6, "conv5"); const conv6 = s7 ? extractSeparableConvParams(s6, s7, "conv6") : void 0; const conv7 = s8 ? 
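// tinyYolov2/extractParams (separable variant): filterSizes supplies up to nine channel counts
// (s0..s8) for conv0..conv7, with conv6/conv7 optional; the final conv8 is a 1x1 convolution with
// 5 * boxEncodingSize output channels.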
extractSeparableConvParams(s7, s8, "conv7") : void 0; const conv8 = extractConvParams(s8 || s7 || s6, 5 * boxEncodingSize, 1, "conv8"); params = {conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8}; } else { const [s0, s1, s2, s3, s4, s5, s6, s7, s8] = filterSizes; const conv0 = extractConvWithBatchNormParams(s0, s1, "conv0"); const conv1 = extractConvWithBatchNormParams(s1, s2, "conv1"); const conv2 = extractConvWithBatchNormParams(s2, s3, "conv2"); const conv3 = extractConvWithBatchNormParams(s3, s4, "conv3"); const conv4 = extractConvWithBatchNormParams(s4, s5, "conv4"); const conv5 = extractConvWithBatchNormParams(s5, s6, "conv5"); const conv6 = extractConvWithBatchNormParams(s6, s7, "conv6"); const conv7 = extractConvWithBatchNormParams(s7, s8, "conv7"); const conv8 = extractConvParams(s8, 5 * boxEncodingSize, 1, "conv8"); params = {conv0, conv1, conv2, conv3, conv4, conv5, conv6, conv7, conv8}; } if (getRemainingWeights().length !== 0) { throw new Error(`weights remaing after extract: ${getRemainingWeights().length}`); } return {params, paramMappings}; } exports.extractParams = extractParams; }); // build/tinyYolov2/extractParamsFromWeigthMap.js var require_extractParamsFromWeigthMap6 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.extractParamsFromWeigthMap = void 0; const disposeUnusedWeightTensors_1 = require_disposeUnusedWeightTensors(); const extractSeparableConvParamsFactory_1 = require_extractSeparableConvParamsFactory(); const extractWeightEntryFactory_1 = require_extractWeightEntryFactory(); function extractorsFactory(weightMap, paramMappings) { const extractWeightEntry = extractWeightEntryFactory_1.extractWeightEntryFactory(weightMap, paramMappings); function extractBatchNormParams(prefix) { const sub32 = extractWeightEntry(`${prefix}/sub`, 1); const truediv = extractWeightEntry(`${prefix}/truediv`, 1); return {sub: sub32, truediv}; } function extractConvParams(prefix) { const filters = extractWeightEntry(`${prefix}/filters`, 4); const bias = extractWeightEntry(`${prefix}/bias`, 1); return {filters, bias}; } function extractConvWithBatchNormParams(prefix) { const conv = extractConvParams(`${prefix}/conv`); const bn = extractBatchNormParams(`${prefix}/bn`); return {conv, bn}; } const extractSeparableConvParams = extractSeparableConvParamsFactory_1.loadSeparableConvParamsFactory(extractWeightEntry); return { extractConvParams, extractConvWithBatchNormParams, extractSeparableConvParams }; } function extractParamsFromWeigthMap(weightMap, config) { const paramMappings = []; const {extractConvParams, extractConvWithBatchNormParams, extractSeparableConvParams} = extractorsFactory(weightMap, paramMappings); let params; if (config.withSeparableConvs) { const numFilters = config.filterSizes && config.filterSizes.length || 9; params = { conv0: config.isFirstLayerConv2d ? extractConvParams("conv0") : extractSeparableConvParams("conv0"), conv1: extractSeparableConvParams("conv1"), conv2: extractSeparableConvParams("conv2"), conv3: extractSeparableConvParams("conv3"), conv4: extractSeparableConvParams("conv4"), conv5: extractSeparableConvParams("conv5"), conv6: numFilters > 7 ? extractSeparableConvParams("conv6") : void 0, conv7: numFilters > 8 ? 
extractSeparableConvParams("conv7") : void 0, conv8: extractConvParams("conv8") }; } else { params = { conv0: extractConvWithBatchNormParams("conv0"), conv1: extractConvWithBatchNormParams("conv1"), conv2: extractConvWithBatchNormParams("conv2"), conv3: extractConvWithBatchNormParams("conv3"), conv4: extractConvWithBatchNormParams("conv4"), conv5: extractConvWithBatchNormParams("conv5"), conv6: extractConvWithBatchNormParams("conv6"), conv7: extractConvWithBatchNormParams("conv7"), conv8: extractConvParams("conv8") }; } disposeUnusedWeightTensors_1.disposeUnusedWeightTensors(weightMap, paramMappings); return {params, paramMappings}; } exports.extractParamsFromWeigthMap = extractParamsFromWeigthMap; }); // build/tinyYolov2/TinyYolov2Options.js var require_TinyYolov2Options = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyYolov2Options = exports.TinyYolov2SizeType = void 0; var TinyYolov2SizeType; (function(TinyYolov2SizeType2) { TinyYolov2SizeType2[TinyYolov2SizeType2["XS"] = 224] = "XS"; TinyYolov2SizeType2[TinyYolov2SizeType2["SM"] = 320] = "SM"; TinyYolov2SizeType2[TinyYolov2SizeType2["MD"] = 416] = "MD"; TinyYolov2SizeType2[TinyYolov2SizeType2["LG"] = 608] = "LG"; })(TinyYolov2SizeType = exports.TinyYolov2SizeType || (exports.TinyYolov2SizeType = {})); class TinyYolov2Options { constructor({inputSize, scoreThreshold} = {}) { this._name = "TinyYolov2Options"; this._inputSize = inputSize || 416; this._scoreThreshold = scoreThreshold || 0.5; if (typeof this._inputSize !== "number" || this._inputSize % 32 !== 0) { throw new Error(`${this._name} - expected inputSize to be a number divisible by 32`); } if (typeof this._scoreThreshold !== "number" || this._scoreThreshold <= 0 || this._scoreThreshold >= 1) { throw new Error(`${this._name} - expected scoreThreshold to be a number between 0 and 1`); } } get inputSize() { return this._inputSize; } get scoreThreshold() { return this._scoreThreshold; } } exports.TinyYolov2Options = TinyYolov2Options; }); // build/tinyYolov2/TinyYolov2Base.js var require_TinyYolov2Base = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyYolov2Base = void 0; const tf = require_dist(); const BoundingBox_1 = require_BoundingBox(); const ObjectDetection_1 = require_ObjectDetection(); const common_1 = require_common(); const dom_1 = require_dom(); const NeuralNetwork_1 = require_NeuralNetwork(); const ops_1 = require_ops(); const nonMaxSuppression_1 = require_nonMaxSuppression(); const normalize_1 = require_normalize(); const config_1 = require_config(); const convWithBatchNorm_1 = require_convWithBatchNorm(); const depthwiseSeparableConv_1 = require_depthwiseSeparableConv2(); const extractParams_1 = require_extractParams6(); const extractParamsFromWeigthMap_1 = require_extractParamsFromWeigthMap6(); const leaky_1 = require_leaky(); const TinyYolov2Options_1 = require_TinyYolov2Options(); class TinyYolov2Base extends NeuralNetwork_1.NeuralNetwork { constructor(config) { super("TinyYolov2"); config_1.validateConfig(config); this._config = config; } get config() { return this._config; } get withClassScores() { return this.config.withClassScores || this.config.classes.length > 1; } get boxEncodingSize() { return 5 + (this.withClassScores ? 
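/* Box encoding per anchor: [x, y, w, h, objectness] = 5 values, plus one
 * score per configured class when class scores are enabled; extractBoxes
 * below slices the output tensor along this layout. */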
this.config.classes.length : 0); } runTinyYolov2(x, params) { let out = convWithBatchNorm_1.convWithBatchNorm(x, params.conv0); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv1); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv2); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv3); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv4); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv5); out = tf.maxPool(out, [2, 2], [1, 1], "same"); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv6); out = convWithBatchNorm_1.convWithBatchNorm(out, params.conv7); return common_1.convLayer(out, params.conv8, "valid", false); } runMobilenet(x, params) { let out = this.config.isFirstLayerConv2d ? leaky_1.leaky(common_1.convLayer(x, params.conv0, "valid", false)) : depthwiseSeparableConv_1.depthwiseSeparableConv(x, params.conv0); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv1); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv2); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv3); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv4); out = tf.maxPool(out, [2, 2], [2, 2], "same"); out = depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv5); out = tf.maxPool(out, [2, 2], [1, 1], "same"); out = params.conv6 ? depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv6) : out; out = params.conv7 ? depthwiseSeparableConv_1.depthwiseSeparableConv(out, params.conv7) : out; return common_1.convLayer(out, params.conv8, "valid", false); } forwardInput(input, inputSize) { const {params} = this; if (!params) { throw new Error("TinyYolov2 - load model before inference"); } return tf.tidy(() => { let batchTensor = input.toBatchTensor(inputSize, false).toFloat(); batchTensor = this.config.meanRgb ? normalize_1.normalize(batchTensor, this.config.meanRgb) : batchTensor; batchTensor = batchTensor.div(tf.scalar(256)); return this.config.withSeparableConvs ? 
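/* Preprocessing above: optional meanRgb subtraction, then scaling into [0, 1)
 * by dividing by 256; the separable-conv (MobileNet-style) graph runs when
 * withSeparableConvs is set, otherwise the plain TinyYolov2 graph. */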
this.runMobilenet(batchTensor, params) : this.runTinyYolov2(batchTensor, params); }); } async forward(input, inputSize) { return await this.forwardInput(await dom_1.toNetInput(input), inputSize); } async detect(input, forwardParams = {}) { const {inputSize, scoreThreshold} = new TinyYolov2Options_1.TinyYolov2Options(forwardParams); const netInput = await dom_1.toNetInput(input); const out = await this.forwardInput(netInput, inputSize); const out0 = tf.tidy(() => tf.unstack(out)[0].expandDims()); const inputDimensions = { width: netInput.getInputWidth(0), height: netInput.getInputHeight(0) }; const results = await this.extractBoxes(out0, netInput.getReshapedInputDimensions(0), scoreThreshold); out.dispose(); out0.dispose(); const boxes = results.map((res) => res.box); const scores = results.map((res) => res.score); const classScores = results.map((res) => res.classScore); const classNames = results.map((res) => this.config.classes[res.label]); const indices = nonMaxSuppression_1.nonMaxSuppression(boxes.map((box) => box.rescale(inputSize)), scores, this.config.iouThreshold, true); const detections = indices.map((idx) => new ObjectDetection_1.ObjectDetection(scores[idx], classScores[idx], classNames[idx], boxes[idx], inputDimensions)); return detections; } getDefaultModelName() { return ""; } extractParamsFromWeigthMap(weightMap) { return extractParamsFromWeigthMap_1.extractParamsFromWeigthMap(weightMap, this.config); } extractParams(weights) { const filterSizes = this.config.filterSizes || TinyYolov2Base.DEFAULT_FILTER_SIZES; const numFilters = filterSizes ? filterSizes.length : void 0; if (numFilters !== 7 && numFilters !== 8 && numFilters !== 9) { throw new Error(`TinyYolov2 - expected 7 | 8 | 9 convolutional filters, but found ${numFilters} filterSizes in config`); } return extractParams_1.extractParams(weights, this.config, this.boxEncodingSize, filterSizes); } async extractBoxes(outputTensor, inputBlobDimensions, scoreThreshold) { const {width, height} = inputBlobDimensions; const inputSize = Math.max(width, height); const correctionFactorX = inputSize / width; const correctionFactorY = inputSize / height; const numCells = outputTensor.shape[1]; const numBoxes = this.config.anchors.length; const [boxesTensor, scoresTensor, classScoresTensor] = tf.tidy(() => { const reshaped = outputTensor.reshape([numCells, numCells, numBoxes, this.boxEncodingSize]); const boxes = reshaped.slice([0, 0, 0, 0], [numCells, numCells, numBoxes, 4]); const scores = reshaped.slice([0, 0, 0, 4], [numCells, numCells, numBoxes, 1]); const classScores = this.withClassScores ? 
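/* extractBoxes performs the usual YOLO decoding: sigmoid on the cell-relative
 * box offsets and the objectness score, exp on width/height scaled by the
 * anchor dimensions, and correctionFactorX/Y to compensate for non-square
 * (padded) inputs, before non-max suppression picks the final detections. */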
tf.softmax(reshaped.slice([0, 0, 0, 5], [numCells, numCells, numBoxes, this.config.classes.length]), 3) : tf.scalar(0); return [boxes, scores, classScores]; }); const results = []; const scoresData = await scoresTensor.array(); const boxesData = await boxesTensor.array(); for (let row = 0; row < numCells; row++) { for (let col = 0; col < numCells; col++) { for (let anchor = 0; anchor < numBoxes; anchor++) { const score = ops_1.sigmoid(scoresData[row][col][anchor][0]); if (!scoreThreshold || score > scoreThreshold) { const ctX = (col + ops_1.sigmoid(boxesData[row][col][anchor][0])) / numCells * correctionFactorX; const ctY = (row + ops_1.sigmoid(boxesData[row][col][anchor][1])) / numCells * correctionFactorY; const width2 = Math.exp(boxesData[row][col][anchor][2]) * this.config.anchors[anchor].x / numCells * correctionFactorX; const height2 = Math.exp(boxesData[row][col][anchor][3]) * this.config.anchors[anchor].y / numCells * correctionFactorY; const x = ctX - width2 / 2; const y = ctY - height2 / 2; const pos = {row, col, anchor}; const {classScore, label} = this.withClassScores ? await this.extractPredictedClass(classScoresTensor, pos) : {classScore: 1, label: 0}; results.push({ box: new BoundingBox_1.BoundingBox(x, y, x + width2, y + height2), score, classScore: score * classScore, label, ...pos }); } } } } boxesTensor.dispose(); scoresTensor.dispose(); classScoresTensor.dispose(); return results; } async extractPredictedClass(classesTensor, pos) { const {row, col, anchor} = pos; const classesData = await classesTensor.array(); return Array(this.config.classes.length).fill(0).map((_, i) => classesData[row][col][anchor][i]).map((classScore, label) => ({ classScore, label })).reduce((max7, curr) => max7.classScore > curr.classScore ? max7 : curr); } } exports.TinyYolov2Base = TinyYolov2Base; TinyYolov2Base.DEFAULT_FILTER_SIZES = [ 3, 16, 32, 64, 128, 256, 512, 1024, 1024 ]; }); // build/tinyFaceDetector/const.js var require_const = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.MEAN_RGB = exports.BOX_ANCHORS = exports.IOU_THRESHOLD = void 0; const classes_1 = require_classes(); exports.IOU_THRESHOLD = 0.4; exports.BOX_ANCHORS = [ new classes_1.Point(1.603231, 2.094468), new classes_1.Point(6.041143, 7.080126), new classes_1.Point(2.882459, 3.518061), new classes_1.Point(4.266906, 5.178857), new classes_1.Point(9.041765, 10.66308) ]; exports.MEAN_RGB = [117.001, 114.697, 97.404]; }); // build/tinyFaceDetector/TinyFaceDetector.js var require_TinyFaceDetector = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyFaceDetector = void 0; const classes_1 = require_classes(); const TinyYolov2Base_1 = require_TinyYolov2Base(); const const_1 = require_const(); class TinyFaceDetector extends TinyYolov2Base_1.TinyYolov2Base { constructor() { const config = { withSeparableConvs: true, iouThreshold: const_1.IOU_THRESHOLD, classes: ["face"], anchors: const_1.BOX_ANCHORS, meanRgb: const_1.MEAN_RGB, isFirstLayerConv2d: true, filterSizes: [3, 16, 32, 64, 128, 256, 512] }; super(config); } get anchors() { return this.config.anchors; } async locateFaces(input, forwardParams) { const objectDetections = await this.detect(input, forwardParams); return objectDetections.map((det) => new classes_1.FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); } getDefaultModelName() { return "tiny_face_detector_model"; } extractParamsFromWeigthMap(weightMap) { 
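/* TinyFaceDetector above specializes TinyYolov2Base: a single "face" class,
 * separable convs, the anchors/meanRgb constants from tinyFaceDetector/const.js
 * and a 7-entry filterSizes list, so only conv0-conv5 plus the final conv8
 * carry weights. */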
return super.extractParamsFromWeigthMap(weightMap); } } exports.TinyFaceDetector = TinyFaceDetector; }); // build/tinyYolov2/const.js var require_const2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.DEFAULT_MODEL_NAME_SEPARABLE_CONV = exports.DEFAULT_MODEL_NAME = exports.MEAN_RGB_SEPARABLE = exports.BOX_ANCHORS_SEPARABLE = exports.BOX_ANCHORS = exports.IOU_THRESHOLD = void 0; const classes_1 = require_classes(); exports.IOU_THRESHOLD = 0.4; exports.BOX_ANCHORS = [ new classes_1.Point(0.738768, 0.874946), new classes_1.Point(2.42204, 2.65704), new classes_1.Point(4.30971, 7.04493), new classes_1.Point(10.246, 4.59428), new classes_1.Point(12.6868, 11.8741) ]; exports.BOX_ANCHORS_SEPARABLE = [ new classes_1.Point(1.603231, 2.094468), new classes_1.Point(6.041143, 7.080126), new classes_1.Point(2.882459, 3.518061), new classes_1.Point(4.266906, 5.178857), new classes_1.Point(9.041765, 10.66308) ]; exports.MEAN_RGB_SEPARABLE = [117.001, 114.697, 97.404]; exports.DEFAULT_MODEL_NAME = "tiny_yolov2_model"; exports.DEFAULT_MODEL_NAME_SEPARABLE_CONV = "tiny_yolov2_separable_conv_model"; }); // build/tinyYolov2/TinyYolov2.js var require_TinyYolov2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyYolov2 = void 0; const classes_1 = require_classes(); const const_1 = require_const2(); const TinyYolov2Base_1 = require_TinyYolov2Base(); class TinyYolov2 extends TinyYolov2Base_1.TinyYolov2Base { constructor(withSeparableConvs = true) { const config = Object.assign({}, { withSeparableConvs, iouThreshold: const_1.IOU_THRESHOLD, classes: ["face"] }, withSeparableConvs ? { anchors: const_1.BOX_ANCHORS_SEPARABLE, meanRgb: const_1.MEAN_RGB_SEPARABLE } : { anchors: const_1.BOX_ANCHORS, withClassScores: true }); super(config); } get withSeparableConvs() { return this.config.withSeparableConvs; } get anchors() { return this.config.anchors; } async locateFaces(input, forwardParams) { const objectDetections = await this.detect(input, forwardParams); return objectDetections.map((det) => new classes_1.FaceDetection(det.score, det.relativeBox, {width: det.imageWidth, height: det.imageHeight})); } getDefaultModelName() { return this.withSeparableConvs ? 
const_1.DEFAULT_MODEL_NAME_SEPARABLE_CONV : const_1.DEFAULT_MODEL_NAME; } extractParamsFromWeigthMap(weightMap) { return super.extractParamsFromWeigthMap(weightMap); } } exports.TinyYolov2 = TinyYolov2; }); // build/tinyYolov2/types.js var require_types5 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); }); // build/tinyYolov2/index.js var require_tinyYolov2 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createTinyYolov2 = exports.TinyYolov2 = void 0; const tslib_1 = require_tslib_es6(); const TinyYolov2_1 = require_TinyYolov2(); Object.defineProperty(exports, "TinyYolov2", {enumerable: true, get: function() { return TinyYolov2_1.TinyYolov2; }}); tslib_1.__exportStar(require_TinyYolov2Options(), exports); tslib_1.__exportStar(require_config(), exports); tslib_1.__exportStar(require_types5(), exports); function createTinyYolov2(weights, withSeparableConvs = true) { const net = new TinyYolov2_1.TinyYolov2(withSeparableConvs); net.extractWeights(weights); return net; } exports.createTinyYolov2 = createTinyYolov2; }); // build/globalApi/nets.js var require_nets = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.detectLandmarks = exports.locateFaces = exports.loadFaceDetectionModel = exports.loadAgeGenderModel = exports.loadFaceExpressionModel = exports.loadFaceRecognitionModel = exports.loadFaceLandmarkTinyModel = exports.loadFaceLandmarkModel = exports.loadTinyYolov2Model = exports.loadTinyFaceDetectorModel = exports.predictAgeAndGender = exports.recognizeFaceExpressions = exports.computeFaceDescriptor = exports.detectFaceLandmarksTiny = exports.detectFaceLandmarks = exports.tinyFaceDetector = exports.nets = void 0; const AgeGenderNet_1 = require_AgeGenderNet(); const FaceExpressionNet_1 = require_FaceExpressionNet(); const FaceLandmark68Net_1 = require_FaceLandmark68Net(); const FaceLandmark68TinyNet_1 = require_FaceLandmark68TinyNet(); const FaceRecognitionNet_1 = require_FaceRecognitionNet(); const TinyFaceDetector_1 = require_TinyFaceDetector(); const tinyYolov2_1 = require_tinyYolov2(); exports.nets = { tinyFaceDetector: new TinyFaceDetector_1.TinyFaceDetector(), tinyYolov2: new tinyYolov2_1.TinyYolov2(), faceLandmark68Net: new FaceLandmark68Net_1.FaceLandmark68Net(), faceLandmark68TinyNet: new FaceLandmark68TinyNet_1.FaceLandmark68TinyNet(), faceRecognitionNet: new FaceRecognitionNet_1.FaceRecognitionNet(), faceExpressionNet: new FaceExpressionNet_1.FaceExpressionNet(), ageGenderNet: new AgeGenderNet_1.AgeGenderNet() }; exports.tinyFaceDetector = (input, options) => exports.nets.tinyFaceDetector.locateFaces(input, options); exports.detectFaceLandmarks = (input) => exports.nets.faceLandmark68Net.detectLandmarks(input); exports.detectFaceLandmarksTiny = (input) => exports.nets.faceLandmark68TinyNet.detectLandmarks(input); exports.computeFaceDescriptor = (input) => exports.nets.faceRecognitionNet.computeFaceDescriptor(input); exports.recognizeFaceExpressions = (input) => exports.nets.faceExpressionNet.predictExpressions(input); exports.predictAgeAndGender = (input) => exports.nets.ageGenderNet.predictAgeAndGender(input); exports.loadTinyFaceDetectorModel = (url) => exports.nets.tinyFaceDetector.load(url); exports.loadTinyYolov2Model = (url) => exports.nets.tinyYolov2.load(url); exports.loadFaceLandmarkModel = (url) => exports.nets.faceLandmark68Net.load(url); exports.loadFaceLandmarkTinyModel = (url) => 
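/* Typical use of this global API - a sketch only; "/models" and videoEl are
 * placeholders for your own model path and input element:
 *   await faceapi.nets.tinyFaceDetector.load('/models');
 *   const detections = await faceapi.detectAllFaces(videoEl, new faceapi.TinyFaceDetectorOptions());
 */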
exports.nets.faceLandmark68TinyNet.load(url); exports.loadFaceRecognitionModel = (url) => exports.nets.faceRecognitionNet.load(url); exports.loadFaceExpressionModel = (url) => exports.nets.faceExpressionNet.load(url); exports.loadAgeGenderModel = (url) => exports.nets.ageGenderNet.load(url); exports.loadFaceDetectionModel = exports.loadTinyFaceDetectorModel; exports.locateFaces = TinyFaceDetector_1.TinyFaceDetector; exports.detectLandmarks = exports.detectFaceLandmarks; }); // build/globalApi/PredictFaceExpressionsTask.js var require_PredictFaceExpressionsTask = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.PredictSingleFaceExpressionsWithFaceAlignmentTask = exports.PredictAllFaceExpressionsWithFaceAlignmentTask = exports.PredictSingleFaceExpressionsTask = exports.PredictAllFaceExpressionsTask = exports.PredictFaceExpressionsTaskBase = void 0; const WithFaceExpressions_1 = require_WithFaceExpressions(); const ComposableTask_1 = require_ComposableTask(); const ComputeFaceDescriptorsTasks_1 = require_ComputeFaceDescriptorsTasks(); const extractFacesAndComputeResults_1 = require_extractFacesAndComputeResults(); const nets_1 = require_nets(); const PredictAgeAndGenderTask_1 = require_PredictAgeAndGenderTask(); class PredictFaceExpressionsTaskBase extends ComposableTask_1.ComposableTask { constructor(parentTask, input, extractedFaces) { super(); this.parentTask = parentTask; this.input = input; this.extractedFaces = extractedFaces; } } exports.PredictFaceExpressionsTaskBase = PredictFaceExpressionsTaskBase; class PredictAllFaceExpressionsTask extends PredictFaceExpressionsTaskBase { async run() { const parentResults = await this.parentTask; const faceExpressionsByFace = await extractFacesAndComputeResults_1.extractAllFacesAndComputeResults(parentResults, this.input, async (faces) => await Promise.all(faces.map((face) => nets_1.nets.faceExpressionNet.predictExpressions(face))), this.extractedFaces); return parentResults.map((parentResult, i) => WithFaceExpressions_1.extendWithFaceExpressions(parentResult, faceExpressionsByFace[i])); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictAllAgeAndGenderTask(this, this.input); } } exports.PredictAllFaceExpressionsTask = PredictAllFaceExpressionsTask; class PredictSingleFaceExpressionsTask extends PredictFaceExpressionsTaskBase { async run() { const parentResult = await this.parentTask; if (!parentResult) { return; } const faceExpressions = await extractFacesAndComputeResults_1.extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets_1.nets.faceExpressionNet.predictExpressions(face), this.extractedFaces); return WithFaceExpressions_1.extendWithFaceExpressions(parentResult, faceExpressions); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictSingleAgeAndGenderTask(this, this.input); } } exports.PredictSingleFaceExpressionsTask = PredictSingleFaceExpressionsTask; class PredictAllFaceExpressionsWithFaceAlignmentTask extends PredictAllFaceExpressionsTask { withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input); } withFaceDescriptors() { return new ComputeFaceDescriptorsTasks_1.ComputeAllFaceDescriptorsTask(this, this.input); } } exports.PredictAllFaceExpressionsWithFaceAlignmentTask = PredictAllFaceExpressionsWithFaceAlignmentTask; class PredictSingleFaceExpressionsWithFaceAlignmentTask extends PredictSingleFaceExpressionsTask { withAgeAndGender() { return new 
PredictAgeAndGenderTask_1.PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input); } withFaceDescriptor() { return new ComputeFaceDescriptorsTasks_1.ComputeSingleFaceDescriptorTask(this, this.input); } } exports.PredictSingleFaceExpressionsWithFaceAlignmentTask = PredictSingleFaceExpressionsWithFaceAlignmentTask; }); // build/globalApi/PredictAgeAndGenderTask.js var require_PredictAgeAndGenderTask = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.PredictSingleAgeAndGenderWithFaceAlignmentTask = exports.PredictAllAgeAndGenderWithFaceAlignmentTask = exports.PredictSingleAgeAndGenderTask = exports.PredictAllAgeAndGenderTask = exports.PredictAgeAndGenderTaskBase = void 0; const WithAge_1 = require_WithAge(); const WithGender_1 = require_WithGender(); const ComposableTask_1 = require_ComposableTask(); const ComputeFaceDescriptorsTasks_1 = require_ComputeFaceDescriptorsTasks(); const extractFacesAndComputeResults_1 = require_extractFacesAndComputeResults(); const nets_1 = require_nets(); const PredictFaceExpressionsTask_1 = require_PredictFaceExpressionsTask(); class PredictAgeAndGenderTaskBase extends ComposableTask_1.ComposableTask { constructor(parentTask, input, extractedFaces) { super(); this.parentTask = parentTask; this.input = input; this.extractedFaces = extractedFaces; } } exports.PredictAgeAndGenderTaskBase = PredictAgeAndGenderTaskBase; class PredictAllAgeAndGenderTask extends PredictAgeAndGenderTaskBase { async run() { const parentResults = await this.parentTask; const ageAndGenderByFace = await extractFacesAndComputeResults_1.extractAllFacesAndComputeResults(parentResults, this.input, async (faces) => await Promise.all(faces.map((face) => nets_1.nets.ageGenderNet.predictAgeAndGender(face))), this.extractedFaces); return parentResults.map((parentResult, i) => { const {age, gender, genderProbability} = ageAndGenderByFace[i]; return WithAge_1.extendWithAge(WithGender_1.extendWithGender(parentResult, gender, genderProbability), age); }); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictAllFaceExpressionsTask(this, this.input); } } exports.PredictAllAgeAndGenderTask = PredictAllAgeAndGenderTask; class PredictSingleAgeAndGenderTask extends PredictAgeAndGenderTaskBase { async run() { const parentResult = await this.parentTask; if (!parentResult) { return; } const {age, gender, genderProbability} = await extractFacesAndComputeResults_1.extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets_1.nets.ageGenderNet.predictAgeAndGender(face), this.extractedFaces); return WithAge_1.extendWithAge(WithGender_1.extendWithGender(parentResult, gender, genderProbability), age); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictSingleFaceExpressionsTask(this, this.input); } } exports.PredictSingleAgeAndGenderTask = PredictSingleAgeAndGenderTask; class PredictAllAgeAndGenderWithFaceAlignmentTask extends PredictAllAgeAndGenderTask { withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input); } withFaceDescriptors() { return new ComputeFaceDescriptorsTasks_1.ComputeAllFaceDescriptorsTask(this, this.input); } } exports.PredictAllAgeAndGenderWithFaceAlignmentTask = PredictAllAgeAndGenderWithFaceAlignmentTask; class PredictSingleAgeAndGenderWithFaceAlignmentTask extends PredictSingleAgeAndGenderTask { withFaceExpressions() { return new 
PredictFaceExpressionsTask_1.PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input); } withFaceDescriptor() { return new ComputeFaceDescriptorsTasks_1.ComputeSingleFaceDescriptorTask(this, this.input); } } exports.PredictSingleAgeAndGenderWithFaceAlignmentTask = PredictSingleAgeAndGenderWithFaceAlignmentTask; }); // build/globalApi/ComputeFaceDescriptorsTasks.js var require_ComputeFaceDescriptorsTasks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.ComputeSingleFaceDescriptorTask = exports.ComputeAllFaceDescriptorsTask = exports.ComputeFaceDescriptorsTaskBase = void 0; const WithFaceDescriptor_1 = require_WithFaceDescriptor(); const ComposableTask_1 = require_ComposableTask(); const extractFacesAndComputeResults_1 = require_extractFacesAndComputeResults(); const nets_1 = require_nets(); const PredictAgeAndGenderTask_1 = require_PredictAgeAndGenderTask(); const PredictFaceExpressionsTask_1 = require_PredictFaceExpressionsTask(); class ComputeFaceDescriptorsTaskBase extends ComposableTask_1.ComposableTask { constructor(parentTask, input) { super(); this.parentTask = parentTask; this.input = input; } } exports.ComputeFaceDescriptorsTaskBase = ComputeFaceDescriptorsTaskBase; class ComputeAllFaceDescriptorsTask extends ComputeFaceDescriptorsTaskBase { async run() { const parentResults = await this.parentTask; const descriptors = await extractFacesAndComputeResults_1.extractAllFacesAndComputeResults(parentResults, this.input, (faces) => Promise.all(faces.map((face) => nets_1.nets.faceRecognitionNet.computeFaceDescriptor(face))), null, (parentResult) => parentResult.landmarks.align(null, {useDlibAlignment: true})); return descriptors.map((descriptor, i) => WithFaceDescriptor_1.extendWithFaceDescriptor(parentResults[i], descriptor)); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input); } } exports.ComputeAllFaceDescriptorsTask = ComputeAllFaceDescriptorsTask; class ComputeSingleFaceDescriptorTask extends ComputeFaceDescriptorsTaskBase { async run() { const parentResult = await this.parentTask; if (!parentResult) { return; } const descriptor = await extractFacesAndComputeResults_1.extractSingleFaceAndComputeResult(parentResult, this.input, (face) => nets_1.nets.faceRecognitionNet.computeFaceDescriptor(face), null, (parentResult2) => parentResult2.landmarks.align(null, {useDlibAlignment: true})); return WithFaceDescriptor_1.extendWithFaceDescriptor(parentResult, descriptor); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input); } } exports.ComputeSingleFaceDescriptorTask = ComputeSingleFaceDescriptorTask; }); // build/tinyFaceDetector/TinyFaceDetectorOptions.js var require_TinyFaceDetectorOptions = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.TinyFaceDetectorOptions = void 0; const tinyYolov2_1 = require_tinyYolov2(); class TinyFaceDetectorOptions extends tinyYolov2_1.TinyYolov2Options { constructor() { super(...arguments); this._name = "TinyFaceDetectorOptions"; } } exports.TinyFaceDetectorOptions = TinyFaceDetectorOptions; }); // 
build/globalApi/DetectFaceLandmarksTasks.js var require_DetectFaceLandmarksTasks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.DetectSingleFaceLandmarksTask = exports.DetectAllFaceLandmarksTask = exports.DetectFaceLandmarksTaskBase = void 0; const tf = require_dist(); const dom_1 = require_dom(); const WithFaceLandmarks_1 = require_WithFaceLandmarks(); const ComposableTask_1 = require_ComposableTask(); const ComputeFaceDescriptorsTasks_1 = require_ComputeFaceDescriptorsTasks(); const nets_1 = require_nets(); const PredictAgeAndGenderTask_1 = require_PredictAgeAndGenderTask(); const PredictFaceExpressionsTask_1 = require_PredictFaceExpressionsTask(); class DetectFaceLandmarksTaskBase extends ComposableTask_1.ComposableTask { constructor(parentTask, input, useTinyLandmarkNet) { super(); this.parentTask = parentTask; this.input = input; this.useTinyLandmarkNet = useTinyLandmarkNet; } get landmarkNet() { return this.useTinyLandmarkNet ? nets_1.nets.faceLandmark68TinyNet : nets_1.nets.faceLandmark68Net; } } exports.DetectFaceLandmarksTaskBase = DetectFaceLandmarksTaskBase; class DetectAllFaceLandmarksTask extends DetectFaceLandmarksTaskBase { async run() { const parentResults = await this.parentTask; const detections = parentResults.map((res) => res.detection); const faces = this.input instanceof tf.Tensor ? await dom_1.extractFaceTensors(this.input, detections) : await dom_1.extractFaces(this.input, detections); const faceLandmarksByFace = await Promise.all(faces.map((face) => this.landmarkNet.detectLandmarks(face))); faces.forEach((f) => f instanceof tf.Tensor && f.dispose()); return parentResults.map((parentResult, i) => WithFaceLandmarks_1.extendWithFaceLandmarks(parentResult, faceLandmarksByFace[i])); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictAllFaceExpressionsWithFaceAlignmentTask(this, this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictAllAgeAndGenderWithFaceAlignmentTask(this, this.input); } withFaceDescriptors() { return new ComputeFaceDescriptorsTasks_1.ComputeAllFaceDescriptorsTask(this, this.input); } } exports.DetectAllFaceLandmarksTask = DetectAllFaceLandmarksTask; class DetectSingleFaceLandmarksTask extends DetectFaceLandmarksTaskBase { async run() { const parentResult = await this.parentTask; if (!parentResult) { return; } const {detection} = parentResult; const faces = this.input instanceof tf.Tensor ? 
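/* The landmark tasks crop face chips from the input (tensors or canvases)
 * using the parent detections, run the 68-point landmark net on each chip and
 * dispose any temporary tensors afterwards. */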
await dom_1.extractFaceTensors(this.input, [detection]) : await dom_1.extractFaces(this.input, [detection]); const landmarks = await this.landmarkNet.detectLandmarks(faces[0]); faces.forEach((f) => f instanceof tf.Tensor && f.dispose()); return WithFaceLandmarks_1.extendWithFaceLandmarks(parentResult, landmarks); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictSingleFaceExpressionsWithFaceAlignmentTask(this, this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictSingleAgeAndGenderWithFaceAlignmentTask(this, this.input); } withFaceDescriptor() { return new ComputeFaceDescriptorsTasks_1.ComputeSingleFaceDescriptorTask(this, this.input); } } exports.DetectSingleFaceLandmarksTask = DetectSingleFaceLandmarksTask; }); // build/globalApi/DetectFacesTasks.js var require_DetectFacesTasks = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.DetectSingleFaceTask = exports.DetectAllFacesTask = exports.DetectFacesTaskBase = void 0; const WithFaceDetection_1 = require_WithFaceDetection(); const TinyFaceDetectorOptions_1 = require_TinyFaceDetectorOptions(); const ComposableTask_1 = require_ComposableTask(); const DetectFaceLandmarksTasks_1 = require_DetectFaceLandmarksTasks(); const nets_1 = require_nets(); const PredictAgeAndGenderTask_1 = require_PredictAgeAndGenderTask(); const PredictFaceExpressionsTask_1 = require_PredictFaceExpressionsTask(); class DetectFacesTaskBase extends ComposableTask_1.ComposableTask { constructor(input, options = new TinyFaceDetectorOptions_1.TinyFaceDetectorOptions()) { super(); this.input = input; this.options = options; } } exports.DetectFacesTaskBase = DetectFacesTaskBase; class DetectAllFacesTask extends DetectFacesTaskBase { async run() { const {input, options} = this; const faceDetectionFunction = options instanceof TinyFaceDetectorOptions_1.TinyFaceDetectorOptions ? (input2) => nets_1.nets.tinyFaceDetector.locateFaces(input2, options) : null; if (!faceDetectionFunction) { throw new Error("detectFaces - expected options to be instance of TinyFaceDetectorOptions | SsdMobilenetv1Options | MtcnnOptions | TinyYolov2Options"); } return faceDetectionFunction(input); } runAndExtendWithFaceDetections() { return new Promise(async (res) => { const detections = await this.run(); return res(detections.map((detection) => WithFaceDetection_1.extendWithFaceDetection({}, detection))); }); } withFaceLandmarks(useTinyLandmarkNet = false) { return new DetectFaceLandmarksTasks_1.DetectAllFaceLandmarksTask(this.runAndExtendWithFaceDetections(), this.input, useTinyLandmarkNet); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictAllFaceExpressionsTask(this.runAndExtendWithFaceDetections(), this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictAllAgeAndGenderTask(this.runAndExtendWithFaceDetections(), this.input); } } exports.DetectAllFacesTask = DetectAllFacesTask; class DetectSingleFaceTask extends DetectFacesTaskBase { async run() { const faceDetections = await new DetectAllFacesTask(this.input, this.options); let faceDetectionWithHighestScore = faceDetections[0]; faceDetections.forEach((faceDetection) => { if (faceDetection.score > faceDetectionWithHighestScore.score) { faceDetectionWithHighestScore = faceDetection; } }); return faceDetectionWithHighestScore; } runAndExtendWithFaceDetection() { return new Promise(async (res) => { const detection = await this.run(); return res(detection ? 
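/* These detection tasks are composable and awaitable, so chains like the
 * following resolve to progressively richer results (a sketch; imgEl is a
 * placeholder input):
 *   const result = await faceapi
 *     .detectSingleFace(imgEl, new faceapi.TinyFaceDetectorOptions())
 *     .withFaceLandmarks()
 *     .withFaceDescriptor();
 */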
WithFaceDetection_1.extendWithFaceDetection({}, detection) : void 0); }); } withFaceLandmarks(useTinyLandmarkNet = false) { return new DetectFaceLandmarksTasks_1.DetectSingleFaceLandmarksTask(this.runAndExtendWithFaceDetection(), this.input, useTinyLandmarkNet); } withFaceExpressions() { return new PredictFaceExpressionsTask_1.PredictSingleFaceExpressionsTask(this.runAndExtendWithFaceDetection(), this.input); } withAgeAndGender() { return new PredictAgeAndGenderTask_1.PredictSingleAgeAndGenderTask(this.runAndExtendWithFaceDetection(), this.input); } } exports.DetectSingleFaceTask = DetectSingleFaceTask; }); // build/globalApi/detectFaces.js var require_detectFaces = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.detectAllFaces = exports.detectSingleFace = void 0; const DetectFacesTasks_1 = require_DetectFacesTasks(); const TinyFaceDetectorOptions_1 = require_TinyFaceDetectorOptions(); function detectSingleFace(input, options = new TinyFaceDetectorOptions_1.TinyFaceDetectorOptions()) { return new DetectFacesTasks_1.DetectSingleFaceTask(input, options); } exports.detectSingleFace = detectSingleFace; function detectAllFaces(input, options = new TinyFaceDetectorOptions_1.TinyFaceDetectorOptions()) { return new DetectFacesTasks_1.DetectAllFacesTask(input, options); } exports.detectAllFaces = detectAllFaces; }); // build/euclideanDistance.js var require_euclideanDistance = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.euclideanDistance = void 0; function euclideanDistance(arr1, arr2) { if (arr1.length !== arr2.length) throw new Error("euclideanDistance: arr1.length !== arr2.length"); const desc1 = Array.from(arr1); const desc2 = Array.from(arr2); return Math.sqrt(desc1.map((val, i) => val - desc2[i]).reduce((res, diff) => res + Math.pow(diff, 2), 0)); } exports.euclideanDistance = euclideanDistance; }); // build/globalApi/FaceMatcher.js var require_FaceMatcher = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.FaceMatcher = void 0; const FaceMatch_1 = require_FaceMatch(); const LabeledFaceDescriptors_1 = require_LabeledFaceDescriptors(); const euclideanDistance_1 = require_euclideanDistance(); class FaceMatcher { constructor(inputs, distanceThreshold = 0.6) { this._distanceThreshold = distanceThreshold; const inputArray = Array.isArray(inputs) ? 
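/* FaceMatcher compares a query descriptor against each labeled descriptor set
 * by mean euclidean distance; findBestMatch returns the closest label, or an
 * "unknown" match when the best distance is not below the threshold
 * (default 0.6). */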
inputs : [inputs]; if (!inputArray.length) { throw new Error(`FaceRecognizer.constructor - expected atleast one input`); } let count = 1; const createUniqueLabel = () => `person ${count++}`; this._labeledDescriptors = inputArray.map((desc) => { if (desc instanceof LabeledFaceDescriptors_1.LabeledFaceDescriptors) { return desc; } if (desc instanceof Float32Array) { return new LabeledFaceDescriptors_1.LabeledFaceDescriptors(createUniqueLabel(), [desc]); } if (desc.descriptor && desc.descriptor instanceof Float32Array) { return new LabeledFaceDescriptors_1.LabeledFaceDescriptors(createUniqueLabel(), [desc.descriptor]); } throw new Error(`FaceRecognizer.constructor - expected inputs to be of type LabeledFaceDescriptors | WithFaceDescriptor | Float32Array | Array | Float32Array>`); }); } get labeledDescriptors() { return this._labeledDescriptors; } get distanceThreshold() { return this._distanceThreshold; } computeMeanDistance(queryDescriptor, descriptors) { return descriptors.map((d) => euclideanDistance_1.euclideanDistance(d, queryDescriptor)).reduce((d1, d2) => d1 + d2, 0) / (descriptors.length || 1); } matchDescriptor(queryDescriptor) { return this.labeledDescriptors.map(({descriptors, label}) => new FaceMatch_1.FaceMatch(label, this.computeMeanDistance(queryDescriptor, descriptors))).reduce((best, curr) => best.distance < curr.distance ? best : curr); } findBestMatch(queryDescriptor) { const bestMatch = this.matchDescriptor(queryDescriptor); return bestMatch.distance < this.distanceThreshold ? bestMatch : new FaceMatch_1.FaceMatch("unknown", bestMatch.distance); } toJSON() { return { distanceThreshold: this.distanceThreshold, labeledDescriptors: this.labeledDescriptors.map((ld) => ld.toJSON()) }; } static fromJSON(json) { const labeledDescriptors = json.labeledDescriptors.map((ld) => LabeledFaceDescriptors_1.LabeledFaceDescriptors.fromJSON(ld)); return new FaceMatcher(labeledDescriptors, json.distanceThreshold); } } exports.FaceMatcher = FaceMatcher; }); // build/globalApi/types.js var require_types6 = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); }); // build/globalApi/index.js var require_globalApi = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); const tslib_1 = require_tslib_es6(); tslib_1.__exportStar(require_ComposableTask(), exports); tslib_1.__exportStar(require_ComputeFaceDescriptorsTasks(), exports); tslib_1.__exportStar(require_detectFaces(), exports); tslib_1.__exportStar(require_DetectFacesTasks(), exports); tslib_1.__exportStar(require_DetectFaceLandmarksTasks(), exports); tslib_1.__exportStar(require_FaceMatcher(), exports); tslib_1.__exportStar(require_nets(), exports); tslib_1.__exportStar(require_types6(), exports); }); // build/tinyFaceDetector/index.js var require_tinyFaceDetector = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.createTinyFaceDetector = void 0; const tslib_1 = require_tslib_es6(); const TinyFaceDetector_1 = require_TinyFaceDetector(); tslib_1.__exportStar(require_TinyFaceDetector(), exports); tslib_1.__exportStar(require_TinyFaceDetectorOptions(), exports); function createTinyFaceDetector(weights) { const net = new TinyFaceDetector_1.TinyFaceDetector(); net.extractWeights(weights); return net; } exports.createTinyFaceDetector = createTinyFaceDetector; }); // build/resizeResults.js var require_resizeResults = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, 
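/* resizeResults (below) rescales detections and landmarks to a target
 * {width, height}, typically before drawing them onto a canvas or overlay
 * whose size differs from the processed input. */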
"__esModule", {value: true}); exports.resizeResults = void 0; const classes_1 = require_classes(); const FaceDetection_1 = require_FaceDetection(); const FaceLandmarks_1 = require_FaceLandmarks(); const WithFaceDetection_1 = require_WithFaceDetection(); const WithFaceLandmarks_1 = require_WithFaceLandmarks(); function resizeResults(results, dimensions) { const {width, height} = new classes_1.Dimensions(dimensions.width, dimensions.height); if (width <= 0 || height <= 0) { throw new Error(`resizeResults - invalid dimensions: ${JSON.stringify({width, height})}`); } if (Array.isArray(results)) { return results.map((obj) => resizeResults(obj, {width, height})); } if (WithFaceLandmarks_1.isWithFaceLandmarks(results)) { const resizedDetection = results.detection.forSize(width, height); const resizedLandmarks = results.unshiftedLandmarks.forSize(resizedDetection.box.width, resizedDetection.box.height); return WithFaceLandmarks_1.extendWithFaceLandmarks(WithFaceDetection_1.extendWithFaceDetection(results, resizedDetection), resizedLandmarks); } if (WithFaceDetection_1.isWithFaceDetection(results)) { return WithFaceDetection_1.extendWithFaceDetection(results, results.detection.forSize(width, height)); } if (results instanceof FaceLandmarks_1.FaceLandmarks || results instanceof FaceDetection_1.FaceDetection) { return results.forSize(width, height); } return results; } exports.resizeResults = resizeResults; }); // build/index.js var require_build = __commonJS((exports) => { "use strict"; Object.defineProperty(exports, "__esModule", {value: true}); exports.tf = exports.utils = exports.draw = void 0; const tslib_1 = require_tslib_es6(); const tf = require_dist(); exports.tf = tf; const draw = require_draw(); exports.draw = draw; const utils = require_utils(); exports.utils = utils; tslib_1.__exportStar(require_ageGenderNet(), exports); tslib_1.__exportStar(require_classes(), exports); tslib_1.__exportStar(require_dom(), exports); tslib_1.__exportStar(require_env(), exports); tslib_1.__exportStar(require_faceExpressionNet(), exports); tslib_1.__exportStar(require_faceLandmarkNet(), exports); tslib_1.__exportStar(require_faceRecognitionNet(), exports); tslib_1.__exportStar(require_factories(), exports); tslib_1.__exportStar(require_globalApi(), exports); tslib_1.__exportStar(require_ops(), exports); tslib_1.__exportStar(require_tinyFaceDetector(), exports); tslib_1.__exportStar(require_tinyYolov2(), exports); tslib_1.__exportStar(require_euclideanDistance(), exports); tslib_1.__exportStar(require_NeuralNetwork(), exports); tslib_1.__exportStar(require_resizeResults(), exports); }); // node_modules/@tensorflow/tfjs-core/dist/environment.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ const TENSORFLOWJS_FLAGS_PREFIX = "tfjsflags"; class Environment { constructor(global2) { this.global = global2; this.flags = {}; this.flagRegistry = {}; this.urlFlags = {}; this.populateURLFlags(); } setPlatform(platformName, platform) { if (this.platform != null) { console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${platform}.`); } this.platformName = platformName; this.platform = platform; } registerFlag(flagName, evaluationFn, setHook) { this.flagRegistry[flagName] = {evaluationFn, setHook}; if (this.urlFlags[flagName] != null) { const flagValue = this.urlFlags[flagName]; console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`); this.set(flagName, flagValue); } } async getAsync(flagName) { if (flagName in this.flags) { return this.flags[flagName]; } this.flags[flagName] = await this.evaluateFlag(flagName); return this.flags[flagName]; } get(flagName) { if (flagName in this.flags) { return this.flags[flagName]; } const flagValue = this.evaluateFlag(flagName); if (flagValue instanceof Promise) { throw new Error(`Flag ${flagName} cannot be synchronously evaluated. Please use getAsync() instead.`); } this.flags[flagName] = flagValue; return this.flags[flagName]; } getNumber(flagName) { return this.get(flagName); } getBool(flagName) { return this.get(flagName); } getFlags() { return this.flags; } get features() { return this.flags; } set(flagName, value) { if (this.flagRegistry[flagName] == null) { throw new Error(`Cannot set flag ${flagName} as it has not been registered.`); } this.flags[flagName] = value; if (this.flagRegistry[flagName].setHook != null) { this.flagRegistry[flagName].setHook(value); } } evaluateFlag(flagName) { if (this.flagRegistry[flagName] == null) { throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`); } return this.flagRegistry[flagName].evaluationFn(); } setFlags(flags2) { this.flags = Object.assign({}, flags2); } reset() { this.flags = {}; this.urlFlags = {}; this.populateURLFlags(); } populateURLFlags() { if (typeof this.global === "undefined" || typeof this.global.location === "undefined" || typeof this.global.location.search === "undefined") { return; } const urlParams = getQueryParams(this.global.location.search); if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) { const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(","); keyValues.forEach((keyValue) => { const [key, value] = keyValue.split(":"); this.urlFlags[key] = parseValue(key, value); }); } } } function getQueryParams(queryString) { const params = {}; queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => { decodeParam(params, t[0], t[1]); return t.join("="); }); return params; } function decodeParam(params, name, value) { params[decodeURIComponent(name)] = decodeURIComponent(value || ""); } function parseValue(flagName, value) { value = value.toLowerCase(); if (value === "true" || value === "false") { return value === "true"; } else if (`${+value}` === value) { return +value; } throw new Error(`Could not parse value flag value ${value} for flag ${flagName}.`); } function env() { return ENV; } let ENV = null; function setEnvironmentGlobal(environment11) { ENV = environment11; } // node_modules/@tensorflow/tfjs-core/dist/global_util.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ let globalNameSpace; function getGlobalNamespace() { if (globalNameSpace == null) { let ns; if (typeof window !== "undefined") { ns = window; } else if (typeof global !== "undefined") { ns = global; } else if (typeof process !== "undefined") { ns = process; } else if (typeof self !== "undefined") { ns = self; } else { throw new Error("Could not find a global object"); } globalNameSpace = ns; } return globalNameSpace; } function getGlobalMap() { const ns = getGlobalNamespace(); if (ns._tfGlobals == null) { ns._tfGlobals = new Map(); } return ns._tfGlobals; } function getGlobal(key, init) { const globalMap = getGlobalMap(); if (globalMap.has(key)) { return globalMap.get(key); } else { const singleton = init(); globalMap.set(key, singleton); return globalMap.get(key); } } // node_modules/@tensorflow/tfjs-core/dist/kernel_names.js const Abs = "Abs"; const Acos = "Acos"; const Acosh = "Acosh"; const Add = "Add"; const AddN = "AddN"; const All = "All"; const Any = "Any"; const ArgMax = "ArgMax"; const ArgMin = "ArgMin"; const Asin = "Asin"; const Asinh = "Asinh"; const Atan = "Atan"; const Atanh = "Atanh"; const Atan2 = "Atan2"; const AvgPool = "AvgPool"; const AvgPoolBackprop = "AvgPoolBackprop"; const AvgPool3D = "AvgPool3D"; const AvgPool3DBackprop = "AvgPool3DBackprop"; const BatchMatMul = "BatchMatMul"; const BatchToSpaceND = "BatchToSpaceND"; const BroadcastTo = "BroadcastTo"; const Cast = "Cast"; const Ceil = "Ceil"; const ClipByValue = "ClipByValue"; const Complex = "Complex"; const Concat = "Concat"; const Conv2D = "Conv2D"; const Conv2DBackpropFilter = "Conv2DBackpropFilter"; const Conv2DBackpropInput = "Conv2DBackpropInput"; const Conv3D = "Conv3D"; const Conv3DBackpropFilterV2 = "Conv3DBackpropFilterV2"; const Conv3DBackpropInputV2 = "Conv3DBackpropInputV2"; const Cos = "Cos"; const Cosh = "Cosh"; const Cumsum = "Cumsum"; const CropAndResize = "CropAndResize"; const DepthToSpace = "DepthToSpace"; const DepthwiseConv2dNative = "DepthwiseConv2dNative"; const DepthwiseConv2dNativeBackpropFilter = "DepthwiseConv2dNativeBackpropFilter"; const DepthwiseConv2dNativeBackpropInput = "DepthwiseConv2dNativeBackpropInput"; const Diag = "Diag"; const Dilation2D = "Dilation2D"; const Dilation2DBackpropInput = "Dilation2DBackpropInput"; const Dilation2DBackpropFilter = "Dilation2DBackpropFilter"; const Div = "Div"; const Elu = "Elu"; const EluGrad = "EluGrad"; const Erf = "Erf"; const Equal = "Equal"; const Exp = "Exp"; const Expm1 = "Expm1"; const FFT = "FFT"; const Fill = "Fill"; const FlipLeftRight = "FlipLeftRight"; const Floor = "Floor"; const FloorDiv = "FloorDiv"; const FusedBatchNorm = "FusedBatchNorm"; const GatherV2 = "GatherV2"; const GatherNd = "GatherNd"; const Greater = "Greater"; const GreaterEqual = "GreaterEqual"; const Identity = "Identity"; const IFFT = "IFFT"; const Imag = "Imag"; const IsFinite = "IsFinite"; const IsInf = "IsInf"; const IsNan = "IsNan"; const Less = 
"Less"; const LessEqual = "LessEqual"; const LinSpace = "LinSpace"; const Log = "Log"; const Log1p = "Log1p"; const LogicalAnd = "LogicalAnd"; const LogicalNot = "LogicalNot"; const LogicalOr = "LogicalOr"; const LogSoftmax = "LogSoftmax"; const LRN = "LRN"; const LRNBackprop = "LRNBackprop"; const Max = "Max"; const Maximum = "Maximum"; const MaxPool = "MaxPool"; const MaxPoolBackprop = "MaxPoolBackprop"; const MaxPool3D = "MaxPool3D"; const MaxPool3DBackprop = "MaxPool3DBackprop"; const MaxPoolWithArgmax = "MaxPoolWithArgmax"; const Mean = "Mean"; const Min = "Min"; const Minimum = "Minimum"; const Mod = "Mod"; const Multiply = "Multiply"; const Negate = "Negate"; const NotEqual = "NotEqual"; const NonMaxSuppressionV3 = "NonMaxSuppressionV3"; const NonMaxSuppressionV4 = "NonMaxSuppressionV4"; const NonMaxSuppressionV5 = "NonMaxSuppressionV5"; const OnesLike = "OnesLike"; const OneHot = "OneHot"; const PadV2 = "PadV2"; const Pool = "Pool"; const Pow = "Pow"; const Prelu = "Prelu"; const Prod = "Prod"; const Range = "Range"; const Real = "Real"; const Reciprocal = "Reciprocal"; const Relu = "Relu"; const Reshape = "Reshape"; const ResizeNearestNeighbor = "ResizeNearestNeighbor"; const ResizeNearestNeighborGrad = "ResizeNearestNeighborGrad"; const ResizeBilinear = "ResizeBilinear"; const ResizeBilinearGrad = "ResizeBilinearGrad"; const Relu6 = "Relu6"; const Reverse = "Reverse"; const Round = "Round"; const Rsqrt = "Rsqrt"; const ScatterNd = "ScatterNd"; const SelectV2 = "SelectV2"; const Selu = "Selu"; const Slice = "Slice"; const Sin = "Sin"; const Sinh = "Sinh"; const Sign = "Sign"; const Sigmoid = "Sigmoid"; const Softplus = "Softplus"; const Sqrt = "Sqrt"; const Sum = "Sum"; const SpaceToBatchND = "SpaceToBatchND"; const SplitV = "SplitV"; const Softmax = "Softmax"; const SquaredDifference = "SquaredDifference"; const Square = "Square"; const Sub = "Sub"; const SparseToDense = "SparseToDense"; const StridedSlice = "StridedSlice"; const Tan = "Tan"; const Tanh = "Tanh"; const Tile = "Tile"; const TopK = "TopK"; const Transpose = "Transpose"; const Unpack = "Unpack"; const UnsortedSegmentSum = "UnsortedSegmentSum"; const ZerosLike = "ZerosLike"; const Step = "Step"; const FromPixels = "FromPixels"; const RotateWithOffset = "RotateWithOffset"; const _FusedMatMul = "_FusedMatMul"; const FusedConv2D = "FusedConv2D"; const FusedDepthwiseConv2D = "FusedDepthwiseConv2D"; // node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ const kernelRegistry = getGlobal("kernelRegistry", () => new Map()); const gradRegistry = getGlobal("gradRegistry", () => new Map()); function getKernel(kernelName, backendName) { const key = makeKey(kernelName, backendName); return kernelRegistry.get(key); } function getGradient(kernelName) { return gradRegistry.get(kernelName); } function getKernelsForBackend(backendName) { const it = kernelRegistry.entries(); const result = []; while (true) { const {done, value} = it.next(); if (done) { break; } const [key, config] = value; const [backend2] = key.split("_"); if (backend2 === backendName) { result.push(config); } } return result; } function registerKernel(config) { const {kernelName, backendName} = config; const key = makeKey(kernelName, backendName); if (kernelRegistry.has(key)) { console.warn(`The kernel '${kernelName}' for backend '${backendName}' is already registered`); } kernelRegistry.set(key, config); } function registerGradient(config) { const {kernelName} = config; if (gradRegistry.has(kernelName)) { if (env().getBool("DEBUG")) { console.warn(`Overriding the gradient for '${kernelName}'`); } } gradRegistry.set(kernelName, config); } function unregisterKernel(kernelName, backendName) { const key = makeKey(kernelName, backendName); if (!kernelRegistry.has(key)) { throw new Error(`The kernel '${kernelName}' for backend '${backendName}' is not registered`); } kernelRegistry.delete(key); } function unregisterGradient(kernelName) { if (!gradRegistry.has(kernelName)) { throw new Error(`The gradient '${kernelName}' for backend is not registered`); } gradRegistry.delete(kernelName); } function makeKey(kernelName, backendName) { return `${backendName}_${kernelName}`; } // node_modules/@tensorflow/tfjs-core/dist/util.js const util_exports = {}; __export(util_exports, { arraysEqual: () => arraysEqual, assert: () => assert, assertNonNegativeIntegerDimensions: () => assertNonNegativeIntegerDimensions, assertNonNull: () => assertNonNull, assertShapesMatch: () => assertShapesMatch, bytesFromStringArray: () => bytesFromStringArray, bytesPerElement: () => bytesPerElement, checkConversionForErrors: () => checkConversionForErrors, clamp: () => clamp, computeStrides: () => computeStrides, createShuffledIndices: () => createShuffledIndices, decodeString: () => decodeString, distSquared: () => distSquared, encodeString: () => encodeString, fetch: () => fetch2, flatten: () => flatten, getArrayFromDType: () => getArrayFromDType, getTypedArrayFromDType: () => getTypedArrayFromDType, hasEncodingLoss: () => hasEncodingLoss, indexToLoc: () => indexToLoc, inferDtype: () => inferDtype, inferFromImplicitShape: () => inferFromImplicitShape, isBoolean: () => isBoolean, isFunction: () => isFunction, isInt: () => isInt, isNumber: () => isNumber, isScalarShape: () => isScalarShape, isString: () => isString, isTypedArray: () => isTypedArray, isValidDtype: () => isValidDtype, locToIndex: () => locToIndex, makeOnesTypedArray: () => makeOnesTypedArray, makeZerosNestedTypedArray: () => makeZerosNestedTypedArray, makeZerosTypedArray: () => makeZerosTypedArray, nearestDivisor: () => nearestDivisor, nearestLargerEven: () => nearestLargerEven, now: () => now, parseAxisParam: () => parseAxisParam, randUniform: () => randUniform, repeatedTry: () => repeatedTry, rightPad: () => rightPad, shuffle: () => shuffle, sizeFromShape: () => sizeFromShape, sizeToSquarishShape: () => sizeToSquarishShape, squeezeShape: () => squeezeShape, sum: () => sum, 
tanh: () => tanh, toNestedArray: () => toNestedArray, toTypedArray: () => toTypedArray }); /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function shuffle(array) { let counter = array.length; let temp = 0; let index = 0; while (counter > 0) { index = Math.random() * counter | 0; counter--; temp = array[counter]; array[counter] = array[index]; array[index] = temp; } } function clamp(min5, x, max7) { return Math.max(min5, Math.min(x, max7)); } function nearestLargerEven(val) { return val % 2 === 0 ? val : val + 1; } function sum(arr) { let sum26 = 0; for (let i = 0; i < arr.length; i++) { sum26 += arr[i]; } return sum26; } function randUniform(a, b) { const r = Math.random(); return b * r + (1 - r) * a; } function distSquared(a, b) { let result = 0; for (let i = 0; i < a.length; i++) { const diff = Number(a[i]) - Number(b[i]); result += diff * diff; } return result; } function assert(expr, msg) { if (!expr) { throw new Error(typeof msg === "string" ? msg : msg()); } } function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = "") { assert(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`); } function assertNonNull(a) { assert(a != null, () => `The input to the tensor constructor must be a non-null value.`); } function flatten(arr, result = [], skipTypedArray = false) { if (result == null) { result = []; } if (Array.isArray(arr) || isTypedArray(arr) && !skipTypedArray) { for (let i = 0; i < arr.length; ++i) { flatten(arr[i], result, skipTypedArray); } } else { result.push(arr); } return result; } function sizeFromShape(shape) { if (shape.length === 0) { return 1; } let size = shape[0]; for (let i = 1; i < shape.length; i++) { size *= shape[i]; } return size; } function isScalarShape(shape) { return shape.length === 0; } function arraysEqual(n1, n2) { if (n1 === n2) { return true; } if (n1 == null || n2 == null) { return false; } if (n1.length !== n2.length) { return false; } for (let i = 0; i < n1.length; i++) { if (n1[i] !== n2[i]) { return false; } } return true; } function isInt(a) { return a % 1 === 0; } function tanh(x) { if (Math.tanh != null) { return Math.tanh(x); } if (x === Infinity) { return 1; } else if (x === -Infinity) { return -1; } else { const e2x = Math.exp(2 * x); return (e2x - 1) / (e2x + 1); } } function sizeToSquarishShape(size) { const width = Math.ceil(Math.sqrt(size)); return [width, Math.ceil(size / width)]; } function createShuffledIndices(n) { const shuffledIndices = new Uint32Array(n); for (let i = 0; i < n; ++i) { shuffledIndices[i] = i; } shuffle(shuffledIndices); return shuffledIndices; } function rightPad(a, size) { if (size <= a.length) { return a; } return a + " ".repeat(size - a.length); } function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) { return new Promise((resolve, reject) => { let tryCount = 0; const tryFn = () => { if (checkFn()) { resolve(); return; 
} tryCount++; const nextBackoff = delayFn(tryCount); if (maxCounter != null && tryCount >= maxCounter) { reject(); return; } setTimeout(tryFn, nextBackoff); }; tryFn(); }); } function inferFromImplicitShape(shape, size) { let shapeProd = 1; let implicitIdx = -1; for (let i = 0; i < shape.length; ++i) { if (shape[i] >= 0) { shapeProd *= shape[i]; } else if (shape[i] === -1) { if (implicitIdx !== -1) { throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${implicitIdx} and dim ${i}`); } implicitIdx = i; } else if (shape[i] < 0) { throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`); } } if (implicitIdx === -1) { if (size > 0 && size !== shapeProd) { throw Error(`Size(${size}) must match the product of shape ${shape}`); } return shape; } if (shapeProd === 0) { throw Error(`Cannot infer the missing size in [${shape}] when there are 0 elements`); } if (size % shapeProd !== 0) { throw Error(`The implicit shape can't be a fractional number. Got ${size} / ${shapeProd}`); } const newShape = shape.slice(); newShape[implicitIdx] = size / shapeProd; return newShape; } function parseAxisParam(axis, shape) { const rank = shape.length; axis = axis == null ? shape.map((s, i) => i) : [].concat(axis); assert(axis.every((ax) => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but got axis ${axis}`); assert(axis.every((ax) => isInt(ax)), () => `All values in axis param must be integers but got axis ${axis}`); return axis.map((a) => a < 0 ? rank + a : a); } function squeezeShape(shape, axis) { const newShape = []; const keptDims = []; const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0; const axes = axis == null || isEmptyArray ? null : parseAxisParam(axis, shape).sort(); let j = 0; for (let i = 0; i < shape.length; ++i) { if (axes != null) { if (axes[j] === i && shape[i] !== 1) { throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`); } if ((axes[j] == null || axes[j] > i) && shape[i] === 1) { newShape.push(shape[i]); keptDims.push(i); } if (axes[j] <= i) { j++; } } if (shape[i] !== 1) { newShape.push(shape[i]); keptDims.push(i); } } return {newShape, keptDims}; } function getTypedArrayFromDType(dtype, size) { let values = null; if (dtype == null || dtype === "float32") { values = new Float32Array(size); } else if (dtype === "int32") { values = new Int32Array(size); } else if (dtype === "bool") { values = new Uint8Array(size); } else { throw new Error(`Unknown data type ${dtype}`); } return values; } function getArrayFromDType(dtype, size) { let values = null; if (dtype == null || dtype === "float32") { values = new Float32Array(size); } else if (dtype === "int32") { values = new Int32Array(size); } else if (dtype === "bool") { values = new Uint8Array(size); } else if (dtype === "string") { values = new Array(size); } else { throw new Error(`Unknown data type ${dtype}`); } return values; } function checkConversionForErrors(vals, dtype) { for (let i = 0; i < vals.length; i++) { const num = vals[i]; if (isNaN(num) || !isFinite(num)) { throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`); } } } function isValidDtype(dtype) { return dtype === "bool" || dtype === "complex64" || dtype === "float32" || dtype === "int32" || dtype === "string"; } function hasEncodingLoss(oldType, newType) { if (newType === "complex64") { return false; } if (newType === "float32" && oldType !== "complex64") { return false; } if (newType === "int32" && oldType !== "float32" && oldType !== 
"complex64") { return false; } if (newType === "bool" && oldType === "bool") { return false; } return true; } function isTypedArray(a) { return a instanceof Float32Array || a instanceof Int32Array || a instanceof Uint8Array; } function bytesPerElement(dtype) { if (dtype === "float32" || dtype === "int32") { return 4; } else if (dtype === "complex64") { return 8; } else if (dtype === "bool") { return 1; } else { throw new Error(`Unknown dtype ${dtype}`); } } function bytesFromStringArray(arr) { if (arr == null) { return 0; } let bytes = 0; arr.forEach((x) => bytes += x.length); return bytes; } function isString(value) { return typeof value === "string" || value instanceof String; } function isBoolean(value) { return typeof value === "boolean"; } function isNumber(value) { return typeof value === "number"; } function inferDtype(values) { if (Array.isArray(values)) { return inferDtype(values[0]); } if (values instanceof Float32Array) { return "float32"; } else if (values instanceof Int32Array || values instanceof Uint8Array) { return "int32"; } else if (isNumber(values)) { return "float32"; } else if (isString(values)) { return "string"; } else if (isBoolean(values)) { return "bool"; } return "float32"; } function isFunction(f) { return !!(f && f.constructor && f.call && f.apply); } function nearestDivisor(size, start) { for (let i = start; i < size; ++i) { if (size % i === 0) { return i; } } return size; } function computeStrides(shape) { const rank = shape.length; if (rank < 2) { return []; } const strides = new Array(rank - 1); strides[rank - 2] = shape[rank - 1]; for (let i = rank - 3; i >= 0; --i) { strides[i] = strides[i + 1] * shape[i + 1]; } return strides; } function toTypedArray(a, dtype) { if (dtype === "string") { throw new Error("Cannot convert a string[] to a TypedArray"); } if (Array.isArray(a)) { a = flatten(a); } if (env().getBool("DEBUG")) { checkConversionForErrors(a, dtype); } if (noConversionNeeded(a, dtype)) { return a; } if (dtype == null || dtype === "float32" || dtype === "complex64") { return new Float32Array(a); } else if (dtype === "int32") { return new Int32Array(a); } else if (dtype === "bool") { const bool = new Uint8Array(a.length); for (let i = 0; i < bool.length; ++i) { if (Math.round(a[i]) !== 0) { bool[i] = 1; } } return bool; } else { throw new Error(`Unknown data type ${dtype}`); } } function createNestedArray(offset, shape, a) { const ret = new Array(); if (shape.length === 1) { const d = shape[0]; for (let i = 0; i < d; i++) { ret[i] = a[offset + i]; } } else { const d = shape[0]; const rest = shape.slice(1); const len = rest.reduce((acc, c) => acc * c); for (let i = 0; i < d; i++) { ret[i] = createNestedArray(offset + i * len, rest, a); } } return ret; } function toNestedArray(shape, a) { if (shape.length === 0) { return a[0]; } const size = shape.reduce((acc, c) => acc * c); if (size === 0) { return []; } if (size !== a.length) { throw new Error(`[${shape}] does not match the input size ${a.length}.`); } return createNestedArray(0, shape, a); } function noConversionNeeded(a, dtype) { return a instanceof Float32Array && dtype === "float32" || a instanceof Int32Array && dtype === "int32" || a instanceof Uint8Array && dtype === "bool"; } function makeOnesTypedArray(size, dtype) { const array = makeZerosTypedArray(size, dtype); for (let i = 0; i < array.length; i++) { array[i] = 1; } return array; } function makeZerosTypedArray(size, dtype) { if (dtype == null || dtype === "float32" || dtype === "complex64") { return new Float32Array(size); } else if 
(dtype === "int32") { return new Int32Array(size); } else if (dtype === "bool") { return new Uint8Array(size); } else { throw new Error(`Unknown data type ${dtype}`); } } function makeZerosNestedTypedArray(shape, dtype) { const size = shape.reduce((prev, curr) => prev * curr, 1); if (dtype == null || dtype === "float32") { return toNestedArray(shape, new Float32Array(size)); } else if (dtype === "int32") { return toNestedArray(shape, new Int32Array(size)); } else if (dtype === "bool") { return toNestedArray(shape, new Uint8Array(size)); } else { throw new Error(`Unknown data type ${dtype}`); } } function now() { return env().platform.now(); } function assertNonNegativeIntegerDimensions(shape) { shape.forEach((dimSize) => { assert(Number.isInteger(dimSize) && dimSize >= 0, () => `Tensor must have a shape comprised of positive integers but got shape [${shape}].`); }); } function fetch2(path, requestInits) { return env().platform.fetch(path, requestInits); } function encodeString(s, encoding = "utf-8") { encoding = encoding || "utf-8"; return env().platform.encode(s, encoding); } function decodeString(bytes, encoding = "utf-8") { encoding = encoding || "utf-8"; return env().platform.decode(bytes, encoding); } function locToIndex(locs, rank, strides) { if (rank === 0) { return 0; } else if (rank === 1) { return locs[0]; } let index = locs[locs.length - 1]; for (let i = 0; i < locs.length - 1; ++i) { index += strides[i] * locs[i]; } return index; } function indexToLoc(index, rank, strides) { if (rank === 0) { return []; } else if (rank === 1) { return [index]; } const locs = new Array(rank); for (let i = 0; i < locs.length - 1; ++i) { locs[i] = Math.floor(index / strides[i]); index -= locs[i] * strides[i]; } locs[locs.length - 1] = index; return locs; } // node_modules/@tensorflow/tfjs-core/dist/profiler.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ class Profiler { constructor(backendTimer, logger) { this.backendTimer = backendTimer; this.logger = logger; if (logger == null) { this.logger = new Logger(); } } profileKernel(kernelName, inputs, f) { let outputs; const holdResultWrapperFn = () => { outputs = f(); }; const timer = this.backendTimer.time(holdResultWrapperFn); outputs.map((r) => { r.data().then((tensorVals) => { checkComputationForErrors(tensorVals, r.dtype, kernelName); }); }); const kernelProfile = { kernelName, outputs, inputs, timeMs: timer.then((timing) => timing.kernelMs), extraInfo: timer.then((timing) => timing.getExtraProfileInfo != null ? 
timing.getExtraProfileInfo() : "") }; return kernelProfile; } logKernelProfile(kernelProfile) { const {kernelName, outputs, timeMs, inputs, extraInfo} = kernelProfile; outputs.forEach((result) => { Promise.all([result.data(), timeMs, extraInfo]).then((valueContainer) => { this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]); }); }); } } function checkComputationForErrors(vals, dtype, kernelName) { if (dtype !== "float32") { return false; } for (let i = 0; i < vals.length; i++) { const num = vals[i]; if (isNaN(num) || !isFinite(num)) { console.warn(`Found ${num} in the result of '${kernelName}'`); return true; } } return false; } class Logger { logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) { const time2 = typeof timeMs === "number" ? rightPad(`${timeMs}ms`, 9) : timeMs["error"]; const paddedName = rightPad(name, 25); const rank = result.rank; const size = result.size; const shape = rightPad(result.shape.toString(), 14); let inputShapesDescription = ""; for (const name2 in inputs) { const input = inputs[name2]; if (input != null) { const inputShape = input.shape || result.shape; const inputRank = inputShape.length; inputShapesDescription += `${name2}: ${inputRank}D ${inputRank > 0 ? inputShape : ""} `; } } console.log(`%c${paddedName} %c${time2} %c${rank}D ${shape} %c${size} %c${inputShapesDescription} %c${extraInfo}`, "font-weight:bold", "color:red", "color:blue", "color: orange", "color: green", "color: steelblue"); } } // node_modules/@tensorflow/tfjs-core/dist/tape.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function getFilteredNodesXToY(tape2, xs, y) { const tensorsFromX = {}; const nodesFromX = {}; for (let i = 0; i < xs.length; i++) { tensorsFromX[xs[i].id] = true; } for (let i = 0; i < tape2.length; i++) { const node = tape2[i]; const nodeInputs = node.inputs; for (const inputName in nodeInputs) { const input = nodeInputs[inputName]; let anyInputFromX = false; for (let j = 0; j < xs.length; j++) { if (tensorsFromX[input.id]) { node.outputs.forEach((output) => tensorsFromX[output.id] = true); anyInputFromX = true; nodesFromX[node.id] = true; break; } } if (anyInputFromX) { break; } } } const tensorsLeadToY = {}; tensorsLeadToY[y.id] = true; const nodesToY = {}; for (let i = tape2.length - 1; i >= 0; i--) { const node = tape2[i]; const nodeInputs = node.inputs; for (let j = 0; j < node.outputs.length; j++) { if (tensorsLeadToY[node.outputs[j].id]) { for (const inputName in nodeInputs) { tensorsLeadToY[nodeInputs[inputName].id] = true; nodesToY[node.id] = true; } break; } } } const filteredTape = []; for (let i = 0; i < tape2.length; i++) { const node = tape2[i]; if (nodesFromX[node.id] && nodesToY[node.id]) { const prunedInputs = {}; for (const inputName in node.inputs) { const nodeInput = node.inputs[inputName]; if (tensorsFromX[nodeInput.id]) { prunedInputs[inputName] = nodeInput; } } const prunedNode = Object.assign({}, node); prunedNode.inputs = prunedInputs; prunedNode.outputs = node.outputs; filteredTape.push(prunedNode); } } return filteredTape; } function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy2, add29) { for (let i = filteredTape.length - 1; i >= 0; i--) { const node = filteredTape[i]; const dys = []; node.outputs.forEach((o) => { const gradTensor = tensorAccumulatedGradientMap[o.id]; if (gradTensor != null) { dys.push(gradTensor); } else { dys.push(null); } }); if (node.gradient == null) { throw new Error(`Cannot compute gradient: gradient function not found for ${node.kernelName}.`); } const inputGradients = node.gradient(dys); for (const inputName in node.inputs) { if (!(inputName in inputGradients)) { throw new Error(`Cannot backprop through input ${inputName}. Available gradients found: ${Object.keys(inputGradients)}.`); } const dx = tidy2(() => inputGradients[inputName]()); if (dx.dtype !== "float32") { throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ${inputName} must have 'float32' dtype, but has '${dx.dtype}'`); } const x = node.inputs[inputName]; if (!arraysEqual(dx.shape, x.shape)) { throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input '${inputName}' has shape '${dx.shape}', which does not match the shape of the input '${x.shape}'`); } if (tensorAccumulatedGradientMap[x.id] == null) { tensorAccumulatedGradientMap[x.id] = dx; } else { const curGradient = tensorAccumulatedGradientMap[x.id]; tensorAccumulatedGradientMap[x.id] = add29(curGradient, dx); curGradient.dispose(); } } } } // node_modules/@tensorflow/tfjs-core/dist/tensor_format.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const FORMAT_LIMIT_NUM_VALS = 20; const FORMAT_NUM_FIRST_LAST_VALS = 3; const FORMAT_NUM_SIG_DIGITS = 7; function tensorToString(vals, shape, dtype, verbose) { const strides = computeStrides(shape); const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides); const rank = shape.length; const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol); const lines = ["Tensor"]; if (verbose) { lines.push(` dtype: ${dtype}`); lines.push(` rank: ${rank}`); lines.push(` shape: [${shape}]`); lines.push(` values:`); } lines.push(valsLines.map((l) => " " + l).join("\n")); return lines.join("\n"); } function computeMaxSizePerColumn(vals, shape, dtype, strides) { const n = sizeFromShape(shape); const numCols = strides[strides.length - 1]; const padPerCol = new Array(numCols).fill(0); const rank = shape.length; const valuesOrTuples = dtype === "complex64" ? createComplexTuples(vals) : vals; if (rank > 1) { for (let row = 0; row < n / numCols; row++) { const offset = row * numCols; for (let j = 0; j < numCols; j++) { padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length); } } } return padPerCol; } function valToString(val, pad8, dtype) { let valStr; if (Array.isArray(val)) { valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`; } else if (isString(val)) { valStr = `'${val}'`; } else if (dtype === "bool") { valStr = boolNumToString(val); } else { valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString(); } return rightPad(valStr, pad8); } function boolNumToString(v) { return v === 0 ? "false" : "true"; } function subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) { const storagePerElement = dtype === "complex64" ? 2 : 1; const size = shape[0]; const rank = shape.length; if (rank === 0) { if (dtype === "complex64") { const complexTuple = createComplexTuples(vals); return [valToString(complexTuple[0], 0, dtype)]; } if (dtype === "bool") { return [boolNumToString(vals[0])]; } return [vals[0].toString()]; } if (rank === 1) { if (size > FORMAT_LIMIT_NUM_VALS) { const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement; let firstVals = Array.from(vals.slice(0, firstValsSize)); let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement)); if (dtype === "complex64") { firstVals = createComplexTuples(firstVals); lastVals = createComplexTuples(lastVals); } return [ "[" + firstVals.map((x, i) => valToString(x, padPerCol[i], dtype)).join(", ") + ", ..., " + lastVals.map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype)).join(", ") + "]" ]; } const displayVals = dtype === "complex64" ? 
createComplexTuples(vals) : Array.from(vals); return [ "[" + displayVals.map((x, i) => valToString(x, padPerCol[i], dtype)).join(", ") + "]" ]; } const subshape = shape.slice(1); const substrides = strides.slice(1); const stride = strides[0] * storagePerElement; const lines = []; if (size > FORMAT_LIMIT_NUM_VALS) { for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) { const start = i * stride; const end = start + stride; lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false)); } lines.push("..."); for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) { const start = i * stride; const end = start + stride; lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1)); } } else { for (let i = 0; i < size; i++) { const start = i * stride; const end = start + stride; lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1)); } } const sep = rank === 2 ? "," : ""; lines[0] = "[" + lines[0] + sep; for (let i = 1; i < lines.length - 1; i++) { lines[i] = " " + lines[i] + sep; } let newLineSep = ",\n"; for (let i = 2; i < rank; i++) { newLineSep += "\n"; } lines[lines.length - 1] = " " + lines[lines.length - 1] + "]" + (isLast ? "" : newLineSep); return lines; } function createComplexTuples(vals) { const complexTuples = []; for (let i = 0; i < vals.length; i += 2) { complexTuples.push([vals[i], vals[i + 1]]); } return complexTuples; } // node_modules/@tensorflow/tfjs-core/dist/tensor.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ class TensorBuffer { constructor(shape, dtype, values) { this.dtype = dtype; this.shape = shape.slice(); this.size = sizeFromShape(shape); if (values != null) { const n = values.length; assert(n === this.size, () => `Length of values '${n}' does not match the size inferred by the shape '${this.size}'.`); } if (dtype === "complex64") { throw new Error(`complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).`); } this.values = values || getArrayFromDType(dtype, this.size); this.strides = computeStrides(shape); } set(value, ...locs) { if (locs.length === 0) { locs = [0]; } assert(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must match the rank (${this.rank})`); const index = this.locToIndex(locs); this.values[index] = value; } get(...locs) { if (locs.length === 0) { locs = [0]; } let i = 0; for (const loc of locs) { if (loc < 0 || loc >= this.shape[i]) { const msg = `Requested out of range element at ${locs}. 
Buffer shape=${this.shape}`; throw new Error(msg); } i++; } let index = locs[locs.length - 1]; for (let i2 = 0; i2 < locs.length - 1; ++i2) { index += this.strides[i2] * locs[i2]; } return this.values[index]; } locToIndex(locs) { if (this.rank === 0) { return 0; } else if (this.rank === 1) { return locs[0]; } let index = locs[locs.length - 1]; for (let i = 0; i < locs.length - 1; ++i) { index += this.strides[i] * locs[i]; } return index; } indexToLoc(index) { if (this.rank === 0) { return []; } else if (this.rank === 1) { return [index]; } const locs = new Array(this.shape.length); for (let i = 0; i < locs.length - 1; ++i) { locs[i] = Math.floor(index / this.strides[i]); index -= locs[i] * this.strides[i]; } locs[locs.length - 1] = index; return locs; } get rank() { return this.shape.length; } toTensor() { return trackerFn().makeTensor(this.values, this.shape, this.dtype); } } let trackerFn = null; let opHandler = null; let deprecationWarningFn = null; function setTensorTracker(fn) { trackerFn = fn; } function setOpHandler(handler) { opHandler = handler; } function setDeprecationWarningFn(fn) { deprecationWarningFn = fn; } class Tensor { constructor(shape, dtype, dataId, id) { this.kept = false; this.isDisposedInternal = false; this.shape = shape.slice(); this.dtype = dtype || "float32"; this.size = sizeFromShape(shape); this.strides = computeStrides(shape); this.dataId = dataId; this.id = id; this.rankType = this.rank < 5 ? this.rank.toString() : "higher"; } get rank() { return this.shape.length; } async buffer() { const vals = await this.data(); return opHandler.buffer(this.shape, this.dtype, vals); } bufferSync() { return opHandler.buffer(this.shape, this.dtype, this.dataSync()); } async array() { const vals = await this.data(); return toNestedArray(this.shape, vals); } arraySync() { return toNestedArray(this.shape, this.dataSync()); } async data() { this.throwIfDisposed(); const data = trackerFn().read(this.dataId); if (this.dtype === "string") { const bytes = await data; try { return bytes.map((b) => decodeString(b)); } catch (_a) { throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes()."); } } return data; } dataSync() { this.throwIfDisposed(); const data = trackerFn().readSync(this.dataId); if (this.dtype === "string") { try { return data.map((b) => decodeString(b)); } catch (_a) { throw new Error("Failed to decode the string bytes into utf-8. 
To get the original bytes, call tensor.bytes()."); } } return data; } async bytes() { this.throwIfDisposed(); const data = await trackerFn().read(this.dataId); if (this.dtype === "string") { return data; } else { return new Uint8Array(data.buffer); } } dispose() { if (this.isDisposed) { return; } trackerFn().disposeTensor(this); this.isDisposedInternal = true; } get isDisposed() { return this.isDisposedInternal; } throwIfDisposed() { if (this.isDisposed) { throw new Error(`Tensor is disposed.`); } } print(verbose = false) { return opHandler.print(this, verbose); } clone() { this.throwIfDisposed(); return opHandler.clone(this); } toString(verbose = false) { const vals = this.dataSync(); return tensorToString(vals, this.shape, this.dtype, verbose); } cast(dtype) { this.throwIfDisposed(); return opHandler.cast(this, dtype); } variable(trainable = true, name, dtype) { this.throwIfDisposed(); return trackerFn().makeVariable(this, trainable, name, dtype); } } Object.defineProperty(Tensor, Symbol.hasInstance, { value: (instance) => { return !!instance && instance.dataId != null && instance.shape != null && instance.dtype != null; } }); class Variable extends Tensor { constructor(initialValue, trainable, name, tensorId) { super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId); this.trainable = trainable; this.name = name; } assign(newValue) { if (newValue.dtype !== this.dtype) { throw new Error(`dtype of the new value (${newValue.dtype}) and previous value (${this.dtype}) must match`); } if (!arraysEqual(newValue.shape, this.shape)) { throw new Error(`shape of the new value (${newValue.shape}) and previous value (${this.shape}) must match`); } trackerFn().disposeTensor(this); this.dataId = newValue.dataId; trackerFn().incRef(this, null); } dispose() { trackerFn().disposeVariable(this); this.isDisposedInternal = true; } } Object.defineProperty(Variable, Symbol.hasInstance, { value: (instance) => { return instance instanceof Tensor && instance.assign != null && instance.assign instanceof Function; } }); // node_modules/@tensorflow/tfjs-core/dist/types.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ var Rank; (function(Rank2) { Rank2["R0"] = "R0"; Rank2["R1"] = "R1"; Rank2["R2"] = "R2"; Rank2["R3"] = "R3"; Rank2["R4"] = "R4"; Rank2["R5"] = "R5"; Rank2["R6"] = "R6"; })(Rank || (Rank = {})); var UpcastInt32AndMap; (function(UpcastInt32AndMap2) { UpcastInt32AndMap2["float32"] = "float32"; UpcastInt32AndMap2["int32"] = "int32"; UpcastInt32AndMap2["bool"] = "int32"; UpcastInt32AndMap2["complex64"] = "complex64"; })(UpcastInt32AndMap || (UpcastInt32AndMap = {})); var UpcastBoolAndMap; (function(UpcastBoolAndMap2) { UpcastBoolAndMap2["float32"] = "float32"; UpcastBoolAndMap2["int32"] = "int32"; UpcastBoolAndMap2["bool"] = "bool"; UpcastBoolAndMap2["complex64"] = "complex64"; })(UpcastBoolAndMap || (UpcastBoolAndMap = {})); var UpcastFloat32AndMap; (function(UpcastFloat32AndMap2) { UpcastFloat32AndMap2["float32"] = "float32"; UpcastFloat32AndMap2["int32"] = "float32"; UpcastFloat32AndMap2["bool"] = "float32"; UpcastFloat32AndMap2["complex64"] = "complex64"; })(UpcastFloat32AndMap || (UpcastFloat32AndMap = {})); var UpcastComplex64AndMap; (function(UpcastComplex64AndMap2) { UpcastComplex64AndMap2["float32"] = "complex64"; UpcastComplex64AndMap2["int32"] = "complex64"; UpcastComplex64AndMap2["bool"] = "complex64"; UpcastComplex64AndMap2["complex64"] = "complex64"; })(UpcastComplex64AndMap || (UpcastComplex64AndMap = {})); const upcastTypeMap = { float32: UpcastFloat32AndMap, int32: UpcastInt32AndMap, bool: UpcastBoolAndMap, complex64: UpcastComplex64AndMap }; function upcastType(typeA, typeB) { if (typeA === "string" || typeB === "string") { if (typeA === "string" && typeB === "string") { return "string"; } throw new Error(`Can not upcast ${typeA} with ${typeB}`); } return upcastTypeMap[typeA][typeB]; } function sumOutType(type) { return upcastType(type, "int32"); } // node_modules/@tensorflow/tfjs-core/dist/tensor_util.js const tensor_util_exports = {}; __export(tensor_util_exports, { assertTypesMatch: () => assertTypesMatch, getTensorsInContainer: () => getTensorsInContainer, isTensorInList: () => isTensorInList, makeTypesMatch: () => makeTypesMatch }); /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function makeTypesMatch(a, b) { if (a.dtype === b.dtype) { return [a, b]; } const dtype = upcastType(a.dtype, b.dtype); return [a.cast(dtype), b.cast(dtype)]; } function assertTypesMatch(a, b) { assert(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and second(${b.dtype}) input must match`); } function isTensorInList(tensor17, tensorList) { return tensorList.some((x) => x.id === tensor17.id); } function getTensorsInContainer(result) { const list = []; const seen = new Set(); walkTensorContainer(result, list, seen); return list; } function walkTensorContainer(container, list, seen) { if (container == null) { return; } if (container instanceof Tensor) { list.push(container); return; } if (!isIterable(container)) { return; } const iterable = container; for (const k in iterable) { const val = iterable[k]; if (!seen.has(val)) { seen.add(val); walkTensorContainer(val, list, seen); } } } function isIterable(obj) { return Array.isArray(obj) || typeof obj === "object"; } // node_modules/@tensorflow/tfjs-core/dist/engine.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ class EngineState { constructor() { this.registeredVariables = {}; this.nextTapeNodeId = 0; this.numBytes = 0; this.numTensors = 0; this.numStringTensors = 0; this.numDataBuffers = 0; this.gradientDepth = 0; this.kernelDepth = 0; this.scopeStack = []; this.numDataMovesStack = []; this.nextScopeId = 0; this.tensorInfo = new WeakMap(); this.profiling = false; this.activeProfile = {newBytes: 0, newTensors: 0, peakBytes: 0, kernels: [], result: null}; } dispose() { for (const variableName in this.registeredVariables) { this.registeredVariables[variableName].dispose(); } } } class Engine { constructor(ENV3) { this.ENV = ENV3; this.registry = {}; this.registryFactory = {}; this.pendingBackendInitId = 0; this.state = new EngineState(); } async ready() { if (this.pendingBackendInit != null) { return this.pendingBackendInit.then(() => { }); } if (this.backendInstance != null) { return; } const sortedBackends = this.getSortedBackends(); for (let i = 0; i < sortedBackends.length; i++) { const backendName = sortedBackends[i]; const success = await this.initializeBackend(backendName).success; if (success) { await this.setBackend(backendName); return; } } throw new Error(`Could not initialize any backends, all backend initializations failed.`); } get backend() { if (this.pendingBackendInit != null) { throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`); } if (this.backendInstance == null) { const {name, asyncInit} = this.initializeBackendsAndReturnBest(); if (asyncInit) { throw new Error(`The highest priority backend '${name}' has not yet been initialized. 
Make sure to await tf.ready() or await tf.setBackend() before calling other methods`); } this.setBackend(name); } return this.backendInstance; } backendNames() { return Object.keys(this.registryFactory); } findBackend(backendName) { if (!(backendName in this.registry)) { if (backendName in this.registryFactory) { const {asyncInit} = this.initializeBackend(backendName); if (asyncInit) { return null; } } else { return null; } } return this.registry[backendName]; } findBackendFactory(backendName) { if (!(backendName in this.registryFactory)) { return null; } return this.registryFactory[backendName].factory; } registerBackend(backendName, factory, priority = 1) { if (backendName in this.registryFactory) { console.warn(`${backendName} backend was already registered. Reusing existing backend factory.`); return false; } this.registryFactory[backendName] = {factory, priority}; return true; } async setBackend(backendName) { if (this.registryFactory[backendName] == null) { throw new Error(`Backend name '${backendName}' not found in registry`); } this.backendName = backendName; if (this.registry[backendName] == null) { this.backendInstance = null; const {success, asyncInit} = this.initializeBackend(backendName); const result = asyncInit ? await success : success; if (!result) { return false; } } this.backendInstance = this.registry[backendName]; this.setupRegisteredKernels(); this.profiler = new Profiler(this.backendInstance); return true; } setupRegisteredKernels() { const kernels = getKernelsForBackend(this.backendName); kernels.forEach((kernel) => { if (kernel.setupFunc != null) { kernel.setupFunc(this.backendInstance); } }); } disposeRegisteredKernels(backendName) { const kernels = getKernelsForBackend(backendName); kernels.forEach((kernel) => { if (kernel.disposeFunc != null) { kernel.disposeFunc(this.registry[backendName]); } }); } initializeBackend(backendName) { const registryFactoryEntry = this.registryFactory[backendName]; if (registryFactoryEntry == null) { throw new Error(`Cannot initialize backend ${backendName}, no registration found.`); } try { const backend2 = registryFactoryEntry.factory(); if (Promise.resolve(backend2) === backend2) { const promiseId = ++this.pendingBackendInitId; const success = backend2.then((backendInstance) => { if (promiseId < this.pendingBackendInitId) { return false; } this.registry[backendName] = backendInstance; this.pendingBackendInit = null; return true; }).catch((err) => { if (promiseId < this.pendingBackendInitId) { return false; } this.pendingBackendInit = null; console.warn(`Initialization of backend ${backendName} failed`); console.warn(err.stack || err.message); return false; }); this.pendingBackendInit = success; return {success, asyncInit: true}; } else { this.registry[backendName] = backend2; return {success: true, asyncInit: false}; } } catch (err) { console.warn(`Initialization of backend ${backendName} failed`); console.warn(err.stack || err.message); return {success: false, asyncInit: false}; } } removeBackend(backendName) { if (!(backendName in this.registryFactory)) { throw new Error(`${backendName} backend not found in registry`); } if (this.backendName === backendName && this.pendingBackendInit != null) { this.pendingBackendInitId++; } if (backendName in this.registry) { this.disposeRegisteredKernels(backendName); this.registry[backendName].dispose(); delete this.registry[backendName]; } delete this.registryFactory[backendName]; if (this.backendName === backendName) { this.pendingBackendInit = null; this.backendName = null; 
this.backendInstance = null; } } getSortedBackends() { if (Object.keys(this.registryFactory).length === 0) { throw new Error("No backend found in registry."); } return Object.keys(this.registryFactory).sort((a, b) => { return this.registryFactory[b].priority - this.registryFactory[a].priority; }); } initializeBackendsAndReturnBest() { const sortedBackends = this.getSortedBackends(); for (let i = 0; i < sortedBackends.length; i++) { const backendName = sortedBackends[i]; const {success, asyncInit} = this.initializeBackend(backendName); if (asyncInit || success) { return {name: backendName, asyncInit}; } } throw new Error(`Could not initialize any backends, all backend initializations failed.`); } moveData(backend2, dataId) { const info = this.state.tensorInfo.get(dataId); const srcBackend = info.backend; const values = this.readSync(dataId); srcBackend.disposeData(dataId); info.backend = backend2; backend2.move(dataId, values, info.shape, info.dtype); if (this.shouldCheckForMemLeaks()) { this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++; } } tidy(nameOrFn, fn) { let name = null; if (fn == null) { if (typeof nameOrFn !== "function") { throw new Error("Please provide a function to tidy()"); } fn = nameOrFn; } else { if (typeof nameOrFn !== "string" && !(nameOrFn instanceof String)) { throw new Error("When calling with two arguments, the first argument to tidy() must be a string"); } if (typeof fn !== "function") { throw new Error("When calling with two arguments, the 2nd argument to tidy() must be a function"); } name = nameOrFn; } let result; return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => { result = fn(); if (result instanceof Promise) { console.error("Cannot return a Promise inside of tidy."); } return result; }); } scopedRun(start, end, f) { start(); try { const res = f(); end(); return res; } catch (ex) { end(); throw ex; } } nextTensorId() { return Engine.nextTensorId++; } nextVariableId() { return Engine.nextVariableId++; } clone(x) { const y = this.makeTensorFromDataId(x.dataId, x.shape, x.dtype); const inputs = {x}; const grad2 = (dy) => ({ x: () => { const dtype = "float32"; const gradInputs = {x: dy}; const attrs = {dtype}; return ENGINE.runKernelFunc((backend2) => backend2.cast(dy, dtype), gradInputs, null, Cast, attrs); } }); const saved = []; this.addTapeNode(this.state.activeScope.name, inputs, [y], grad2, saved, {}); return y; } runKernel(kernelName, inputs, attrs, inputsToSave, outputsToSave) { const forwardFunc = null; const backwardsFunc = null; return this.runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave); } shouldCheckForMemLeaks() { return this.ENV.getBool("IS_TEST"); } checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) { const numDataIdsAfter = this.backend.numDataIds(); let numOutputDataIds = 0; outInfos.forEach((info) => { numOutputDataIds += info.dtype === "complex64" ? 3 : 1; }); const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]; const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves; if (dataIdsLeaked > 0) { throw new Error(`Backend '${this.backendName}' has an internal memory leak (${dataIdsLeaked} data ids) after running '${kernelName}'`); } } runKernelFunc(forwardFunc, inputs, backwardsFunc, kernelName, attrs, inputsToSave, outputsToSave) { let outputs; let saved = []; const isTapeOn = this.isTapeOn(); if (kernelName == null) { kernelName = this.state.activeScope != null ? 
this.state.activeScope.name : ""; } const startingBytecount = this.state.numBytes; const startingNumTensors = this.state.numTensors; if (this.shouldCheckForMemLeaks()) { this.state.numDataMovesStack.push(0); } let kernelFunc; const kernel = getKernel(kernelName, this.backendName); let out; if (kernel != null) { kernelFunc = () => { const numDataIdsBefore = this.backend.numDataIds(); out = kernel.kernelFunc({inputs, attrs, backend: this.backend}); const outInfos = Array.isArray(out) ? out : [out]; if (this.shouldCheckForMemLeaks()) { this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos); } const outTensors = outInfos.map(({dataId, shape, dtype}) => this.makeTensorFromDataId(dataId, shape, dtype)); if (isTapeOn) { let tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors); if (tensorsToSave == null) { if (outputsToSave == null) { outputsToSave = []; } const outsToSave = outTensors.filter((_, i) => outputsToSave[i]); tensorsToSave = (inputsToSave || []).slice().concat(outsToSave); } saved = this.saveTensorsForBackwardMode(tensorsToSave); } return outTensors; }; } else { const saveFunc = (tensors) => { if (!isTapeOn) { return; } saved = tensors.map((tensor17) => this.keep(this.clone(tensor17))); }; kernelFunc = () => { const numDataIdsBefore = this.backend.numDataIds(); out = this.tidy(() => forwardFunc(this.backend, saveFunc)); const outs = Array.isArray(out) ? out : [out]; if (this.shouldCheckForMemLeaks()) { this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outs); } return outs; }; } let kernelProfile; this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => { if (!this.ENV.getBool("DEBUG") && !this.state.profiling) { outputs = kernelFunc(); } else { kernelProfile = this.profiler.profileKernel(kernelName, inputs, () => kernelFunc()); if (this.ENV.getBool("DEBUG")) { this.profiler.logKernelProfile(kernelProfile); } outputs = kernelProfile.outputs; } }); if (isTapeOn) { this.addTapeNode(kernelName, inputs, outputs, backwardsFunc, saved, attrs); } if (this.state.profiling) { this.state.activeProfile.kernels.push({ name: kernelName, bytesAdded: this.state.numBytes - startingBytecount, totalBytesSnapshot: this.state.numBytes, tensorsAdded: this.state.numTensors - startingNumTensors, totalTensorsSnapshot: this.state.numTensors, inputShapes: Object.keys(inputs).map((key) => inputs[key] != null ? inputs[key].shape : null), outputShapes: outputs.map((item) => item.shape), kernelTimeMs: kernelProfile.timeMs, extraInfo: kernelProfile.extraInfo }); } return Array.isArray(out) ? 
outputs : outputs[0]; } saveTensorsForBackwardMode(tensors) { const saved = tensors.map((tensor17) => this.keep(this.clone(tensor17))); return saved; } getTensorsForGradient(kernelName, inputs, outputs) { const gradConfig = getGradient(kernelName); if (gradConfig != null) { const inputsToSave = gradConfig.inputsToSave || []; const outputsToSave = gradConfig.outputsToSave || []; let inputTensorsToSave; if (gradConfig.saveAllInputs) { assert(Array.isArray(inputs), () => "saveAllInputs is true, expected inputs to be an array."); inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]); } else { inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]); } const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]); return inputTensorsToSave.concat(outputTensorsToSave); } return null; } makeTensor(values, shape, dtype, backend2) { if (values == null) { throw new Error("Values passed to engine.makeTensor() are null"); } dtype = dtype || "float32"; backend2 = backend2 || this.backend; let backendVals = values; if (dtype === "string" && isString(values[0])) { backendVals = values.map((d) => encodeString(d)); } const dataId = backend2.write(backendVals, shape, dtype); const t = new Tensor(shape, dtype, dataId, this.nextTensorId()); this.incRef(t, backend2); if (dtype === "string") { const info = this.state.tensorInfo.get(dataId); const newBytes = bytesFromStringArray(backendVals); this.state.numBytes += newBytes - info.bytes; info.bytes = newBytes; } return t; } makeTensorFromDataId(dataId, shape, dtype, backend2) { dtype = dtype || "float32"; const t = new Tensor(shape, dtype, dataId, this.nextTensorId()); this.incRef(t, backend2); return t; } makeVariable(initialValue, trainable = true, name, dtype) { name = name || this.nextVariableId().toString(); if (dtype != null && dtype !== initialValue.dtype) { initialValue = initialValue.cast(dtype); } const v = new Variable(initialValue, trainable, name, this.nextTensorId()); if (this.state.registeredVariables[v.name] != null) { throw new Error(`Variable with name ${v.name} was already registered`); } this.state.registeredVariables[v.name] = v; this.incRef(v, this.backend); return v; } incRef(a, backend2) { const refCount = this.state.tensorInfo.has(a.dataId) ? 
this.state.tensorInfo.get(a.dataId).refCount : 0; this.state.numTensors++; if (a.dtype === "string") { this.state.numStringTensors++; } if (refCount === 0) { this.state.numDataBuffers++; let bytes = 0; if (a.dtype !== "complex64" && a.dtype !== "string") { bytes = a.size * bytesPerElement(a.dtype); } this.state.tensorInfo.set(a.dataId, { backend: backend2 || this.backend, dtype: a.dtype, shape: a.shape, bytes, refCount: 0 }); this.state.numBytes += bytes; } this.state.tensorInfo.get(a.dataId).refCount++; if (!(a instanceof Variable)) { this.track(a); } } disposeTensor(a) { if (!this.state.tensorInfo.has(a.dataId)) { return; } this.state.numTensors--; if (a.dtype === "string") { this.state.numStringTensors--; } const info = this.state.tensorInfo.get(a.dataId); const refCount = info.refCount; if (refCount <= 1) { if (a.dtype !== "complex64") { this.state.numBytes -= info.bytes; } this.state.numDataBuffers--; info.backend.disposeData(a.dataId); this.state.tensorInfo.delete(a.dataId); } else { this.state.tensorInfo.get(a.dataId).refCount--; } } disposeVariables() { for (const varName in this.state.registeredVariables) { const v = this.state.registeredVariables[varName]; this.disposeVariable(v); } } disposeVariable(v) { this.disposeTensor(v); if (this.state.registeredVariables[v.name] != null) { delete this.state.registeredVariables[v.name]; } } memory() { const info = this.backend.memory(); info.numTensors = this.state.numTensors; info.numDataBuffers = this.state.numDataBuffers; info.numBytes = this.state.numBytes; if (this.state.numStringTensors > 0) { info.unreliable = true; if (info.reasons == null) { info.reasons = []; } info.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)"); } return info; } async profile(query) { this.state.profiling = true; const startBytes = this.state.numBytes; const startNumTensors = this.state.numTensors; this.state.activeProfile.kernels = []; this.state.activeProfile.result = await query(); this.state.profiling = false; this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map((d) => d.totalBytesSnapshot)); this.state.activeProfile.newBytes = this.state.numBytes - startBytes; this.state.activeProfile.newTensors = this.state.numTensors - startNumTensors; for (const kernel of this.state.activeProfile.kernels) { kernel.kernelTimeMs = await kernel.kernelTimeMs; kernel.extraInfo = await kernel.extraInfo; } return this.state.activeProfile; } isTapeOn() { return this.state.gradientDepth > 0 && this.state.kernelDepth === 0; } addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) { const tapeNode = {id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved}; const gradConfig = getGradient(kernelName); if (gradConfig != null) { gradientsFunc = gradConfig.gradFunc; } if (gradientsFunc != null) { tapeNode.gradient = (dys) => { dys = dys.map((dy, i) => { if (dy == null) { const output = outputs[i]; const vals = makeZerosTypedArray(output.size, output.dtype); return this.makeTensor(vals, output.shape, output.dtype); } return dy; }); return gradientsFunc(dys.length > 1 ? 
dys : dys[0], saved, attrs); }; } this.state.activeTape.push(tapeNode); } keep(result) { result.kept = true; return result; } startTape() { if (this.state.gradientDepth === 0) { this.state.activeTape = []; } this.state.gradientDepth++; } endTape() { this.state.gradientDepth--; } startScope(name) { const scopeInfo = { track: [], name: "unnamed scope", id: this.state.nextScopeId++ }; if (name) { scopeInfo.name = name; } this.state.scopeStack.push(scopeInfo); this.state.activeScope = scopeInfo; } endScope(result) { const tensorsToTrackInParent = getTensorsInContainer(result); const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map((t) => t.id)); for (let i = 0; i < this.state.activeScope.track.length; i++) { const tensor17 = this.state.activeScope.track[i]; if (!tensor17.kept && !tensorsToTrackInParentSet.has(tensor17.id)) { tensor17.dispose(); } } const oldScope = this.state.scopeStack.pop(); this.state.activeScope = this.state.scopeStack.length === 0 ? null : this.state.scopeStack[this.state.scopeStack.length - 1]; tensorsToTrackInParent.forEach((tensor17) => { if (!tensor17.kept && tensor17.scopeId === oldScope.id) { this.track(tensor17); } }); } gradients(f, xs, dy, allowNoGradients = false) { assert(xs.length > 0, () => "gradients() received an empty list of xs."); if (dy != null && dy.dtype !== "float32") { throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`); } const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy("forward", f)); assert(y instanceof Tensor, () => "The result y returned by f() must be a tensor."); const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y); if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) { throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y."); } return this.tidy("backward", () => { const accumulatedGradientMap = {}; accumulatedGradientMap[y.id] = dy == null ? ones(y.shape) : dy; backpropagateGradients(accumulatedGradientMap, filteredTape, (f2) => this.tidy(f2), add); const grads2 = xs.map((x) => accumulatedGradientMap[x.id]); if (this.state.gradientDepth === 0) { this.state.activeTape.forEach((node) => { for (const tensor17 of node.saved) { tensor17.dispose(); } }); this.state.activeTape = null; } return {value: y, grads: grads2}; }); } customGrad(f) { assert(isFunction(f), () => "The f passed in customGrad(f) must be a function."); return (...inputs) => { assert(inputs.every((t) => t instanceof Tensor), () => "The args passed in customGrad(f)(x1, x2,...) must all be tensors"); let res; const inputMap = {}; inputs.forEach((input, i) => { inputMap[i] = input; }); return this.runKernelFunc((_, save) => { res = f(...[...inputs, save]); assert(res.value instanceof Tensor, () => "The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"); assert(isFunction(res.gradFunc), () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."); return res.value; }, inputMap, (dy, saved) => { const gradRes = res.gradFunc(dy, saved); const grads2 = Array.isArray(gradRes) ? 
gradRes : [gradRes]; assert(grads2.length === inputs.length, () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."); assert(grads2.every((t) => t instanceof Tensor), () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors."); const gradMap = {}; grads2.forEach((grad2, i) => { gradMap[i] = () => grad2; }); return gradMap; }); }; } readSync(dataId) { const info = this.state.tensorInfo.get(dataId); return info.backend.readSync(dataId); } read(dataId) { const info = this.state.tensorInfo.get(dataId); return info.backend.read(dataId); } async time(query) { const start = now(); const timingInfo = await this.backend.time(query); timingInfo.wallMs = now() - start; return timingInfo; } track(result) { if (this.state.activeScope != null) { result.scopeId = this.state.activeScope.id; this.state.activeScope.track.push(result); } return result; } get registeredVariables() { return this.state.registeredVariables; } reset() { this.pendingBackendInitId++; this.state.dispose(); this.ENV.reset(); this.state = new EngineState(); for (const backendName in this.registry) { this.disposeRegisteredKernels(backendName); this.registry[backendName].dispose(); delete this.registry[backendName]; } this.backendName = null; this.backendInstance = null; this.pendingBackendInit = null; } } Engine.nextTensorId = 0; Engine.nextVariableId = 0; function ones(shape) { const values = makeOnesTypedArray(sizeFromShape(shape), "float32"); return ENGINE.makeTensor(values, shape, "float32"); } function getOrMakeEngine() { const ns = getGlobalNamespace(); if (ns._tfengine == null) { const environment11 = new Environment(ns); ns._tfengine = new Engine(environment11); } setEnvironmentGlobal(ns._tfengine.ENV); setTensorTracker(() => ns._tfengine); return ns._tfengine; } const ENGINE = getOrMakeEngine(); function add(a, b) { const inputs = {a, b}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.add(a, b); save([a, b]); return res; }, inputs, null, Add); } // node_modules/@tensorflow/tfjs-core/dist/device_util.js const device_util_exports = {}; __export(device_util_exports, { isBrowser: () => isBrowser, isMobile: () => isMobile }); /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function _isNavigatorDefined() { return typeof navigator !== "undefined" && navigator != null; } function isMobile() { if (_isNavigatorDefined()) { const a = navigator.userAgent || navigator.vendor || window.opera; return /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(a) || /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(a.substr(0, 4)); } return false; } function isBrowser() { return typeof window !== "undefined" && window.document != null || typeof WorkerGlobalScope !== "undefined"; } // node_modules/@tensorflow/tfjs-core/dist/flags.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const ENV2 = env(); ENV2.registerFlag("DEBUG", () => false, (debugValue) => { if (debugValue) { console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. 
This significantly impacts performance."); } }); ENV2.registerFlag("IS_BROWSER", () => isBrowser()); ENV2.registerFlag("IS_NODE", () => typeof process !== "undefined" && typeof process.versions !== "undefined" && typeof process.versions.node !== "undefined"); ENV2.registerFlag("IS_CHROME", () => typeof navigator !== "undefined" && navigator != null && navigator.userAgent != null && /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor)); ENV2.registerFlag("PROD", () => false); ENV2.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY", () => ENV2.getBool("DEBUG")); ENV2.registerFlag("DEPRECATION_WARNINGS_ENABLED", () => true); ENV2.registerFlag("IS_TEST", () => false); // node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function inferShape(val, dtype) { let firstElem = val; if (isTypedArray(val)) { return dtype === "string" ? [] : [val.length]; } if (!Array.isArray(val)) { return []; } const shape = []; while (Array.isArray(firstElem) || isTypedArray(firstElem) && dtype !== "string") { shape.push(firstElem.length); firstElem = firstElem[0]; } if (Array.isArray(val) && env().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY")) { deepAssertShapeConsistency(val, shape, []); } return shape; } function deepAssertShapeConsistency(val, shape, indices) { indices = indices || []; if (!Array.isArray(val) && !isTypedArray(val)) { assert(shape.length === 0, () => `Element arr[${indices.join("][")}] is a primitive, but should be an array/TypedArray of ${shape[0]} elements`); return; } assert(shape.length > 0, () => `Element arr[${indices.join("][")}] should be a primitive, but is an array of ${val.length} elements`); assert(val.length === shape[0], () => `Element arr[${indices.join("][")}] should have ${shape[0]} elements, but has ${val.length} elements`); const subShape = shape.slice(1); for (let i = 0; i < val.length; ++i) { deepAssertShapeConsistency(val[i], subShape, indices.concat(i)); } } function assertDtype(expectedDtype, actualDType, argName, functionName) { if (expectedDtype == null) { return; } if (expectedDtype !== "numeric" && expectedDtype !== actualDType || expectedDtype === "numeric" && actualDType === "string") { throw new Error(`Argument '${argName}' passed to '${functionName}' must be ${expectedDtype} tensor, but got ${actualDType} tensor`); } } function convertToTensor(x, argName, functionName, parseAsDtype = "numeric") { if (x instanceof Tensor) { assertDtype(parseAsDtype, x.dtype, argName, functionName); return x; } let inferredDtype = inferDtype(x); if (inferredDtype !== "string" && ["bool", "int32", "float32"].indexOf(parseAsDtype) >= 0) { inferredDtype = parseAsDtype; } assertDtype(parseAsDtype, inferredDtype, argName, functionName); if (x == null || !isTypedArray(x) && !Array.isArray(x) && typeof x !== "number" && typeof x !== "boolean" && typeof x !== "string") { const type = x == 
null ? "null" : x.constructor.name; throw new Error(`Argument '${argName}' passed to '${functionName}' must be a Tensor or TensorLike, but got '${type}'`); } const inferredShape = inferShape(x, inferredDtype); if (!isTypedArray(x) && !Array.isArray(x)) { x = [x]; } const skipTypedArray = true; const values = inferredDtype !== "string" ? toTypedArray(x, inferredDtype) : flatten(x, [], skipTypedArray); return ENGINE.makeTensor(values, inferredShape, inferredDtype); } function convertToTensorArray(arg, argName, functionName, parseAsDtype = "numeric") { if (!Array.isArray(arg)) { throw new Error(`Argument ${argName} passed to ${functionName} must be a \`Tensor[]\` or \`TensorLike[]\``); } const tensors = arg; return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName), parseAsDtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/operation.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function op(f) { const keys = Object.keys(f); if (keys.length !== 1) { throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${keys.length} keys.`); } let opName = keys[0]; const fn = f[opName]; if (opName.endsWith("_")) { opName = opName.substring(0, opName.length - 1); } const f2 = (...args) => { ENGINE.startScope(opName); try { const result = fn(...args); if (result instanceof Promise) { console.error("Cannot return a Promise inside of tidy."); } ENGINE.endScope(result); return result; } catch (ex) { ENGINE.endScope(null); throw ex; } }; Object.defineProperty(f2, "name", {value: opName, configurable: true}); return f2; } // node_modules/@tensorflow/tfjs-core/dist/ops/abs.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function abs_(x) { const $x = convertToTensor(x, "x", "abs"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { save([$x]); if ($x.dtype === "complex64") { return backend2.complexAbs($x); } return backend2.abs($x); }, inputs, null, Abs); } const abs = op({abs_}); // node_modules/@tensorflow/tfjs-core/dist/ops/acos.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function acos_(x) { const $x = convertToTensor(x, "x", "acos"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.acos($x); save([$x]); return res; }, inputs, null, Acos); } const acos = op({acos_}); // node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function acosh_(x) { const $x = convertToTensor(x, "x", "acosh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.acosh($x); save([$x]); return res; }, inputs, null, Acosh); } const acosh = op({acosh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/add.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function add_(a, b) { let $a = convertToTensor(a, "a", "add"); let $b = convertToTensor(b, "b", "add"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.add($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Add); } const add2 = op({add_}); // node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function addN_(tensors) { assert(Array.isArray(tensors), () => "The argument passed to tf.addN() must be a list of tensors"); assert(tensors.length >= 1, () => `Must pass at least one tensor to tf.addN(), but got ${tensors.length}`); const $tensors = tensors.map((t, i) => convertToTensor(t, `tensors${i}`, "addN")); const firstTensor = $tensors[0]; $tensors.forEach((t) => { if (t.dtype !== firstTensor.dtype) { throw new Error("All tensors passed to tf.addN() must have the same dtype"); } }); $tensors.forEach((t) => { if (!arraysEqual(t.shape, firstTensor.shape)) { throw new Error("All tensors passed to tf.addN() must have the same shape"); } }); const forward = (backend2, save) => { const res = backend2.addN($tensors); save($tensors); return res; }; const inputs = $tensors; return ENGINE.runKernelFunc(forward, inputs, null, AddN); } const addN = op({addN_}); // node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function axesAreInnerMostDims(axes, rank) { for (let i = 0; i < axes.length; ++i) { if (axes[axes.length - i - 1] !== rank - 1 - i) { return false; } } return true; } function combineLocations(outputLoc, reduceLoc, axes) { const rank = outputLoc.length + reduceLoc.length; const loc = []; let outIdx = 0; let reduceIdx = 0; for (let dim = 0; dim < rank; dim++) { if (axes.indexOf(dim) === -1) { loc.push(outputLoc[outIdx++]); } else { loc.push(reduceLoc[reduceIdx++]); } } return loc; } function computeOutAndReduceShapes(aShape, axes) { const outShape = []; const rank = aShape.length; for (let dim = 0; dim < rank; dim++) { if (axes.indexOf(dim) === -1) { outShape.push(aShape[dim]); } } const reduceShape = axes.map((dim) => aShape[dim]); return [outShape, reduceShape]; } function expandShapeToKeepDim(shape, axes) { const reduceSubShape = axes.map((x) => 1); return combineLocations(shape, reduceSubShape, axes); } function assertAxesAreInnerMostDims(msg, axes, rank) { assert(axesAreInnerMostDims(axes, rank), () => `${msg} supports only inner-most axes for now. Got axes ${axes} and rank-${rank} input.`); } function getAxesPermutation(axes, rank) { if (axesAreInnerMostDims(axes, rank)) { return null; } const result = []; for (let i = 0; i < rank; ++i) { if (axes.indexOf(i) === -1) { result.push(i); } } axes.forEach((axis) => result.push(axis)); return result; } function getUndoAxesPermutation(axes) { return axes.map((axis, i) => [i, axis]).sort((a, b) => a[1] - b[1]).map((x) => x[0]); } function getInnerMostAxes(numAxes, rank) { const res = []; for (let i = rank - numAxes; i < rank; ++i) { res.push(i); } return res; } // node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reshape_(x, shape) { const $x = convertToTensor(x, "x", "reshape", null); shape = inferFromImplicitShape(shape, $x.size); assert($x.size === sizeFromShape(shape), () => "new shape and old shape must have the same number of elements."); const inputs = {x: $x}; const attrs = {shape}; const forward = (backend2, save) => { save([$x]); return backend2.reshape($x, shape); }; return ENGINE.runKernelFunc(forward, inputs, null, Reshape, attrs); } const reshape = op({reshape_}); // node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function transpose_(x, perm) { const $x = convertToTensor(x, "x", "transpose"); if (perm == null) { perm = $x.shape.map((s, i) => i).reverse(); } assert($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} must match length of perm ${perm}.`); perm.forEach((axis) => { assert(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1} but got ${perm}`); }); if ($x.rank <= 1) { return $x.clone(); } const inputs = {x: $x}; const attrs = {perm}; return ENGINE.runKernelFunc((backend2) => backend2.transpose($x, perm), inputs, null, Transpose, attrs); } const transpose = op({transpose_}); // node_modules/@tensorflow/tfjs-core/dist/ops/all.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function all_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "all", "bool"); const forward = (backend2) => { const origAxes = parseAxisParam(axis, $x.shape); let axes = origAxes; const permutedAxes = getAxesPermutation(axes, $x.rank); if (permutedAxes != null) { $x = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, $x.rank); } const res = backend2.all($x, axes); if (keepDims) { const newShape = expandShapeToKeepDim(res.shape, origAxes); return reshape(res, newShape); } return res; }; const inputs = {x: $x}; const attrs = {axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, All, attrs); } const all = op({all_}); // node_modules/@tensorflow/tfjs-core/dist/ops/any.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function any_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "any", "bool"); const forward = (backend2) => { const origAxes = parseAxisParam(axis, $x.shape); let axes = origAxes; const permutedAxes = getAxesPermutation(axes, $x.rank); if (permutedAxes != null) { $x = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, $x.rank); } const res = backend2.any($x, axes); if (keepDims) { const newShape = expandShapeToKeepDim(res.shape, origAxes); return reshape(res, newShape); } return res; }; const inputs = {x: $x}; const attrs = {axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, Any, attrs); } const any = op({any_}); // node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function argMax_(x, axis = 0) { let $x = convertToTensor(x, "x", "argMax"); const forward = (backend2, save) => { save([$x]); let axes = parseAxisParam(axis, $x.shape); const permutedAxes = getAxesPermutation(axes, $x.rank); if (permutedAxes != null) { $x = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, $x.rank); } return backend2.argMax($x, axes[0]); }; const inputs = {x: $x}; const attrs = {axis}; return ENGINE.runKernelFunc(forward, inputs, null, ArgMax, attrs); } const argMax = op({argMax_}); // node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js /** * @license * Copyright 2020 Google Inc. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function argMin_(x, axis = 0) { let $x = convertToTensor(x, "x", "argMin"); const forward = (backend2, save) => { save([$x]); if (axis == null) { axis = 0; } let axes = parseAxisParam(axis, $x.shape); const permutedAxes = getAxesPermutation(axes, $x.rank); if (permutedAxes != null) { $x = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, $x.rank); } return backend2.argMin($x, axes[0]); }; const inputs = {x: $x}; const attrs = {axis}; return ENGINE.runKernelFunc(forward, inputs, null, ArgMin, attrs); } const argMin = op({argMin_}); // node_modules/@tensorflow/tfjs-core/dist/ops/asin.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function asin_(x) { const $x = convertToTensor(x, "x", "asin"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.asin($x); save([$x]); return res; }, inputs, null, Asin); } const asin = op({asin_}); // node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function asinh_(x) { const $x = convertToTensor(x, "x", "asinh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.asinh($x); save([$x]); return res; }, inputs, null, Asinh); } const asinh = op({asinh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/atan.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function atan_(x) { const $x = convertToTensor(x, "x", "atan"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.atan($x); save([$x]); return res; }, inputs, null, Atan); } const atan = op({atan_}); // node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function atan2_(a, b) { let $a = convertToTensor(a, "a", "atan2"); let $b = convertToTensor(b, "b", "atan2"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.atan2($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Atan2); } const atan2 = op({atan2_}); // node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function atanh_(x) { const $x = convertToTensor(x, "x", "atanh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.atanh($x); save([$x]); return res; }, inputs, null, Atanh); } const atanh = op({atanh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/cast.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function cast_(x, dtype) { const $x = convertToTensor(x, "x", "cast"); if (!isValidDtype(dtype)) { throw new Error(`Failed to cast to unknown dtype ${dtype}`); } if (dtype === "string" && $x.dtype !== "string" || dtype !== "string" && $x.dtype === "string") { throw new Error("Only strings can be casted to strings"); } const inputs = {x: $x}; const attrs = {dtype}; return ENGINE.runKernelFunc((backend2) => backend2.cast($x, dtype), inputs, null, Cast, attrs); } const cast = op({cast_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function computeDilation2DInfo(inputShape, filterShape, strides, pad8, dataFormat = "NHWC", dilations) { const inputChannels = inputShape[3]; const $filterShape = [...filterShape, inputChannels]; const $dataFormat = convertConv2DDataFormat(dataFormat); return computeConv2DInfo(inputShape, $filterShape, strides, dilations, pad8, null, null, $dataFormat); } function computePool2DInfo(inShape, filterSize, strides, dilations, pad8, roundingMode, dataFormat = "channelsLast") { const [filterHeight, filterWidth] = parseTupleParam(filterSize); let filterShape; if (dataFormat === "channelsLast") { filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]]; } else if (dataFormat === "channelsFirst") { filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]]; } else { throw new Error(`Unknown dataFormat ${dataFormat}`); } return computeConv2DInfo(inShape, filterShape, strides, dilations, pad8, roundingMode, false, dataFormat); } function computePool3DInfo(inShape, filterSize, strides, dilations, pad8, roundingMode, dataFormat = "NDHWC") { const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize); let filterShape; let $dataFormat; if (dataFormat === "NDHWC") { $dataFormat = "channelsLast"; filterShape = [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]]; } else if (dataFormat === "NCDHW") { $dataFormat = "channelsFirst"; filterShape = [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]]; } else { throw new Error(`Unknown dataFormat ${dataFormat}`); } return computeConv3DInfo(inShape, filterShape, strides, dilations, pad8, false, $dataFormat, roundingMode); } function computeConv2DInfo(inShape, filterShape, strides, dilations, pad8, roundingMode, depthwise = false, dataFormat = "channelsLast") { let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1]; if (dataFormat === "channelsLast") { [batchSize, inHeight, inWidth, inChannels] = inShape; } else if (dataFormat === "channelsFirst") { [batchSize, inChannels, inHeight, inWidth] = inShape; } else { throw new Error(`Unknown dataFormat ${dataFormat}`); } const [filterHeight, filterWidth, , filterChannels] = filterShape; const [strideHeight, strideWidth] = parseTupleParam(strides); const 
[dilationHeight, dilationWidth] = parseTupleParam(dilations); const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); const {padInfo, outHeight, outWidth} = getPadAndOutInfo(pad8, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat); const outChannels = depthwise ? filterChannels * inChannels : filterChannels; let outShape; if (dataFormat === "channelsFirst") { outShape = [batchSize, outChannels, outHeight, outWidth]; } else if (dataFormat === "channelsLast") { outShape = [batchSize, outHeight, outWidth, outChannels]; } return { batchSize, dataFormat, inHeight, inWidth, inChannels, outHeight, outWidth, outChannels, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, effectiveFilterHeight, effectiveFilterWidth, dilationHeight, dilationWidth, inShape, outShape, filterShape }; } function computeConv3DInfo(inShape, filterShape, strides, dilations, pad8, depthwise = false, dataFormat = "channelsLast", roundingMode) { let [batchSize, inDepth, inHeight, inWidth, inChannels] = [-1, -1, -1, -1, -1]; if (dataFormat === "channelsLast") { [batchSize, inDepth, inHeight, inWidth, inChannels] = inShape; } else if (dataFormat === "channelsFirst") { [batchSize, inChannels, inDepth, inHeight, inWidth] = inShape; } else { throw new Error(`Unknown dataFormat ${dataFormat}`); } const [filterDepth, filterHeight, filterWidth, , filterChannels] = filterShape; const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides); const [dilationDepth, dilationHeight, dilationWidth] = parse3TupleParam(dilations); const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth); const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); const {padInfo, outDepth, outHeight, outWidth} = get3DPadAndOutInfo(pad8, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode); const outChannels = depthwise ? filterChannels * inChannels : filterChannels; let outShape; if (dataFormat === "channelsFirst") { outShape = [batchSize, outChannels, outDepth, outHeight, outWidth]; } else if (dataFormat === "channelsLast") { outShape = [batchSize, outDepth, outHeight, outWidth, outChannels]; } return { batchSize, dataFormat, inDepth, inHeight, inWidth, inChannels, outDepth, outHeight, outWidth, outChannels, padInfo, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, dilationDepth, dilationHeight, dilationWidth, inShape, outShape, filterShape }; } function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) { if (zeroPad == null) { zeroPad = computeDefaultPad(inShape, fieldSize, stride); } const inputRows = inShape[0]; const inputCols = inShape[1]; const outputRows = conditionalRound((inputRows - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); assert(isInt(outputRows), () => `The output # of rows (${outputRows}) must be an integer. Change the stride and/or zero pad parameters`); const outputCols = conditionalRound((inputCols - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); assert(isInt(outputCols), () => `The output # of columns (${outputCols}) must be an integer. 
Change the stride and/or zero pad parameters`); return [outputRows, outputCols]; } function computeOutputShape4D(inShape, fieldSize, outChannels, stride, zeroPad, roundingMode) { if (zeroPad == null) { zeroPad = computeDefaultPad(inShape, fieldSize, stride); } const inputDepth = inShape[0]; const inputRows = inShape[1]; const inputCols = inShape[2]; const outputDepths = conditionalRound((inputDepth - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); assert(isInt(outputDepths), () => `The output # of depths (${outputDepths}) must be an integer. Change the stride and/or zero pad parameters`); const outputRows = conditionalRound((inputRows - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); assert(isInt(outputRows), () => `The output # of rows (${outputRows}) must be an integer. Change the stride and/or zero pad parameters`); const outputCols = conditionalRound((inputCols - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); assert(isInt(outputCols), () => `The output # of columns (${outputCols}) must be an integer. Change the stride and/or zero pad parameters`); return [outputDepths, outputRows, outputCols, outChannels]; } function computeDefaultPad(inputShape, fieldSize, stride, dilation = 1) { const effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation); return Math.floor((inputShape[0] * (stride - 1) - stride + effectiveFieldSize) / 2); } function parseTupleParam(param) { if (typeof param === "number") { return [param, param, param]; } if (param.length === 2) { return [param[0], param[1], 1]; } return param; } function parse3TupleParam(param) { return typeof param === "number" ? [param, param, param] : param; } function getEffectiveFilterSize(filterSize, dilation) { if (dilation <= 1) { return filterSize; } return filterSize + (filterSize - 1) * (dilation - 1); } function getPadAndOutInfo(pad8, inHeight, inWidth, strideHeight, strideWidth, filterHeight, filterWidth, roundingMode, dataFormat) { let padInfo; let outHeight; let outWidth; if (typeof pad8 === "number") { const padType = pad8 === 0 ? "VALID" : "NUMBER"; padInfo = {top: pad8, bottom: pad8, left: pad8, right: pad8, type: padType}; const outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad8, roundingMode); outHeight = outShape[0]; outWidth = outShape[1]; } else if (pad8 === "same") { outHeight = Math.ceil(inHeight / strideHeight); outWidth = Math.ceil(inWidth / strideWidth); const padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight); const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth); const top = Math.floor(padAlongHeight / 2); const bottom = padAlongHeight - top; const left = Math.floor(padAlongWidth / 2); const right = padAlongWidth - left; padInfo = {top, bottom, left, right, type: "SAME"}; } else if (pad8 === "valid") { padInfo = {top: 0, bottom: 0, left: 0, right: 0, type: "VALID"}; outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight); outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth); } else if (typeof pad8 === "object") { const top = dataFormat === "channelsLast" ? pad8[1][0] : pad8[2][0]; const bottom = dataFormat === "channelsLast" ? pad8[1][1] : pad8[2][1]; const left = dataFormat === "channelsLast" ? pad8[2][0] : pad8[3][0]; const right = dataFormat === "channelsLast" ? pad8[2][1] : pad8[3][1]; const padType = top === 0 && bottom === 0 && left === 0 && right === 0 ? 
"VALID" : "EXPLICIT"; padInfo = {top, bottom, left, right, type: padType}; outHeight = conditionalRound((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode); outWidth = conditionalRound((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode); } else { throw Error(`Unknown padding parameter: ${pad8}`); } return {padInfo, outHeight, outWidth}; } function get3DPadAndOutInfo(pad8, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, roundingMode) { let padInfo; let outDepth; let outHeight; let outWidth; if (typeof pad8 === "number") { const padType = pad8 === 0 ? "VALID" : "NUMBER"; padInfo = { top: pad8, bottom: pad8, left: pad8, right: pad8, front: pad8, back: pad8, type: padType }; const outShape = computeOutputShape4D([inDepth, inHeight, inWidth, 1], filterDepth, 1, strideDepth, pad8, roundingMode); outDepth = outShape[0]; outHeight = outShape[1]; outWidth = outShape[2]; } else if (pad8 === "same") { outDepth = Math.ceil(inDepth / strideDepth); outHeight = Math.ceil(inHeight / strideHeight); outWidth = Math.ceil(inWidth / strideWidth); const padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth; const padAlongHeight = (outHeight - 1) * strideHeight + filterHeight - inHeight; const padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth; const front = Math.floor(padAlongDepth / 2); const back = padAlongDepth - front; const top = Math.floor(padAlongHeight / 2); const bottom = padAlongHeight - top; const left = Math.floor(padAlongWidth / 2); const right = padAlongWidth - left; padInfo = {top, bottom, left, right, front, back, type: "SAME"}; } else if (pad8 === "valid") { padInfo = { top: 0, bottom: 0, left: 0, right: 0, front: 0, back: 0, type: "VALID" }; outDepth = Math.ceil((inDepth - filterDepth + 1) / strideDepth); outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight); outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth); } else { throw Error(`Unknown padding parameter: ${pad8}`); } return {padInfo, outDepth, outHeight, outWidth}; } function conditionalRound(value, roundingMode) { if (!roundingMode) { return value; } switch (roundingMode) { case "round": return Math.round(value); case "ceil": return Math.ceil(value); case "floor": return Math.floor(value); default: throw new Error(`Unknown roundingMode ${roundingMode}`); } } function tupleValuesAreOne(param) { const [dimA, dimB, dimC] = parseTupleParam(param); return dimA === 1 && dimB === 1 && dimC === 1; } function eitherStridesOrDilationsAreOne(strides, dilations) { return tupleValuesAreOne(strides) || tupleValuesAreOne(dilations); } function convertConv2DDataFormat(dataFormat) { if (dataFormat === "NHWC") { return "channelsLast"; } else if (dataFormat === "NCHW") { return "channelsFirst"; } else { throw new Error(`Unknown dataFormat ${dataFormat}`); } } // node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function avgPool_(x, filterSize, strides, pad8, dimRoundingMode) { const $x = convertToTensor(x, "x", "avgPool", "float32"); const dilations = 1; assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in avgPool: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in avgPool: x must be rank 4 but got rank ${x4D.rank}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in avgPool: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { const convInfo = computePool2DInfo(x4D.shape, filterSize, strides, 1, pad8, dimRoundingMode); save([x4D]); if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && arraysEqual(convInfo.inShape, convInfo.outShape)) { return x4D.clone(); } return backend2.avgPool(x4D, convInfo); }; const inputs = {x: x4D}; const attrs = {filterSize, strides, pad: pad8, dimRoundingMode}; let res = ENGINE.runKernelFunc(forward, inputs, null, AvgPool, attrs); res = cast(res, $x.dtype); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const avgPool = op({avgPool_}); // node_modules/@tensorflow/tfjs-core/dist/globals.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function enableProdMode() { env().set("PROD", true); } function enableDebugMode() { env().set("DEBUG", true); } function disableDeprecationWarnings() { env().set("DEPRECATION_WARNINGS_ENABLED", false); console.warn(`TensorFlow.js deprecation warnings have been disabled.`); } function deprecationWarn(msg) { if (env().getBool("DEPRECATION_WARNINGS_ENABLED")) { console.warn(msg + " You can disable deprecation warnings with tf.disableDeprecationWarnings()."); } } setDeprecationWarningFn(deprecationWarn); function disposeVariables() { ENGINE.disposeVariables(); } function engine22() { return ENGINE; } function memory() { return ENGINE.memory(); } function profile(f) { return ENGINE.profile(f); } function tidy(nameOrFn, fn) { return ENGINE.tidy(nameOrFn, fn); } function dispose(container) { const tensors = getTensorsInContainer(container); tensors.forEach((tensor17) => tensor17.dispose()); } function keep(result) { return ENGINE.keep(result); } function time(f) { return ENGINE.time(f); } function setBackend(backendName) { return ENGINE.setBackend(backendName); } function ready() { return ENGINE.ready(); } function getBackend() { return ENGINE.backendName; } function removeBackend(name) { ENGINE.removeBackend(name); } function findBackend(name) { return ENGINE.findBackend(name); } function findBackendFactory(name) { return ENGINE.findBackendFactory(name); } function registerBackend(name, factory, priority = 1) { return ENGINE.registerBackend(name, factory, priority); } function backend() { return ENGINE.backend; } function setPlatform(platformName, platform) { env().setPlatform(platformName, platform); } // node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function avgPool3d_(x, filterSize, strides, pad8, dimRoundingMode, dataFormat = "NDHWC", dilations) { if (dilations == null) { dilations = [1, 1, 1]; } else { deprecationWarn("dilations is deprecated, this field will be gone in v3.0.0."); } const $x = convertToTensor(x, "x", "avgPool3d", "float32"); let x5D = $x; let reshapedTo5D = false; if ($x.rank === 4) { reshapedTo5D = true; x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); } assert(x5D.rank === 5, () => `Error in avgPool3d: x must be rank 5 but got rank ${x5D.rank}.`); assert(dataFormat === "NDHWC", () => `Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${dataFormat}`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in avgPool3d: Either strides or dilations must be 1. 
Got strides ${strides} and dilations '${dilations}'`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in avgPool3d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { if (dilations == null) { dilations = [1, 1, 1]; } const convInfo = computePool3DInfo(x5D.shape, filterSize, strides, dilations, pad8, dimRoundingMode, dataFormat); save([x5D]); return backend2.avgPool3d(x5D, convInfo); }; const inputs = {x: x5D}; const attrs = {filterSize, strides, pad: pad8, dimRoundingMode, dataFormat, dilations}; let res = ENGINE.runKernelFunc(forward, inputs, null, AvgPool3D, attrs); res = cast(res, x5D.dtype); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } const avgPool3d = op({avgPool3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function assertParamsConsistent(shapes, axis) { const rank = shapes[0].length; shapes.forEach((shape, i) => { assert(shape.length === rank, () => `Error in concat${rank}D: rank of tensors[${i}] must be the same as the rank of the rest (${rank})`); }); assert(axis >= 0 && axis < rank, () => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`); const firstShape = shapes[0]; shapes.forEach((shape, i) => { for (let r = 0; r < rank; r++) { assert(r === axis || shape[r] === firstShape[r], () => `Error in concat${rank}D: Shape of tensors[${i}] (${shape}) does not match the shape of the rest (${firstShape}) along the non-concatenated axis ${i}.`); } }); } function computeOutShape(shapes, axis) { const outputShape = shapes[0].slice(); for (let i = 1; i < shapes.length; i++) { outputShape[axis] += shapes[i][axis]; } return outputShape; } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function makeTensor(values, shape, inferredShape, dtype) { if (dtype == null) { dtype = inferDtype(values); } if (dtype === "complex64") { throw new Error(`Cannot construct a complex64 tensor directly. 
Please use tf.complex(real, imag).`); } if (!isTypedArray(values) && !Array.isArray(values) && typeof values !== "number" && typeof values !== "boolean" && typeof values !== "string") { throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray"); } if (shape != null) { assertNonNegativeIntegerDimensions(shape); const providedSize = sizeFromShape(shape); const inferredSize = sizeFromShape(inferredShape); assert(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ${providedSize} values but has ${inferredSize}`); for (let i = 0; i < inferredShape.length; ++i) { const inferred = inferredShape[i]; const flatDimsDontMatch = i === inferredShape.length - 1 ? inferred !== sizeFromShape(shape.slice(i)) : true; assert(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape (${inferredShape}) does not match the provided shape (${shape}). `); } } if (!isTypedArray(values) && !Array.isArray(values)) { values = [values]; } shape = shape || inferredShape; values = dtype !== "string" ? toTypedArray(values, dtype) : flatten(values, [], true); return ENGINE.makeTensor(values, shape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor5(values, shape, dtype) { const inferredShape = inferShape(values, dtype); return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/concat.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function concat_(tensors, axis = 0) { assert(tensors.length >= 1, () => "Pass at least one tensor to concat"); let $tensors = convertToTensorArray(tensors, "tensors", "concat"); if ($tensors[0].dtype === "complex64") { $tensors.forEach((tensor17) => { if (tensor17.dtype !== "complex64") { throw new Error(`Cannot concatenate complex64 tensors with a tensor with dtype ${tensor17.dtype}. 
`); } }); } const $axis = parseAxisParam(axis, $tensors[0].shape)[0]; const outShape = computeOutShape($tensors.map((t) => t.shape), $axis); if (sizeFromShape(outShape) === 0) { return tensor5([], outShape); } $tensors = $tensors.filter((t) => t.size > 0); if ($tensors.length === 1) { return $tensors[0]; } const shapes = $tensors.map((t) => t.shape); assertParamsConsistent(shapes, $axis); const forward = (backend2, save) => { const res = backend2.concat($tensors, $axis); save($tensors); return res; }; const inputs = $tensors; const attr = {axis}; return ENGINE.runKernelFunc(forward, inputs, null, Concat, attr); } const concat = op({concat_}); // node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function matMul_(a, b, transposeA = false, transposeB = false) { let $a = convertToTensor(a, "a", "matMul"); let $b = convertToTensor(b, "b", "matMul"); [$a, $b] = makeTypesMatch($a, $b); assert($a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank, () => `Error in matMul: inputs must have the same rank of at least 2, got ranks ${$a.rank} and ${$b.rank}.`); const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1]; const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2]; const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2]; const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1]; const outerDimsA = $a.shape.slice(0, -2); const outerDimsB = $b.shape.slice(0, -2); const batchDimA = sizeFromShape(outerDimsA); const batchDimB = sizeFromShape(outerDimsB); assert(arraysEqual(outerDimsA, outerDimsB), () => `Error in matMul: outer dimensions (${outerDimsA}) and (${outerDimsB}) of Tensors with shapes ${$a.shape} and ${$b.shape} must match.`); assert(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (${innerShapeB}) of Tensors with shapes ${$a.shape} and ${$b.shape} and transposeA=${transposeA} and transposeB=${transposeB} must match.`); const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]); const a3D = transposeA ? reshape($a, [batchDimA, innerShapeA, outerShapeA]) : reshape($a, [batchDimA, outerShapeA, innerShapeA]); const b3D = transposeB ? reshape($b, [batchDimB, outerShapeB, innerShapeB]) : reshape($b, [batchDimB, innerShapeB, outerShapeB]); const forward = (backend2, save) => { save([a3D, b3D]); return backend2.batchMatMul(a3D, b3D, transposeA, transposeB); }; const inputs = {a: a3D, b: b3D}; const attrs = {transposeA, transposeB}; const res = ENGINE.runKernelFunc(forward, inputs, null, BatchMatMul, attrs); return reshape(res, outShape); } const matMul = op({matMul_}); // node_modules/@tensorflow/tfjs-core/dist/ops/mul.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function mul_(a, b) { let $a = convertToTensor(a, "a", "mul"); let $b = convertToTensor(b, "b", "mul"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.multiply($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Multiply); } const mul = op({mul_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sigmoid_(x) { const $x = convertToTensor(x, "x", "sigmoid"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.sigmoid($x); save([res]); return res; }, inputs, null, Sigmoid); } const sigmoid = op({sigmoid_}); // node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js const slice_util_exports = {}; __export(slice_util_exports, { assertParamsValid: () => assertParamsValid, computeFlatOffset: () => computeFlatOffset, computeOutShape: () => computeOutShape2, isSliceContinous: () => isSliceContinous, maskToAxes: () => maskToAxes, parseSliceParams: () => parseSliceParams, startForAxis: () => startForAxis, startIndicesWithElidedDims: () => startIndicesWithElidedDims, stopForAxis: () => stopForAxis, stopIndicesWithElidedDims: () => stopIndicesWithElidedDims, stridesForAxis: () => stridesForAxis, stridesWithElidedDims: () => stridesWithElidedDims }); /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function assertParamsValid(input, begin, size) { assert(input.rank === begin.length, () => `Error in slice${input.rank}D: Length of begin ${begin} must match the rank of the array (${input.rank}).`); assert(input.rank === size.length, () => `Error in slice${input.rank}D: Length of size ${size} must match the rank of the array (${input.rank}).`); for (let i = 0; i < input.rank; ++i) { assert(begin[i] + size[i] <= input.shape[i], () => `Error in slice${input.rank}D: begin[${i}] + size[${i}] (${begin[i] + size[i]}) would overflow input.shape[${i}] (${input.shape[i]})`); } } function maskToAxes(mask) { const axes = []; let axis = 0; while (mask > 0) { if (mask & 1) { axes.push(axis); } mask /= 2; axis++; } return axes; } function computeOutShape2(begin, end, strides) { const size = []; for (let axis = 0; axis < begin.length; axis++) { size[axis] = Math.ceil((end[axis] - begin[axis]) / strides[axis]); } return size; } function stridesWithElidedDims(strides, ellipsisInsertionIndex, numElidedAxes, inputShape) { const newStrides = [...strides]; for (let i = newStrides.length; i < inputShape.length; i++) { newStrides.push(1); } for (let i = 0; i < numElidedAxes; i++) { if (i === 0) { newStrides[ellipsisInsertionIndex] = 1; } else { newStrides.splice(ellipsisInsertionIndex, 0, 1); newStrides.pop(); } } return newStrides; } function unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, normalizedAxis) { if (normalizedAxis <= ellipsisInsertionIndex) { return normalizedAxis; } return normalizedAxis - (numElidedAxes - 1); } function getElidedAxes(numElidedAxes, ellipsisInsertionIndex) { const elidedAxes = []; for (let i = 0; i < numElidedAxes; i++) { elidedAxes.push(ellipsisInsertionIndex + i); } return elidedAxes; } function startIndicesWithElidedDims(beginMask, ellipsisInsertionIndex, numElidedAxes, originalBegin, inputShape) { const newIndices = [...inputShape]; const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex); for (let axis = 0; axis < newIndices.length; axis++) { if (elidedAxes.indexOf(axis) > -1) { newIndices[axis] = 0; } else { const originalAxis = unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis); let originalValue = originalBegin[originalAxis]; if (beginMask & 1 << originalAxis) { originalValue = 0; } newIndices[axis] = originalValue; } } return newIndices; } function stopIndicesWithElidedDims(endMask, ellipsisInsertionIndex, numElidedAxes, originalEnd, inputShape) { const newIndices = [...inputShape]; const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex); for (let axis = 0; axis < newIndices.length; axis++) { if (elidedAxes.indexOf(axis) > -1) { newIndices[axis] = Number.MAX_SAFE_INTEGER; } else { const originalAxis = unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis); let originalValue = originalEnd[originalAxis]; if (endMask & 1 << originalAxis) { originalValue = Number.MAX_SAFE_INTEGER; } newIndices[axis] = originalValue; } } for (let i = 0; i < newIndices.length; i++) { const axisSize = inputShape[i]; if (newIndices[i] < 0) { newIndices[i] += axisSize; } newIndices[i] = clamp(0, newIndices[i], inputShape[i]); } return newIndices; } function stridesForAxis(strides, axis, ellipsisMask) { let stride = strides[axis]; if (ellipsisMask & 1 << axis || stride == null) { stride = 1; } return stride; } function startForAxis(beginMask, startIndices, strides, inputShape, axis, ellipsisMask) { let start = startIndices[axis]; const stride = strides[axis] || 1; 
if (beginMask & 1 << axis || ellipsisMask & 1 << axis || start == null) { if (stride > 0) { start = Number.MIN_SAFE_INTEGER; } else { start = Number.MAX_SAFE_INTEGER; } } const axisSize = inputShape[axis]; if (start < 0) { start += axisSize; } start = clamp(0, start, axisSize - 1); return start; } function stopForAxis(endMask, stopIndices, strides, inputShape, axis, ellipsisMask) { let stop = stopIndices[axis]; const stride = strides[axis] || 1; if (endMask & 1 << axis || ellipsisMask & 1 << axis || stop == null) { if (stride > 0) { stop = Number.MAX_SAFE_INTEGER; } else { stop = Number.MIN_SAFE_INTEGER; } } const axisSize = inputShape[axis]; if (stop < 0) { stop += axisSize; } if (stride > 0) { stop = clamp(0, stop, axisSize); } else { stop = clamp(-1, stop, axisSize - 1); } return stop; } function isSliceContinous(shape, begin, size) { let firstNonOneAxis = size.length; for (let i = 0; i < size.length; i++) { if (size[i] > 1) { firstNonOneAxis = i; break; } } for (let i = firstNonOneAxis + 1; i < size.length; i++) { if (begin[i] > 0 || size[i] !== shape[i]) { return false; } } return true; } function computeFlatOffset(begin, strides) { let flatOffset = begin.length > 0 ? begin[begin.length - 1] : 1; for (let i = 0; i < begin.length - 1; i++) { flatOffset += begin[i] * strides[i]; } return flatOffset; } function parseSliceParams(x, begin, size) { let begin_; if (typeof begin === "number") { begin_ = [begin, ...new Array(x.rank - 1).fill(0)]; } else if (begin.length < x.rank) { begin_ = begin.concat(new Array(x.rank - begin.length).fill(0)); } else { begin_ = begin.slice(); } begin_.forEach((d) => { assert(d !== -1, () => "slice() does not support negative begin indexing."); }); let size_; if (size == null) { size_ = new Array(x.rank).fill(-1); } else if (typeof size === "number") { size_ = [size, ...new Array(x.rank - 1).fill(-1)]; } else if (size.length < x.rank) { size_ = size.concat(new Array(x.rank - size.length).fill(-1)); } else { size_ = size; } size_ = size_.map((d, i) => { if (d >= 0) { return d; } else { assert(d === -1, () => `Negative size values should be exactly -1 but got ${d} for the slice() size at index ${i}.`); return x.shape[i] - begin_[i]; } }); return [begin_, size_]; } // node_modules/@tensorflow/tfjs-core/dist/ops/slice.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function slice_(x, begin, size) { const $x = convertToTensor(x, "x", "slice"); if ($x.rank === 0) { throw new Error("Slicing scalar is not possible"); } const [begin_, size_] = parseSliceParams($x, begin, size); assertParamsValid($x, begin_, size_); const forward = (backend2, save) => { save([$x]); return backend2.slice($x, begin_, size_); }; const inputs = {x: $x}; const attrs = {begin, size}; return ENGINE.runKernelFunc(forward, inputs, null, Slice, attrs); } const slice = op({slice_}); // node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tanh_(x) { const $x = convertToTensor(x, "x", "tanh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const y = backend2.tanh($x); save([y]); return y; }, inputs, null, Tanh); } const tanh2 = op({tanh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) { const $forgetBias = convertToTensor(forgetBias, "forgetBias", "basicLSTMCell"); const $lstmKernel = convertToTensor(lstmKernel, "lstmKernel", "basicLSTMCell"); const $lstmBias = convertToTensor(lstmBias, "lstmBias", "basicLSTMCell"); const $data = convertToTensor(data, "data", "basicLSTMCell"); const $c = convertToTensor(c, "c", "basicLSTMCell"); const $h = convertToTensor(h, "h", "basicLSTMCell"); const combined = concat([$data, $h], 1); const weighted = matMul(combined, $lstmKernel); const res = add2(weighted, $lstmBias); const batchSize = res.shape[0]; const sliceCols = res.shape[1] / 4; const sliceSize = [batchSize, sliceCols]; const i = slice(res, [0, 0], sliceSize); const j = slice(res, [0, sliceCols], sliceSize); const f = slice(res, [0, sliceCols * 2], sliceSize); const o = slice(res, [0, sliceCols * 3], sliceSize); const newC = add2(mul(sigmoid(i), tanh2(j)), mul($c, sigmoid(add2($forgetBias, f)))); const newH = mul(tanh2(newC), sigmoid(o)); return [newC, newH]; } const basicLSTMCell = op({basicLSTMCell_}); // node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function batchToSpaceND_(x, blockShape, crops) { const $x = convertToTensor(x, "x", "batchToSpaceND"); const prod3 = blockShape.reduce((a, b) => a * b); assert($x.rank >= 1 + blockShape.length, () => `input rank is ${$x.rank} but should be > than blockShape.length ${blockShape.length}`); assert(crops.length === blockShape.length, () => `crops.length is ${crops.length} but should be equal to blockShape.length ${blockShape.length}`); assert($x.shape[0] % prod3 === 0, () => `input tensor batch is ${$x.shape[0]} but is not divisible by the product of the elements of blockShape ${blockShape.join(" * ")} === ${prod3}`); const forward = (backend2) => { return backend2.batchToSpaceND($x, blockShape, crops); }; const inputs = {x: $x}; const attrs = {blockShape, crops}; return ENGINE.runKernelFunc(forward, inputs, null, BatchToSpaceND, attrs); } const batchToSpaceND = op({batchToSpaceND_}); // node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm_util.js function xAs4D(x) { let x4D; if (x.rank === 0 || x.rank === 1) { x4D = reshape(x, [1, 1, 1, x.size]); } else if (x.rank === 2) { x4D = reshape(x, [1, 1, x.shape[0], x.shape[1]]); } else if (x.rank === 3) { x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]); } else { x4D = x; } return x4D; } // node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function batchNorm_(x, mean5, variance, offset, scale, varianceEpsilon) { if (varianceEpsilon == null) { varianceEpsilon = 1e-3; } const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean5, "mean", "batchNorm"); const $variance = convertToTensor(variance, "variance", "batchNorm"); let $scale; if (scale != null) { $scale = convertToTensor(scale, "scale", "batchNorm"); } let $offset; if (offset != null) { $offset = convertToTensor(offset, "offset", "batchNorm"); } assert($mean.rank === $variance.rank, () => "Batch normalization gradient requires mean and variance to have equal ranks."); assert($offset == null || $mean.rank === $offset.rank, () => "Batch normalization gradient requires mean and offset to have equal ranks."); assert($scale == null || $mean.rank === $scale.rank, () => "Batch normalization gradient requires mean and scale to have equal ranks."); const x4D = xAs4D($x); const forward = (backend2, save) => { save([x4D, $mean, $variance, $scale]); return backend2.batchNorm(x4D, as1DOr4D($mean), as1DOr4D($variance), as1DOr4D($offset), as1DOr4D($scale), varianceEpsilon); }; const inputs = { x: x4D, scale: $scale, offset: $offset, mean: $mean, variance: $variance }; const attrs = {varianceEpsilon}; const res = ENGINE.runKernelFunc(forward, inputs, null, FusedBatchNorm, attrs); return reshape(res, $x.shape); } function as1DOr4D(x) { if (x == null) { return null; } if (x.rank === 0) { return reshape(x, [x.size]); } else if (x.rank === 1) { return x; } else if (x.rank === 2) { return reshape(x, [1, 1, x.shape[0], x.shape[1]]); } else if (x.rank === 3) { return reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]); } return x; } const batchNorm = op({batchNorm_}); // node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js function batchNorm2d_(x, mean5, variance, offset, scale, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean5, "mean", "batchNorm"); const $variance = convertToTensor(variance, "variance", "batchNorm"); let $scale; if (scale != null) { $scale = convertToTensor(scale, "scale", "batchNorm"); } let $offset; if (offset != null) { $offset = convertToTensor(offset, "offset", "batchNorm"); } assert($x.rank === 2, () => `Error in batchNorm2D: x must be rank 2 but got rank ${$x.rank}.`); assert($mean.rank === 2 || $mean.rank === 1, () => `Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${$mean.rank}.`); assert($variance.rank === 2 || $variance.rank === 1, () => `Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank ${$variance.rank}.`); if ($scale != null) { assert($scale.rank === 2 || $scale.rank === 1, () => `Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${$scale.rank}.`); } if ($offset != null) { assert($offset.rank === 2 || $offset.rank === 1, () => `Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${$offset.rank}.`); } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } const batchNorm2d = op({batchNorm2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js function batchNorm3d_(x, mean5, variance, offset, scale, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean5, "mean", "batchNorm"); const $variance = convertToTensor(variance, "variance", "batchNorm"); let $scale; if (scale != null) { $scale = convertToTensor(scale, "scale", "batchNorm"); } let $offset; if 
(offset != null) { $offset = convertToTensor(offset, "offset", "batchNorm"); } assert($x.rank === 3, () => `Error in batchNorm3D: x must be rank 3 but got rank ${$x.rank}.`); assert($mean.rank === 3 || $mean.rank === 1, () => `Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${$mean.rank}.`); assert($variance.rank === 3 || $variance.rank === 1, () => `Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${$variance.rank}.`); if ($scale != null) { assert($scale.rank === 3 || $scale.rank === 1, () => `Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${$scale.rank}.`); } if ($offset != null) { assert($offset.rank === 3 || $offset.rank === 1, () => `Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${$offset.rank}.`); } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } const batchNorm3d = op({batchNorm3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js function batchNorm4d_(x, mean5, variance, offset, scale, varianceEpsilon) { const $x = convertToTensor(x, "x", "batchNorm"); const $mean = convertToTensor(mean5, "mean", "batchNorm"); const $variance = convertToTensor(variance, "variance", "batchNorm"); let $scale; if (scale != null) { $scale = convertToTensor(scale, "scale", "batchNorm"); } let $offset; if (offset != null) { $offset = convertToTensor(offset, "offset", "batchNorm"); } assert($x.rank === 4, () => `Error in batchNorm4D: x must be rank 4 but got rank ${$x.rank}.`); assert($mean.rank === 4 || $mean.rank === 1, () => `Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${$mean.rank}.`); assert($variance.rank === 4 || $variance.rank === 1, () => `Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${$variance.rank}.`); if ($scale != null) { assert($scale.rank === 4 || $scale.rank === 1, () => `Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${$scale.rank}.`); } if ($offset != null) { assert($offset.rank === 4 || $offset.rank === 1, () => `Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${$offset.rank}.`); } return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon); } const batchNorm4d = op({batchNorm4d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/clone.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function clone_(x) { const $x = convertToTensor(x, "x", "clone", null); const forward = () => ENGINE.makeTensorFromDataId($x.dataId, $x.shape, $x.dtype); const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Identity); } const clone = op({clone_}); // node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function broadcastTo_(x, shape) { let input = convertToTensor(x, "broadcastTo", "x"); const xShape = input.shape; if (shape.some((d) => !(d > 0) || d % 1 !== 0)) { throw new Error(`broadcastTo(): Invalid broadcast shape [${shape}].`); } if (shape.length < input.rank) { throw new Error(`broadcastTo(): shape.length=${shape.length} < input.rank=${input.rank}.`); } if (shape.length > input.rank) { const newShape = input.shape.slice(); while (newShape.length < shape.length) { newShape.unshift(1); } input = reshape(input, newShape); } const inputShape = input.shape; const reps = Array.from(shape); for (let i = shape.length - 1; i >= 0; i--) { if (inputShape[i] === shape[i]) { reps[i] = 1; } else if (input.shape[i] !== 1) { throw new Error(`broadcastTo(): [${xShape}] cannot be broadcast to [${shape}].`); } } const axes = reps.map((n, i) => n > 1 ? i : -1).filter((i) => i >= 0); if (axes.length === 0) { return clone(input); } const forward = (backend2) => backend2.tile(input, reps); const inputs = {x: input}; const attrs = {shape, inputShape}; return ENGINE.runKernelFunc(forward, inputs, null, BroadcastTo, attrs); } const broadcastTo = op({broadcastTo_}); // node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function buffer(shape, dtype = "float32", values) { dtype = dtype || "float32"; assertNonNegativeIntegerDimensions(shape); return new TensorBuffer(shape, dtype, values); } // node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function ceil_(x) { const $x = convertToTensor(x, "x", "ceil"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.ceil($x), inputs, null, Ceil); } const ceil = op({ceil_}); // node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function clipByValue_(x, clipValueMin, clipValueMax) { const $x = convertToTensor(x, "x", "clipByValue"); assert(clipValueMin <= clipValueMax, () => `Error in clip: min (${clipValueMin}) must be less than or equal to max (${clipValueMax}).`); const inputs = {x: $x}; const attrs = {clipValueMin, clipValueMax}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.clip($x, clipValueMin, clipValueMax); save([$x]); return res; }, inputs, null, ClipByValue, attrs); } const clipByValue = op({clipByValue_}); // node_modules/@tensorflow/tfjs-core/dist/ops/complex.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function complex_(real6, imag6) { const $real = convertToTensor(real6, "real", "complex"); const $imag = convertToTensor(imag6, "imag", "complex"); assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, must match in call to tf.complex().`); const forward = (backend2) => { return backend2.complex($real, $imag); }; const inputs = {real: $real, imag: $imag}; return ENGINE.runKernelFunc(forward, inputs, null, Complex); } const complex = op({complex_}); // node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js function concat1d_(tensors) { return concat(tensors, 0); } const concat1d = op({concat1d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js function concat2d_(tensors, axis) { return concat(tensors, axis); } const concat2d = op({concat2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js function concat3d_(tensors, axis) { return concat(tensors, axis); } const concat3d = op({concat3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js function concat4d_(tensors, axis) { return concat(tensors, axis); } const concat4d = op({concat4d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js /** * @license * Copyright 2020 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function conv2d_(x, filter, strides, pad8, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode) { const $x = convertToTensor(x, "x", "conv2d"); const $filter = convertToTensor(filter, "filter", "conv2d"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in conv2d: input must be rank 4, but got rank ${x4D.rank}.`); assert($filter.rank === 4, () => `Error in conv2d: filter must be rank 4, but got rank ${$filter.rank}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in conv2d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1]; assert(inDepth === $filter.shape[2], () => `Error in conv2d: depth of input (${inDepth}) must match input depth for filter ${$filter.shape[2]}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const forward = (backend2, save) => { const $dataFormat = convertConv2DDataFormat(dataFormat); const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad8, dimRoundingMode, false, $dataFormat); const res2 = backend2.conv2d(x4D, $filter, convInfo); save([x4D, $filter]); return res2; }; const inputs = {x: x4D, filter: $filter}; const attrs = {strides, pad: pad8, dataFormat, dilations, dimRoundingMode}; const res = ENGINE.runKernelFunc(forward, inputs, null, Conv2D, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const conv2d = op({conv2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js function conv1d_(x, filter, stride, pad8, dataFormat = "NWC", dilation = 1, dimRoundingMode) { const $x = convertToTensor(x, "x", "conv1d"); const $filter = convertToTensor(filter, "filter", "conv1d"); let x3D = $x; let reshapedTo3D = false; if ($x.rank === 2) { reshapedTo3D = true; x3D = reshape($x, [1, $x.shape[0], $x.shape[1]]); } assert(x3D.rank === 3, () => `Error in conv1d: input must be rank 3, but got rank ${x3D.rank}.`); assert($filter.rank === 3, () => `Error in conv1d: filter must be rank 3, but got rank ${$filter.rank}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in conv1d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } assert(x3D.shape[2] === $filter.shape[1], () => `Error in conv1d: depth of input (${x3D.shape[2]}) must match input depth for filter ${$filter.shape[1]}.`); assert(eitherStridesOrDilationsAreOne(stride, dilation), () => `Error in conv1D: Either stride or dilation must be 1. 
Got stride ${stride} and dilation '${dilation}'`); assert(dataFormat === "NWC", () => `Error in conv1d: got dataFormat of ${dataFormat} but only NWC is currently supported.`); const filter4D = reshape($filter, [1, $filter.shape[0], $filter.shape[1], $filter.shape[2]]); const input4D = reshape(x3D, [x3D.shape[0], 1, x3D.shape[1], x3D.shape[2]]); const strides = [1, stride]; const dilations = [1, dilation]; const conv2dDataFormat = "NHWC"; const res = conv2d(input4D, filter4D, strides, pad8, conv2dDataFormat, dilations, dimRoundingMode); if (reshapedTo3D) { return reshape(res, [res.shape[2], res.shape[3]]); } return reshape(res, [res.shape[0], res.shape[2], res.shape[3]]); } const conv1d = op({conv1d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function conv2DBackpropInput_(xShape, dy, filter, strides, pad8, dataFormat = "NHWC", dimRoundingMode) { assert(xShape.length === dy.rank, () => `Length of inShape (${xShape.length}) and rank of dy (${dy.rank}) must match`); let xShape4D = xShape; let dy4D = dy; let reshapedTo4D = false; if (dy.rank === 3) { reshapedTo4D = true; dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); xShape4D = [1, xShape[0], xShape[1], xShape[2]]; } assert(xShape4D.length === 4, () => `Error in conv2dDerInput: inShape must be length 4, but got length ${xShape4D.length}.`); assert(dy4D.rank === 4, () => `Error in conv2dDerInput: dy must be rank 4, but got rank ${dy4D.rank}`); assert(filter.rank === 4, () => `Error in conv2dDerInput: filter must be rank 4, but got rank ${filter.rank}`); const inDepth = dataFormat === "NHWC" ? xShape4D[3] : xShape4D[1]; const outDepth = dataFormat === "NHWC" ? 
dy4D.shape[3] : dy4D.shape[1]; assert(inDepth === filter.shape[2], () => `Error in conv2dDerInput: depth of input (${inDepth}) must match input depth for filter ${filter.shape[2]}.`); assert(outDepth === filter.shape[3], () => `Error in conv2dDerInput: depth of output (${outDepth}) must match output depth for filter ${filter.shape[3]}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in conv2dDerInput: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { const dilations = 1; const $dataFormat = convertConv2DDataFormat(dataFormat); const convInfo = computeConv2DInfo(xShape4D, filter.shape, strides, dilations, pad8, dimRoundingMode, false, $dataFormat); const res2 = backend2.conv2dDerInput(dy4D, filter, convInfo); save([dy4D, filter]); return res2; }; const inputs = {dy: dy4D, filter}; const attrs = {strides, pad: pad8, dataFormat, dimRoundingMode, inputShape: xShape4D}; const res = ENGINE.runKernelFunc(forward, inputs, null, Conv2DBackpropInput, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const conv2DBackpropInput = op({conv2DBackpropInput_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js function conv2dTranspose_(x, filter, outputShape, strides, pad8, dimRoundingMode) { const $x = convertToTensor(x, "x", "conv2dTranspose"); const $filter = convertToTensor(filter, "filter", "conv2dTranspose"); return conv2DBackpropInput(outputShape, $x, $filter, strides, pad8, "NHWC", dimRoundingMode); } const conv2dTranspose = op({conv2dTranspose_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function conv3d_(x, filter, strides, pad8, dataFormat = "NDHWC", dilations = [1, 1, 1]) { const $x = convertToTensor(x, "x", "conv3d"); const $filter = convertToTensor(filter, "filter", "conv3d"); let x5D = $x; let reshapedTo5D = false; if ($x.rank === 4) { reshapedTo5D = true; x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); } assert(x5D.rank === 5, () => `Error in conv3d: input must be rank 5, but got rank ${x5D.rank}.`); assert($filter.rank === 5, () => `Error in conv3d: filter must be rank 5, but got rank ${$filter.rank}.`); assert(x5D.shape[4] === $filter.shape[3], () => `Error in conv3d: depth of input (${x5D.shape[4]}) must match input depth for filter ${$filter.shape[3]}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in conv3D: Either strides or dilations must be 1. 
Got strides ${strides} and dilations '${dilations}'`); assert(dataFormat === "NDHWC", () => `Error in conv3d: got dataFormat of ${dataFormat} but only NDHWC is currently supported.`); const forward = (backend2, save) => { const convInfo = computeConv3DInfo(x5D.shape, $filter.shape, strides, dilations, pad8); const res2 = backend2.conv3d(x5D, $filter, convInfo); save([x5D, $filter]); return res2; }; const inputs = {x: x5D, filter: $filter}; const attrs = {strides, pad: pad8, dataFormat, dilations}; const res = ENGINE.runKernelFunc(forward, inputs, null, Conv3D, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } const conv3d = op({conv3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function conv3DBackpropInput_(xShape, dy, filter, strides, pad8) { assert(xShape.length === dy.rank, () => `Length of inShape (${xShape.length}) and rank of dy (${dy.rank}) must match`); let xShape5D = xShape; let dy5D = dy; let reshapedTo5D = false; if (dy.rank === 4) { reshapedTo5D = true; dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]); xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]]; } const inDepth = xShape5D[4]; const outDepth = dy5D.shape[4]; assert(xShape5D.length === 5, () => `Error in conv3dDerInput: inShape must be length 5, but got length ${xShape5D.length}.`); assert(dy5D.rank === 5, () => `Error in conv3dDerInput: dy must be rank 5, but got rank ${dy5D.rank}`); assert(filter.rank === 5, () => `Error in conv3dDerInput: filter must be rank 5, but got rank ${filter.rank}`); assert(inDepth === filter.shape[3], () => `Error in conv3dDerInput: depth of input (${inDepth}) must match input depth for filter ${filter.shape[3]}.`); assert(outDepth === filter.shape[4], () => `Error in conv3dDerInput: depth of output (${outDepth}) must match output depth for filter ${filter.shape[4]}.`); const forward = (backend2) => { const dilations = 1; const convInfo = computeConv3DInfo(xShape5D, filter.shape, strides, dilations, pad8); return backend2.conv3dDerInput(dy5D, filter, convInfo); }; const inputs = {dy: dy5D}; const attrs = {pad: pad8}; const res = ENGINE.runKernelFunc(forward, inputs, null, Conv3DBackpropInputV2, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } const conv3DBackpropInput = op({conv3DBackpropInput_}); // node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js function conv3dTranspose_(x, filter, outputShape, strides, pad8) { const $x = convertToTensor(x, "x", "conv3dTranspose"); const $filter = convertToTensor(filter, "filter", "conv3dTranspose"); return conv3DBackpropInput(outputShape, $x, $filter, strides, pad8); } const conv3dTranspose = op({conv3dTranspose_}); // 
node_modules/@tensorflow/tfjs-core/dist/ops/cos.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function cos_(x) { const $x = convertToTensor(x, "x", "cos"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.cos($x); save([$x]); return res; }, inputs, null, Cos); } const cos = op({cos_}); // node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function cosh_(x) { const $x = convertToTensor(x, "x", "cosh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.cosh($x); save([$x]); return res; }, inputs, null, Cosh); } const cosh = op({cosh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function cumsum_(x, axis = 0, exclusive = false, reverse9 = false) { const $x = convertToTensor(x, "x", "cumsum"); const forward = (backend2, save) => { const permutation = getAxesPermutation([axis], $x.rank); let permutedX = $x; if (permutation != null) { permutedX = transpose($x, permutation); } const permutedAxis = getInnerMostAxes(1, $x.rank)[0]; let value = backend2.cumsum(permutedX, permutedAxis, exclusive, reverse9); save([$x]); if (permutation != null) { const reversePermutation = getUndoAxesPermutation(permutation); value = transpose(value, reversePermutation); } return value; }; const inputs = {x: $x}; const attrs = {axis, exclusive, reverse: reverse9}; return ENGINE.runKernelFunc(forward, inputs, null, Cumsum, attrs); } const cumsum = op({cumsum_}); // node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function depthToSpace_(x, blockSize, dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "depthToSpace"); const inputHeight = dataFormat === "NHWC" ? $x.shape[1] : $x.shape[2]; const inputWidth = dataFormat === "NHWC" ? $x.shape[2] : $x.shape[3]; const inputDepth = dataFormat === "NHWC" ? $x.shape[3] : $x.shape[1]; assert(inputHeight * blockSize >= 0, () => `Negative dimension size caused by overflow when multiplying ${inputHeight} and ${blockSize} for depthToSpace with input shape ${$x.shape}`); assert(inputWidth * blockSize >= 0, () => `Negative dimension size caused by overflow when multiplying ${inputWidth} and ${blockSize} for depthToSpace with input shape ${$x.shape}`); assert(inputDepth % (blockSize * blockSize) === 0, () => `Dimension size must be evenly divisible by ${blockSize * blockSize} but is ${inputDepth} for depthToSpace with input shape ${$x.shape}`); const forward = (backend2) => backend2.depthToSpace($x, blockSize, dataFormat); const inputs = {x: $x}; const attrs = {blockSize, dataFormat}; return ENGINE.runKernelFunc(forward, inputs, null, DepthToSpace, attrs); } const depthToSpace = op({depthToSpace_}); // node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function depthwiseConv2d_(x, filter, strides, pad8, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode) { const $x = convertToTensor(x, "x", "depthwiseConv2d"); const $filter = convertToTensor(filter, "filter", "depthwiseConv2d"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in depthwiseConv2d: input must be rank 4, but got rank ${x4D.rank}.`); assert($filter.rank === 4, () => `Error in depthwiseConv2d: filter must be rank 4, but got rank ${$filter.rank}.`); assert(x4D.shape[3] === $filter.shape[2], () => `Error in depthwiseConv2d: number of input channels (${x4D.shape[3]}) must match the inChannels dimension in filter ${$filter.shape[2]}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in depthwiseConv2d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { if (dilations == null) { dilations = [1, 1]; } assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad8, dimRoundingMode, true); const res2 = backend2.depthwiseConv2D(x4D, $filter, convInfo); save([x4D, $filter]); return res2; }; const inputs = {x: x4D, filter: $filter}; const attrs = {strides, pad: pad8, dataFormat, dilations, dimRoundingMode}; const res = ENGINE.runKernelFunc(forward, inputs, null, DepthwiseConv2dNative, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const depthwiseConv2d = op({depthwiseConv2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/diag.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function diag_(x) { const $x = convertToTensor(x, "x", "diag"); const forward = (backend2) => { const flat = reshape($x, [$x.size]); const result = backend2.diag(flat); const outShape = [...x.shape, ...x.shape]; return reshape(result, outShape); }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Diag); } const diag = op({diag_}); // node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function dilation2d_(x, filter, strides, pad8, dilations = [1, 1], dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "dilation2d"); const $filter = convertToTensor(filter, "filter", "dilation2d"); assert($x.rank === 3 || $x.rank === 4, () => `Error in dilation2d: input must be rank 3 or 4, but got rank ${$x.rank}.`); assert($filter.rank === 3, () => `Error in dilation2d: filter must be rank 3, but got rank ${$filter.rank}.`); assert(dataFormat === "NHWC", () => `Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${dataFormat}`); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); reshapedTo4D = true; } const inputs = {x: x4D, filter: $filter}; const attrs = {strides, pad: pad8, dilations}; const res = ENGINE.runKernel(Dilation2D, inputs, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const dilation2d = op({dilation2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function floorDiv_(a, b) { let $a = convertToTensor(a, "a", "floorDiv"); let $b = convertToTensor(b, "b", "floorDiv"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.floorDiv($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, FloorDiv); } const floorDiv = op({floorDiv_}); // node_modules/@tensorflow/tfjs-core/dist/ops/div.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function div_(a, b) { let $a = convertToTensor(a, "a", "div"); let $b = convertToTensor(b, "b", "div"); [$a, $b] = makeTypesMatch($a, $b); if ($a.dtype === "int32" && $b.dtype === "int32") { return floorDiv($a, $b); } const forward = (backend2, save) => { const res = backend2.realDivide($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; const attrs = {}; return ENGINE.runKernelFunc(forward, inputs, null, Div, attrs); } const div = op({div_}); // node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function getBroadcastDims(inShape, outShape) { const inRank = inShape.length; const dims = []; for (let i = 0; i < inRank; i++) { const dim = inRank - 1 - i; const a = inShape[dim] || 1; const b = outShape[outShape.length - 1 - i] || 1; if (b > 1 && a === 1) { dims.unshift(dim); } } return dims; } function getReductionAxes(inShape, outShape) { const result = []; for (let i = 0; i < outShape.length; i++) { const inDim = inShape[inShape.length - i - 1]; const outAxis = outShape.length - i - 1; const outDim = outShape[outAxis]; if (inDim == null || inDim === 1 && outDim > 1) { result.unshift(outAxis); } } return result; } function assertAndGetBroadcastShape(shapeA, shapeB) { const result = []; const l = Math.max(shapeA.length, shapeB.length); for (let i = 0; i < l; i++) { let a = shapeA[shapeA.length - i - 1]; if (a == null) { a = 1; } let b = shapeB[shapeB.length - i - 1]; if (b == null) { b = 1; } if (a === 1) { result.unshift(b); } else if (b === 1) { result.unshift(a); } else if (a !== b) { const errMsg = `Operands could not be broadcast together with shapes ${shapeA} and ${shapeB}.`; throw Error(errMsg); } else { result.unshift(a); } } return result; } // node_modules/@tensorflow/tfjs-core/dist/ops/equal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function equal_(a, b) { let $a = convertToTensor(a, "a", "equal"); let $b = convertToTensor(b, "b", "equal"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2) => backend2.equal($a, $b); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Equal); } const equal = op({equal_}); // node_modules/@tensorflow/tfjs-core/dist/ops/where.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function where_(condition, a, b) { const $a = convertToTensor(a, "a", "where"); const $b = convertToTensor(b, "b", "where"); const $condition = convertToTensor(condition, "condition", "where", "bool"); const broadcastShape = assertAndGetBroadcastShape($a.shape, $b.shape); const $broadcastedA = broadcastTo($a, broadcastShape); const $broadcastedB = broadcastTo($b, broadcastShape); if ($condition.rank === 1) { assert($condition.shape[0] === $a.shape[0], () => "The first dimension of `a` must match the size of `condition`."); } if ($condition.rank !== 1) { assertShapesMatch($condition.shape, $broadcastedB.shape, "Error in where: "); } const forward = (backend2, save) => { const res = backend2.select($condition, $broadcastedA, $broadcastedB); save([$condition]); return res; }; const inputs = { condition: $condition, t: $broadcastedA, e: $broadcastedB }; return ENGINE.runKernelFunc(forward, inputs, null, SelectV2); } const where = op({where_}); // node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function zerosLike_(x) { const $x = convertToTensor(x, "x", "zerosLike"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.zerosLike($x), inputs, null, ZerosLike); } const zerosLike = op({zerosLike_}); // node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function divNoNan_(a, b) { let $a = convertToTensor(a, "a", "div"); let $b = convertToTensor(b, "b", "div"); [$a, $b] = makeTypesMatch($a, $b); const divResult = div($a, $b); const zeros9 = zerosLike(divResult); const bEqualsZero = equal($b, zeros9); return where(bEqualsZero, zeros9, divResult); } const divNoNan = op({divNoNan_}); // node_modules/@tensorflow/tfjs-core/dist/ops/dot.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function dot_(t1, t2) { const $t1 = convertToTensor(t1, "t1", "dot"); const $t2 = convertToTensor(t2, "t2", "dot"); assert(($t1.rank === 1 || $t1.rank === 2) && ($t2.rank === 1 || $t2.rank === 2), () => `Error in dot: inputs must all be rank 1 or 2, but got ranks ${$t1.rank} and ${$t2.rank}.`); const t1Inner = $t1.rank === 1 ? $t1.size : $t1.shape[1]; const t2Inner = $t2.rank === 1 ? $t2.size : $t2.shape[0]; assert(t1Inner === t2Inner, () => `Error in dot: inner dimensions of inputs must match, but got ${t1Inner} and ${t2Inner}.`); if ($t1.rank === 1 && $t2.rank === 1) { const t12D = reshape($t1, [1, -1]); const t22D = reshape($t2, [-1, 1]); const t1t2 = matMul(t12D, t22D); return reshape(t1t2, []); } else if ($t1.rank === 1 && $t2.rank === 2) { const t12D = reshape($t1, [1, -1]); const t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]); const t1t2 = matMul(t12D, t22D); return reshape(t1t2, [t1t2.size]); } else if ($t1.rank === 2 && $t2.rank === 1) { const t22D = reshape($t2, [-1, 1]); const t1t2 = matMul($t1, t22D); return reshape(t1t2, [t1t2.size]); } else { const t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]); const t1t2 = matMul($t1, t22D); return t1t2; } } const dot = op({dot_}); // node_modules/@tensorflow/tfjs-core/dist/ops/elu.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function elu_(x) { const $x = convertToTensor(x, "x", "elu"); const forward = (backend2, save) => { const y = backend2.elu($x); save([y]); return y; }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Elu); } const elu = op({elu_}); // node_modules/@tensorflow/tfjs-core/dist/ops/erf.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function erf_(x) { let $x = convertToTensor(x, "x", "erf"); assert($x.dtype === "int32" || $x.dtype === "float32", () => "Input dtype must be `int32` or `float32`."); if ($x.dtype === "int32") { $x = cast($x, "float32"); } const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.erf($x); save([$x]); return res; }, inputs, null, Erf); } const erf = op({erf_}); // node_modules/@tensorflow/tfjs-core/dist/ops/exp.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function exp_(x) { const $x = convertToTensor(x, "x", "exp"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.exp($x); save([res]); return res; }, inputs, null, Exp); } const exp = op({exp_}); // node_modules/@tensorflow/tfjs-core/dist/ops/expand_dims.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function expandDims_(x, axis = 0) { const parseAs = null; const $x = convertToTensor(x, "x", "expandDims", parseAs); assert(axis <= $x.rank, () => "Axis must be <= rank of the tensor"); const newShape = $x.shape.slice(); if (axis < 0) { assert(-($x.rank + 1) <= axis, () => `Axis must be in the interval [${-($x.rank + 1)}, ${$x.rank}]`); axis = $x.rank + axis + 1; } newShape.splice(axis, 0, 1); return reshape($x, newShape); } const expandDims = op({expandDims_}); // node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function expm1_(x) { const $x = convertToTensor(x, "x", "expm1"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.expm1($x); save([$x]); return res; }, inputs, null, Expm1); } const expm1 = op({expm1_}); // node_modules/@tensorflow/tfjs-core/dist/ops/tile.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tile_(x, reps) { const parseAs = null; const $x = convertToTensor(x, "x", "tile", parseAs); assert($x.rank === reps.length, () => `Error in transpose: rank of input ${$x.rank} must match length of reps ${reps}.`); const forward = (backend2, save) => { const res = backend2.tile($x, reps); save([$x]); return res; }; const inputsToSave = [$x]; const inputs = {x: $x}; const attrs = {reps}; return ENGINE.runKernelFunc(forward, inputs, null, Tile, attrs, inputsToSave); } const tile = op({tile_}); // node_modules/@tensorflow/tfjs-core/dist/ops/eye.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function eye_(numRows, numColumns, batchShape, dtype = "float32") { if (numColumns == null) { numColumns = numRows; } const buff = buffer([numRows, numColumns], dtype); const n = numRows <= numColumns ? numRows : numColumns; for (let i = 0; i < n; ++i) { buff.set(1, i, i); } const out = reshape(buff.toTensor(), [numRows, numColumns]); if (batchShape == null) { return out; } else { if (batchShape.length === 1) { return tile(expandDims(out, 0), [batchShape[0], 1, 1]); } else if (batchShape.length === 2) { return tile(expandDims(expandDims(out, 0), 0), [batchShape[0], batchShape[1], 1, 1]); } else if (batchShape.length === 3) { return tile(expandDims(expandDims(expandDims(out, 0), 0), 0), [ batchShape[0], batchShape[1], batchShape[2], 1, 1 ]); } else { throw new Error(`eye() currently supports only 1D and 2D batchShapes, but received ${batchShape.length}D.`); } } } const eye = op({eye_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fft.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function fft_(input) { assert(input.dtype === "complex64", () => `The dtype for tf.spectral.fft() must be complex64 but got ${input.dtype}.`); const inputs = {input}; return ENGINE.runKernelFunc((backend2) => { const innerDimensionSize = input.shape[input.shape.length - 1]; const batch = input.size / innerDimensionSize; const input2D = input.as2D(batch, innerDimensionSize); const result = backend2.fft(input2D); return result.reshape(input.shape); }, inputs, null, FFT); } const fft = op({fft_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fill.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function fill(shape, value, dtype) { const attrs = {shape, value, dtype}; return ENGINE.runKernelFunc((backend2) => backend2.fill(shape, value, dtype), {}, null, Fill, attrs); } // node_modules/@tensorflow/tfjs-core/dist/ops/floor.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function floor_(x) { const $x = convertToTensor(x, "x", "floor"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.floor($x), inputs, null, Floor); } const floor = op({floor_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const PARALLELIZE_THRESHOLD = 30; function computeOptimalWindowSize(inSize) { if (inSize <= PARALLELIZE_THRESHOLD) { return inSize; } return nearestDivisor(inSize, Math.floor(Math.sqrt(inSize))); } // node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js const segment_util_exports = {}; __export(segment_util_exports, { collectGatherOpShapeInfo: () => collectGatherOpShapeInfo, computeOutShape: () => computeOutShape3, segOpComputeOptimalWindowSize: () => segOpComputeOptimalWindowSize }); /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function segOpComputeOptimalWindowSize(inSize, numSegments) { let done = false; let res; if (inSize <= PARALLELIZE_THRESHOLD) { res = inSize; done = true; } else { res = nearestDivisor(inSize, Math.floor(Math.sqrt(inSize))); } while (!done) { if (res > numSegments || res === inSize) { done = true; } else { res = nearestDivisor(inSize, res + 1); } } return res; } function computeOutShape3(aShape, axis, numSegments) { const outShape = []; const rank = aShape.length; for (let dim = 0; dim < rank; dim++) { if (dim !== axis) { outShape.push(aShape[dim]); } else { outShape.push(numSegments); } } return outShape; } function collectGatherOpShapeInfo(x, indices, axis) { const dimSize = x.shape[axis]; const outputShape = []; let batchSize = 1; let sliceSize = 1; for (let i = 0; i < axis; i++) { outputShape.push(x.shape[i]); batchSize *= x.shape[i]; } for (let i = 0; i < indices.rank; i++) { outputShape.push(indices.shape[i]); } for (let i = axis + 1; i < x.rank; i++) { outputShape.push(x.shape[i]); sliceSize *= x.shape[i]; } return {batchSize, sliceSize, dimSize, outputShape}; } // node_modules/@tensorflow/tfjs-core/dist/ops/gather.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function gather_(x, indices, axis = 0) { const $x = convertToTensor(x, "x", "gather"); const $indices = convertToTensor(indices, "indices", "gather", "int32"); const inputs = {x: $x, indices: $indices}; const attrs = {axis}; const forward = (backend2, save) => { const parsedAxis = parseAxisParam(axis, $x.shape)[0]; const shapeInfo = collectGatherOpShapeInfo($x, $indices, parsedAxis); const res = backend2.gather($x, reshape($indices, [$indices.size]), parsedAxis); save([$x, $indices]); return reshape(res, shapeInfo.outputShape); }; return ENGINE.runKernelFunc(forward, inputs, null, GatherV2, attrs); } const gather = op({gather_}); // node_modules/@tensorflow/tfjs-core/dist/ops/greater.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function greater_(a, b) { let $a = convertToTensor(a, "a", "greater"); let $b = convertToTensor(b, "b", "greater"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2) => backend2.greater($a, $b); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Greater); } const greater = op({greater_}); // node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function greaterEqual_(a, b) { let $a = convertToTensor(a, "a", "greaterEqual"); let $b = convertToTensor(b, "b", "greaterEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2, save) => { const res = backend2.greaterEqual($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, GreaterEqual); } const greaterEqual = op({greaterEqual_}); // node_modules/@tensorflow/tfjs-core/dist/ops/ifft.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function ifft_(input) { assert(input.dtype === "complex64", () => `The dtype for tf.spectral.ifft() must be complex64 but got ${input.dtype}.`); const inputs = {input}; return ENGINE.runKernelFunc((backend2) => { const innerDimensionSize = input.shape[input.shape.length - 1]; const batch = input.size / innerDimensionSize; const input2D = reshape(input, [batch, innerDimensionSize]); const result = backend2.ifft(input2D); return reshape(result, input.shape); }, inputs, null, IFFT); } const ifft = op({ifft_}); // node_modules/@tensorflow/tfjs-core/dist/ops/imag.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function imag_(input) { const $input = convertToTensor(input, "input", "imag"); const forward = (backend2) => { return backend2.imag($input); }; const inputs = {input: $input}; return ENGINE.runKernelFunc(forward, inputs, null, Imag); } const imag = op({imag_}); // node_modules/@tensorflow/tfjs-core/dist/ops/real.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function real_(input) { const $input = convertToTensor(input, "input", "real"); const forward = (backend2) => { return backend2.real($input); }; const inputs = {input: $input}; return ENGINE.runKernelFunc(forward, inputs, null, Real); } const real = op({real_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reverse_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); const forward = (backend2) => { const axes = parseAxisParam(axis, $x.shape); if ($x.rank === 0) { return clone($x); } const res = backend2.reverse($x, axes); return reshape(res, $x.shape); }; const inputs = {x: $x}; const attrs = {dims: axis}; return ENGINE.runKernelFunc(forward, inputs, null, Reverse, attrs); } const reverse = op({reverse_}); // node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function scalar(value, dtype) { if ((isTypedArray(value) && dtype !== "string" || Array.isArray(value)) && dtype !== "complex64") { throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)"); } if (dtype === "string" && isTypedArray(value) && !(value instanceof Uint8Array)) { throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`."); } const shape = []; const inferredShape = []; return makeTensor(value, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/irfft.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function irfft_(input) { const innerDimensionSize = input.shape[input.shape.length - 1]; const batch = input.size / innerDimensionSize; let ret; if (innerDimensionSize <= 2) { const complexInput = reshape(input, [batch, innerDimensionSize]); ret = ifft(complexInput); } else { const outputShape = [batch, 2 * (innerDimensionSize - 1)]; const realInput = reshape(real(input), [batch, innerDimensionSize]); const imagInput = reshape(imag(input), [batch, innerDimensionSize]); const realConjugate = reverse(slice(realInput, [0, 1], [batch, innerDimensionSize - 2]), 1); const imagConjugate = mul(reverse(slice(imagInput, [0, 1], [batch, innerDimensionSize - 2]), 1), scalar(-1)); const r = concat([realInput, realConjugate], 1); const i = concat([imagInput, imagConjugate], 1); const complexInput = reshape(complex(r, i), [outputShape[0], outputShape[1]]); ret = ifft(complexInput); } ret = real(ret); if (input.rank === 3 && input.shape[0] !== 0) { const temp = ret; const batch2 = input.shape[0]; ret = reshape(ret, [batch2, ret.shape[0] / batch2, ret.shape[1]]); temp.dispose(); } return ret; } const irfft = op({irfft_}); // node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function isFinite_(x) { const $x = convertToTensor(x, "x", "isFinite"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.isFinite($x), inputs, null, IsFinite); } const isFinite2 = op({isFinite_}); // node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js /** * @license * Copyright 2018 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function isInf_(x) { const $x = convertToTensor(x, "x", "isInf"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.isInf($x), inputs, null, IsInf); } const isInf = op({isInf_}); // node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function isNaN_(x) { const $x = convertToTensor(x, "x", "isNaN"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.isNaN($x), inputs, null, IsNan); } const isNaN2 = op({isNaN_}); // node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function maximum_(a, b) { let $a = convertToTensor(a, "a", "maximum"); let $b = convertToTensor(b, "b", "maximum"); [$a, $b] = makeTypesMatch($a, $b); if ($a.dtype === "bool") { $a = cast($a, "int32"); $b = cast($b, "int32"); } assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2, save) => { const res = backend2.maximum($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Maximum); } const maximum = op({maximum_}); // node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function leakyRelu_(x, alpha = 0.2) { const $x = convertToTensor(x, "x", "leakyRelu"); return maximum(mul(scalar(alpha), $x), $x); } const leakyRelu = op({leakyRelu_}); // node_modules/@tensorflow/tfjs-core/dist/ops/less.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function less_(a, b) { let $a = convertToTensor(a, "a", "less"); let $b = convertToTensor(b, "b", "less"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2) => backend2.less($a, $b); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Less); } const less = op({less_}); // node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function lessEqual_(a, b) { let $a = convertToTensor(a, "a", "lessEqual"); let $b = convertToTensor(b, "b", "lessEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2, save) => { const res = backend2.lessEqual($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, LessEqual); } const lessEqual = op({lessEqual_}); // node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function linspace(start, stop, num) { if (num <= 0) { throw new Error("The number of values should be positive."); } const attrs = {start, stop, num}; return ENGINE.runKernelFunc((backend2) => backend2.linspace(start, stop, num), {}, null, LinSpace, attrs); } // node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function localResponseNormalization_(x, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5) { const $x = convertToTensor(x, "x", "localResponseNormalization"); assert($x.rank === 4 || $x.rank === 3, () => `Error in localResponseNormalization: x must be rank 3 or 4 but got rank ${$x.rank}.`); assert(isInt(depthRadius), () => `Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${depthRadius}.`); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } const forward = (backend2, save) => { const y = backend2.localResponseNormalization4D(x4D, depthRadius, bias, alpha, beta); save([x4D, y]); return y; }; const inputs = {x: x4D}; const attrs = {depthRadius, bias, alpha, beta}; const res = ENGINE.runKernelFunc(forward, inputs, null, LRN, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } else { return res; } } const localResponseNormalization = op({localResponseNormalization_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function log_(x) { const $x = convertToTensor(x, "x", "log"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.log($x); save([$x]); return res; }, inputs, null, Log); } const log = op({log_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function log1p_(x) { const $x = convertToTensor(x, "x", "log1p"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.log1p($x); save([$x]); return res; }, inputs, null, Log1p); } const log1p = op({log1p_}); // node_modules/@tensorflow/tfjs-core/dist/gradients.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function grad(f) { assert(isFunction(f), () => "The f passed in grad(f) must be a function"); return (x, dy) => { const $x = convertToTensor(x, "x", "tf.grad", null); const $dy = dy != null ? convertToTensor(dy, "dy", "tf.grad") : null; return ENGINE.tidy(() => { const {value, grads: grads2} = ENGINE.gradients(() => f($x), [$x], $dy); if ($dy != null) { assertShapesMatch(value.shape, $dy.shape, "The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"); } checkGrads(grads2); return grads2[0]; }); }; } function grads(f) { assert(isFunction(f), () => "The f passed in grads(f) must be a function"); return (args, dy) => { assert(Array.isArray(args), () => "The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s"); const $args = convertToTensorArray(args, "args", "tf.grads", null); const $dy = dy != null ? 
convertToTensor(dy, "dy", "tf.grads") : null; return ENGINE.tidy(() => { const {value, grads: grads2} = ENGINE.gradients(() => f(...$args), $args, $dy); if ($dy != null) { assertShapesMatch(value.shape, $dy.shape, "The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"); } checkGrads(grads2); return grads2; }); }; } function valueAndGrad(f) { assert(isFunction(f), () => "The f passed in valueAndGrad(f) must be a function"); return (x, dy) => { assert(x instanceof Tensor, () => "The x passed in valueAndGrad(f)(x) must be a tensor"); assert(dy == null || dy instanceof Tensor, () => "The dy passed in valueAndGrad(f)(x, dy) must be a tensor"); const {grads: grads2, value} = ENGINE.gradients(() => f(x), [x], dy); checkGrads(grads2); return {grad: grads2[0], value}; }; } function valueAndGrads(f) { assert(isFunction(f), () => "The f passed in valueAndGrads(f) must be a function"); return (args, dy) => { assert(Array.isArray(args) && args.every((arg) => arg instanceof Tensor), () => "The args passed in valueAndGrads(f)(args) must be array of tensors"); assert(dy == null || dy instanceof Tensor, () => "The dy passed in valueAndGrads(f)(args, dy) must be a tensor"); const res = ENGINE.gradients(() => f(...args), args, dy); if (dy != null) { assertShapesMatch(res.value.shape, dy.shape, "The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"); } checkGrads(res.grads); return res; }; } function variableGrads(f, varList) { assert(isFunction(f), () => "The f passed in variableGrads(f) must be a function"); assert(varList == null || Array.isArray(varList) && varList.every((v) => v instanceof Variable), () => "The varList passed in variableGrads(f, varList) must be an array of variables"); const specifiedVarList = varList != null; if (!specifiedVarList) { varList = []; for (const varName in ENGINE.registeredVariables) { varList.push(ENGINE.registeredVariables[varName]); } } const specifiedNonTrainable = specifiedVarList ? varList.filter((variable3) => !variable3.trainable) : null; const originalVarCount = varList.length; varList = varList.filter((variable3) => variable3.trainable); assert(varList.length > 0, () => `variableGrads() expects at least one of the input variables to be trainable, but none of the ${originalVarCount} variables is trainable.`); const allowNoGradients = true; const {value, grads: grads2} = ENGINE.gradients(f, varList, null, allowNoGradients); assert(grads2.some((g) => g != null), () => "Cannot find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize()."); assert(value.rank === 0, () => `The f passed in variableGrads(f) must return a scalar, but it returned a rank-${value.rank} tensor`); const namedGrads = {}; varList.forEach((v, i) => { if (grads2[i] != null) { namedGrads[v.name] = grads2[i]; } }); if (specifiedNonTrainable != null) { specifiedNonTrainable.forEach((v) => namedGrads[v.name] = null); } return {value, grads: namedGrads}; } function customGrad(f) { return ENGINE.customGrad(f); } function checkGrads(grads2) { const numNullGradients = grads2.filter((g) => g == null).length; if (numNullGradients > 0) { throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.`); } } // node_modules/@tensorflow/tfjs-core/dist/ops/neg.js /** * @license * Copyright 2018 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function neg_(x) { const $x = convertToTensor(x, "x", "neg"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.neg($x), inputs, null, Negate); } const neg = op({neg_}); // node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function softplus_(x) { const $x = convertToTensor(x, "x", "softplus"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.softplus($x); save([$x]); return res; }, inputs, null, Softplus); } const softplus = op({softplus_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logSigmoid_(x) { const $x = convertToTensor(x, "x", "logSigmoid"); const customOp = customGrad((x2) => { const value = neg(softplus(neg(x2))); const gradFunc = (dy) => { const derX = mul(dy, sigmoid(neg(x2))); return derX; }; return {value, gradFunc}; }); return customOp($x); } const logSigmoid = op({logSigmoid_}); // node_modules/@tensorflow/tfjs-core/dist/ops/max.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function max_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "max"); const forward = (backend2, save) => { const origAxes = parseAxisParam(axis, $x.shape); let axes = origAxes; const permutedAxes = getAxesPermutation(axes, $x.rank); let maxInput = $x; if (permutedAxes != null) { maxInput = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, maxInput.rank); } const y = backend2.max(maxInput, axes); if (permutedAxes != null) { maxInput.dispose(); } let res = y; if (keepDims) { const expandedShape = expandShapeToKeepDim(res.shape, parseAxisParam(axis, $x.shape)); res = reshape(res, expandedShape); y.dispose(); } save([$x, res]); return res; }; const inputs = {x: $x}; const attrs = {reductionIndices: axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, Max, attrs); } const max = op({max_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sub.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sub_(a, b) { let $a = convertToTensor(a, "a", "sub"); let $b = convertToTensor(b, "b", "sub"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.subtract($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Sub); } const sub = op({sub_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sum.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function sum_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "sum"); if ($x.dtype === "bool") { $x = cast($x, "int32"); } const forward = (backend2, save) => { save([$x]); const axes = parseAxisParam(axis, $x.shape); const permutation = getAxesPermutation(axes, $x.rank); let reductionAxes = axes; let permutedX = $x; if (permutation != null) { permutedX = transpose($x, permutation); reductionAxes = getInnerMostAxes(reductionAxes.length, $x.rank); } let value = backend2.sum(permutedX, reductionAxes); if (keepDims) { const newShape = expandShapeToKeepDim(value.shape, axes); value = reshape(value, newShape); } return value; }; const inputs = {x: $x}; const attrs = {axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, Sum, attrs); } const sum2 = op({sum_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logSoftmax_(logits, axis = -1) { const $logits = convertToTensor(logits, "logits", "logSoftmax"); if (axis === -1) { axis = $logits.rank - 1; } if (axis !== $logits.rank - 1) { throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${$logits.rank} and axis was ${axis}`); } const forward = (backend2, save) => { const keepDims = true; const xMax = max(logits, axis, true); const shifted = sub(logits, xMax); const value = sub(cast(shifted, "float32"), log(sum2(exp(shifted), axis, keepDims))); save([value]); return value; }; const inputs = {logits: $logits}; const attrs = {axis}; return ENGINE.runKernelFunc(forward, inputs, null, LogSoftmax, attrs); } const logSoftmax = op({logSoftmax_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function logSumExp_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "logSumExp"); const axes = parseAxisParam(axis, $x.shape); const xMax = max($x, axes, true); const a = sub($x, xMax); const b = exp(a); const c = sum2(b, axes); const d = log(c); const res = add2(reshape(xMax, d.shape), d); if (keepDims) { const newShape = expandShapeToKeepDim(res.shape, axes); return reshape(res, newShape); } return res; } const logSumExp = op({logSumExp_}); // node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logicalAnd_(a, b) { const $a = convertToTensor(a, "a", "logicalAnd", "bool"); const $b = convertToTensor(b, "b", "logicalAnd", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc((backend2) => backend2.logicalAnd($a, $b), inputs, null, LogicalAnd); } const logicalAnd = op({logicalAnd_}); // node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logicalNot_(x) { const $x = convertToTensor(x, "x", "logicalNot", "bool"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.logicalNot($x), inputs, null, LogicalNot); } const logicalNot = op({logicalNot_}); // node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function logicalOr_(a, b) { const $a = convertToTensor(a, "a", "logicalOr", "bool"); const $b = convertToTensor(b, "b", "logicalOr", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc((backend2) => backend2.logicalOr($a, $b), inputs, null, LogicalOr); } const logicalOr = op({logicalOr_}); // node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logicalXor_(a, b) { const $a = convertToTensor(a, "a", "logicalXor", "bool"); const $b = convertToTensor(b, "b", "logicalXor", "bool"); assertAndGetBroadcastShape($a.shape, $b.shape); return logicalAnd(logicalOr(a, b), logicalNot(logicalAnd(a, b))); } const logicalXor = op({logicalXor_}); // node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function maxPool_(x, filterSize, strides, pad8, dimRoundingMode) { const $x = convertToTensor(x, "x", "maxPool"); const dilations = 1; let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in maxPool: input must be rank 4 but got rank ${x4D.rank}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in maxPool: Either strides or dilations must be 1. 
Got strides ${strides} and dilations '${dilations}'`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in maxPool: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { const convInfo = computePool2DInfo(x4D.shape, filterSize, strides, 1, pad8, dimRoundingMode); let y; if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && arraysEqual(convInfo.inShape, convInfo.outShape)) { y = x4D.clone(); } else { y = backend2.maxPool(x4D, convInfo); } save([x4D, y]); return y; }; const inputs = {x: x4D}; const attrs = {filterSize, strides, pad: pad8, dimRoundingMode}; const res = ENGINE.runKernelFunc(forward, inputs, null, MaxPool, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const maxPool = op({maxPool_}); // node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function maxPool3d_(x, filterSize = [1, 1, 1], strides, pad8, dimRoundingMode, dataFormat = "NDHWC", dilations) { if (dilations == null) { dilations = [1, 1, 1]; } else { deprecationWarn("dilations is deprecated, this field will be gone in v3.0.0."); } const $x = convertToTensor(x, "x", "maxPool3d"); let x5D = $x; let reshapedTo5D = false; if ($x.rank === 4) { reshapedTo5D = true; x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); } assert(x5D.rank === 5, () => `Error in maxPool3d: x must be rank 5 but got rank ${x5D.rank}.`); assert(dataFormat === "NDHWC", () => `Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${dataFormat}`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in maxPool3d: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in maxPool3d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2, save) => { if (dilations == null) { dilations = [1, 1, 1]; } const convInfo = computePool3DInfo(x5D.shape, filterSize, strides, dilations, pad8, dimRoundingMode, dataFormat); const y = backend2.maxPool3d(x5D, convInfo); save([x5D, y]); return y; }; const inputs = {x: x5D}; const attrs = {filterSize, strides, pad: pad8, dimRoundingMode, dataFormat, dilations}; const res = ENGINE.runKernelFunc(forward, inputs, null, MaxPool3D, attrs); if (reshapedTo5D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); } return res; } const maxPool3d = op({maxPool3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function maxPoolWithArgmax_(x, filterSize, strides, pad8, includeBatchInIndex = false) { const $x = convertToTensor(x, "x", "maxPoolWithArgmax"); const inputs = {x: $x}; const attrs = {filterSize, strides, pad: pad8, includeBatchInIndex}; const result = ENGINE.runKernel(MaxPoolWithArgmax, inputs, attrs); return {result: result[0], indexes: result[1]}; } const maxPoolWithArgmax = op({maxPoolWithArgmax_}); // node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function zeros(shape, dtype = "float32") { if (dtype === "complex64") { const real6 = zeros(shape, "float32"); const imag6 = zeros(shape, "float32"); return complex(real6, imag6); } const values = makeZerosTypedArray(sizeFromShape(shape), dtype); return ENGINE.makeTensor(values, shape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/ones.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function ones2(shape, dtype = "float32") { if (dtype === "complex64") { const real6 = ones2(shape, "float32"); const imag6 = zeros(shape, "float32"); return complex(real6, imag6); } const values = makeOnesTypedArray(sizeFromShape(shape), dtype); return ENGINE.makeTensor(values, shape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/mean.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function mean_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "mean"); const axes = parseAxisParam(axis, $x.shape); const shapes = computeOutAndReduceShapes($x.shape, axes); const reduceShape = shapes[1]; const reduceSize = sizeFromShape(reduceShape); const customOp = customGrad((x2) => { const reduceSizeScalar = scalar(reduceSize); const xReduce = reduceSizeScalar.dtype === x2.dtype ? x2 : cast(x2, reduceSizeScalar.dtype); const res = div(xReduce, reduceSizeScalar); const value = sum2(res, axis, keepDims); const gradFunc = (dy) => { const expandedDyShape = x2.shape.slice(); axes.forEach((axis2) => { expandedDyShape[axis2] = 1; }); const expandedDy = reshape(dy, expandedDyShape); const derX = div(mul(expandedDy, ones2(x2.shape, "float32")), reduceSize); return derX; }; return {value, gradFunc}; }); return customOp($x); } const mean = op({mean_}); // node_modules/@tensorflow/tfjs-core/dist/ops/min.js function min_(x, axis = null, keepDims = false) { const $x = convertToTensor(x, "x", "min"); const forward = (backend2, save) => { const origAxes = parseAxisParam(axis, $x.shape); let axes = origAxes; const permutedAxes = getAxesPermutation(axes, $x.rank); let minInput = $x; if (permutedAxes != null) { minInput = transpose($x, permutedAxes); axes = getInnerMostAxes(axes.length, $x.rank); } const y = backend2.min(minInput, axes); if (permutedAxes != null) { minInput.dispose(); } let res = y; if (keepDims) { const expandedShape = expandShapeToKeepDim(res.shape, origAxes); res = reshape(y, expandedShape); y.dispose(); } save([$x, res]); return res; }; const inputs = {x: $x}; const attrs = {axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, Min, attrs); } const min = op({min_}); // node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function minimum_(a, b) { let $a = convertToTensor(a, "a", "minimum"); let $b = convertToTensor(b, "b", "minimum"); [$a, $b] = makeTypesMatch($a, $b); if ($a.dtype === "bool") { $a = cast($a, "int32"); $b = cast($b, "int32"); } assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2, save) => { const res = backend2.minimum($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Minimum); } const minimum = op({minimum_}); // node_modules/@tensorflow/tfjs-core/dist/ops/mod.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function mod_(a, b) { let $a = convertToTensor(a, "a", "mod"); let $b = convertToTensor(b, "b", "mod"); [$a, $b] = makeTypesMatch($a, $b); const forward = (backend2, save) => { const res = backend2.mod($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, Mod); } const mod = op({mod_}); // node_modules/@tensorflow/tfjs-core/dist/ops/square.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function square_(x) { const $x = convertToTensor(x, "x", "square"); const attrs = {}; const inputsToSave = [$x]; const outputsToSave = []; return ENGINE.runKernelFunc((backend2, save) => { save([$x]); return backend2.square($x); }, {x: $x}, null, "Square", attrs, inputsToSave, outputsToSave); } const square = op({square_}); // node_modules/@tensorflow/tfjs-core/dist/ops/moments.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function moments_(x, axis = null, keepDims = false) { x = convertToTensor(x, "x", "moments"); const axes = parseAxisParam(axis, x.shape); const xMean = mean(x, axes, keepDims); let keepDimsShape = xMean.shape; if (!keepDims) { keepDimsShape = expandShapeToKeepDim(xMean.shape, axes); } const devSquared = square(sub(cast(x, "float32"), reshape(xMean, keepDimsShape))); const variance = mean(devSquared, axes, keepDims); return {mean: xMean, variance}; } const moments = op({moments_}); // node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js function multiRNNCell_(lstmCells, data, c, h) { const $data = convertToTensor(data, "data", "multiRNNCell"); const $c = convertToTensorArray(c, "c", "multiRNNCell"); const $h = convertToTensorArray(h, "h", "multiRNNCell"); let input = $data; const newStates = []; for (let i = 0; i < lstmCells.length; i++) { const output = lstmCells[i](input, $c[i], $h[i]); newStates.push(output[0]); newStates.push(output[1]); input = output[1]; } const newC = []; const newH = []; for (let i = 0; i < newStates.length; i += 2) { newC.push(newStates[i]); newH.push(newStates[i + 1]); } return [newC, newH]; } const multiRNNCell = op({multiRNNCell_}); // node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function multinomial_(logits, numSamples, seed, normalized = false) { const $logits = convertToTensor(logits, "logits", "multinomial"); const numOutcomes = $logits.size; const origRank = $logits.rank; if (numOutcomes < 2) { throw new Error(`Error in multinomial: you need at least 2 outcomes, but got ${numOutcomes}.`); } if (origRank > 2) { throw new Error(`Rank of probabilities must be 1 or 2, but is ${origRank}`); } seed = seed || Math.random(); const logits2D = origRank === 1 ? reshape($logits, [1, -1]) : $logits; const res = ENGINE.runKernelFunc((backend2) => backend2.multinomial(logits2D, normalized, numSamples, seed), {logits2D}); return origRank === 1 ? reshape(res, [res.size]) : res; } const multinomial = op({multinomial_}); // node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function notEqual_(a, b) { let $a = convertToTensor(a, "a", "notEqual"); let $b = convertToTensor(b, "b", "notEqual"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2) => backend2.notEqual($a, $b); const inputs = {a: $a, b: $b}; return ENGINE.runKernelFunc(forward, inputs, null, NotEqual); } const notEqual = op({notEqual_}); // node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function oneHot_(indices, depth, onValue = 1, offValue = 0) { if (depth < 2) { throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`); } const $indices = convertToTensor(indices, "indices", "oneHot", "int32"); const outShape = [...$indices.shape, depth]; const forward = (backend2, save) => { save([$indices]); return reshape(backend2.oneHot(reshape($indices, [$indices.size]), depth, onValue, offValue), outShape); }; const inputs = {indices: $indices}; const attrs = {depth, onValue, offValue}; return ENGINE.runKernelFunc(forward, inputs, null, OneHot, attrs); } const oneHot = op({oneHot_}); // node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function onesLike_(x) { const $x = convertToTensor(x, "x", "onesLike"); const forward = (backend2, save) => { if ($x.dtype === "complex64") { const r = onesLike(real($x)); const i = zerosLike(imag($x)); return complex(r, i); } return backend2.onesLike($x); }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, OnesLike); } const onesLike = op({onesLike_}); // node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js function outerProduct_(v1, v2) { const $v1 = convertToTensor(v1, "v1", "outerProduct"); const $v2 = convertToTensor(v2, "v2", "outerProduct"); assert($v1.rank === 1 && $v2.rank === 1, () => `Error in outerProduct: inputs must be rank 1, but got ranks ${$v1.rank} and ${$v2.rank}.`); const v12D = reshape($v1, [-1, 1]); const v22D = reshape($v2, [1, -1]); return matMul(v12D, v22D); } const outerProduct = op({outerProduct_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pad.js /** * @license * Copyright 2020 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function pad_(x, paddings, constantValue = 0) { const $x = convertToTensor(x, "x", "pad"); if ($x.rank === 0) { throw new Error("pad(scalar) is not defined. Pass non-scalar to pad"); } const forward = (backend2, save) => { save([$x]); return backend2.pad($x, paddings, constantValue); }; const attrs = {paddings, constantValue}; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, PadV2, attrs); } const pad = op({pad_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js function pad1d_(x, paddings, constantValue = 0) { assert(paddings.length === 2, () => "Invalid number of paddings. Must be length of 2."); return pad(x, [paddings], constantValue); } const pad1d = op({pad1d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js function pad2d_(x, paddings, constantValue = 0) { assert(paddings.length === 2 && paddings[0].length === 2 && paddings[1].length === 2, () => "Invalid number of paddings. Must be length of 2 each."); return pad(x, paddings, constantValue); } const pad2d = op({pad2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js function pad3d_(x, paddings, constantValue = 0) { assert(paddings.length === 3 && paddings[0].length === 2 && paddings[1].length === 2 && paddings[2].length === 2, () => "Invalid number of paddings. Must be length of 2 each."); return pad(x, paddings, constantValue); } const pad3d = op({pad3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js function pad4d_(x, paddings, constantValue = 0) { assert(paddings.length === 4 && paddings[0].length === 2 && paddings[1].length === 2 && paddings[2].length === 2 && paddings[3].length === 2, () => "Invalid number of paddings. Must be length of 2 each."); return pad(x, paddings, constantValue); } const pad4d = op({pad4d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function spaceToBatchND_(x, blockShape, paddings) { const $x = convertToTensor(x, "x", "spaceToBatchND"); assert($x.rank >= 1 + blockShape.length, () => `input rank ${$x.rank} should be > than [blockShape] ${blockShape.length}`); assert(paddings.length === blockShape.length, () => `paddings.shape[0] ${paddings.length} must be equal to [blockShape] ${blockShape.length}`); assert($x.shape.reduce((a, b, i) => { if (i > 0 && i <= blockShape.length) { return a && (b + paddings[i - 1][0] + paddings[i - 1][1]) % blockShape[i - 1] === 0; } return a; }, true), () => `input spatial dimensions ${$x.shape.slice(1)} with paddings ${paddings.toString()} must be divisible by blockShapes ${blockShape.toString()}`); const forward = (backend2) => backend2.spaceToBatchND($x, blockShape, paddings); const inputs = {x: $x}; const attrs = {blockShape, paddings}; return ENGINE.runKernelFunc(forward, inputs, null, SpaceToBatchND, attrs); } const spaceToBatchND = op({spaceToBatchND_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pool.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function pool_(input, windowShape, poolingType, pad8, dilations, strides) { if (dilations == null) { dilations = [1, 1]; } if (strides == null) { strides = 1; } if (pad8 === 0) { pad8 = "valid"; } const $x = convertToTensor(input, "x", "maxPool"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in pool: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); const convInfo = computePool2DInfo(x4D.shape, windowShape, strides, dilations, pad8); const dilation = [convInfo.dilationHeight, convInfo.dilationWidth]; let basePadding; if (pad8 === "same") { basePadding = withSpaceToBatchBasePaddings([convInfo.filterHeight, convInfo.filterWidth], dilation); } else { basePadding = [[0, 0], [0, 0]]; } const isDilationOne = dilation[0] === 1 && dilation[1] === 1; const [adjustedPadding, adjustedCrops] = requiredSpaceToBatchPaddings([convInfo.inHeight, convInfo.inWidth], dilation, basePadding); const convertedPad = isDilationOne ? pad8 : "valid"; const convertedX = isDilationOne ? x4D : spaceToBatchND(x4D, dilation, adjustedPadding); const forwardOp = poolingType === "avg" ? () => avgPool(convertedX, windowShape, strides, convertedPad) : () => maxPool(convertedX, windowShape, strides, convertedPad); const y = forwardOp(); const res = isDilationOne ? 
y : batchToSpaceND(y, dilation, adjustedCrops); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } function requiredSpaceToBatchPaddings(inputShape, blockShape, basePadding) { const padStart = basePadding.map((b) => b[0]); const origPadEnd = basePadding.map((b) => b[1]); const fullInputShape = inputShape.concat(padStart, origPadEnd); const padEndExtra = blockShape.map((b, i) => (b - fullInputShape[i] % b) % b); const padEnd = origPadEnd.map((s, i) => s + padEndExtra[i]); const paddings = blockShape.map((_, i) => [padStart[i], padEnd[i]]); const crops = blockShape.map((_, i) => [0, padEndExtra[i]]); return [paddings, crops]; } function withSpaceToBatchBasePaddings(filterShape, dilation) { const dilatedFilterShape = filterShape.map((s, i) => { return s + (s - 1) * (dilation[i] - 1); }); const padExtraShape = dilatedFilterShape.map((s) => s - 1); const padExtraStart = padExtraShape.map((s) => Math.floor(s / 2)); const padExtraEnd = padExtraShape.map((s, i) => s - padExtraStart[i]); return padExtraShape.map((_, i) => { return [padExtraStart[i], padExtraEnd[i]]; }); } const pool = op({pool_}); // node_modules/@tensorflow/tfjs-core/dist/ops/pow.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function pow_(base, exp11) { let $base = convertToTensor(base, "base", "pow"); let $exp = convertToTensor(exp11, "exp", "pow"); [$base, $exp] = makeTypesMatch($base, $exp); const inputs = {a: $base, b: $exp}; const forward = (backend2, save) => { const y = backend2.pow($base, $exp); save([$base, $exp, y]); return y; }; return ENGINE.runKernelFunc(forward, inputs, null, Pow); } const pow = op({pow_}); // node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function prelu_(x, alpha) { const $x = convertToTensor(x, "x", "prelu"); const $alpha = convertToTensor(alpha, "alpha", "prelu"); const forward = (backend2, save) => { const res = backend2.prelu($x, $alpha); save([$x, $alpha]); return res; }; const inputs = {x: $x, alpha: $alpha}; return ENGINE.runKernelFunc(forward, inputs, null, Prelu); } const prelu = op({prelu_}); // node_modules/@tensorflow/tfjs-core/dist/ops/print.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function print(x, verbose = false) { console.log(x.toString(verbose)); } // node_modules/@tensorflow/tfjs-core/dist/ops/prod.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function prod_(x, axis = null, keepDims = false) { let $x = convertToTensor(x, "x", "prod"); const forward = (backend2) => { if ($x.dtype === "bool") { $x = cast($x, "int32"); } const axes = parseAxisParam(axis, $x.shape); const permutation = getAxesPermutation(axes, $x.rank); let reductionAxes = axes; let permutedX = $x; if (permutation != null) { permutedX = transpose($x, permutation); reductionAxes = getInnerMostAxes(reductionAxes.length, $x.rank); } let value = backend2.prod(permutedX, reductionAxes); if (keepDims) { const newShape = expandShapeToKeepDim(value.shape, axes); value = reshape(value, newShape); } return value; }; const inputs = {x: $x}; const attrs = {axis, keepDims}; return ENGINE.runKernelFunc(forward, inputs, null, Prod, attrs); } const prod = op({prod_}); // node_modules/@tensorflow/tfjs-core/dist/ops/rand.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function rand_(shape, randFunction, dtype) { const size = sizeFromShape(shape); let values = null; if (dtype == null || dtype === "float32") { values = new Float32Array(size); } else if (dtype === "int32") { values = new Int32Array(size); } else if (dtype === "bool") { values = new Uint8Array(size); } else { throw new Error(`Unknown data type ${dtype}`); } for (let i = 0; i < size; i++) { values[i] = randFunction(); } return ENGINE.makeTensor(values, shape, dtype); } const rand = op({rand_}); // node_modules/@tensorflow/tfjs-core/dist/test_util.js const test_util_exports = {}; __export(test_util_exports, { TEST_EPSILON_FLOAT16: () => TEST_EPSILON_FLOAT16, expectArrayBuffersEqual: () => expectArrayBuffersEqual, expectArraysClose: () => expectArraysClose, expectArraysEqual: () => expectArraysEqual, expectNumbersClose: () => expectNumbersClose, expectPromiseToFail: () => expectPromiseToFail, expectValuesInRange: () => expectValuesInRange, testEpsilon: () => testEpsilon }); /** * @license * Copyright 2017 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const TEST_EPSILON_FLOAT32 = 1e-3; const TEST_EPSILON_FLOAT16 = 0.1; function expectArraysClose(actual, expected, epsilon) { if (epsilon == null) { epsilon = testEpsilon(); } return expectArraysPredicate(actual, expected, (a, b) => areClose(a, b, epsilon)); } function testEpsilon() { return ENGINE.backend.floatPrecision() === 32 ? TEST_EPSILON_FLOAT32 : TEST_EPSILON_FLOAT16; } function expectArraysPredicate(actual, expected, predicate) { let checkClassType = true; if (isTypedArray(actual) || isTypedArray(expected)) { checkClassType = false; } if (isTypedArray(actual) && isTypedArray(expected)) { checkClassType = true; } if (checkClassType) { const aType = actual.constructor.name; const bType = expected.constructor.name; if (aType !== bType) { throw new Error(`Arrays are of different type. Actual: ${aType}. Expected: ${bType}`); } } if (Array.isArray(actual) && Array.isArray(expected)) { const actualShape = inferShape(actual); const expectedShape = inferShape(expected); if (!arraysEqual(actualShape, expectedShape)) { throw new Error(`Arrays have different shapes. Actual: [${actualShape}]. Expected: [${expectedShape}]`); } } const actualFlat = isTypedArray(actual) ? actual : flatten(actual); const expectedFlat = isTypedArray(expected) ? expected : flatten(expected); if (actualFlat.length !== expectedFlat.length) { throw new Error(`Arrays have different lengths actual: ${actualFlat.length} vs expected: ${expectedFlat.length}. Actual: ${actualFlat}. Expected: ${expectedFlat}.`); } for (let i = 0; i < expectedFlat.length; ++i) { const a = actualFlat[i]; const e = expectedFlat[i]; if (!predicate(a, e)) { throw new Error(`Arrays differ: actual[${i}] = ${a}, expected[${i}] = ${e}. Actual: ${actualFlat}. 
Expected: ${expectedFlat}.`); } } } function expectPromiseToFail(fn, done) { fn().then(() => done.fail(), () => done()); } function expectArraysEqual(actual, expected) { const exp11 = typeof expected === "string" || typeof expected === "number" || typeof expected === "boolean" ? [expected] : expected; if (isString(actual) || isString(actual[0]) || isString(expected) || isString(expected[0])) { return expectArraysPredicate(actual, exp11, (a, b) => a == b); } return expectArraysPredicate(actual, expected, (a, b) => areClose(a, b, 0)); } function expectNumbersClose(a, e, epsilon) { if (epsilon == null) { epsilon = testEpsilon(); } if (!areClose(a, e, epsilon)) { throw new Error(`Numbers differ: actual === ${a}, expected === ${e}`); } } function areClose(a, e, epsilon) { if (!isFinite(a) && !isFinite(e)) { return true; } if (isNaN(a) || isNaN(e) || Math.abs(a - e) > epsilon) { return false; } return true; } function expectValuesInRange(actual, low, high) { for (let i = 0; i < actual.length; i++) { if (actual[i] < low || actual[i] > high) { throw new Error(`Value out of range:${actual[i]} low: ${low}, high: ${high}`); } } } function expectArrayBuffersEqual(actual, expected) { expect(new Float32Array(actual)).toEqual(new Float32Array(expected)); } // node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js const seedrandom = __toModule(require_seedrandom2()); /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ class MPRandGauss { constructor(mean5, stdDeviation, dtype, truncated, seed) { this.mean = mean5; this.stdDev = stdDeviation; this.dtype = dtype; this.nextVal = NaN; this.truncated = truncated; if (this.truncated) { this.upper = this.mean + this.stdDev * 2; this.lower = this.mean - this.stdDev * 2; } const seedValue = seed ? seed : Math.random(); this.random = seedrandom.alea(seedValue.toString()); } nextValue() { if (!isNaN(this.nextVal)) { const value = this.nextVal; this.nextVal = NaN; return value; } let resultX, resultY; let isValid = false; while (!isValid) { let v1, v2, s; do { v1 = 2 * this.random() - 1; v2 = 2 * this.random() - 1; s = v1 * v1 + v2 * v2; } while (s >= 1 || s === 0); const mul62 = Math.sqrt(-2 * Math.log(s) / s); resultX = this.mean + this.stdDev * v1 * mul62; resultY = this.mean + this.stdDev * v2 * mul62; if (!this.truncated || this.isValidTruncated(resultX)) { isValid = true; } } if (!this.truncated || this.isValidTruncated(resultY)) { this.nextVal = this.convertValue(resultY); } return this.convertValue(resultX); } convertValue(value) { if (this.dtype == null || this.dtype === "float32") { return value; } return Math.round(value); } isValidTruncated(value) { return value <= this.upper && value >= this.lower; } } class RandGamma { constructor(alpha, beta, dtype, seed) { this.alpha = alpha; this.beta = 1 / beta; this.dtype = dtype; const seedValue = seed ? 
seed : Math.random(); this.randu = seedrandom.alea(seedValue.toString()); this.randn = new MPRandGauss(0, 1, dtype, false, this.randu()); if (alpha < 1) { this.d = alpha + 2 / 3; } else { this.d = alpha - 1 / 3; } this.c = 1 / Math.sqrt(9 * this.d); } nextValue() { let x2, v0, v1, x, u, v; while (true) { do { x = this.randn.nextValue(); v = 1 + this.c * x; } while (v <= 0); v *= v * v; x2 = x * x; v0 = 1 - 0.331 * x2 * x2; v1 = 0.5 * x2 + this.d * (1 - v + Math.log(v)); u = this.randu(); if (u < v0 || Math.log(u) < v1) { break; } } v = 1 / this.beta * this.d * v; if (this.alpha < 1) { v *= Math.pow(this.randu(), 1 / this.alpha); } return this.convertValue(v); } convertValue(value) { if (this.dtype === "float32") { return value; } return Math.round(value); } } class UniformRandom { constructor(min5 = 0, max7 = 1, dtype, seed) { this.canReturnFloat = () => this.dtype == null || this.dtype === "float32"; this.min = min5; this.range = max7 - min5; this.dtype = dtype; if (seed == null) { seed = Math.random(); } if (typeof seed === "number") { seed = seed.toString(); } if (!this.canReturnFloat() && this.range <= 1) { throw new Error(`The difference between ${min5} - ${max7} <= 1 and dtype is not float`); } this.random = seedrandom.alea(seed); } convertValue(value) { if (this.canReturnFloat()) { return value; } return Math.round(value); } nextValue() { return this.convertValue(this.min + this.range * this.random()); } } // node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function randomGamma_(shape, alpha, beta = 1, dtype = "float32", seed) { if (beta == null) { beta = 1; } if (dtype == null) { dtype = "float32"; } if (dtype !== "float32" && dtype !== "int32") { throw new Error(`Unsupported data type ${dtype}`); } const rgamma = new RandGamma(alpha, beta, dtype, seed); const res = buffer(shape, dtype); for (let i = 0; i < res.values.length; i++) { res.values[i] = rgamma.nextValue(); } return res.toTensor(); } const randomGamma = op({randomGamma_}); // node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function randomNormal_(shape, mean5 = 0, stdDev = 1, dtype, seed) { if (dtype != null && dtype === "bool") { throw new Error(`Unsupported data type ${dtype}`); } const randGauss = new MPRandGauss(mean5, stdDev, dtype, false, seed); const res = buffer(shape, dtype); for (let i = 0; i < res.values.length; i++) { res.values[i] = randGauss.nextValue(); } return res.toTensor(); } const randomNormal = op({randomNormal_}); // node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function randomUniform_(shape, minval = 0, maxval = 1, dtype = "float32", seed) { const res = buffer(shape, dtype); const random = new UniformRandom(minval, maxval, null, seed); for (let i = 0; i < res.values.length; i++) { res.values[i] = random.nextValue(); } return res.toTensor(); } const randomUniform = op({randomUniform_}); // node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor1d(values, dtype) { assertNonNull(values); const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 1) { throw new Error("tensor1d() requires values to be a flat/TypedArray"); } const shape = null; return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/range.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function range(start, stop, step7 = 1, dtype = "float32") { if (step7 === 0) { throw new Error("Cannot have a step of zero"); } const forward = () => { const sameStartStop = start === stop; const increasingRangeNegativeStep = start < stop && step7 < 0; const decreasingRangePositiveStep = stop < start && step7 > 1; if (sameStartStop || increasingRangeNegativeStep || decreasingRangePositiveStep) { return zeros([0], dtype); } const numElements = Math.abs(Math.ceil((stop - start) / step7)); const values = makeZerosTypedArray(numElements, dtype); if (stop < start && step7 === 1) { step7 = -1; } values[0] = start; for (let i = 1; i < values.length; i++) { values[i] = values[i - 1] + step7; } return tensor1d(values, dtype); }; const attrs = {start, stop, step: step7, dtype}; return ENGINE.runKernelFunc(forward, {}, null, Range, attrs); } // node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reciprocal_(x) { const $x = convertToTensor(x, "x", "reciprocal"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.reciprocal($x); save([$x]); return res; }, inputs, null, Reciprocal); } const reciprocal = op({reciprocal_}); // node_modules/@tensorflow/tfjs-core/dist/ops/relu.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function relu_(x) { const $x = convertToTensor(x, "x", "relu"); const forward = (backend2, save) => { save([$x]); if ($x.dtype === "bool") { return cast($x, "int32"); } return backend2.relu($x); }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Relu); } const relu = op({relu_}); // node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function relu6_(x) { const $x = convertToTensor(x, "x", "relu6"); const forward = (backend2, save) => { save([$x]); if ($x.dtype === "bool") { return cast($x, "int32"); } return backend2.relu6($x); }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Relu6); } const relu6 = op({relu6_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reverse1d_(x) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 1, () => `Error in reverse1D: x must be rank 1 but got rank ${$x.rank}.`); return reverse($x, 0); } const reverse1d = op({reverse1d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reverse2d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 2, () => `Error in reverse2D: x must be rank 2 but got rank ${$x.rank}.`); return reverse($x, axis); } const reverse2d = op({reverse2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function reverse3d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 3, () => `Error in reverse3D: x must be rank 3 but got rank ${$x.rank}.`); return reverse($x, axis); } const reverse3d = op({reverse3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function reverse4d_(x, axis) { const $x = convertToTensor(x, "x", "reverse"); assert($x.rank === 4, () => `Error in reverse4D: x must be rank 4 but got rank ${$x.rank}.`); return reverse($x, axis); } const reverse4d = op({reverse4d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js function prepareSplitSize(x, numOrSizeSplits, axis = 0) { let splitSizes = []; if (typeof numOrSizeSplits === "number") { assert(x.shape[axis] % numOrSizeSplits === 0, () => "Number of splits must evenly divide the axis."); splitSizes = new Array(numOrSizeSplits).fill(x.shape[axis] / numOrSizeSplits); } else { const numOfNegs = numOrSizeSplits.reduce((count, value) => { if (value === -1) { count += 1; } return count; }, 0); assert(numOfNegs <= 1, () => "There should be only one negative value in split array."); const negIndex = numOrSizeSplits.indexOf(-1); if (negIndex !== -1) { const total = numOrSizeSplits.reduce((a, b) => b > 0 ? a + b : a); numOrSizeSplits[negIndex] = x.shape[axis] - total; } assert(x.shape[axis] === numOrSizeSplits.reduce((a, b) => a + b), () => "The sum of sizes must match the size of the axis dimension."); splitSizes = numOrSizeSplits; } return splitSizes; } // node_modules/@tensorflow/tfjs-core/dist/ops/split.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function split_(x, numOrSizeSplits, axis = 0) { const $x = convertToTensor(x, "x", "split"); const forward = (backend2, _) => { const $axis = parseAxisParam(axis, $x.shape)[0]; const splitSizes = prepareSplitSize($x, numOrSizeSplits, $axis); return backend2.split($x, splitSizes, $axis); }; const inputs = {x: $x}; const attr = {numOrSizeSplits, axis}; return ENGINE.runKernelFunc(forward, inputs, null, SplitV, attr); } const split = op({split_}); // node_modules/@tensorflow/tfjs-core/dist/ops/rfft.js /** * @license * Copyright 2018 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function rfft_(input, fftLength) { assert(input.dtype === "float32", () => `The dtype for rfft() must be real value but got ${input.dtype}`); let innerDimensionSize = input.shape[input.shape.length - 1]; const batch = input.size / innerDimensionSize; let adjustedInput; if (fftLength != null && fftLength < innerDimensionSize) { const begin = input.shape.map((v) => 0); const size = input.shape.map((v) => v); size[input.shape.length - 1] = fftLength; adjustedInput = slice(input, begin, size); innerDimensionSize = fftLength; } else if (fftLength != null && fftLength > innerDimensionSize) { const zerosShape = input.shape.map((v) => v); zerosShape[input.shape.length - 1] = fftLength - innerDimensionSize; adjustedInput = concat([input, zeros(zerosShape)], input.shape.length - 1); innerDimensionSize = fftLength; } else { adjustedInput = input; } const zerosInput = zerosLike(adjustedInput); const complexInput = reshape(complex(adjustedInput, zerosInput), [batch, innerDimensionSize]); const ret = fft(complexInput); const half = Math.floor(innerDimensionSize / 2) + 1; const realValues = real(ret); const imagValues = imag(ret); const realComplexConjugate = split(realValues, [half, innerDimensionSize - half], realValues.shape.length - 1); const imagComplexConjugate = split(imagValues, [half, innerDimensionSize - half], imagValues.shape.length - 1); const outputShape = adjustedInput.shape.slice(); outputShape[adjustedInput.shape.length - 1] = half; return reshape(complex(realComplexConjugate[0], imagComplexConjugate[0]), outputShape); } const rfft = op({rfft_}); // node_modules/@tensorflow/tfjs-core/dist/ops/round.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function round_(x) { const $x = convertToTensor(x, "x", "round"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.round($x), inputs, null, Round); } const round = op({round_}); // node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function rsqrt_(x) { const $x = convertToTensor(x, "x", "rsqrt"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.rsqrt($x); save([$x]); return res; }, inputs, null, Rsqrt); } const rsqrt = op({rsqrt_}); // node_modules/@tensorflow/tfjs-core/dist/ops/selu.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function selu_(x) { const $x = convertToTensor(x, "x", "selu"); const forward = (backend2, save) => { const res = backend2.selu($x); save([$x]); return res; }; const inputs = {x: $x}; return ENGINE.runKernelFunc(forward, inputs, null, Selu); } const selu = op({selu_}); // node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad8, dilation = [1, 1], dataFormat = "NHWC") { const $x = convertToTensor(x, "x", "separableConv2d"); const $depthwiseFilter = convertToTensor(depthwiseFilter, "depthwiseFilter", "separableConv2d"); const $pointwiseFilter = convertToTensor(pointwiseFilter, "pointwiseFilter", "separableConv2d"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } if (dataFormat === "NCHW") { throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported"); } assert(x4D.rank === 4, () => `Error in separableConv2d: input must be rank 4, but got rank ${x4D.rank}.`); assert($depthwiseFilter.rank === 4, () => `Error in separableConv2d: depthwise filter must be rank 4, but got rank ${$depthwiseFilter.rank}.`); assert($pointwiseFilter.rank === 4, () => `Error in separableConv2d: pointwise filter must be rank 4, but got rank ${$pointwiseFilter.rank}.`); assert($pointwiseFilter.shape[0] === 1, () => `Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${$pointwiseFilter.shape[0]}.`); assert($pointwiseFilter.shape[1] === 1, () => `Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${$pointwiseFilter.shape[1]}.`); const inChannels = $depthwiseFilter.shape[2]; const channelMultiplier = $depthwiseFilter.shape[3]; assert($pointwiseFilter.shape[2] === inChannels * channelMultiplier, () => `Error in separableConv2d: the third dimension of pointwise filter must be ${inChannels * channelMultiplier}, but got ${$pointwiseFilter.shape[2]}.`); const depthwise =
depthwiseConv2d(x4D, $depthwiseFilter, strides, pad8, dataFormat, dilation); const pointwiseStride = 1; const res = conv2d(depthwise, $pointwiseFilter, pointwiseStride, "valid", dataFormat); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const separableConv2d = op({separableConv2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function setdiff1dAsync_(x, y) { const $x = convertToTensor(x, "x", "setdiff1d"); const $y = convertToTensor(y, "y", "setdiff1d"); assert($x.dtype === $y.dtype, () => `x and y should have the same dtype, but got x (${$x.dtype}) and y (${$y.dtype}).`); assert($x.rank === 1, () => `x should be 1D tensor, but got x (${$x.shape}).`); assert($y.rank === 1, () => `y should be 1D tensor, but got y (${$y.shape}).`); const xVals = await $x.data(); const yVals = await $y.data(); const ySet = new Set(yVals); let outputSize = 0; for (let i = 0; i < xVals.length; i++) { if (!ySet.has(xVals[i])) { outputSize++; } } const buffer10 = new TensorBuffer([outputSize], $x.dtype); const indices = new TensorBuffer([outputSize], "int32"); for (let i = 0, p = 0; i < xVals.length; i++) { if (!ySet.has(xVals[i])) { buffer10.values[p] = xVals[i]; indices.values[p] = i; p++; } } return [buffer10.toTensor(), indices.toTensor()]; } const setdiff1dAsync = setdiff1dAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/sign.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sign_(x) { const $x = convertToTensor(x, "x", "sign"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2) => backend2.sign($x), inputs, null, Sign); } const sign = op({sign_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sin.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sin_(x) { const $x = convertToTensor(x, "x", "sin"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.sin($x); save([$x]); return res; }, inputs, null, Sin); } const sin = op({sin_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sinh_(x) { const $x = convertToTensor(x, "x", "sinh"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.sinh($x); save([$x]); return res; }, inputs, null, Sinh); } const sinh = op({sinh_}); // node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function slice1d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice1d"); assert($x.rank === 1, () => `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`); return slice($x, [begin], [size]); } const slice1d = op({slice1d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function slice2d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice2d"); assert($x.rank === 2, () => `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } const slice2d = op({slice2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function slice3d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice3d"); assert($x.rank === 3, () => `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } const slice3d = op({slice3d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function slice4d_(x, begin, size) { const $x = convertToTensor(x, "x", "slice4d"); assert($x.rank === 4, () => `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`); return slice($x, begin, size); } const slice4d = op({slice4d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function softmax_(logits, dim = -1) { const $logits = convertToTensor(logits, "logits", "softmax", "float32"); if (dim === -1) { dim = $logits.rank - 1; } if (dim !== $logits.rank - 1) { throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${$logits.rank} and dim was ${dim}`); } const inputs = {logits: $logits}; const attrs = {dim}; return ENGINE.runKernelFunc((backend2, save) => { const y = backend2.softmax($logits, dim); save([y]); return y; }, inputs, null, Softmax, attrs); } const softmax = op({softmax_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sqrt_(x) { const $x = convertToTensor(x, "x", "sqrt"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.sqrt($x); save([$x]); return res; }, inputs, null, Sqrt); } const sqrt = op({sqrt_}); // node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function squaredDifference_(a, b) { let $a = convertToTensor(a, "a", "squaredDifference"); let $b = convertToTensor(b, "b", "squaredDifference"); [$a, $b] = makeTypesMatch($a, $b); assertAndGetBroadcastShape($a.shape, $b.shape); const forward = (backend2, save) => { const res = backend2.squaredDifference($a, $b); save([$a, $b]); return res; }; const inputs = {a: $a, b: $b}; const attrs = {}; return ENGINE.runKernelFunc(forward, inputs, null, SquaredDifference, attrs); } const squaredDifference = op({squaredDifference_}); // node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function squeeze_(x, axis) { const $x = convertToTensor(x, "x", "squeeze"); return reshape($x, squeezeShape($x.shape, axis).newShape); } const squeeze = op({squeeze_}); // node_modules/@tensorflow/tfjs-core/dist/ops/stack.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function stack_(tensors, axis = 0) { const $tensors = convertToTensorArray(tensors, "tensors", "stack"); assert($tensors.length >= 1, () => "Pass at least one tensor to tf.stack"); if ($tensors.length === 1) { return expandDims($tensors[0], axis); } const rank = $tensors[0].rank; const shape = $tensors[0].shape; const dtype = $tensors[0].dtype; assert(axis <= rank, () => "Axis must be <= rank of the tensor"); $tensors.forEach((t) => { assertShapesMatch(shape, t.shape, "All tensors passed to stack must have matching shapes"); assert(dtype === t.dtype, () => "All tensors passed to stack must have matching dtypes"); }); const expandedTensors = $tensors.map((t) => expandDims(t, axis)); return concat(expandedTensors, axis); } const stack = op({stack_}); // node_modules/@tensorflow/tfjs-core/dist/ops/step.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function step_(x, alpha = 0) { const $x = convertToTensor(x, "x", "step"); const inputs = {x: $x}; const attrs = {alpha}; return ENGINE.runKernelFunc((backend2) => backend2.step($x, alpha), inputs, null, Step, attrs); } const step = op({step_}); // node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function stridedSlice_(x, begin, end, strides, beginMask = 0, endMask = 0, ellipsisMask = 0, newAxisMask = 0, shrinkAxisMask = 0) { if (strides == null) { strides = new Array(begin.length); } let $x = convertToTensor(x, "x", "stridedSlice"); const forward = (backend2) => { const ellipsisAxes = maskToAxes(ellipsisMask); if (ellipsisAxes.length > 1) { throw new Error("Multiple ellipses in slice is not allowed."); } if (ellipsisMask !== 0 && newAxisMask !== 0) { throw new Error("Using both ellipsisMask and newAxisMask is not yet supported."); } if (ellipsisMask !== 0 && shrinkAxisMask !== 0) { throw new Error("Using both ellipsisMask and shrinkAxisMask is not yet supported."); } const numInterpolatedAxes = $x.rank - begin.length; const expandAxes = maskToAxes(newAxisMask); const newShape = $x.shape.slice(); expandAxes.forEach((axis) => { begin[axis] = 0; end[axis] = 1; newShape.splice(axis, 0, 1); }); $x = reshape($x, newShape); if (ellipsisAxes.length && numInterpolatedAxes > 0) { const fullIndex = ellipsisAxes[0]; const numElidedAxes = numInterpolatedAxes + 1; begin = startIndicesWithElidedDims(beginMask, fullIndex, numElidedAxes, begin, $x.shape); end = stopIndicesWithElidedDims(endMask, fullIndex, numElidedAxes, end, $x.shape); strides = stridesWithElidedDims(strides, fullIndex, numElidedAxes, $x.shape); } else { for (let axis = 0; axis < $x.rank; axis++) { begin[axis] = startForAxis(beginMask, begin, strides, $x.shape, axis, ellipsisMask); end[axis] = stopForAxis(endMask, end, strides, $x.shape, axis, ellipsisMask); strides[axis] = stridesForAxis(strides, axis, ellipsisMask); } } const shrinkAxes = maskToAxes(shrinkAxisMask); shrinkAxes.forEach((axis) => { end[axis] = begin[axis] + 1; strides[axis] = 1; }); const size = computeOutShape2(begin, end, strides); const outShape = size.filter((_, axis) => shrinkAxes.indexOf(axis) === -1); const nonStrided = strides.every((v) => v === 1); if (nonStrided) { return reshape(slice($x, begin, size), outShape); } const res = backend2.stridedSlice($x, begin, end, strides); return reshape(res, outShape); }; const inputs = {x: $x}; const attrs = { begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask }; return ENGINE.runKernelFunc(forward, inputs, null, StridedSlice, attrs); } const stridedSlice = op({stridedSlice_}); // node_modules/@tensorflow/tfjs-core/dist/ops/tan.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tan_(x) { const $x = convertToTensor(x, "x", "tan"); const inputs = {x: $x}; return ENGINE.runKernelFunc((backend2, save) => { const res = backend2.tan($x); save([$x]); return res; }, inputs, null, Tan); } const tan = op({tan_}); // node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor2d(values, shape, dtype) { assertNonNull(values); if (shape != null && shape.length !== 2) { throw new Error("tensor2d() requires shape to have two numbers"); } const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 2 && inferredShape.length !== 1) { throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray"); } if (inferredShape.length === 1 && shape == null) { throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray"); } return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor3d(values, shape, dtype) { assertNonNull(values); if (shape != null && shape.length !== 3) { throw new Error("tensor3d() requires shape to have three numbers"); } const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 3 && inferredShape.length !== 1) { throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray"); } if (inferredShape.length === 1 && shape == null) { throw new Error("tensor3d() requires shape to be provided when `values` are a flat array"); } return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function tensor4d(values, shape, dtype) { assertNonNull(values); if (shape != null && shape.length !== 4) { throw new Error("tensor4d() requires shape to have four numbers"); } const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 4 && inferredShape.length !== 1) { throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray"); } if (inferredShape.length === 1 && shape == null) { throw new Error("tensor4d() requires shape to be provided when `values` are a flat array"); } return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor5d(values, shape, dtype) { assertNonNull(values); if (shape != null && shape.length !== 5) { throw new Error("tensor5d() requires shape to have five numbers"); } const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 5 && inferredShape.length !== 1) { throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray"); } if (inferredShape.length === 1 && shape == null) { throw new Error("tensor5d() requires shape to be provided when `values` are a flat array"); } return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function tensor6d(values, shape, dtype) { assertNonNull(values); if (shape != null && shape.length !== 6) { throw new Error("tensor6d() requires shape to have six numbers"); } const inferredShape = inferShape(values, dtype); if (inferredShape.length !== 6 && inferredShape.length !== 1) { throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray"); } if (inferredShape.length === 1 && shape == null) { throw new Error("tensor6d() requires shape to be provided when `values` are a flat array"); } shape = shape || inferredShape; return makeTensor(values, shape, inferredShape, dtype); } // node_modules/@tensorflow/tfjs-core/dist/ops/topk.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function topk_(x, k = 1, sorted = true) { const $x = convertToTensor(x, "x", "topk"); if ($x.rank === 0) { throw new Error("topk() expects the input to be of rank 1 or higher"); } const lastDim = $x.shape[$x.shape.length - 1]; if (k > lastDim) { throw new Error(`'k' passed to topk() must be <= the last dimension (${lastDim}) but got ${k}`); } const inputs = {x: $x}; const attrs = {k, sorted}; const [values, indices] = ENGINE.runKernelFunc((b) => b.topk($x, k, sorted), inputs, null, TopK, attrs); return {values, indices}; } const topk = op({topk_}); // node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function truncatedNormal_(shape, mean5 = 0, stdDev = 1, dtype, seed) { if (dtype != null && dtype === "bool") { throw new Error(`Unsupported data type ${dtype}`); } const randGauss = new MPRandGauss(mean5, stdDev, dtype, true, seed); const res = buffer(shape, dtype); for (let i = 0; i < res.values.length; i++) { res.values[i] = randGauss.nextValue(); } return res.toTensor(); } const truncatedNormal = op({truncatedNormal_}); // node_modules/@tensorflow/tfjs-core/dist/ops/unsorted_segment_sum.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
* ============================================================================= */ function unsortedSegmentSum_(x, segmentIds, numSegments) { const $x = convertToTensor(x, "x", "unsortedSegmentSum"); const $segmentIds = convertToTensor(segmentIds, "segmentIds", "unsortedSegmentSum", "int32"); assert(isInt(numSegments), () => "numSegments must be of dtype int"); const inputs = {x: $x, segmentIds: $segmentIds}; const attrs = {numSegments}; const forward = (backend2, save) => { const res = backend2.unsortedSegmentSum($x, $segmentIds, numSegments); save([$segmentIds]); return res; }; return ENGINE.runKernelFunc(forward, inputs, null, UnsortedSegmentSum, attrs); } const unsortedSegmentSum = op({unsortedSegmentSum_}); // node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function unstack_(x, axis = 0) { const $x = convertToTensor(x, "x", "unstack"); assert(axis >= -$x.shape.length && axis < $x.shape.length, () => `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`); if (axis < 0) { axis += $x.shape.length; } const inputs = {value: $x}; const attrs = {axis}; const forward = (backend2) => backend2.unstack($x, axis); return ENGINE.runKernelFunc(forward, inputs, null, Unpack, attrs); } const unstack = op({unstack_}); // node_modules/@tensorflow/tfjs-core/dist/ops/variable.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function variable(initialValue, trainable = true, name, dtype) { return ENGINE.makeVariable(initialValue, trainable, name, dtype); } // node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function whereImpl(condShape, condVals) { const indices = []; for (let i = 0; i < condVals.length; i++) { if (condVals[i]) { indices.push(i); } } const inBuffer = buffer(condShape, "int32"); const out = buffer([indices.length, condShape.length], "int32"); for (let i = 0; i < indices.length; i++) { const loc = inBuffer.indexToLoc(indices[i]); const offset = i * condShape.length; out.values.set(loc, offset); } return out.toTensor(); } // node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function whereAsync_(condition) { const $condition = convertToTensor(condition, "condition", "whereAsync", "bool"); const vals = await $condition.data(); const res = whereImpl($condition.shape, vals); if (condition !== $condition) { $condition.dispose(); } return res; } const whereAsync = whereAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function booleanMaskAsync_(tensor17, mask, axis) { const $tensor = convertToTensor(tensor17, "tensor", "boolMask"); const $mask = convertToTensor(mask, "mask", "boolMask", "bool"); const axisFrom = axis == null ? 
0 : axis; const maskDim = $mask.rank; const tensorShape = $tensor.shape; assert(maskDim > 0, () => "mask cannot be scalar"); assertShapesMatch(tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape, `mask's shape must match the first K dimensions of tensor's shape,`); let leadingSize = 1; for (let i = axisFrom; i < axisFrom + maskDim; i++) { leadingSize *= tensorShape[i]; } const targetTensorShape = tensorShape.slice(0, axisFrom).concat([leadingSize], tensorShape.slice(axisFrom + maskDim)); const reshapedTensor = reshape($tensor, targetTensorShape); const reshapedMask = reshape($mask, [-1]); const positivePositions = await whereAsync(reshapedMask); const indices = squeeze(positivePositions, [1]); const res = gather(reshapedTensor, indices, axisFrom); if (tensor17 !== $tensor) { $tensor.dispose(); } if (mask !== $mask) { $mask.dispose(); } indices.dispose(); reshapedTensor.dispose(); reshapedMask.dispose(); positivePositions.dispose(); return res; } const booleanMaskAsync = booleanMaskAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/compare.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function notEqualStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "notEqualStrict"); const $b = convertToTensor(b, "b", "notEqualStrict"); assertShapesMatch($a.shape, $b.shape, "Error in notEqualStrict: "); return notEqual($a, $b); } function lessStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "lessStrict"); const $b = convertToTensor(b, "b", "lessStrict"); assertShapesMatch($a.shape, $b.shape, "Error in lessStrict: "); return less($a, $b); } function equalStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "equalStrict"); const $b = convertToTensor(b, "b", "equalStrict"); assertShapesMatch($a.shape, $b.shape, "Error in equalStrict: "); return equal($a, $b); } function lessEqualStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "lessEqualStrict"); const $b = convertToTensor(b, "b", "lessEqualStrict"); assertShapesMatch($a.shape, $b.shape, "Error in lessEqualStrict: "); return lessEqual($a, $b); } function greaterStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "greaterStrict"); const $b = convertToTensor(b, "b", "greaterStrict"); assertShapesMatch($a.shape, $b.shape, "Error in greaterStrict: "); return greater($a, $b); } function greaterEqualStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", 
"greaterEqualStrict"); const $b = convertToTensor(b, "b", "greaterEqualStrict"); assertShapesMatch($a.shape, $b.shape, "Error in greaterEqualStrict: "); return greaterEqual($a, $b); } const equalStrict = op({equalStrict_}); const greaterEqualStrict = op({greaterEqualStrict_}); const greaterStrict = op({greaterStrict_}); const lessEqualStrict = op({lessEqualStrict_}); const lessStrict = op({lessStrict_}); const notEqualStrict = op({notEqualStrict_}); // node_modules/@tensorflow/tfjs-core/dist/ops/binary_ops.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function addStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "addStrict"); const $b = convertToTensor(b, "b", "addStrict"); assertShapesMatch($a.shape, $b.shape, "Error in addStrict: "); return add2($a, $b); } function subStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "subStrict"); const $b = convertToTensor(b, "b", "subStrict"); assertShapesMatch($a.shape, $b.shape, "Error in subStrict: "); return sub($a, $b); } function powStrict_(base, exp11) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); assertShapesMatch(base.shape, exp11.shape, "Error in powStrict: "); return pow(base, exp11); } function mulStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "mul"); const $b = convertToTensor(b, "b", "mul"); assertShapesMatch($a.shape, $b.shape, "Error in multiplyStrict: "); return mul($a, $b); } function divStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "div"); const $b = convertToTensor(b, "b", "div"); assertShapesMatch($a.shape, $b.shape, "Error in divideStrict: "); return div($a, $b); } function modStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "modStrict"); const $b = convertToTensor(b, "b", "modStrict"); assertShapesMatch($a.shape, $b.shape, "Error in modStrict: "); return mod($a, $b); } function minimumStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "minimumStrict"); const $b = convertToTensor(b, "b", "minimumStrict"); assertShapesMatch($a.shape, $b.shape, "Error in minimumStrict: "); return minimum($a, $b); } function maximumStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "maximumStrict"); const $b = convertToTensor(b, "b", "maximumStrict"); assertShapesMatch($a.shape, $b.shape, "Error in 
maximumStrict: "); return maximum($a, $b); } function squaredDifferenceStrict_(a, b) { deprecationWarn("strict variants of ops have been deprecated and will be removed in future"); const $a = convertToTensor(a, "a", "squaredDifferenceStrict"); const $b = convertToTensor(b, "b", "squaredDifferenceStrict"); assertShapesMatch($a.shape, $b.shape, "Error in squaredDifferenceStrict: "); return squaredDifference($a, $b); } const addStrict = op({addStrict_}); const divStrict = op({divStrict_}); const maximumStrict = op({maximumStrict_}); const minimumStrict = op({minimumStrict_}); const modStrict = op({modStrict_}); const mulStrict = op({mulStrict_}); const powStrict = op({powStrict_}); const squaredDifferenceStrict = op({squaredDifferenceStrict_}); const subStrict = op({subStrict_}); // node_modules/@tensorflow/tfjs-core/dist/ops/norm.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function norm_(x, ord = "euclidean", axis = null, keepDims = false) { x = convertToTensor(x, "x", "norm"); const norm4 = normImpl(x, ord, axis); let keepDimsShape = norm4.shape; if (keepDims) { const axes = parseAxisParam(axis, x.shape); keepDimsShape = expandShapeToKeepDim(norm4.shape, axes); } return reshape(norm4, keepDimsShape); } function normImpl(x, p, axis = null) { if (x.rank === 0) { return abs(x); } if (x.rank !== 1 && axis === null) { return normImpl(reshape(x, [-1]), p, axis); } if (x.rank === 1 || typeof axis === "number" || Array.isArray(axis) && axis.length === 1) { if (p === 1) { return sum2(abs(x), axis); } if (p === Infinity) { return max(abs(x), axis); } if (p === -Infinity) { return min(abs(x), axis); } if (p === "euclidean" || p === 2) { return sqrt(sum2(pow(abs(x), scalar(2, "int32")), axis)); } throw new Error(`Error in norm: invalid ord value: ${p}`); } if (Array.isArray(axis) && axis.length === 2) { if (p === 1) { return max(sum2(abs(x), axis[0]), axis[1] - 1); } if (p === Infinity) { return max(sum2(abs(x), axis[1]), axis[0]); } if (p === -Infinity) { return min(sum2(abs(x), axis[1]), axis[0]); } if (p === "fro" || p === "euclidean") { return sqrt(sum2(square(x), axis)); } throw new Error(`Error in norm: invalid ord value: ${p}`); } throw new Error(`Error in norm: invalid axis: ${axis}`); } const norm = op({norm_}); // node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function movingAverage_(v, x, decay, step7, zeroDebias = true) { const $v = convertToTensor(v, "v", "movingAverage"); const $x = convertToTensor(x, "x", "movingAverage"); const $decay = convertToTensor(decay, "decay", "movingAverage"); assertTypesMatch($v, $x); assert(arraysEqual($v.shape, $x.shape), () => "Shape mismatch in v and x"); const one = scalar(1); const oneMinusDecay = sub(one, $decay); let update = mul(sub($x, $v), oneMinusDecay); if (zeroDebias) { assert(step7 != null, () => "When using zeroDebias: true, step is required."); const $step = convertToTensor(step7, "step", "movingAverage"); update = div(update, sub(one, pow($decay, $step))); } return add2($v, update); } const movingAverage = op({movingAverage_}); // node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js const scatter_nd_util_exports = {}; __export(scatter_nd_util_exports, { calculateShapes: () => calculateShapes, validateInput: () => validateInput, validateUpdateShape: () => validateUpdateShape }); function validateUpdateShape(shape, indices, updates) { const sliceDim = indices.rank > 1 ? indices.shape[indices.rank - 1] : 1; const batchDim = indices.rank > 1 ? indices.rank - 1 : 1; const shapeError = `Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${updates.shape}, indices.shape: ${indices.shape}, shape: ${shape}, sliceDim: ${sliceDim}, and batchDim: ${batchDim}.`; if (updates.rank < batchDim) { throw new Error(shapeError + ` update.rank < ${batchDim}. `); } if (shape.length < sliceDim + (updates.rank - batchDim)) { throw new Error(shapeError + ` Output shape length < ${sliceDim + (updates.rank - batchDim)}`); } if (updates.rank !== batchDim + shape.length - sliceDim) { throw new Error(shapeError + ` update.rank != ${batchDim + shape.length - sliceDim}`); } for (let d = 0; d < batchDim; ++d) { if (updates.shape[d] !== indices.shape[d]) { throw new Error(shapeError + ` updates.shape[${d}] (${updates.shape[d]}) != indices.shape[${d}] (${indices.shape[d]}).`); } } for (let d = 0; d < updates.rank - batchDim; ++d) { if (updates.shape[d + batchDim] !== shape[d + sliceDim]) { throw new Error(shapeError + ` updates.shape[${d + batchDim}] (${updates.shape[d + batchDim]}) != shape[${d + batchDim}] (${shape[d + batchDim]})`); } } } function validateInput(updates, indices, shape) { if (indices.rank < 1) { throw new Error(`tf.scatterND() expects the indices to be rank 1 or higher, but the rank was ${indices.rank}.`); } if (updates.rank < 1) { throw new Error(`tf.scatterND() expects the updates to be rank 1 or higher, but the rank was ${updates.rank}.`); } if (indices.dtype !== "int32") { throw new Error(`The dtype of 'indices' should be int32, but got dtype: ${indices.dtype}`); } if (shape.length < 1) { throw new Error(`Output rank must be greater or equal to 1, but got shape: ${shape}`); } if (shape.length === 0) { if (indices.size === 0) { throw new Error(`Indices specified for empty output. indices shape: ${indices.shape}`); } if (updates.size === 0) { throw new Error(`Updates specified for empty output. updates shape: ${updates.shape}`); } } validateUpdateShape(shape, indices, updates); } function calculateShapes(updates, indices, shape) { const indicesRank = indices.shape.length; const sliceRank = indicesRank > 1 ? 
indices.shape[indicesRank - 1] : 1; const totalNd = shape.length; let sliceSize = 1; for (let i = sliceRank; i < totalNd; ++i) { sliceSize *= shape[i]; } const safeSliceDim = sliceRank < 1 ? 1 : sliceRank; const numUpdates = sizeFromShape(indices.shape) / safeSliceDim; const strides = [...computeStrides(shape.slice(0, sliceRank)), 1]; const outputSize = sizeFromShape(shape); return {sliceRank, numUpdates, sliceSize, strides, outputSize}; } // node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function scatterND_(indices, updates, shape) { const $indices = convertToTensor(indices, "indices", "scatterND", "int32"); const $updates = convertToTensor(updates, "updates", "scatterND"); validateInput($updates, $indices, shape); const forward = (backend2) => { return backend2.scatterND($indices, $updates, shape); }; const inputs = {indices: $indices, updates: $updates}; const attrs = {shape}; return ENGINE.runKernelFunc(forward, inputs, null, ScatterNd, attrs); } const scatterND = op({scatterND_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense_util.js function validateInput2(sparseIndices, sparseValues, outputShape, defaultValues) { if (sparseIndices.dtype !== "int32") { throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${sparseIndices.dtype}.`); } if (sparseIndices.rank > 2) { throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${sparseIndices.shape}.`); } const numElems = sparseIndices.rank > 0 ? sparseIndices.shape[0] : 1; const numDims = sparseIndices.rank > 1 ? sparseIndices.shape[1] : 1; if (outputShape.length !== numDims) { throw new Error(`outputShape has incorrect number of elements:, ${outputShape.length}, should be: ${numDims}.`); } const numValues = sparseValues.size; if (!(sparseValues.rank === 0 || sparseValues.rank === 1 && numValues === numElems)) { throw new Error(`sparseValues has incorrect shape ${sparseValues.shape}, should be [] or [${numElems}]`); } if (sparseValues.dtype !== defaultValues.dtype) { throw new Error("sparseValues.dtype must match defaultValues.dtype"); } } // node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function sparseToDense_(sparseIndices, sparseValues, outputShape, defaultValue = 0) { const $sparseIndices = convertToTensor(sparseIndices, "sparseIndices", "sparseToDense", "int32"); const $sparseValues = convertToTensor(sparseValues, "sparseValues", "sparseToDense"); const $defaultValue = convertToTensor(defaultValue, "defaultValue", "sparseToDense", $sparseValues.dtype); validateInput2($sparseIndices, $sparseValues, outputShape, $defaultValue); const inputs = { sparseIndices: $sparseIndices, sparseValues: $sparseValues, defaultValue: $defaultValue }; const attrs = {outputShape}; return ENGINE.runKernelFunc((backend2) => backend2.sparseToDense($sparseIndices, $sparseValues, outputShape, $defaultValue), inputs, null, SparseToDense, attrs); } const sparseToDense = op({sparseToDense_}); // node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function gatherND_(x, indices) { const $indices = convertToTensor(indices, "indices", "gatherND", "int32"); const $x = convertToTensor(x, "x", "gatherND"); const forward = (backend2) => { return backend2.gatherND($x, $indices); }; const inputs = {params: $x, indices: $indices}; return ENGINE.runKernelFunc(forward, inputs, null, GatherNd); } const gatherND = op({gatherND_}); // node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function getNoiseShape(x, noiseShape) { if (noiseShape == null) { return x.shape.slice(); } if (arraysEqual(x.shape, noiseShape)) { return noiseShape; } if (x.shape.length === noiseShape.length) { const newDimension = []; for (let i = 0; i < x.shape.length; i++) { if (noiseShape[i] == null && x.shape[i] != null) { newDimension.push(x.shape[i]); } else { newDimension.push(noiseShape[i]); } } return newDimension; } return noiseShape; } // node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function dropout_(x, rate, noiseShape, seed) { const $x = convertToTensor(x, "x", "dropout"); assert($x.dtype === "float32", () => `x has to be a floating point tensor since it's going to be scaled, but got a ${$x.dtype} tensor instead.`); assert(rate >= 0 && rate < 1, () => `rate must be a float in the range [0, 1), but got ${rate}.`); if (rate === 0) { return x instanceof Tensor ? $x.clone() : $x; } const $noiseShape = getNoiseShape($x, noiseShape); const keepProb = 1 - rate; const multiplier = div(floor(add2(randomUniform($noiseShape, 0, 1, "float32", seed), keepProb)), keepProb); return mul($x, multiplier); } const dropout = op({dropout_}); // node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function enclosingPowerOfTwo(value) { return Math.floor(Math.pow(2, Math.ceil(Math.log(value) / Math.log(2)))); } function cosineWindow(windowLength, a, b) { const even = 1 - windowLength % 2; const newValues = new Float32Array(windowLength); for (let i = 0; i < windowLength; ++i) { const cosArg = 2 * Math.PI * i / (windowLength + even - 1); newValues[i] = a - b * Math.cos(cosArg); } return tensor1d(newValues, "float32"); } // node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ async function inTopKAsync_(predictions, targets, k = 1) { const $predictions = convertToTensor(predictions, "predictions", "inTopK"); const $targets = convertToTensor(targets, "targets", "inTopK"); assert($predictions.rank > 1, () => `inTopK() expects the predictions to be of rank 2 or higher, but got ${$predictions.rank}`); assert($predictions.rank - 1 === $targets.rank, () => `predictions rank should be 1 larger than targets rank, but got predictions rank ${$predictions.rank} and targets rank ${$targets.rank}`); assertShapesMatch($predictions.shape.slice(0, $predictions.shape.length - 1), $targets.shape, `predictions's shape should be align with the targets' shape, except the last dimension.`); const lastDim = $predictions.shape[$predictions.shape.length - 1]; assert(k > 0 && k <= lastDim, () => `'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${lastDim}), but got ${k}`); const predictionsVals = await $predictions.data(); const targetsVals = await $targets.data(); const [batch, size] = [predictionsVals.length / lastDim, lastDim]; const precision = getTypedArrayFromDType("bool", batch); for (let b = 0; b < batch; b++) { const offset = b * size; const vals = predictionsVals.subarray(offset, offset + size); const valAndInd = []; for (let i = 0; i < vals.length; i++) { valAndInd.push({value: vals[i], index: i}); } valAndInd.sort((a, b2) => b2.value - a.value); precision[b] = 0; for (let i = 0; i < k; i++) { if (valAndInd[i].index === targetsVals[b]) { precision[b] = 1; break; } } } if (predictions !== $predictions) { $predictions.dispose(); } if (targets !== $targets) { $targets.dispose(); } return tensor5(precision, $targets.shape, "bool"); } const inTopKAsync = inTopKAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function conv2DBackpropFilter_(x, dy, filterShape, strides, pad8, dataFormat = "NHWC", dimRoundingMode) { let x4D = x; if (x.rank === 3) { x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]); } let dy4D = dy; if (dy4D.rank === 3) { dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); } assert(x4D.rank === 4, () => `Error in conv2dDerFilter: input must be rank 4, but got shape ${x4D.shape}.`); assert(dy4D.rank === 4, () => `Error in conv2dDerFilter: dy must be rank 4, but got shape ${dy4D.shape}.`); assert(filterShape.length === 4, () => `Error in conv2dDerFilter: filterShape must be length 4, but got ${filterShape}.`); const inDepth = dataFormat === "NHWC" ? x4D.shape[3] : x4D.shape[1]; const outDepth = dataFormat === "NHWC" ? 
dy4D.shape[3] : dy4D.shape[1]; assert(inDepth === filterShape[2], () => `Error in conv2dDerFilter: depth of input ${inDepth}) must match input depth in filter (${filterShape[2]}.`); assert(outDepth === filterShape[3], () => `Error in conv2dDerFilter: depth of dy (${outDepth}) must match output depth for filter (${filterShape[3]}).`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in conv2dDerFilter: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const forward = (backend2) => { const dilations = 1; const $dataFormat = convertConv2DDataFormat(dataFormat); const convInfo = computeConv2DInfo(x4D.shape, filterShape, strides, dilations, pad8, dimRoundingMode, false, $dataFormat); return backend2.conv2dDerFilter(x4D, dy4D, convInfo); }; const inputs = {x: x4D, dy: dy4D}; const attrs = {strides, pad: pad8, dataFormat, dimRoundingMode}; return ENGINE.runKernelFunc(forward, inputs, null, Conv2DBackpropFilter, attrs); } const conv2DBackpropFilter = op({conv2DBackpropFilter_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function getFusedDyActivation(dy, y, activation) { if (activation == null || activation === "linear") { return dy; } if (activation === "relu") { return mul(dy, step(y)); } throw new Error(`Cannot compute gradient for fused activation ${activation}.`); } function getFusedBiasGradient(bias, dyActivation) { let res = dyActivation; const reduceAxes = getReductionAxes(bias.shape, dyActivation.shape); if (reduceAxes.length > 0) { res = sum2(res, reduceAxes); } return reshape(res, bias.shape); } function applyActivation(x, activation, preluActivationWeights) { if (activation === "linear") { return x; } else if (activation === "relu") { return relu(x); } else if (activation === "elu") { return elu(x); } else if (activation === "relu6") { return relu6(x); } else if (activation === "prelu") { return prelu(x, preluActivationWeights); } throw new Error(`Unknown fused activation ${activation}.`); } const shouldFuse = (gradientDepth, activation) => { const gradientMode = gradientDepth > 0; return !gradientMode || activation === "linear"; }; // node_modules/@tensorflow/tfjs-core/dist/ops/fused_conv2d.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function fusedConv2d_({x, filter, strides, pad: pad8, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation = "linear", preluActivationWeights}) { activation = activation || "linear"; if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { let result = conv2d(x, filter, strides, pad8, dataFormat, dilations, dimRoundingMode); if (bias != null) { result = add2(result, bias); } return applyActivation(result, activation, preluActivationWeights); } const $x = convertToTensor(x, "x", "conv2d"); const $filter = convertToTensor(filter, "filter", "conv2d"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in fused conv2d: input must be rank 4, but got rank ${x4D.rank}.`); assert($filter.rank === 4, () => `Error in fused conv2d: filter must be rank 4, but got rank ${$filter.rank}.`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in fused conv2d: pad must be an integer when using, dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } assert(x4D.shape[3] === $filter.shape[2], () => `Error in conv2d: depth of input (${x4D.shape[3]}) must match input depth for filter ${$filter.shape[2]}.`); assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${strides} and dilations '${dilations}'`); assert(dataFormat === "NHWC", () => `Error in conv2d: got dataFormat of ${dataFormat} but only NHWC is currently supported.`); const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad8, dimRoundingMode); let $bias; if (bias != null) { $bias = convertToTensor(bias, "bias", "fused conv2d"); [$bias] = makeTypesMatch($bias, $x); assertAndGetBroadcastShape(convInfo.outShape, $bias.shape); } let $preluActivationWeights; if (preluActivationWeights != null) { $preluActivationWeights = convertToTensor(preluActivationWeights, "prelu weights", "fused conv2d"); } const grad2 = (dy, saved) => { const [$filter2, x4D2, y, $bias2] = saved; const dyActivation = getFusedDyActivation(dy, y, activation); assert(tupleValuesAreOne(dilations), () => `Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. 
Got dilations '${dilations}'`); const xDer = conv2DBackpropInput(x4D2.shape, dyActivation, $filter2, strides, pad8); const filterDer = conv2DBackpropFilter(x4D2, dyActivation, $filter2.shape, strides, pad8); const der = [xDer, filterDer]; if ($bias2 != null) { const biasDer = getFusedBiasGradient($bias2, dyActivation); der.push(biasDer); } return der; }; const forward = (backend2) => { const res = backend2.fusedConv2d({ input: x4D, filter: $filter, convInfo, bias: $bias, activation, preluActivationWeights: $preluActivationWeights }); return res; }; const inputs = { x: x4D, filter: $filter, bias: $bias, preluActivationWeights: $preluActivationWeights }; const attrs = {strides, pad: pad8, dataFormat, dilations, dimRoundingMode, activation}; if (bias == null) { const customOp = customGrad((x4D2, filter2, save) => { let res = ENGINE.runKernelFunc(forward, inputs, null, FusedConv2D, attrs); save([filter2, x4D2, res]); if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return {value: res, gradFunc: grad2}; }); return customOp(x4D, $filter); } else { const customOpWithBias = customGrad((x4D2, filter2, bias2, save) => { let res = ENGINE.runKernelFunc(forward, inputs, null, FusedConv2D, attrs); save([filter2, x4D2, res, bias2]); if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return {value: res, gradFunc: grad2}; }); return customOpWithBias(x4D, $filter, $bias); } } const conv2d5 = op({fusedConv2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, convInfo) { let x4D = x; if (x.rank === 3) { x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]); } let dy4D = dy; if (dy4D.rank === 3) { dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); } const forward = (backend2) => backend2.depthwiseConv2DDerFilter(x4D, dy4D, convInfo); const inputs = {x: x4D, dy: dy4D}; return ENGINE.runKernelFunc(forward, inputs, null, DepthwiseConv2dNativeBackpropFilter); } const depthwiseConv2dNativeBackpropFilter = op({depthwiseConv2dNativeBackpropFilter_}); // node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, convInfo) { let dy4D = dy; let reshapedTo4D = false; if (dy.rank === 3) { reshapedTo4D = true; dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); } const forward = (backend2) => backend2.depthwiseConv2DDerInput(dy4D, filter, convInfo); const inputs = {dy: dy4D}; const res = ENGINE.runKernelFunc(forward, inputs, null, DepthwiseConv2dNativeBackpropInput); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const depthwiseConv2dNativeBackpropInput = op({depthwiseConv2dNativeBackpropInput_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fused_depthwise_conv2d.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function fusedDepthwiseConv2d_({x, filter, strides, pad: pad8, dataFormat = "NHWC", dilations = [1, 1], dimRoundingMode, bias, activation = "linear", preluActivationWeights}) { if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { let result = depthwiseConv2d(x, filter, strides, pad8, dataFormat, dilations, dimRoundingMode); if (bias != null) { result = add2(result, bias); } return applyActivation(result, activation, preluActivationWeights); } const $x = convertToTensor(x, "x", "depthwiseConv2d"); const $filter = convertToTensor(filter, "filter", "depthwiseConv2d"); let x4D = $x; let reshapedTo4D = false; if ($x.rank === 3) { reshapedTo4D = true; x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); } assert(x4D.rank === 4, () => `Error in fused depthwiseConv2d: input must be rank 4, but got rank ${x4D.rank}.`); assert($filter.rank === 4, () => `Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${$filter.rank}.`); assert(x4D.shape[3] === $filter.shape[2], () => `Error in fused depthwiseConv2d: number of input channels (${x4D.shape[3]}) must match the inChannels dimension in filter ${$filter.shape[2]}.`); if (dilations == null) { dilations = [1, 1]; } assert(eitherStridesOrDilationsAreOne(strides, dilations), () => `Error in fused depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${strides} and dilations '${dilations}'`); if (dimRoundingMode != null) { assert(isInt(pad8), () => `Error in fused depthwiseConv2d: pad must be an integer when using dimRoundingMode ${dimRoundingMode} but got pad ${pad8}.`); } const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad8, dimRoundingMode, true); let $bias; if (bias != null) { $bias = convertToTensor(bias, "bias", "fused conv2d"); [$bias] = makeTypesMatch($bias, $x); assertAndGetBroadcastShape(convInfo.outShape, $bias.shape); } let $preluActivationWeights; if (preluActivationWeights != null) { $preluActivationWeights = convertToTensor(preluActivationWeights, "prelu weights", "fused depthwiseConv2d"); } const grad2 = (dy, saved) => { assert(tupleValuesAreOne(dilations), () => `Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. Got dilations '${dilations}'`); const [$filter2, x4D2, y, bias2] = saved; const dyActivation = getFusedDyActivation(dy, y, activation); const xDer = depthwiseConv2dNativeBackpropInput(x4D2.shape, dyActivation, $filter2, convInfo); const filterDer = depthwiseConv2dNativeBackpropFilter(x4D2, dyActivation, $filter2.shape, convInfo); if (bias2 != null) { const biasDer = getFusedBiasGradient($bias, dyActivation); return [xDer, filterDer, biasDer]; } return [xDer, filterDer]; }; const forward = (backend2) => { const res = backend2.fusedDepthwiseConv2D({ input: x4D, filter: $filter, convInfo, bias: $bias, activation, preluActivationWeights: $preluActivationWeights }); return res; }; const inputs = { x: x4D, filter: $filter, bias: $bias, preluActivationWeights: $preluActivationWeights }; const attrs = {strides, pad: pad8, dataFormat, dilations, dimRoundingMode, activation}; if (bias == null) { const customOp = customGrad((x4D2, filter2, save) => { let res = ENGINE.runKernelFunc(forward, inputs, null, FusedDepthwiseConv2D, attrs); save([filter2, x4D2, res]); if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return {value: res, gradFunc: grad2}; }); return customOp(x4D, $filter); } else { const customOpWithBias = customGrad((x4D2, filter2, bias2, save) => { let res = ENGINE.runKernelFunc(forward, inputs, null, FusedDepthwiseConv2D, attrs); save([filter2, x4D2, res, bias2]); if (reshapedTo4D) { res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return {value: res, gradFunc: grad2}; }); return customOpWithBias(x4D, $filter, $bias); } } const depthwiseConv2d2 = op({fusedDepthwiseConv2d_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fused_mat_mul.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function fusedMatMul_({a, b, transposeA = false, transposeB = false, bias, activation = "linear", preluActivationWeights}) { if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { let result = matMul(a, b, transposeA, transposeB); if (bias != null) { result = add2(result, bias); } return applyActivation(result, activation, preluActivationWeights); } let $a = convertToTensor(a, "a", "fused matMul"); let $b = convertToTensor(b, "b", "fused matMul"); [$a, $b] = makeTypesMatch($a, $b); const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1]; const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2]; const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2]; const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1]; const outerDimsA = $a.shape.slice(0, -2); const outerDimsB = $b.shape.slice(0, -2); const batchDimA = sizeFromShape(outerDimsA); const batchDimB = sizeFromShape(outerDimsB); assert($a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank, () => `Error in fused matMul: inputs must have the same rank of at least 2, got ranks ${$a.rank} and ${$b.rank}.`); assert(arraysEqual(outerDimsA, outerDimsB), () => `Error in fused matMul: outer dimensions (${outerDimsA}) and (${outerDimsB}) of Tensors with shapes ${$a.shape} and ${$b.shape} must match.`); assert(innerShapeA === innerShapeB, () => `Error in fused matMul: inner shapes (${innerShapeA}) and (${innerShapeB}) of Tensors with shapes ${$a.shape} and ${$b.shape} and transposeA=${transposeA} and transposeB=${transposeB} must match.`); const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]); const a3D = transposeA ? reshape($a, [batchDimA, innerShapeA, outerShapeA]) : reshape($a, [batchDimA, outerShapeA, innerShapeA]); const b3D = transposeB ? 
reshape($b, [batchDimB, outerShapeB, innerShapeB]) : reshape($b, [batchDimB, innerShapeB, outerShapeB]); let $bias; if (bias != null) { $bias = convertToTensor(bias, "bias", "fused matMul"); [$bias] = makeTypesMatch($bias, $a); assertAndGetBroadcastShape(outShape, $bias.shape); } let $preluActivationWeights; if (preluActivationWeights != null) { $preluActivationWeights = convertToTensor(preluActivationWeights, "prelu weights", "fused matMul"); } const grad2 = (dy, saved) => { const [a3D2, b3D2, y, $bias2] = saved; const dyActivation = getFusedDyActivation(reshape(dy, y.shape), y, activation); let aDer; let bDer; if (!transposeA && !transposeB) { aDer = matMul(dyActivation, b3D2, false, true); bDer = matMul(a3D2, dyActivation, true, false); } else if (!transposeA && transposeB) { aDer = matMul(dyActivation, b3D2, false, false); bDer = matMul(dyActivation, a3D2, true, false); } else if (transposeA && !transposeB) { aDer = matMul(b3D2, dyActivation, false, true); bDer = matMul(a3D2, dyActivation, false, false); } else { aDer = matMul(b3D2, dyActivation, true, true); bDer = matMul(dyActivation, a3D2, true, true); } if (bias != null) { const biasDer = getFusedBiasGradient($bias2, dyActivation); return [aDer, bDer, biasDer]; } else { return [aDer, bDer]; } }; const forward = (backend2) => { const y = backend2.fusedBatchMatMul({ a: a3D, b: b3D, transposeA, transposeB, bias: $bias, activation, preluActivationWeights: $preluActivationWeights }); return y; }; const inputs = { a: a3D, b: b3D, bias: $bias, preluActivationWeights: $preluActivationWeights }; const attrs = {transposeA, transposeB, activation}; if (bias == null) { const customOp = customGrad((a3D2, b3D2, save) => { const res = ENGINE.runKernelFunc(forward, inputs, null, _FusedMatMul, attrs); save([a3D2, b3D2, res]); return {value: reshape(res, outShape), gradFunc: grad2}; }); return customOp(a3D, b3D); } else { const customOpWithBias = customGrad((a3D2, b3D2, $bias2, save) => { const res = ENGINE.runKernelFunc(forward, inputs, null, _FusedMatMul, attrs); save([a3D2, b3D2, res, $bias2]); return {value: reshape(res, outShape), gradFunc: grad2}; }); return customOpWithBias(a3D, b3D, $bias); } } const matMul2 = op({fusedMatMul_}); // node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js const fused_ops_exports = {}; __export(fused_ops_exports, { conv2d: () => conv2d5, depthwiseConv2d: () => depthwiseConv2d2, matMul: () => matMul2 }); /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ // node_modules/@tensorflow/tfjs-core/dist/ops/hamming_window.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function hammingWindow_(windowLength) { return cosineWindow(windowLength, 0.54, 0.46); } const hammingWindow = op({hammingWindow_}); // node_modules/@tensorflow/tfjs-core/dist/ops/hann_window.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function hannWindow_(windowLength) { return cosineWindow(windowLength, 0.5, 0.5); } const hannWindow = op({hannWindow_}); // node_modules/@tensorflow/tfjs-core/dist/ops/frame.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function frame_(signal2, frameLength, frameStep, padEnd = false, padValue = 0) { let start = 0; const output = []; while (start + frameLength <= signal2.size) { output.push(slice(signal2, start, frameLength)); start += frameStep; } if (padEnd) { while (start < signal2.size) { const padLen = start + frameLength - signal2.size; const pad8 = concat([ slice(signal2, start, frameLength - padLen), fill([padLen], padValue) ]); output.push(pad8); start += frameStep; } } if (output.length === 0) { return tensor2d([], [0, frameLength]); } return reshape(concat(output), [output.length, frameLength]); } const frame = op({frame_}); // node_modules/@tensorflow/tfjs-core/dist/ops/stft.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ function stft_(signal2, frameLength, frameStep, fftLength, windowFn = hannWindow) { if (fftLength == null) { fftLength = enclosingPowerOfTwo(frameLength); } const framedSignal = frame(signal2, frameLength, frameStep); const windowedSignal = mul(framedSignal, windowFn(frameLength)); const output = []; for (let i = 0; i < framedSignal.shape[0]; i++) { output.push(rfft(slice(windowedSignal, [i, 0], [1, frameLength]), fftLength)); } return concat(output); } const stft = op({stft_}); // node_modules/@tensorflow/tfjs-core/dist/ops/crop_and_resize.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function cropAndResize_(image2, boxes, boxInd, cropSize, method, extrapolationValue) { const $image = convertToTensor(image2, "image", "cropAndResize"); const $boxes = convertToTensor(boxes, "boxes", "cropAndResize", "float32"); const $boxInd = convertToTensor(boxInd, "boxInd", "cropAndResize", "int32"); method = method || "bilinear"; extrapolationValue = extrapolationValue || 0; const numBoxes = $boxes.shape[0]; assert($image.rank === 4, () => `Error in cropAndResize: image must be rank 4,but got rank ${$image.rank}.`); assert($boxes.rank === 2 && $boxes.shape[1] === 4, () => `Error in cropAndResize: boxes must be have size [${numBoxes},4] but had shape ${$boxes.shape}.`); assert($boxInd.rank === 1 && $boxInd.shape[0] === numBoxes, () => `Error in cropAndResize: boxInd must be have size [${numBoxes}] but had shape ${$boxes.shape}.`); assert(cropSize.length === 2, () => `Error in cropAndResize: cropSize must be of length 2, but got length ${cropSize.length}.`); assert(cropSize[0] >= 1 && cropSize[1] >= 1, () => `cropSize must be atleast [1,1], but was ${cropSize}`); assert(method === "bilinear" || method === "nearest", () => `method must be bilinear or nearest, but was ${method}`); const forward = (backend2) => backend2.cropAndResize($image, $boxes, $boxInd, cropSize, method, extrapolationValue); const inputs = {image: $image, boxes: $boxes, boxInd: $boxInd}; const attrs = {method, extrapolationValue, cropSize}; const res = ENGINE.runKernelFunc(forward, inputs, null, CropAndResize, attrs); return res; } const cropAndResize = op({cropAndResize_}); // node_modules/@tensorflow/tfjs-core/dist/ops/flip_left_right.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function flipLeftRight_(image2) { const $image = convertToTensor(image2, "image", "flipLeftRight", "float32"); assert($image.rank === 4, () => `Error in flipLeftRight: image must be rank 4,but got rank ${$image.rank}.`); const inputs = {image: $image}; const res = ENGINE.runKernel(FlipLeftRight, inputs, {}); return res; } const flipLeftRight = op({flipLeftRight_}); // node_modules/@tensorflow/tfjs-core/dist/ops/rotate_with_offset.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function rotateWithOffset_(image2, radians, fillValue = 0, center = 0.5) { const $image = convertToTensor(image2, "image", "rotateWithOffset", "float32"); assert($image.rank === 4, () => `Error in rotateWithOffset: image must be rank 4,but got rank ${$image.rank}.`); const inputs = {image: $image}; const attrs = {radians, fillValue, center}; const res = ENGINE.runKernel(RotateWithOffset, inputs, attrs); return res; } const rotateWithOffset = op({rotateWithOffset_}); // node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) { if (iouThreshold == null) { iouThreshold = 0.5; } if (scoreThreshold == null) { scoreThreshold = Number.NEGATIVE_INFINITY; } if (softNmsSigma == null) { softNmsSigma = 0; } const numBoxes = boxes.shape[0]; maxOutputSize = Math.min(maxOutputSize, numBoxes); assert(0 <= iouThreshold && iouThreshold <= 1, () => `iouThreshold must be in [0, 1], but was '${iouThreshold}'`); assert(boxes.rank === 2, () => `boxes must be a 2D tensor, but was of rank '${boxes.rank}'`); assert(boxes.shape[1] === 4, () => `boxes must have 4 columns, but 2nd dimension was ${boxes.shape[1]}`); assert(scores.rank === 1, () => "scores must be a 1D tensor"); assert(scores.shape[0] === numBoxes, () => `scores has incompatible shape with boxes. 
Expected ${numBoxes}, but was ${scores.shape[0]}`); assert(0 <= softNmsSigma && softNmsSigma <= 1, () => `softNmsSigma must be in [0, 1], but was '${softNmsSigma}'`); return {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma}; } // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppression"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppression"); const inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold); maxOutputSize = inputs.maxOutputSize; iouThreshold = inputs.iouThreshold; scoreThreshold = inputs.scoreThreshold; const attrs = {maxOutputSize, iouThreshold, scoreThreshold}; return ENGINE.runKernelFunc((b) => b.nonMaxSuppression($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold), {boxes: $boxes, scores: $scores}, null, NonMaxSuppressionV3, attrs); } const nonMaxSuppression = op({nonMaxSuppression_}); // node_modules/@tensorflow/tfjs-core/dist/backends/array_util.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function binaryInsert(arr, element, comparator) { const index = binarySearch(arr, element, comparator); const insertionPoint = index < 0 ? -(index + 1) : index; arr.splice(insertionPoint, 0, element); } function binarySearch(arr, target, comparator) { return binarySearch_(arr, target, comparator || defaultComparator); } function defaultComparator(a, b) { return a > b ? 1 : a < b ? -1 : 0; } function binarySearch_(arr, target, comparator) { let left = 0; let right = arr.length; let middle = 0; let found = false; while (left < right) { middle = left + (right - left >>> 1); const compareResult = comparator(target, arr[middle]); if (compareResult > 0) { left = middle + 1; } else { right = middle; found = !compareResult; } } return found ? left : -left - 1; } // node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. 
* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function nonMaxSuppressionV3Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) { return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0).selectedIndices; } function nonMaxSuppressionV4Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) { return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0, false, padToMaxOutputSize, true); } function nonMaxSuppressionV5Impl(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) { return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, true); } function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, returnScoresTensor = false, padToMaxOutputSize = false, returnValidOutputs = false) { const candidates = []; for (let i = 0; i < scores.length; i++) { if (scores[i] > scoreThreshold) { candidates.push({score: scores[i], boxIndex: i, suppressBeginIndex: 0}); } } candidates.sort(ascendingComparator); const scale = softNmsSigma > 0 ? -0.5 / softNmsSigma : 0; const selectedIndices = []; const selectedScores = []; while (selectedIndices.length < maxOutputSize && candidates.length > 0) { const candidate = candidates.pop(); const {score: originalScore, boxIndex, suppressBeginIndex} = candidate; if (originalScore < scoreThreshold) { break; } let ignoreCandidate = false; for (let j = selectedIndices.length - 1; j >= suppressBeginIndex; --j) { const iou = intersectionOverUnion(boxes, boxIndex, selectedIndices[j]); if (iou >= iouThreshold) { ignoreCandidate = true; break; } candidate.score = candidate.score * suppressWeight(iouThreshold, scale, iou); if (candidate.score <= scoreThreshold) { break; } } candidate.suppressBeginIndex = selectedIndices.length; if (!ignoreCandidate) { if (candidate.score === originalScore) { selectedIndices.push(boxIndex); selectedScores.push(candidate.score); } else if (candidate.score > scoreThreshold) { binaryInsert(candidates, candidate, ascendingComparator); } } } const validOutputs = selectedIndices.length; const elemsToPad = maxOutputSize - validOutputs; if (padToMaxOutputSize && elemsToPad > 0) { selectedIndices.push(...new Array(elemsToPad).fill(0)); selectedScores.push(...new Array(elemsToPad).fill(0)); } const result = {selectedIndices: tensor1d(selectedIndices, "int32")}; if (returnScoresTensor) { result["selectedScores"] = tensor1d(selectedScores, "float32"); } if (returnValidOutputs) { result["validOutputs"] = scalar(validOutputs, "int32"); } return result; } function intersectionOverUnion(boxes, i, j) { const iCoord = boxes.subarray(i * 4, i * 4 + 4); const jCoord = boxes.subarray(j * 4, j * 4 + 4); const yminI = Math.min(iCoord[0], iCoord[2]); const xminI = Math.min(iCoord[1], iCoord[3]); const ymaxI = Math.max(iCoord[0], iCoord[2]); const xmaxI = Math.max(iCoord[1], iCoord[3]); 
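// Box j is normalized the same way as box i above (min/max of the stored corner
// pairs); the value returned below is intersectionArea / (areaI + areaJ - intersectionArea),
// i.e. the intersection-over-union, with degenerate boxes (non-positive area) treated
// as having zero overlap.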
const yminJ = Math.min(jCoord[0], jCoord[2]); const xminJ = Math.min(jCoord[1], jCoord[3]); const ymaxJ = Math.max(jCoord[0], jCoord[2]); const xmaxJ = Math.max(jCoord[1], jCoord[3]); const areaI = (ymaxI - yminI) * (xmaxI - xminI); const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ); if (areaI <= 0 || areaJ <= 0) { return 0; } const intersectionYmin = Math.max(yminI, yminJ); const intersectionXmin = Math.max(xminI, xminJ); const intersectionYmax = Math.min(ymaxI, ymaxJ); const intersectionXmax = Math.min(xmaxI, xmaxJ); const intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0) * Math.max(intersectionXmax - intersectionXmin, 0); return intersectionArea / (areaI + areaJ - intersectionArea); } function suppressWeight(iouThreshold, scale, iou) { const weight = Math.exp(scale * iou * iou); return iou <= iouThreshold ? weight : 0; } function ascendingComparator(c1, c2) { return c1.score - c2.score || c1.score === c2.score && c2.boxIndex - c1.boxIndex; } // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression_async.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppressionAsync"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppressionAsync"); const inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold); maxOutputSize = inputs.maxOutputSize; iouThreshold = inputs.iouThreshold; scoreThreshold = inputs.scoreThreshold; const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); const boxesVals = boxesAndScores[0]; const scoresVals = boxesAndScores[1]; const res = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); if ($boxes !== boxes) { $boxes.dispose(); } if ($scores !== scores) { $scores.dispose(); } return res; } const nonMaxSuppressionAsync = nonMaxSuppressionAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression_with_score.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
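 *
 * nonMaxSuppressionWithScore (below) is the Soft-NMS entry point: it returns both the
 * selected indices and the (possibly decayed) scores as tensors. A minimal usage
 * sketch, assuming tfjs-core's ops are reachable as `tf` (this bundle re-exports them,
 * e.g. as faceapi.tf):
 *
 *   const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0.1, 1, 1.1]]);
 *   const scores = tf.tensor1d([0.9, 0.75]);
 *   const {selectedIndices, selectedScores} =
 *       tf.image.nonMaxSuppressionWithScore(boxes, scores, 2, 0.5, -Infinity, 0.5);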
* ============================================================================= */ function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppression"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppression"); const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); maxOutputSize = params.maxOutputSize; iouThreshold = params.iouThreshold; scoreThreshold = params.scoreThreshold; softNmsSigma = params.softNmsSigma; const inputs = {boxes: $boxes, scores: $scores}; const attrs = {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma}; const result = ENGINE.runKernel(NonMaxSuppressionV5, inputs, attrs); return {selectedIndices: result[0], selectedScores: result[1]}; } const nonMaxSuppressionWithScore = op({nonMaxSuppressionWithScore_}); // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression_with_score_async.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppressionAsync"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppressionAsync"); const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); maxOutputSize = params.maxOutputSize; iouThreshold = params.iouThreshold; scoreThreshold = params.scoreThreshold; softNmsSigma = params.softNmsSigma; const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); const boxesVals = boxesAndScores[0]; const scoresVals = boxesAndScores[1]; const res = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); if ($boxes !== boxes) { $boxes.dispose(); } if ($scores !== scores) { $scores.dispose(); } return res; } const nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression_padded.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
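 *
 * nonMaxSuppressionPadded (below) mirrors TensorFlow's NonMaxSuppressionV4: when
 * padToMaxOutputSize is true, selectedIndices is zero-padded to exactly maxOutputSize
 * entries and the scalar validOutputs reports how many of those entries are real.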
* ============================================================================= */ function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppression"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppression"); const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null); const $maxOutputSize = params.maxOutputSize; const $iouThreshold = params.iouThreshold; const $scoreThreshold = params.scoreThreshold; const inputs = {boxes: $boxes, scores: $scores}; const attrs = { maxOutputSize: $maxOutputSize, iouThreshold: $iouThreshold, scoreThreshold: $scoreThreshold, padToMaxOutputSize }; const result = ENGINE.runKernel(NonMaxSuppressionV4, inputs, attrs); return {selectedIndices: result[0], validOutputs: result[1]}; } const nonMaxSuppressionPadded = op({nonMaxSuppressionPadded_}); // node_modules/@tensorflow/tfjs-core/dist/ops/non_max_suppression_padded_async.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) { const $boxes = convertToTensor(boxes, "boxes", "nonMaxSuppressionAsync"); const $scores = convertToTensor(scores, "scores", "nonMaxSuppressionAsync"); const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null); const $maxOutputSize = params.maxOutputSize; const $iouThreshold = params.iouThreshold; const $scoreThreshold = params.scoreThreshold; const [boxesVals, scoresVals] = await Promise.all([$boxes.data(), $scores.data()]); const res = nonMaxSuppressionV4Impl(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize); if ($boxes !== boxes) { $boxes.dispose(); } if ($scores !== scores) { $scores.dispose(); } return res; } const nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_; // node_modules/@tensorflow/tfjs-core/dist/ops/resize_bilinear.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
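 *
 * resizeBilinear (below) accepts a rank-3 [height, width, channels] image or a rank-4
 * [batch, height, width, channels] tensor; rank-3 inputs are temporarily expanded to a
 * batch of one and squeezed back afterwards. Minimal sketch (inputImage is an assumed,
 * already-loaded image tensor):
 *
 *   const resized = tf.image.resizeBilinear(inputImage, [224, 224]);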
* ============================================================================= */ function resizeBilinear_(images, size, alignCorners = false) { const $images = convertToTensor(images, "images", "resizeBilinear"); assert($images.rank === 3 || $images.rank === 4, () => `Error in resizeBilinear: x must be rank 3 or 4, but got rank ${$images.rank}.`); assert(size.length === 2, () => `Error in resizeBilinear: new shape must be 2D, but got shape ${size}.`); let batchImages = $images; let reshapedTo4D = false; if ($images.rank === 3) { reshapedTo4D = true; batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]); } const [newHeight, newWidth] = size; const forward = (backend2, save) => { save([batchImages]); return backend2.resizeBilinear(batchImages, newHeight, newWidth, alignCorners); }; const inputs = {images: batchImages}; const attrs = {alignCorners, size}; const res = ENGINE.runKernelFunc(forward, inputs, null, ResizeBilinear, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const resizeBilinear = op({resizeBilinear_}); // node_modules/@tensorflow/tfjs-core/dist/ops/resize_nearest_neighbor.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function resizeNearestNeighbor_(images, size, alignCorners = false) { const $images = convertToTensor(images, "images", "resizeNearestNeighbor"); assert($images.rank === 3 || $images.rank === 4, () => `Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${$images.rank}.`); assert(size.length === 2, () => `Error in resizeNearestNeighbor: new shape must be 2D, but got shape ${size}.`); assert($images.dtype === "float32" || $images.dtype === "int32", () => "`images` must have `int32` or `float32` as dtype"); let batchImages = $images; let reshapedTo4D = false; if ($images.rank === 3) { reshapedTo4D = true; batchImages = reshape($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]); } const [newHeight, newWidth] = size; const inputs = {images: batchImages}; const attrs = {alignCorners, size}; const forward = (backend2, save) => { save([batchImages]); return backend2.resizeNearestNeighbor(batchImages, newHeight, newWidth, alignCorners); }; const res = ENGINE.runKernelFunc(forward, inputs, null, ResizeNearestNeighbor, attrs); if (reshapedTo4D) { return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]); } return res; } const resizeNearestNeighbor = op({resizeNearestNeighbor_}); // node_modules/@tensorflow/tfjs-core/dist/ops/band_part.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
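 *
 * bandPart (below) keeps a diagonal band of each innermost matrix: entry (i, j) is kept
 * when i - j <= numLower and j - i <= numUpper; passing a negative numLower or numUpper
 * keeps the entire lower or upper triangle. The mask is built from an index-difference
 * grid and applied with where(), so dtype and batch shape are preserved.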
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function bandPart_(a, numLower, numUpper) { assert(numLower % 1 === 0, () => `bandPart(): numLower must be an integer, got ${numLower}.`); assert(numUpper % 1 === 0, () => `bandPart(): numUpper must be an integer, got ${numUpper}.`); const $a = convertToTensor(a, "a", "bandPart"); assert($a.rank >= 2, () => `bandPart(): Rank must be at least 2, got ${$a.rank}.`); const shape = $a.shape; const [M, N] = $a.shape.slice(-2); if (!(numLower <= M)) { throw new Error(`bandPart(): numLower (${numLower}) must not be greater than the number of rows (${M}).`); } if (!(numUpper <= N)) { throw new Error(`bandPart(): numUpper (${numUpper}) must not be greater than the number of columns (${N}).`); } if (numLower < 0) { numLower = M; } if (numUpper < 0) { numUpper = N; } const i = reshape(range(0, M, 1, "int32"), [-1, 1]); const j = range(0, N, 1, "int32"); const ij = sub(i, j); const inBand = logicalAnd(lessEqual(ij, scalar(+numLower, "int32")), greaterEqual(ij, scalar(-numUpper, "int32"))); const zero = zeros([M, N], $a.dtype); return reshape(stack(unstack(reshape($a, [-1, M, N])).map((mat) => where(inBand, mat, zero))), shape); } const bandPart = op({bandPart_}); // node_modules/@tensorflow/tfjs-core/dist/ops/gram_schmidt.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function gramSchmidt_(xs) { let inputIsTensor2D; if (Array.isArray(xs)) { inputIsTensor2D = false; assert(xs != null && xs.length > 0, () => "Gram-Schmidt process: input must not be null, undefined, or empty"); const dim = xs[0].shape[0]; for (let i = 1; i < xs.length; ++i) { assert(xs[i].shape[0] === dim, () => `Gram-Schmidt: Non-unique lengths found in the input vectors: (${xs[i].shape[0]} vs. ${dim})`); } } else { inputIsTensor2D = true; xs = split(xs, xs.shape[0], 0).map((x) => squeeze(x, [0])); } assert(xs.length <= xs[0].shape[0], () => `Gram-Schmidt: Number of vectors (${xs.length}) exceeds number of dimensions (${xs[0].shape[0]}).`); const ys = []; const xs1d = xs; for (let i = 0; i < xs.length; ++i) { ys.push(ENGINE.tidy(() => { let x = xs1d[i]; if (i > 0) { for (let j = 0; j < i; ++j) { const proj = mul(sum2(mul(ys[j], x)), ys[j]); x = sub(x, proj); } } return div(x, norm(x, "euclidean")); })); } if (inputIsTensor2D) { return stack(ys, 0); } else { return ys; } } const gramSchmidt = op({gramSchmidt_}); // node_modules/@tensorflow/tfjs-core/dist/ops/qr.js /** * @license * Copyright 2020 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function qr_(x, fullMatrices = false) { assert(x.rank >= 2, () => `qr() requires input tensor to have a rank >= 2, but got rank ${x.rank}`); if (x.rank === 2) { return qr2d(x, fullMatrices); } else { const outerDimsProd = x.shape.slice(0, x.shape.length - 2).reduce((value, prev) => value * prev); const x2ds = unstack(reshape(x, [ outerDimsProd, x.shape[x.shape.length - 2], x.shape[x.shape.length - 1] ]), 0); const q2ds = []; const r2ds = []; x2ds.forEach((x2d) => { const [q2d, r2d] = qr2d(x2d, fullMatrices); q2ds.push(q2d); r2ds.push(r2d); }); const q = reshape(stack(q2ds, 0), x.shape); const r = reshape(stack(r2ds, 0), x.shape); return [q, r]; } } function qr2d(x, fullMatrices = false) { return ENGINE.tidy(() => { assert(x.shape.length === 2, () => `qr2d() requires a 2D Tensor, but got a ${x.shape.length}D Tensor.`); const m = x.shape[0]; const n = x.shape[1]; let q = eye(m); let r = clone(x); const one2D = tensor2d([[1]], [1, 1]); let w = clone(one2D); const iters = m >= n ? n : m; for (let j = 0; j < iters; ++j) { const rTemp = r; const wTemp = w; const qTemp = q; [w, r, q] = ENGINE.tidy(() => { const rjEnd1 = slice(r, [j, j], [m - j, 1]); const normX = norm(rjEnd1); const rjj = slice(r, [j, j], [1, 1]); const s = where(greater(rjj, 0), tensor2d([[-1]]), tensor2d([[1]])); const u1 = sub(rjj, mul(s, normX)); const wPre = div(rjEnd1, u1); if (wPre.shape[0] === 1) { w = clone(one2D); } else { w = concat([ one2D, slice(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]]) ], 0); } const tau = neg(div(matMul(s, u1), normX)); const rjEndAll = slice(r, [j, 0], [m - j, n]); const tauTimesW = mul(tau, w); const wT = transpose(w); if (j === 0) { r = sub(rjEndAll, matMul(tauTimesW, matMul(wT, rjEndAll))); } else { const rTimesTau = sub(rjEndAll, matMul(tauTimesW, matMul(wT, rjEndAll))); r = concat([slice(r, [0, 0], [j, n]), rTimesTau], 0); } const tawTimesWT = transpose(tauTimesW); const qAllJEnd = slice(q, [0, j], [m, q.shape[1] - j]); if (j === 0) { q = sub(qAllJEnd, matMul(matMul(qAllJEnd, w), tawTimesWT)); } else { const qTimesTau = sub(qAllJEnd, matMul(matMul(qAllJEnd, w), tawTimesWT)); q = concat([slice(q, [0, 0], [m, j]), qTimesTau], 1); } return [w, r, q]; }); dispose([rTemp, wTemp, qTemp]); } if (!fullMatrices && m > n) { q = slice(q, [0, 0], [m, n]); r = slice(r, [0, 0], [n, n]); } return [q, r]; }); } const qr = op({qr_}); // node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
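 *
 * The Reduction enum below controls how the loss ops that follow aggregate their
 * element-wise losses: NONE returns them unreduced, SUM adds them, MEAN averages them,
 * and SUM_BY_NONZERO_WEIGHTS (the default) divides the weighted sum by the number of
 * non-zero weights, or by the element count when no weights are supplied.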
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ var Reduction; (function(Reduction2) { Reduction2[Reduction2["NONE"] = 0] = "NONE"; Reduction2[Reduction2["MEAN"] = 1] = "MEAN"; Reduction2[Reduction2["SUM"] = 2] = "SUM"; Reduction2[Reduction2["SUM_BY_NONZERO_WEIGHTS"] = 3] = "SUM_BY_NONZERO_WEIGHTS"; })(Reduction || (Reduction = {})); // node_modules/@tensorflow/tfjs-core/dist/ops/compute_weighted_loss.js function computeWeightedLoss_(losses2, weights, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $losses = convertToTensor(losses2, "losses", "computeWeightedLoss"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "computeWeightedLoss"); } const weightedLoss = $weights == null ? $losses : mul($losses, $weights); if (reduction === Reduction.NONE) { return weightedLoss; } if (reduction === Reduction.SUM) { return sum2(weightedLoss); } if (reduction === Reduction.MEAN) { if ($weights == null) { return mean(weightedLoss); } else { const broadcastFactor = $losses.size / $weights.size; const result = div(sum2(weightedLoss), sum2($weights)); return broadcastFactor > 1 ? div(result, scalar(broadcastFactor)) : result; } } if (reduction === Reduction.SUM_BY_NONZERO_WEIGHTS) { if ($weights == null) { return div(sum2(weightedLoss), scalar($losses.size)); } else { const broadcastedWeights = mul($weights, ones2($losses.shape)); const numNonZeros = cast(sum2(notEqual(broadcastedWeights, scalar(0))), "float32"); return div(sum2(weightedLoss), numNonZeros); } } throw Error(`Unknown reduction: ${reduction}`); } const computeWeightedLoss = op({computeWeightedLoss_}); // node_modules/@tensorflow/tfjs-core/dist/ops/absolute_difference.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
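 *
 * Each loss op below follows the same pattern: convert labels and predictions to
 * tensors, assert that their shapes match, compute the element-wise loss (here
 * |labels - predictions|), and delegate weighting and reduction to computeWeightedLoss.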
* ============================================================================= */ function absoluteDifference_(labels, predictions, weights, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $labels = convertToTensor(labels, "labels", "absoluteDifference"); const $predictions = convertToTensor(predictions, "predictions", "absoluteDifference"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "absoluteDifference"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in absoluteDifference: "); const losses2 = abs(sub($labels, $predictions)); return computeWeightedLoss(losses2, $weights, reduction); } const absoluteDifference = op({absoluteDifference_}); // node_modules/@tensorflow/tfjs-core/dist/ops/cosine_distance.js function cosineDistance_(labels, predictions, axis, weights, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $labels = convertToTensor(labels, "labels", "cosineDistance"); const $predictions = convertToTensor(predictions, "predictions", "cosineDistance"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "cosineDistance"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in cosineDistance: "); const one = scalar(1); const losses2 = sub(one, sum2(mul($labels, $predictions), axis, true)); return computeWeightedLoss(losses2, $weights, reduction); } const cosineDistance = op({cosineDistance_}); // node_modules/@tensorflow/tfjs-core/dist/ops/hinge_loss.js function hingeLoss_(labels, predictions, weights, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { let $labels = convertToTensor(labels, "labels", "hingeLoss"); const $predictions = convertToTensor(predictions, "predictions", "hingeLoss"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "hingeLoss"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in hingeLoss: "); const one = scalar(1); $labels = sub(mul(scalar(2), $labels), one); const losses2 = relu(sub(one, mul($labels, $predictions))); return computeWeightedLoss(losses2, $weights, reduction); } const hingeLoss = op({hingeLoss_}); // node_modules/@tensorflow/tfjs-core/dist/ops/huber_loss.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
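 *
 * huberLoss (below) is quadratic for small errors and linear beyond delta: with
 * e = |predictions - labels| and q = min(e, delta), the element-wise loss is
 * 0.5 * q^2 + delta * (e - q), i.e. 0.5 * e^2 when e <= delta and
 * delta * (e - 0.5 * delta) otherwise.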
* ============================================================================= */ function huberLoss_(labels, predictions, weights, delta = 1, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $labels = convertToTensor(labels, "labels", "huberLoss"); const $predictions = convertToTensor(predictions, "predictions", "huberLoss"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "huberLoss"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in huberLoss: "); const deltaScalar = scalar(delta); const error = abs(sub($predictions, $labels)); const quadratic = minimum(error, deltaScalar); const linear = sub(error, quadratic); const losses2 = add2(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear)); return computeWeightedLoss(losses2, $weights, reduction); } const huberLoss = op({huberLoss_}); // node_modules/@tensorflow/tfjs-core/dist/ops/log_loss.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function logLoss_(labels, predictions, weights, epsilon = 1e-7, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $labels = convertToTensor(labels, "labels", "logLoss"); const $predictions = convertToTensor(predictions, "predictions", "logLoss"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "logLoss"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in logLoss: "); const one = scalar(1); const epsilonScalar = scalar(epsilon); const l1 = neg(mul($labels, log(add2($predictions, epsilonScalar)))); const l2 = mul(sub(one, $labels), log(add2(sub(one, $predictions), epsilonScalar))); const losses2 = sub(l1, l2); return computeWeightedLoss(losses2, $weights, reduction); } const logLoss = op({logLoss_}); // node_modules/@tensorflow/tfjs-core/dist/ops/mean_squared_error.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
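 *
 * sigmoidCrossEntropyWithLogits_ (defined after meanSquaredError below) uses the
 * numerically stable form max(x, 0) - x * z + log(1 + exp(-|x|)) for logits x and
 * labels z, which avoids overflowing exp() for large-magnitude logits; the optional
 * labelSmoothing argument of sigmoidCrossEntropy remaps labels toward 0.5 first.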
* ============================================================================= */ function meanSquaredError_(labels, predictions, weights, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { const $labels = convertToTensor(labels, "labels", "meanSquaredError"); const $predictions = convertToTensor(predictions, "predictions", "meanSquaredError"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "meanSquaredError"); } assertShapesMatch($labels.shape, $predictions.shape, "Error in meanSquaredError: "); const losses2 = squaredDifference($labels, $predictions); return computeWeightedLoss(losses2, $weights, reduction); } const meanSquaredError = op({meanSquaredError_}); // node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid_cross_entropy.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function sigmoidCrossEntropyWithLogits_(labels, logits) { const $labels = convertToTensor(labels, "labels", "sigmoidCrossEntropyWithLogits"); const $logits = convertToTensor(logits, "logits", "sigmoidCrossEntropyWithLogits"); assertShapesMatch($labels.shape, $logits.shape, "Error in sigmoidCrossEntropyWithLogits: "); const maxOutput = relu($logits); const outputXTarget = mul($logits, $labels); const sigmoidOutput = log1p(exp(neg(abs($logits)))); return add2(sub(maxOutput, outputXTarget), sigmoidOutput); } function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing = 0, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { let $multiClassLabels = convertToTensor(multiClassLabels, "multiClassLabels", "sigmoidCrossEntropy"); const $logits = convertToTensor(logits, "logits", "sigmoidCrossEntropy"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "sigmoidCrossEntropy"); } assertShapesMatch($multiClassLabels.shape, $logits.shape, "Error in sigmoidCrossEntropy: "); if (labelSmoothing > 0) { const labelSmoothingScalar = scalar(labelSmoothing); const one = scalar(1); const half = scalar(0.5); $multiClassLabels = add2(mul($multiClassLabels, sub(one, labelSmoothingScalar)), mul(half, labelSmoothingScalar)); } const losses2 = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits); return computeWeightedLoss(losses2, $weights, reduction); } const sigmoidCrossEntropy = op({sigmoidCrossEntropy_}); // node_modules/@tensorflow/tfjs-core/dist/ops/softmax_cross_entropy.js /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
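 *
 * softmaxCrossEntropyWithLogits_ (below) only supports reduction along the last
 * dimension: the loss is sum(-labels * (logits - logSumExp(logits))) over that axis,
 * and a custom gradient of dy * (softmax(logits) - labels) is registered so softmax
 * and cross entropy stay fused and numerically stable.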
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function softmaxCrossEntropyWithLogits_(labels, logits, dim = -1) { if (dim === -1) { dim = logits.rank - 1; } if (dim !== logits.rank - 1) { throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${logits.rank} and dim was ${dim}`); } const customOp = customGrad((labels2, logits2, save) => { const keepDims = true; const lse = logSumExp(logits2, [dim], keepDims); const logResult = sub(cast(logits2, "float32"), lse); save([labels2, logResult]); const costVector = neg(mul(logResult, labels2)); const value = sum2(costVector, [dim]); const gradFunc = (dy, saved) => { const [labels3, logResult2] = saved; const dyShape = expandShapeToKeepDim(dy.shape, [dim]); return [ mul(reshape(dy, dyShape), sub(cast(labels3, "float32"), exp(logResult2))), mul(reshape(dy, dyShape), sub(exp(logResult2), cast(labels3, "float32"))) ]; }; return {value, gradFunc}; }); return customOp(labels, logits); } function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing = 0, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) { let $onehotLabels = convertToTensor(onehotLabels, "onehotLabels", "softmaxCrossEntropy"); const $logits = convertToTensor(logits, "logits", "softmaxCrossEntropy"); let $weights = null; if (weights != null) { $weights = convertToTensor(weights, "weights", "softmaxCrossEntropy"); } assertShapesMatch($onehotLabels.shape, $logits.shape, "Error in softmaxCrossEntropy: "); if (labelSmoothing > 0) { const labelSmoothingScalar = scalar(labelSmoothing); const one = scalar(1); const numClasses = scalar($onehotLabels.shape[1]); $onehotLabels = add2(mul($onehotLabels, sub(one, labelSmoothingScalar)), div(labelSmoothingScalar, numClasses)); } const losses2 = softmaxCrossEntropyWithLogits_($onehotLabels, $logits); return computeWeightedLoss(losses2, $weights, reduction); } const softmaxCrossEntropy = op({softmaxCrossEntropy_}); // node_modules/@tensorflow/tfjs-core/dist/ops/ops.js const ops_exports = {}; __export(ops_exports, { abs: () => abs, acos: () => acos, acosh: () => acosh, add: () => add2, addN: () => addN, addStrict: () => addStrict, all: () => all, any: () => any, argMax: () => argMax, argMin: () => argMin, asin: () => asin, asinh: () => asinh, atan: () => atan, atan2: () => atan2, atanh: () => atanh, avgPool: () => avgPool, avgPool3d: () => avgPool3d, basicLSTMCell: () => basicLSTMCell, batchNorm: () => batchNorm, batchNorm2d: () => batchNorm2d, batchNorm3d: () => batchNorm3d, batchNorm4d: () => batchNorm4d, batchToSpaceND: () => batchToSpaceND, booleanMaskAsync: () => booleanMaskAsync, broadcastTo: () => broadcastTo, buffer: () => buffer, cast: () => cast, ceil: () => ceil, clipByValue: () => clipByValue, clone: () => clone, complex: () => complex, concat: () => concat, concat1d: () => concat1d, concat2d: () => concat2d, concat3d: () => concat3d, concat4d: () => concat4d, conv1d: () => conv1d, conv2d: () => conv2d, conv2dTranspose: () => conv2dTranspose, conv3d: () => conv3d, conv3dTranspose: () => conv3dTranspose, cos: () => cos, cosh: () => cosh, cosineWindow: () => cosineWindow, cumsum: () => cumsum, depthToSpace: () => depthToSpace, depthwiseConv2d: () => depthwiseConv2d, diag: () => diag, dilation2d: () => dilation2d, div: () => div, divNoNan: () => divNoNan, divStrict: () => divStrict, dot: () => dot, dropout: () => dropout, elu: () => elu, 
enclosingPowerOfTwo: () => enclosingPowerOfTwo, equal: () => equal, equalStrict: () => equalStrict, erf: () => erf, exp: () => exp, expandDims: () => expandDims, expm1: () => expm1, eye: () => eye, fft: () => fft, fill: () => fill, floor: () => floor, floorDiv: () => floorDiv, fused: () => fused_ops_exports, gather: () => gather, gatherND: () => gatherND, greater: () => greater, greaterEqual: () => greaterEqual, greaterEqualStrict: () => greaterEqualStrict, greaterStrict: () => greaterStrict, ifft: () => ifft, imag: () => imag, image: () => image, inTopKAsync: () => inTopKAsync, irfft: () => irfft, isFinite: () => isFinite2, isInf: () => isInf, isNaN: () => isNaN2, leakyRelu: () => leakyRelu, less: () => less, lessEqual: () => lessEqual, lessEqualStrict: () => lessEqualStrict, lessStrict: () => lessStrict, linalg: () => linalg, linspace: () => linspace, localResponseNormalization: () => localResponseNormalization, log: () => log, log1p: () => log1p, logSigmoid: () => logSigmoid, logSoftmax: () => logSoftmax, logSumExp: () => logSumExp, logicalAnd: () => logicalAnd, logicalNot: () => logicalNot, logicalOr: () => logicalOr, logicalXor: () => logicalXor, losses: () => losses, matMul: () => matMul, max: () => max, maxPool: () => maxPool, maxPool3d: () => maxPool3d, maxPoolWithArgmax: () => maxPoolWithArgmax, maximum: () => maximum, maximumStrict: () => maximumStrict, mean: () => mean, min: () => min, minimum: () => minimum, minimumStrict: () => minimumStrict, mod: () => mod, modStrict: () => modStrict, moments: () => moments, movingAverage: () => movingAverage, mul: () => mul, mulStrict: () => mulStrict, multiRNNCell: () => multiRNNCell, multinomial: () => multinomial, neg: () => neg, norm: () => norm, notEqual: () => notEqual, notEqualStrict: () => notEqualStrict, oneHot: () => oneHot, ones: () => ones2, onesLike: () => onesLike, op: () => op, outerProduct: () => outerProduct, pad: () => pad, pad1d: () => pad1d, pad2d: () => pad2d, pad3d: () => pad3d, pad4d: () => pad4d, pool: () => pool, pow: () => pow, powStrict: () => powStrict, prelu: () => prelu, print: () => print, prod: () => prod, rand: () => rand, randomGamma: () => randomGamma, randomNormal: () => randomNormal, randomUniform: () => randomUniform, range: () => range, real: () => real, reciprocal: () => reciprocal, relu: () => relu, relu6: () => relu6, reshape: () => reshape, reverse: () => reverse, reverse1d: () => reverse1d, reverse2d: () => reverse2d, reverse3d: () => reverse3d, reverse4d: () => reverse4d, rfft: () => rfft, round: () => round, rsqrt: () => rsqrt, scalar: () => scalar, scatterND: () => scatterND, selu: () => selu, separableConv2d: () => separableConv2d, setdiff1dAsync: () => setdiff1dAsync, sigmoid: () => sigmoid, sign: () => sign, signal: () => signal, sin: () => sin, sinh: () => sinh, slice: () => slice, slice1d: () => slice1d, slice2d: () => slice2d, slice3d: () => slice3d, slice4d: () => slice4d, softmax: () => softmax, softplus: () => softplus, spaceToBatchND: () => spaceToBatchND, sparseToDense: () => sparseToDense, spectral: () => spectral, split: () => split, sqrt: () => sqrt, square: () => square, squaredDifference: () => squaredDifference, squaredDifferenceStrict: () => squaredDifferenceStrict, squeeze: () => squeeze, stack: () => stack, step: () => step, stridedSlice: () => stridedSlice, sub: () => sub, subStrict: () => subStrict, sum: () => sum2, tan: () => tan, tanh: () => tanh2, tensor: () => tensor5, tensor1d: () => tensor1d, tensor2d: () => tensor2d, tensor3d: () => tensor3d, tensor4d: () => 
tensor4d, tensor5d: () => tensor5d, tensor6d: () => tensor6d, tile: () => tile, topk: () => topk, transpose: () => transpose, truncatedNormal: () => truncatedNormal, unsortedSegmentSum: () => unsortedSegmentSum, unstack: () => unstack, variable: () => variable, where: () => where, whereAsync: () => whereAsync, zeros: () => zeros, zerosLike: () => zerosLike }); /** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const spectral = { fft, ifft, rfft, irfft }; const signal = { hammingWindow, hannWindow, frame, stft }; const image = { flipLeftRight, resizeNearestNeighbor, resizeBilinear, rotateWithOffset, cropAndResize, nonMaxSuppression, nonMaxSuppressionAsync, nonMaxSuppressionWithScore, nonMaxSuppressionWithScoreAsync, nonMaxSuppressionPadded, nonMaxSuppressionPaddedAsync }; const linalg = { bandPart, gramSchmidt, qr }; const losses = { absoluteDifference, computeWeightedLoss, cosineDistance, hingeLoss, huberLoss, logLoss, meanSquaredError, sigmoidCrossEntropy, softmaxCrossEntropy }; // node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js /** * @license * Copyright 2020 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ setOpHandler(ops_exports); // node_modules/@tensorflow/tfjs-core/dist/io/types.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const DTYPE_VALUE_SIZE_MAP = { float32: 4, float16: 2, int32: 4, uint16: 2, uint8: 1, bool: 1, complex64: 8 }; // node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
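 *
 * encodeWeights/decodeWeights (below) define the flat binary weight format used by the
 * IO handlers: numeric tensors are concatenated as typed arrays in spec order, string
 * values are stored as a 4-byte length prefix followed by their UTF-8 bytes, and
 * quantized weights (uint8/uint16 with min/scale metadata, or float16) are expanded
 * back to float32 or int32 while decoding.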
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const NUM_BYTES_STRING_LENGTH = 4; async function encodeWeights(tensors, group) { const specs = []; const dataPromises = []; const names = Array.isArray(tensors) ? tensors.map((tensor17) => tensor17.name) : Object.keys(tensors); for (let i = 0; i < names.length; ++i) { const name = names[i]; const t = Array.isArray(tensors) ? tensors[i].tensor : tensors[name]; if (t.dtype !== "float32" && t.dtype !== "int32" && t.dtype !== "bool" && t.dtype !== "string" && t.dtype !== "complex64") { throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`); } const spec = {name, shape: t.shape, dtype: t.dtype}; if (t.dtype === "string") { const utf8bytes = new Promise(async (resolve) => { const vals = await t.bytes(); const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) + NUM_BYTES_STRING_LENGTH * vals.length; const bytes = new Uint8Array(totalNumBytes); let offset = 0; for (let i2 = 0; i2 < vals.length; i2++) { const val = vals[i2]; const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer); bytes.set(bytesOfLength, offset); offset += NUM_BYTES_STRING_LENGTH; bytes.set(val, offset); offset += val.length; } resolve(bytes); }); dataPromises.push(utf8bytes); } else { dataPromises.push(t.data()); } if (group != null) { spec.group = group; } specs.push(spec); } const tensorValues = await Promise.all(dataPromises); return {data: concatenateTypedArrays(tensorValues), specs}; } function decodeWeights(buffer10, specs) { const out = {}; let float16Decode; let offset = 0; for (const spec of specs) { const name = spec.name; const dtype = spec.dtype; const shape = spec.shape; const size = sizeFromShape(shape); let values; if ("quantization" in spec) { const quantization = spec.quantization; if (quantization.dtype === "uint8" || quantization.dtype === "uint16") { if (!("min" in quantization && "scale" in quantization)) { throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} doesn't have corresponding metadata min and scale.`); } } else if (quantization.dtype === "float16") { if (dtype !== "float32") { throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} which only supports weights of type float32 not ${dtype}.`); } } else { throw new Error(`Weight ${spec.name} has unknown quantization dtype ${quantization.dtype}. Supported quantization dtypes are: 'uint8', 'uint16', and 'float16'.`); } const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype]; const byteBuffer = buffer10.slice(offset, offset + size * quantizationSizeFactor); const quantizedArray = quantization.dtype === "uint8" ? 
new Uint8Array(byteBuffer) : new Uint16Array(byteBuffer); if (dtype === "float32") { if (quantization.dtype === "uint8" || quantization.dtype === "uint16") { values = new Float32Array(quantizedArray.length); for (let i = 0; i < quantizedArray.length; i++) { const v = quantizedArray[i]; values[i] = v * quantization.scale + quantization.min; } } else if (quantization.dtype === "float16") { if (float16Decode === void 0) { float16Decode = getFloat16Decoder(); } values = float16Decode(quantizedArray); } else { throw new Error(`Unsupported quantization type ${quantization.dtype} for weight type float32.`); } } else if (dtype === "int32") { if (quantization.dtype !== "uint8" && quantization.dtype !== "uint16") { throw new Error(`Unsupported quantization type ${quantization.dtype} for weight type int32.`); } values = new Int32Array(quantizedArray.length); for (let i = 0; i < quantizedArray.length; i++) { const v = quantizedArray[i]; values[i] = Math.round(v * quantization.scale + quantization.min); } } else { throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`); } offset += size * quantizationSizeFactor; } else if (dtype === "string") { const size2 = sizeFromShape(spec.shape); values = []; for (let i = 0; i < size2; i++) { const byteLength = new Uint32Array(buffer10.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0]; offset += NUM_BYTES_STRING_LENGTH; const bytes = new Uint8Array(buffer10.slice(offset, offset + byteLength)); values.push(bytes); offset += byteLength; } } else { const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype]; const byteBuffer = buffer10.slice(offset, offset + size * dtypeFactor); if (dtype === "float32") { values = new Float32Array(byteBuffer); } else if (dtype === "int32") { values = new Int32Array(byteBuffer); } else if (dtype === "bool") { values = new Uint8Array(byteBuffer); } else if (dtype === "complex64") { values = new Float32Array(byteBuffer); const real6 = new Float32Array(values.length / 2); const image2 = new Float32Array(values.length / 2); for (let i = 0; i < real6.length; i++) { real6[i] = values[i * 2]; image2[i] = values[i * 2 + 1]; } const realTensor = tensor5(real6, shape, "float32"); const imageTensor = tensor5(image2, shape, "float32"); out[name] = complex(realTensor, imageTensor); } else { throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`); } offset += size * dtypeFactor; } if (dtype !== "complex64") { out[name] = tensor5(values, shape, dtype); } } return out; } function concatenateTypedArrays(xs) { if (xs === null) { throw new Error(`Invalid input value: ${JSON.stringify(xs)}`); } let totalByteLength = 0; const normalizedXs = []; xs.forEach((x) => { totalByteLength += x.byteLength; normalizedXs.push(x.byteLength === x.buffer.byteLength ? 
x : new x.constructor(x)); if (!(x instanceof Float32Array || x instanceof Int32Array || x instanceof Uint8Array)) { throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`); } }); const y = new Uint8Array(totalByteLength); let offset = 0; normalizedXs.forEach((x) => { y.set(new Uint8Array(x.buffer), offset); offset += x.byteLength; }); return y.buffer; } const useNodeBuffer = typeof Buffer !== "undefined" && (typeof Blob === "undefined" || typeof atob === "undefined" || typeof btoa === "undefined"); function stringByteLength(str) { if (useNodeBuffer) { return Buffer.byteLength(str); } return new Blob([str]).size; } function concatenateArrayBuffers(buffers) { if (buffers.length === 1) { return buffers[0]; } let totalByteLength = 0; buffers.forEach((buffer10) => { totalByteLength += buffer10.byteLength; }); const temp = new Uint8Array(totalByteLength); let offset = 0; buffers.forEach((buffer10) => { temp.set(new Uint8Array(buffer10), offset); offset += buffer10.byteLength; }); return temp.buffer; } function basename(path) { const SEPARATOR = "/"; path = path.trim(); while (path.endsWith(SEPARATOR)) { path = path.slice(0, path.length - 1); } const items = path.split(SEPARATOR); return items[items.length - 1]; } function getModelArtifactsInfoForJSON(modelArtifacts) { if (modelArtifacts.modelTopology instanceof ArrayBuffer) { throw new Error("Expected JSON model topology, received ArrayBuffer."); } return { dateSaved: new Date(), modelTopologyType: "JSON", modelTopologyBytes: modelArtifacts.modelTopology == null ? 0 : stringByteLength(JSON.stringify(modelArtifacts.modelTopology)), weightSpecsBytes: modelArtifacts.weightSpecs == null ? 0 : stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)), weightDataBytes: modelArtifacts.weightData == null ? 
0 : modelArtifacts.weightData.byteLength }; } function computeFloat16MantisaTable() { const convertMantissa = (i) => { let m = i << 13; let e = 0; while ((m & 8388608) === 0) { e -= 8388608; m <<= 1; } m &= ~8388608; e += 947912704; return m | e; }; const mantisaTable = new Uint32Array(2048); mantisaTable[0] = 0; for (let i = 1; i < 1024; i++) { mantisaTable[i] = convertMantissa(i); } for (let i = 1024; i < 2048; i++) { mantisaTable[i] = 939524096 + (i - 1024 << 13); } return mantisaTable; } function computeFloat16ExponentTable() { const exponentTable = new Uint32Array(64); exponentTable[0] = 0; exponentTable[31] = 1199570944; exponentTable[32] = 2147483648; exponentTable[63] = 3347054592; for (let i = 1; i < 31; i++) { exponentTable[i] = i << 23; } for (let i = 33; i < 63; i++) { exponentTable[i] = 2147483648 + (i - 32 << 23); } return exponentTable; } function computeFloat16OffsetTable() { const offsetTable = new Uint32Array(64); for (let i = 0; i < 64; i++) { offsetTable[i] = 1024; } offsetTable[0] = offsetTable[32] = 0; return offsetTable; } function getFloat16Decoder() { const mantisaTable = computeFloat16MantisaTable(); const exponentTable = computeFloat16ExponentTable(); const offsetTable = computeFloat16OffsetTable(); return (quantizedArray) => { const buffer10 = new ArrayBuffer(4 * quantizedArray.length); const bufferUint32View = new Uint32Array(buffer10); for (let index = 0; index < quantizedArray.length; index++) { const float16Bits = quantizedArray[index]; const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 1023)] + exponentTable[float16Bits >> 10]; bufferUint32View[index] = float32Bits; } return new Float32Array(buffer10); }; } // node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ class IORouterRegistry { constructor() { this.saveRouters = []; this.loadRouters = []; } static getInstance() { if (IORouterRegistry.instance == null) { IORouterRegistry.instance = new IORouterRegistry(); } return IORouterRegistry.instance; } static registerSaveRouter(saveRouter) { IORouterRegistry.getInstance().saveRouters.push(saveRouter); } static registerLoadRouter(loadRouter) { IORouterRegistry.getInstance().loadRouters.push(loadRouter); } static getSaveHandlers(url) { return IORouterRegistry.getHandlers(url, "save"); } static getLoadHandlers(url, loadOptions) { return IORouterRegistry.getHandlers(url, "load", loadOptions); } static getHandlers(url, handlerType, loadOptions) { const validHandlers = []; const routers = handlerType === "load" ? 
IORouterRegistry.getInstance().loadRouters : IORouterRegistry.getInstance().saveRouters; routers.forEach((router) => { const handler = router(url, loadOptions); if (handler !== null) { validHandlers.push(handler); } }); return validHandlers; } } const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter); const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter); const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url); const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions); // node_modules/@tensorflow/tfjs-core/dist/io/model_management.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ const URL_SCHEME_SUFFIX = "://"; class ModelStoreManagerRegistry { constructor() { this.managers = {}; } static getInstance() { if (ModelStoreManagerRegistry.instance == null) { ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry(); } return ModelStoreManagerRegistry.instance; } static registerManager(scheme, manager) { assert(scheme != null, () => "scheme must not be undefined or null."); if (scheme.endsWith(URL_SCHEME_SUFFIX)) { scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX)); } assert(scheme.length > 0, () => "scheme must not be an empty string."); const registry = ModelStoreManagerRegistry.getInstance(); assert(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`); registry.managers[scheme] = manager; } static getManager(scheme) { const manager = this.getInstance().managers[scheme]; if (manager == null) { throw new Error(`Cannot find model manager for scheme '${scheme}'`); } return manager; } static getSchemes() { return Object.keys(this.getInstance().managers); } } function parseURL(url) { if (url.indexOf(URL_SCHEME_SUFFIX) === -1) { throw new Error(`The url string provided does not contain a scheme. 
Supported schemes are: ${ModelStoreManagerRegistry.getSchemes().join(",")}`); } return { scheme: url.split(URL_SCHEME_SUFFIX)[0], path: url.split(URL_SCHEME_SUFFIX)[1] }; } async function cloneModelInternal(sourceURL, destURL, deleteSource = false) { assert(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`); const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL); assert(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`); assert(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) load handlers for source URL ${sourceURL}.`); const loadHandler = loadHandlers[0]; const saveHandlers = IORouterRegistry.getSaveHandlers(destURL); assert(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination URL ${destURL}.`); assert(saveHandlers.length < 2, () => `Copying failed because more than one (${saveHandlers.length}) save handlers for destination URL ${destURL}.`); const saveHandler = saveHandlers[0]; const sourceScheme = parseURL(sourceURL).scheme; const sourcePath = parseURL(sourceURL).path; const sameMedium = sourceScheme === parseURL(destURL).scheme; const modelArtifacts = await loadHandler.load(); if (deleteSource && sameMedium) { await ModelStoreManagerRegistry.getManager(sourceScheme).removeModel(sourcePath); } const saveResult = await saveHandler.save(modelArtifacts); if (deleteSource && !sameMedium) { await ModelStoreManagerRegistry.getManager(sourceScheme).removeModel(sourcePath); } return saveResult.modelArtifactsInfo; } async function listModels() { const schemes = ModelStoreManagerRegistry.getSchemes(); const out = {}; for (const scheme of schemes) { const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels(); for (const path in schemeOut) { const url = scheme + URL_SCHEME_SUFFIX + path; out[url] = schemeOut[path]; } } return out; } async function removeModel(url) { const schemeAndPath = parseURL(url); const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme); return manager.removeModel(schemeAndPath.path); } async function copyModel(sourceURL, destURL) { const deleteSource = false; return cloneModelInternal(sourceURL, destURL, deleteSource); } async function moveModel(sourceURL, destURL) { const deleteSource = true; return cloneModelInternal(sourceURL, destURL, deleteSource); } // node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
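 *
 * BrowserDownloads (below) implements the "downloads://" save scheme: the topology and
 * weight manifest go into a <prefix>.json file and the weights into <prefix>.weights.bin,
 * both triggered through synthetic anchor clicks; BrowserFiles is the matching load
 * handler for user-selected File objects. Usage sketch (model is an assumed,
 * already-built tf.LayersModel):
 *
 *   await model.save('downloads://my-model');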
* ============================================================================= */ const DEFAULT_FILE_NAME_PREFIX = "model"; const DEFAULT_JSON_EXTENSION_NAME = ".json"; const DEFAULT_WEIGHT_DATA_EXTENSION_NAME = ".weights.bin"; function defer(f) { return new Promise((resolve) => setTimeout(resolve)).then(f); } class BrowserDownloads { constructor(fileNamePrefix) { if (!env().getBool("IS_BROWSER")) { throw new Error("browserDownloads() cannot proceed because the current environment is not a browser."); } if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) { fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length); } if (fileNamePrefix == null || fileNamePrefix.length === 0) { fileNamePrefix = DEFAULT_FILE_NAME_PREFIX; } this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME; this.weightDataFileName = fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME; } async save(modelArtifacts) { if (typeof document === "undefined") { throw new Error("Browser downloads are not supported in this environment since `document` is not present"); } const weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], {type: "application/octet-stream"})); if (modelArtifacts.modelTopology instanceof ArrayBuffer) { throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet."); } else { const weightsManifest = [{ paths: ["./" + this.weightDataFileName], weights: modelArtifacts.weightSpecs }]; const modelTopologyAndWeightManifest = { modelTopology: modelArtifacts.modelTopology, format: modelArtifacts.format, generatedBy: modelArtifacts.generatedBy, convertedBy: modelArtifacts.convertedBy, weightsManifest }; const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {type: "application/json"})); const jsonAnchor = this.jsonAnchor == null ? document.createElement("a") : this.jsonAnchor; jsonAnchor.download = this.modelTopologyFileName; jsonAnchor.href = modelTopologyAndWeightManifestURL; await defer(() => jsonAnchor.dispatchEvent(new MouseEvent("click"))); if (modelArtifacts.weightData != null) { const weightDataAnchor = this.weightDataAnchor == null ? 
document.createElement("a") : this.weightDataAnchor; weightDataAnchor.download = this.weightDataFileName; weightDataAnchor.href = weightsURL; await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent("click"))); } return {modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts)}; } } } BrowserDownloads.URL_SCHEME = "downloads://"; class BrowserFiles { constructor(files) { if (files == null || files.length < 1) { throw new Error(`When calling browserFiles, at least 1 file is required, but received ${files}`); } this.files = files; } async load() { const jsonFile = this.files[0]; const weightFiles = this.files.slice(1); return new Promise((resolve, reject) => { const jsonReader = new FileReader(); jsonReader.onload = (event) => { const modelJSON = JSON.parse(event.target.result); const modelTopology = modelJSON.modelTopology; if (modelTopology == null) { reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`)); return; } if (weightFiles.length === 0) { resolve({modelTopology}); } const weightsManifest = modelJSON.weightsManifest; if (weightsManifest == null) { reject(new Error(`weightsManifest field is missing from file ${jsonFile.name}`)); return; } let pathToFile; try { pathToFile = this.checkManifestAndWeightFiles(weightsManifest, weightFiles); } catch (err) { reject(err); return; } const weightSpecs = []; const paths = []; const perFileBuffers = []; weightsManifest.forEach((weightsGroup) => { weightsGroup.paths.forEach((path) => { paths.push(path); perFileBuffers.push(null); }); weightSpecs.push(...weightsGroup.weights); }); weightsManifest.forEach((weightsGroup) => { weightsGroup.paths.forEach((path) => { const weightFileReader = new FileReader(); weightFileReader.onload = (event2) => { const weightData = event2.target.result; const index = paths.indexOf(path); perFileBuffers[index] = weightData; if (perFileBuffers.indexOf(null) === -1) { resolve({ modelTopology, weightSpecs, weightData: concatenateArrayBuffers(perFileBuffers), format: modelJSON.format, generatedBy: modelJSON.generatedBy, convertedBy: modelJSON.convertedBy, userDefinedMetadata: modelJSON.userDefinedMetadata }); } }; weightFileReader.onerror = (error) => reject(`Failed to read weights data from file of path '${path}'.`); weightFileReader.readAsArrayBuffer(pathToFile[path]); }); }); }; jsonReader.onerror = (error) => reject(`Failed to read model topology and weights manifest JSON from file '${jsonFile.name}'. 
BrowserFiles supports loading Keras-style tf.Model artifacts only.`); jsonReader.readAsText(jsonFile); }); } checkManifestAndWeightFiles(manifest, files) { const basenames = []; const fileNames = files.map((file) => basename(file.name)); const pathToFile = {}; for (const group of manifest) { group.paths.forEach((path) => { const pathBasename = basename(path); if (basenames.indexOf(pathBasename) !== -1) { throw new Error(`Duplicate file basename found in weights manifest: '${pathBasename}'`); } basenames.push(pathBasename); if (fileNames.indexOf(pathBasename) === -1) { throw new Error(`Weight file with basename '${pathBasename}' is not provided.`); } else { pathToFile[path] = files[fileNames.indexOf(pathBasename)]; } }); } if (basenames.length !== files.length) { throw new Error(`Mismatch in the number of files in weights manifest (${basenames.length}) and the number of weight files provided (${files.length}).`); } return pathToFile; } } const browserDownloadsRouter = (url) => { if (!env().getBool("IS_BROWSER")) { return null; } else { if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) { return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length)); } else { return null; } } }; IORouterRegistry.registerSaveRouter(browserDownloadsRouter); function browserDownloads(fileNamePrefix = "model") { return new BrowserDownloads(fileNamePrefix); } function browserFiles(files) { return new BrowserFiles(files); } // node_modules/@tensorflow/tfjs-core/dist/io/progress.js /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) { checkPromises(promises); startFraction = startFraction == null ? 0 : startFraction; endFraction = endFraction == null ? 1 : endFraction; checkFraction(startFraction, endFraction); let resolvedPromise = 0; const registerMonitor = (promise) => { promise.then((value) => { const fraction = startFraction + ++resolvedPromise / promises.length * (endFraction - startFraction); onProgress(fraction); return value; }); return promise; }; function checkPromises(promises2) { assert(promises2 != null && Array.isArray(promises2) && promises2.length > 0, () => "promises must be a non-empty array"); } function checkFraction(startFraction2, endFraction2) { assert(startFraction2 >= 0 && startFraction2 <= 1, () => `Progress fraction must be in range [0, 1], but got startFraction ${startFraction2}`); assert(endFraction2 >= 0 && endFraction2 <= 1, () => `Progress fraction must be in range [0, 1], but got endFraction ${endFraction2}`); assert(endFraction2 >= startFraction2, () => `startFraction must be no more than endFraction, but got startFraction ${startFraction2} and endFraction ${endFraction2}`); } return Promise.all(promises.map(registerMonitor)); } // node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js /** * @license * Copyright 2018 Google LLC. 
All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) { if (loadOptions == null) { loadOptions = {}; } const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch : loadOptions.fetchFunc; const requests = fetchURLs.map((fetchURL) => fetchFunc(fetchURL, loadOptions.requestInit, {isBinary: true})); const fetchStartFraction = 0; const fetchEndFraction = 0.5; const responses = loadOptions.onProgress == null ? await Promise.all(requests) : await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction); const bufferPromises = responses.map((response) => response.arrayBuffer()); const bufferStartFraction = 0.5; const bufferEndFraction = 1; const buffers = loadOptions.onProgress == null ? await Promise.all(bufferPromises) : await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction); return buffers; } async function loadWeights(manifest, filePathPrefix = "", weightNames, requestInit) { const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, {requestInit}); const loadWeights2 = weightsLoaderFactory(fetchWeights); return loadWeights2(manifest, filePathPrefix, weightNames); } function weightsLoaderFactory(fetchWeightsFunction) { return async (manifest, filePathPrefix = "", weightNames) => { const groupIndicesToFetchMap = manifest.map(() => false); const groupWeightsToFetch = {}; const weightsFound = weightNames != null ? weightNames.map(() => false) : []; const allManifestWeightNames = []; manifest.forEach((manifestGroupConfig, groupIndex) => { let groupOffset = 0; manifestGroupConfig.weights.forEach((weightsEntry) => { const rawDtype = "quantization" in weightsEntry ? weightsEntry.quantization.dtype : weightsEntry.dtype; const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] * sizeFromShape(weightsEntry.shape); const enqueueWeightsForFetchingFn = () => { groupIndicesToFetchMap[groupIndex] = true; if (groupWeightsToFetch[groupIndex] == null) { groupWeightsToFetch[groupIndex] = []; } groupWeightsToFetch[groupIndex].push({ manifestEntry: weightsEntry, groupOffset, sizeBytes: weightsBytes }); }; if (weightNames != null) { weightNames.forEach((weightName, weightIndex) => { if (weightName === weightsEntry.name) { enqueueWeightsForFetchingFn(); weightsFound[weightIndex] = true; } }); } else { enqueueWeightsForFetchingFn(); } allManifestWeightNames.push(weightsEntry.name); groupOffset += weightsBytes; }); }); if (!weightsFound.every((found) => found)) { const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]); throw new Error(`Could not find weights in manifest with names: ${weightsNotFound.join(", ")}. 
Manifest JSON has weights with names: ${allManifestWeightNames.join(", ")}.`); } const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => { if (shouldFetch) { accumulator.push(i); } return accumulator; }, []); const fetchUrls = []; groupIndicesToFetch.forEach((i) => { manifest[i].paths.forEach((filepath) => { const fetchUrl = filePathPrefix + (!filePathPrefix.endsWith("/") ? "/" : "") + filepath; fetchUrls.push(fetchUrl); }); }); const buffers = await fetchWeightsFunction(fetchUrls); const weightsTensorMap = {}; let bufferIndexOffset = 0; groupIndicesToFetch.forEach((i) => { const numBuffers = manifest[i].paths.length; let groupBytes = 0; for (let i2 = 0; i2 < numBuffers; i2++) { groupBytes += buffers[bufferIndexOffset + i2].byteLength; } const groupBuffer = new ArrayBuffer(groupBytes); const groupByteBuffer = new Uint8Array(groupBuffer); let groupBufferOffset = 0; for (let i2 = 0; i2 < numBuffers; i2++) { const buffer10 = new Uint8Array(buffers[bufferIndexOffset + i2]); groupByteBuffer.set(buffer10, groupBufferOffset); groupBufferOffset += buffer10.byteLength; } const weightsEntries = groupWeightsToFetch[i]; weightsEntries.forEach((weightsEntry) => { const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes); const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]); for (const name in nameToTensorMap) { weightsTensorMap[name] = nameToTensorMap[name]; } }); bufferIndexOffset += numBuffers; }); return weightsTensorMap; }; } // node_modules/@tensorflow/tfjs-core/dist/io/http.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 * ============================================================================= */ const OCTET_STREAM_MIME_TYPE = "application/octet-stream"; const JSON_TYPE = "application/json"; class HTTPRequest { constructor(path, loadOptions) { this.DEFAULT_METHOD = "POST"; if (loadOptions == null) { loadOptions = {}; } this.weightPathPrefix = loadOptions.weightPathPrefix; this.onProgress = loadOptions.onProgress; if (loadOptions.fetchFunc != null) { assert(typeof loadOptions.fetchFunc === "function", () => "Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"); this.fetch = loadOptions.fetchFunc; } else { this.fetch = env().platform.fetch; } assert(path != null && path.length > 0, () => "URL path for http must not be null, undefined or empty."); if (Array.isArray(path)) { assert(path.length === 2, () => `URL paths for http must have a length of 2 (actual length is ${path.length}).`); } this.path = path; if (loadOptions.requestInit != null && loadOptions.requestInit.body != null) { throw new Error("requestInit is expected to have no pre-existing body, but has one."); } this.requestInit = loadOptions.requestInit || {}; } async save(modelArtifacts) { if (modelArtifacts.modelTopology instanceof ArrayBuffer) { throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet."); } const init = Object.assign({method: this.DEFAULT_METHOD}, this.requestInit); init.body = new FormData(); const weightsManifest = [{ paths: ["./model.weights.bin"], weights: modelArtifacts.weightSpecs }]; const modelTopologyAndWeightManifest = { modelTopology: modelArtifacts.modelTopology, format: modelArtifacts.format, generatedBy: modelArtifacts.generatedBy, convertedBy: modelArtifacts.convertedBy, userDefinedMetadata: modelArtifacts.userDefinedMetadata, weightsManifest }; init.body.append("model.json", new Blob([JSON.stringify(modelTopologyAndWeightManifest)], {type: JSON_TYPE}), "model.json"); if (modelArtifacts.weightData != null) { init.body.append("model.weights.bin", new Blob([modelArtifacts.weightData], {type: OCTET_STREAM_MIME_TYPE}), "model.weights.bin"); } const response = await this.fetch(this.path, init); if (response.ok) { return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts), responses: [response] }; } else { throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${response.status}.`); } } async load() { const modelConfigRequest = await this.fetch(this.path, this.requestInit); if (!modelConfigRequest.ok) { throw new Error(`Request to ${this.path} failed with status code ${modelConfigRequest.status}. Please verify this URL points to the model JSON of the model to load.`); } let modelConfig; try { modelConfig = await modelConfigRequest.json(); } catch (e) { let message = `Failed to parse model JSON of response from ${this.path}.`; if (this.path.endsWith(".pb")) { message += " Your path contains a .pb file extension. Support for .pb models has been removed in TensorFlow.js 1.0 in favor of .json models. 
You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your .pb models with the 'pb2json' NPM script in the tensorflow/tfjs-converter repository."; } else { message += " Please make sure the server is serving valid JSON for this request."; } throw new Error(message); } const modelTopology = modelConfig.modelTopology; const weightsManifest = modelConfig.weightsManifest; const generatedBy = modelConfig.generatedBy; const convertedBy = modelConfig.convertedBy; const format = modelConfig.format; const userDefinedMetadata = modelConfig.userDefinedMetadata; if (modelTopology == null && weightsManifest == null) { throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology nor manifest for weights.`); } let weightSpecs; let weightData; if (weightsManifest != null) { const results = await this.loadWeights(weightsManifest); [weightSpecs, weightData] = results; } return { modelTopology, weightSpecs, weightData, userDefinedMetadata, generatedBy, convertedBy, format }; } async loadWeights(weightsManifest) { const weightPath = Array.isArray(this.path) ? this.path[1] : this.path; const [prefix, suffix] = parseUrl(weightPath); const pathPrefix = this.weightPathPrefix || prefix; const weightSpecs = []; for (const entry of weightsManifest) { weightSpecs.push(...entry.weights); } const fetchURLs = []; weightsManifest.forEach((weightsGroup) => { weightsGroup.paths.forEach((path) => { fetchURLs.push(pathPrefix + path + suffix); }); }); const buffers = await loadWeightsAsArrayBuffer(fetchURLs, { requestInit: this.requestInit, fetchFunc: this.fetch, onProgress: this.onProgress }); return [weightSpecs, concatenateArrayBuffers(buffers)]; } } HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//; function parseUrl(url) { const lastSlash = url.lastIndexOf("/"); const lastSearchParam = url.lastIndexOf("?"); const prefix = url.substring(0, lastSlash); const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : ""; return [prefix + "/", suffix]; } function isHTTPScheme(url) { return url.match(HTTPRequest.URL_SCHEME_REGEX) != null; } const httpRouter = (url, loadOptions) => { if (typeof fetch === "undefined" && (loadOptions == null || loadOptions.fetchFunc == null)) { return null; } else { let isHTTP = true; if (Array.isArray(url)) { isHTTP = url.every((urlItem) => isHTTPScheme(urlItem)); } else { isHTTP = isHTTPScheme(url); } if (isHTTP) { return http(url, loadOptions); } } return null; }; IORouterRegistry.registerSaveRouter(httpRouter); IORouterRegistry.registerLoadRouter(httpRouter); function http(path, loadOptions) { return new HTTPRequest(path, loadOptions); } function browserHTTPRequest(path, loadOptions) { return http(path, loadOptions); } // node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ class PassthroughLoader { constructor(modelArtifacts) { this.modelArtifacts = modelArtifacts; } async load() { return this.modelArtifacts; } } class PassthroughSaver { constructor(saveHandler) { this.saveHandler = saveHandler; } async save(modelArtifacts) { return this.saveHandler(modelArtifacts); } } function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) { if (arguments.length === 1) { const isModelArtifacts = modelArtifacts.modelTopology != null || modelArtifacts.weightSpecs != null; if (isModelArtifacts) { return new PassthroughLoader(modelArtifacts); } else { console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."); return new PassthroughLoader({modelTopology: modelArtifacts}); } } else { console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."); return new PassthroughLoader({ modelTopology: modelArtifacts, weightSpecs, weightData, trainingConfig }); } } function withSaveHandler(saveHandler) { return new PassthroughSaver(saveHandler); } // node_modules/@tensorflow/tfjs-core/dist/io/io.js const io_exports = {}; __export(io_exports, { browserFiles: () => browserFiles, browserHTTPRequest: () => browserHTTPRequest, concatenateArrayBuffers: () => concatenateArrayBuffers, copyModel: () => copyModel, decodeWeights: () => decodeWeights, encodeWeights: () => encodeWeights, fromMemory: () => fromMemory, getLoadHandlers: () => getLoadHandlers, getModelArtifactsInfoForJSON: () => getModelArtifactsInfoForJSON, getSaveHandlers: () => getSaveHandlers, http: () => http, isHTTPScheme: () => isHTTPScheme, listModels: () => listModels, loadWeights: () => loadWeights, moveModel: () => moveModel, registerLoadRouter: () => registerLoadRouter, registerSaveRouter: () => registerSaveRouter, removeModel: () => removeModel, weightsLoaderFactory: () => weightsLoaderFactory, withSaveHandler: () => withSaveHandler }); /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ // node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ function confusionMatrix_(labels, predictions, numClasses) { const $labels = convertToTensor(labels, "labels", "confusionMatrix"); const $predictions = convertToTensor(predictions, "predictions", "confusionMatrix"); assert(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, but got ${numClasses}`); assert($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`); assert($predictions.rank === 1, () => `Expected the rank of predictions to be 1, but got ${$predictions.rank}`); assert($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ${$labels.shape[0]} vs. ${$predictions.shape[0]}. Labels and predictions should have the same number of elements.`); assert(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ${numClasses}`); const oneHotLabels = oneHot(cast($labels, "int32"), numClasses); const oneHotPredictions = oneHot(cast($predictions, "int32"), numClasses); const oneHotLabelsT = transpose(oneHotLabels); return cast(matMul(oneHotLabelsT, oneHotPredictions), "int32"); } const confusionMatrix = op({confusionMatrix_}); // node_modules/@tensorflow/tfjs-core/dist/math.js const math_exports = {}; __export(math_exports, { confusionMatrix: () => confusionMatrix }); /** * @license * Copyright 2018 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ // node_modules/@tensorflow/tfjs-core/dist/ops/browser.js const browser_exports = {}; __export(browser_exports, { fromPixels: () => fromPixels, toPixels: () => toPixels }); /** * @license * Copyright 2019 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* ============================================================================= */ let fromPixels2DContext; function fromPixels_(pixels, numChannels = 3) { if (numChannels > 4) { throw new Error("Cannot construct Tensor with more than 4 channels from pixels."); } if (pixels == null) { throw new Error("pixels passed to tf.browser.fromPixels() can not be null"); } let isPixelData = false; let isImageData = false; let isVideo = false; let isImage = false; let isCanvasLike = false; if (pixels.data instanceof Uint8Array) { isPixelData = true; } else if (typeof ImageData !== "undefined" && pixels instanceof ImageData) { isImageData = true; } else if (typeof HTMLVideoElement !== "undefined" && pixels instanceof HTMLVideoElement) { isVideo = true; } else if (typeof HTMLImageElement !== "undefined" && pixels instanceof HTMLImageElement) { isImage = true; } else if (pixels.getContext != null) { isCanvasLike = true; } else { throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${pixels.constructor.name}`); } if (isVideo) { const HAVE_CURRENT_DATA_READY_STATE = 2; if (isVideo && pixels.readyState < HAVE_CURRENT_DATA_READY_STATE) { throw new Error("The video element has not loaded data yet. Please wait for `loadeddata` event on the