add platform and backend capabilities detection
parent f4caef2e90
commit 6eaea226da
@@ -300,7 +300,7 @@ Default models in Human library are:
 - **Body Analysis**: MoveNet - Lightning variation
 - **Hand Analysis**: MediaPipe Hands
 - **Body Segmentation**: Google Selfie
-- **Object Detection**: CenterNet
+- **Object Detection**: MB3 CenterNet
 - **Body Segmentation**: Google Selfie
 
 Note that alternative models are provided and can be enabled via configuration
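The hunk above renames the default object-detection model in the docs; the note on alternative models refers to per-module configuration. As a minimal sketch of what switching models looks like (the `nanodet.json` file name is an assumption for illustration, not something this commit confirms):

const human = new Human.Human({
  object: { enabled: true, modelPath: 'nanodet.json' }, // hypothetical alternative to the default MB3 CenterNet model
});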
@@ -1,159 +1,68 @@
 /**
- * Human demo for NodeJS
+ * Human demo for NodeJS using Canvas library
  */
 
-const log = require('@vladmandic/pilogger');
 const fs = require('fs');
 const process = require('process');
+const log = require('@vladmandic/pilogger');
 const canvas = require('canvas');
+require('@tensorflow/tfjs-node'); // for nodejs, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
+const Human = require('../../dist/human.node.js'); // this is 'const Human = require('../dist/human.node-gpu.js').default;'
 
-// for NodeJS, `tfjs-node` or `tfjs-node-gpu` should be loaded before using Human
-// eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars
-const tf = require('@tensorflow/tfjs-node'); // or const tf = require('@tensorflow/tfjs-node-gpu');
-
-// load specific version of Human library that matches TensorFlow mode
-const Human = require('../../dist/human.node.js').default; // or const Human = require('../dist/human.node-gpu.js').default;
-
-let human = null;
-
-const myConfig = {
-  backend: 'tensorflow',
-  modelBasePath: 'file://models/',
+const config = { // just enable all and leave default settings
   debug: false,
-  async: true,
-  filter: { enabled: false },
-  face: {
-    enabled: true,
-    detector: { enabled: true },
-    mesh: { enabled: true },
-    iris: { enabled: true },
-    description: { enabled: true },
-    emotion: { enabled: true },
-  },
+  face: { enabled: true }, // includes mesh, iris, emotion, descriptor
   hand: { enabled: true },
   body: { enabled: true },
   object: { enabled: true },
+  gestures: { enabled: true },
 };
 
-async function init() {
-  // create instance of human
-  human = new Human(myConfig);
-  // wait until tf is ready
-  await human.tf.ready();
-  // pre-load models
+async function main() {
+  log.header();
+  // init
+  const human = new Human.Human(config); // create instance of human
   log.info('Human:', human.version);
-  await human.load();
-  const loaded = Object.keys(human.models).filter((a) => human.models[a]);
-  log.info('Loaded:', loaded);
+  // @ts-ignore
+  human.env.Canvas = canvas.Canvas; // monkey-patch human to use external canvas library
+  await human.load(); // pre-load models
+  log.info('Loaded models:', Object.keys(human.models).filter((a) => human.models[a]));
   log.info('Memory state:', human.tf.engine().memory());
-}
 
-async function detect(input, output) {
-  // read input image from file or url into buffer
-  let buffer;
-  log.info('Loading image:', input);
-  if (input.startsWith('http:') || input.startsWith('https:')) {
-    const fetch = (await import('node-fetch')).default;
-    const res = await fetch(input);
-    if (res && res.ok) buffer = await res.buffer();
-    else log.error('Invalid image URL:', input, res.status, res.statusText, res.headers.get('content-type'));
-  } else {
-    buffer = fs.readFileSync(input);
-  }
-  if (!buffer) return {};
+  // parse cmdline
+  const input = process.argv[2];
+  const output = process.argv[3];
+  if (process.argv.length !== 4) log.error('Parameters: <input-image> <output-image> missing');
+  else if (!fs.existsSync(input) && !input.startsWith('http')) log.error(`File not found: ${process.argv[2]}`);
+  else {
+    // everything seems ok
+    const inputImage = await canvas.loadImage(input); // load image using canvas library
+    log.info('Loaded image', input, inputImage.width, inputImage.height);
+    const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height); // create canvas
+    const ctx = inputCanvas.getContext('2d');
+    ctx.drawImage(inputImage, 0, 0); // draw input image onto canvas
 
-  // decode image using tfjs-node so we don't need external depenencies
-  /*
-  const tensor = human.tf.tidy(() => {
-    const decode = human.tf.node.decodeImage(buffer, 3);
-    let expand;
-    if (decode.shape[2] === 4) { // input is in rgba format, need to convert to rgb
-      const channels = human.tf.split(decode, 4, 2); // split rgba to channels
-      const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
-      expand = human.tf.reshape(rgb, [1, decode.shape[0], decode.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
-    } else {
-      expand = human.tf.expandDims(decode, 0);
-    }
-    const cast = human.tf.cast(expand, 'float32');
-    return cast;
-  });
-  */
+    // run detection
+    const result = await human.detect(inputCanvas);
 
-  // decode image using canvas library
-  const inputImage = await canvas.loadImage(input);
-  const inputCanvas = new canvas.Canvas(inputImage.width, inputImage.height, 'image');
-  const inputCtx = inputCanvas.getContext('2d');
-  inputCtx.drawImage(inputImage, 0, 0);
-  const inputData = inputCtx.getImageData(0, 0, inputImage.width, inputImage.height);
-  const tensor = human.tf.tidy(() => {
-    const data = tf.tensor(Array.from(inputData.data), [inputImage.width, inputImage.height, 4]);
-    const channels = human.tf.split(data, 4, 2); // split rgba to channels
-    const rgb = human.tf.stack([channels[0], channels[1], channels[2]], 2); // stack channels back to rgb and ignore alpha
-    const expand = human.tf.reshape(rgb, [1, data.shape[0], data.shape[1], 3]); // move extra dim from the end of tensor and use it as batch number instead
-    const cast = human.tf.cast(expand, 'float32');
-    return cast;
-  });
-
-  // image shape contains image dimensions and depth
-  log.state('Processing:', tensor['shape']);
-
-  // run actual detection
-  let result;
-  try {
-    result = await human.detect(tensor, myConfig);
-  } catch (err) {
-    log.error('caught');
-  }
-
-  // dispose image tensor as we no longer need it
-  human.tf.dispose(tensor);
-
-  // print data to console
-  if (result) {
-    // invoke persons getter
-    const persons = result.persons;
-    log.data('Detected:');
+    // print results summary
+    const persons = result.persons; // invoke persons getter, only used to print summary on console
     for (let i = 0; i < persons.length; i++) {
       const face = persons[i].face;
       const faceTxt = face ? `score:${face.score} age:${face.age} gender:${face.gender} iris:${face.iris}` : null;
       const body = persons[i].body;
       const bodyTxt = body ? `score:${body.score} keypoints:${body.keypoints?.length}` : null;
-      log.data(` #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
+      log.data(`Detected: #${i}: Face:${faceTxt} Body:${bodyTxt} LeftHand:${persons[i].hands.left ? 'yes' : 'no'} RightHand:${persons[i].hands.right ? 'yes' : 'no'} Gestures:${persons[i].gestures.length}`);
     }
-  }
 
-  // load and draw original image
-  const outputCanvas = new canvas.Canvas(tensor.shape[2], tensor.shape[1], 'image'); // decoded tensor shape tells us width and height
-  const ctx = outputCanvas.getContext('2d');
-  const original = await canvas.loadImage(buffer); // we already have input as buffer, so lets reuse it
-  ctx.drawImage(original, 0, 0, outputCanvas.width, outputCanvas.height); // draw original to new canvas
-
-  // draw human results on canvas
-  // human.setCanvas(outputCanvas); // tell human to use this canvas
-  human.draw.all(outputCanvas, result); // human will draw results as overlays on canvas
-
-  // write canvas to new image file
-  const out = fs.createWriteStream(output);
-  out.on('finish', () => log.state('Created output image:', output));
-  out.on('error', (err) => log.error('Error creating image:', output, err));
-  const stream = outputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
-  stream.pipe(out);
-
-  return result;
-}
-
-async function main() {
-  log.header();
-  log.info('Current folder:', process.env.PWD);
-  await init();
-  const input = process.argv[2];
-  const output = process.argv[3];
-  if (process.argv.length !== 4) {
-    log.error('Parameters: <input-image> <output-image> missing');
-  } else if (!fs.existsSync(input) && !input.startsWith('http')) {
-    log.error(`File not found: ${process.argv[2]}`);
-  } else {
-    await detect(input, output);
+    // draw detected results onto canvas and save it to a file
+    human.draw.all(inputCanvas, result); // use human build-in method to draw results as overlays on canvas
+    const outFile = fs.createWriteStream(output); // write canvas to new image file
+    outFile.on('finish', () => log.state('Output image:', output, inputCanvas.width, inputCanvas.height));
+    outFile.on('error', (err) => log.error('Output error:', output, err));
+    const stream = inputCanvas.createJPEGStream({ quality: 0.5, progressive: true, chromaSubsampling: true });
+    stream.pipe(outFile);
   }
 }
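The rewritten demo takes input and output images as positional arguments and logs an error otherwise (see the `process.argv` checks above). A hypothetical invocation (script path and file names are illustrative):

node demo/nodejs/node-canvas.js samples/person.jpg /tmp/person-processed.jpg

Because `human.env.Canvas` is monkey-patched before `human.load()`, the same `canvas` package handles image decoding, overlay drawing, and JPEG encoding, so neither `tf.node.decodeImage` nor `node-fetch` is needed anymore.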
src/env.ts
@@ -1,4 +1,5 @@
 import * as tf from '../dist/tfjs.esm.js';
+import * as image from './image/image';
 
 export interface Env {
   browser: undefined | boolean,
@@ -9,20 +10,22 @@ export interface Env {
   backends: string[],
   tfjs: {
     version: undefined | string,
-    external: undefined | boolean,
   },
   wasm: {
     supported: undefined | boolean,
+    backend: undefined | boolean,
     simd: undefined | boolean,
     multithread: undefined | boolean,
   },
   webgl: {
     supported: undefined | boolean,
+    backend: undefined | boolean,
     version: undefined | string,
     renderer: undefined | string,
   },
   webgpu: {
     supported: undefined | boolean,
+    backend: undefined | boolean,
     adapter: undefined | string,
   },
   kernels: string[],
@@ -39,20 +42,22 @@ export const env: Env = {
   backends: [],
   tfjs: {
     version: undefined,
-    external: undefined,
   },
   wasm: {
     supported: undefined,
+    backend: undefined,
     simd: undefined,
     multithread: undefined,
   },
   webgl: {
     supported: undefined,
+    backend: undefined,
     version: undefined,
     renderer: undefined,
   },
   webgpu: {
     supported: undefined,
+    backend: undefined,
     adapter: undefined,
   },
   kernels: [],
@@ -60,7 +65,7 @@ export const env: Env = {
   Image: undefined,
 };
 
-export function cpuinfo() {
+export async function cpuInfo() {
   const cpu = { model: '', flags: [] };
   if (env.node && env.platform?.startsWith('linux')) {
     // eslint-disable-next-line global-require
@@ -81,6 +86,37 @@ export function cpuinfo() {
   else env['cpu'] = cpu;
 }
 
+export async function backendInfo() {
+  // analyze backends
+  env.backends = Object.keys(tf.engine().registryFactory);
+  env.wasm.supported = typeof WebAssembly !== 'undefined';
+  env.wasm.backend = env.backends.includes('wasm');
+  if (env.wasm.supported && env.wasm.backend) {
+    env.wasm.simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
+    env.wasm.multithread = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
+  }
+
+  const c = image.canvas(100, 100);
+  const ctx = c ? c.getContext('webgl2') : undefined;
+  env.webgl.supported = typeof ctx !== 'undefined';
+  env.webgl.backend = env.backends.includes('webgl');
+  if (env.webgl.supported && env.webgl.backend) {
+    // @ts-ignore getGPGPUContext only exists on WebGL backend
+    const gl = tf.backend().gpgpu !== 'undefined' ? await tf.backend().getGPGPUContext().gl : null;
+    if (gl) {
+      env.webgl.version = gl.getParameter(gl.VERSION);
+      env.webgl.renderer = gl.getParameter(gl.RENDERER);
+    }
+  }
+
+  env.webgpu.supported = env.browser && typeof navigator['gpu'] !== 'undefined';
+  env.webgpu.backend = env.backends.includes('webgpu');
+  if (env.webgpu.supported) env.webgpu.adapter = (await navigator['gpu'].requestAdapter())?.name;
+
+  // enumerate kernels
+  env.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase());
+}
+
 export async function get() {
   env.browser = typeof navigator !== 'undefined';
   env.node = typeof process !== 'undefined';
@@ -103,30 +139,8 @@
     env.agent = `NodeJS ${process.version}`;
   }
 
-  // analyze backends
-  env.backends = Object.keys(tf.engine().registryFactory);
-  env.wasm.supported = env.backends.includes('wasm');
-  if (env.wasm.supported) {
-    env.wasm.simd = await tf.env().getAsync('WASM_HAS_SIMD_SUPPORT');
-    env.wasm.multithread = await tf.env().getAsync('WASM_HAS_MULTITHREAD_SUPPORT');
-  }
-
-  env.webgl.supported = typeof tf.backend().gpgpu !== 'undefined';
-  if (env.webgl.supported) {
-    // @ts-ignore getGPGPUContext only exists on WebGL backend
-    const gl = await tf.backend().getGPGPUContext().gl;
-    if (gl) {
-      env.webgl.version = gl.getParameter(gl.VERSION);
-      env.webgl.renderer = gl.getParameter(gl.RENDERER);
-    }
-  }
-
-  env.webgpu.supported = env.browser && typeof navigator['gpu'] !== 'undefined';
-  if (env.webgpu.supported) env.webgpu.adapter = (await navigator['gpu'].requestAdapter())?.name;
-
-  // enumerate kernels
-  env.kernels = tf.getKernelsForBackend(tf.getBackend()).map((kernel) => kernel.kernelName.toLowerCase());
+  await backendInfo();
 
   // get cpu info
-  // cpuinfo();
+  // await cpuInfo();
 }
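All capability probing now lives in `backendInfo()`, with results exposed on the shared `env` object. A minimal sketch of reading the detected capabilities from a `Human` instance (field names come from the `Env` interface above; this assumes env detection has already run as part of library initialization):

const human = new Human.Human(config);
await human.load();
log.info('backends:', human.env.backends); // all registered tfjs backends
log.info('wasm:', human.env.wasm.supported, 'simd:', human.env.wasm.simd, 'multithread:', human.env.wasm.multithread);
log.info('webgl:', human.env.webgl.supported, human.env.webgl.version, human.env.webgl.renderer);
log.info('webgpu:', human.env.webgpu.supported, 'adapter:', human.env.webgpu.adapter);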
@@ -29,10 +29,9 @@ export function canvas(width, height) {
     }
   } else {
     // @ts-ignore // env.canvas is an external monkey-patch
-    // eslint-disable-next-line new-cap
     c = (typeof env.Canvas !== 'undefined') ? new env.Canvas(width, height) : null;
   }
-  if (!c) throw new Error('Human: Cannot create canvas');
+  // if (!c) throw new Error('Human: Cannot create canvas');
   return c;
 }
 
@@ -100,7 +99,7 @@ export function process(input: Input, config: Config): { tensor: Tensor | null,
     }
   }
   // imagefx transforms using gl
-  if (config.filter.enabled) {
+  if (config.filter.enabled && env.webgl.supported) {
     if (!fx || !outCanvas || (inCanvas.width !== outCanvas.width) || (inCanvas?.height !== outCanvas?.height)) {
       outCanvas = canvas(inCanvas?.width, inCanvas?.height);
       if (outCanvas?.width !== inCanvas?.width) outCanvas.width = inCanvas?.width;
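With the throw commented out, `canvas()` now returns `null` when no canvas implementation is available, and `process()` only applies imagefx filters when WebGL is actually supported. In NodeJS a concrete canvas implementation is expected to be monkey-patched onto `env` beforehand, as the demo and test changes in this commit do:

const { Canvas, Image } = require('canvas'); // node-canvas package
human.env.Canvas = Canvas; // canvas(width, height) above will then construct new env.Canvas(width, height)
human.env.Image = Image;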
@@ -5,6 +5,7 @@
 
 import { log } from '../helpers';
 import * as tf from '../../dist/tfjs.esm.js';
+import * as image from '../image/image';
 
 export const config = {
   name: 'humangl',
@@ -46,13 +47,13 @@ export function register(): void {
   if (!tf.findBackend(config.name)) {
     // log('backend registration:', config.name);
     try {
-      config.canvas = (typeof OffscreenCanvas !== 'undefined') ? new OffscreenCanvas(config.width, config.height) : document.createElement('canvas');
+      config.canvas = image.canvas(100, 100);
     } catch (err) {
       log('error: cannot create canvas:', err);
       return;
     }
     try {
-      config.gl = config.canvas.getContext('webgl2', config.webGLattr) as WebGL2RenderingContext;
+      config.gl = config.canvas?.getContext('webgl2', config.webGLattr) as WebGL2RenderingContext;
     } catch (err) {
       log('error: cannot get WebGL2 context:', err);
       return;
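The `humangl` backend now creates its canvas through the shared `image.canvas()` helper instead of touching `OffscreenCanvas` or `document` directly, so registration works wherever the helper can produce a canvas. Selecting the backend remains a configuration choice; a one-line sketch (browser context assumed, since a WebGL2 context is required):

const human = new Human.Human({ backend: 'humangl' }); // matches config.name above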
@@ -4,7 +4,9 @@ const { Canvas, Image } = require('canvas');
 const Human = require('../dist/human.node-wasm.js');
 const test = require('./test-main.js').test;
 
+// @ts-ignore
 Human.env.Canvas = Canvas;
+// @ts-ignore
 Human.env.Image = Image;
 
 const config = {
wiki
@@ -1 +1 @@
-Subproject commit 0e902fcb57bdf9b65ed5e7ef281a699e95db6d99
+Subproject commit 44b1bf12ab5dbf4cedde34da123237b1cd02627b