mirror of https://github.com/vladmandic/human
commit a99b969989
parent e49050a0db

update
@@ -9,7 +9,7 @@ Repository: **<git+https://github.com/vladmandic/human.git>**

 ## Changelog

-### **HEAD -> main** 2021/05/11 mandic00@live.com
+### **HEAD -> main** 2021/05/16 mandic00@live.com

 - add node-webcam demo
 - fix node build and update model signatures
File diff suppressed because one or more lines are too long

@@ -32,3 +32,20 @@
 2021-05-16 23:54:31 INFO:  Generate types: ["src/human.ts"]
 2021-05-16 23:54:36 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
 2021-05-16 23:54:36 INFO:  Generate TypeDocs: ["src/human.ts"]
+2021-05-17 08:56:17 INFO:  @vladmandic/human version 1.8.4
+2021-05-17 08:56:17 INFO:  User: vlado Platform: linux Arch: x64 Node: v16.0.0
+2021-05-17 08:56:17 INFO:  Build: file startup all type: production config: {"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}
+2021-05-17 08:56:17 STATE: Build for: node type: tfjs: {"imports":1,"importBytes":39,"outputBytes":1284,"outputFiles":"dist/tfjs.esm.js"}
+2021-05-17 08:56:17 STATE: Build for: node type: node: {"imports":35,"importBytes":413594,"outputBytes":372832,"outputFiles":"dist/human.node.js"}
+2021-05-17 08:56:17 STATE: Build for: nodeGPU type: tfjs: {"imports":1,"importBytes":43,"outputBytes":1292,"outputFiles":"dist/tfjs.esm.js"}
+2021-05-17 08:56:17 STATE: Build for: nodeGPU type: node: {"imports":35,"importBytes":413602,"outputBytes":372836,"outputFiles":"dist/human.node-gpu.js"}
+2021-05-17 08:56:17 STATE: Build for: nodeWASM type: tfjs: {"imports":1,"importBytes":81,"outputBytes":1359,"outputFiles":"dist/tfjs.esm.js"}
+2021-05-17 08:56:17 STATE: Build for: nodeWASM type: node: {"imports":35,"importBytes":413669,"outputBytes":372908,"outputFiles":"dist/human.node-wasm.js"}
+2021-05-17 08:56:17 STATE: Build for: browserNoBundle type: tfjs: {"imports":1,"importBytes":2488,"outputBytes":1394,"outputFiles":"dist/tfjs.esm.js"}
+2021-05-17 08:56:17 STATE: Build for: browserNoBundle type: esm: {"imports":35,"importBytes":413704,"outputBytes":229716,"outputFiles":"dist/human.esm-nobundle.js"}
+2021-05-17 08:56:18 STATE: Build for: browserBundle type: tfjs: {"modules":1274,"moduleBytes":4114813,"imports":7,"importBytes":2488,"outputBytes":1111318,"outputFiles":"dist/tfjs.esm.js"}
+2021-05-17 08:56:18 STATE: Build for: browserBundle type: iife: {"imports":35,"importBytes":1523628,"outputBytes":1337370,"outputFiles":"dist/human.js"}
+2021-05-17 08:56:19 STATE: Build for: browserBundle type: esm: {"imports":35,"importBytes":1523628,"outputBytes":1337362,"outputFiles":"dist/human.esm.js"}
+2021-05-17 08:56:19 INFO:  Generate types: ["src/human.ts"]
+2021-05-17 08:56:24 INFO:  Update Change log: ["/home/vlado/dev/human/CHANGELOG.md"]
+2021-05-17 08:56:24 INFO:  Generate TypeDocs: ["src/human.ts"]
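The log above records one production build per target (node, nodeGPU, nodeWASM, browserNoBundle, browserBundle) with whitespace, identifier and syntax minification enabled. The project's actual build script is not part of this commit; the following is only a rough sketch, assuming a plain esbuild setup, of how the logged config for the node target could look. The entry point and outfile are taken from the log; platform, format and the external list are assumptions.

```ts
// Hypothetical sketch only: reproduces the logged production config
// ({"minifyWhitespace":true,"minifyIdentifiers":true,"minifySyntax":true}) for the "node" target.
import * as esbuild from 'esbuild';

async function buildNodeTarget(): Promise<void> {
  const result = await esbuild.build({
    entryPoints: ['src/human.ts'],       // entry point named in the log
    outfile: 'dist/human.node.js',       // matches outputFiles for the node target
    bundle: true,
    platform: 'node',                    // assumption for the node target
    format: 'cjs',                       // assumption
    external: ['@tensorflow/tfjs-node'], // assumption: keep the tfjs backend external
    minifyWhitespace: true,
    minifyIdentifiers: true,
    minifySyntax: true,
    metafile: true,                      // import/byte counts like those logged can be derived from the metafile
  });
  console.log('outputs:', Object.keys(result.metafile?.outputs ?? {}));
}

buildNodeTarget().catch((err) => console.error(err));
```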
@@ -54,7 +54,11 @@ export function enhance(input): Tensor {
   const crop = (tensor.shape.length === 3)
     ? tf.image.cropAndResize(tf.expandDims(tensor, 0), box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]) // add batch dimension if missing
     : tf.image.cropAndResize(tensor, box, [0], [model.inputs[0].shape[2], model.inputs[0].shape[1]]);
-  // const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false); // just resize to fit the embedding model
+
+  /*
+  // just resize to fit the embedding model instead of cropping
+  const crop = tf.image.resizeBilinear(tensor, [model.inputs[0].shape[2], model.inputs[0].shape[1]], false);
+  */

   /*
   // convert to black&white to avoid colorization impact
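For reference, tf.image.cropAndResize only accepts a rank-4 [batch, height, width, channels] tensor, which is why the code above adds a batch dimension when the input is rank-3. A minimal standalone sketch of that preprocessing step follows; the whole-frame box and the cropForEmbedding name are illustrative and not part of this change.

```ts
import * as tf from '@tensorflow/tfjs';

// Sketch of the crop-and-resize step that feeds the face embedding model.
// In the real code the box comes from the face detector and the output size from model.inputs[0].shape.
function cropForEmbedding(image: tf.Tensor3D | tf.Tensor4D, inputHeight: number, inputWidth: number): tf.Tensor4D {
  return tf.tidy(() => {
    const batched = (image.rank === 3 ? tf.expandDims(image, 0) : image) as tf.Tensor4D; // cropAndResize requires [batch, h, w, c]
    const box = [[0, 0, 1, 1]]; // normalized [y1, x1, y2, x2]; whole frame used as a placeholder
    return tf.image.cropAndResize(batched, box, [0], [inputHeight, inputWidth]);
  });
}

// usage sketch: const crop = cropForEmbedding(inputTensor, 224, 224);
```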
@@ -68,16 +72,17 @@ export function enhance(input): Tensor {
   */

   /*
-  // optional increase image contrast
-  // or do it per-channel so mean is done on each channel
-  // or do it based on histogram
+  // increase image pseudo-contrast 100%
+  // (or do it per-channel so mean is done on each channel)
+  // (or calculate histogram and do it based on histogram)
   const mean = merge.mean();
-  const factor = 5;
+  const factor = 2;
   const contrast = merge.sub(mean).mul(factor).add(mean);
   */

   /*
   // normalize brightness from 0..1
+  // silly way of creating pseudo-hdr of image
   const darken = crop.sub(crop.min());
   const lighten = darken.div(darken.max());
   */
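Both commented-out enhancements are plain tensor arithmetic: the first stretches values around their mean by a factor, the second rescales values so the darkest pixel becomes 0 and the brightest 1. A small sketch of the two ideas as standalone helpers; the names are illustrative and neither path is enabled by this commit.

```ts
import * as tf from '@tensorflow/tfjs';

// Stretch pixel values around their mean; factor > 1 increases pseudo-contrast.
// Values are not clamped afterwards, same as in the commented-out snippet.
function pseudoContrast(image: tf.Tensor, factor = 2): tf.Tensor {
  return tf.tidy(() => {
    const mean = image.mean();
    return image.sub(mean).mul(factor).add(mean);
  });
}

// Rescale values so the darkest pixel maps to 0 and the brightest to 1 (the "pseudo-hdr" idea).
function normalizeBrightness(image: tf.Tensor): tf.Tensor {
  return tf.tidy(() => {
    const darken = image.sub(image.min());
    return darken.div(darken.max());
  });
}
```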
@@ -123,8 +128,8 @@ export async function predict(image, config) {
     obj.age = Math.round(all[age - 1] > all[age + 1] ? 10 * age - 100 * all[age - 1] : 10 * age + 100 * all[age + 1]) / 10;

     const desc = resT.find((t) => t.shape[1] === 1024);
-    // const reshape = desc.reshape([128, 8]);
-    // const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it
+    // const reshape = desc.reshape([128, 8]); // reshape large 1024-element descriptor to 128 x 8
+    // const reduce = reshape.logSumExp(1); // reduce 2nd dimension by calculating logSumExp on it which leaves us with 128-element descriptor
 
     obj.descriptor = [...desc.dataSync()];
   });
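The commented-out alternative would shrink the 1024-value descriptor to 128 values by reshaping it to 128 x 8 and collapsing the second axis with logSumExp; the active code simply copies all 1024 values. A sketch of that reduction in isolation follows; the function and argument names are placeholders, not variables from predict().

```ts
import * as tf from '@tensorflow/tfjs';

// Reduce a 1024-element descriptor to 128 values by reshaping to [128, 8] and
// collapsing the second axis with logSumExp, as the commented-out code describes.
function reduceDescriptor(desc: tf.Tensor): number[] {
  return tf.tidy(() => {
    const reshape = desc.reshape([128, 8]);
    const reduce = reshape.logSumExp(1); // 128-element result
    return Array.from(reduce.dataSync());
  });
}
```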