hamster-desktop/electron/core/infer/vino.worker.ts

import { parentPort } from "worker_threads";
import { addon as ov } from "openvino-node";

const modelXMLPath = "./model.xml";
const deviceName = "CPU";

async function runInference(data: Float32Array): Promise<Float32Array> {
  console.time("openvino infer");
  // The model is read and compiled on every message; for repeated inference
  // the compiled model could be cached instead (see the sketch after this
  // function).
  const core = new ov.Core();
  const model = await core.readModel(modelXMLPath);
  const compiledModel = await core.compileModel(model, deviceName);
  // 5-D input shape, presumably [N, C, D, H, W]: one batch, one channel,
  // an 80x160x160 voxel patch.
  const patchSize = [1, 1, 80, 160, 160];
  const outputLayer = compiledModel.outputs[0];
  const tensor = new ov.Tensor(ov.element.f32, patchSize, data);
  const inferRequest = compiledModel.createInferRequest();
  inferRequest.setInputTensor(tensor);
  inferRequest.infer();
  const resultInfer = inferRequest.getTensor(outputLayer);
  console.timeEnd("openvino infer");
  // Tensor#data is typed as a generic typed array; this model's output is f32.
  return resultInfer.data as Float32Array;
}
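
// A possible optimization (a sketch, not part of the original file): compile
// the model once and reuse it across messages by caching the promise. The
// `CompiledModel` type import is an assumption about openvino-node's typings.
//
// import type { CompiledModel } from "openvino-node";
//
// let compiledPromise: Promise<CompiledModel> | undefined;
// function getCompiledModel(): Promise<CompiledModel> {
//   compiledPromise ??= (async () => {
//     const core = new ov.Core();
//     return core.compileModel(await core.readModel(modelXMLPath), deviceName);
//   })();
//   return compiledPromise;
// }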
parentPort?.on("message", async (data: Float32Array) => {
  try {
    const result = await runInference(data);
    parentPort?.postMessage(result);
  } catch (error) {
    // `error` is `unknown` in a TypeScript catch clause; narrow it before
    // reading `.message`, then report the failure back to the main thread.
    const message = error instanceof Error ? error.message : String(error);
    parentPort?.postMessage({ error: message });
  }
});
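
// --- Hypothetical usage from the main process (a sketch, not in this repo) ---
// Assumptions: the worker is bundled to vino.worker.js next to the caller and
// compiled to CommonJS (so __dirname exists), and the input length matches the
// 1*1*80*160*160 patch shape above. None of these names come from the source.
//
// import path from "path";
// import { Worker } from "worker_threads";
//
// const worker = new Worker(path.join(__dirname, "vino.worker.js"));
// const patch = new Float32Array(1 * 1 * 80 * 160 * 160); // fill with voxel data
// worker.once("message", (result) => {
//   if (result instanceof Float32Array) {
//     console.log("inference output length:", result.length);
//   } else {
//     console.error("worker error:", result.error);
//   }
// });
// worker.postMessage(patch);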