import { parentPort } from "worker_threads";
import { addon as ov } from "openvino-node";

const modelXMLPath = "./model.xml";
const deviceName = "CPU";

async function runInference(data: Float32Array): Promise<Float32Array> {
  console.time("openvino infer");

  // Read the model from its IR (XML) file and compile it for the target device.
  const core = new ov.Core();
  const model = await core.readModel(modelXMLPath);
  const compiledModel = await core.compileModel(model, deviceName);

  // Wrap the incoming buffer in an f32 tensor; the 5-D shape must match
  // the model's expected input (here a single 1x1x80x160x160 patch).
  const patchSize = [1, 1, 80, 160, 160];
  const outputLayer = compiledModel.outputs[0];
  const tensor = new ov.Tensor(ov.element.f32, patchSize, data);

  // Run synchronous inference and read back the output tensor.
  const inferRequest = compiledModel.createInferRequest();
  inferRequest.setInputTensor(tensor);
  inferRequest.infer();
  const resultInfer = inferRequest.getTensor(outputLayer);

  console.timeEnd("openvino infer");
  return resultInfer.data as Float32Array;
}

// Receive input data from the main thread, run inference, and post the
// result (or the error message) back.
parentPort?.on("message", async (data: Float32Array) => {
  try {
    const result = await runInference(data);
    parentPort?.postMessage(result);
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    parentPort?.postMessage({ error: message });
  }
});
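
// --- Usage sketch (main thread) ---
// A minimal sketch of the main-thread counterpart, assuming this worker is
// compiled to "./inference.worker.js"; the file name and the input size are
// illustrative, not part of the worker above. Passing `input.buffer` in the
// transfer list moves the buffer to the worker instead of copying it.
//
//   import { Worker } from "worker_threads";
//
//   const worker = new Worker("./inference.worker.js");
//   const input = new Float32Array(1 * 1 * 80 * 160 * 160);
//
//   worker.once("message", (result) => {
//     if (result instanceof Float32Array) {
//       console.log("output length:", result.length);
//     } else {
//       console.error("inference failed:", result.error);
//     }
//     void worker.terminate();
//   });
//
//   worker.postMessage(input, [input.buffer]);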