def test_inputs_get_friendly_name(device):
    """The first input of a compiled model keeps the friendly name 'data'."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    first_input = compiled.inputs[0]
    assert first_input.get_node().friendly_name == "data"
def test_get_input(device):
    """exec_net.input() returns a ConstOutput whose node is named 'data'.

    Fix: the local was named `input`, shadowing the builtin of the same
    name; renamed to `model_input`.
    """
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    model_input = exec_net.input()
    input_node = model_input.get_node()
    name = input_node.friendly_name
    assert isinstance(model_input, ConstOutput)
    assert name == "data"
def test_infer_new_request_wrong_port_name(device):
    """Inferring with an unknown tensor name must raise KeyError."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    input_tensor = Tensor(read_image())
    compiled = core.compile_model(model, device)
    with pytest.raises(KeyError) as e:
        compiled.infer_new_request({"_data_": input_tensor})
    assert "Port for tensor named _data_ was not found!" in str(e.value)
def test_add_extension(device):
    # Read a model that uses a custom op ("Template", version "custom_opset")
    # from an in-memory IR buffer after registering the template extension
    # library; reading must succeed and yield a Function.
    model = bytes(b"""<net name="Network" version="10"> <layers> <layer name="in1" type="Parameter" id="0" version="opset1"> <data element_type="f32" shape="2,2,2,1"/> <output> <port id="0" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> <dim>1</dim> </port> </output> </layer> <layer name="operation" id="1" type="Template" version="custom_opset"> <data add="11"/> <input> <port id="1" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> <dim>1</dim> </port> </input> <output> <port id="2" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> <dim>1</dim> </port> </output> </layer> <layer name="output" type="Result" id="2" version="opset1"> <input> <port id="0" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> <dim>1</dim> </port> </input> </layer> </layers> <edges> <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/> <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/> </edges> </net>""")
    core = Core()
    # The extension shared library has a platform-dependent file name.
    if platform == "win32":
        core.add_extension(library_path="template_extension.dll")
    else:
        core.add_extension(library_path="libtemplate_extension.so")
    func = core.read_model(model=model, init_from_buffer=True)
    assert isinstance(func, Function)
def test_output_set_friendly_name(device):
    """Renaming a compiled model's output node is reflected in friendly_name."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    port = compiled.output(0)
    node = port.get_node()
    node.set_friendly_name("output_1")
    assert isinstance(port, ConstOutput)
    assert node.friendly_name == "output_1"
def test_infer_new_request_tensor_numpy_copy(device):
    """Two inferences on the same (copied) tensor give identical argmax."""
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    tensor = Tensor(read_image())
    exec_net = ie.compile_model(func, device)
    first_result = exec_net.infer_new_request({"data": tensor})
    second_result = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(first_result) == 2
    assert np.argmax(first_result) == np.argmax(second_result)
def test_query_model(device):
    """Every layer reported by query_model is a model op, all mapped to device."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    query_res = core.query_model(model=model, device_name=device)
    known_names = [op.friendly_name for op in model.get_ordered_ops()]
    unknown = [key for key in query_res.keys() if key not in known_names]
    assert unknown == [], "Not all network layers present in query_model results"
    assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"
def test_infer_tensor_wrong_input_data(device):
    """A non-str/non-int dict key for a tensor must raise TypeError."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    contiguous = np.ascontiguousarray(read_image())
    shared_tensor = Tensor(contiguous, shared_memory=True)
    compiled = core.compile_model(model, device)
    with pytest.raises(TypeError) as e:
        compiled.infer_new_request({4.5: shared_tensor})
    assert "Incompatible key type!" in str(e.value)
def test_infer_tensor_numpy_shared_memory(device):
    """Repeated inference on a shared-memory tensor stays consistent."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    contiguous = np.ascontiguousarray(read_image())
    shared_tensor = Tensor(contiguous, shared_memory=True)
    compiled = core.compile_model(model, device)
    first_result = compiled.infer_new_request({"data": shared_tensor})
    second_result = compiled.infer_new_request({"data": shared_tensor})
    assert np.argmax(first_result) == 2
    assert np.argmax(first_result) == np.argmax(second_result)
def test_infer_numpy_model_from_buffer(device):
    """A model read from in-memory xml/bin buffers infers correctly.

    Fix: the weights buffer was stored in a local named `bin`, shadowing
    the builtin; renamed to `weights_data` (and `xml` to `xml_data` for
    symmetry).
    """
    core = Core()
    with open(test_net_bin, "rb") as f:
        weights_data = f.read()
    with open(test_net_xml, "rb") as f:
        xml_data = f.read()
    func = core.read_model(model=xml_data, weights=weights_data)
    img = read_image()
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res) == 2
def test_set_tensors(device):
    """Exercise every set_tensor(s) overload and verify each via get_tensor.

    Fix: the original rebound a variable named `output_node` to
    `exec_net.inputs[0]` for the input-tensor checks, which was misleading;
    input ports are now held in `input_node`.
    """
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net = core.compile_model(func, device)
    data1 = read_image()
    tensor1 = Tensor(data1)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
    data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    tensor3 = Tensor(data3)
    data4 = np.zeros(shape=(1, 10), dtype=np.float32)
    tensor4 = Tensor(data4)
    request = exec_net.create_infer_request()
    # set_tensors: by tensor name
    request.set_tensors({"data": tensor1, "fc_out": tensor2})
    t1 = request.get_tensor("data")
    t2 = request.get_tensor("fc_out")
    assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2)
    assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)
    # set_output_tensors: by output index
    request.set_output_tensors({0: tensor2})
    output_node = exec_net.outputs[0]
    t3 = request.get_tensor(output_node)
    assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2)
    # set_input_tensors: by input index
    request.set_input_tensors({0: tensor1})
    input_node = exec_net.inputs[0]
    t4 = request.get_tensor(input_node)
    assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2)
    # set_tensor: by input port object
    request.set_tensor(input_node, tensor3)
    t5 = request.get_tensor(input_node)
    assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)
    # set_input_tensor: single-input shortcut, then explicit index
    request.set_input_tensor(tensor3)
    t6 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2)
    request.set_input_tensor(0, tensor1)
    t7 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2)
    # set_output_tensor: single-output shortcut, then explicit index
    request.set_output_tensor(tensor2)
    t8 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2)
    request.set_output_tensor(0, tensor4)
    t9 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2)
def test_import_onnx_function():
    """An ONNX add_abc model computes a + b + c."""
    model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx")
    core = Core()
    model = core.read_model(model=model_path)
    dtype = np.float32
    inputs = [np.array([v], dtype=dtype) for v in (1.0, 2.0, 3.0)]
    computation = get_runtime().computation(model)
    result = computation(*inputs)
    assert np.allclose(result, np.array([6], dtype=dtype))
def test_export_import():
    """Export a compiled model to disk, re-import it, and infer."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, "CPU")
    exported_path = "exported_model.bin"
    compiled.export_model(network_model=exported_path)
    assert os.path.exists(exported_path)
    imported = core.import_network(exported_path, "CPU")
    os.remove(exported_path)
    res = imported.infer({"data": read_image()})
    assert np.argmax(res["fc_out"][0]) == 3
    del imported
    del core
def test_import_onnx_with_external_data():
    """An ONNX model whose third input lives in an external data file loads and runs."""
    model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx")
    core = Core()
    model = core.read_model(model=model_path)
    dtype = np.float32
    value_a = np.array([1.0, 3.0, 5.0], dtype=dtype)
    value_b = np.array([3.0, 5.0, 1.0], dtype=dtype)
    # third input [5.0, 1.0, 3.0] read from external file
    computation = get_runtime().computation(model)
    result = computation(value_a, value_b)
    assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype))
def test_register_plugins():
    """A plugin registered via a platform-specific XML can be used by name."""
    ie = Core()
    # pick the plugins XML that matches the current OS
    plugins_by_platform = {
        "linux": plugins_xml,
        "linux2": plugins_xml,
        "darwin": plugins_osx_xml,
        "win32": plugins_win_xml,
    }
    if platform in plugins_by_platform:
        ie.register_plugins(plugins_by_platform[platform])
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.compile_model(func, "CUSTOM")
    assert isinstance(exec_net, ExecutableNetwork), (
        "Cannot load the network to "
        "the registered plugin with name 'CUSTOM' "
        "registered in the XML file"
    )
def test_infer_mixed_keys(device):
    """infer() accepts a dict mixing integer-index and string-name keys."""
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    compiled = core.compile_model(model, device)
    img = read_image()
    image_tensor = Tensor(img)
    ones_tensor = Tensor(np.ones(shape=img.shape, dtype=np.float32))
    request = compiled.create_infer_request()
    res = request.infer({0: ones_tensor, "data": image_tensor})
    assert np.argmax(res) == 2
def test_get_profiling_info(device):
    """With PERF_COUNT enabled, profiling info reports node timing details."""
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
    request.infer({0: read_image()})
    assert request.latency > 0
    prof_info = request.get_profiling_info()
    soft_max_node = next(node for node in prof_info if node.node_name == "fc_out")
    assert soft_max_node.node_type == "Softmax"
    assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT
    for attr in ("real_time", "cpu_time"):
        assert isinstance(getattr(soft_max_node, attr), datetime.timedelta)
    assert isinstance(soft_max_node.exec_type, str)
def test_infer_mixed_keys_incompatible(device):
    """Inferring with a key set that mixes an input index with an output
    name must raise TypeError.

    Fix: this test was also named test_infer_mixed_keys, duplicating an
    earlier test in this module; Python's last-definition-wins meant pytest
    never collected the first one. Renamed to reflect the negative path it
    actually checks.
    """
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)
    img = read_image()
    tensor = Tensor(img)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
    request = exec_net.create_infer_request()
    with pytest.raises(TypeError) as e:
        # "fc_out" is an output name, not an input — must be rejected
        request.infer({0: tensor, "fc_out": tensor2})
    assert "incompatible function arguments!" in str(e.value)
def test_serialize_pass():
    """serialize() round-trips a function through xml/bin files."""
    core = Core()
    xml_path, bin_path = "serialized_function.xml", "serialized_function.bin"
    func = get_test_function()
    serialize(func, xml_path, bin_path)
    assert func is not None
    round_trip = core.read_model(model=xml_path, weights=bin_path)
    assert func.get_parameters() == round_trip.get_parameters()
    assert func.get_ordered_ops() == round_trip.get_ordered_ops()
    for path in (xml_path, bin_path):
        os.remove(path)
def test_cancel(device):
    """Cancelling an async request makes wait()/wait_for() raise INFER_CANCELLED."""
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    img = read_image()
    request = compiled.create_infer_request()
    # start by input index, cancel, then wait() must fail
    request.start_async({0: img})
    request.cancel()
    with pytest.raises(RuntimeError) as e:
        request.wait()
    assert "[ INFER_CANCELLED ]" in str(e.value)
    # start by tensor name, cancel, then wait_for() must fail
    request.start_async({"data": img})
    request.cancel()
    with pytest.raises(RuntimeError) as e:
        request.wait_for(1)
    assert "[ INFER_CANCELLED ]" in str(e.value)
def test_infer_queue_fail_on_cpp_func(device):
    """A C++-side error raised inside a queue callback surfaces as RuntimeError."""
    jobs = 6
    num_request = 4
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    infer_queue = AsyncInferQueue(compiled, num_request)

    def callback(request, _):
        # requesting a nonexistent tensor fails in the C++ layer
        request.get_tensor("Unknown")

    img = read_image()
    infer_queue.set_callback(callback)
    assert infer_queue.is_ready
    with pytest.raises(RuntimeError) as e:
        for _ in range(jobs):
            infer_queue.start_async({"data": img})
        infer_queue.wait_all()
    assert "Port for tensor name Unknown was not found" in str(e.value)
def test_serialize_pass_v2():
    """serialize() round-trips a hand-built opset8 graph through xml/bin files."""
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    shape = [100, 100, 2]
    param_a = ov.opset8.parameter(shape, dtype=np.float32, name="A")
    param_b = ov.opset8.parameter(shape, dtype=np.float32, name="B")
    graph = ov.opset8.floor(ov.opset8.minimum(ov.opset8.abs(param_a), param_b))
    func = Function(graph, [param_a, param_b], "Function")
    serialize(func, xml_path, bin_path)
    assert func is not None
    restored = core.read_model(model=xml_path, weights=bin_path)
    assert func.get_parameters() == restored.get_parameters()
    assert func.get_ordered_ops() == restored.get_ordered_ops()
    for path in (xml_path, bin_path):
        os.remove(path)
def test_infer_queue_fail_on_py_func(device):
    """A Python error raised inside a queue callback surfaces as TypeError."""
    jobs = 1
    num_request = 1
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    infer_queue = AsyncInferQueue(compiled, num_request)

    def callback(request, _):
        # deliberately invalid: InferRequest does not support '+'
        request = request + 21

    img = read_image()
    infer_queue.set_callback(callback)
    assert infer_queue.is_ready
    with pytest.raises(TypeError) as e:
        for _ in range(jobs):
            infer_queue.start_async({"data": img})
        infer_queue.wait_all()
    assert "unsupported operand type(s) for +" in str(e.value)
def test_infer_queue(device):
    """Every queued job finishes and reports a positive latency."""
    jobs = 8
    num_request = 4
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    infer_queue = AsyncInferQueue(compiled, num_request)
    jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)]

    def callback(request, job_id):
        jobs_done[job_id]["finished"] = True
        jobs_done[job_id]["latency"] = request.latency

    img = read_image()
    infer_queue.set_callback(callback)
    assert infer_queue.is_ready
    for job_id in range(jobs):
        infer_queue.start_async({"data": img}, job_id)
    infer_queue.wait_all()
    assert all(job["finished"] for job in jobs_done)
    assert all(job["latency"] > 0 for job in jobs_done)
def test_start_async(device):
    """Each async request fires its completion callback exactly once."""
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    img = read_image()
    jobs = 3
    requests = [compiled.create_infer_request() for _ in range(jobs)]

    def callback(callbacks_info):
        time.sleep(0.01)
        callbacks_info["finished"] += 1

    callbacks_info = {"finished": 0}
    for request in requests:
        request.set_callback(callback, callbacks_info)
        request.start_async({0: img})
    for request in requests:
        request.wait()
        assert request.latency > 0
    assert callbacks_info["finished"] == jobs
def test_get_runtime_function(device):
    """get_runtime_function() on a compiled model yields a Function."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    assert isinstance(compiled.get_runtime_function(), Function)
def test_get_metric(device):
    """The NETWORK_NAME metric reports the model's name."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    assert compiled.get_metric("NETWORK_NAME") == "test_model"
def test_inputs_items(device):
    """Items of exec_net.inputs are ConstOutput objects."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    first_input = compiled.inputs[0]
    assert isinstance(first_input, ConstOutput)
def test_get_output(device):
    """exec_net.output() with no argument returns a ConstOutput."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    assert isinstance(compiled.output(), ConstOutput)
def test_ngraph_preprocess_model():
    # Build an Add model from an in-memory IR, attach pre/post-processing
    # steps (type conversion, scale, mean, and a custom abs postprocess),
    # then check the computed result against a hand-worked expectation.
    model = bytes(b"""<net name="add_model" version="10"> <layers> <layer id="0" name="x" type="Parameter" version="opset1"> <data element_type="i32" shape="2,2,2"/> <output> <port id="0" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> </output> </layer> <layer id="1" name="y" type="Parameter" version="opset1"> <data element_type="i32" shape="2,2,2"/> <output> <port id="0" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> </output> </layer> <layer id="2" name="sum" type="Add" version="opset1"> <input> <port id="0"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> <port id="1"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> </input> <output> <port id="2" precision="FP32"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> </output> </layer> <layer id="3" name="sum/sink_port_0" type="Result" version="opset1"> <input> <port id="0"> <dim>2</dim> <dim>2</dim> <dim>2</dim> </port> </input> </layer> </layers> <edges> <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/> <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/> <edge from-layer="2" from-port="2" to-layer="3" to-port="0"/> </edges> </net>""")
    core = Core()
    function = core.read_model(model=model)

    # Custom postprocess step applied to the output: absolute value.
    @custom_preprocess_function
    def custom_preprocess(output: Output):
        return ops.abs(output)

    p = PrePostProcessor(function)
    # input 1: convert to f32 then multiply by 1/0.5 scaling semantics
    p.input(1).preprocess().convert_element_type(Type.f32).scale(0.5)
    # input 0: convert to f32 then subtract mean of 5
    p.input(0).preprocess().convert_element_type(Type.f32).mean(5.)
    p.output(0).postprocess().custom(custom_preprocess)
    function = p.build()
    input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.float32)
    # expected = |(x - 5) + (x / 0.5)| applied elementwise to input_data
    # — NOTE(review): derived from the assertion values; verify against
    # the PrePostProcessor scale/mean semantics.
    expected_output = np.array([[[2, 1], [4, 7]], [[10, 13], [16, 19]]]).astype(np.float32)
    runtime = get_runtime()
    computation = runtime.computation(function)
    output = computation(input_data, input_data)
    assert np.equal(output, expected_output).all()