Example #1
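Checks that the first output port of a compiled model reports the element type name "f32".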
def test_output_type(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output = exec_net.output(0)
    output_type = output.get_element_type().get_type_name()
    assert output_type == "f32"
Example #2
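Checks that the input port named "data" exposes the expected partial shape [1, 3, 32, 32].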
def test_const_output_get_partial_shape(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input("data")
    expected_partial_shape = PartialShape([1, 3, 32, 32])
    assert node.get_partial_shape() == expected_partial_shape
Example #3
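Confirms that the first input port of a compiled model reports index 0.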
def test_input_get_index(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input_port = exec_net.input(0)  # renamed to avoid shadowing the built-in input()
    expected_idx = 0
    assert input_port.get_index() == expected_idx
Example #4
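Compares the string form of the first output's shape against Shape([1, 10]).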
def test_output_shape(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output = exec_net.output(0)
    expected_shape = Shape([1, 10])
    assert str(output.get_shape()) == str(expected_shape)
Example #5
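Builds a small Parameter -> ReLU function and runs it through the legacy IENetwork/Blob API, checking the result against NumPy's elementwise maximum.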
def test_core_class():
    input_shape = [1, 3, 4, 4]
    param = ov.parameter(input_shape, np.float32, name="parameter")
    relu = ov.relu(param, name="relu")
    func = Function([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    cnn_network = IENetwork(func)

    core = Core()
    core.set_config({}, device_name="CPU")
    executable_network = core.compile_model(cnn_network, "CPU", {})

    td = TensorDesc("FP32", input_shape, "NCHW")

    request = executable_network.create_infer_request()
    input_data = np.random.rand(*input_shape).astype(np.float32) - 0.5  # cast to match the FP32 TensorDesc

    expected_output = np.maximum(0.0, input_data)

    input_blob = Blob(td, input_data)

    request.set_input({"parameter": input_blob})
    request.infer()

    result = request.get_blob("relu").buffer

    assert np.allclose(result, expected_output)
Example #6
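Asserts that a compiled model exposes its inputs as a list containing a single port.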
def test_inputs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    inputs = exec_net.inputs
    assert isinstance(inputs, list)
    assert len(inputs) == 1
Example #7
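Registers a plugin under the custom name "BLA" and verifies a model can be compiled on it.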
def test_register_plugin():
    ie = Core()
    ie.register_plugin("MKLDNNPlugin", "BLA")
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.compile_model(func, "BLA")
    assert isinstance(exec_net, ExecutableNetwork), \
        "Cannot load the network to the registered plugin with name 'BLA'"
Example #8
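Runs a one-shot inference with a plain NumPy array and checks the arg-max of the result.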
def test_infer_new_request_numpy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    exec_net = ie.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res) == 2
Example #9
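Verifies that get_target_inputs() returns a set for every output port.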
def test_const_output_get_target_inputs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    outputs = exec_net.outputs
    for node in outputs:
        assert isinstance(node.get_target_inputs(), set)
Example #10
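Exercises set_tensor/get_tensor on infer requests from two separately compiled models and checks that both produce the same sorted output.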
def test_tensor_setter(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net_1 = core.compile_model(model=func, device_name=device)
    exec_net_2 = core.compile_model(model=func, device_name=device)

    img = read_image()
    tensor = Tensor(img)

    request1 = exec_net_1.create_infer_request()
    request1.set_tensor("data", tensor)
    t1 = request1.get_tensor("data")

    assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2)

    res = request1.infer({0: tensor})
    res_1 = np.sort(res[0])
    t2 = request1.get_tensor("fc_out")
    assert np.allclose(t2.data, res[0].data, atol=1e-2, rtol=1e-2)

    request = exec_net_2.create_infer_request()
    res = request.infer({"data": tensor})
    res_2 = np.sort(request.get_tensor("fc_out").data)
    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)

    request.set_tensor("data", tensor)
    t3 = request.get_tensor("data")
    assert np.allclose(t3.data, t1.data, atol=1e-2, rtol=1e-2)
Example #11
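Checks the docstring of the ConstOutput port obtained from input(0).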
def test_const_output_docs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input(0)
    expected_string = "openvino.impl.ConstOutput wraps ov::Output<Const ov::Node >"
    assert node.__doc__ == expected_string
Example #12
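The same docstring check, this time reaching the port through the inputs list.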
def test_inputs_docs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    inputs = exec_net.inputs
    input_0 = inputs[0]
    expected_string = "openvino.impl.ConstOutput wraps ov::Output<Const ov::Node >"
    assert input_0.__doc__ == expected_string
Example #13
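Reads the "PERF_COUNT" config key from a compiled model (skipped on the ARM plugin).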
def test_get_config(device):
    core = Core()
    if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due to CPU-dependent test")
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    config = exec_net.get_config("PERF_COUNT")
    assert config == "NO"
Example #14
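Retrieves the single input port and verifies the friendly name of its node.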
def test_get_input(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input_port = exec_net.input()  # renamed to avoid shadowing the built-in input()
    input_node = input_port.get_node()
    name = input_node.friendly_name
    assert isinstance(input_port, ConstOutput)
    assert name == "data"
Example #15
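Expects a RuntimeError when inference is requested for an unknown tensor name.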
def test_infer_new_request_wrong_port_name(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    with pytest.raises(RuntimeError) as e:
        exec_net.infer_new_request({"_data_": tensor})
    assert "Port for tensor name _data_ was not found." in str(e.value)
Example #16
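Reads the friendly name of the node behind the first input port.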
def test_inputs_get_friendly_name(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    inputs = exec_net.inputs
    input_0 = inputs[0]
    node = input_0.get_node()
    name = node.friendly_name
    assert name == "data"
Example #17
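Renames an output node via set_friendly_name and reads the new name back.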
def test_output_set_friendly_name(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output = exec_net.output(0)
    output_node = output.get_node()
    output_node.set_friendly_name("output_1")
    name = output_node.friendly_name
    assert isinstance(output, ConstOutput)
    assert name == "output_1"
Example #18
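Expects a TypeError when the input dict is keyed by an unsupported type (a float).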
def test_infer_tensor_wrong_input_data(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    with pytest.raises(TypeError) as e:
        exec_net.infer_new_request({4.5: tensor})
    assert "Incompatible key type!" in str(e.value)
Example #19
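Runs two one-shot inferences on the same Tensor and checks that the arg-max results agree.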
def test_infer_new_request_tensor_numpy_copy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(res_tensor) == 2
    assert np.argmax(res_tensor) == np.argmax(res_img)
Example #20
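The same double-inference check, with the Tensor created over shared memory from a contiguous array.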
def test_infer_tensor_numpy_shared_memory(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(res_tensor) == 2
    assert np.argmax(res_tensor) == np.argmax(res_img)
Example #21
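Reads the model from in-memory XML and BIN buffers instead of files before compiling and inferring.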
def test_infer_numpy_model_from_buffer(device):
    core = Core()
    with open(test_net_bin, "rb") as f:
        bin_data = f.read()  # renamed to avoid shadowing the built-in bin()
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=bin_data)
    img = read_image()
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res) == 2
Example #22
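Walks through the tensor-setting variants on an infer request: set_tensors by name, set_input_tensors/set_output_tensors by index, set_tensor by port, and the single-port set_input_tensor/set_output_tensor forms.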
def test_set_tensors(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net = core.compile_model(func, device)

    data1 = read_image()
    tensor1 = Tensor(data1)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
    data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    tensor3 = Tensor(data3)
    data4 = np.zeros(shape=(1, 10), dtype=np.float32)
    tensor4 = Tensor(data4)

    request = exec_net.create_infer_request()
    request.set_tensors({"data": tensor1, "fc_out": tensor2})
    t1 = request.get_tensor("data")
    t2 = request.get_tensor("fc_out")
    assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2)
    assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensors({0: tensor2})
    output_node = exec_net.outputs[0]
    t3 = request.get_tensor(output_node)
    assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensors({0: tensor1})
    input_node = exec_net.inputs[0]
    t4 = request.get_tensor(input_node)
    assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2)

    input_node = exec_net.inputs[0]
    request.set_tensor(input_node, tensor3)
    t5 = request.get_tensor(input_node)
    assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensor(tensor3)
    t6 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensor(0, tensor1)
    t7 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensor(tensor2)
    t8 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensor(0, tensor4)
    t9 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2)
Example #23
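Exports a compiled model to a file, imports it back with import_network, and runs inference on the restored network.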
def test_export_import():
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, "CPU")
    exported_net_file = "exported_model.bin"
    exec_net.export_model(network_model=exported_net_file)
    assert os.path.exists(exported_net_file)
    exec_net = core.import_network(exported_net_file, "CPU")
    os.remove(exported_net_file)
    img = read_image()
    res = exec_net.infer({"data": img})
    assert np.argmax(res["fc_out"][0]) == 3
    del exec_net
    del core
Example #24
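Registers plugins from a platform-specific XML file and compiles a model on the "CUSTOM" device it defines.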
def test_register_plugins():
    ie = Core()
    if platform == "linux" or platform == "linux2":
        ie.register_plugins(plugins_xml)
    elif platform == "darwin":
        ie.register_plugins(plugins_osx_xml)
    elif platform == "win32":
        ie.register_plugins(plugins_win_xml)

    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.compile_model(func, "CUSTOM")
    assert isinstance(exec_net,
                      ExecutableNetwork), "Cannot load the network to " \
                                          "the registered plugin with name 'CUSTOM' " \
                                          "registered in the XML file"
Example #25
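Feeds one infer call with inputs keyed both by index and by tensor name.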
def test_infer_mixed_keys(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    model = core.compile_model(func, device)

    img = read_image()
    tensor = Tensor(img)

    data2 = np.ones(shape=img.shape, dtype=np.float32)
    tensor2 = Tensor(data2)

    request = model.create_infer_request()
    res = request.infer({0: tensor2, "data": tensor})
    assert np.argmax(res) == 2
Example #26
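A second test_infer_mixed_keys variant: expects a TypeError when the output name "fc_out" is used as an input key.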
def test_infer_mixed_keys(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)

    img = read_image()
    tensor = Tensor(img)

    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)

    request = exec_net.create_infer_request()
    with pytest.raises(TypeError) as e:
        request.infer({0: tensor, "fc_out": tensor2})
    assert "incompatible function arguments!" in str(e.value)
Example #27
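Enables performance counters, then inspects per-node profiling data: status, real and CPU timings, and execution type.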
def test_get_profiling_info(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)
    img = read_image()
    request = exec_net.create_infer_request()
    request.infer({0: img})
    assert request.latency > 0
    prof_info = request.get_profiling_info()
    soft_max_node = next(node for node in prof_info if node.node_name == "fc_out")
    assert soft_max_node.node_type == "Softmax"
    assert soft_max_node.status == ProfilingInfo.Status.OPTIMIZED_OUT
    assert isinstance(soft_max_node.real_time, datetime.timedelta)
    assert isinstance(soft_max_node.cpu_time, datetime.timedelta)
    assert isinstance(soft_max_node.exec_type, str)
Example #28
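Exercises ReadValue/Assign memory state across repeated inferences: setting an initial state, resetting it, and letting it accumulate.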
def test_query_state_write_buffer(device, input_shape, data_type, mode):
    core = Core()
    if device == "CPU":
        if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
            pytest.skip("Can't run on ARM plugin")

    from openvino import Tensor
    from openvino.utils.types import get_dtype

    function = create_function_with_memory(input_shape, data_type)
    exec_net = core.compile_model(model=function, device_name=device)
    request = exec_net.create_infer_request()
    mem_states = request.query_state()
    mem_state = mem_states[0]

    assert mem_state.name == "var_id_667"
    # todo: Uncomment after fix 45611,
    #  CPU plugin returns outputs and memory state in FP32 in case of FP16 original precision
    # assert mem_state.state.tensor_desc.precision == data_type

    for i in range(1, 10):
        if mode == "set_init_memory_state":
            # create initial value
            const_init = 5
            init_array = np.full(input_shape,
                                 const_init,
                                 dtype=get_dtype(mem_state.state.element_type))
            tensor = Tensor(init_array)
            mem_state.state = tensor

            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape,
                                   1 + const_init,
                                   dtype=data_type)
        elif mode == "reset_memory_state":
            # reset initial state of ReadValue to zero
            mem_state.reset()
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})

            # always ones
            expected_res = np.full(input_shape, 1, dtype=data_type)
        else:
            res = request.infer({0: np.full(input_shape, 1, dtype=data_type)})
            expected_res = np.full(input_shape, i, dtype=data_type)
        assert np.allclose(res[0], expected_res, atol=1e-6), \
            "Expected values: {} \n Actual values: {} \n".format(expected_res, res)
Example #29
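Cancels in-flight asynchronous requests and expects "[ INFER_CANCELLED ]" from both wait() and wait_for().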
def test_cancel(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net = core.compile_model(func, device)
    img = read_image()
    request = exec_net.create_infer_request()

    request.start_async({0: img})
    request.cancel()
    with pytest.raises(RuntimeError) as e:
        request.wait()
    assert "[ INFER_CANCELLED ]" in str(e.value)

    request.start_async({"data": img})
    request.cancel()
    with pytest.raises(RuntimeError) as e:
        request.wait_for(1)
    assert "[ INFER_CANCELLED ]" in str(e.value)
Example #30
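The current-API counterpart of Example #5: the same Parameter -> ReLU function inferred through a Tensor-based request.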
def test_core_class():
    input_shape = [1, 3, 4, 4]
    param = ov.parameter(input_shape, np.float32, name="parameter")
    relu = ov.relu(param, name="relu")
    func = Function([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    core = Core()
    model = core.compile_model(func, "CPU", {})

    request = model.create_infer_request()
    input_data = np.random.rand(*input_shape).astype(np.float32) - 0.5

    expected_output = np.maximum(0.0, input_data)

    input_tensor = Tensor(input_data)
    results = request.infer({"parameter": input_tensor})

    assert np.allclose(results, expected_output)