Example #1
def test_input_get_source_output(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    name = input_node.get_source_output().get_node().get_friendly_name()
    assert name == "fc_out"
Example #2
def test_input_get_tensor(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    tensor = input_node.get_tensor()
    assert isinstance(tensor, DescriptorTensor)
Example #3
def test_input_get_partial_shape(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    expected_partial_shape = PartialShape([1, 10])
    assert input_node.get_partial_shape() == expected_partial_shape
Example #4
def test_const_output_docs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    expected_string = "openvino.runtime.Input wraps ov::Input<Node>"
    assert input_node.__doc__ == expected_string
Example #5
def test_input_rt_info(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    rt_info = input_node.rt_info
    assert isinstance(rt_info, RTMap)
Example #6
def test_get_config(device):
    core = Core()
    if core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    config = exec_net.get_config("PERF_COUNT")
    assert config == "NO"
Example #7
def test_inputs_docs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    inputs = exec_net.inputs
    input_0 = inputs[0]
    expected_string = "openvino.runtime.ConstOutput wraps ov::Output<Const ov::Node >"
    assert input_0.__doc__ == expected_string
Example #8
def test_batched_tensors(device):
    batch = 4
    one_shape = Shape([1, 2, 2, 2])
    batch_shape = Shape([batch, 2, 2, 2])
    one_shape_size = np.prod(one_shape)

    core = Core()

    core.register_plugin("openvino_template_plugin", "TEMPLATE")

    data1 = ops.parameter(batch_shape, np.float32)
    data1.set_friendly_name("input0")
    data1.get_output_tensor(0).set_names({"tensor_input0"})
    data1.set_layout(Layout("N..."))

    constant = ops.constant([1], np.float32)

    op1 = ops.add(data1, constant)
    op1.set_friendly_name("Add0")

    res1 = ops.result(op1)
    res1.set_friendly_name("Result0")
    res1.get_output_tensor(0).set_names({"tensor_output0"})

    model = Model([res1], [data1])

    compiled = core.compile_model(model, "TEMPLATE")

    buffer = np.zeros([one_shape_size * batch * 2], dtype=np.float32)

    req = compiled.create_infer_request()

    tensors = []

    for i in range(0, batch):
        _start = i * one_shape_size * 2
        # Use the special Tensor constructor: it creates a Tensor from a
        # pointer into the original buffer, so it only needs the start of
        # the slice plus the shape to "crop" to.
        tensor = Tensor(buffer[_start:(_start + 1)], one_shape)
        tensors.append(tensor)

    req.set_input_tensors(tensors)  # using list overload!

    actual_tensor = req.get_tensor("tensor_output0")
    actual = actual_tensor.data
    for test_num in range(0, 5):
        for i in range(0, batch):
            tensors[i].data[:] = test_num + 10

        req.infer()  # Adds '1' to each element

        # Reference values for each batch:
        _tmp = np.array([test_num + 11] * one_shape_size,
                        dtype=np.float32).reshape([2, 2, 2])

        for j in range(0, batch):
            assert np.array_equal(actual[j], _tmp)
Example #9
def test_get_input_tensor_name(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.input("data")
    input_node = input.get_node()
    name = input_node.friendly_name
    assert isinstance(input, ConstOutput)
    assert name == "data"
Example #10
def test_infer_new_request_wrong_port_name(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    with pytest.raises(KeyError) as e:
        exec_net.infer_new_request({"_data_": tensor})
    assert "Port for tensor _data_ was not found!" in str(e.value)
def main():
    args = build_argparser().parse_args()

    # Plugin initialization
    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()
    core.set_config(config={"GPU_ENABLE_LOOP_UNROLLING": "NO", "CACHE_DIR": "./"}, device_name="GPU")

    # Read IR
    log.info('Reading model {}'.format(args.model))
    model = core.read_model(args.model)

    if len(model.inputs) != 1:
        raise RuntimeError("Demo supports only single input topologies")
    input_tensor_name = model.inputs[0].get_any_name()

    if args.output_blob is not None:
        output_tensor_name = args.output_blob
    else:
        if len(model.outputs) != 1:
            raise RuntimeError("Demo supports only single output topologies")
        output_tensor_name = model.outputs[0].get_any_name()

    characters = get_characters(args)
    codec = CTCCodec(characters, args.designated_characters, args.top_k)
    if len(codec.characters) != model.output(output_tensor_name).shape[2]:
        raise RuntimeError("The text recognition model does not correspond to decoding character list")

    input_batch_size, input_channel, input_height, input_width = model.inputs[0].shape

    # Read and pre-process input image (NOTE: one image only)
    preprocessing_start_time = perf_counter()
    input_image = preprocess_input(args.input, height=input_height, width=input_width)[None, :, :, :]
    preprocessing_total_time = perf_counter() - preprocessing_start_time
    if input_batch_size != input_image.shape[0]:
        raise RuntimeError("The model's input batch size should equal the input image's batch size")
    if input_channel != input_image.shape[1]:
        raise RuntimeError("The model's input channel should equal the input image's channel")

    # Loading model to the plugin
    compiled_model = core.compile_model(model, args.device)
    infer_request = compiled_model.create_infer_request()
    log.info('The model {} is loaded to {}'.format(args.model, args.device))

    # Start sync inference
    start_time = perf_counter()
    for _ in range(args.number_iter):
        infer_request.infer(inputs={input_tensor_name: input_image})
        preds = infer_request.get_tensor(output_tensor_name).data[:]
        result = codec.decode(preds)
        print(result)
    total_latency = ((perf_counter() - start_time) / args.number_iter + preprocessing_total_time) * 1e3
    log.info("Metrics report:")
    log.info("\tLatency: {:.1f} ms".format(total_latency))

    sys.exit()
Example #12
def test_get_results(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    exec_net = core.compile_model(func, device)
    img = read_image()
    request = exec_net.create_infer_request()
    outputs = request.infer({0: img})
    assert np.allclose(list(outputs.values()), list(request.results.values()))
Example #13
def test_inputs_get_friendly_name(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    inputs = exec_net.inputs
    input_0 = inputs[0]
    node = input_0.get_node()
    name = node.friendly_name
    assert name == "data"
Example #14
def test_output_set_friendly_name(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output = exec_net.output(0)
    output_node = output.get_node()
    output_node.set_friendly_name("output_1")
    name = output_node.friendly_name
    assert isinstance(output, ConstOutput)
    assert name == "output_1"
Example #15
def test_reshape(device):
    shape = Shape([1, 10])
    param = ops.parameter(shape, dtype=np.float32)
    model = Model(ops.relu(param), [param])
    ref_shape = model.input().partial_shape
    ref_shape[0] = 3
    model.reshape(ref_shape)
    core = Core()
    compiled = core.compile_model(model, device)
    assert compiled.input().partial_shape == ref_shape
Example #16
def test_infer_tensor_wrong_input_data(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    with pytest.raises(TypeError) as e:
        exec_net.infer_new_request({0.: tensor})
    assert "Incompatible key type for tensor: 0." in str(e.value)
Example #17
def test_get_results(device):
    core = Core()
    data = ops.parameter([10], np.float64)
    model = Model(ops.split(data, 0, 5), [data])
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
    inputs = [np.random.normal(size=list(compiled.input().shape))]
    results = request.infer(inputs)
    for output in compiled.outputs:
        assert np.array_equal(results[output], request.results[output])
Example #18
def test_tensor_setter(device):
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled_1 = core.compile_model(model=model, device_name=device)
    compiled_2 = core.compile_model(model=model, device_name=device)
    compiled_3 = core.compile_model(model=model, device_name=device)

    img = read_image()
    tensor = Tensor(img)

    request1 = compiled_1.create_infer_request()
    request1.set_tensor("data", tensor)
    t1 = request1.get_tensor("data")

    assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2)

    res = request1.infer({0: tensor})
    k = list(res)[0]
    res_1 = np.sort(res[k])
    t2 = request1.get_tensor("fc_out")
    assert np.allclose(t2.data, res[k].data, atol=1e-2, rtol=1e-2)

    request = compiled_2.create_infer_request()
    res = request.infer({"data": tensor})
    res_2 = np.sort(request.get_tensor("fc_out").data)
    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)

    request.set_tensor("data", tensor)
    t3 = request.get_tensor("data")
    assert np.allclose(t3.data, t1.data, atol=1e-2, rtol=1e-2)

    request = compiled_3.create_infer_request()
    request.set_tensor(model.inputs[0], tensor)
    t1 = request.get_tensor(model.inputs[0])

    assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2)

    res = request.infer()
    k = list(res)[0]
    res_1 = np.sort(res[k])
    t2 = request.get_tensor(model.outputs[0])
    assert np.allclose(t2.data, res[k].data, atol=1e-2, rtol=1e-2)
Example #19
def test_infer_new_request_tensor_numpy_copy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": img})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(
        res_img[list(res_img)[0]])
Example #20
def test_input_update_rt_info(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input = exec_net.output(0)
    input_node = input.get_node().inputs()[0]
    rt = input_node.get_rt_info()
    rt["test12345"] = "test"
    for k, v in input_node.get_rt_info().items():
        assert k == "test12345"
        assert isinstance(v, Parameter)
Example #21
def test_infer_numpy_model_from_buffer(device):
    core = Core()
    with open(test_net_bin, "rb") as f:
        bin = f.read()
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=bin)
    img = read_image()
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res[list(res)[0]]) == 2
Example #22
class InferenceEngine:
    def __init__(self, model_path, device, stride):
        self.device = device
        self.stride = stride

        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        self.core = Core()

        log.info('Reading model {}'.format(model_path))
        self.model = self.core.read_model(model_path)

        required_output_keys = {'features', 'heatmaps', 'pafs'}
        for output_tensor_name in required_output_keys:
            try:
                self.model.output(output_tensor_name)
            except RuntimeError:
                raise RuntimeError("The demo supports only topologies with the following output keys: {}".format(
                    ', '.join(required_output_keys)))

        self.input_tensor_name = self.model.inputs[0].get_any_name()
        compiled_model = self.core.compile_model(self.model, self.device)
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_path, self.device))

    def infer(self, img):
        img = img[0:img.shape[0] - (img.shape[0] % self.stride),
                  0:img.shape[1] - (img.shape[1] % self.stride)]
        n, c, h, w = self.model.inputs[0].shape
        if h != img.shape[0] or w != img.shape[1]:
            self.model.reshape({self.input_tensor_name: PartialShape([n, c, img.shape[0], img.shape[1]])})
            compiled_model = self.core.compile_model(self.model, self.device)
            self.infer_request = compiled_model.create_infer_request()
        img = np.transpose(img, (2, 0, 1))[None, ]

        self.infer_request.infer({self.input_tensor_name: img})
        inference_result = {name: self.infer_request.get_tensor(name).data[:] for name in {'features', 'heatmaps', 'pafs'}}

        inference_result = (inference_result['features'][0],
                            inference_result['heatmaps'][0], inference_result['pafs'][0])
        return inference_result
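A minimal usage sketch for the class above, not part of the original demo: the model path, device, stride value, and the OpenCV image loading are all assumptions made for illustration, and any model-specific preprocessing is omitted.

import cv2
import numpy as np

# Hypothetical values; replace with a real pose-estimation model and image.
engine = InferenceEngine('human-pose-estimation-3d-0001.xml', 'CPU', stride=8)
frame = cv2.imread('input.jpg').astype(np.float32)  # HWC, BGR; dtype cast only, no normalization
features, heatmaps, pafs = engine.infer(frame)
print(features.shape, heatmaps.shape, pafs.shape)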
Example #23
def set_property():
    #! [set_property]
    core = Core()
    cpu_config = {}
    gpu_config = {}
    model = core.read_model(model_path)
    core.set_property(device_name="CPU", properties=cpu_config)
    core.set_property(device_name="GPU", properties=gpu_config)
    compiled_model = core.compile_model(model=model,
                                        device_name="MULTI:GPU,CPU")
    # Query the optimal number of requests
    nireq = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
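A possible continuation, not in the original snippet: one way to use the queried nireq value is to size an AsyncInferQueue with it. The frames iterable and the completion callback are assumptions for illustration.

from openvino.runtime import AsyncInferQueue

# Hypothetical continuation of set_property(): drive the compiled model with
# as many parallel requests as the device reported to be optimal.
infer_queue = AsyncInferQueue(compiled_model, nireq)

def completion_callback(request, frame_id):
    # Called when a request finishes; its output tensors are ready to read.
    print(frame_id, request.get_output_tensor(0).data.shape)

infer_queue.set_callback(completion_callback)
for frame_id, frame in enumerate(frames):  # 'frames' is assumed input data
    infer_queue.start_async({0: frame}, userdata=frame_id)
infer_queue.wait_all()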
Example #24
def available_devices_2():
    #! [available_devices_2]
    match_list = []
    all_devices = "MULTI:"
    dev_match_str = "MYRIAD"
    core = Core()
    model = core.read_model(model_path)
    for d in core.available_devices:
        if dev_match_str in d:
            match_list.append(d)
    all_devices += ",".join(match_list)
    compiled_model = core.compile_model(model=model, device_name=all_devices)
Example #25
def part0():
#! [part0]
    core = Core()

    # Read a network in IR, PaddlePaddle, or ONNX format:
    model = core.read_model(model_path)

    # Compile a model on AUTO using the default list of device candidates.
    # The following lines are equivalent:
    compiled_model = core.compile_model(model=model)
    compiled_model = core.compile_model(model=model, device_name="AUTO")

    # Optional
    # You can also specify the devices to be used by AUTO.
    # The following lines are equivalent:
    compiled_model = core.compile_model(model=model, device_name="AUTO:GPU,CPU")
    compiled_model = core.compile_model(model=model, device_name="AUTO", config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"})

    # Optional
    # The AUTO plugin is pre-configured (globally) with the explicit option:
    core.set_property(device_name="AUTO", properties={"MULTI_DEVICE_PRIORITIES":"GPU,CPU"})
    def __init__(self, model_path, device):
        log.info('OpenVINO Runtime')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()

        log.info('Reading model {}'.format(model_path))
        self.model = core.read_model(model_path)
        self.input_tensor_name = "Placeholder"
        compiled_model = core.compile_model(self.model, device)
        self.output_tensor = compiled_model.outputs[0]
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_path, device))
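Example #27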
def test_const_output_get_names(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input_name = "data"
    node = exec_net.input(input_name)
    expected_names = set()
    expected_names.add(input_name)
    assert node.get_names() == expected_names
    assert node.names == expected_names
    assert node.get_any_name() == input_name
    assert node.any_name == input_name
Example #28
def test_infer_tensor_numpy_shared_memory(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": img})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(
        res_img[list(res_img)[0]])
Example #29
def test_infer_new_request_return_type(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    exec_net = ie.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    arr = res[list(res)[0]][0]

    assert isinstance(arr, np.ndarray)
    assert arr.itemsize == 4
    assert arr.shape == (10,)
    assert arr.dtype == "float32"
    assert arr.nbytes == 40
Example #30
def test_export_import():
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, "CPU")

    user_stream = compiled.export_model()

    new_compiled = core.import_model(user_stream, "CPU")

    img = read_image()
    res = new_compiled.infer_new_request({"data": img})

    assert np.argmax(res[new_compiled.outputs[0]]) == 2
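A hedged follow-up sketch, not part of the original test: the exported blob can also be written to disk and imported again later. The blob filename is an assumption, and the sketch relies on export_model() returning the blob as a bytes object when called without a stream argument.

def test_export_import_via_file():
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, "CPU")

    blob_path = "exported_model.blob"  # hypothetical file name
    with open(blob_path, "wb") as f:
        f.write(compiled.export_model())  # assumes export_model() returns bytes

    with open(blob_path, "rb") as f:
        new_compiled = core.import_model(f.read(), "CPU")

    img = read_image()
    res = new_compiled.infer_new_request({"data": img})
    assert np.argmax(res[new_compiled.outputs[0]]) == 2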