Example no. 1
def test_infer_new_request_tensor_numpy_copy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": img})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(
        res_img[list(res_img)[0]])
Example no. 2
def test_infer_numpy_model_from_buffer(device):
    core = Core()
    with open(test_net_bin, "rb") as f:
        bin = f.read()
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=bin)
    img = read_image()
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res[list(res)[0]]) == 2
Example no. 3
    def __init__(self, device='CPU'):
        """ Constructor
         :param device: specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: is acceptable
         :param extension: path to the extension library with custom layers
        """
        self._tmp_dir = create_tmp_dir()
        self.device = device
        self.model = None
        self.infer_request = None

        self._ie = Core()
Example no. 4
def test_read_model_as_path():
    core = Core()
    func = core.read_model(model=Path(test_net_xml),
                           weights=Path(test_net_bin))
    assert isinstance(func, Model)

    func = core.read_model(model=test_net_xml, weights=Path(test_net_bin))
    assert isinstance(func, Model)

    func = core.read_model(model=Path(test_net_xml))
    assert isinstance(func, Model)
Example no. 5
def test_input_update_rt_info(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output = exec_net.output(0)
    input_node = output.get_node().inputs()[0]  # input port of the Result node
    rt = input_node.get_rt_info()
    rt["test12345"] = "test"
    for k, v in input_node.get_rt_info().items():
        assert k == "test12345"
        assert isinstance(v, Parameter)
Example no. 6
def test_net_from_buffer_valid():
    core = Core()
    with open(test_net_bin, "rb") as f:
        bin = f.read()
    with open(model_path()[0], "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=bin)
    ref_func = core.read_model(model=test_net_xml, weights=test_net_bin)
    assert func.get_parameters() == ref_func.get_parameters()
    assert func.get_results() == ref_func.get_results()
    assert func.get_ordered_ops() == ref_func.get_ordered_ops()
Example no. 7
def part5():
#! [part5]
    core = Core()
    model = core.read_model(model_path)
    core.set_property(device_name="CPU", properties={})
    core.set_property(device_name="MYRIAD", properties={})
    # With no explicit device_name, compile_model defaults to AUTO.
    compiled_model = core.compile_model(model=model)
    # Equivalent to the implicit default above.
    compiled_model = core.compile_model(model=model, device_name="AUTO")
#! [part5]
Example no. 8
def available_devices_2():
    #! [available_devices_2]
    match_list = []
    all_devices = "MULTI:"
    dev_match_str = "MYRIAD"
    core = Core()
    model = core.read_model(model_path)
    for d in core.available_devices:
        if dev_match_str in d:
            match_list.append(d)
    all_devices += ",".join(match_list)
    compiled_model = core.compile_model(model=model, device_name=all_devices)
    #! [available_devices_2]
Example no. 9
def test_infer_tensor_numpy_shared_memory(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": img})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(
        res_img[list(res_img)[0]])
Example no. 10
    def __init__(self, model_path, device):
        log.info('OpenVINO Runtime')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()

        log.info('Reading model {}'.format(model_path))
        self.model = core.read_model(model_path)
        self.input_tensor_name = "Placeholder"
        compiled_model = core.compile_model(self.model, device)
        self.output_tensor = compiled_model.outputs[0]
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_path, device))
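The snippet above is an __init__ only; below is a minimal sketch of a matching inference call, assuming the class keeps the request and tensors created above (the method name infer is hypothetical):

    def infer(self, input_data):
        # Feed the named input and read back the first output's data.
        self.infer_request.infer({self.input_tensor_name: input_data})
        return self.infer_request.get_tensor(self.output_tensor).data[:]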
Example no. 11
def test_const_output_get_names(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    input_name = "data"
    node = exec_net.input(input_name)
    expected_names = set()
    expected_names.add(input_name)
    assert node.get_names() == expected_names
    assert node.names == expected_names
    assert node.get_any_name() == input_name
    assert node.any_name == input_name
Example no. 12
def test_export_import():
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, "CPU")

    user_stream = compiled.export_model()

    new_compiled = core.import_model(user_stream, "CPU")

    img = read_image()
    res = new_compiled.infer_new_request({"data": img})

    assert np.argmax(res[new_compiled.outputs[0]]) == 2
Example no. 13
def test_infer_new_request_return_type(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    exec_net = ie.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    arr = res[list(res)[0]][0]

    assert isinstance(arr, np.ndarray)
    assert arr.itemsize == 4
    assert arr.shape == (10,)
    assert arr.dtype == "float32"
    assert arr.nbytes == 40
Example no. 14
    def __init__(self, config_entry, model_name='', delayed_model_loading=False,
                 preprocessor=None, postpone_inputs_configuration=False):
        super().__init__(config_entry, model_name=model_name)
        self._set_variable = False
        self.ie_config = self.config.get('ie_config')
        self.ie_core = Core()
        if self.ie_config:
            ov_set_config(self.ie_core, self.ie_config)
        self._delayed_model_loading = delayed_model_loading
        dlsdk_launcher_config = DLSDKLauncherConfigValidator(
            'OpenVINO_Launcher', fields=self.parameters(), delayed_model_loading=delayed_model_loading,
        )
        dlsdk_launcher_config.validate(self.config, ie_core=self.ie_core)
        device = self.config['device'].split('.')
        self._device = '.'.join((device[0].upper(), device[1])) if len(device) > 1 else device[0].upper()
        self.dynamic_shapes_policy = self.get_value_from_config('_undefined_shapes_resolving_policy')
        self._async_mode = False
        self._prepare_ie()
        self._postpone_input_configuration = postpone_inputs_configuration
        self._preprocess_info = {}
        self._preprocess_steps = []
        self.disable_resize_to_input = False
        self._do_reshape = False
        self._output_layouts = {}
        self._output_precisions = {}
        self.dyn_input_layers = []
        self._partial_shapes = {}
        self.is_dynamic = False
        self.preprocessor = preprocessor
        self.infer_request = None
        self._num_requests = None

        if not delayed_model_loading:
            self._model, self._weights = automatic_model_search(
                    self._model_name, self.get_value_from_config('model'),
                    self.get_value_from_config('weights'),
                    self.get_value_from_config('_model_type')
            )
            self.load_network(log=not postpone_inputs_configuration, preprocessing=preprocessor)
            self.allow_reshape_input = self.get_value_from_config('allow_reshape_input') and self.network is not None
            if not postpone_inputs_configuration:
                self.try_to_set_default_layout()
        else:
            self.allow_reshape_input = self.get_value_from_config('allow_reshape_input')
        self._target_layout_mapping = {}
        self._lstm_inputs = None
        if '_list_lstm_inputs' in self.config:
            self._configure_lstm_inputs()
        self.reset_memory_state = self.get_value_from_config('reset_memory_state')
Example no. 15
def test_set_tensors(device):
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net = core.compile_model(func, device)

    data1 = read_image()
    tensor1 = Tensor(data1)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
    data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    tensor3 = Tensor(data3)
    data4 = np.zeros(shape=(1, 10), dtype=np.float32)
    tensor4 = Tensor(data4)

    request = exec_net.create_infer_request()
    request.set_tensors({"data": tensor1, "fc_out": tensor2})
    t1 = request.get_tensor("data")
    t2 = request.get_tensor("fc_out")
    assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2)
    assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensors({0: tensor2})
    output_node = exec_net.outputs[0]
    t3 = request.get_tensor(output_node)
    assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensors({0: tensor1})
    input_node = exec_net.inputs[0]
    t4 = request.get_tensor(input_node)
    assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2)

    input_node = exec_net.inputs[0]
    request.set_tensor(input_node, tensor3)
    t5 = request.get_tensor(input_node)
    assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensor(tensor3)
    t6 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2)

    request.set_input_tensor(0, tensor1)
    t7 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensor(tensor2)
    t8 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2)

    request.set_output_tensor(0, tensor4)
    t9 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2)
Example no. 16
def test_import_onnx_with_external_data():
    model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx")
    ie = Core()
    func = ie.read_model(model=model_path)

    dtype = np.float32
    value_a = np.array([1.0, 3.0, 5.0], dtype=dtype)
    value_b = np.array([3.0, 5.0, 1.0], dtype=dtype)
    # third input [5.0, 1.0, 3.0] read from external file

    runtime = get_runtime()
    computation = runtime.computation(func)
    result = computation(value_a, value_b)
    assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype))
Example no. 17
def create_simple_request_and_inputs(device):
    input_shape = [2, 2]
    param_a = ops.parameter(input_shape, np.float32)
    param_b = ops.parameter(input_shape, np.float32)
    model = Model(ops.add(param_a, param_b), [param_a, param_b])

    core = Core()
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()

    arr_1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
    arr_2 = np.array([[3, 4], [1, 2]], dtype=np.float32)

    return request, arr_1, arr_2
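create_simple_request_and_inputs is a pytest-style helper rather than a test; a minimal sketch of a consumer, assuming the same device fixture used by the other tests (the test name is hypothetical):

def test_add_two_inputs(device):
    request, arr_1, arr_2 = create_simple_request_and_inputs(device)
    # A positional list maps to the model parameters in declaration order.
    res = request.infer([arr_1, arr_2])
    assert np.allclose(res[list(res)[0]], arr_1 + arr_2)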
Example no. 18
def test_infer_queue_is_ready(device):
    core = Core()
    param = ops.parameter([10])
    model = Model(ops.relu(param), [param])
    compiled = core.compile_model(model, device)
    infer_queue = AsyncInferQueue(compiled, 1)

    def callback(request, _):
        time.sleep(0.001)
    infer_queue.set_callback(callback)
    assert infer_queue.is_ready()
    infer_queue.start_async()
    assert not infer_queue.is_ready()
    infer_queue.wait_all()
Example no. 19
def test_inputs_outputs_property(device):
    num_inputs = 10
    input_shape = [1]
    params = [ops.parameter(input_shape, np.uint8) for _ in range(num_inputs)]
    model = Model(ops.split(ops.concat(params, 0), 0, num_inputs), params)
    core = Core()
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
    data = [np.atleast_1d(i) for i in range(num_inputs)]
    results = request.infer(data).values()
    for result, output_tensor in zip(results, request.outputs):
        assert np.array_equal(result, output_tensor.data)
    for input_data, input_tensor in zip(data, request.inputs):
        assert np.array_equal(input_data, input_tensor.data)
Example no. 20
def test_export_import():
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, "CPU")
    exported_net_file = "exported_model.bin"
    with open(exported_net_file, "wb") as f:
        f.write(exec_net.export_model())
    assert os.path.exists(exported_net_file)
    with open(exported_net_file, "rb") as f:
        exec_net = core.import_model(f.read(), "CPU")
    os.remove(exported_net_file)
    img = read_image()
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res[exec_net.outputs[0]][0]) == 3
    del exec_net
    del core
Example no. 21
class InferenceEngine:
    def __init__(self, net_model_xml_path, device, stride):
        self.device = device
        self.stride = stride

        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        self.core = Core()

        log.info('Reading model {}'.format(net_model_xml_path))
        self.model = self.core.read_model(net_model_xml_path)

        required_output_keys = {'features', 'heatmaps', 'pafs'}
        for output_tensor_name in required_output_keys:
            try:
                self.model.output(output_tensor_name)
            except RuntimeError:
                raise RuntimeError(
                    "The demo supports only topologies with the following output keys: {}"
                    .format(', '.join(required_output_keys)))

        self.input_tensor_name = self.model.inputs[0].get_any_name()
        compiled_model = self.core.compile_model(self.model, self.device)
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(net_model_xml_path,
                                                       self.device))

    def infer(self, img):
        img = img[0:img.shape[0] - (img.shape[0] % self.stride),
                  0:img.shape[1] - (img.shape[1] % self.stride)]
        n, c, h, w = self.model.inputs[0].shape
        if h != img.shape[0] or w != img.shape[1]:
            self.model.reshape({
                self.input_tensor_name:
                PartialShape([n, c, img.shape[0], img.shape[1]])
            })
            compiled_model = self.core.compile_model(self.model, self.device)
            self.infer_request = compiled_model.create_infer_request()
        img = np.transpose(img, (2, 0, 1))[None, ]

        self.infer_request.infer({self.input_tensor_name: img})
        inference_result = {
            name: self.infer_request.get_tensor(name).data[:]
            for name in {'features', 'heatmaps', 'pafs'}
        }

        inference_result = (inference_result['features'][0],
                            inference_result['heatmaps'][0],
                            inference_result['pafs'][0])
        return inference_result
Example no. 22
    def __init__(self, model_path, device):
        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()

        log.info('Reading model {}'.format(model_path))
        self.model = core.read_model(model_path)
        self.input_tensor_name = self.model.inputs[0].get_any_name()
        self.input_size = self.model.input(self.input_tensor_name).shape
        self.nchw_layout = self.input_size[1] == 3
        compiled_model = core.compile_model(self.model, device)
        self.output_tensor = compiled_model.outputs[0]
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_path, device))
Example no. 23
    def __init__(self, model_xml, model_bin, device, output_name):
        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()

        log.info('Reading model {}'.format(model_xml))
        self.model = core.read_model(model_xml, model_bin)
        compiled_model = core.compile_model(self.model, device)
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_xml, device))
        self.input_tensor_name = "tokens"
        self.output_tensor_name = output_name
        self.model.output(
            self.output_tensor_name)  # ensure a tensor with the name exists
Example no. 24
def test_import_onnx_function():
    model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx")
    ie = Core()
    func = ie.read_model(model=model_path)

    dtype = np.float32
    value_a = np.array([1.0], dtype=dtype)
    value_b = np.array([2.0], dtype=dtype)
    value_c = np.array([3.0], dtype=dtype)

    runtime = get_runtime()
    computation = runtime.computation(func)
    result = computation(value_a, value_b, value_c)
    assert np.allclose(result, np.array([6], dtype=dtype))
Example no. 25
def abs_model_with_data(device, ov_type, numpy_dtype):
    input_shape = [1, 4]
    param = ops.parameter(input_shape, ov_type)
    model = Model(ops.abs(param), [param])
    core = Core()
    compiled_model = core.compile_model(model, device)

    request = compiled_model.create_infer_request()

    tensor1 = Tensor(ov_type, input_shape)
    tensor1.data[:] = np.array([6, -7, -8, 9])

    array1 = np.array([[-1, 2, 5, -3]]).astype(numpy_dtype)

    return request, tensor1, array1
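Like Example no. 17, this is a helper fixture; a minimal sketch of how it might be exercised, assuming Type.f32 is imported from openvino.runtime (the test name and type pair are illustrative):

def test_abs_infer(device):
    request, tensor1, array1 = abs_model_with_data(device, Type.f32, np.float32)
    request.set_input_tensor(tensor1)
    request.infer()
    assert np.array_equal(request.get_output_tensor().data, np.abs(tensor1.data))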
Example no. 26
def test_infer_dynamic_model(device):
    core = Core()
    param = ops.parameter(PartialShape([-1, -1]))
    model = Model(ops.relu(param), [param])
    compiled = core.compile_model(model, device)
    assert compiled.input().partial_shape.is_dynamic
    request = compiled.create_infer_request()

    shape1 = [1, 28]
    request.infer([np.random.normal(size=shape1)])
    assert request.get_input_tensor().shape == Shape(shape1)

    shape2 = [1, 32]
    request.infer([np.random.normal(size=shape2)])
    assert request.get_input_tensor().shape == Shape(shape2)
Example no. 27
    def __init__(self, model_path, input_name, output_name, quantiles):
        device = "CPU"
        log.info('OpenVINO Inference Engine')
        log.info('\tbuild: {}'.format(get_version()))
        core = Core()
        log.info('Reading model {}'.format(model_path))
        model = core.read_model(model_path)
        compiled_model = core.compile_model(model, device)
        self.infer_request = compiled_model.create_infer_request()
        log.info('The model {} is loaded to {}'.format(model_path, device))
        self.input_tensor_name = input_name
        self.output_tensor_name = output_name
        self.quantiles = quantiles
        model.output(
            self.output_tensor_name)  # ensure a tensor with the name exists
Example no. 28
def test_get_version(device):
    ie = Core()
    version = ie.get_versions(device)
    assert isinstance(version, dict), "Returned version must be a dictionary"
    assert device in version, "{} plugin version wasn't found in versions".format(device)
    assert hasattr(version[device],
                   "major"), "Returned version has no field 'major'"
    assert hasattr(version[device],
                   "minor"), "Returned version has no field 'minor'"
    assert hasattr(
        version[device],
        "description"), "Returned version has no field 'description'"
    assert hasattr(
        version[device],
        "build_number"), "Returned version has no field 'build_number'"
Example no. 29
def test_direct_infer(device):
    core = Core()
    with open(test_net_bin, "rb") as f:
        bin = f.read()
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    model = core.read_model(model=xml, weights=bin)
    img = read_image()
    tensor = Tensor(img)
    comp_model = core.compile_model(model, device)
    res = comp_model({"data": tensor})
    assert np.argmax(res[comp_model.outputs[0]]) == 2
    ref = comp_model.infer_new_request({"data": tensor})
    assert np.array_equal(ref[comp_model.outputs[0]],
                          res[comp_model.outputs[0]])
Example no. 30
def test_register_plugins():
    ie = Core()
    if platform == "linux" or platform == "linux2":
        ie.register_plugins(plugins_xml)
    elif platform == "darwin":
        ie.register_plugins(plugins_osx_xml)
    elif platform == "win32":
        ie.register_plugins(plugins_win_xml)

    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.compile_model(func, "CUSTOM")
    assert isinstance(exec_net,
                      CompiledModel), "Cannot load the network to " \
                                      "the registered plugin with name 'CUSTOM' " \
                                      "registered in the XML file"