def test_can_set_smaller_or_same_shape_on_preallocated_memory(ref_shape):
    """Shrinking (or keeping) the shape of a tensor backed by shared memory succeeds."""
    backing = np.ascontiguousarray(np.ones(shape=(1, 3, 32, 32), dtype=np.float32))
    tensor = Tensor(backing, shared_memory=True)
    # The tensor must be a view over the numpy buffer, not a copy.
    assert np.shares_memory(backing, tensor.data)
    tensor.shape = ref_shape
    assert list(tensor.shape) == ref_shape
def test_ports_as_inputs(device):
    """infer() accepts both compiled-model ports and request model ports as input keys."""
    shape = [2, 2]
    p0 = ops.parameter(shape, np.float32)
    p1 = ops.parameter(shape, np.float32)
    model = Model(ops.add(p0, p1), [p0, p1])

    core = Core()
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()

    lhs = Tensor(np.array([[1, 2], [3, 4]], dtype=np.float32))
    rhs = Tensor(np.array([[3, 4], [1, 2]], dtype=np.float32))

    # Keyed by the compiled model's input ports.
    res = request.infer({compiled.inputs[0]: lhs, compiled.inputs[1]: rhs})
    assert np.array_equal(res[compiled.outputs[0]], lhs.data + rhs.data)

    # Keyed by the request's model input ports.
    res = request.infer({request.model_inputs[0]: lhs, request.model_inputs[1]: rhs})
    assert np.array_equal(res[request.model_outputs[0]], lhs.data + rhs.data)
def test_evaluate():
    """Model.evaluate computes the elementwise sum into a preallocated output tensor."""
    model = create_test_model()
    lhs = np.array([2, 1], dtype=np.float32).reshape(2, 1)
    rhs = np.array([3, 7], dtype=np.float32).reshape(2, 1)
    result = Tensor("float32", Shape([2, 1]))
    assert model.evaluate([result], [Tensor(lhs), Tensor(rhs)])
    assert np.allclose(result.data, np.array([5, 8]).reshape(2, 1))
def test_init_with_ngraph(ov_type, numpy_dtype):
    """Tensor can be constructed from an ov.Shape or a plain list shape.

    Fix: three assertions originally wrapped generator expressions in
    ``np.all``, which sees a single (always truthy) generator object and
    therefore never checked anything. The builtin ``all`` performs the
    intended per-tensor checks.
    """
    ov_tensors = []
    ov_tensors.append(Tensor(type=ov_type, shape=ov.Shape([1, 3, 32, 32])))
    ov_tensors.append(Tensor(type=ov_type, shape=[1, 3, 32, 32]))
    assert all(list(ov_tensor.shape) == [1, 3, 32, 32] for ov_tensor in ov_tensors)
    assert all(ov_tensor.element_type == ov_type for ov_tensor in ov_tensors)
    assert all(ov_tensor.data.dtype == numpy_dtype for ov_tensor in ov_tensors)
    assert all(ov_tensor.data.shape == (1, 3, 32, 32) for ov_tensor in ov_tensors)
def test_cannot_set_shape_on_preallocated_memory(ref_shape):
    """Growing the shape of a tensor over preallocated shared memory must raise."""
    backing = np.ascontiguousarray(np.ones(shape=(1, 3, 32, 32), dtype=np.float32))
    tensor = Tensor(backing, shared_memory=True)
    assert np.shares_memory(backing, tensor.data)
    with pytest.raises(RuntimeError) as e:
        tensor.shape = ref_shape
    assert "Cannot call setShape for Blobs created on top of preallocated memory" in str(e.value)
def test_init_with_roi_tensor():
    """A ROI tensor is a zero-copy view over a sub-region of another tensor.

    Fix: the original asserted ``ov_tensor2.element_type ==
    ov_tensor2.element_type`` — a tautology. The intent is clearly to
    compare the ROI tensor's element type against the source tensor's.
    """
    array = np.random.normal(size=[1, 3, 48, 48])
    ov_tensor1 = Tensor(array)
    # ROI from begin [0, 0, 24, 24] to end [1, 3, 48, 48] -> shape [1, 3, 24, 24].
    ov_tensor2 = Tensor(ov_tensor1, [0, 0, 24, 24], [1, 3, 48, 48])
    assert list(ov_tensor2.shape) == [1, 3, 24, 24]
    assert ov_tensor2.element_type == ov_tensor1.element_type  # was a self-comparison
    assert np.shares_memory(ov_tensor1.data, ov_tensor2.data)
    assert np.array_equal(ov_tensor1.data[0:1, :, 24:, 24:], ov_tensor2.data)
def test_packing(shape, low, high, ov_type, dtype):
    """Packing then unpacking random data through a tensor round-trips losslessly."""
    tensor = Tensor(ov_type, shape)
    reference = np.random.uniform(low, high, shape).astype(dtype)
    tensor.data[:] = pack_data(reference, tensor.element_type)
    restored = unpack_data(tensor.data, tensor.element_type, tensor.shape)
    assert np.array_equal(restored, reference)
def test_evaluate_invalid_input_shape():
    """evaluate() rejects inputs whose shapes are incompatible with the parameters."""
    model = create_test_model()
    mismatched_inputs = [
        Tensor("float32", Shape([3, 1])),
        Tensor("float32", Shape([3, 1])),
    ]
    with pytest.raises(RuntimeError) as e:
        assert model.evaluate([Tensor("float32", Shape([2, 1]))], mismatched_inputs)
    assert "must be compatible with the partial shape: {2,1}" in str(e.value)
def test_can_reset_shape_after_decreasing_on_preallocated_memory():
    """A shared-memory tensor shrunk below its original shape can be grown back to it."""
    backing = np.ascontiguousarray(np.ones(shape=(1, 3, 32, 32), dtype=np.float32))
    tensor = Tensor(backing, shared_memory=True)
    assert np.shares_memory(backing, tensor.data)
    # Shrink first, then restore the original (allocated) shape.
    for target in ([1, 3, 24, 24], [1, 3, 32, 32]):
        tensor.shape = target
        assert list(tensor.shape) == target
def convert_to_tensors(self, input_values):
    """Convert raw input values into a list of Tensors, one per model parameter.

    Non-ndarray values are first wrapped into 0-d numpy arrays. bf16
    parameters get special handling: a bf16 Tensor is allocated and the
    input's bytes are written through a float16 view, since numpy has no
    bfloat16 dtype.
    """
    input_tensors = []
    for parameter, input in zip(self.parameters, input_values):
        if not isinstance(input, (np.ndarray)):
            # Wrap a scalar as a 0-d ndarray backed by np.array(input),
            # using the value's own Python type as the dtype.
            input = np.ndarray([], type(input), np.array(input))
        if parameter.get_output_element_type(0) == Type.bf16:
            # NOTE(review): reinterprets the input buffer as float16 bit
            # patterns to fill the bf16 tensor — presumably the caller
            # supplies data already encoded for bf16; verify against callers.
            input_tensors.append(Tensor(Type.bf16, input.shape))
            input_tensors[-1].data[:] = input.view(np.float16)
        else:
            input_tensors.append(Tensor(input))
    return input_tensors
def test_evaluate():
    """Building an Add model by hand and evaluating it yields elementwise sums."""
    data1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    data2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    func = Model(ops.add(data1, data2), [data1, data2], "TestFunction")

    lhs = np.array([2, 1], dtype=np.float32).reshape(2, 1)
    rhs = np.array([3, 7], dtype=np.float32).reshape(2, 1)
    result = Tensor("float32", Shape([2, 1]))
    assert func.evaluate([result], [Tensor(lhs), Tensor(rhs)])
    assert np.allclose(result.data, np.array([5, 8]).reshape(2, 1))
def test_set_tensors(device):
    """Exercise every tensor-setting overload of InferRequest.

    Covers set_tensors (by name dict), set_output_tensors / set_input_tensors
    (by index dict), set_tensor (by port), and set_input_tensor /
    set_output_tensor (default and explicit index), each followed by a
    get_tensor round-trip check.
    """
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net = core.compile_model(func, device)

    data1 = read_image()
    tensor1 = Tensor(data1)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
    data3 = np.ones(shape=(1, 3, 32, 32), dtype=np.float32)
    tensor3 = Tensor(data3)
    data4 = np.zeros(shape=(1, 10), dtype=np.float32)
    tensor4 = Tensor(data4)

    request = exec_net.create_infer_request()

    # set_tensors: dict keyed by tensor names.
    request.set_tensors({"data": tensor1, "fc_out": tensor2})
    t1 = request.get_tensor("data")
    t2 = request.get_tensor("fc_out")
    assert np.allclose(tensor1.data, t1.data, atol=1e-2, rtol=1e-2)
    assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)

    # set_output_tensors: dict keyed by output index.
    request.set_output_tensors({0: tensor2})
    output_node = exec_net.outputs[0]
    t3 = request.get_tensor(output_node)
    assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2)

    # set_input_tensors: dict keyed by input index.
    request.set_input_tensors({0: tensor1})
    output_node = exec_net.inputs[0]
    t4 = request.get_tensor(output_node)
    assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2)

    # set_tensor: addressed by a port object.
    output_node = exec_net.inputs[0]
    request.set_tensor(output_node, tensor3)
    t5 = request.get_tensor(output_node)
    assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)

    # set_input_tensor without an index defaults to input 0.
    request.set_input_tensor(tensor3)
    t6 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor3.data, t6.data, atol=1e-2, rtol=1e-2)

    # set_input_tensor with an explicit index.
    request.set_input_tensor(0, tensor1)
    t7 = request.get_tensor(request.inputs[0])
    assert np.allclose(tensor1.data, t7.data, atol=1e-2, rtol=1e-2)

    # set_output_tensor without an index defaults to output 0.
    request.set_output_tensor(tensor2)
    t8 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor2.data, t8.data, atol=1e-2, rtol=1e-2)

    # set_output_tensor with an explicit index.
    request.set_output_tensor(0, tensor4)
    t9 = request.get_tensor(request.outputs[0])
    assert np.allclose(tensor4.data, t9.data, atol=1e-2, rtol=1e-2)
def fill_tensors_with_random(layer):
    """Create one randomly-filled Tensor per shape declared on ``layer``.

    Boolean layers are sampled over {0, 1}; all other integer kinds over the
    full uint8 range. np.random.uniform excludes ``high``, so 1 is added for
    integer/boolean kinds to make the upper bound reachable. Sampling is
    seeded for reproducibility; an empty shape yields a scalar tensor.

    Fix: the original compared against ``np.bool``, a deprecated alias
    removed in NumPy 1.24 (AttributeError at runtime); comparing the
    normalized ``np.dtype`` against ``bool`` is version-safe and also
    matches ``np.bool_``.
    """
    dtype = get_dtype(layer.element_type)
    rand_min, rand_max = (0, 1) if np.dtype(dtype) == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
    # np.random.uniform excludes high: add 1 to have it generated
    if np.dtype(dtype).kind in ['i', 'u', 'b']:
        rand_max += 1
    rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(0)))
    input_tensors = []
    for shape in layer.shapes:
        if shape:
            input_tensors.append(Tensor(rs.uniform(rand_min, rand_max, list(shape)).astype(dtype)))
        else:
            input_tensors.append(Tensor(rs.uniform(rand_min, rand_max)))
    return input_tensors
def test_init_with_numpy_dtype(ov_type, numpy_dtype):
    """Tensor accepts numpy dtype objects/classes and shapes given as tuple,
    ndarray, or ov.Shape.

    Fix: all five assertions originally wrapped generator expressions in
    ``np.all``, which sees a single (always truthy) generator object and so
    never checked anything. The builtin ``all`` performs the intended
    per-tensor checks.
    """
    shape = (1, 3, 127, 127)
    ov_shape = ov.Shape(shape)
    ov_tensors = []
    ov_tensors.append(Tensor(type=numpy_dtype, shape=shape))
    ov_tensors.append(Tensor(type=np.dtype(numpy_dtype), shape=shape))
    ov_tensors.append(Tensor(type=np.dtype(numpy_dtype), shape=np.array(shape)))
    ov_tensors.append(Tensor(type=numpy_dtype, shape=ov_shape))
    ov_tensors.append(Tensor(type=np.dtype(numpy_dtype), shape=ov_shape))
    assert all(tuple(ov_tensor.shape) == shape for ov_tensor in ov_tensors)
    assert all(ov_tensor.element_type == ov_type for ov_tensor in ov_tensors)
    assert all(isinstance(ov_tensor.data, np.ndarray) for ov_tensor in ov_tensors)
    assert all(ov_tensor.data.dtype == numpy_dtype for ov_tensor in ov_tensors)
    assert all(ov_tensor.data.shape == shape for ov_tensor in ov_tensors)
def abs_model_with_data(device, ov_type, numpy_dtype):
    """Build a single-input Abs model; return its request plus sample inputs."""
    shape = [1, 4]
    param = ops.parameter(shape, ov_type)
    model = Model(ops.abs(param), [param])
    request = Core().compile_model(model, device).create_infer_request()
    tensor1 = Tensor(ov_type, shape)
    tensor1.data[:] = np.array([6, -7, -8, 9])
    array1 = np.array([[-1, 2, 5, -3]]).astype(numpy_dtype)
    return request, tensor1, array1
def test_infer_mixed_keys(device):
    """infer() accepts a dict mixing integer indices and tensor names as keys."""
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    core.set_property(device, {"PERF_COUNT": "YES"})
    model = core.compile_model(model, device)

    img = read_image()
    named_input = Tensor(img)
    indexed_input = Tensor(np.ones(shape=img.shape, dtype=np.float32))

    request = model.create_infer_request()
    res = request.infer({0: indexed_input, "data": named_input})
    assert np.argmax(res[model.output()]) == 2
def test_evaluate_invalid_input_shape():
    """evaluate() raises when input tensor shapes do not match the parameter shapes."""
    data1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    data2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    func = Model(ops.add(data1, data2), [data1, data2], "TestFunction")

    mismatched = [
        Tensor("float32", Shape([3, 1])),
        Tensor("float32", Shape([3, 1])),
    ]
    with pytest.raises(RuntimeError) as e:
        assert func.evaluate([Tensor("float32", Shape([2, 1]))], mismatched)
    assert "must be compatible with the partial shape: {2,1}" in str(e.value)
def test_infer_mixed_keys(device):
    """infer() accepts a dict mixing integer indices and tensor names as keys."""
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    core.set_config({"PERF_COUNT": "YES"}, device)
    model = core.compile_model(func, device)

    img = read_image()
    named_input = Tensor(img)
    indexed_input = Tensor(np.ones(shape=img.shape, dtype=np.float32))

    request = model.create_infer_request()
    res = request.infer({0: indexed_input, "data": named_input})
    assert np.argmax(res[list(res)[0]]) == 2
def test_viewed_tensor(dtype, element_type):
    """A Tensor viewing a packed numpy buffer exposes the expected unpacked data."""
    buffer = np.random.normal(size=(2, 16)).astype(dtype)
    # How many element_type values are packed into one buffer element.
    per_item = (dtype().nbytes * 8) / element_type.bitwidth
    viewed = Tensor(buffer, (buffer.shape[0], int(buffer.shape[1] * per_item)), element_type)
    assert np.array_equal(viewed.data, buffer.view(ov.utils.types.get_dtype(element_type)))
def test_batched_tensors(device): core = Core() # TODO: remove when plugins will support set_input_tensors core.register_plugin("openvino_template_plugin", "TEMPLATE") batch = 4 one_shape = [1, 2, 2, 2] one_shape_size = np.prod(one_shape) batch_shape = [batch, 2, 2, 2] data1 = ops.parameter(batch_shape, np.float32) data1.set_friendly_name("input0") data1.get_output_tensor(0).set_names({"tensor_input0"}) data1.set_layout(Layout("N...")) constant = ops.constant([1], np.float32) op1 = ops.add(data1, constant) op1.set_friendly_name("Add0") res1 = ops.result(op1) res1.set_friendly_name("Result0") res1.get_output_tensor(0).set_names({"tensor_output0"}) model = Model([res1], [data1]) compiled = core.compile_model(model, "TEMPLATE") req = compiled.create_infer_request() # Allocate 8 chunks, set 'user tensors' to 0, 2, 4, 6 chunks buffer = np.zeros([batch * 2, *batch_shape[1:]], dtype=np.float32) tensors = [] for i in range(batch): # non contiguous memory (i*2) tensors.append( Tensor(np.expand_dims(buffer[i * 2], 0), shared_memory=True)) req.set_input_tensors(tensors) with pytest.raises(RuntimeError) as e: req.get_tensor("tensor_input0") assert "get_tensor shall not be used together with batched set_tensors/set_input_tensors" in str( e.value) actual_tensor = req.get_tensor("tensor_output0") actual = actual_tensor.data for test_num in range(0, 5): for i in range(0, batch): tensors[i].data[:] = test_num + 10 req.infer() # Adds '1' to each element # Reference values for each batch: _tmp = np.array([test_num + 11] * one_shape_size, dtype=np.float32).reshape([2, 2, 2]) for j in range(0, batch): assert np.array_equal(actual[j], _tmp)
def get_binary_tensors(binary_paths, info, batch_sizes):
    """Load raw binary files into Tensors, one Tensor per shape of ``info``.

    Iterates max(num_shapes, num_binaries) times, cycling through both
    lists, and fills each batch slot of an output array from consecutive
    binary files. Raises when a file's size does not match the blob size
    implied by the per-sample shape and element type.
    """
    num_shapes = len(info.shapes)
    num_binaries = len(binary_paths)
    niter = max(num_shapes, num_binaries)
    processed_frames = 0
    tensors = []
    for i in range(niter):
        shape_id = i % num_shapes
        dtype = get_dtype(info.element_type.get_type_name())[0]
        shape = list(info.shapes[shape_id])
        # Allocated with the full (batched) shape; ``shape`` is then shrunk
        # to a single sample below for per-file size checks and reshaping.
        binaries = np.ndarray(shape=shape, dtype=dtype)
        if info.layout.has_name('N'):
            shape[info.layout.get_index_by_name('N')] = 1
        binary_index = processed_frames
        current_batch_size = batch_sizes[shape_id]
        for b in range(current_batch_size):
            # Wrap around when there are fewer files than frames.
            binary_index %= num_binaries
            binary_filename = binary_paths[binary_index]
            logger.info("Prepare binary file " + binary_filename)

            binary_file_size = os.path.getsize(binary_filename)
            blob_size = dtype().nbytes * int(np.prod(shape))
            if blob_size != binary_file_size:
                raise Exception(
                    f"File {binary_filename} contains {binary_file_size} bytes but network expects {blob_size}"
                )
            # NOTE(review): assigning an array shaped with N=1 into
            # binaries[b] relies on numpy broadcasting over the leading 1 —
            # confirm the layouts in use always make these shapes compatible.
            binaries[b] = np.reshape(np.fromfile(binary_filename, dtype), shape)
            binary_index += 1
        processed_frames += current_batch_size
        tensors.append(Tensor(binaries))
    return tensors
def test_tensor_setter(device):
    """set_tensor round-trips, and results agree between two compiled models."""
    core = Core()
    func = core.read_model(test_net_xml, test_net_bin)
    exec_net_1 = core.compile_model(model=func, device_name=device)
    exec_net_2 = core.compile_model(model=func, device_name=device)

    tensor = Tensor(read_image())

    # First model: set by name, then infer keyed by index.
    request1 = exec_net_1.create_infer_request()
    request1.set_tensor("data", tensor)
    t1 = request1.get_tensor("data")
    assert np.allclose(tensor.data, t1.data, atol=1e-2, rtol=1e-2)

    res = request1.infer({0: tensor})
    key = list(res)[0]
    res_1 = np.sort(res[key])
    t2 = request1.get_tensor("fc_out")
    assert np.allclose(t2.data, res[key].data, atol=1e-2, rtol=1e-2)

    # Second model: infer keyed by name; sorted outputs must match the first.
    request = exec_net_2.create_infer_request()
    res = request.infer({"data": tensor})
    res_2 = np.sort(request.get_tensor("fc_out").data)
    assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)

    request.set_tensor("data", tensor)
    t3 = request.get_tensor("data")
    assert np.allclose(t3.data, t1.data, atol=1e-2, rtol=1e-2)
def import_onnx_model(model: onnx.ModelProto) -> Model:
    """Validate an ONNX model and load it into an OpenVINO Model via Core.read_model."""
    onnx.checker.check_model(model)
    serialized = bytes(model.SerializeToString())
    return Core().read_model(serialized, Tensor(type=np.uint8, shape=[]))
def test_batched_tensors(device):
    """Batched inference where each batch element is a Tensor created from a
    pointer into one flat numpy buffer."""
    batch = 4
    one_shape = Shape([1, 2, 2, 2])
    batch_shape = Shape([batch, 2, 2, 2])
    one_shape_size = np.prod(one_shape)

    core = Core()
    core.register_plugin("openvino_template_plugin", "TEMPLATE")

    # Model: tensor_output0 = tensor_input0 + 1, batched layout N... on the input.
    data1 = ops.parameter(batch_shape, np.float32)
    data1.set_friendly_name("input0")
    data1.get_output_tensor(0).set_names({"tensor_input0"})
    data1.set_layout(Layout("N..."))
    constant = ops.constant([1], np.float32)
    op1 = ops.add(data1, constant)
    op1.set_friendly_name("Add0")
    res1 = ops.result(op1)
    res1.set_friendly_name("Result0")
    res1.get_output_tensor(0).set_names({"tensor_output0"})
    model = Model([res1], [data1])
    compiled = core.compile_model(model, "TEMPLATE")

    # One flat buffer holding 2x the batch, so the user tensors below start
    # at every second chunk (non-contiguous as a whole).
    buffer = np.zeros([one_shape_size * batch * 2], dtype=np.float32)

    req = compiled.create_infer_request()

    tensors = []
    for i in range(0, batch):
        _start = i * one_shape_size * 2
        # Use of special constructor for Tensor.
        # It creates a Tensor from pointer, thus it requires only
        # one element from original buffer, and shape to "crop".
        tensor = Tensor(buffer[_start:(_start + 1)], one_shape)
        tensors.append(tensor)

    req.set_input_tensors(tensors)  # using list overload!

    actual_tensor = req.get_tensor("tensor_output0")
    actual = actual_tensor.data
    for test_num in range(0, 5):
        for i in range(0, batch):
            # Writing through the tensors updates the shared flat buffer.
            tensors[i].data[:] = test_num + 10
        req.infer()  # Adds '1' to each element
        # Reference values for each batch:
        _tmp = np.array([test_num + 11] * one_shape_size,
                        dtype=np.float32).reshape([2, 2, 2])
        for j in range(0, batch):
            assert np.array_equal(actual[j], _tmp)
def test_init_with_packed_buffer(dtype, ov_type):
    """A Tensor built over a packed numpy buffer shares its bytes exactly."""
    shape = [1, 3, 32, 32]
    # Number of low-precision values packed into one buffer element.
    values_per_item = np.dtype(dtype).itemsize * 8 / ov_type.bitwidth
    assert np.prod(shape) % values_per_item == 0
    packed_len = int(np.prod(shape) // values_per_item)
    buffer = np.random.normal(size=packed_len).astype(dtype)
    ov_tensor = Tensor(buffer, shape, ov_type)
    assert ov_tensor.data.nbytes == ov_tensor.byte_size
    assert np.array_equal(ov_tensor.data.view(dtype), buffer)
def test_infer_new_request_wrong_port_name(device):
    """infer_new_request raises KeyError for an unknown tensor name."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    tensor = Tensor(read_image())
    with pytest.raises(KeyError) as e:
        compiled.infer_new_request({"_data_": tensor})
    assert "Port for tensor _data_ was not found!" in str(e.value)
def get_image_info_tensors(image_sizes, layer):
    """Build image-info Tensors: channels 0 and 1 carry the image size, the rest 1."""
    im_infos = []
    for shape, image_size in zip(layer.shapes, image_sizes):
        im_info = np.ndarray(shape, dtype=get_dtype(layer.element_type))
        for batch_idx in range(shape[0]):
            for channel in range(shape[1]):
                im_info[batch_idx][channel] = image_size if channel in [0, 1] else 1
        im_infos.append(Tensor(im_info))
    return im_infos
def test_infer_tensor_wrong_input_data(device):
    """infer_new_request rejects an unsupported dict key type (float) with TypeError."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    img = np.ascontiguousarray(read_image())
    tensor = Tensor(img, shared_memory=True)
    with pytest.raises(TypeError) as e:
        compiled.infer_new_request({0.: tensor})
    assert "Incompatible key type for tensor: 0." in str(e.value)
def concat_model_with_data(device, ov_type, numpy_dtype):
    """Build a two-input Concat model; return its request plus sample inputs."""
    input_shape = [5]
    params = [ops.parameter(input_shape, ov_type)]
    # For bf16 the second parameter mirrors the first (numpy has no bf16
    # dtype); otherwise it is declared with the numpy dtype directly.
    if ov_type == Type.bf16:
        params.append(ops.parameter(input_shape, ov_type))
    else:
        params.append(ops.parameter(input_shape, numpy_dtype))
    model = Model(ops.concat(params, 0), params)
    request = Core().compile_model(model, device).create_infer_request()
    tensor1 = Tensor(ov_type, input_shape)
    tensor1.data[:] = np.array([6, 7, 8, 9, 0])
    array1 = np.array([1, 2, 3, 4, 5], dtype=numpy_dtype)
    return request, tensor1, array1
def test_infer_new_request_tensor_numpy_copy(device):
    """Two infer_new_request calls with the same Tensor produce identical argmax."""
    core = Core()
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled = core.compile_model(model, device)
    tensor = Tensor(read_image())
    res_tensor = compiled.infer_new_request({"data": tensor})
    res_img = compiled.infer_new_request({"data": tensor})
    first = res_tensor[list(res_tensor)[0]]
    second = res_img[list(res_img)[0]]
    assert np.argmax(first) == 2
    assert np.argmax(first) == np.argmax(second)