Example #1
def test_infer_list_as_inputs(device):
    num_inputs = 4
    input_shape = [2, 1]
    dtype = np.float32
    params = [ops.parameter(input_shape, dtype) for _ in range(num_inputs)]
    model = Model(ops.relu(ops.concat(params, 1)), params)
    core = Core()
    compiled_model = core.compile_model(model, device)

    def check_fill_inputs(request, inputs):
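        # Each provided array should have landed in the input tensor with the same index.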
        for input_idx in range(len(inputs)):
            assert np.array_equal(
                request.get_input_tensor(input_idx).data, inputs[input_idx])

    request = compiled_model.create_infer_request()

    inputs = [np.random.normal(size=input_shape).astype(dtype)]
    request.infer(inputs)
    check_fill_inputs(request, inputs)

    inputs = [
        np.random.normal(size=input_shape).astype(dtype)
        for _ in range(num_inputs)
    ]
    request.infer(inputs)
    check_fill_inputs(request, inputs)
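These snippets are shown without their imports, and `device` is presumably a pytest fixture supplying a device name such as "CPU". A minimal preamble that would make Example #1 runnable might look like the sketch below; the exact opset module is an assumption, since the code only references it through the `ops` alias. Later examples additionally use `Tensor`, `Type`, and `Shape` from `openvino.runtime`, and `Parameter` from `openvino.runtime.op`.

import numpy as np
from openvino.runtime import Core, Model
import openvino.runtime.opset8 as ops  # assumed opset version; parameter/concat/relu exist in all recent opsets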
Example #2
def test_concat():
    a = np.array([[1, 2], [3, 4]]).astype(np.float32)
    b = np.array([[5, 6]]).astype(np.float32)
    axis = 0
    expected = np.concatenate((a, b), axis=0)

    runtime = get_runtime()
    parameter_a = ov.parameter(list(a.shape), name="A", dtype=np.float32)
    parameter_b = ov.parameter(list(b.shape), name="B", dtype=np.float32)
    node = ov.concat([parameter_a, parameter_b], axis)
    computation = runtime.computation(node, parameter_a, parameter_b)
    result = computation(a, b)
    assert np.allclose(result, expected)
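Here `get_runtime()` and `runtime.computation` appear to be helpers from OpenVINO's Python test utilities rather than public API. Assuming that, roughly the same check can be expressed directly against the runtime as in the sketch below; the function name and the "CPU" device are assumptions.

import numpy as np
from openvino.runtime import Core, Model
import openvino.runtime.opset8 as ov  # assumed opset alias matching the example

def concat_via_compiled_model(a, b, axis=0, device="CPU"):
    # Build the same two-input concat graph, compile it, and run one inference.
    parameter_a = ov.parameter(list(a.shape), name="A", dtype=np.float32)
    parameter_b = ov.parameter(list(b.shape), name="B", dtype=np.float32)
    model = Model(ov.concat([parameter_a, parameter_b], axis), [parameter_a, parameter_b])
    request = Core().compile_model(model, device).create_infer_request()
    request.infer([a, b])
    return request.get_output_tensor(0).data

Calling concat_via_compiled_model(a, b) then reproduces the `expected` array from the test above.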
Example #3
def test_inputs_outputs_property(device):
    num_inputs = 10
    input_shape = [1]
    params = [ops.parameter(input_shape, np.uint8) for _ in range(num_inputs)]
    model = Model(ops.split(ops.concat(params, 0), 0, num_inputs), params)
    core = Core()
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
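    # One [1]-shaped array per input; concat followed by split routes input i straight to output i.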
    data = [np.atleast_1d(i) for i in range(num_inputs)]
    results = request.infer(data).values()
    for result, output_tensor in zip(results, request.outputs):
        assert np.array_equal(result, output_tensor.data)
    for input_data, input_tensor in zip(data, request.inputs):
        assert np.array_equal(input_data, input_tensor.data)
Example #4
def concat_model_with_data(device, ov_type, numpy_dtype):
    input_shape = [5]

    params = []
    params += [ops.parameter(input_shape, ov_type)]
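    # NumPy has no native bfloat16 dtype, so for bf16 the second parameter is also created from the OpenVINO type.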
    if ov_type == Type.bf16:
        params += [ops.parameter(input_shape, ov_type)]
    else:
        params += [ops.parameter(input_shape, numpy_dtype)]

    model = Model(ops.concat(params, 0), params)
    core = Core()
    compiled = core.compile_model(model, device)
    request = compiled.create_infer_request()
    tensor1 = Tensor(ov_type, input_shape)
    tensor1.data[:] = np.array([6, 7, 8, 9, 0])
    array1 = np.array([1, 2, 3, 4, 5], dtype=numpy_dtype)

    return request, tensor1, array1
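`concat_model_with_data` only builds the request and the two inputs. A test driving it would presumably infer with the `Tensor` and the NumPy array together and compare against NumPy's own concatenation; a minimal sketch for the non-bf16 case follows (the function name is hypothetical).

def run_concat_helper(device, ov_type, numpy_dtype):
    # Hypothetical driver for the helper above: feed the Tensor and the array
    # positionally, then check the concatenated output against NumPy.
    request, tensor1, array1 = concat_model_with_data(device, ov_type, numpy_dtype)
    request.infer([tensor1, array1])
    expected = np.concatenate((tensor1.data, array1))
    assert np.array_equal(request.get_output_tensor(0).data, expected)

For example, run_concat_helper("CPU", Type.f32, np.float32) should see the output [6, 7, 8, 9, 0, 1, 2, 3, 4, 5].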
Example #5
def test_concat():
    element_type = Type.f32
    A = Parameter(element_type, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
    parameter_list = [A, B, C]
    axis = 0
    function = Model([ov.concat([A, B, C], axis)], parameter_list, "test")

    a_arr = np.array([[1, 2]], dtype=np.float32)
    b_arr = np.array([[5, 6]], dtype=np.float32)
    c_arr = np.array([[7, 8]], dtype=np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function, *parameter_list)
    result = computation(a_arr, b_arr, c_arr)[0]

    expected = np.concatenate((a_arr, b_arr, c_arr), axis)
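    # Three [1, 2] inputs concatenated along axis 0 yield a [3, 2] result.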
    assert np.allclose(result, expected)