Example #1
def guess_source_layouts_for_reverse_channels(ov_function: Model, layout_values):
    """
    Internal function. Try to guess source layout for input by finding dimension with size=3 (RGB/BGR)
    Additionally checks existing layouts and detects suitable inputs for reversing of input channels
    :param: ov_function Original model
    :param: layout_values Existing source/target layout items specified by user
    :return: array with suitable parameters for reversing of input channels
    """
    all_params = []
    suitable_params = []
    for i in range(0, len(ov_function.inputs)):
        ov_input = ov_function.input(i)
        param_info = [ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()]
        all_params.append(param_info)

        if not ov_function.get_parameters()[i].layout.empty:
            if check_suitable_for_reverse(ov_function.get_parameters()[i].layout, ov_input):
                suitable_params.append(param_info)
            continue

        layout_item = None
        first_name = ov_input.get_tensor().get_any_name()
        for name in ov_input.get_tensor().get_names():
            if name in layout_values:
                layout_item = layout_values[name]
                break

        if layout_item is not None:
            if layout_item.get('target_layout'):
                if check_suitable_for_reverse(Layout(layout_item['target_layout']), ov_input):
                    suitable_params.append(param_info)
            elif layout_item.get('source_layout'):
                if check_suitable_for_reverse(Layout(layout_item['source_layout']), ov_input):
                    suitable_params.append(param_info)
            continue

        try:
            layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(),
                                                    num_channels=3,
                                                    name=first_name,
                                                    layout_values=layout_values)
        except Error as e:
            log.debug('Reverse input channels guess did not succeed {}'.format(e))
        else:
            layout = layout_values[first_name].get('source_layout')
            if layout and check_suitable_for_reverse(Layout(layout), ov_input):
                suitable_params.append(param_info)

    if not len(suitable_params):
        raise Error('Network has {} inputs overall, but none of them are suitable for input channels reversing.\n'
                    'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
                    'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}'.format(
            len(all_params), all_params))
    elif len(suitable_params) < len(all_params):
        log.error('Network has {} inputs overall, but only {} of them are suitable for input channels reversing.\n'
                  'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
                  'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}\n'
                  'Suitable inputs {}'.format(len(all_params), len(suitable_params), all_params, suitable_params),
                  extra={'is_warning': True})
    return suitable_params
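A minimal usage sketch (an addition, not part of the source): the layout_values structure is inferred from the lookups inside the function (a dict keyed by tensor name whose values may carry 'source_layout' / 'target_layout' strings); the input name and layout are purely illustrative, and ov_function is assumed to be an already-loaded Model.

layout_values = {
    "data": {"source_layout": "nhwc", "target_layout": None},
}
suitable = guess_source_layouts_for_reverse_channels(ov_function, layout_values)
for tensor_name, partial_shape in suitable:
    # Each entry is the [tensor name, partial shape] pair collected above
    log.debug('Input {} with shape {} can have its channels reversed'.format(tensor_name, partial_shape))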
Example #2
def test_ngraph_preprocess_set_from_np_infer():
    shape = [1, 1, 1]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a
    function = Model(model, [parameter_a], "TestFunction")

    @custom_preprocess_function
    def custom_crop(out_node: Output):
        start = ops.constant(np.array([1, 1, 1]), dtype=np.int32)
        stop = ops.constant(np.array([2, 2, 2]), dtype=np.int32)
        step = ops.constant(np.array([1, 1, 1]), dtype=np.int32)
        axis = ops.constant(np.array([0, 1, 2]), dtype=np.int32)
        return ops.slice(out_node, start, stop, step, axis)

    input_data = np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                           [[9, 10, 11], [12, 13, 14], [15, 16, 17]],
                           [[18, 19, 20], [21, 22, 23], [24, 25, 26]]]).astype(np.int32)

    p = PrePostProcessor(function)
    inp = p.input()
    inp.tensor().set_from(input_data)
    inp.preprocess().convert_element_type().custom(custom_crop)
    function = p.build()
    assert function.input().shape == ov.Shape([3, 3, 3])
    assert function.input().element_type == Type.i32

    expected_output = np.array([[[13]]]).astype(np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function)
    output = computation(input_data)
    assert np.equal(output, expected_output).all()
Example #3
def test_low_latency2():
    X = opset8.parameter(Shape([32, 40, 10]), np.float32, "X")
    Y = opset8.parameter(Shape([32, 40, 10]), np.float32, "Y")
    M = opset8.parameter(Shape([32, 2, 10]), np.float32, "M")

    X_i = opset8.parameter(Shape([32, 2, 10]), np.float32, "X_i")
    Y_i = opset8.parameter(Shape([32, 2, 10]), np.float32, "Y_i")
    M_body = opset8.parameter(Shape([32, 2, 10]), np.float32, "M_body")

    sum = opset8.add(X_i, Y_i)
    Zo = opset8.multiply(sum, M_body)

    body = Model([Zo], [X_i, Y_i, M_body], "body_function")

    ti = opset8.tensor_iterator()
    ti.set_body(body)
    ti.set_sliced_input(X_i, X.output(0), 0, 2, 2, 39, 1)
    ti.set_sliced_input(Y_i, Y.output(0), 0, 2, 2, -1, 1)
    ti.set_invariant_input(M_body, M.output(0))

    out0 = ti.get_iter_value(Zo.output(0), -1)
    out1 = ti.get_concatenated_slices(Zo.output(0), 0, 2, 2, 39, 1)

    result0 = opset8.result(out0)
    result1 = opset8.result(out1)

    model = Model([result0, result1], [X, Y, M])

    m = Manager()
    m.register_pass(LowLatency2())
    m.run_passes(model)

    # TODO: create TI which will be transformed by LowLatency2
    assert count_ops(model, "TensorIterator") == [1]
Example #4
def simple_if(condition_val):
    condition = ov.constant(condition_val, dtype=np.bool_)
    # then_body
    X_t = ov.parameter([2], np.float32, "X")
    Y_t = ov.parameter([2], np.float32, "Y")

    then_mul = ov.multiply(X_t, Y_t)
    then_body_res_1 = ov.result(then_mul)
    then_body = Model([then_body_res_1], [X_t, Y_t], "then_body_function")

    # else_body
    X_e = ov.parameter([2], np.float32, "X")
    Y_e = ov.parameter([2], np.float32, "Y")
    add_e = ov.add(X_e, Y_e)
    else_body_res_1 = ov.result(add_e)
    else_body = Model([else_body_res_1], [X_e, Y_e], "else_body_function")

    X = ov.constant([3, 4], dtype=np.float32)
    Y = ov.constant([2, 1], dtype=np.float32)

    if_node = ov.if_op(condition)
    if_node.set_then_body(then_body)
    if_node.set_else_body(else_body)
    if_node.set_input(X.output(0), X_t, X_e)
    if_node.set_input(Y.output(0), Y_t, Y_e)
    if_res = if_node.set_output(then_body_res_1, else_body_res_1)
    relu = ov.relu(if_res)

    return relu
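For reference (an addition, not in the source test), the helper above can be wrapped in a Model to exercise both branches; with the constants X=[3, 4] and Y=[2, 1] defined inside, the then-branch multiplies and the else-branch adds:

model_then = Model(simple_if(True), [], "if_then")    # relu(X * Y) -> [6.0, 4.0]
model_else = Model(simple_if(False), [], "if_else")   # relu(X + Y) -> [5.0, 5.0]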
Example #5
def test_default_version_IR_V11_seperate_paths():
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    shape = [100, 100, 2]
    parameter_a = ov.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.parameter(shape, dtype=np.float32, name="B")
    parameter_c = ov.parameter(shape, dtype=np.float32, name="C")
    parameter_d = ov.parameter(shape, dtype=np.float32, name="D")
    model = ov.floor(
        ov.minimum(ov.abs(parameter_a), ov.multiply(parameter_b, parameter_c)))
    func = Model(model, [parameter_a, parameter_b, parameter_c], "Model")
    pass_manager = Manager()
    pass_manager.register_pass("Serialize",
                               xml_path=xml_path,
                               bin_path=bin_path,
                               version="IR_V11")
    pass_manager.run_passes(func)

    res_func = core.read_model(model=xml_path, weights=bin_path)

    assert func.get_parameters() == res_func.get_parameters()
    assert func.get_ordered_ops() == res_func.get_ordered_ops()

    os.remove(xml_path)
    os.remove(bin_path)
Example #6
def test_get_result_index():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu = ops.relu(param, name="relu")
    function = Model(relu, [param], "TestFunction")
    assert len(function.outputs) == 1
    assert function.get_result_index(function.outputs[0]) == 0
Example #7
def guess_source_layouts_by_mean_scale(ov_function: Model, layout_values,
                                       mean_scale_values: dict):
    """
    Internal function. Try to guess source layout for input by its shape and/or framework
    :param: ov_function Original model
    :param: layout_values Existing source/target layout items specified by user
    :param: mean_scale_values Dictionary with mean/scale values defined for each argument
    :return: updated layout items with guessed layouts
    """
    for ms_name, mean_scale in mean_scale_values.items():
        num_channels_mean = len(
            mean_scale['mean']) if mean_scale['mean'] is not None else 0
        num_channels_scale = len(mean_scale['scale']) if hasattr(
            mean_scale['scale'], '__len__') else 0

        if num_channels_mean > 1 and \
                num_channels_scale > 1 and \
                num_channels_mean != num_channels_scale:
            raise Error(
                'Mean/Scale values for {} have different sizes: {} {}'.format(
                    ms_name, num_channels_mean, num_channels_scale))

        need_guess_channels = num_channels_mean > 1 or num_channels_scale > 1
        if not need_guess_channels:  # Mean/scale is complex and needs 'channels' specified in layout
            continue

        num_channels = num_channels_mean if num_channels_mean > 1 else num_channels_scale

        for i in range(0, len(ov_function.inputs)):
            ov_input = ov_function.input(i)

            if not ov_function.get_parameters()[i].layout.empty:
                continue

            if ms_name not in ov_input.get_tensor().get_names():
                continue

            layout_item = None
            for name in ov_input.get_tensor().get_names():
                if name in layout_values:
                    layout_item = layout_values[name]
                    break

            if layout_item is not None:
                # User specified some layout, skip guessing
                continue

            # Guess layout is applicable only when number of channels is '3'
            if num_channels != 3:
                raise Error('Can\'t determine channels dimension for {}. '
                            'When number of mean/scale values is {} (not 3), '
                            'please specify layout for input manually'.format(
                                ms_name, num_channels))

            layout_values = find_channels_dimension(
                shape=ov_input.get_partial_shape(),
                num_channels=num_channels,
                name=ms_name,
                layout_values=layout_values)
    return layout_values
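An illustrative call (not in the source): the mean_scale_values layout is inferred from the accesses above (per-input dicts with 'mean' and 'scale' entries); the input name and the three-channel values are made up, and ov_function is assumed to be a loaded Model whose input tensor is actually named "data".

mean_scale_values = {
    "data": {"mean": [127.5, 127.5, 127.5], "scale": [255.0, 255.0, 255.0]},
}
layout_values = guess_source_layouts_by_mean_scale(ov_function, {}, mean_scale_values)
# If guessing succeeded, layout_values["data"] now carries a 'source_layout' entry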
Example #8
def test_input_shape_read_only():
    shape = Shape([1, 10])
    param = ov.parameter(shape, dtype=np.float32)
    model = Model(ov.relu(param), [param])
    ref_shape = model.input().shape
    ref_shape[0] = Dimension(3)
    assert model.input().shape == shape
Example #9
def test_get_batch():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    param = func.get_parameters()[0]
    param.set_layout(Layout("NC"))
    assert get_batch(func) == 2
Example #10
def test_parameter_index_invalid():
    shape1 = PartialShape([1])
    param1 = ops.parameter(shape1, dtype=np.float32, name="data1")
    relu = ops.relu(param1, name="relu")
    function = Model(relu, [param1], "TestFunction")
    shape2 = PartialShape([2])
    param2 = ops.parameter(shape2, dtype=np.float32, name="data2")
    assert function.get_parameter_index(param2) == -1
Example #11
def test_set_batch_default_batch_size():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    func_param1 = func.get_parameters()[0]
    func_param1.set_layout(Layout("NC"))
    set_batch(func)
    assert func.is_dynamic()
Example #12
def test_compress_model_transformation():
    node_constant = ov.opset8.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32))
    node_ceil = ov.opset8.ceiling(node_constant)
    func = Model(node_ceil, [], "TestFunction")
    assert func.get_ordered_ops()[0].get_element_type().get_type_name() == "f32"
    compress_model_transformation(func)

    assert func is not None
    assert func.get_ordered_ops()[0].get_element_type().get_type_name() == "f16"
Example #13
def test_reshape(device):
    shape = Shape([1, 10])
    param = ops.parameter(shape, dtype=np.float32)
    model = Model(ops.relu(param), [param])
    ref_shape = model.input().partial_shape
    ref_shape[0] = 3
    model.reshape(ref_shape)
    core = Core()
    compiled = core.compile_model(model, device)
    assert compiled.input().partial_shape == ref_shape
Example #14
def test_add_outputs_incorrect_outputs_list():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu1 = ops.relu(param, name="relu1")
    relu1.get_output_tensor(0).set_names({"relu_t1"})
    function = Model(relu1, [param], "TestFunction")
    assert len(function.get_results()) == 1
    with pytest.raises(TypeError) as e:
        function.add_outputs([0, 0])
    assert "Incorrect type of a value to add as output at index 0" in str(e.value)
Example #15
def test_get_batch_CHWN():
    param1 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data2")
    param3 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data3")
    add = ops.add(param1, param2)
    add2 = ops.add(add, param3)
    func = Model(add2, [param1, param2, param3], "TestFunction")
    param = func.get_parameters()[0]
    param.set_layout(Layout("CHWN"))
    assert get_batch(func) == 4
Example #16
def test_function_add_output_incorrect_tensor_name():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu1 = ops.relu(param, name="relu1")
    relu1.get_output_tensor(0).set_names({"relu_t1"})
    relu2 = ops.relu(relu1, name="relu2")
    function = Model(relu2, [param], "TestFunction")
    assert len(function.get_results()) == 1
    with pytest.raises(RuntimeError) as e:
        function.add_outputs("relu_t")
    assert "Tensor name relu_t was not found." in str(e.value)
Example #17
def test_function_add_output_incorrect_name():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu1 = ops.relu(param, name="relu1")
    relu1.get_output_tensor(0).set_names({"relu_t1"})
    relu2 = ops.relu(relu1, name="relu2")
    function = Model(relu2, [param], "TestFunction")
    assert len(function.get_results()) == 1
    with pytest.raises(RuntimeError) as e:
        function.add_outputs(("relu_1", 0))
    # Verify that the missing op name appears in the error message
    assert "relu_1" in str(e.value)
Example #18
def test_evaluate():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")

    input1 = np.array([2, 1], dtype=np.float32).reshape(2, 1)
    input2 = np.array([3, 7], dtype=np.float32).reshape(2, 1)
    out_tensor = Tensor("float32", Shape([2, 1]))

    assert func.evaluate([out_tensor], [Tensor(input1), Tensor(input2)])
    assert np.allclose(out_tensor.data, np.array([5, 8]).reshape(2, 1))
Example #19
def test_get_result_index_invalid():
    shape1 = PartialShape([1])
    param1 = ops.parameter(shape1, dtype=np.float32, name="data1")
    relu1 = ops.relu(param1, name="relu1")
    function = Model(relu1, [param1], "TestFunction")

    shape2 = PartialShape([2])
    param2 = ops.parameter(shape2, dtype=np.float32, name="data2")
    relu2 = ops.relu(param2, name="relu2")
    invalid_output = relu2.outputs()[0]
    assert len(function.outputs) == 1
    assert function.get_result_index(invalid_output) == -1
Example #20
def test_ngraph_preprocess_set_memory_type():
    shape = [1, 1, 1]
    parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
    op = ops.relu(parameter_a)
    model = op
    function = Model(model, [parameter_a], "TestFunction")

    p = PrePostProcessor(function)
    p.input().tensor().set_memory_type("some_memory_type")
    function = p.build()

    assert any(key for key in function.input().rt_info if "memory_type" in key)
Example #21
def test_function_add_output_incorrect_idx():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu1 = ops.relu(param, name="relu1")
    relu1.get_output_tensor(0).set_names({"relu_t1"})
    relu2 = ops.relu(relu1, name="relu2")
    function = Model(relu2, [param], "TestFunction")
    assert len(function.get_results()) == 1
    with pytest.raises(RuntimeError) as e:
        function.add_outputs(("relu1", 10))
    assert "Cannot add output to port 10 operation relu1 has only 1 outputs." in str(
        e.value)
Example #22
def test_repr_dynamic_shape():
    shape = PartialShape([-1, 2])
    parameter_a = ov.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.parameter(shape, dtype=np.float32, name="B")
    model = parameter_a + parameter_b
    function = Model(model, [parameter_a, parameter_b],
                     "simple_dyn_shapes_graph")

    assert repr(function) == "<Model: 'simple_dyn_shapes_graph' ({?,2})>"

    ops = function.get_ordered_ops()
    for op in ops:
        assert "{?,2}" in repr(op)
Example #23
def test_function_add_output_port():
    input_shape = PartialShape([1])
    param = ops.parameter(input_shape, dtype=np.float32, name="data")
    relu1 = ops.relu(param, name="relu1")
    relu1.get_output_tensor(0).set_names({"relu_t1"})
    relu2 = ops.relu(relu1, name="relu2")
    function = Model(relu2, [param], "TestFunction")
    assert len(function.get_results()) == 1
    new_outs = function.add_outputs(relu1.output(0))
    assert len(function.get_results()) == 2
    assert len(new_outs) == 1
    assert new_outs[0].get_node() == function.outputs[1].get_node()
    assert new_outs[0].get_index() == function.outputs[1].get_index()
Example #24
def pre_post_processing(function: Model, app_inputs_info, input_precision: str,
                        output_precision: str, input_output_precision: str):
    pre_post_processor = PrePostProcessor(function)
    if input_precision:
        element_type = get_element_type(input_precision)
        for i in range(len(function.inputs)):
            pre_post_processor.input(i).tensor().set_element_type(element_type)
            app_inputs_info[i].element_type = element_type
    if output_precision:
        element_type = get_element_type(output_precision)
        for i in range(len(function.outputs)):
            pre_post_processor.output(i).tensor().set_element_type(
                element_type)
    user_precision_map = {}
    if input_output_precision:
        user_precision_map = _parse_arg_map(input_output_precision)
        input_names = get_input_output_names(function.get_parameters())
        output_names = get_input_output_names(function.get_results())
        for node_name, precision in user_precision_map.items():
            user_precision_map[node_name] = get_element_type(precision)
        for name, element_type in user_precision_map.items():
            if name in input_names:
                port = input_names.index(name)
                app_inputs_info[port].element_type = element_type
                pre_post_processor.input(port).tensor().set_element_type(
                    element_type)
            elif name in output_names:
                port = output_names.index(name)
                pre_post_processor.output(port).tensor().set_element_type(
                    element_type)
            else:
                raise Exception(f"Node '{name}' does not exist in network")

    # update app_inputs_info
    if not input_precision:
        inputs = function.inputs
        for i in range(len(inputs)):
            if app_inputs_info[i].name in user_precision_map.keys():
                app_inputs_info[i].element_type = user_precision_map[
                    app_inputs_info[i].name]
            elif app_inputs_info[i].is_image:
                app_inputs_info[i].element_type = Type.u8
                pre_post_processor.input(i).tensor().set_element_type(Type.u8)
            else:
                app_inputs_info[i].element_type = inputs[i].get_element_type()

    # set layout for model input
    for port, info in enumerate(app_inputs_info):
        pre_post_processor.input(port).model().set_layout(info.layout)

    function = pre_post_processor.build()
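A hypothetical call sketch (assumptions, not from the source): AppInputInfo below is a stand-in for the benchmark-tool input-info objects, exposing only the attributes this function reads or writes (name, layout, element_type, is_image); the model object and the input name are illustrative.

class AppInputInfo:
    def __init__(self, name, layout, is_image=False):
        self.name = name
        self.layout = layout
        self.element_type = None
        self.is_image = is_image

app_inputs_info = [AppInputInfo("data", Layout("NCHW"), is_image=True)]
pre_post_processing(model, app_inputs_info,
                    input_precision="", output_precision="",
                    input_output_precision="")
# With empty precision arguments the image-like input falls back to u8 and the
# model-side layout is taken from app_inputs_info, per the branches above.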
Example #25
def test_evaluate_invalid_input_shape():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")

    with pytest.raises(RuntimeError) as e:
        assert func.evaluate(
            [Tensor("float32", Shape([2, 1]))],
            [
                Tensor("float32", Shape([3, 1])),
                Tensor("float32", Shape([3, 1]))
            ],
        )
    assert "must be compatible with the partial shape: {2,1}" in str(e.value)
Example #26
def test_get_and_set_layout():
    shape = [2, 2]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ops.parameter(shape, dtype=np.float32, name="B")

    model = Model(parameter_a + parameter_b, [parameter_a, parameter_b])

    assert layout_helpers.get_layout(model.input(0)) == ov.Layout()
    assert layout_helpers.get_layout(model.input(1)) == ov.Layout()

    layout_helpers.set_layout(model.input(0), ov.Layout("CH"))
    layout_helpers.set_layout(model.input(1), ov.Layout("HW"))

    assert layout_helpers.get_layout(model.input(0)) == ov.Layout("CH")
    assert layout_helpers.get_layout(model.input(1)) == ov.Layout("HW")
Example #27
def test_infer_list_as_inputs(device):
    num_inputs = 4
    input_shape = [2, 1]
    dtype = np.float32
    params = [ops.parameter(input_shape, dtype) for _ in range(num_inputs)]
    model = Model(ops.relu(ops.concat(params, 1)), params)
    core = Core()
    compiled_model = core.compile_model(model, device)

    def check_fill_inputs(request, inputs):
        for input_idx in range(len(inputs)):
            assert np.array_equal(
                request.get_input_tensor(input_idx).data, inputs[input_idx])

    request = compiled_model.create_infer_request()

    inputs = [np.random.normal(size=input_shape).astype(dtype)]
    request.infer(inputs)
    check_fill_inputs(request, inputs)

    inputs = [
        np.random.normal(size=input_shape).astype(dtype)
        for _ in range(num_inputs)
    ]
    request.infer(inputs)
    check_fill_inputs(request, inputs)
Example #28
def test_ngraph_preprocess_mean_scale_convert():
    shape = [2, 2]
    param1 = ops.parameter(shape, dtype=np.int32, name="A")
    param2 = ops.parameter(shape, dtype=np.int32, name="B")
    function = Model([param1, param2], [param1, param2], "TestFunction")

    @custom_preprocess_function
    def custom_preprocess(output: Output):
        return ops.abs(output)

    p = PrePostProcessor(function)
    inp2 = p.input(1)
    inp2.tensor().set_element_type(Type.i32)
    inp2.preprocess().convert_element_type(Type.f32).mean(1.).scale(2.)
    inp1 = p.input(0)
    inp1.preprocess().convert_element_type(
        Type.f32).mean(1.).custom(custom_preprocess)
    function = p.build()

    input_data1 = np.array([[0, 1], [2, -2]]).astype(np.int32)
    input_data2 = np.array([[1, 3], [5, 7]]).astype(np.int32)
    expected_output1 = np.array([[1, 0], [1, 3]]).astype(np.float32)
    expected_output2 = np.array([[0, 1], [2, 3]]).astype(np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function)
    [output1, output2] = computation(input_data1, input_data2)
    assert np.equal(output1, expected_output1).all()
    assert np.equal(output2, expected_output2).all()
Example #29
def test_ngraph_preprocess_dump():
    shape = [1, 3, 224, 224]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
    model = parameter_a
    function = Model(model, [parameter_a], "TestFunction")

    p = PrePostProcessor(function)
    p.input().tensor()\
        .set_layout(ov.Layout("NHWC"))\
        .set_element_type(Type.u8)\
        .set_spatial_dynamic_shape()
    p.input().preprocess()\
        .convert_element_type(Type.f32)\
        .reverse_channels()\
        .mean([1, 2, 3])\
        .scale([4, 5, 6])\
        .resize(ResizeAlgorithm.RESIZE_LINEAR)
    p.input().model().set_layout(ov.Layout("NCHW"))
    p_str = str(p)
    print(p)
    assert "Pre-processing steps (5):" in p_str
    assert "convert type (f32):" in p_str
    assert "reverse channels:" in p_str
    assert "mean (1,2,3):" in p_str
    assert "scale (4,5,6):" in p_str
    assert "resize to model width/height:" in p_str
    assert "Implicit pre-processing steps (1):" in p_str
    assert "convert layout " + ov.Layout("NCHW").to_string() in p_str
Example #30
def test_ngraph_preprocess_postprocess_layout():
    shape = [1, 1, 3, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a
    function = Model(model, [parameter_a], "TestFunction")
    layout1 = ov.Layout("NCWH")
    layout2 = ov.Layout("NCHW")

    p = PrePostProcessor(function)
    inp = p.input()
    inp.tensor().set_layout(layout1)
    inp.preprocess().mean(1.).convert_layout(layout2).reverse_channels()
    out = p.output()
    out.postprocess().convert_layout([0, 1, 2, 3])
    function = p.build()

    input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32)
    expected_output = np.array([[[[0, 3, 6], [1, 4, 7], [2, 5, 8]]]]).astype(np.float32)

    runtime = get_runtime()
    computation = runtime.computation(function)
    output = computation(input_data)
    assert np.equal(output, expected_output).all()