Code example #1
    def test_scale_vector3(self):
        argv = Namespace(mean_scale_values={
            'input1': {
                'scale': np.array([2., 4., 8.]),
                'mean': None
            }
        },
                         scale=None)
        function = create_function2(shape1=[1, 3, 224, 224])
        process_function(ov_function=function, argv=argv)
        op_node = list(function.get_parameters()[0].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide'
                        or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node,
                                  expected=[2., 4., 8.],
                                  shape=[1, 3, 1, 1])

        # Verify that input2 is not affected
        op_node = list(function.get_parameters()[1].output(
            0).get_target_inputs())[0].get_node()
        self.assertEqual(op_node.get_type_name(), 'Relu')

        # Verify that the guessed layout (?C??) does not appear in input1
        self.assertEqual(function.get_parameters()[0].layout, Layout())
Code example #2
    def test_scale_vector4_layout(self):
        argv = Namespace(mean_scale_values={
            'input1': {
                'scale': np.array([2., 4., 8., 9.]),
                'mean': None
            }
        },
                         layout_values={'input1': {
                             'source_layout': 'nhwc'
                         }},
                         scale=None)
        function = create_function2(
            shape1=[1, 3, 3, 4])  # Use layout to determine channels dim

        process_function(ov_function=function, argv=argv)
        op_node = list(function.get_parameters()[0].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide'
                        or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node,
                                  expected=[2., 4., 8., 9.],
                                  shape=[1, 1, 1, 4])

        # Verify that input2 is not affected
        op_node = list(function.get_parameters()[1].output(
            0).get_target_inputs())[0].get_node()
        self.assertEqual(op_node.get_type_name(), 'Relu')

        # Verify that layout (NHWC) now appears in input1
        self.assertEqual(function.get_parameters()[0].layout, Layout('nhwc'))
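
For reference, the same per-channel scale can also be applied directly through the preprocessing API rather than via the MO argv/process_function path exercised above. The snippet below is a minimal hedged sketch assuming the 2022.x openvino.preprocess API; the model and its names are made up for illustration:

import numpy as np
import openvino.runtime.opset8 as ops
from openvino.preprocess import PrePostProcessor
from openvino.runtime import Layout, Model

# Hypothetical one-input model standing in for create_function2(shape1=[1, 3, 3, 4]).
param = ops.parameter([1, 3, 3, 4], np.float32, name="input1")
model = Model(ops.relu(param), [param], "scale_demo")

ppp = PrePostProcessor(model)
ppp.input().tensor().set_layout(Layout("NHWC"))   # tells preprocessing where 'C' is
ppp.input().preprocess().scale([2., 4., 8., 9.])  # one scale value per channel
model = ppp.build()  # inserts the element-wise Divide/Multiply asserted above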
Code example #3
def test_batched_tensors(device):
    core = Core()
    # TODO: remove when plugins support set_input_tensors
    core.register_plugin("openvino_template_plugin", "TEMPLATE")

    batch = 4
    one_shape = [1, 2, 2, 2]
    one_shape_size = np.prod(one_shape)
    batch_shape = [batch, 2, 2, 2]

    data1 = ops.parameter(batch_shape, np.float32)
    data1.set_friendly_name("input0")
    data1.get_output_tensor(0).set_names({"tensor_input0"})
    data1.set_layout(Layout("N..."))

    constant = ops.constant([1], np.float32)

    op1 = ops.add(data1, constant)
    op1.set_friendly_name("Add0")

    res1 = ops.result(op1)
    res1.set_friendly_name("Result0")
    res1.get_output_tensor(0).set_names({"tensor_output0"})

    model = Model([res1], [data1])

    compiled = core.compile_model(model, "TEMPLATE")

    req = compiled.create_infer_request()

    # Allocate 8 chunks; point the 'user tensors' at chunks 0, 2, 4, 6
    buffer = np.zeros([batch * 2, *batch_shape[1:]], dtype=np.float32)

    tensors = []
    for i in range(batch):
        # non-contiguous memory: use every other chunk (i * 2)
        tensors.append(
            Tensor(np.expand_dims(buffer[i * 2], 0), shared_memory=True))

    req.set_input_tensors(tensors)

    with pytest.raises(RuntimeError) as e:
        req.get_tensor("tensor_input0")
    assert "get_tensor shall not be used together with batched set_tensors/set_input_tensors" in str(
        e.value)

    actual_tensor = req.get_tensor("tensor_output0")
    actual = actual_tensor.data
    for test_num in range(0, 5):
        for i in range(0, batch):
            tensors[i].data[:] = test_num + 10

        req.infer()  # Adds '1' to each element

        # Reference values for each batch:
        _tmp = np.array([test_num + 11] * one_shape_size,
                        dtype=np.float32).reshape([2, 2, 2])

        for j in range(0, batch):
            assert np.array_equal(actual[j], _tmp)
Code example #4
    def test_guess_layout_reverse_channels_incorrect_pos(self):
        argv = Namespace(reverse_input_channels=True,
                         mean_scale_values=None,
                         scale=None)
        function = create_function2(shape1=[1, 4, 224, 224],
                                    shape2=[1, 224, 224, 2])
        function.get_parameters()[0].layout = Layout("NCHW")
        function.get_parameters()[1].layout = Layout("NHWC")
        process_function(ov_function=function, argv=argv)
        # Nothing has been applied
        op_node0 = list(function.get_parameters()[0].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node0.get_type_name() == 'Relu')
        op_node1 = list(function.get_parameters()[1].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node1.get_type_name() == 'Relu')
Code example #5
    def test_reverse_input_channels_func_layout(self):
        argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)
        function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3])
        function.get_parameters()[0].layout = Layout("NCHW")
        function.get_parameters()[1].layout = Layout("NHWC")
        process_function(ov_function=function,
                         argv=argv)
        # Verify that some operations are inserted.
        # In the future, consider using a mock PrePostProcessor to verify that 'reverse_channels' was called
        op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node0.get_type_name() != 'Relu')
        op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node1.get_type_name() != 'Relu')

        # Verify that guessed layouts did not replace the layouts of input1, input2
        self.assertEqual(function.get_parameters()[0].layout, Layout("NCHW"))
        self.assertEqual(function.get_parameters()[1].layout, Layout("NHWC"))
Code example #6
def test_get_batch():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    param = func.get_parameters()[0]
    param.set_layout(Layout("NC"))
    assert get_batch(func) == 2
Code example #7
def test_batched_tensors(device):
    batch = 4
    one_shape = Shape([1, 2, 2, 2])
    batch_shape = Shape([batch, 2, 2, 2])
    one_shape_size = np.prod(one_shape)

    core = Core()

    core.register_plugin("openvino_template_plugin", "TEMPLATE")

    data1 = ops.parameter(batch_shape, np.float32)
    data1.set_friendly_name("input0")
    data1.get_output_tensor(0).set_names({"tensor_input0"})
    data1.set_layout(Layout("N..."))

    constant = ops.constant([1], np.float32)

    op1 = ops.add(data1, constant)
    op1.set_friendly_name("Add0")

    res1 = ops.result(op1)
    res1.set_friendly_name("Result0")
    res1.get_output_tensor(0).set_names({"tensor_output0"})

    model = Model([res1], [data1])

    compiled = core.compile_model(model, "TEMPLATE")

    buffer = np.zeros([one_shape_size * batch * 2], dtype=np.float32)

    req = compiled.create_infer_request()

    tensors = []

    for i in range(0, batch):
        _start = i * one_shape_size * 2
        # Use the special Tensor constructor: it creates a Tensor from a
        # pointer, so it only needs a one-element slice of the original
        # buffer plus the shape to "crop" to.
        tensor = Tensor(buffer[_start:(_start + 1)], one_shape)
        tensors.append(tensor)

    req.set_input_tensors(tensors)  # using list overload!

    actual_tensor = req.get_tensor("tensor_output0")
    actual = actual_tensor.data
    for test_num in range(0, 5):
        for i in range(0, batch):
            tensors[i].data[:] = test_num + 10

        req.infer()  # Adds '1' to each element

        # Reference values for each batch:
        _tmp = np.array([test_num + 11] * one_shape_size,
                        dtype=np.float32).reshape([2, 2, 2])

        for j in range(0, batch):
            assert np.array_equal(actual[j], _tmp)
Code example #8
    def test_reverse_channels_bad_layout(self):
        argv = Namespace(reverse_input_channels=True,
                         mean_scale_values=None,
                         scale=None)
        function = create_function2(shape1=[1, 224, 224, 3],
                                    shape2=[1, 4, 224, 224])
        function.get_parameters()[0].layout = Layout("NDHWC")
        with self.assertRaisesRegex(Error, '.*input1.*'):
            process_function(ov_function=function, argv=argv)
Code example #9
    def test_reverse_input_channels_2_channels(self):
        argv = Namespace(reverse_input_channels=True,
                         mean_scale_values=None,
                         scale=None)
        function = create_function2(shape1=[1, 224, 224, 2],
                                    shape2=[1, 3, 224, 224])
        process_function(ov_function=function, argv=argv)
        # Verify that some operations are inserted into input2.
        op_node0 = list(function.get_parameters()[0].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node0.get_type_name() == 'Relu')
        op_node1 = list(function.get_parameters()[1].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node1.get_type_name() != 'Relu')

        # Verify that guessed layouts do not appear in input1, input2
        self.assertEqual(function.get_parameters()[0].layout, Layout())
        self.assertEqual(function.get_parameters()[1].layout, Layout())
Code example #10
File: utils.py  Project: yeonbok/openvino
def prepare_image(image,
                  layout,
                  dst_shape=None,
                  central_fraction=None,
                  grayscale=False):
    if central_fraction:
        image = crop(image, central_fraction)

    if dst_shape:
        image = cv.resize(image, dst_shape[::-1])

    if grayscale:
        image = np.expand_dims(image, 2)

    if layout == Layout('NCHW') or layout == Layout('CHW'):
        return image.transpose(2, 0, 1)

    return image
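
A short hedged usage sketch for the helper above (the image path is hypothetical; OpenCV and openvino.runtime are assumed to be available):

import cv2 as cv
from openvino.runtime import Layout

bgr = cv.imread("sample.jpg")  # HWC, BGR, uint8
chw = prepare_image(bgr, Layout("NCHW"), dst_shape=(224, 224))
print(chw.shape)  # (3, 224, 224): resized, then transposed to channels-first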
Code example #11
def test_set_batch_default_batch_size():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    func_param1 = func.get_parameters()[0]
    func_param1.set_layout(Layout("NC"))
    set_batch(func)
    assert func.is_dynamic()
Code example #12
def test_get_batch_CHWN():
    param1 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data2")
    param3 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data3")
    add = ops.add(param1, param2)
    add2 = ops.add(add, param3)
    func = Model(add2, [param1, param2, param3], "TestFunction")
    param = func.get_parameters()[0]
    param.set_layout(Layout("CHWN"))
    assert get_batch(func) == 4
Code example #13
    def test_layout_output(self):
        argv = Namespace(mean_scale_values=None,
                         layout_values={
                             'res1': {
                                 'source_layout': 'nchw',
                                 'target_layout': 'nhwc'
                             },
                             'res2a': {
                                 'source_layout': 'ncdhw'
                             }
                         },
                         scale=None)
        function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3, 3])

        process_function(ov_function=function, argv=argv)
        op_node = function.get_results()[0].input(0).get_source_output().get_node()
        self.assertEqual(op_node.get_type_name(), 'Transpose')

        self.assertEqual(function.get_results()[0].layout, Layout('nhwc'))
        self.assertEqual(function.get_results()[1].layout, Layout('ncdhw'))
Code example #14
    def test_friendly_name(self):
        argv = Namespace(mean_scale_values={'input1': {'mean': np.array([2., 4., 8.]), 'scale': None}},
                         layout_values={'input1': {'source_layout': 'nchw'}},
                         scale=None)
        function = create_function1(shape1=[1, 3, 224, 224])
        process_function(ov_function=function, argv=argv)
        op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')
        self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])

        # Verify that layout (nchw) now appears in input1
        self.assertEqual(function.get_parameters()[0].layout, Layout('nchw'))
Code example #15
File: image_loader.py  Project: yeonbok/openvino
    def get_layout(self, input_node=None):
        if self._layout is not None:
            if 'C' not in self._layout or 'H' not in self._layout or 'W' not in self._layout:
                raise ValueError('Unexpected {} layout'.format(self._layout))
            if self._shape is not None and 'N' in self._layout and len(
                    self._shape) == 3:
                self._layout = self._layout[1:]
            self._layout = Layout(self._layout)
            return

        if input_node and hasattr(input_node.graph, 'meta_data') \
                and input_node.graph.meta_data.get('layout', None) not in [None, '()']:
            layout_from_ir = get_layout_values(
                input_node.graph.meta_data.get('layout', None))
            if layout_from_ir is not None:
                layout_from_ir = layout_from_ir[next(
                    iter(layout_from_ir))].get('source_layout', None)
                self._layout = Layout(layout_from_ir)
                return

        image_colors_dim = (Dimension(3), Dimension(1))
        num_dims = len(self._shape)
        if num_dims == 4:
            if self._shape[1] in image_colors_dim:
                self._layout = Layout("NCHW")
            elif self._shape[3] in image_colors_dim:
                self._layout = Layout("NHWC")
        elif num_dims == 3:
            if self._shape[0] in image_colors_dim:
                self._layout = Layout("CHW")
            elif self._shape[2] in image_colors_dim:
                self._layout = Layout("HWC")
        logger.info(f'Layout value is set to {self._layout}')
Code example #16
File: image_loader.py  Project: terfendail/openvino
    def get_layout(self, input_node=None):
        if self._layout is not None:
            if 'C' not in self._layout or 'H' not in self._layout or 'W' not in self._layout:
                raise ValueError('Unexpected {} layout'.format(self._layout))
            if self._shape is not None and 'N' in self._layout and len(
                    self._shape) == 3:
                self._layout = self._layout[1:]
            self._layout = Layout(self._layout)
            return

        if input_node:
            layout_from_ir = input_node.graph.graph.get('layout', None)
            if layout_from_ir is not None:
                if self._shape is not None and 'N' in layout_from_ir and len(
                        self._shape) == 3:
                    layout_from_ir = layout_from_ir[1:]
                self._layout = Layout(layout_from_ir)
                return

        image_colors_dim = (Dimension(3), Dimension(1))
        num_dims = len(self._shape)
        if num_dims == 4:
            if self._shape[1] in image_colors_dim:
                self._layout = Layout("NCHW")
            elif self._shape[3] in image_colors_dim:
                self._layout = Layout("NHWC")
        elif num_dims == 3:
            if self._shape[0] in image_colors_dim:
                self._layout = Layout("CHW")
            elif self._shape[2] in image_colors_dim:
                self._layout = Layout("HWC")
Code example #17
    def test_no_param_name(self):
        argv = Namespace(mean_scale_values=list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])),
                                                          (np.array([7., 8., 9.]), None)],
                                                         dtype='object')), scale=None)
        function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3])
        process_function(ov_function=function, argv=argv)

        op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')
        self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])

        op_node = list(op_node.output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node, expected=[2., 4., 6.], shape=[1, 3, 1, 1])

        op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')
        self.check_mean_constant(op_node, expected=[7., 8., 9.], shape=[1, 1, 1, 3])

        # Verify that guessed layouts do not appear in the inputs
        self.assertEqual(function.get_parameters()[0].layout, Layout())
        self.assertEqual(function.get_parameters()[1].layout, Layout())
Code example #18
    def test_mean_scale_with_layout_dynamic(self):
        argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]),
                                                        'scale': np.array([2., 4., 8., 9.])}},
                         scale=None)
        function = create_function2(shape2=[-1, -1, -1, -1])
        function.get_parameters()[1].layout = Layout("NHWC")
        process_function(ov_function=function, argv=argv)
        # Verify the order: first 'subtract mean', then 'scale'
        op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')
        self.check_mean_constant(op_node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4])

        op_node = list(op_node.output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])

        # Verify that input1 is not affected
        op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
        self.assertEqual(op_node.get_type_name(), 'Relu')

        # Verify that the layout is still present in the function after preprocessing
        self.assertEqual(function.get_parameters()[1].layout, Layout("NHWC"))
Code example #19
File: layout.py  Project: yury-intel/openvino
def get_dim_from_layout(node: Node, dim: str):
    """
    Gets the index of a dimension from the layout specified for the node.
    :param node: node to get the dimension for.
    :param dim: name of the dimension to get the index for.
    :return: tuple of the dimension index and a bool flag indicating whether the node has a layout specified.
    """
    layout = None
    graph = node.graph
    if 'layout_values' in graph.graph['cmd_params'] and graph.graph[
            'cmd_params'].layout_values:
        layout_values = graph.graph['cmd_params'].layout_values.copy()
        if '' in layout_values:
            in_nodes = graph.get_op_nodes(op='Parameter')
            if len(in_nodes) == 1:
                in_node = in_nodes[0]
                layout_values[in_node.soft_get('name',
                                               in_node.id)] = layout_values['']
                del layout_values['']
        name = node.soft_get('name', node.id)
        if name in layout_values:
            if layout_values[name]['source_layout']:
                layout = layout_values[name]['source_layout']

    if layout:
        from openvino.runtime import Layout  # pylint: disable=no-name-in-module,import-error

        layout_parsed = Layout(layout)
        has_dim = layout_parsed.has_name(dim)
        if has_dim:
            idx = layout_parsed.get_index_by_name(dim)
            if idx < 0:
                idx = len(node.shape) + idx
            return idx, True
        else:
            return None, True
    else:
        return None, False
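
The helper above relies on two Layout queries, has_name and get_index_by_name. A quick hedged illustration of their behaviour (the negative-index case is exactly what the `idx < 0` branch above normalizes):

from openvino.runtime import Layout

layout = Layout("NHWC")
assert layout.has_name("C") and layout.get_index_by_name("C") == 3
assert not layout.has_name("D")

# With an ellipsis in the layout the returned index may be negative (counted
# from the end); the code above then adds len(node.shape) to get a front index.
print(Layout("N...C").get_index_by_name("C"))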
Code example #20
    def create_infer_requests(self, model, path, batch_sizes=None):
        if batch_sizes is not None:
            requests = []
            for parameter in model.get_parameters():
                parameter.set_layout(Layout("BC"))
            for b_s in batch_sizes:
                set_batch(model, b_s)
                compiled_model = self.ie.compile_model(model, device_name=self.device)
                requests.append(compiled_model.create_infer_request())
        else:
            compiled_model = self.ie.compile_model(model, device_name=self.device)
            requests = compiled_model.create_infer_request()
        log.info('The WaveRNN model {} is loaded to {}'.format(path, self.device))
        return requests
Code example #21
def test_set_batch_int():
    model = create_test_model()
    model_param1 = model.get_parameters()[0]
    model_param2 = model.get_parameters()[1]
    # batch == 2
    model_param1.set_layout(Layout("NC"))
    assert get_batch(model) == 2
    # set batch to 1
    set_batch(model, 1)
    assert get_batch(model) == 1
    # check if shape of param 1 has changed
    assert model_param1.get_output_shape(0) == PartialShape([1, 1])
    # check if shape of param 2 has not changed
    assert model_param2.get_output_shape(0) == PartialShape([2, 1])
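
`create_test_model` is not shown in this listing; judging from the assertions (two [2, 1] float32 parameters added together), a hypothetical equivalent mirroring code example #24 below would be:

import numpy as np
import openvino.runtime.opset8 as ops
from openvino.runtime import Model, Shape

def create_test_model():
    # Hypothetical reconstruction of the helper used by the test above.
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    return Model(ops.add(param1, param2), [param1, param2], "TestModel")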
Code example #22
    def test_scale_vector3_layout_empty_input_name(self):
        argv = Namespace(mean_scale_values=list(np.array([(None, np.array([2., 4., 8.]))],
                                                         dtype='object')),
                         layout_values={'': {'source_layout': 'nchw'}},
                         scale=None)
        function = create_function1(shape1=[1, 3, 3, 3])  # Use layout to determine channels dim

        process_function(ov_function=function, argv=argv)
        op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])

        # Verify that layout (nchw) now appears in input1
        self.assertEqual(function.get_parameters()[0].layout, Layout('nchw'))
Code example #23
    def test_reverse_channels_and_mean_scale(self):
        argv = Namespace(reverse_input_channels=True,
                         mean_scale_values={
                             'input2a': {
                                 'mean': np.array([1., 2., 3.]),
                                 'scale': np.array([2., 4., 8.])
                             }
                         },
                         scale=None)
        function = create_function2(shape2=[1, 3, 224, 224])
        process_function(ov_function=function, argv=argv)

        # Verify the order: first Gather (channel reversal), then subtract 'mean', then 'scale'
        gather = list(function.get_parameters()[1].output(
            0).get_target_inputs())[0].get_node()
        self.assertTrue(gather.get_type_name() == 'Gather')
        range_node = gather.input(1).get_source_output().get_node()
        self.assertTrue(range_node.get_type_name() == 'Range')
        start = range_node.input(0).get_source_output().get_node()
        end = range_node.input(1).get_source_output().get_node()
        step = range_node.input(2).get_source_output().get_node()
        self.check_constant(start, expected=[2], shape=[])
        self.check_constant(end, expected=[-1], shape=[])
        self.check_constant(step, expected=[-1], shape=[])
        axes = gather.input(2).get_source_output().get_node()
        self.check_constant(axes, expected=[1], shape=[1])

        op_node = list(gather.output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Subtract'
                        or op_node.get_type_name() == 'Add')
        self.check_mean_constant(op_node,
                                 expected=[1., 2., 3.],
                                 shape=[1, 3, 1, 1])

        op_node = list(op_node.output(0).get_target_inputs())[0].get_node()
        self.assertTrue(op_node.get_type_name() == 'Divide'
                        or op_node.get_type_name() == 'Multiply')
        self.check_scale_constant(op_node,
                                  expected=[2., 4., 8.],
                                  shape=[1, 3, 1, 1])

        # Verify that input1 is not affected
        op_node = list(function.get_parameters()[0].output(
            0).get_target_inputs())[0].get_node()
        self.assertEqual(op_node.get_type_name(), 'Relu')

        # Verify that the guessed layout (?C??) does not appear in input2
        self.assertEqual(function.get_parameters()[1].layout, Layout())
Code example #24
def test_set_batch_int():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    func_param1 = func.get_parameters()[0]
    func_param2 = func.get_parameters()[1]
    # batch == 2
    func_param1.set_layout(Layout("NC"))
    assert get_batch(func) == 2
    # set batch to 1
    set_batch(func, 1)
    assert get_batch(func) == 1
    # check if shape of param 1 has changed
    assert func_param1.get_output_shape(0) == PartialShape([1, 1])
    # check if shape of param 2 has not changed
    assert func_param2.get_output_shape(0) == PartialShape([2, 1])
Code example #25
    def generate_dataset(self):
        height = self._shape[self._layout.get_index_by_name('H')]
        width = self._shape[self._layout.get_index_by_name('W')]
        self._initialize_params(height, width)

        # to avoid multiprocessing error: can't pickle openvino.pyopenvino.Layout objects
        self._layout = str(self._layout)

        with Pool(processes=self._cpu_count) as pool:
            params = pool.map(self._generate_category,
                              [1e-5] * self._categories)

        instances_weights = np.repeat(self._weights, self._instances, axis=0)
        weight_per_img = np.tile(instances_weights, (self._categories, 1))
        repeated_params = np.repeat(params,
                                    self._weights.shape[0] * self._instances,
                                    axis=0)
        repeated_params = repeated_params[:self.subset_size]
        weight_per_img = weight_per_img[:self.subset_size]
        assert weight_per_img.shape[0] == len(
            repeated_params) == self.subset_size

        splits = min(self._cpu_count, self.subset_size)
        params_per_proc = np.array_split(repeated_params, splits)
        weights_per_proc = np.array_split(weight_per_img, splits)

        generation_params = []
        offset = 0
        for param, w in zip(params_per_proc, weights_per_proc):
            indices = list(range(offset, offset + len(param)))
            offset += len(param)
            generation_params.append((param, w, height, width, indices))

        with Pool(processes=self._cpu_count) as pool:
            pool.starmap(self._generate_image_batch, generation_params)

        self._layout = Layout(self._layout)
Code example #26
def main() -> int:
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    args = parse_args()

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()

    # --------------------------- Step 2. Read a model --------------------------------------------------------------------
    log.info(f'Reading the model: {args.model}')
    # (.xml and .bin files) or (.onnx file)
    model = core.read_model(args.model)

    if len(model.inputs) != 1:
        log.error('Sample supports only single input topologies')
        return -1

    if len(model.outputs) != 1:
        log.error('Sample supports only single output topologies')
        return -1


    # --------------------------- Step 3. Set up input --------------------------------------------------------------------
    # Read input images
    images = [cv2.imread(image_path) for image_path in args.input]

    # Resize images to model input dims
    _, _, h, w = model.input().shape
    resized_images = [cv2.resize(image, (w, h)) for image in images]

    # Add N dimension
    input_tensors = [np.expand_dims(image, 0) for image in resized_images]

    # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
    ppp = PrePostProcessor(model)

    # 1) Set input tensor information:
    # - input() provides information about a single model input
    # - precision of tensor is supposed to be 'u8'
    # - layout of data is 'NHWC'
    ppp.input().tensor() \
        .set_element_type(Type.u8) \
        .set_layout(Layout('NHWC'))  # noqa: N400

    # 2) Here we assume the model has 'NCHW' layout for input
    ppp.input().model().set_layout(Layout('NCHW'))

    # 3) Set output tensor information:
    # - precision of tensor is supposed to be 'f32'
    ppp.output().tensor().set_element_type(Type.f32)

    # 4) Apply preprocessing, modifying the original 'model'
    model = ppp.build()

    # --------------------------- Step 5. Loading model to the device -----------------------------------------------------
    log.info('Loading the model to the plugin')
    compiled_model = core.compile_model(model, args.device)

    # --------------------------- Step 6. Create infer request queue ------------------------------------------------------
    log.info('Starting inference in asynchronous mode')
    infer_queue = AsyncInferQueue(compiled_model, len(input_tensors))
    infer_queue.set_callback(completion_callback)

    # --------------------------- Step 7. Do inference --------------------------------------------------------------------
    for i, input_tensor in enumerate(input_tensors):
        infer_queue.start_async({0: input_tensor}, args.input[i])

    infer_queue.wait_all()
    # ----------------------------------------------------------------------------------------------------------------------
    log.info(
        'This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n'
    )
    return 0
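
`completion_callback` is defined elsewhere in the sample. As a hedged sketch, a compatible callback only needs to accept the finished InferRequest plus the userdata passed to start_async (here, the image path); the body below is illustrative, not the sample's actual implementation:

import logging as log
import numpy as np

def completion_callback(infer_request, image_path):
    # Single-output classification model: take the scores for the first (and
    # only) image in the batch and report the best class id.
    scores = infer_request.get_output_tensor(0).data[0]
    top_class = int(np.argmax(scores))
    log.info(f'{image_path}: class id {top_class}, score {scores[top_class]:.4f}')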
Code example #27
File: common.py  Project: rkuo2000/yolov5
    def __init__(self,
                 weights='yolov5s.pt',
                 device=torch.device('cpu'),
                 dnn=False,
                 data=None,
                 fp16=False):
        # Usage:
        #   PyTorch:              weights = *.pt
        #   TorchScript:                    *.torchscript
        #   ONNX Runtime:                   *.onnx
        #   ONNX OpenCV DNN:                *.onnx with --dnn
        #   OpenVINO:                       *.xml
        #   CoreML:                         *.mlmodel
        #   TensorRT:                       *.engine
        #   TensorFlow SavedModel:          *_saved_model
        #   TensorFlow GraphDef:            *.pb
        #   TensorFlow Lite:                *.tflite
        #   TensorFlow Edge TPU:            *_edgetpu.tflite
        from models.experimental import attempt_download, attempt_load  # scoped to avoid circular import

        super().__init__()
        w = str(weights[0] if isinstance(weights, list) else weights)
        pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(
            w)  # get backend
        w = attempt_download(w)  # download if not local
        fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
        stride, names = 32, [f'class{i}'
                             for i in range(1000)]  # assign defaults
        if data:  # assign class names (optional)
            with open(data, errors='ignore') as f:
                names = yaml.safe_load(f)['names']

        if pt:  # PyTorch
            model = attempt_load(weights if isinstance(weights, list) else w,
                                 device=device)
            stride = max(int(model.stride.max()), 32)  # model stride
            names = model.module.names if hasattr(
                model, 'module') else model.names  # get class names
            model.half() if fp16 else model.float()
            self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
        elif jit:  # TorchScript
            LOGGER.info(f'Loading {w} for TorchScript inference...')
            extra_files = {'config.txt': ''}  # model metadata
            model = torch.jit.load(w, _extra_files=extra_files)
            model.half() if fp16 else model.float()
            if extra_files['config.txt']:
                d = json.loads(extra_files['config.txt'])  # extra_files dict
                stride, names = int(d['stride']), d['names']
        elif dnn:  # ONNX OpenCV DNN
            LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
            check_requirements(('opencv-python>=4.5.4', ))
            net = cv2.dnn.readNetFromONNX(w)
        elif onnx:  # ONNX Runtime
            LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
            cuda = torch.cuda.is_available()
            check_requirements(
                ('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
            import onnxruntime
            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'
                         ] if cuda else ['CPUExecutionProvider']
            session = onnxruntime.InferenceSession(w, providers=providers)
            meta = session.get_modelmeta().custom_metadata_map  # metadata
            if 'stride' in meta:
                stride, names = int(meta['stride']), eval(meta['names'])
        elif xml:  # OpenVINO
            LOGGER.info(f'Loading {w} for OpenVINO inference...')
            check_requirements(
                ('openvino', )
            )  # requires openvino-dev: https://pypi.org/project/openvino-dev/
            from openvino.runtime import Core, Layout, get_batch
            ie = Core()
            if not Path(w).is_file():  # if not *.xml
                w = next(Path(w).glob(
                    '*.xml'))  # get *.xml file from *_openvino_model dir
            network = ie.read_model(model=w,
                                    weights=Path(w).with_suffix('.bin'))
            if network.get_parameters()[0].get_layout().empty:
                network.get_parameters()[0].set_layout(Layout("NCHW"))
            batch_dim = get_batch(network)
            if batch_dim.is_static:
                batch_size = batch_dim.get_length()
            executable_network = ie.compile_model(
                network,
                device_name="CPU")  # device_name="MYRIAD" for Intel NCS2
            output_layer = next(iter(executable_network.outputs))
            meta = Path(w).with_suffix('.yaml')
            if meta.exists():
                stride, names = self._load_metadata(meta)  # load metadata
        elif engine:  # TensorRT
            LOGGER.info(f'Loading {w} for TensorRT inference...')
            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
            check_version(trt.__version__, '7.0.0',
                          hard=True)  # require tensorrt>=7.0.0
            Binding = namedtuple('Binding',
                                 ('name', 'dtype', 'shape', 'data', 'ptr'))
            logger = trt.Logger(trt.Logger.INFO)
            with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                model = runtime.deserialize_cuda_engine(f.read())
            bindings = OrderedDict()
            fp16 = False  # default updated below
            for index in range(model.num_bindings):
                name = model.get_binding_name(index)
                dtype = trt.nptype(model.get_binding_dtype(index))
                shape = tuple(model.get_binding_shape(index))
                data = torch.from_numpy(np.empty(
                    shape, dtype=np.dtype(dtype))).to(device)
                bindings[name] = Binding(name, dtype, shape, data,
                                         int(data.data_ptr()))
                if model.binding_is_input(index) and dtype == np.float16:
                    fp16 = True
            binding_addrs = OrderedDict(
                (n, d.ptr) for n, d in bindings.items())
            context = model.create_execution_context()
            batch_size = bindings['images'].shape[0]
        elif coreml:  # CoreML
            LOGGER.info(f'Loading {w} for CoreML inference...')
            import coremltools as ct
            model = ct.models.MLModel(w)
        else:  # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
            if saved_model:  # SavedModel
                LOGGER.info(
                    f'Loading {w} for TensorFlow SavedModel inference...')
                import tensorflow as tf
                keras = False  # assume TF1 saved_model
                model = tf.keras.models.load_model(
                    w) if keras else tf.saved_model.load(w)
            elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
                LOGGER.info(
                    f'Loading {w} for TensorFlow GraphDef inference...')
                import tensorflow as tf

                def wrap_frozen_graph(gd, inputs, outputs):
                    x = tf.compat.v1.wrap_function(
                        lambda: tf.compat.v1.import_graph_def(gd, name=""),
                        [])  # wrapped
                    ge = x.graph.as_graph_element
                    return x.prune(tf.nest.map_structure(ge, inputs),
                                   tf.nest.map_structure(ge, outputs))

                gd = tf.Graph().as_graph_def()  # graph_def
                with open(w, 'rb') as f:
                    gd.ParseFromString(f.read())
                frozen_func = wrap_frozen_graph(gd,
                                                inputs="x:0",
                                                outputs="Identity:0")
            elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
                try:  # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
                    from tflite_runtime.interpreter import Interpreter, load_delegate
                except ImportError:
                    import tensorflow as tf
                    Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,
                if edgetpu:  # Edge TPU https://coral.ai/software/#edgetpu-runtime
                    LOGGER.info(
                        f'Loading {w} for TensorFlow Lite Edge TPU inference...'
                    )
                    delegate = {
                        'Linux': 'libedgetpu.so.1',
                        'Darwin': 'libedgetpu.1.dylib',
                        'Windows': 'edgetpu.dll'
                    }[platform.system()]
                    interpreter = Interpreter(
                        model_path=w,
                        experimental_delegates=[load_delegate(delegate)])
                else:  # Lite
                    LOGGER.info(
                        f'Loading {w} for TensorFlow Lite inference...')
                    interpreter = Interpreter(
                        model_path=w)  # load TFLite model
                interpreter.allocate_tensors()  # allocate
                input_details = interpreter.get_input_details()  # inputs
                output_details = interpreter.get_output_details()  # outputs
            elif tfjs:
                raise Exception(
                    'ERROR: YOLOv5 TF.js inference is not supported')
            else:
                raise Exception(f'ERROR: {w} is not a supported format')
        self.__dict__.update(locals())  # assign all variables to self
Code example #28
File: utils.py  Project: mikhailk62/openvino
def get_inputs_info(shape_string, data_shape_string, layout_string, batch_size, scale_string, mean_string, inputs):
    input_names = get_input_output_names(inputs)
    shape_map = parse_input_parameters(shape_string, input_names)
    data_shape_map = get_data_shapes_map(data_shape_string, input_names)
    layout_map = parse_input_parameters(layout_string, input_names)
    batch_size = parse_batch_size(batch_size)
    reshape = False
    batch_found = False
    input_info = []
    for i in range(len(inputs)):
        info = AppInputInfo()
        # Input name
        info.name = input_names[i]
        # Input precision
        info.element_type = inputs[i].element_type
        # Shape
        info.original_shape = inputs[i].partial_shape
        if info.name in shape_map.keys():
            info.partial_shape = parse_partial_shape(shape_map[info.name])
            reshape = True
        else:
            info.partial_shape = inputs[i].partial_shape

        # Layout
        if info.name in layout_map.keys():
            info.layout = Layout(layout_map[info.name])
        elif inputs[i].node.layout != Layout():
            info.layout = inputs[i].node.layout
        else:
            image_colors_dim = Dimension(3)
            shape = info.partial_shape
            num_dims = len(shape)
            if num_dims == 4:
                if shape[1] == image_colors_dim:
                    info.layout = Layout("NCHW")
                elif shape[3] == image_colors_dim:
                    info.layout = Layout("NHWC")
            elif num_dims == 3:
                if shape[0] == image_colors_dim:
                    info.layout = Layout("CHW")
                elif shape[2] == image_colors_dim:
                    info.layout = Layout("HWC")

        # Update shape with batch if needed
        if batch_size != 0:
            if batch_size.is_static and data_shape_map:
                 logger.warning(f"Batch size will be ignored. Provide batch deminsion in data_shape parameter.")
            else:
                batch_index = -1
                if info.layout.has_name('N'):
                    batch_index = info.layout.get_index_by_name('N')
                elif info.layout == Layout():
                    supposed_batch = info.partial_shape[0]
                    if supposed_batch.is_dynamic or supposed_batch in [0, 1]:
                        logger.warning(f"Batch dimension is not specified for input '{info.name}'. "
                                        "The first dimension will be interpreted as batch size.")
                        batch_index = 0
                        info.layout = Layout("N...")
                if batch_index != -1 and info.partial_shape[batch_index] != batch_size:
                    info.partial_shape[batch_index] = batch_size
                    reshape = True
                    batch_found = True
                elif batch_index == -1 and not batch_found and i == len(inputs) - 1:
                    raise Exception(f"Batch dimension is not specified for this model!")

        # Data shape
        if info.name in data_shape_map.keys() and info.is_dynamic:
            for p_shape in data_shape_map[info.name]:
                if p_shape.is_dynamic:
                    raise Exception(f"Data shape always should be static, {str(p_shape)} is dynamic.")
                elif info.partial_shape.compatible(p_shape):
                    info.data_shapes.append(p_shape.to_shape())
                else:
                    raise Exception(f"Data shape '{str(p_shape)}' provided for input '{info.name}' "
                                    f"is not compatible with partial shape '{str(info.partial_shape)}' for this input.")
        elif info.name in data_shape_map.keys():
            logger.warning(f"Input '{info.name}' has static shape. Provided data shapes for this input will be ignored.")

        input_info.append(info)

    # Update scale, mean
    scale_map = parse_scale_or_mean(scale_string, input_info)
    mean_map = parse_scale_or_mean(mean_string, input_info)

    for input in input_info:
        if input.name in scale_map:
            input.scale = scale_map[input.name]
        if input.name in mean_map:
            input.mean = mean_map[input.name]

    return input_info, reshape
Code example #29
File: utils.py  Project: mikhailk62/openvino
class AppInputInfo:
    def __init__(self):
        self.element_type = None
        self.layout = Layout()
        self.original_shape = None
        self.partial_shape = None
        self.data_shapes = []
        self.scale = []
        self.mean = []
        self.name = None

    @property
    def is_image(self):
        if str(self.layout) not in [ "[N,C,H,W]", "[N,H,W,C]", "[C,H,W]", "[H,W,C]" ]:
            return False
        return self.channels == 3

    @property
    def is_image_info(self):
        if str(self.layout) != "[N,C]":
            return False
        return self.channels.relaxes(Dimension(2))

    def getDimentionByLayout(self, character):
        if self.layout.has_name(character):
            return self.partial_shape[self.layout.get_index_by_name(character)]
        else:
            return Dimension(0)

    def getDimentionsByLayout(self, character):
        if self.layout.has_name(character):
            d_index = self.layout.get_index_by_name(character)
            dims = []
            for shape in self.data_shapes:
                dims.append(shape[d_index])
            return dims
        else:
            return [0] * len(self.data_shapes)

    @property
    def shapes(self):
        if self.is_static:
            return [self.partial_shape.to_shape()]
        else:
            return self.data_shapes

    @property
    def width(self):
        return len(self.getDimentionByLayout("W"))

    @property
    def widthes(self):
        return self.getDimentionsByLayout("W")

    @property
    def height(self):
        return len(self.getDimentionByLayout("H"))

    @property
    def heights(self):
        return self.getDimentionsByLayout("H")

    @property
    def channels(self):
        return self.getDimentionByLayout("C")

    @property
    def is_static(self):
        return self.partial_shape.is_static

    @property
    def is_dynamic(self):
        return self.partial_shape.is_dynamic
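
A small hedged usage sketch for the class above; the layout string form and the Dimension-to-int comparison are the same ones the is_image property already relies on:

from openvino.runtime import Layout, PartialShape

info = AppInputInfo()
info.name = "data"
info.layout = Layout("NHWC")
info.partial_shape = PartialShape([1, 224, 224, 3])
print(str(info.layout))  # "[N,H,W,C]" -- the form is_image checks against
print(info.channels)     # the Dimension at the 'C' index, i.e. 3
print(info.is_image)     # True: image-like layout with exactly 3 channels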
Code example #30
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    # Parsing and validation of input arguments
    if len(sys.argv) != 3:
        log.info('Usage: <path_to_model> <device_name>')
        return 1

    model_path = sys.argv[1]
    device_name = sys.argv[2]
    labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    number_top = 1
    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()

    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation------------------------------
    log.info(
        f'Loading the network using ngraph function with weights from {model_path}'
    )
    model = create_ngraph_function(model_path)
    # ---------------------------Step 3. Apply preprocessing----------------------------------------------------------
    # Get names of input and output blobs
    ppp = PrePostProcessor(model)
    # 1) Set input tensor information:
    # - input() provides information about a single model input
    # - precision of tensor is supposed to be 'u8'
    # - layout of data is 'NHWC'
    ppp.input().tensor() \
        .set_element_type(Type.u8) \
        .set_layout(Layout('NHWC'))  # noqa: N400

    # 2) Here we assume the model has 'NCHW' layout for input
    ppp.input().model().set_layout(Layout('NCHW'))
    # 3) Set output tensor information:
    # - precision of tensor is supposed to be 'f32'
    ppp.output().tensor().set_element_type(Type.f32)

    # 4) Apply preprocessing, modifying the original 'model'
    model = ppp.build()

    # Set a batch size equal to number of input images
    model.reshape({
        model.input().get_any_name():
        PartialShape(
            (digits.shape[0], model.input().shape[1], model.input().shape[2],
             model.input().shape[3]))
    })

    # ---------------------------Step 4. Loading model to the device-------------------------------------------------------
    log.info('Loading the model to the plugin')
    compiled_model = core.compile_model(model, device_name)

    # ---------------------------Step 5. Prepare input---------------------------------------------------------------------
    n, c, h, w = model.input().shape
    input_data = np.ndarray(shape=(n, c, h, w))
    for i in range(n):
        image = digits[i].reshape(28, 28)
        image = image[:, :, np.newaxis]
        input_data[i] = image

    # ---------------------------Step 6. Do inference----------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = compiled_model.infer_new_request({0: input_data})

    # ---------------------------Step 7. Process output--------------------------------------------------------------------
    predictions = next(iter(results.values()))

    log.info(f'Top {number_top} results: ')
    for i in range(n):
        probs = predictions[i]
        # Get an array of number_top class IDs in descending order of probability
        top_n_indexes = np.argsort(probs)[-number_top:][::-1]

        header = 'classid probability'
        header = header + ' label' if labels else header

        log.info(f'Image {i}')
        log.info('')
        log.info(header)
        log.info('-' * len(header))

        for class_id in top_n_indexes:
            probability_indent = ' ' * (len('classid') - len(str(class_id)) +
                                        1)
            label_indent = ' ' * (len('probability') - 8) if labels else ''
            label = labels[class_id] if labels else ''
            log.info(
                f'{class_id}{probability_indent}{probs[class_id]:.7f}{label_indent}{label}'
            )
        log.info('')

    # ----------------------------------------------------------------------------------------------------------------------
    log.info(
        'This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n'
    )
    return 0