Code Example #1
File: image_loader.py Project: yeonbok/openvino
    def get_layout(self, input_node=None):
        if self._layout is not None:
            if 'C' not in self._layout or 'H' not in self._layout or 'W' not in self._layout:
                raise ValueError('Unexpected {} layout'.format(self._layout))
            if self._shape is not None and 'N' in self._layout and len(
                    self._shape) == 3:
                self._layout = self._layout[1:]
            self._layout = Layout(self._layout)
            return

        if input_node and hasattr(input_node.graph, 'meta_data') \
                and input_node.graph.meta_data.get('layout', None) not in [None, '()']:
            layout_from_ir = get_layout_values(
                input_node.graph.meta_data.get('layout', None))
            if layout_from_ir is not None:
                layout_from_ir = layout_from_ir[next(
                    iter(layout_from_ir))].get('source_layout', None)
                self._layout = Layout(layout_from_ir)
                return

        image_colors_dim = (Dimension(3), Dimension(1))
        num_dims = len(self._shape)
        if num_dims == 4:
            if self._shape[1] in image_colors_dim:
                self._layout = Layout("NCHW")
            elif self._shape[3] in image_colors_dim:
                self._layout = Layout("NHWC")
        elif num_dims == 3:
            if self._shape[0] in image_colors_dim:
                self._layout = Layout("CHW")
            elif self._shape[2] in image_colors_dim:
                self._layout = Layout("HWC")
        logger.info(f'Layout value is set to {self._layout}')
Code Example #2
File: image_loader.py Project: terfendail/openvino
    def get_layout(self, input_node=None):
        if self._layout is not None:
            if 'C' not in self._layout or 'H' not in self._layout or 'W' not in self._layout:
                raise ValueError('Unexpected {} layout'.format(self._layout))
            if self._shape is not None and 'N' in self._layout and len(
                    self._shape) == 3:
                self._layout = self._layout[1:]
            self._layout = Layout(self._layout)
            return

        if input_node:
            layout_from_ir = input_node.graph.graph.get('layout', None)
            if layout_from_ir is not None:
                if self._shape is not None and 'N' in layout_from_ir and len(
                        self._shape) == 3:
                    layout_from_ir = layout_from_ir[1:]
                self._layout = Layout(layout_from_ir)
                return

        image_colors_dim = (Dimension(3), Dimension(1))
        num_dims = len(self._shape)
        if num_dims == 4:
            if self._shape[1] in image_colors_dim:
                self._layout = Layout("NCHW")
            elif self._shape[3] in image_colors_dim:
                self._layout = Layout("NHWC")
        elif num_dims == 3:
            if self._shape[0] in image_colors_dim:
                self._layout = Layout("CHW")
            elif self._shape[2] in image_colors_dim:
                self._layout = Layout("HWC")
Code Example #3
def reshape_network(network, shapes):
    partial_shapes = {}
    for name, shape in shapes.items():
        p_shape = PartialShape(
            [Dimension(d) if not isinstance(d, tuple) else Dimension(d[0], d[1]) for d in shape])
        partial_shapes[name] = p_shape
    network.reshape(partial_shapes)
    return network
Code Example #4
def reshape_model(self, new_shape):
    new_shape = {
        name: PartialShape([
            Dimension(dim) if not isinstance(dim, tuple) else Dimension(
                dim[0], dim[1]) for dim in shape
        ])
        for name, shape in new_shape.items()
    }
    self.model.reshape(new_shape)
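
Both reshape helpers use the same convention: a plain int is a static dimension and a (min, max) tuple is a bounded dynamic one. A hedged sketch of that comprehension on its own (assuming openvino.runtime):

from openvino.runtime import Dimension, PartialShape

shape_spec = [1, 3, (224, 448), (224, 448)]
p_shape = PartialShape(
    [Dimension(d) if not isinstance(d, tuple) else Dimension(d[0], d[1]) for d in shape_spec])
print(p_shape)  # e.g. {1,3,224..448,224..448}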
Code Example #5
def partial_shape_from_tuple(shape: tuple):
    new_shape = []
    for dim in shape:
        if isinstance(dim, tuple):
            assert len(dim) == 2, "Incorrect boundaries of dimension {} in shape {}".format(dim, shape)
            assert dim[0] >= 0, "Incorrect min value of dimension {} in shape {}".format(dim, shape)
            new_shape.append(Dimension(dim[0], dim[1]))
        else:
            assert isinstance(dim, np.int64), "Incorrect type of dimension {} in shape {}".format(dim, shape)
            new_shape.append(Dimension(dim))
    return PartialShape(new_shape)
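
A usage sketch for the helper above (hedged; assumes numpy and openvino.runtime are imported as the helper requires). Note that static dimensions must be numpy int64 values, while bounded dimensions are (min, max) tuples:

import numpy as np

ps = partial_shape_from_tuple((np.int64(1), np.int64(3), (224, 448)))
print(ps)  # e.g. {1,3,224..448}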
Code Example #6
File: utils.py Project: mikhailk62/openvino
def parse_partial_shape(shape_str):
    dims = []
    for dim in shape_str.split(','):
        if '..' in dim:
            bounds = [int(d) for d in dim.split('..')]
            assert len(bounds) == 2
            dims.append(Dimension(bounds[0], bounds[1]))
        elif dim == '?':
            dims.append(Dimension())
        else:
            dims.append(Dimension(int(dim)))
    return PartialShape(dims)
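
Hedged usage of the parser above, matching the min..max / ? grammar it accepts:

ps = parse_partial_shape("1,3,224..448,?")
print(ps)  # e.g. {1,3,224..448,?}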
Code Example #7
File: utils.py Project: mikhailk62/openvino
def get_network_batch_size(inputs_info):
    null_dimension = Dimension(0)
    batch_size = null_dimension
    for info in inputs_info:
        batch_index = info.layout.get_index_by_name('N') if info.layout.has_name('N') else -1
        if batch_index != -1:
            if batch_size == null_dimension:
                batch_size = info.partial_shape[batch_index]
            elif batch_size != info.partial_shape[batch_index]:
                raise Exception("Can't deterimine batch size: batch is different for different inputs!")
    if batch_size == null_dimension:
        batch_size = Dimension(1)
    return batch_size
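
The batch lookup above relies on two Layout queries; a minimal sketch of just those calls (assuming openvino.runtime):

from openvino.runtime import Layout

layout = Layout("NCHW")
print(layout.has_name('N'))           # True
print(layout.get_index_by_name('N'))  # 0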
Code Example #8
def test_non_max_suppression():

    boxes_shape = [1, 1000, 4]
    scores_shape = [1, 1, 1000]
    boxes_parameter = ov.parameter(boxes_shape, name="Boxes", dtype=np.float32)
    scores_parameter = ov.parameter(scores_shape, name="Scores", dtype=np.float32)

    node = ov.non_max_suppression(boxes_parameter, scores_parameter, make_constant_node(1000, np.int64))

    assert node.get_type_name() == "NonMaxSuppression"
    assert node.get_output_size() == 3
    assert node.get_output_partial_shape(0) == PartialShape([Dimension(0, 1000), Dimension(3)])
    assert node.get_output_partial_shape(1) == PartialShape([Dimension(0, 1000), Dimension(3)])
    assert list(node.get_output_shape(2)) == [1]
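
The NonMaxSuppression outputs above are bounded-dynamic; a short sketch (using the API as exercised in these tests) of how such a bound behaves:

from openvino.runtime import Dimension, PartialShape

out = PartialShape([Dimension(0, 1000), Dimension(3)])
print(out.is_dynamic)                                    # True
print(out.get_dimension(0).compatible(Dimension(500)))   # True: 500 fits 0..1000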
Code Example #9
File: test_core.py Project: yury-intel/openvino
def test_input_shape_read_only():
    shape = Shape([1, 10])
    param = ov.parameter(shape, dtype=np.float32)
    model = Model(ov.relu(param), [param])
    ref_shape = model.input().shape
    ref_shape[0] = Dimension(3)
    assert model.input().shape == shape
Code Example #10
def is_image_info(self):
    if str(self.layout) != "[N,C]":
        return False
    return self.channels.get_length() >= 2 if self.channels.is_static \
        else self.channels.relaxes(Dimension(2))
Code Example #11
def _reshape_input(self, shapes, make_dynamic=False):
    if hasattr(self, 'exec_network'):
        del self.exec_network
    if self.infer_request is not None:
        del self.infer_request
        self.infer_request = None
    partial_shapes = {}
    for name, shape in shapes.items():
        p_shape = PartialShape(
            [Dimension(d) if not isinstance(d, tuple) else Dimension(d[0], d[1]) for d in shape])
        partial_shapes[self.input_to_index[name]] = p_shape
    self.network.reshape(partial_shapes)
    self.dyn_input_layers, self._partial_shapes = self.get_dynamic_inputs(self.network)
    if self.dyn_input_layers and make_dynamic:
        return
    self.exec_network = self.ie_core.compile_model(self.network, self.device)
    self.infer_request = self.exec_network.create_infer_request()
Code Example #12
File: utils.py Project: mikhailk62/openvino
def parse_batch_size(batch_size_str):
    if batch_size_str:
        error_message = f"Can't parse batch size '{batch_size_str}'"
        dims = batch_size_str.split("..")
        if len(dims) > 2:
            raise Exception(error_message)
        elif len(dims) == 2:
            bounds = []
            for d in dims:
                if d.isnumeric():
                    bounds.append(int(d))
                else:
                    raise Exception(error_message)
            return Dimension(*bounds)
        else:
            if dims[0].lstrip("-").isnumeric():
                return Dimension(int(dims[0]))
            elif dims[0] == "?":
                return Dimension()
            else:
                raise Exception(error_message)
    else:
        return Dimension(0)
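
Hedged usage of parse_batch_size, covering the accepted forms (fixed value, range, fully dynamic, and unset):

print(parse_batch_size("4"))     # a static Dimension of 4
print(parse_batch_size("1..8"))  # a bounded Dimension, 1 to 8
print(parse_batch_size("?"))     # a fully dynamic Dimension
print(parse_batch_size(""))      # Dimension(0), meaning "not set"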
Code Example #13
def test_set_batch_dimension():
    model = create_test_model()
    model_param1 = model.get_parameters()[0]
    model_param2 = model.get_parameters()[1]
    # batch == 2
    model_param1.set_layout(Layout("NC"))
    assert get_batch(model) == 2
    # set batch to 1
    set_batch(model, Dimension(1))
    assert get_batch(model) == 1
    # check if shape of param 1 has changed
    assert model_param1.get_output_shape(0) == PartialShape([1, 1])
    # check if shape of param 2 has not changed
    assert model_param2.get_output_shape(0) == PartialShape([2, 1])
Code Example #14
def test_set_batch_dimension():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    func = Model(add, [param1, param2], "TestFunction")
    func_param1 = func.get_parameters()[0]
    func_param2 = func.get_parameters()[1]
    # batch == 2
    func_param1.set_layout(Layout("NC"))
    assert get_batch(func) == 2
    # set batch to 1
    set_batch(func, Dimension(1))
    assert get_batch(func) == 1
    # check if shape of param 1 has changed
    assert func_param1.get_output_shape(0) == PartialShape([1, 1])
    # check if shape of param 2 has not changed
    assert func_param2.get_output_shape(0) == PartialShape([2, 1])
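
A self-contained variant of the test flow above (assuming openvino.runtime; names here are illustrative), showing that set_batch rewrites only parameters whose layout marks an 'N' axis:

import numpy as np
from openvino.runtime import Dimension, Layout, Model, get_batch, set_batch
import openvino.runtime.opset8 as ops

param = ops.parameter([2, 16], dtype=np.float32, name="data")
param.set_layout(Layout("NC"))  # mark dimension 0 as the batch axis
model = Model(ops.relu(param), [param], "batch_demo")

print(get_batch(model))           # 2
set_batch(model, Dimension(4))
print(get_batch(model))           # 4
print(param.get_output_shape(0))  # the parameter shape is now {4, 16}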
Code Example #15
File: test_core.py Project: yury-intel/openvino
def test_partial_shape():
    ps = PartialShape([1, 2, 3, 4])
    assert ps.is_static
    assert not ps.is_dynamic
    assert ps.rank == 4
    assert repr(ps) == "<PartialShape: {1,2,3,4}>"
    assert ps.get_dimension(0) == Dimension(1)
    assert ps.get_dimension(1) == Dimension(2)
    assert ps.get_dimension(2) == Dimension(3)
    assert ps.get_dimension(3) == Dimension(4)

    shape = Shape([1, 2, 3])
    ps = PartialShape(shape)
    assert ps.is_static
    assert not ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 3
    assert list(ps.get_shape()) == [1, 2, 3]
    assert list(ps.get_max_shape()) == [1, 2, 3]
    assert list(ps.get_min_shape()) == [1, 2, 3]
    assert list(ps.to_shape()) == [1, 2, 3]
    assert repr(shape) == "<Shape: {1, 2, 3}>"
    assert repr(ps) == "<PartialShape: {1,2,3}>"

    ps = PartialShape(
        [Dimension(1),
         Dimension(2),
         Dimension(3),
         Dimension.dynamic()])
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 4
    assert list(ps.get_min_shape()) == [1, 2, 3, 0]
    assert list(ps.get_max_shape())[3] > 1000000000
    assert repr(ps) == "<PartialShape: {1,2,3,?}>"
    assert ps.get_dimension(0) == Dimension(1)
    assert ps.get_dimension(1) == Dimension(2)
    assert ps.get_dimension(2) == Dimension(3)
    assert ps.get_dimension(3) == Dimension.dynamic()

    ps = PartialShape([1, 2, 3, -1])
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 4
    assert list(ps.get_min_shape()) == [1, 2, 3, 0]
    assert list(ps.get_max_shape())[3] > 1000000000
    assert repr(ps) == "<PartialShape: {1,2,3,?}>"

    ps = PartialShape.dynamic()
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.rank == Dimension.dynamic()
    assert list(ps.get_min_shape()) == []
    assert list(ps.get_max_shape()) == []
    assert repr(ps) == "<PartialShape: ...>"

    ps = PartialShape.dynamic(rank=Dimension(2))
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.rank == 2
    assert 2 == ps.rank
    assert list(ps.get_min_shape()) == [0, 0]
    assert list(ps.get_max_shape())[0] > 1000000000
    assert repr(ps) == "<PartialShape: {?,?}>"
Code Example #16
File: utils.py Project: mikhailk62/openvino
def getDimentionByLayout(self, character):
    if self.layout.has_name(character):
        return self.partial_shape[self.layout.get_index_by_name(character)]
    else:
        return Dimension(0)
Code Example #17
File: utils.py Project: mikhailk62/openvino
def is_image_info(self):
    if str(self.layout) != "[N,C]":
        return False
    return self.channels.relaxes(Dimension(2))
Code Example #18
def run(args):
    statistics = None
    try:
        if args.number_streams is None:
            logger.warning(" -nstreams default value is determined automatically for a device. "
                           "Although the automatic selection usually provides a reasonable performance, "
                           "it may still be non-optimal for some cases; for more information, see the README. ")

        command_line_arguments = get_command_line_arguments(sys.argv)
        if args.report_type:
            statistics = StatisticsReport(StatisticsReport.Config(args.report_type, args.report_folder))
            statistics.add_parameters(StatisticsReport.Category.COMMAND_LINE_PARAMETERS, command_line_arguments)

        def is_flag_set_in_command_line(flag):
            return any(x.strip('-') == flag for x, y in command_line_arguments)

        device_name = args.target_device

        devices = parse_devices(device_name)
        device_number_streams = parse_nstreams_value_per_device(devices, args.number_streams)

        config = {}
        if args.load_config:
            load_config(args.load_config, config)

        is_network_compiled = False
        _, ext = os.path.splitext(args.path_to_model)

        if ext == BLOB_EXTENSION:
            is_network_compiled = True
            print("Model is compiled")

        # ------------------------------ 2. Loading OpenVINO ---------------------------------------------------
        next_step(step_id=2)

        benchmark = Benchmark(args.target_device, args.number_infer_requests,
                              args.number_iterations, args.time, args.api_type, args.inference_only)

        ## CPU (MKLDNN) extensions
        if CPU_DEVICE_NAME in device_name and args.path_to_extension:
            benchmark.add_extension(path_to_extension=args.path_to_extension)

        ## GPU (clDNN) Extensions
        if GPU_DEVICE_NAME in device_name and args.path_to_cldnn_config:
            if GPU_DEVICE_NAME not in config.keys():
                config[GPU_DEVICE_NAME] = {}
            config[GPU_DEVICE_NAME]['CONFIG_FILE'] = args.path_to_cldnn_config

        if GPU_DEVICE_NAME in config.keys() and 'CONFIG_FILE' in config[GPU_DEVICE_NAME].keys():
            cldnn_config = config[GPU_DEVICE_NAME]['CONFIG_FILE']
            benchmark.add_extension(path_to_cldnn_config=cldnn_config)

        if not args.perf_hint:
            for device in devices:
                supported_config_keys = benchmark.core.get_property(device, 'SUPPORTED_CONFIG_KEYS')
                if 'PERFORMANCE_HINT' in supported_config_keys:
                    logger.warning(f"-hint default value is determined as 'THROUGHPUT' automatically for {device} device" +
                                    "For more detailed information look at README.")
                    args.perf_hint = "throughput"

        version = benchmark.get_version_info()

        logger.info(version)

        # --------------------- 3. Setting device configuration --------------------------------------------------------
        next_step()

        def get_device_type_from_name(name):
            new_name = str(name)
            new_name = new_name.split(".", 1)[0]
            new_name = new_name.split("(", 1)[0]
            return new_name

        ## Set default values from dumped config
        default_devices = set()
        for device in devices:
            device_type = get_device_type_from_name(device)
            if device_type in config and device not in config:
                config[device] = config[device_type].copy()
                default_devices.add(device_type)

        for def_device in default_devices:
            config.pop(def_device)

        perf_counts = False
        for device in devices:
            if device not in config.keys():
                config[device] = {}
            ## Set performance counter
            if is_flag_set_in_command_line('pc'):
                ## set to user defined value
                config[device]['PERF_COUNT'] = 'YES' if args.perf_counts else 'NO'
            elif 'PERF_COUNT' in config[device].keys() and config[device]['PERF_COUNT'] == 'YES':
                logger.warning(f"Performance counters for {device} device is turned on. " +
                               "To print results use -pc option.")
            elif args.report_type in [ averageCntReport, detailedCntReport ]:
                logger.warning(f"Turn on performance counters for {device} device " +
                               f"since report type is {args.report_type}.")
                config[device]['PERF_COUNT'] = 'YES'
            elif args.exec_graph_path is not None:
                logger.warning(f"Turn on performance counters for {device} device " +
                               "due to execution graph dumping.")
                config[device]['PERF_COUNT'] = 'YES'
            else:
                ## set to default value
                config[device]['PERF_COUNT'] = 'YES' if args.perf_counts else 'NO'
            perf_counts = True if config[device]['PERF_COUNT'] == 'YES' else perf_counts

            ## high-level performance hints
            if is_flag_set_in_command_line('hint') or args.perf_hint:
                config[device]['PERFORMANCE_HINT'] = args.perf_hint.upper()
                if is_flag_set_in_command_line('nireq'):
                    config[device]['PERFORMANCE_HINT_NUM_REQUESTS'] = str(args.number_infer_requests)
            ## the rest are individual per-device settings (overriding the values the device will deduce from perf hint)
            def set_throughput_streams():
                key = get_device_type_from_name(device) + "_THROUGHPUT_STREAMS"
                if device in device_number_streams.keys():
                    ## set to user defined value
                    supported_config_keys = benchmark.core.get_property(device, 'SUPPORTED_CONFIG_KEYS')
                    if key not in supported_config_keys:
                        raise Exception(f"Device {device} doesn't support config key '{key}'! " +
                                        "Please specify -nstreams for correct devices in format  <dev1>:<nstreams1>,<dev2>:<nstreams2>")
                    config[device][key] = device_number_streams[device]
                elif key not in config[device].keys() and args.api_type == "async" and not is_flag_set_in_command_line('hint'):
                    ## set the _AUTO value for the #streams
                    logger.warning(f"-nstreams default value is determined automatically for {device} device. " +
                                   "Although the automatic selection usually provides a reasonable performance, "
                                   "but it still may be non-optimal for some cases, for more information look at README.")
                    if device != MYRIAD_DEVICE_NAME:  ## MYRIAD sets the default number of streams implicitly
                        config[device][key] = get_device_type_from_name(device) + "_THROUGHPUT_AUTO"
                if key in config[device].keys():
                    device_number_streams[device] = config[device][key]

            if CPU_DEVICE_NAME in device: # CPU supports a few special performance-oriented keys
                # limit threading for CPU portion of inference
                if args.number_threads and is_flag_set_in_command_line("nthreads"):
                    config[device]['CPU_THREADS_NUM'] = str(args.number_threads)

                if is_flag_set_in_command_line("enforcebf16") or is_flag_set_in_command_line("enforce_bfloat16"):
                    config[device]['ENFORCE_BF16'] = 'YES' if args.enforce_bfloat16 else 'NO'

                if is_flag_set_in_command_line('pin'):
                    ## set to user defined value
                    config[device]['CPU_BIND_THREAD'] = args.infer_threads_pinning
                elif 'CPU_BIND_THREAD' not in config[device].keys():
                    if MULTI_DEVICE_NAME in device_name and GPU_DEVICE_NAME in device_name:
                        logger.warning(f"Turn off threads pinning for {device} " +
                                       "device since multi-scenario with GPU device is used.")
                        config[device]['CPU_BIND_THREAD'] = 'NO'

                ## for CPU execution, more throughput-oriented execution via streams
                set_throughput_streams()
            elif GPU_DEVICE_NAME in device:
                ## for GPU execution, more throughput-oriented execution via streams
                set_throughput_streams()

                if MULTI_DEVICE_NAME in device_name and CPU_DEVICE_NAME in device_name:
                    logger.warning("Turn on GPU throttling. Multi-device execution with the CPU + GPU performs best with GPU throttling hint, " +
                                   "which releases another CPU thread (that is otherwise used by the GPU driver for active polling)")
                    config[device]['GPU_PLUGIN_THROTTLE'] = '1'
            elif MYRIAD_DEVICE_NAME in device:
                set_throughput_streams()
                config[device]['LOG_LEVEL'] = 'LOG_INFO'
            elif GNA_DEVICE_NAME in device:
                if is_flag_set_in_command_line('qb'):
                    if args.qb == 8:
                        config[device]['GNA_PRECISION'] = 'I8'
                    else:
                        config[device]['GNA_PRECISION'] = 'I16'
            else:
                supported_config_keys = benchmark.core.get_property(device, 'SUPPORTED_CONFIG_KEYS')
                if 'CPU_THREADS_NUM' in supported_config_keys and args.number_threads and is_flag_set_in_command_line("nthreads"):
                    config[device]['CPU_THREADS_NUM'] = str(args.number_threads)
                if 'CPU_THROUGHPUT_STREAMS' in supported_config_keys and args.number_streams and is_flag_set_in_command_line("streams"):
                    config[device]['CPU_THROUGHPUT_STREAMS'] = args.number_streams
                if 'CPU_BIND_THREAD' in supported_config_keys and args.infer_threads_pinning and is_flag_set_in_command_line("pin"):
                    config[device]['CPU_BIND_THREAD'] = args.infer_threads_pinning

        benchmark.set_config(config)
        if args.cache_dir:
            benchmark.set_cache_dir(args.cache_dir)

        topology_name = ""
        load_from_file_enabled = is_flag_set_in_command_line('load_from_file') or is_flag_set_in_command_line('lfile')
        if load_from_file_enabled and not is_network_compiled:
            next_step()
            print("Skipping the step for loading model from file")
            next_step()
            print("Skipping the step for loading model from file")
            next_step()
            print("Skipping the step for loading model from file")

            # --------------------- 7. Loading the model to the device -------------------------------------------------
            next_step()

            start_time = datetime.utcnow()
            compiled_model = benchmark.core.compile_model(args.path_to_model, benchmark.device)
            duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
            logger.info(f"Compile model took {duration_ms} ms")
            if statistics:
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ('load network time (ms)', duration_ms)
                                          ])
            app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.input_scale, args.input_mean, compiled_model.inputs)
            batch_size = get_network_batch_size(app_inputs_info)
        elif not is_network_compiled:
            # --------------------- 4. Read the Intermediate Representation of the network -----------------------------
            next_step()

            start_time = datetime.utcnow()
            model = benchmark.read_model(args.path_to_model)
            topology_name = model.get_name()
            duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
            logger.info(f"Read model took {duration_ms} ms")
            if statistics:
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ('read network time (ms)', duration_ms)
                                          ])

            # --------------------- 5. Resizing network to match image sizes and given batch ---------------------------
            next_step()

            app_inputs_info, reshape = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.input_scale, args.input_mean, model.inputs)
            if reshape:
                start_time = datetime.utcnow()
                shapes = { info.name : info.partial_shape for info in app_inputs_info }
                logger.info(
                    'Reshaping model: {}'.format(', '.join("'{}': {}".format(k, str(v)) for k, v in shapes.items())))
                model.reshape(shapes)
                duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
                logger.info(f"Reshape model took {duration_ms} ms")
                if statistics:
                    statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                              [
                                                  ('reshape network time (ms)', duration_ms)
                                              ])

            # use batch size according to provided layout and shapes
            batch_size = get_network_batch_size(app_inputs_info)
            logger.info(f'Network batch size: {batch_size}')

            # --------------------- 6. Configuring inputs and outputs of the model --------------------------------------------------
            next_step()

            pre_post_processing(model, app_inputs_info, args.input_precision, args.output_precision, args.input_output_precision)
            print_inputs_and_outputs_info(model)

            # --------------------- 7. Loading the model to the device -------------------------------------------------
            next_step()

            start_time = datetime.utcnow()
            compiled_model = benchmark.core.compile_model(model, benchmark.device)
            duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
            logger.info(f"Compile model took {duration_ms} ms")
            if statistics:
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ('load network time (ms)', duration_ms)
                                          ])
        else:
            next_step()
            print("Skipping the step for compiled network")
            next_step()
            print("Skipping the step for compiled network")
            next_step()
            print("Skipping the step for compiled network")

            # --------------------- 7. Loading the model to the device -------------------------------------------------
            next_step()

            start_time = datetime.utcnow()
            compiled_model = benchmark.core.import_model(args.path_to_model)
            duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
            logger.info(f"Import model took {duration_ms} ms")
            if statistics:
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ('import network time (ms)', duration_ms)
                                          ])
            app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.input_scale, args.input_mean, compiled_model.inputs)
            batch_size = get_network_batch_size(app_inputs_info)

        # --------------------- 8. Querying optimal runtime parameters --------------------------------------------------
        next_step()
        ## actual device-deduced settings
        for device in devices:
            keys = benchmark.core.get_property(device, 'SUPPORTED_CONFIG_KEYS')
            logger.info(f'DEVICE: {device}')
            for k in keys:
                try:
                    logger.info(f'  {k}  , {benchmark.core.get_property(device, k)}')
                except Exception:
                    pass


        # Update number of streams
        for device in device_number_streams.keys():
            key = get_device_type_from_name(device) + '_THROUGHPUT_STREAMS'
            device_number_streams[device] = benchmark.core.get_property(device, key)

        # ------------------------------------ 9. Creating infer requests and preparing input data ----------------------
        next_step()

        # Create infer requests
        start_time = datetime.utcnow()
        requests = benchmark.create_infer_requests(compiled_model)
        duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
        logger.info(f"Create {benchmark.nireq} infer requests took {duration_ms} ms")
        if statistics:
            statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                      [
                                          ('create infer requests time (ms)', duration_ms)
                                      ])

        # Prepare input data
        paths_to_input = list()
        if args.paths_to_input:
            for path in args.paths_to_input:
                if ":" in next(iter(path), ""):
                    paths_to_input.extend(path)
                else:
                    paths_to_input.append(os.path.abspath(*path))

        data_queue = get_input_data(paths_to_input, app_inputs_info)

        static_mode = check_for_static(app_inputs_info)
        allow_inference_only_or_sync = can_measure_as_static(app_inputs_info)
        if not allow_inference_only_or_sync and benchmark.api_type == 'sync':
            raise Exception("Benchmarking of the model with dynamic shapes is available for async API only."
                                   "Please use -api async -nstreams 1 -nireq 1 to emulate sync behavior.")

        if benchmark.inference_only is None:
            benchmark.inference_only = static_mode
        elif benchmark.inference_only and not allow_inference_only_or_sync:
            raise Exception("Benchmarking of a dynamic model is only available with input filling in the measurement loop!")

        if benchmark.inference_only:
            logger.info("Benchmarking in inference-only mode (input filling is not included in the measurement loop).")
        else:
            logger.info("Benchmarking in full mode (input filling is included in the measurement loop).")

        # update batch size in case of a dynamic network with one data_shape
        if benchmark.inference_only and batch_size.is_dynamic:
            batch_size = Dimension(data_queue.batch_sizes[data_queue.current_group_id])

        benchmark.latency_groups = get_latency_groups(app_inputs_info)

        if len(benchmark.latency_groups) > 1:
            logger.info(f"Defined {len(benchmark.latency_groups)} tensor groups:")
            for group in benchmark.latency_groups:
                print(f"\t{str(group)}")

        # Iteration limit
        benchmark.niter = get_number_iterations(benchmark.niter, benchmark.nireq, max(len(info.shapes) for info in app_inputs_info), benchmark.api_type)

        # Set input tensors before first inference
        for request in requests:
            data_tensors = data_queue.get_next_input()
            for port, data_tensor in data_tensors.items():
                input_tensor = request.get_input_tensor(port)
                if not static_mode:
                    input_tensor.shape = data_tensor.shape
                input_tensor.data[:] = data_tensor.data

        if statistics:
            statistics.add_parameters(StatisticsReport.Category.RUNTIME_CONFIG,
                                      [
                                          ('topology', topology_name),
                                          ('target device', device_name),
                                          ('API', args.api_type),
                                          ('inference_only', benchmark.inference_only),
                                          ('precision', "UNSPECIFIED"),
                                          ('batch size', str(batch_size)),
                                          ('number of iterations', str(benchmark.niter)),
                                          ('number of parallel infer requests', str(benchmark.nireq)),
                                          ('duration (ms)', str(get_duration_in_milliseconds(benchmark.duration_seconds))),
                                       ])

            for nstreams in device_number_streams.items():
                statistics.add_parameters(StatisticsReport.Category.RUNTIME_CONFIG,
                                         [
                                            (f"number of {nstreams[0]} streams", str(nstreams[1])),
                                         ])

        # ------------------------------------ 10. Measuring performance -----------------------------------------------

        output_string = process_help_inference_string(benchmark, device_number_streams)

        next_step(additional_info=output_string)
        progress_bar_total_count = 10000
        if benchmark.niter and not benchmark.duration_seconds:
            progress_bar_total_count = benchmark.niter

        progress_bar = ProgressBar(progress_bar_total_count, args.stream_output, args.progress) if args.progress else None

        duration_ms = f"{benchmark.first_infer(requests):.2f}"
        logger.info(f"First inference took {duration_ms} ms")
        if statistics:
            statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                    [
                                        ('first inference time (ms)', duration_ms)
                                    ])

        pcseq = args.pcseq
        if static_mode or len(benchmark.latency_groups) == 1:
            pcseq = False

        fps, median_latency_ms, avg_latency_ms, min_latency_ms, max_latency_ms, total_duration_sec, iteration = benchmark.main_loop(requests, data_queue, batch_size, args.latency_percentile, progress_bar, pcseq)

        # ------------------------------------ 11. Dumping statistics report -------------------------------------------
        next_step()

        if args.dump_config:
            dump_config(args.dump_config, config)
            logger.info(f"OpenVINO configuration settings were dumped to {args.dump_config}")

        if args.exec_graph_path:
            dump_exec_graph(compiled_model, args.exec_graph_path)

        if perf_counts:
            perfs_count_list = []
            for request in requests:
                perfs_count_list.append(request.profiling_info)
            if args.perf_counts:
                print_perf_counters(perfs_count_list)
            if statistics:
                statistics.dump_performance_counters(perfs_count_list)

        if statistics:
            statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                      [
                                          ('total execution time (ms)', f'{get_duration_in_milliseconds(total_duration_sec):.2f}'),
                                          ('total number of iterations', str(iteration)),
                                      ])
            if MULTI_DEVICE_NAME not in device_name:
                latency_prefix = None
                if args.latency_percentile == 50 and static_mode:
                    #latency_prefix = 'median latency (ms)'
                    latency_prefix = 'latency (ms)'
                elif args.latency_percentile != 50:
                    latency_prefix = 'latency (' + str(args.latency_percentile) + ' percentile) (ms)'
                if latency_prefix:
                    statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                            [
                                                (latency_prefix, f'{median_latency_ms:.2f}'),
                                            ])
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("avg latency", f'{avg_latency_ms:.2f}'),
                                          ])
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("min latency", f'{min_latency_ms:.2f}'),
                                          ])
                statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("max latency", f'{max_latency_ms:.2f}'),
                                          ])
                if pcseq:
                    for group in benchmark.latency_groups:
                        statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("group", str(group)),
                                          ])
                        statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("avg latency", f'{group.avg:.2f}'),
                                          ])
                        statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("min latency", f'{group.min:.2f}'),
                                          ])
                        statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                          [
                                              ("max latency", f'{group.max:.2f}'),
                                          ])
            statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                      [
                                          ('throughput', f'{fps:.2f}'),
                                      ])
            statistics.dump()


        print(f'Count:          {iteration} iterations')
        print(f'Duration:       {get_duration_in_milliseconds(total_duration_sec):.2f} ms')
        if MULTI_DEVICE_NAME not in device_name:
            print('Latency:')
            if args.latency_percentile == 50 and static_mode:
                print(f'    Median:     {median_latency_ms:.2f} ms')
            elif args.latency_percentile != 50:
                print(f'({args.latency_percentile} percentile):     {median_latency_ms:.2f} ms')
            print(f'    AVG:        {avg_latency_ms:.2f} ms')
            print(f'    MIN:        {min_latency_ms:.2f} ms')
            print(f'    MAX:        {max_latency_ms:.2f} ms')

            if pcseq:
                print("Latency for each data shape group: ")
                for group in benchmark.latency_groups:
                    print(f"  {str(group)}")
                    print(f'    AVG:        {group.avg:.2f} ms')
                    print(f'    MIN:        {group.min:.2f} ms')
                    print(f'    MAX:        {group.max:.2f} ms')

        print(f'Throughput: {fps:.2f} FPS')

        del compiled_model

        next_step.step_id = 0
    except Exception as e:
        logger.exception(e)

        if statistics:
            statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
                                      [
                                          ('error', str(e)),
                                      ])
            statistics.dump()
        sys.exit(1)
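
The -nstreams error message above references the <dev1>:<nstreams1>,<dev2>:<nstreams2> format. A minimal sketch of parsing that format (hedged; this is not the actual parse_nstreams_value_per_device implementation):

def parse_nstreams(value: str) -> dict:
    # "CPU:4,GPU:2" -> {"CPU": "4", "GPU": "2"}
    result = {}
    for entry in value.split(','):
        device, _, streams = entry.partition(':')
        result[device.strip()] = streams.strip()
    return result

print(parse_nstreams("CPU:4,GPU:2"))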
Code Example #19
File: test_function.py Project: yeonbok/openvino
def test_reshape_with_python_types(device):
    model = create_test_model()

    def check_shape(new_shape):
        for input in model.inputs:
            assert input.partial_shape == new_shape

    shape1 = [1, 4]
    new_shapes = {input: shape1 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape1))

    shape2 = [1, 6]
    new_shapes = {input.any_name: shape2 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape2))

    shape3 = [1, 8]
    new_shapes = {i: shape3 for i, input in enumerate(model.inputs)}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape3))

    shape4 = [1, -1]
    new_shapes = {input: shape4 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape([Dimension(1), Dimension(-1)]))

    shape5 = [1, (1, 10)]
    new_shapes = {input: shape5 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape([Dimension(1), Dimension(1, 10)]))

    shape6 = [Dimension(3), Dimension(3, 10)]
    new_shapes = {input: shape6 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape6))

    shape7 = "1..10, ?"
    new_shapes = {input: shape7 for input in model.inputs}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape7))

    # reshape mixed keys
    shape8 = [(1, 20), -1]
    new_shapes = {"data1": shape8, 1: shape8}
    model.reshape(new_shapes)
    check_shape(PartialShape([Dimension(1, 20), Dimension(-1)]))

    # reshape with one input
    param = ops.parameter([1, 3, 28, 28])
    model = Model(ops.relu(param), [param])

    shape9 = [-1, 3, (28, 56), (28, 56)]
    model.reshape(shape9)
    check_shape(
        PartialShape([
            Dimension(-1),
            Dimension(3),
            Dimension(28, 56),
            Dimension(28, 56)
        ]))

    shape10 = "?,3,..224,..224"
    model.reshape(shape10)
    check_shape(
        PartialShape([
            Dimension(-1),
            Dimension(3),
            Dimension(-1, 224),
            Dimension(-1, 224)
        ]))

    # check exceptions
    shape11 = [1, 1, 1, 1]
    with pytest.raises(TypeError) as e:
        model.reshape({model.input().node: shape11})
    assert "Incorrect key type <class 'openvino.pyopenvino.op.Parameter'> to reshape a model, " \
           "expected keys as openvino.runtime.Output, int or str." in str(e.value)

    with pytest.raises(TypeError) as e:
        model.reshape({0: range(1, 9)})
    assert "Incorrect value type <class 'range'> to reshape a model, " \
           "expected values as openvino.runtime.PartialShape, str, list or tuple." in str(e.value)
Code Example #20
def main():
    args = build_argparser().parse_args()

    # load vocabulary file for model
    vocab = load_vocab_file(args.vocab)
    log.debug("Loaded vocab file from {}, get {} tokens".format(
        args.vocab, len(vocab)))

    # create tokenizer
    tokenizer = Tokenizer(BPE(str(args.vocab), str(args.merges)))
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
    tokenizer.decoder = decoders.ByteLevel()

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    ie = Core()

    # read IR
    model_path = args.model
    log.info('Reading model {}'.format(args.model))
    model = ie.read_model(model_path)

    # check the number of inputs and outputs
    if len(model.inputs) != 1:
        raise RuntimeError(
            'The demo expects a model with a single input, while {} were provided'.
            format(len(model.inputs)))
    if len(model.outputs) != 1:
        raise RuntimeError(
            'The demo expects a model with a single output, while {} were provided'.
            format(len(model.outputs)))
    input_tensor = model.inputs[0].any_name

    if not args.dynamic_shape and (
            model.inputs[0].partial_shape.is_dynamic
            or model.inputs[0].shape[1] != args.max_seq_len):
        model.reshape({
            input_tensor:
            PartialShape([Dimension(1),
                          Dimension(args.max_seq_len)])
        })

    if args.dynamic_shape:
        model.reshape({
            input_tensor:
            PartialShape([Dimension(1),
                          Dimension(0, args.max_seq_len)])
        })

    # load model to the device
    compiled_model = ie.compile_model(model, args.device)
    infer_request = compiled_model.create_infer_request()
    log.info('The model {} is loaded to {}'.format(args.model, args.device))

    if args.input:

        def prompts():
            for prompt in args.input:
                log.info("Input prompt: {}".format(prompt))
                yield prompt
    else:

        def prompts():
            while True:
                yield input('Type input prompt (empty string to exit):')

    # loop on user's or prepared prompts
    for prompt in prompts():
        if not prompt.strip():
            break

        # encode input
        tokens = tokenizer.encode_batch([prompt])[0].ids
        input_ids = np.array([tokens], dtype=np.int32)

        # maximum number of tokens that can be processed by network at once
        max_length = args.max_seq_len

        eos_token_id = len(vocab) - 1

        cur_input_len = input_ids.shape[-1]

        # maximum number of tokens that will be generated
        max_sample_token_num = args.max_sample_token_num + cur_input_len

        t0 = time.perf_counter()
        t_count = 0

        while True:
            model_input = input_ids
            if not args.dynamic_shape:
                # pad the rest of the request
                pad_len = max_length - cur_input_len
                model_input = np.concatenate(
                    (input_ids, [[eos_token_id] * pad_len]), axis=-1)

            # create numpy inputs for IE
            inputs = {
                input_tensor: model_input,
            }

            # infer by IE
            t_start = time.perf_counter()
            res = infer_request.infer(inputs)
            t_end = time.perf_counter()
            t_count += 1
            log.info(
                "Sequence of length {} is processed with {:0.2f} requests/sec ({:0.2} sec per request)"
                .format(model_input.shape[1], 1 / (t_end - t_start),
                        t_end - t_start))

            outputs = next(iter(res.values()))
            next_token_logits = outputs[:, cur_input_len - 1, :]

            # pre-process distribution
            next_token_scores = process_logits(input_ids, next_token_logits,
                                               eos_token_id)
            if args.top_k > 0:
                next_token_scores = get_top_k_logits(next_token_scores,
                                                     args.top_k)

            if args.top_p < 1.0:
                next_token_scores = get_top_p_logits(next_token_scores,
                                                     args.top_p)

            # get next token id
            probs = softmax(next_token_scores)
            next_tokens = np.random.choice(probs.shape[-1],
                                           1,
                                           p=probs[0],
                                           replace=True)

            # update info for the next step
            input_ids = np.concatenate((input_ids, [next_tokens]), axis=-1)

            cur_input_len = input_ids.shape[-1]

            if stop_criteria(input_ids, min(max_length, max_sample_token_num),
                             eos_token_id):
                break

        t1 = time.perf_counter()

        text = tokenizer.decode_batch(input_ids)[0]

        log.info(
            "{} requests were processed in {:0.2f}sec ({:0.2}sec per request)".
            format(t_count, t1 - t0, (t1 - t0) / t_count))

        # print result
        log.info("GENERATED SEQUENCE: {}".format(text))
Code Example #21
File: test_core.py Project: yeonbok/openvino
def test_partial_shape():
    ps = PartialShape([1, 2, 3, 4])
    assert ps.is_static
    assert not ps.is_dynamic
    assert ps.rank == 4
    assert repr(ps) == "<PartialShape: {1,2,3,4}>"
    assert ps.get_dimension(0) == Dimension(1)
    assert ps.get_dimension(1) == Dimension(2)
    assert ps.get_dimension(2) == Dimension(3)
    assert ps.get_dimension(3) == Dimension(4)

    shape = Shape([1, 2, 3])
    ps = PartialShape(shape)
    assert ps.is_static
    assert not ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 3
    assert list(ps.get_shape()) == [1, 2, 3]
    assert list(ps.get_max_shape()) == [1, 2, 3]
    assert list(ps.get_min_shape()) == [1, 2, 3]
    assert list(ps.to_shape()) == [1, 2, 3]
    assert repr(shape) == "<Shape: {1, 2, 3}>"
    assert repr(ps) == "<PartialShape: {1,2,3}>"

    ps = PartialShape(
        [Dimension(1),
         Dimension(2),
         Dimension(3),
         Dimension.dynamic()])
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 4
    assert list(ps.get_min_shape()) == [1, 2, 3, 0]
    assert list(ps.get_max_shape())[3] > 1000000000
    assert repr(ps) == "<PartialShape: {1,2,3,?}>"
    assert ps.get_dimension(0) == Dimension(1)
    assert ps.get_dimension(1) == Dimension(2)
    assert ps.get_dimension(2) == Dimension(3)
    assert ps.get_dimension(3) == Dimension.dynamic()

    ps = PartialShape([1, 2, 3, -1])
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.all_non_negative
    assert ps.rank == 4
    assert list(ps.get_min_shape()) == [1, 2, 3, 0]
    assert list(ps.get_max_shape())[3] > 1000000000
    assert repr(ps) == "<PartialShape: {1,2,3,?}>"

    ps = PartialShape.dynamic()
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.rank == Dimension.dynamic()
    assert list(ps.get_min_shape()) == []
    assert list(ps.get_max_shape()) == []
    assert repr(ps) == "<PartialShape: ...>"

    ps = PartialShape.dynamic(rank=Dimension(2))
    assert not ps.is_static
    assert ps.is_dynamic
    assert ps.rank == 2
    assert 2 == ps.rank
    assert list(ps.get_min_shape()) == [0, 0]
    assert list(ps.get_max_shape())[0] > 1000000000
    assert repr(ps) == "<PartialShape: {?,?}>"

    shape_list = [(1, 10), [2, 5], 4, Dimension(2), "..10"]
    ref_ps = PartialShape([
        Dimension(1, 10),
        Dimension(2, 5),
        Dimension(4),
        Dimension(2),
        Dimension(-1, 10)
    ])
    assert PartialShape(shape_list) == ref_ps
    assert PartialShape(tuple(shape_list)) == ref_ps

    with pytest.raises(TypeError) as e:
        PartialShape([(1, 2, 3)])
    assert "Two elements are expected in tuple(lower, upper) " \
           "for dynamic dimension, but 3 elements were given." in str(e.value)

    with pytest.raises(TypeError) as e:
        PartialShape([("?", "?")])
    assert "Incorrect pair of types (<class 'str'>, <class 'str'>) " \
           "for dynamic dimension, ints are expected." in str(e.value)

    with pytest.raises(TypeError) as e:
        PartialShape([range(10)])
    assert "Incorrect type <class 'range'> for dimension. Expected types are: " \
           "int, str, openvino.runtime.Dimension, list/tuple with lower " \
           "and upper values for dynamic dimension." in str(e.value)

    ps = PartialShape("...")
    assert ps == PartialShape.dynamic()

    ps = PartialShape("?, 3, ..224, 28..224")
    assert ps == PartialShape(
        [Dimension(-1),
         Dimension(3),
         Dimension(-1, 224),
         Dimension(28, 224)])

    with pytest.raises(RuntimeError) as e:
        ps = PartialShape("?,,3")
    assert 'Cannot get vector of dimensions! "?,,3" is incorrect' in str(
        e.value)

    shape = Shape()
    assert len(shape) == 0
Code Example #22
File: pipeline.py Project: yury-intel/openvino
def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    """
    Load input model and convert it to nGraph function
    :param: argv: parsed command line arguments
    :param: moc_front_end: Loaded Frontend for converting input model
    :return: converted nGraph function ready for serialization
    """
    input_model = moc_front_end.load(argv.input_model)

    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
        argv.output, argv.freeze_placeholder_with_value,
        moc_front_end.get_name())

    def check_places_are_same(places_original: List[Place],
                              places_new: List[Place]):
        """
        Check if set of new places is same as original or not.
        :param places_original: List[Place] Original model places
        :param places_new: List[Place] New list of places
        :return: True if new list of places is same as original
        """
        return len(places_original) == len(places_new) and len([
            item for item in places_original
            if any([item.is_equal(item2['node']) for item2 in places_new])
        ]) == len(places_original)

    def add_names_to_tensors(model: InputModel, places: List[Place]):
        """
        Adds additional names to some model input tensors. This helper should be used
        when a model modification is going to happen.
        :param model The input model loaded by a given frontend
        :param places An object containing Places and names that will be used for model modification
        """
        for new_input in places:
            if 'input_name' not in new_input:
                continue

            try:
                model.add_name_for_tensor(new_input['node'],
                                          new_input['input_name'])
            except NotImplementedFailure as e:
                # some frontends might not implement this method
                log.warning(
                    'Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'
                    .format(new_input['input_name'], str(e)))

    enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms(
    )
    if 'ANALYSIS_JSON_PRINT' in enabled_transforms:
        # NOTE that model analysis is performed before applying the user's settings (input shapes, etc.)
        framework_model = moc_front_end.decode(input_model)
        json_model_analysis_dump(framework_model)
        # a model is not processed further in json analysis mode
        sys.exit(0)

    inputs_equal = True
    if user_shapes:
        inputs_equal = check_places_are_same(input_model.get_inputs(),
                                             user_shapes)

    outputs_equal = True
    if outputs:
        outputs_equal = check_places_are_same(input_model.get_outputs(),
                                              outputs)
    log.debug('Inputs are same: {}, outputs are same: {}'.format(
        inputs_equal, outputs_equal))

    if not inputs_equal and not outputs_equal:
        log.debug('Using extract subgraph')
        new_input_places = [x['node'] for x in user_shapes]
        new_output_places = [x['node'] for x in outputs]
        add_names_to_tensors(input_model, user_shapes)
        input_model.extract_subgraph(new_input_places, new_output_places)
        # invalidation of existing Place objects could have happened in the operation above
        if user_shapes:
            user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
                input_model, argv.placeholder_shapes,
                argv.placeholder_data_types, argv.output,
                argv.freeze_placeholder_with_value, moc_front_end.get_name())
    elif not inputs_equal:
        log.debug('Using override_all_inputs')
        add_names_to_tensors(input_model, user_shapes)
        new_input_places = [x['node'] for x in user_shapes]
        input_model.override_all_inputs(new_input_places)
        # invalidation of existing Place objects could have happened in the operation above
        names = [place.get_names()[0] for place in new_input_places]
        shapes = [shape for shape in argv.placeholder_shapes.values()]
        # the names used to find the nodes have to be updated, since the original
        # ones were cut off from the graph
        placeholder_shapes = dict(zip(names, shapes))
        if user_shapes:
            user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
                input_model, placeholder_shapes, argv.placeholder_data_types,
                argv.output, argv.freeze_placeholder_with_value,
                moc_front_end.get_name())
    elif not outputs_equal:
        log.debug('Using override_all_outputs')
        add_names_to_tensors(input_model, user_shapes)
        new_output_places = [x['node'] for x in outputs]
        input_model.override_all_outputs(new_output_places)

    if user_shapes:
        for user_shape in user_shapes:
            if user_shape.get('shape') is not None:
                input_model.set_partial_shape(
                    user_shape['node'],
                    partial_shape_from_tuple(user_shape['shape']))
            if user_shape.get('data_type') is not None:
                data_type = get_element_type(user_shape['data_type'])
                log.debug('Set data type: {}'.format(data_type))
                input_model.set_element_type(user_shape['node'], data_type)

    def shape_to_array(shape: PartialShape):
        return [shape.get_dimension(i) for i in range(shape.rank.get_length())]

    # Set batch size
    if argv.batch is not None and argv.batch > 0:
        log.debug('Setting batch size to {}'.format(argv.batch))
        for place in input_model.get_inputs():
            old_partial_shape = input_model.get_partial_shape(place)
            old_shape_array = shape_to_array(
                old_partial_shape) if old_partial_shape.rank.is_static else []
            joined_name = ' '.join(place.get_names())
            validate_batch_in_shape(old_shape_array, joined_name)

            # Assume the batch size is always the first dimension of the shape
            # and keep the other dimensions unchanged
            new_shape = [
                old_partial_shape.get_dimension(i)
                for i in range(old_partial_shape.rank.get_length())
            ]
            new_shape[0] = Dimension(argv.batch)

            new_partial_shape = PartialShape(new_shape)
            log.debug('Input: {}, Old shape: {}, New shape: {}'.format(
                joined_name, old_shape_array, new_shape))
            input_model.set_partial_shape(place, new_partial_shape)

    ngraph_function = moc_front_end.convert(input_model)
    return ngraph_function
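
A note on the batch override above: it assumes the batch is always the first dimension and rebuilds the shape around it. A minimal standalone sketch of that step, assuming only Dimension and PartialShape from the openvino.runtime Python API (set_first_dim_as_batch is a hypothetical helper name, not part of the pipeline):

from openvino.runtime import Dimension, PartialShape

def set_first_dim_as_batch(shape: PartialShape, batch: int) -> PartialShape:
    # Copy every dimension, then replace the first one with the new batch
    # size, mirroring the "batch is the first dimension" assumption above.
    dims = [shape.get_dimension(i) for i in range(shape.rank.get_length())]
    dims[0] = Dimension(batch)
    return PartialShape(dims)

# For example, [1,3,224,224] with batch=8 becomes [8,3,224,224].
print(set_first_dim_as_batch(PartialShape([1, 3, 224, 224]), 8))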
Code example #23
File: test_core.py Project: yeonbok/openvino
def test_dimension_comparisons():
    d1 = Dimension.dynamic()
    d2 = Dimension.dynamic()
    assert d1 == d2
    assert d1 == -1
    assert d1.refines(d2)
    assert d1.relaxes(d2)
    assert d2.refines(d1)
    assert d2.relaxes(d1)
    assert d2.compatible(d1)
    assert d2.same_scheme(d1)

    d1 = Dimension.dynamic()
    d2 = Dimension(3)
    assert d1 != d2
    assert d2 == 3
    assert not d1.refines(d2)
    assert d1.relaxes(d2)
    assert d2.refines(d1)
    assert not d2.relaxes(d1)
    assert d2.compatible(d1)
    assert not d2.same_scheme(d1)

    d1 = Dimension(3)
    d2 = Dimension(3)
    assert d1 == d2
    assert d1.refines(d2)
    assert d1.relaxes(d2)
    assert d2.refines(d1)
    assert d2.relaxes(d1)
    assert d2.compatible(d1)
    assert d2.same_scheme(d1)

    d1 = Dimension(4)
    d2 = Dimension(3)
    assert d1 != d2
    assert not d1.refines(d2)
    assert not d1.relaxes(d2)
    assert not d2.refines(d1)
    assert not d2.relaxes(d1)
    assert not d2.compatible(d1)
    assert not d2.same_scheme(d1)

    d = Dimension("?")
    assert d == Dimension()

    d = Dimension("1")
    assert d == Dimension(1)

    d = Dimension("..10")
    assert d == Dimension(-1, 10)

    d = Dimension("10..")
    assert d == Dimension(10, -1)

    d = Dimension("5..10")
    assert d == Dimension(5, 10)

    with pytest.raises(RuntimeError) as e:
        d = Dimension("C")
    assert 'Cannot parse dimension: "C"' in str(e.value)

    with pytest.raises(RuntimeError) as e:
        d = Dimension("?..5")
    assert 'Cannot parse min bound: "?"' in str(e.value)

    with pytest.raises(RuntimeError) as e:
        d = Dimension("5..?")
    assert 'Cannot parse max bound: "?"' in str(e.value)
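
The refines/relaxes assertions above define a specificity ordering: a static dimension refines a dynamic one, and a dynamic dimension relaxes a static one. A short sketch of using that ordering to merge two compatible dimensions, relying only on the Dimension API exercised by the test (pick_more_specific is a hypothetical helper):

from openvino.runtime import Dimension

def pick_more_specific(a: Dimension, b: Dimension) -> Dimension:
    # Of two compatible dimensions, keep the one that refines (is at least
    # as specific as) the other; incompatible pairs such as 4 vs 3 are an error.
    if not a.compatible(b):
        raise ValueError('Incompatible dimensions: {} and {}'.format(a, b))
    return a if a.refines(b) else b

assert pick_more_specific(Dimension.dynamic(), Dimension(3)) == 3
assert pick_more_specific(Dimension(3), Dimension(3)) == 3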
Code example #25
File: utils.py Project: mikhailk62/openvino
def get_inputs_info(shape_string, data_shape_string, layout_string, batch_size, scale_string, mean_string, inputs):
    input_names = get_input_output_names(inputs)
    shape_map = parse_input_parameters(shape_string, input_names)
    data_shape_map = get_data_shapes_map(data_shape_string, input_names)
    layout_map = parse_input_parameters(layout_string, input_names)
    batch_size = parse_batch_size(batch_size)
    reshape = False
    batch_found = False
    input_info = []
    for i in range(len(inputs)):
        info = AppInputInfo()
        # Input name
        info.name = input_names[i]
        # Input precision
        info.element_type = inputs[i].element_type
        # Shape
        info.original_shape = inputs[i].partial_shape
        if info.name in shape_map.keys():
            info.partial_shape = parse_partial_shape(shape_map[info.name])
            reshape = True
        else:
            info.partial_shape = inputs[i].partial_shape

        # Layout
        if info.name in layout_map.keys():
            info.layout = Layout(layout_map[info.name])
        elif inputs[i].node.layout != Layout():
            info.layout = inputs[i].node.layout
        else:
            image_colors_dim = Dimension(3)
            shape = info.partial_shape
            num_dims = len(shape)
            if num_dims == 4:
                if shape[1] == image_colors_dim:
                    info.layout = Layout("NCHW")
                elif shape[3] == image_colors_dim:
                    info.layout = Layout("NHWC")
            elif num_dims == 3:
                if shape[0] == image_colors_dim:
                    info.layout = Layout("CHW")
                elif shape[2] == image_colors_dim:
                    info.layout = Layout("HWC")

        # Update shape with batch if needed
        if batch_size != 0:
            if batch_size.is_static and data_shape_map:
                 logger.warning(f"Batch size will be ignored. Provide batch deminsion in data_shape parameter.")
            else:
                batch_index = -1
                if info.layout.has_name('N'):
                    batch_index = info.layout.get_index_by_name('N')
                elif info.layout == Layout():
                    supposed_batch = info.partial_shape[0]
                    if supposed_batch.is_dynamic or supposed_batch in [0, 1]:
                        logger.warning(f"Batch dimension is not specified for input '{info.name}'. "
                                        "The first dimension will be interpreted as batch size.")
                        batch_index = 0
                        info.layout = Layout("N...")
                if batch_index != -1 and info.partial_shape[batch_index] != batch_size:
                    info.partial_shape[batch_index] = batch_size
                    reshape = True
                    batch_found = True
                elif batch_index == -1 and not batch_found and i == len(inputs) - 1:
                    raise Exception(f"Batch dimension is not specified for this model!")

        # Data shape
        if info.name in data_shape_map.keys() and info.is_dynamic:
            for p_shape in data_shape_map[info.name]:
                if p_shape.is_dynamic:
                    raise Exception(f"Data shape always should be static, {str(p_shape)} is dynamic.")
                elif info.partial_shape.compatible(p_shape):
                    info.data_shapes.append(p_shape.to_shape())
                else:
                    raise Exception(f"Data shape '{str(p_shape)}' provided for input '{info.name}' "
                                    f"is not compatible with partial shape '{str(info.partial_shape)}' for this input.")
        elif info.name in data_shape_map.keys():
            logger.warning(f"Input '{info.name}' has static shape. Provided data shapes for this input will be ignored.")

        input_info.append(info)

    # Update scale, mean
    scale_map = parse_scale_or_mean(scale_string, input_info)
    mean_map = parse_scale_or_mean(mean_string, input_info)

    for input in input_info:
        if input.name in scale_map:
            input.scale = scale_map[input.name]
        if input.name in mean_map:
            input.mean = mean_map[input.name]

    return input_info, reshape
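
Unlike the moc_pipeline examples, the batch update here locates the batch dimension through the layout rather than assuming position 0. A reduced sketch of that lookup, using only the Layout and PartialShape calls that appear above (override_batch is a hypothetical name, and the fallback to index 0 mirrors the "N..." case):

from openvino.runtime import Dimension, Layout, PartialShape

def override_batch(shape: PartialShape, layout: Layout, batch: int) -> PartialShape:
    # Find the batch dimension via the layout; fall back to index 0 when
    # the layout carries no 'N', as get_inputs_info does for trivial batches.
    index = layout.get_index_by_name('N') if layout.has_name('N') else 0
    shape[index] = Dimension(batch)
    return shape

print(override_batch(PartialShape([1, 3, 224, 224]), Layout("NCHW"), 8))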
Code example #26
File: test_core.py Project: yury-intel/openvino
def test_dimension():
    dim = Dimension()
    assert dim.is_dynamic
    assert not dim.is_static
    assert repr(dim) == "<Dimension: ?>"

    dim = Dimension.dynamic()
    assert dim.is_dynamic
    assert not dim.is_static
    assert repr(dim) == "<Dimension: ?>"

    dim = Dimension(10)
    assert dim.is_static
    assert len(dim) == 10
    assert dim.get_length() == 10
    assert dim.get_min_length() == 10
    assert dim.get_max_length() == 10
    assert repr(dim) == "<Dimension: 10>"

    dim = Dimension(5, 15)
    assert dim.is_dynamic
    assert dim.get_min_length() == 5
    assert dim.get_max_length() == 15
    assert repr(dim) == "<Dimension: 5..15>"
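
A bounded dimension such as Dimension(5, 15) is what lets a partial shape accept a whole range of concrete data shapes. A small sketch of the compatibility check that example #25 applies to user data shapes, with illustrative shapes:

from openvino.runtime import Dimension, PartialShape

model_shape = PartialShape([Dimension(1), Dimension(3), Dimension(5, 15), Dimension(5, 15)])
data_shape = PartialShape([1, 3, 10, 10])

# A static data shape that falls within the bounds is compatible
# and can be converted to a concrete Shape.
assert data_shape.is_static
assert model_shape.compatible(data_shape)
print(data_shape.to_shape())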
Code example #27
def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd):
    """
    Load the input model and convert it to an nGraph function.
    :param argv: parsed command-line arguments
    :param moc_front_end: loaded frontend for converting the input model
    :return: converted nGraph function ready for serialization
    """
    input_model = moc_front_end.load(argv.input_model)

    user_shapes, outputs, freeze_placeholder = fe_user_data_repack(
        input_model, argv.placeholder_shapes, argv.placeholder_data_types,
        argv.output, argv.freeze_placeholder_with_value)

    def check_places_are_same(places_original: List[Place],
                              places_new: List[Place]):
        """
        Check whether the new set of places is the same as the original one.
        :param places_original: List[Place] Original model places
        :param places_new: List[Place] New list of places
        :return: True if the new list of places is the same as the original
        """
        return len(places_original) == len(places_new) and len([
            item for item in places_original
            if any([item.is_equal(item2['node']) for item2 in places_new])
        ]) == len(places_original)

    inputs_equal = True
    if user_shapes:
        inputs_equal = check_places_are_same(input_model.get_inputs(),
                                             user_shapes)

    outputs_equal = True
    if outputs:
        outputs_equal = check_places_are_same(input_model.get_outputs(),
                                              outputs)
    log.debug('Inputs are same: {}, outputs are same: {}'.format(
        inputs_equal, outputs_equal))

    if not inputs_equal and not outputs_equal:
        # Use ExtractSubgraph
        new_input_places = [x['node'] for x in user_shapes]
        new_output_places = [x['node'] for x in outputs]
        log.debug('Using extract subgraph')
        input_model.extract_subgraph(new_input_places, new_output_places)
    elif not inputs_equal:
        new_input_places = [x['node'] for x in user_shapes]
        log.debug('Using override_all_inputs')
        input_model.override_all_inputs(new_input_places)
    elif not outputs_equal:
        new_output_places = [x['node'] for x in outputs]
        log.debug('Using override_all_outputs')
        input_model.override_all_outputs(new_output_places)

    if user_shapes:
        for user_shape in user_shapes:
            if user_shape.get('shape') is not None:
                input_model.set_partial_shape(
                    user_shape['node'], PartialShape(user_shape['shape']))
            if user_shape.get('data_type') is not None:
                data_type = get_element_type(user_shape['data_type'])
                log.debug('Set data type: {}'.format(data_type))
                input_model.set_element_type(user_shape['node'], data_type)

    def shape_to_array(shape: PartialShape):
        return [shape.get_dimension(i) for i in range(shape.rank.get_length())]

    # Set batch size
    if argv.batch is not None and argv.batch > 0:
        log.debug('Setting batch size to {}'.format(argv.batch))
        for place in input_model.get_inputs():
            old_partial_shape = input_model.get_partial_shape(place)
            old_shape_array = shape_to_array(
                old_partial_shape) if old_partial_shape.rank.is_static else []
            joined_name = ' '.join(place.get_names())
            validate_batch_in_shape(old_shape_array, joined_name)

            # Assume the batch size is always the first dimension of the shape
            # and keep the other dimensions unchanged
            new_shape = [
                old_partial_shape.get_dimension(i)
                for i in range(old_partial_shape.rank.get_length())
            ]
            new_shape[0] = Dimension(argv.batch)

            new_partial_shape = PartialShape(new_shape)
            log.debug('Input: {}, Old shape: {}, New shape: {}'.format(
                joined_name, old_shape_array, new_shape))
            input_model.set_partial_shape(place, new_partial_shape)

    ngraph_function = moc_front_end.convert(input_model)
    return ngraph_function
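
A hedged usage sketch for the pipeline above: loading a frontend through FrontEndManager and passing a minimal argparse.Namespace. The fields below are only the subset this function reads, the model path is hypothetical, and the real Model Optimizer populates a much larger Namespace (the FrontEndManager import path has also moved between releases):

import argparse
from openvino.frontend import FrontEndManager

fem = FrontEndManager()
moc_front_end = fem.load_by_framework('onnx')  # any framework with a MOC frontend

argv = argparse.Namespace(
    input_model='model.onnx',            # hypothetical model path
    placeholder_shapes=None,             # no user-provided input shapes
    placeholder_data_types=None,
    output=None,                         # keep the original outputs
    freeze_placeholder_with_value=None,
    batch=None)                          # keep the original batch size

ngraph_function = moc_pipeline(argv, moc_front_end)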