Example #1
def main():
    args = parse_args()

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()

    # --------------------------- Step 2. Read a model --------------------------------------------------------------------
    if args.model:
        log.info(f'Reading the model: {args.model}')
        # (.xml and .bin files) or (.onnx file)
        model = core.read_model(args.model)

        # --------------------------- Step 3. Apply preprocessing -------------------------------------------------------------
        if args.output_layers:
            output_layer_names, output_layer_ports = parse_outputs_from_args(
                args)
            model.add_outputs(list(zip(output_layer_names,
                                       output_layer_ports)))

        if args.layout:
            layouts = parse_input_layouts(args, model.inputs)

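        # Use PrePostProcessor to set FP32 element type on the model inputs/outputs and apply any user-provided layouts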
        ppp = PrePostProcessor(model)

        for i in range(len(model.inputs)):
            ppp.input(i).tensor().set_element_type(Type.f32)

            input_name = model.input(i).get_any_name()

            if args.layout and input_name in layouts.keys():
                ppp.input(i).tensor().set_layout(Layout(layouts[input_name]))
                ppp.input(i).model().set_layout(Layout(layouts[input_name]))

        for i in range(len(model.outputs)):
            ppp.output(i).tensor().set_element_type(Type.f32)

        model = ppp.build()

        if args.batch_size:
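            # Context windows require processing frame by frame, so the batch size is forced to 1 when either window is non-zero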
            batch_size = args.batch_size if args.context_window_left == args.context_window_right == 0 else 1

            if any(not _input.node.layout.empty for _input in model.inputs):
                set_batch(model, batch_size)
            else:
                log.warning(
                    'Layout is not set for any input, so custom batch size is not set'
                )

# ---------------------------Step 4. Configure plugin ---------------------------------------------------------
    devices = args.device.replace('HETERO:', '').split(',')
    plugin_config = {}

    if 'GNA' in args.device:
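        # A device such as 'GNA_SW_EXACT' keeps its explicit execution mode; a bare 'GNA' falls back to 'GNA_AUTO'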
        gna_device_mode = devices[0] if '_' in devices[0] else 'GNA_AUTO'
        devices[0] = 'GNA'

        plugin_config['GNA_DEVICE_MODE'] = gna_device_mode
        plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}'
        plugin_config['GNA_EXEC_TARGET'] = args.exec_target
        plugin_config['GNA_PWL_MAX_ERROR_PERCENT'] = str(args.pwl_me)

        # Set a GNA scale factor
        if args.import_gna_model:
            if args.scale_factor:
                log.warning(
                    f'Custom scale factor will be used for imported GNA model: {args.import_gna_model}'
                )
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                log.info(
                    f'Using scale factor from the imported GNA model: {args.import_gna_model}'
                )
        else:
            if args.scale_factor:
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                scale_factors = []

                for file_name in re.split(', |,', args.input):
                    _, utterances = read_utterance_file(file_name)
                    scale_factors.append(get_scale_factor(utterances[0]))

                log.info(
                    'Using scale factor(s) calculated from first utterance')
                set_scale_factors(plugin_config, scale_factors)

        if args.export_embedded_gna_model:
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration

        if args.performance_counter:
            plugin_config['PERF_COUNT'] = 'YES'

    device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[0]

    # --------------------------- Step 5. Loading model to the device -----------------------------------------------------
    log.info('Loading the model to the plugin')
    if args.model:
        compiled_model = core.compile_model(model, device_str, plugin_config)
    else:
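        # No IR/ONNX model was given, so import a previously exported GNA model blob instead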
        with open(args.import_gna_model, 'rb') as f:
            buf = BytesIO(f.read())
            compiled_model = core.import_model(buf, device_str, plugin_config)

# --------------------------- Exporting GNA model using OpenVINO Runtime AOT API --------------------------------------
    if args.export_gna_model:
        log.info(f'Writing GNA Model to {args.export_gna_model}')
        user_stream = compiled_model.export_model()
        with open(args.export_gna_model, 'wb') as f:
            f.write(user_stream)
        return 0

    if args.export_embedded_gna_model:
        log.info(
            f'Exported GNA embedded model to file {args.export_embedded_gna_model}'
        )
        log.info(
            f'GNA embedded model export done for GNA generation {args.embedded_gna_configuration}'
        )
        return 0

# --------------------------- Step 6. Set up input --------------------------------------------------------------------
    if args.input_layers:
        input_layer_names = re.split(', |,', args.input_layers)
    else:
        input_layer_names = [
            _input.any_name for _input in compiled_model.inputs
        ]

    input_file_names = re.split(', |,', args.input)

    if len(input_layer_names) != len(input_file_names):
        log.error(
            f'Number of model inputs ({len(input_layer_names)}) is not equal '
            f'to number of ark files ({len(input_file_names)})')
        sys.exit(-3)

    input_file_data = [
        read_utterance_file(file_name) for file_name in input_file_names
    ]

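    # Build one dictionary per utterance, mapping each input layer name to that utterance's data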
    infer_data = [{
        input_layer_names[j]: input_file_data[j].utterances[i]
        for j in range(len(input_layer_names))
    } for i in range(len(input_file_data[0].utterances))]

    if args.output_layers:
        output_layer_names, output_layer_ports = parse_outputs_from_args(args)
        # If a name of output layer contains a port number then concatenate output_layer_names and output_layer_ports
        if ':' in compiled_model.outputs[0].any_name:
            output_layer_names = [
                f'{output_layer_names[i]}:{output_layer_ports[i]}'
                for i in range(len(output_layer_names))
            ]
    else:
        output_layer_names = [compiled_model.outputs[0].any_name]

    if args.output:
        output_file_names = re.split(', |,', args.output)

        if len(output_layer_names) != len(output_file_names):
            log.error(
                'The number of output files is not equal to the number of model outputs.'
            )
            sys.exit(-6)

    if args.reference:
        reference_file_names = re.split(', |,', args.reference)

        if len(output_layer_names) != len(reference_file_names):
            log.error(
                'The number of reference files is not equal to the number of model outputs.'
            )
            sys.exit(-5)

        reference_file_data = [
            read_utterance_file(file_name)
            for file_name in reference_file_names
        ]

        references = [{
            output_layer_names[j]: reference_file_data[j].utterances[i]
            for j in range(len(output_layer_names))
        } for i in range(len(input_file_data[0].utterances))]

# --------------------------- Step 7. Create infer request ------------------------------------------------------------
    infer_request = compiled_model.create_infer_request()

    # --------------------------- Step 8. Do inference --------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = []
    total_infer_time = 0

    for i in range(len(infer_data)):
        start_infer_time = default_timer()

        # Reset states between utterance inferences to remove a memory impact
        for state in infer_request.query_state():
            state.reset()

        results.append(
            do_inference(
                infer_data[i],
                infer_request,
                args.context_window_left,
                args.context_window_right,
            ))

        infer_time = default_timer() - start_infer_time
        total_infer_time += infer_time
        num_of_frames = infer_data[i][input_layer_names[0]].shape[0]
        avg_infer_time_per_frame = infer_time / num_of_frames

        # --------------------------- Step 9. Process output ------------------------------------------------------------------
        log.info('')
        log.info(f'Utterance {i}:')
        log.info(f'Total time in Infer (HW and SW): {infer_time * 1000:.2f}ms')
        log.info(f'Frames in utterance: {num_of_frames}')
        log.info(
            f'Average Infer time per frame: {avg_infer_time_per_frame * 1000:.2f}ms'
        )

        for name in output_layer_names:
            log.info('')
            log.info(f'Output blob name: {name}')
            log.info(f'Number scores per frame: {results[i][name].shape[1]}')

            if args.reference:
                log.info('')
                compare_with_reference(results[i][name], references[i][name])

        if args.performance_counter:
            if 'GNA' in args.device:
                total_cycles = infer_request.profiling_info[0].real_time.total_seconds()
                stall_cycles = infer_request.profiling_info[1].real_time.total_seconds()
                active_cycles = total_cycles - stall_cycles
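                # Convert cycle counters to seconds; the GNA_*_FREQUENCY constants are assumed to be in MHz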
                frequency = 10**6
                if args.arch == 'CORE':
                    frequency *= GNA_CORE_FREQUENCY
                else:
                    frequency *= GNA_ATOM_FREQUENCY
                total_inference_time = total_cycles / frequency
                active_time = active_cycles / frequency
                stall_time = stall_cycles / frequency
                log.info('')
                log.info('Performance Statistics of GNA Hardware')
                log.info(
                    f'   Total Inference Time: {(total_inference_time * 1000):.4f} ms'
                )
                log.info(f'   Active Time: {(active_time * 1000):.4f} ms')
                log.info(f'   Stall Time:  {(stall_time * 1000):.4f} ms')

    log.info('')
    log.info(f'Total sample time: {total_infer_time * 1000:.2f}ms')

    if args.output:
        for i, name in enumerate(output_layer_names):
            data = [
                results[j][name]
                for j in range(len(input_file_data[0].utterances))
            ]
            write_utterance_file(output_file_names[i], input_file_data[0].keys,
                                 data)
            log.info(f'File {output_file_names[i]} was created!')


# ----------------------------------------------------------------------------------------------------------------------
    log.info(
        'This sample is an API example, '
        'for any performance measurements please use the dedicated benchmark_app tool\n'
    )
    return 0
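

# A minimal entry point for running this sample (a sketch; assumes this main() lives in a script
# that imports sys and the helpers used above):
if __name__ == '__main__':
    sys.exit(main())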
Example #2
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    args = parse_args()

# ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
    log.info('Creating Inference Engine')
    ie = IECore()

# ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation---------------
    if args.model:
        log.info(f'Reading the network: {args.model}')
        # .xml and .bin files
        net = ie.read_network(model=args.model)

# ---------------------------Step 3. Configure input & output----------------------------------------------------------
        log.info('Configuring input and output blobs')
        # Mark layers from args.output_layers as outputs
        if args.output_layers:
            net.add_outputs(get_output_layer_list(net, args, with_ports=True))

        # Get names of input and output blobs
        input_blobs = get_input_layer_list(net, args)
        output_blobs = get_output_layer_list(net, args, with_ports=False)

        # Set input and output precision manually
        for blob_name in input_blobs:
            net.input_info[blob_name].precision = 'FP32'

        for blob_name in output_blobs:
            net.outputs[blob_name].precision = 'FP32'

        net.batch_size = args.batch_size

# ---------------------------Step 4. Loading model to the device-------------------------------------------------------
    devices = args.device.replace('HETERO:', '').split(',')
    plugin_config = {}

    if 'GNA' in args.device:
        gna_device_mode = devices[0] if '_' in devices[0] else 'GNA_AUTO'
        devices[0] = 'GNA'

        plugin_config['GNA_DEVICE_MODE'] = gna_device_mode
        plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}'

        # Set a GNA scale factor
        if args.import_gna_model:
            if args.scale_factor:
                log.warning(f'Custom scale factor will be used for imported GNA model: {args.import_gna_model}')
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                log.info(f'Using scale factor from the imported GNA model: {args.import_gna_model}')
        else:
            if args.scale_factor:
                set_scale_factors(plugin_config, parse_scale_factors(args))
            else:
                scale_factors = []

                for file_name in re.split(', |,', args.input):
                    first_utterance = next(iter(read_utterance_file(file_name).values()))
                    scale_factors.append(get_scale_factor(first_utterance))

                log.info('Using scale factor(s) calculated from first utterance')
                set_scale_factors(plugin_config, scale_factors)

        if args.export_embedded_gna_model:
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration

        if args.performance_counter:
            plugin_config['PERF_COUNT'] = 'YES'

    device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[0]

    log.info('Loading the model to the plugin')
    if args.model:
        exec_net = ie.load_network(net, device_str, plugin_config)
    else:
        exec_net = ie.import_network(args.import_gna_model, device_str, plugin_config)
        input_blobs = get_input_layer_list(exec_net, args)
        output_blobs = get_output_layer_list(exec_net, args, with_ports=False)

    if args.input:
        input_files = re.split(', |,', args.input)

        if len(input_blobs) != len(input_files):
            log.error(f'Number of network inputs ({len(input_blobs)}) is not equal '
                      f'to number of ark files ({len(input_files)})')
            sys.exit(-3)

    if args.reference:
        reference_files = re.split(', |,', args.reference)

        if len(output_blobs) != len(reference_files):
            log.error('The number of reference files is not equal to the number of network outputs.')
            sys.exit(-5)

    if args.output:
        output_files = re.split(', |,', args.output)

        if len(output_blobs) != len(output_files):
            log.error('The number of output files is not equal to the number of network outputs.')
            sys.exit(-6)

    if args.export_gna_model:
        log.info(f'Writing GNA Model to {args.export_gna_model}')
        exec_net.export(args.export_gna_model)
        return 0

    if args.export_embedded_gna_model:
        log.info(f'Exported GNA embedded model to file {args.export_embedded_gna_model}')
        log.info(f'GNA embedded model export done for GNA generation {args.embedded_gna_configuration}')
        return 0

# ---------------------------Step 5. Create infer request--------------------------------------------------------------
# The load_network() method of the IECore class, called with a specified number of requests (default 1), returns an
# ExecutableNetwork instance which stores infer requests. So infer requests were already created in the previous step.
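# For illustration (a sketch, assuming the classic IECore API): several requests could be created up front with
# ie.load_network(net, device_str, plugin_config, num_requests=2) and then accessed as exec_net.requests[0], [1], ...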

# ---------------------------Step 6. Prepare input---------------------------------------------------------------------
    file_data = [read_utterance_file(file_name) for file_name in input_files]
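    # Regroup the file data by utterance name so each utterance maps every input blob to its data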
    input_data = {
        utterance_name: {
            input_blobs[i]: file_data[i][utterance_name] for i in range(len(input_blobs))
        }
        for utterance_name in file_data[0].keys()
    }

    if args.reference:
        references = {output_blobs[i]: read_utterance_file(reference_files[i]) for i in range(len(output_blobs))}

# ---------------------------Step 7. Do inference----------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = {blob_name: {} for blob_name in output_blobs}
    total_infer_time = 0

    for i, key in enumerate(sorted(input_data)):
        start_infer_time = default_timer()

        # Reset states between utterance inferences to remove a memory impact
        for request in exec_net.requests:
            for state in request.query_state():
                state.reset()

        result = infer_data(input_data[key], exec_net, input_blobs, output_blobs)

        for blob_name in result.keys():
            results[blob_name][key] = result[blob_name]

        infer_time = default_timer() - start_infer_time
        total_infer_time += infer_time
        num_of_frames = file_data[0][key].shape[0]
        avg_infer_time_per_frame = infer_time / num_of_frames

# ---------------------------Step 8. Process output--------------------------------------------------------------------
        log.info('')
        log.info(f'Utterance {i} ({key}):')
        log.info(f'Total time in Infer (HW and SW): {infer_time * 1000:.2f}ms')
        log.info(f'Frames in utterance: {num_of_frames}')
        log.info(f'Average Infer time per frame: {avg_infer_time_per_frame * 1000:.2f}ms')

        for blob_name in output_blobs:
            log.info('')
            log.info(f'Output blob name: {blob_name}')
            log.info(f'Number scores per frame: {results[blob_name][key].shape[1]}')

            if args.reference:
                log.info('')
                compare_with_reference(results[blob_name][key], references[blob_name][key])

        if args.performance_counter:
            if 'GNA' in args.device:
                pc = exec_net.requests[0].get_perf_counts()
                total_cycles = int(pc['1.1 Total scoring time in HW']['real_time'])
                stall_cycles = int(pc['1.2 Stall scoring time in HW']['real_time'])
                active_cycles = total_cycles - stall_cycles
                frequency = 10**6
                if args.arch == 'CORE':
                    frequency *= GNA_CORE_FREQUENCY
                else:
                    frequency *= GNA_ATOM_FREQUENCY
                total_inference_time = total_cycles / frequency
                active_time = active_cycles / frequency
                stall_time = stall_cycles / frequency
                log.info('')
                log.info('Performance Statistics of GNA Hardware')
                log.info(f'   Total Inference Time: {(total_inference_time * 1000):.4f} ms')
                log.info(f'   Active Time: {(active_time * 1000):.4f} ms')
                log.info(f'   Stall Time:  {(stall_time * 1000):.4f} ms')

    log.info('')
    log.info(f'Total sample time: {total_infer_time * 1000:.2f}ms')

    if args.output:
        for i, blob_name in enumerate(results):
            write_utterance_file(output_files[i], results[blob_name])
            log.info(f'File {output_files[i]} was created!')

# ----------------------------------------------------------------------------------------------------------------------
    log.info('This sample is an API example, '
             'for any performance measurements please use the dedicated benchmark_app tool\n')
    return 0
Example #3
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    args = parse_args()

    # ---------------------------Step 1. Initialize inference engine core--------------------------------------------------
    log.info('Creating Inference Engine')
    ie = IECore()

    # ---------------------------Step 2. Read a model in OpenVINO Intermediate Representation---------------
    if args.model:
        log.info(f'Reading the network: {args.model}')
        # .xml and .bin files
        net = ie.read_network(model=args.model)

        # ---------------------------Step 3. Configure input & output----------------------------------------------------------
        log.info('Configuring input and output blobs')
        # Get names of input and output blobs
        if args.input_layers:
            input_blobs = re.split(', |,', args.input_layers)
        else:
            input_blobs = [next(iter(net.input_info))]

        if args.output_layers:
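            # Each --output_layers entry is expected in the 'name:port' form; split it and validate the port number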
            output_name_port = [
                output.split(':')
                for output in re.split(', |,', args.output_layers)
            ]
            try:
                output_name_port = [(blob_name, int(port))
                                    for blob_name, port in output_name_port]
            except ValueError:
                log.error('Output Parameter does not have a port.')
                sys.exit(-4)

            net.add_outputs(output_name_port)

            output_blobs = [blob_name for blob_name, port in output_name_port]
        else:
            output_blobs = [list(net.outputs.keys())[-1]]

        # Set input and output precision manually
        for blob_name in input_blobs:
            net.input_info[blob_name].precision = 'FP32'

        for blob_name in output_blobs:
            net.outputs[blob_name].precision = 'FP32'

        net.batch_size = args.batch_size

# ---------------------------Step 4. Loading model to the device-------------------------------------------------------
    devices = args.device.replace('HETERO:', '').split(',')
    plugin_config = {}

    if 'GNA' in args.device:
        gna_device_mode = devices[0] if '_' in devices[0] else 'GNA_AUTO'
        devices[0] = 'GNA'

        plugin_config['GNA_DEVICE_MODE'] = gna_device_mode
        plugin_config['GNA_PRECISION'] = f'I{args.quantization_bits}'

        # Get a GNA scale factor
        if args.import_gna_model:
            log.info(
                f'Using scale factor from the imported GNA model: {args.import_gna_model}'
            )
        else:
            utterances = read_utterance_file(args.input.split(',')[0])
            key = sorted(utterances)[0]
            scale_factor = get_scale_factor(utterances[key])
            log.info(
                f'Using scale factor of {scale_factor:.7f} calculated from first utterance.'
            )

            plugin_config['GNA_SCALE_FACTOR'] = str(scale_factor)

        if args.export_embedded_gna_model:
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE'] = args.export_embedded_gna_model
            plugin_config['GNA_FIRMWARE_MODEL_IMAGE_GENERATION'] = args.embedded_gna_configuration

    device_str = f'HETERO:{",".join(devices)}' if 'HETERO' in args.device else devices[0]

    log.info('Loading the model to the plugin')
    if args.model:
        exec_net = ie.load_network(net, device_str, plugin_config)
    else:
        exec_net = ie.import_network(args.import_gna_model, device_str,
                                     plugin_config)
        input_blobs = [next(iter(exec_net.input_info))]
        output_blobs = [list(exec_net.outputs.keys())[-1]]

    if args.input:
        input_files = re.split(', |,', args.input)

        if len(input_blobs) != len(input_files):
            log.error(
                f'Number of network inputs ({len(input_blobs)}) is not equal '
                f'to number of ark files ({len(input_files)})')
            sys.exit(-3)

    if args.reference:
        reference_files = re.split(', |,', args.reference)

        if len(output_blobs) != len(reference_files):
            log.error(
                'The number of reference files is not equal to the number of network outputs.'
            )
            sys.exit(-5)

    if args.output:
        output_files = re.split(', |,', args.output)

        if len(output_blobs) != len(output_files):
            log.error(
                'The number of output files is not equal to the number of network outputs.'
            )
            sys.exit(-6)

    if args.export_gna_model:
        log.info(f'Writing GNA Model to {args.export_gna_model}')
        exec_net.export(args.export_gna_model)
        return 0

    if args.export_embedded_gna_model:
        log.info(
            f'Exported GNA embedded model to file {args.export_embedded_gna_model}'
        )
        log.info(
            f'GNA embedded model export done for GNA generation {args.embedded_gna_configuration}'
        )
        return 0

# ---------------------------Step 5. Create infer request--------------------------------------------------------------
# The load_network() method of the IECore class, called with a specified number of requests (default 1), returns an
# ExecutableNetwork instance which stores infer requests. So infer requests were already created in the previous step.

# ---------------------------Step 6. Prepare input---------------------------------------------------------------------
    file_data = [read_utterance_file(file_name) for file_name in input_files]
    input_data = {
        utterance_name: {
            input_blobs[i]: file_data[i][utterance_name]
            for i in range(len(input_blobs))
        }
        for utterance_name in file_data[0].keys()
    }

    if args.reference:
        references = {
            output_blobs[i]: read_utterance_file(reference_files[i])
            for i in range(len(output_blobs))
        }

# ---------------------------Step 7. Do inference----------------------------------------------------------------------
    log.info('Starting inference in synchronous mode')
    results = {blob_name: {} for blob_name in output_blobs}
    infer_times = []

    for key in sorted(input_data):
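        # Utterances are processed in sorted key order; per-utterance timings are collected for the summary below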
        start_infer_time = default_timer()

        # Reset states between utterance inferences to remove a memory impact
        for request in exec_net.requests:
            for state in request.query_state():
                state.reset()

        result = infer_data(input_data[key], exec_net, input_blobs,
                            output_blobs)

        for blob_name in result.keys():
            results[blob_name][key] = result[blob_name]

        infer_times.append(default_timer() - start_infer_time)

# ---------------------------Step 8. Process output--------------------------------------------------------------------
    for blob_name in output_blobs:
        for i, key in enumerate(sorted(results[blob_name])):
            log.info(f'Utterance {i} ({key})')
            log.info(f'Output blob name: {blob_name}')
            log.info(
                f'Frames in utterance: {results[blob_name][key].shape[0]}')
            log.info(
                f'Total time in Infer (HW and SW): {infer_times[i] * 1000:.2f}ms'
            )

            if args.reference:
                compare_with_reference(results[blob_name][key],
                                       references[blob_name][key])

            log.info('')

    log.info(f'Total sample time: {sum(infer_times) * 1000:.2f}ms')

    if args.output:
        for i, blob_name in enumerate(results):
            write_utterance_file(output_files[i], results[blob_name])
            log.info(f'File {output_files[i]} was created!')


# ----------------------------------------------------------------------------------------------------------------------
    log.info(
        'This sample is an API example, '
        'for any performance measurements please use the dedicated benchmark_app tool\n'
    )
    return 0