Example #1
0
def app(argv):
    """Run the POT command-line application.

    Parses CLI arguments, loads and configures the tool config, sets up
    logging, runs the optimization pipeline and prints final metrics.

    :param argv: list of command-line argument strings
    :raises Exception: when --evaluate is requested in simplified mode
    """
    telemetry = start_session_telemetry()
    try:
        parser = get_common_argument_parser()
        args = parser.parse_args(args=argv)
        check_dependencies(args)
        if not args.config:
            _update_config_path(args)

        config = Config.read_config(args.config)
        config.configure_params(args.ac_config)
        config.update_from_args(args)

        # Simplified mode has no accuracy-checker backend, so there is
        # nothing to evaluate against.
        if config.engine.type == 'simplified' and args.evaluate:
            raise Exception('Can not make evaluation in simplified mode')

        log_dir = _create_log_path(config)
        init_logger(level=args.log_level,
                    file_name=os.path.join(log_dir, 'log.txt'),
                    progress_bar=args.pbar)
        logger.info('Output log dir: {}'.format(log_dir))

        metrics = optimize(config)
        if metrics and logger.progress_bar_disabled:
            for name, value in metrics.items():
                logger.info('{: <27s}: {}'.format(name, value))
    finally:
        # Close the telemetry session on every path; the original leaked
        # the session whenever an exception was raised before this call.
        end_session_telemetry(telemetry)
Example #2
0
def main():
    """CLI entry point: optimize a model and save the compressed result.

    Ensures the output directory exists, configures file logging inside it,
    runs model optimization and stores the compressed model there.
    """
    args = parse_args()
    out_dir = os.path.expanduser(args.output)
    # exist_ok=True avoids the TOCTOU race the original isdir-then-makedirs
    # check had (another process could create the dir between the two calls).
    os.makedirs(out_dir, exist_ok=True)
    init_logger(level=args.log_level,
                file_name=os.path.join(out_dir, 'log.txt'))
    compressed_model = optimize_model(args)
    save_model(compressed_model, out_dir)
def main():
    """Entry point for the experiment-series runner.

    Parses CLI options, loads the series config, validates the experiment
    template path and launches the configured series of experiments.

    :raises RuntimeError: when the experiment template file does not exist
    """
    stimestamp = get_timestamp()
    parser = ArgumentParser(
        description='Tool for making series of experiments',
        allow_abbrev=False)
    parser.add_argument('-c',
                        '--config',
                        required=True,
                        help='Path to a config file')
    parser.add_argument(
        '-o',
        '--output-dir',
        default='.',
        help='Path to a folder to store the results, default is ".".')

    log_levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
    parser.add_argument('--log-level',
                        type=str,
                        default='INFO',
                        choices=log_levels,
                        help='Log level to print: {}'.format(log_levels))

    parser.add_argument(
        '--dry-run',
        action='store_true',
        # Typo fix: "excepth" -> "except" in the user-visible help text.
        help='If everything except actual run of experiment should be done')

    args = parser.parse_args()
    dry_run = args.dry_run

    init_logger(level=args.log_level, stream=sys.stdout)

    config_path = Path(args.config)
    # Build an absolute, timestamped directory for this series of runs.
    root_dir = Path(args.output_dir)
    series_dir = (root_dir / ('series_experiments_' + stimestamp)).absolute()

    config = read_config_from_file(config_path)

    experiment_template_path = Path(config['experiment_template']).absolute()
    if not experiment_template_path.exists():
        raise RuntimeError('Cannot find experiment template "{}"'.format(
            experiment_template_path))

    experiment_params, num_experiments = _parse_experiment_params(
        config['params'])

    _run_experiments(experiment_template_path=experiment_template_path,
                     experiment_params=experiment_params,
                     num_experiments=num_experiments,
                     series_dir=series_dir,
                     stimestamp=stimestamp,
                     root_dir=root_dir,
                     dry_run=dry_run)
Example #4
0
def test_benchmark(model=None, cfg=None):
    """Smoke-test benchmark_embedded against several configurations.

    When a model is supplied, benchmark it directly (honoring an optional
    cfg override). Otherwise run the reference model file through a series
    of benchmark configurations, including an intentionally wrong app dir.
    """
    init_logger(level='INFO')
    if cfg:
        set_benchmark_config(cfg)
    if model:
        benchmark_embedded(model=model)
        return

    path_to_model_file = str(REFERENCE_MODELS_PATH)
    logger.info('Benchmark test with {}'.format(path_to_model_file))

    # Same three configurations as before, exercised in order.
    benchmark_configs = (
        {'nireq': 0},
        {'nireq': 0, 'benchmark_app_dir': ""},
        {'nireq': 0, 'benchmark_app_dir': "wrong_benchmark_dir"},
    )
    for benchmark_cfg in benchmark_configs:
        set_benchmark_config(benchmark_cfg)
        benchmark_embedded(model=None, mf=path_to_model_file,
                           duration_seconds=1)
Example #5
0
def app(argv):
    """Run the POT command-line application (engine/data_source aware).

    Parses CLI arguments, loads the config, resolves the engine type and
    data source, sets up logging, runs optimization and prints metrics.

    :param argv: list of command-line argument strings
    :raises Exception: when --evaluate is used without the accuracy_checker
        engine
    """
    telemetry = start_session_telemetry()
    try:
        parser = get_common_argument_parser()
        args = parser.parse_args(args=argv)
        check_dependencies(args)
        if not args.config:
            _update_config_path(args)

        config = Config.read_config(args.config)

        if args.engine:
            # The original "if args.engine else 'accuracy_checker'" ternary
            # was dead code: this branch only runs when args.engine is truthy.
            config.engine['type'] = args.engine
        if 'data_source' not in config.engine:
            # data_free mode synthesizes its own dataset when none is given.
            if args.data_source is None and config.engine.type == 'data_free':
                args.data_source = 'pot_dataset'
            config.engine['data_source'] = args.data_source

        config.configure_params(args.ac_config)
        config.update_from_args(args)

        # Only the accuracy_checker engine can run an evaluation.
        if config.engine.type != 'accuracy_checker' and args.evaluate:
            raise Exception(
                'Can not make evaluation in simplified or data_free mode')

        log_dir = _create_log_path(config)
        init_logger(level=args.log_level,
                    file_name=os.path.join(log_dir, 'log.txt'),
                    progress_bar=args.pbar)
        logger.info('Output log dir: {}'.format(log_dir))

        metrics = optimize(config)
        if metrics and logger.progress_bar_disabled:
            for name, value in metrics.items():
                logger.info('{: <27s}: {}'.format(name, value))
    finally:
        # Close the telemetry session on every path; the original leaked
        # the session whenever an exception was raised before this call.
        end_session_telemetry(telemetry)
Example #6
0
from time import time

from addict import Dict
import cv2
import numpy as np

from openvino.tools.pot.api import Metric, DataLoader
from openvino.tools.pot.engines.ie_engine import IEEngine
from openvino.tools.pot.graph import load_model
from openvino.tools.pot.graph.model_utils import compress_model_weights, add_outputs
from openvino.tools.pot.pipeline.initializer import create_pipeline
from openvino.tools.pot.utils.logger import init_logger, get_logger
from openvino.tools.pot.api.samples.face_detection import utils

# Initialize the logger to print the quantization process in the console.
init_logger(level='INFO')
logger = get_logger(__name__)


# Custom DataLoader class implementation that is required for
# the proper reading of WIDER FACE images and annotations.
class WiderFaceLoader(DataLoader):
    """Custom DataLoader for WIDER FACE images and annotations.

    Wraps the dataset config and loads image ids plus per-image
    annotations from the configured annotation file.
    """

    # Required methods:
    def __init__(self, config):
        # The base DataLoader works with addict.Dict; wrap a plain dict.
        if not isinstance(config, Dict):
            config = Dict(config)
        super().__init__(config)
        # Minimum annotation height in pixels -- presumably used to filter
        # small faces inside _read_image_ids_annotations (defined outside
        # this view); TODO confirm.
        self._min_height_ann = 60
        self._img_ids, self._annotations = self._read_image_ids_annotations(
            config.annotation_file)
Example #7
0
# Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import sys
import logging
from pathlib import Path
from argparse import ArgumentParser

from openvino.tools.pot.utils.ac_imports import create_model_evaluator
from openvino.tools.pot.configs.config import Config
from openvino.tools.pot.utils.logger import init_logger, get_logger


init_logger(level=logging.DEBUG)
logger = get_logger(__name__)


def parse_args(argv):
    """Parse and process arguments for evaluation"""
    parser = ArgumentParser(description='Accuracy evaluation tool', allow_abbrev=False)
    parser.add_argument(
        '-c',
        '--config',
        help='Path to a config file with model-specific parameters',
        required=True)
    parser.add_argument(
        '-m',
        '--compressed_model',
        help='Path to a compressed model (.xml)',
        required=False)
    parser.add_argument(