Example #1
# Standard-library imports used below. The _common, _concurrency, _configuration
# and _reporting helpers, ModelOptimizerProperties, convert() and num_jobs_arg()
# are assumed to be provided by the surrounding Open Model Zoo tools package.
import argparse
import json
import os
import sys

from pathlib import Path


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d',
        '--download_dir',
        type=Path,
        metavar='DIR',
        default=Path.cwd(),
        help='root of the directory tree with downloaded model files')
    parser.add_argument(
        '-o',
        '--output_dir',
        type=Path,
        metavar='DIR',
        help='root of the directory tree to place converted files into')
    parser.add_argument(
        '--name',
        metavar='PAT[,PAT...]',
        help=
        'convert only models whose names match at least one of the specified patterns'
    )
    parser.add_argument(
        '--list',
        type=Path,
        metavar='FILE.LST',
        help=
        'convert only models whose names match at least one of the patterns in the specified file'
    )
    parser.add_argument('--all',
                        action='store_true',
                        help='convert all available models')
    parser.add_argument('--print_all',
                        action='store_true',
                        help='print all available models')
    parser.add_argument(
        '--precisions',
        metavar='PREC[,PREC...]',
        help=
        'run only conversions that produce models with the specified precisions'
    )
    parser.add_argument('-p',
                        '--python',
                        type=Path,
                        metavar='PYTHON',
                        default=sys.executable,
                        help='Python executable to run Model Optimizer with')
    parser.add_argument('--mo',
                        type=Path,
                        metavar='MO.PY',
                        help='Model Optimizer entry point script')
    parser.add_argument('--add_mo_arg',
                        dest='extra_mo_args',
                        metavar='ARG',
                        action='append',
                        help='extra argument to pass to Model Optimizer')
    parser.add_argument(
        '--dry_run',
        action='store_true',
        help='print the conversion commands without running them')
    parser.add_argument('-j',
                        '--jobs',
                        type=num_jobs_arg,
                        default=1,
                        help='number of conversions to run concurrently')

    # lux
    parser.add_argument('--model_root',
                        type=Path,
                        default=None,
                        help='path to models folder')

    # aliases for backwards compatibility
    parser.add_argument('--add-mo-arg',
                        dest='extra_mo_args',
                        action='append',
                        help=argparse.SUPPRESS)
    parser.add_argument('--dry-run',
                        action='store_true',
                        help=argparse.SUPPRESS)

    args = parser.parse_args()

    with _common.telemetry_session('Model Converter',
                                   'converter') as telemetry:
        models = _configuration.load_models_from_args(parser, args)
        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'converter_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_PRECISIONS
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            precisions_to_send = requested_precisions if args.precisions else requested_precisions & model.precisions
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(precisions_to_send).replace(',', ';'),
            }
            telemetry.send_event('md', 'converter_model',
                                 json.dumps(model_information))

        unknown_precisions = requested_precisions - _common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(
                sorted(unknown_precisions))))

        mo_path = args.mo

        if mo_path is None:
            mo_package_path = _common.get_package_path(args.python, 'mo')

            if mo_package_path:
                # run MO as a module
                mo_cmd_prefix = [str(args.python), '-m', 'mo']
                mo_dir = mo_package_path.parent
            else:
                try:
                    mo_path = (Path(os.environ['INTEL_OPENVINO_DIR'])
                               / 'deployment_tools/model_optimizer/mo.py')
                except KeyError:
                    sys.exit(
                        'Unable to locate Model Optimizer. ' +
                        'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.'
                    )

        if mo_path is not None:
            # run MO as a script
            mo_cmd_prefix = [str(args.python), '--', str(mo_path)]
            mo_dir = mo_path.parent

        output_dir = args.download_dir if args.output_dir is None else args.output_dir

        reporter = _reporting.Reporter(_reporting.DirectOutputContext())
        mo_props = ModelOptimizerProperties(
            cmd_prefix=mo_cmd_prefix,
            extra_args=args.extra_mo_args or [],
            base_dir=mo_dir,
        )
        shared_convert_args = (output_dir, args, mo_props,
                               requested_precisions)

        if args.jobs == 1 or args.dry_run:
            results = [
                convert(reporter, model, *shared_convert_args)
                for model in models
            ]
        else:
            results = _concurrency.run_in_parallel(
                args.jobs, lambda context, model: convert(
                    _reporting.Reporter(context), model, *shared_convert_args),
                models)

        failed_models = [
            model.name for model, successful in zip(models, results)
            if not successful
        ]

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
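
The -j option above relies on a num_jobs_arg type callable that is not part of this snippet. Judging from how it is used here (and from the downloader's note below that '-j auto' is deliberately not accepted there), a plausible sketch of such a helper could look like the following; the exact behaviour and error message are assumptions:

import argparse
import os


def num_jobs_arg(value):
    # Hypothetical helper: accept 'auto' (meaning one job per CPU) or a positive integer.
    if value == 'auto':
        return os.cpu_count() or 1
    try:
        jobs = int(value)
        if jobs > 0:
            return jobs
    except ValueError:
        pass
    raise argparse.ArgumentTypeError('expected a positive integer or "auto", got {!r}'.format(value))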
Example #2
# Standard-library imports used below. The _common, _configuration and _reporting
# helpers and quantize() are assumed to be provided by the surrounding
# Open Model Zoo tools package.
import argparse
import json
import os
import sys
import tempfile

from pathlib import Path


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir', type=Path, help='root of the dataset directory tree')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='quantize only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='quantize only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='quantize all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with')
    parser.add_argument('--pot', type=Path, help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument('--dry_run', action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='quantize only to the specified precisions')
    parser.add_argument('--target_device', help='target device for the quantized model')
    args = parser.parse_args()

    with _common.telemetry_session('Model Quantizer', 'quantizer') as telemetry:
        models = _configuration.load_models_from_args(parser, args)
        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'quantizer_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_QUANTIZED_PRECISIONS.keys()
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(requested_precisions).replace(',', ';'),
            }
            telemetry.send_event('md', 'quantizer_model', json.dumps(model_information))

        unknown_precisions = requested_precisions - _common.KNOWN_QUANTIZED_PRECISIONS.keys()
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

        pot_path = args.pot
        if pot_path is None:
            if _common.get_package_path(args.python, 'pot'):
                # run POT as a module
                pot_cmd_prefix = [str(args.python), '-m', 'pot']
            else:
                try:
                    pot_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/tools/post_training_optimization_toolkit/main.py'
                except KeyError:
                    sys.exit('Unable to locate Post-Training Optimization Toolkit. '
                        + 'Use --pot or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

        if pot_path is not None:
            # run POT as a script
            pot_cmd_prefix = [str(args.python), '--', str(pot_path)]

        # We can't mark it as required, because it's not required when --print_all is specified.
        # So we have to check it manually.
        if not args.dataset_dir:
            sys.exit('--dataset_dir must be specified.')

        reporter = _reporting.Reporter(_reporting.DirectOutputContext())

        output_dir = args.output_dir or args.model_dir

        failed_models = []

        with tempfile.TemporaryDirectory() as temp_dir:
            annotation_dir = Path(temp_dir) / 'annotations'
            annotation_dir.mkdir()

            pot_env = {
                'ANNOTATIONS_DIR': str(annotation_dir),
                'DATA_DIR': str(args.dataset_dir),
                'DEFINITIONS_FILE': str(_common.DATASET_DEFINITIONS),
            }

            for model in models:
                if not model.quantization_output_precisions:
                    reporter.print_section_heading('Skipping {} (quantization not supported)', model.name)
                    reporter.print()
                    continue

                model_precisions = requested_precisions & model.quantization_output_precisions

                if not model_precisions:
                    reporter.print_section_heading('Skipping {} (all precisions skipped)', model.name)
                    reporter.print()
                    continue

                pot_env.update({
                    'MODELS_DIR': str(args.model_dir / model.subdirectory)
                })

                for precision in sorted(model_precisions):
                    if not quantize(reporter, model, precision, args, output_dir, pot_cmd_prefix, pot_env):
                        failed_models.append(model.name)
                        break


        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
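
The quantize() helper called above is defined elsewhere. As an illustration of how the pot_cmd_prefix and pot_env values assembled in this function could be consumed, here is a minimal, hypothetical sketch; the POT command-line flags and the config handling are assumptions, not the actual implementation:

import os
import subprocess


def run_pot(pot_cmd_prefix, pot_config_path, output_dir, pot_env):
    # Hypothetical helper: launch POT with ANNOTATIONS_DIR, DATA_DIR, DEFINITIONS_FILE
    # and MODELS_DIR layered on top of the current environment, so the per-model
    # POT config can reference them; returns True on success.
    cmd = [*pot_cmd_prefix, '--config', str(pot_config_path), '--output-dir', str(output_dir)]
    env = {**os.environ, **pot_env}
    return subprocess.run(cmd, env=env).returncode == 0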
Example #3
# Standard-library imports used below. DownloaderArgumentParser, positive_int_arg,
# NullCache, DirCache, ThreadSessionFactory, download_model() and the _common,
# _concurrency, _configuration and _reporting helpers are assumed to be provided
# by the surrounding Open Model Zoo tools package.
import contextlib
import json
import sys

from pathlib import Path


def main():
    parser = DownloaderArgumentParser()
    parser.add_argument(
        '--name',
        metavar='PAT[,PAT...]',
        help=
        'download only models whose names match at least one of the specified patterns'
    )
    parser.add_argument(
        '--list',
        type=Path,
        metavar='FILE.LST',
        help=
        'download only models whose names match at least one of the patterns in the specified file'
    )
    parser.add_argument('--all',
                        action='store_true',
                        help='download all available models')
    parser.add_argument('--print_all',
                        action='store_true',
                        help='print all available models')
    parser.add_argument(
        '--precisions',
        metavar='PREC[,PREC...]',
        help=
        'download only models with the specified precisions (applies only to DLDT networks); specify one or more of: '
        + ','.join(_common.KNOWN_PRECISIONS))
    parser.add_argument('-o',
                        '--output_dir',
                        type=Path,
                        metavar='DIR',
                        default=Path.cwd(),
                        help='directory to save the downloaded models into')
    parser.add_argument(
        '--cache_dir',
        type=Path,
        metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts',
                        type=positive_int_arg,
                        metavar='N',
                        default=1,
                        help='attempt each download up to N times')
    parser.add_argument('--progress_format',
                        choices=('text', 'json'),
                        default='text',
                        help='which format to use for progress reporting')
    # Unlike the Model Converter, '-j auto' is not supported here, because the CPU
    # count has no bearing on the optimal number of concurrent downloads.
    parser.add_argument('-j',
                        '--jobs',
                        type=positive_int_arg,
                        metavar='N',
                        default=1,
                        help='how many downloads to perform concurrently')

    # lux
    parser.add_argument('--model_root',
                        type=Path,
                        default=None,
                        help='path to models folder')

    args = parser.parse_args()

    def make_reporter(context):
        return _reporting.Reporter(
            context,
            enable_human_output=args.progress_format == 'text',
            enable_json_output=args.progress_format == 'json')

    reporter = make_reporter(_reporting.DirectOutputContext())
    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)

    with _common.telemetry_session('Model Downloader',
                                   'downloader') as telemetry:
        models = _configuration.load_models_from_args(parser, args)
        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'downloader_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_PRECISIONS
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            precisions_to_send = requested_precisions if args.precisions else requested_precisions & model.precisions
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(precisions_to_send).replace(',', ';'),
            }
            telemetry.send_event('md', 'downloader_model',
                                 json.dumps(model_information))

        unknown_precisions = requested_precisions - _common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(
                sorted(unknown_precisions))))

        with contextlib.ExitStack() as exit_stack:
            session_factory = ThreadSessionFactory(exit_stack)
            if args.jobs == 1:
                results = [
                    download_model(reporter, args, cache, session_factory,
                                   requested_precisions, model)
                    for model in models
                ]
            else:
                results = _concurrency.run_in_parallel(
                    args.jobs, lambda context, model: download_model(
                        make_reporter(context), args, cache, session_factory,
                        requested_precisions, model), models)

        failed_models = {
            model.name
            for model, successful in zip(models, results) if not successful
        }

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
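
positive_int_arg, used as the argparse type for --num_attempts and -j above, is likewise defined outside this snippet. A minimal sketch of what such a validator presumably does:

import argparse


def positive_int_arg(value):
    # Hypothetical helper: reject anything that is not a strictly positive integer.
    try:
        result = int(value)
        if result > 0:
            return result
    except ValueError:
        pass
    raise argparse.ArgumentTypeError('expected a positive integer, got {!r}'.format(value))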