Example #1
0
def main():
    """Parse command-line options, select models, and dump their info as JSON to stdout."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=Path, metavar='CONFIG.YML',
        help='model configuration file (deprecated)')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='only dump info for models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='only dump info for models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true',
        help='dump info for all available models')
    parser.add_argument('--print_all', action='store_true',
        help='print all available models')
    args = parser.parse_args()

    models = common.load_models_from_args(parser, args)

    # One info record per selected model, pretty-printed to stdout.
    json.dump([to_info(model) for model in models], sys.stdout, indent=4)
    print()  # add a final newline
def main():
    """Convert downloaded models to OpenVINO IR by invoking Model Optimizer.

    Parses CLI options, locates the Model Optimizer script, validates the
    requested precisions, then runs one conversion per (model, precision)
    pair, optionally in parallel. Exits with status 1 if any model fails.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=Path, metavar='CONFIG.YML',
        help='model configuration file (deprecated)')
    parser.add_argument('-d', '--download_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='root of the directory tree with downloaded model files')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place converted files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='convert only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='convert only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='convert all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='run only conversions that produce models with the specified precisions')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Model Optimizer with')
    parser.add_argument('--mo', type=Path, metavar='MO.PY',
        help='Model Optimizer entry point script')
    parser.add_argument('--add-mo-arg', dest='extra_mo_args', metavar='ARG', action='append',
        help='Extra argument to pass to Model Optimizer')
    parser.add_argument('--dry-run', action='store_true',
        help='Print the conversion commands without running them')
    parser.add_argument('-j', '--jobs', type=num_jobs_arg, default=1,
        help='number of conversions to run concurrently')

    parser.add_argument('--model_root', type=Path, default=None, help='path to models folder')

    args = parser.parse_args()

    # Fall back to the standard install location when --mo is not given;
    # INTEL_OPENVINO_DIR is set by the toolkit's setupvars script.
    mo_path = args.mo
    if mo_path is None:
        try:
            mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'deployment_tools/model_optimizer/mo.py'
        except KeyError:
            sys.exit('Unable to locate Model Optimizer. '
                + 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

    extra_mo_args = args.extra_mo_args or []

    # No --precisions means "all known precisions"; otherwise reject unknown ones up front.
    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

    models = common.load_models_from_args(parser, args)

    # Default output tree is the download tree (conversion happens in place).
    output_dir = args.download_dir if args.output_dir is None else args.output_dir

    def convert(context, model):
        """Convert one model for every requested precision.

        Returns True on success or when the model is skipped; False when any
        conversion step fails.
        """
        if model.mo_args is None:
            context.printf('========= Skipping {} (no conversions defined)', model.name)
            context.printf('')
            return True

        model_precisions = requested_precisions & model.precisions
        if not model_precisions:
            context.printf('========= Skipping {} (all conversions skipped)', model.name)
            context.printf('')
            return True

        model_format = model.framework

        # Some frameworks are converted to ONNX first; MO then consumes the ONNX form.
        if model.conversion_to_onnx_args:
            if not convert_to_onnx(context, model, output_dir, args):
                return False
            model_format = 'onnx'

        # mo_args may contain $dl_dir/$mo_dir/$conv_dir placeholders; expand them here.
        expanded_mo_args = [
            string.Template(arg).substitute(dl_dir=args.download_dir / model.subdirectory,
                                            mo_dir=mo_path.parent,
                                            conv_dir=output_dir / model.subdirectory)
            for arg in model.mo_args]

        for model_precision in sorted(model_precisions):
            # '--' separates interpreter options from the script path.
            mo_cmd = [str(args.python), '--', str(mo_path),
                '--framework={}'.format(model_format),
                '--data_type={}'.format(model_precision),
                '--output_dir={}'.format(output_dir / model.subdirectory / model_precision),
                '--model_name={}'.format(model.name),
                *expanded_mo_args, *extra_mo_args]

            context.printf('========= {}Converting {} to IR ({})',
                '(DRY RUN) ' if args.dry_run else '', model.name, model_precision)

            context.printf('Conversion command: {}', ' '.join(map(quote_arg, mo_cmd)))

            if not args.dry_run:
                context.printf('', flush=True)

                # context.subprocess presumably returns falsy on a non-zero exit — verify in common.
                if not context.subprocess(mo_cmd):
                    return False

            context.printf('')

        return True

    # Dry runs are forced sequential so the printed commands stay in order.
    if args.jobs == 1 or args.dry_run:
        context = DirectOutputContext()
        results = [convert(context, model) for model in models]
    else:
        with concurrent.futures.ThreadPoolExecutor(args.jobs) as executor:
            def start(model):
                # Each job gets its own queue so its output can be replayed atomically.
                output_queue = queue.Queue()
                return JobWithQueuedOutput(
                    output_queue,
                    executor.submit(convert, QueuedOutputContext(output_queue), model))

            jobs = list(map(start, models))

            try:
                results = [job.complete() for job in jobs]
            except:
                # Cancel the remaining jobs before propagating (e.g. on KeyboardInterrupt).
                for job in jobs: job.cancel()
                raise

    failed_models = [model.name for model, successful in zip(models, results) if not successful]

    if failed_models:
        print('FAILED:')
        print(*sorted(failed_models), sep='\n')
        sys.exit(1)
Example #3
0
def main():
    """Download the selected models' files, then run their post-processing steps.

    Downloads are sequential; files whose first path component is a known
    precision directory are skipped unless that precision was requested.
    Exits with status 1 if any model fails to download.
    """
    parser = DownloaderArgumentParser()
    parser.add_argument('-c', '--config', type=Path, metavar='CONFIG.YML',
        help='model configuration file (deprecated)')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='download only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='download only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all',  action='store_true', help='download all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
                        help='download only models with the specified precisions (actual for DLDT networks)')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='path where to save models')
    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
        help='attempt each download up to N times')
    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
        help='which format to use for progress reporting')

    parser.add_argument('--model_root', type=Path, default=None, help='path to models folder')

    args = parser.parse_args()

    # Exactly one of human/json output is enabled, depending on --progress_format.
    reporter = common.Reporter(
        enable_human_output=args.progress_format == 'text',
        enable_json_output=args.progress_format == 'json')

    # NullCache disables caching when no --cache_dir is given.
    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)
    models = common.load_models_from_args(parser, args)

    failed_models = set()

    # No --precisions means "all known precisions"; otherwise reject unknown ones up front.
    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

    reporter.print_group_heading('Downloading models')
    with requests.Session() as session:
        # Allow file:// URLs to be fetched through the same session interface.
        session.mount('file://', LocalFileAdapter())
        for model in models:
            reporter.emit_event('model_download_begin', model=model.name, num_files=len(model.files))

            output = args.output_dir / model.subdirectory
            output.mkdir(parents=True, exist_ok=True)

            for model_file in model.files:
                # Files laid out as <precision>/<name> are filtered by requested precision.
                if len(model_file.name.parts) == 2:
                    p = model_file.name.parts[0]
                    if p in common.KNOWN_PRECISIONS and p not in requested_precisions:
                        continue

                model_file_reporter = reporter.with_event_context(model=model.name, model_file=model_file.name.as_posix())
                model_file_reporter.emit_event('model_file_download_begin', size=model_file.size)

                destination = output / model_file.name

                if not try_retrieve(model_file_reporter, model.name, destination, model_file, cache, args.num_attempts,
                        lambda: model_file.source.start_download(session, CHUNK_SIZE)):
                    # Remove the partially downloaded model directory so no half-complete
                    # model tree is left behind, then give up on this model.
                    shutil.rmtree(str(output))
                    failed_models.add(model.name)
                    model_file_reporter.emit_event('model_file_download_end', successful=False)
                    reporter.emit_event('model_download_end', model=model.name, successful=False)
                    break

                model_file_reporter.emit_event('model_file_download_end', successful=True)
            else:
                # for/else: runs only when every file downloaded without a break.
                reporter.emit_event('model_download_end', model=model.name, successful=True)

    reporter.print_group_heading('Post-processing')
    for model in models:
        # Skip models that failed to download or have no post-processing steps.
        if model.name in failed_models or not model.postprocessing: continue

        reporter.emit_event('model_postprocessing_begin', model=model.name)

        output = args.output_dir / model.subdirectory

        for postproc in model.postprocessing:
            postproc.apply(reporter, output)

        reporter.emit_event('model_postprocessing_end', model=model.name)

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)
Example #4
0
def main():
    """Download the selected models (optionally in parallel), then post-process them.

    Builds a Reporter per output context, validates the requested precisions,
    downloads each model via download_model(), collects failures, and runs
    post-processing for the models that downloaded successfully. Exits with
    status 1 if any model failed.
    """
    parser = DownloaderArgumentParser()
    parser.add_argument(
        '--name',
        metavar='PAT[,PAT...]',
        help=
        'download only models whose names match at least one of the specified patterns'
    )
    parser.add_argument(
        '--list',
        type=Path,
        metavar='FILE.LST',
        help=
        'download only models whose names match at least one of the patterns in the specified file'
    )
    parser.add_argument('--all',
                        action='store_true',
                        help='download all available models')
    parser.add_argument('--print_all',
                        action='store_true',
                        help='print all available models')
    parser.add_argument(
        '--precisions',
        metavar='PREC[,PREC...]',
        help=
        'download only models with the specified precisions (actual for DLDT networks)'
    )
    parser.add_argument('-o',
                        '--output_dir',
                        type=Path,
                        metavar='DIR',
                        default=Path.cwd(),
                        help='path where to save models')
    parser.add_argument(
        '--cache_dir',
        type=Path,
        metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts',
                        type=positive_int_arg,
                        metavar='N',
                        default=1,
                        help='attempt each download up to N times')
    parser.add_argument('--progress_format',
                        choices=('text', 'json'),
                        default='text',
                        help='which format to use for progress reporting')
    # unlike Model Converter, -jauto is not supported here, because CPU count has no
    # relation to the optimal number of concurrent downloads
    parser.add_argument('-j',
                        '--jobs',
                        type=positive_int_arg,
                        metavar='N',
                        default=1,
                        help='how many downloads to perform concurrently')

    args = parser.parse_args()

    def make_reporter(context):
        # Exactly one of human/json output is enabled, depending on --progress_format.
        return common.Reporter(
            context,
            enable_human_output=args.progress_format == 'text',
            enable_json_output=args.progress_format == 'json')

    reporter = make_reporter(common.DirectOutputContext())

    # NullCache disables caching when no --cache_dir is given.
    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)
    models = common.load_models_from_args(parser, args)

    # No --precisions means "all known precisions"; otherwise reject unknown ones up front.
    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(
                sorted(unknown_precisions))))

    reporter.print_group_heading('Downloading models')
    with contextlib.ExitStack() as exit_stack:
        # The factory creates per-thread sessions; ExitStack closes them all at the end.
        session_factory = ThreadSessionFactory(exit_stack)
        if args.jobs == 1:
            results = [
                download_model(reporter, args, cache, session_factory,
                               requested_precisions, model) for model in models
            ]
        else:
            # Each parallel job gets its own Reporter wrapping its output context.
            results = common.run_in_parallel(
                args.jobs, lambda context, model: download_model(
                    make_reporter(context), args, cache, session_factory,
                    requested_precisions, model), models)

    failed_models = {
        model.name
        for model, successful in zip(models, results) if not successful
    }

    reporter.print_group_heading('Post-processing')
    for model in models:
        # Skip models that failed to download or have no post-processing steps.
        if model.name in failed_models or not model.postprocessing: continue

        reporter.emit_event('model_postprocessing_begin', model=model.name)

        output = args.output_dir / model.subdirectory

        for postproc in model.postprocessing:
            postproc.apply(reporter, output)

        reporter.emit_event('model_postprocessing_end', model=model.name)

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)
Example #5
0
def main():
    """Convert downloaded models to OpenVINO IR by invoking Model Optimizer.

    Parses CLI options (including hidden backwards-compatibility aliases),
    locates the Model Optimizer script, validates the requested precisions,
    then runs one conversion per (model, precision) pair, optionally in
    parallel. Exits with status 1 if any model fails.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-d',
        '--download_dir',
        type=Path,
        metavar='DIR',
        default=Path.cwd(),
        help='root of the directory tree with downloaded model files')
    parser.add_argument(
        '-o',
        '--output_dir',
        type=Path,
        metavar='DIR',
        help='root of the directory tree to place converted files into')
    parser.add_argument(
        '--name',
        metavar='PAT[,PAT...]',
        help=
        'convert only models whose names match at least one of the specified patterns'
    )
    parser.add_argument(
        '--list',
        type=Path,
        metavar='FILE.LST',
        help=
        'convert only models whose names match at least one of the patterns in the specified file'
    )
    parser.add_argument('--all',
                        action='store_true',
                        help='convert all available models')
    parser.add_argument('--print_all',
                        action='store_true',
                        help='print all available models')
    parser.add_argument(
        '--precisions',
        metavar='PREC[,PREC...]',
        help=
        'run only conversions that produce models with the specified precisions'
    )
    parser.add_argument('-p',
                        '--python',
                        type=Path,
                        metavar='PYTHON',
                        default=sys.executable,
                        help='Python executable to run Model Optimizer with')
    parser.add_argument('--mo',
                        type=Path,
                        metavar='MO.PY',
                        help='Model Optimizer entry point script')
    parser.add_argument('--add_mo_arg',
                        dest='extra_mo_args',
                        metavar='ARG',
                        action='append',
                        help='Extra argument to pass to Model Optimizer')
    parser.add_argument(
        '--dry_run',
        action='store_true',
        help='Print the conversion commands without running them')
    parser.add_argument('-j',
                        '--jobs',
                        type=num_jobs_arg,
                        default=1,
                        help='number of conversions to run concurrently')

    # aliases for backwards compatibility
    parser.add_argument('--add-mo-arg',
                        dest='extra_mo_args',
                        action='append',
                        help=argparse.SUPPRESS)
    parser.add_argument('--dry-run',
                        action='store_true',
                        help=argparse.SUPPRESS)

    args = parser.parse_args()

    # Fall back to the standard install location when --mo is not given;
    # INTEL_OPENVINO_DIR is set by the toolkit's setupvars script.
    mo_path = args.mo
    if mo_path is None:
        try:
            mo_path = Path(os.environ['INTEL_OPENVINO_DIR']
                           ) / 'deployment_tools/model_optimizer/mo.py'
        except KeyError:
            sys.exit(
                'Unable to locate Model Optimizer. ' +
                'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.'
            )

    extra_mo_args = args.extra_mo_args or []

    # No --precisions means "all known precisions"; otherwise reject unknown ones up front.
    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(
                sorted(unknown_precisions))))

    models = common.load_models_from_args(parser, args)

    # Default output tree is the download tree (conversion happens in place).
    output_dir = args.download_dir if args.output_dir is None else args.output_dir

    def convert(reporter, model):
        """Convert one model for every requested precision.

        Returns True on success or when the model is skipped; False when any
        conversion step fails.
        """
        if model.mo_args is None:
            reporter.print_section_heading(
                'Skipping {} (no conversions defined)', model.name)
            reporter.print()
            return True

        model_precisions = requested_precisions & model.precisions
        if not model_precisions:
            reporter.print_section_heading(
                'Skipping {} (all conversions skipped)', model.name)
            reporter.print()
            return True

        model_format = model.framework

        # Some frameworks are converted to ONNX first; MO then consumes the ONNX form.
        if model.conversion_to_onnx_args:
            if not convert_to_onnx(reporter, model, output_dir, args):
                return False
            model_format = 'onnx'

        # mo_args may contain $dl_dir/$mo_dir/$conv_dir/$config_dir placeholders; expand them here.
        expanded_mo_args = [
            string.Template(arg).substitute(
                dl_dir=args.download_dir / model.subdirectory,
                mo_dir=mo_path.parent,
                conv_dir=output_dir / model.subdirectory,
                config_dir=common.MODEL_ROOT / model.subdirectory)
            for arg in model.mo_args
        ]

        for model_precision in sorted(model_precisions):
            # '--' separates interpreter options from the script path.
            mo_cmd = [
                str(args.python), '--',
                str(mo_path), '--framework={}'.format(model_format),
                '--data_type={}'.format(model_precision),
                '--output_dir={}'.format(output_dir / model.subdirectory /
                                         model_precision),
                '--model_name={}'.format(model.name), *expanded_mo_args,
                *extra_mo_args
            ]

            reporter.print_section_heading(
                '{}Converting {} to IR ({})',
                '(DRY RUN) ' if args.dry_run else '', model.name,
                model_precision)

            reporter.print('Conversion command: {}',
                           common.command_string(mo_cmd))

            if not args.dry_run:
                reporter.print(flush=True)

                # job_context.subprocess presumably returns falsy on a non-zero exit — verify in common.
                if not reporter.job_context.subprocess(mo_cmd):
                    return False

            reporter.print()

        return True

    reporter = common.Reporter(common.DirectOutputContext())

    # Dry runs are forced sequential so the printed commands stay in order.
    if args.jobs == 1 or args.dry_run:
        results = [convert(reporter, model) for model in models]
    else:
        # Each parallel job gets its own Reporter wrapping its output context.
        results = common.run_in_parallel(
            args.jobs,
            lambda context, model: convert(common.Reporter(context), model),
            models)

    failed_models = [
        model.name for model, successful in zip(models, results)
        if not successful
    ]

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)
Example #6
0
def main():
    """Quantize full-precision models with the Post-Training Optimization Toolkit.

    Parses CLI options, locates the POT entry point, validates the requested
    quantized precisions, then quantizes each quantizable model for every
    requested precision using a temporary annotation directory. Exits with
    status 1 if any model fails.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_dir',
        type=Path,
        metavar='DIR',
        default=Path.cwd(),
        help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir',
                        type=Path,
                        help='root of the dataset directory tree')
    parser.add_argument(
        '-o',
        '--output_dir',
        type=Path,
        metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument(
        '--name',
        metavar='PAT[,PAT...]',
        help=
        'quantize only models whose names match at least one of the specified patterns'
    )
    parser.add_argument(
        '--list',
        type=Path,
        metavar='FILE.LST',
        help=
        'quantize only models whose names match at least one of the patterns in the specified file'
    )
    parser.add_argument('--all',
                        action='store_true',
                        help='quantize all available models')
    parser.add_argument('--print_all',
                        action='store_true',
                        help='print all available models')
    parser.add_argument(
        '-p',
        '--python',
        type=Path,
        metavar='PYTHON',
        default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with'
    )
    parser.add_argument(
        '--pot',
        type=Path,
        help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument(
        '--dry_run',
        action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions',
                        metavar='PREC[,PREC...]',
                        help='quantize only to the specified precisions')
    parser.add_argument('--target_device',
                        help='target device for the quantized model')
    args = parser.parse_args()

    # Fall back to the standard install location when --pot is not given;
    # INTEL_OPENVINO_DIR is set by the toolkit's setupvars script.
    pot_path = args.pot
    if pot_path is None:
        try:
            pot_path = Path(
                os.environ['INTEL_OPENVINO_DIR']
            ) / 'deployment_tools/tools/post_training_optimization_toolkit/main.py'
        except KeyError:
            sys.exit(
                'Unable to locate Post-Training Optimization Toolkit. ' +
                'Use --pot or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.'
            )

    models = common.load_models_from_args(parser, args)

    # We can't mark it as required, because it's not required when --print_all is specified.
    # So we have to check it manually.
    if not args.dataset_dir:
        sys.exit('--dataset_dir must be specified.')

    # No --precisions means "all known quantized precisions"; otherwise reject unknown ones up front.
    if args.precisions is None:
        requested_precisions = common.KNOWN_QUANTIZED_PRECISIONS.keys()
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_QUANTIZED_PRECISIONS.keys(
        )
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(
                sorted(unknown_precisions))))

    reporter = common.Reporter(common.DirectOutputContext())

    # Default output tree is the model tree (quantization happens in place).
    output_dir = args.output_dir or args.model_dir

    failed_models = []

    # Annotations are generated into a throwaway directory that is removed on exit.
    with tempfile.TemporaryDirectory() as temp_dir:
        annotation_dir = Path(temp_dir) / 'annotations'
        annotation_dir.mkdir()

        # Environment variables consumed by the POT process — TODO confirm against quantize().
        pot_env = {
            'ANNOTATIONS_DIR': str(annotation_dir),
            'DATA_DIR': str(args.dataset_dir),
            'DEFINITIONS_FILE': str(DATASET_DEFINITIONS_PATH),
        }

        for model in models:
            if not model.quantizable:
                reporter.print_section_heading(
                    'Skipping {} (quantization not supported)', model.name)
                reporter.print()
                continue

            for precision in sorted(requested_precisions):
                # On the first failing precision, record the model and move on to the next one.
                if not quantize(reporter, model, precision, args, output_dir,
                                pot_path, pot_env):
                    failed_models.append(model.name)
                    break

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)