def main():
    parser = DownloaderArgumentParser()
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='download only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='download only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='download all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='download only models with the specified precisions (actual for DLDT networks); specify one or more of: '
            + ','.join(_common.KNOWN_PRECISIONS))
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', default=Path.cwd(),
        help='path where to save models')
    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
        help='attempt each download up to N times')
    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
        help='which format to use for progress reporting')
    # unlike Model Converter, -jauto is not supported here, because CPU count has no
    # relation to the optimal number of concurrent downloads
    parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1,
        help='how many downloads to perform concurrently')

    args = parser.parse_args()

    reporter = Downloader.make_reporter(args.progress_format)

    with _common.telemetry_session('Model Downloader', 'downloader') as telemetry:
        models = _configuration.load_models_from_args(parser, args, _common.MODEL_ROOT)

        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'downloader_selection_mode', mode)

        failed_models = set()

        downloader = Downloader(args.precisions, args.output_dir, args.cache_dir, args.num_attempts)

        for model in models:
            precisions_to_send = downloader.requested_precisions if args.precisions else downloader.requested_precisions & model.precisions
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(precisions_to_send).replace(',', ';'),
            }
            telemetry.send_event('md', 'downloader_model', json.dumps(model_information))

        failed_models = downloader.bulk_download_model(models, reporter, args.jobs, args.progress_format)

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
                telemetry.send_event('md', 'downloader_failed_models', failed_model_name)
            sys.exit(1)
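# The argparse type `positive_int_arg` used above for --num_attempts and -j/--jobs is not part
# of this listing. A minimal sketch of such a validator, assuming it only needs to reject
# non-numeric and non-positive values (the real helper may behave differently, e.g. exit
# instead of raising):
import argparse

def positive_int_arg(value_str):
    # Parse the raw command-line string and accept only strictly positive integers.
    try:
        value = int(value_str)
    except ValueError:
        raise argparse.ArgumentTypeError('expected an integer, got {!r}'.format(value_str))
    if value <= 0:
        raise argparse.ArgumentTypeError('expected a positive integer, got {}'.format(value))
    return value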
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--download_dir', type=Path, metavar='DIR', default=Path.cwd(),
        help='root of the directory tree with downloaded model files')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place converted files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='convert only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='convert only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='convert all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='run only conversions that produce models with the specified precisions')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Model Optimizer with')
    parser.add_argument('--mo', type=Path, metavar='MO.PY',
        help='Model Optimizer entry point script')
    parser.add_argument('--add_mo_arg', dest='extra_mo_args', metavar='ARG', action='append',
        help='Extra argument to pass to Model Optimizer')
    parser.add_argument('--dry_run', action='store_true',
        help='Print the conversion commands without running them')
    parser.add_argument('-j', '--jobs', type=num_jobs_arg, default=1,
        help='number of conversions to run concurrently')

    # aliases for backwards compatibility
    parser.add_argument('--add-mo-arg', dest='extra_mo_args', action='append', help=argparse.SUPPRESS)
    parser.add_argument('--dry-run', action='store_true', help=argparse.SUPPRESS)

    args = parser.parse_args()

    with _common.telemetry_session('Model Converter', 'converter') as telemetry:
        models = _configuration.load_models_from_args(parser, args, _common.MODEL_ROOT)

        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'converter_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_PRECISIONS
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            precisions_to_send = requested_precisions if args.precisions else requested_precisions & model.precisions
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(precisions_to_send).replace(',', ';'),
            }
            telemetry.send_event('md', 'converter_model', json.dumps(model_information))

        unknown_precisions = requested_precisions - _common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

        mo_path = args.mo
        if mo_path is None:
            mo_executable = shutil.which('mo')

            if mo_executable:
                mo_path = Path(mo_executable)
            else:
                try:
                    mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'tools/mo/openvino/tools/mo/mo.py'
                    if not mo_path.exists():
                        mo_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'tools/model_optimizer/mo.py'
                except KeyError:
                    sys.exit('Unable to locate Model Optimizer. '
                        + 'Use --mo or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

        if mo_path is not None:
            mo_path = mo_path.resolve()
            mo_cmd_prefix = [str(args.python), '--', str(mo_path)]

            if str(mo_path).lower().endswith('.py'):
                mo_dir = mo_path.parent
            else:
                mo_package_path, stderr = _common.get_package_path(args.python, 'openvino.tools.mo')
                mo_dir = mo_package_path

                if mo_package_path is None:
                    mo_package_path, stderr = _common.get_package_path(args.python, 'mo')
                    if mo_package_path is None:
                        sys.exit('Unable to load Model Optimizer. Errors occurred: {}'.format(stderr))
                    mo_dir = mo_package_path.parent

        output_dir = args.download_dir if args.output_dir is None else args.output_dir

        reporter = _reporting.Reporter(_reporting.DirectOutputContext())
        mo_props = ModelOptimizerProperties(
            cmd_prefix=mo_cmd_prefix,
            extra_args=args.extra_mo_args or [],
            base_dir=mo_dir,
        )
        shared_convert_args = (output_dir, args, mo_props, requested_precisions)

        def convert_model(model, reporter):
            if model.model_stages:
                results = []
                for model_stage in model.model_stages:
                    results.append(convert(reporter, model_stage, *shared_convert_args))
                return sum(results) == len(model.model_stages)
            else:
                return convert(reporter, model, *shared_convert_args)

        if args.jobs == 1 or args.dry_run:
            results = [convert_model(model, reporter) for model in models]
        else:
            results = _concurrency.run_in_parallel(args.jobs,
                lambda context, model: convert_model(model, _reporting.Reporter(context)),
                models)

        failed_models = [model.name for model, successful in zip(models, results) if not successful]

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
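# The converter's -j/--jobs uses `num_jobs_arg`, which is not shown in this listing. Judging by
# the downloader comment about "-jauto" and CPU count, it presumably accepts either a positive
# integer or the literal string 'auto'. A hypothetical sketch under that assumption:
import argparse
import os

def num_jobs_arg(value_str):
    # 'auto' means "one conversion per available CPU"; anything else must be a positive integer.
    if value_str == 'auto':
        return os.cpu_count() or 1
    try:
        value = int(value_str)
        if value > 0:
            return value
    except ValueError:
        pass
    raise argparse.ArgumentTypeError('must be a positive integer or "auto", got {!r}'.format(value_str))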
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=Path, metavar='DIR', default=Path.cwd(),
        help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir', type=Path, help='root of the dataset directory tree')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='quantize only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='quantize only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='quantize all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with')
    parser.add_argument('--pot', type=Path,
        help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument('--dry_run', action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='quantize only to the specified precisions')
    parser.add_argument('--target_device', help='target device for the quantized model')

    args = parser.parse_args()

    with _common.telemetry_session('Model Quantizer', 'quantizer') as telemetry:
        models = _configuration.load_models_from_args(parser, args, _common.MODEL_ROOT)

        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'quantizer_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_QUANTIZED_PRECISIONS.keys()
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(requested_precisions).replace(',', ';'),
            }
            telemetry.send_event('md', 'quantizer_model', json.dumps(model_information))

        unknown_precisions = requested_precisions - _common.KNOWN_QUANTIZED_PRECISIONS.keys()
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

        pot_path = args.pot
        if pot_path is None:
            pot_executable = shutil.which('pot')

            if pot_executable:
                pot_cmd_prefix = [str(args.python), '--', pot_executable]
            else:
                try:
                    pot_path = Path(os.environ['INTEL_OPENVINO_DIR']) / 'tools/post_training_optimization_tool/main.py'
                except KeyError:
                    sys.exit('Unable to locate Post-Training Optimization Toolkit. '
                        + 'Use --pot or run setupvars.sh/setupvars.bat from the OpenVINO toolkit.')

        if pot_path is not None:
            # run POT as a script
            pot_cmd_prefix = [str(args.python), '--', str(pot_path)]

        # We can't mark it as required, because it's not required when --print_all is specified.
        # So we have to check it manually.
        if not args.dataset_dir:
            sys.exit('--dataset_dir must be specified.')

        reporter = _reporting.Reporter(_reporting.DirectOutputContext())

        output_dir = args.output_dir or args.model_dir

        failed_models = []

        with tempfile.TemporaryDirectory() as temp_dir:
            annotation_dir = Path(temp_dir) / 'annotations'
            annotation_dir.mkdir()

            pot_env = {
                'ANNOTATIONS_DIR': str(annotation_dir),
                'DATA_DIR': str(args.dataset_dir),
                'DEFINITIONS_FILE': str(_common.DATASET_DEFINITIONS),
            }

            for model in models:
                if not model.quantization_output_precisions:
                    reporter.print_section_heading('Skipping {} (quantization not supported)', model.name)
                    reporter.print()
                    continue

                model_precisions = requested_precisions & model.quantization_output_precisions

                if not model_precisions:
                    reporter.print_section_heading('Skipping {} (all precisions skipped)', model.name)
                    reporter.print()
                    continue

                pot_env.update({'MODELS_DIR': str(args.model_dir / model.subdirectory)})

                for precision in sorted(model_precisions):
                    if not quantize(reporter, model, precision, args, output_dir, pot_cmd_prefix, pot_env):
                        failed_models.append(model.name)
                        break

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
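# `quantize()` itself is not part of this listing. A simplified, hypothetical sketch of how the
# pieces prepared above (pot_cmd_prefix, pot_env, --dry_run) are presumably combined when
# launching POT; it assumes POT's -c/--config option and a per-model config path, and omits the
# per-model output placement the real helper would handle.
import os
import subprocess

def run_pot(pot_cmd_prefix, pot_config_path, pot_env, dry_run=False):
    # POT reads dataset and annotation locations from environment variables, so the prepared
    # pot_env entries are merged on top of the current environment.
    cmd = [*pot_cmd_prefix, '-c', str(pot_config_path)]
    if dry_run:
        print(' '.join(cmd))
        return True
    return subprocess.run(cmd, env={**os.environ, **pot_env}).returncode == 0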
def main():
    parser = DownloaderArgumentParser()
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='download only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='download only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='download all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='download only models with the specified precisions (actual for DLDT networks); specify one or more of: '
            + ','.join(_common.KNOWN_PRECISIONS))
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', default=Path.cwd(),
        help='path where to save models')
    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
        help='attempt each download up to N times')
    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
        help='which format to use for progress reporting')
    # unlike Model Converter, -jauto is not supported here, because CPU count has no
    # relation to the optimal number of concurrent downloads
    parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1,
        help='how many downloads to perform concurrently')

    args = parser.parse_args()

    def make_reporter(context):
        return _reporting.Reporter(context,
            enable_human_output=args.progress_format == 'text',
            enable_json_output=args.progress_format == 'json')

    reporter = make_reporter(_reporting.DirectOutputContext())

    with _common.telemetry_session('Model Downloader', 'downloader') as telemetry:
        models = _configuration.load_models_from_args(parser, args, _common.MODEL_ROOT)

        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'downloader_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_PRECISIONS
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            precisions_to_send = requested_precisions if args.precisions else requested_precisions & model.precisions
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(precisions_to_send).replace(',', ';'),
            }
            telemetry.send_event('md', 'downloader_model', json.dumps(model_information))

        failed_models = set()

        unknown_precisions = requested_precisions - _common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

        downloader = Downloader(args.output_dir, args.cache_dir, args.num_attempts)

        def download_model(model, reporter, session):
            if model.model_stages:
                results = []
                for model_stage in model.model_stages:
                    results.append(downloader.download_model(
                        reporter, session, requested_precisions, model_stage, _common.KNOWN_PRECISIONS))
                return sum(results) == len(model.model_stages)
            else:
                return downloader.download_model(
                    reporter, session, requested_precisions, model, _common.KNOWN_PRECISIONS)

        with contextlib.ExitStack() as exit_stack:
            session_factory = ThreadSessionFactory(exit_stack)
            if args.jobs == 1:
                results = [download_model(model, reporter, session_factory) for model in models]
            else:
                results = _concurrency.run_in_parallel(args.jobs,
                    lambda context, model: download_model(model, make_reporter(context), session_factory),
                    models)

        failed_models = {model.name for model, successful in zip(models, results) if not successful}

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
                telemetry.send_event('md', 'downloader_failed_models', failed_model_name)
            sys.exit(1)
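# `ThreadSessionFactory` is referenced above but not defined in this listing. Given that it is
# constructed with an ExitStack and then called by each download worker, a plausible sketch is a
# callable that hands every thread its own requests.Session, registering each session on the
# exit stack so they are all closed when the stack unwinds; the real class may differ in detail.
import threading
import requests

class ThreadSessionFactory:
    def __init__(self, exit_stack):
        self._lock = threading.Lock()
        self._thread_local = threading.local()
        self._exit_stack = exit_stack

    def __call__(self):
        # Reuse this thread's session if it already has one; otherwise create and register it.
        try:
            session = self._thread_local.session
        except AttributeError:
            with self._lock:  # ExitStack is not guaranteed to be thread-safe
                session = self._exit_stack.enter_context(requests.Session())
            self._thread_local.session = session
        return session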
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=Path, metavar='DIR', default=Path.cwd(),
        help='root of the directory tree with the full precision model files')
    parser.add_argument('--dataset_dir', type=Path, help='root of the dataset directory tree')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        help='root of the directory tree to place quantized model files into')
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='quantize only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='quantize only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='quantize all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('-p', '--python', type=Path, metavar='PYTHON', default=sys.executable,
        help='Python executable to run Post-Training Optimization Toolkit with')
    parser.add_argument('--pot', type=Path,
        help='Post-Training Optimization Toolkit entry point script')
    parser.add_argument('--dry_run', action='store_true',
        help='print the quantization commands without running them')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='quantize only to the specified precisions')
    parser.add_argument('--target_device', help='target device for the quantized model')

    args = parser.parse_args()

    with _common.telemetry_session('Model Quantizer', 'quantizer') as telemetry:
        models = _configuration.load_models_from_args(parser, args, _common.MODEL_ROOT)

        for mode in ['all', 'list', 'name']:
            if getattr(args, mode):
                telemetry.send_event('md', 'quantizer_selection_mode', mode)

        if args.precisions is None:
            requested_precisions = _common.KNOWN_QUANTIZED_PRECISIONS.keys()
        else:
            requested_precisions = set(args.precisions.split(','))

        for model in models:
            model_information = {
                'name': model.name,
                'framework': model.framework,
                'precisions': str(requested_precisions).replace(',', ';'),
            }
            telemetry.send_event('md', 'quantizer_model', json.dumps(model_information))

        quantizer = Quantizer(args.python, requested_precisions, args.output_dir, args.model_dir,
            args.pot, args.dataset_dir, args.dry_run)
        reporter = _reporting.Reporter(_reporting.DirectOutputContext())
        failed_models = quantizer.bulk_quantize(reporter, models, args.target_device)

        if failed_models:
            reporter.print('FAILED:')
            for failed_model_name in failed_models:
                reporter.print(failed_model_name)
            sys.exit(1)
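# Example invocation of the quantizer CLI defined above (the script name, model name, paths, and
# precision value are illustrative placeholders, not taken from this listing):
#
#   python3 quantizer.py --model_dir ./public --dataset_dir ./datasets \
#       --name some-model --precisions FP16-INT8 --target_device CPU --dry_run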