def argument_parser():
    parser = basic_argument_parser(distributed=True, requires_output_dir=False)
    parser.add_argument("--num-gpus", type=int, default=0, help="number of GPUs per machine")
    return parser
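
# Illustrative sketch (not from the original file): basic_argument_parser is
# defined elsewhere in the repo; a minimal stand-in that mirrors the
# distributed / requires_output_dir knobs used above might look like this.
# The flag set is an assumption, except for the distributed flags, which are
# read back as args.num_processes, args.num_machines, args.machine_rank,
# args.dist_url and args.dist_backend by run_with_cmdline_args below.
import argparse

def _sketch_basic_argument_parser(distributed=True, requires_output_dir=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file", type=str, default="", help="path to a config file")
    parser.add_argument("--output-dir", type=str, required=requires_output_dir)
    if distributed:
        parser.add_argument("--num-processes", type=int, default=1)
        parser.add_argument("--num-machines", type=int, default=1)
        parser.add_argument("--machine-rank", type=int, default=0)
        parser.add_argument("--dist-url", type=str, default="auto")
        parser.add_argument("--dist-backend", type=str, default="NCCL")
    return parser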

def get_parser():
    parser = basic_argument_parser(distributed=False)
    parser.add_argument(
        "--predictor-types",
        type=str,
        nargs="+",
        help="List of strings specifying the types of predictors to export",
    )
    parser.add_argument(
        "--compare-accuracy",
        action="store_true",
        help="If set, all exported models and the original pytorch model will be"
        " evaluated on cfg.DATASETS.TEST",
    )
    parser.add_argument(
        "--skip-if-fail",
        action="store_true",
        default=False,
        help="If set, suppress the exception for failed exporting and continue to"
        " export the next type of model",
    )
    parser.add_argument(
        "--inference-config-file",
        type=str,
        default=None,
        help="Inference config file containing the model parameters for the C++ SDK pipeline",
    )
    return parser
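
# Hedged usage sketch: how the exporter flags above parse. A bare
# argparse.ArgumentParser stands in for basic_argument_parser(distributed=False)
# so the snippet is self-contained; the predictor type names are illustrative.
def _demo_exporter_flags():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--predictor-types", type=str, nargs="+")
    parser.add_argument("--compare-accuracy", action="store_true")
    args = parser.parse_args(
        ["--predictor-types", "torchscript", "caffe2", "--compare-accuracy"]
    )
    assert args.predictor_types == ["torchscript", "caffe2"]
    assert args.compare_accuracy is True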

def cli(args):
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ====================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
    parser.add_argument(
        "--caffe2_logging_print_net_summary",
        type=int,
        default=0,
        help="Control the --caffe2_logging_print_net_summary in GlobalInit",
    )
    # pass the cli args through, matching the cli(args) signature
    run_with_cmdline_args(parser.parse_args(args))
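
# Hedged sketch of how the performance flags above would typically be forwarded
# to Caffe2's GlobalInit. --caffe2_logging_print_net_summary comes from the
# help text above; the --caffe2_omp_num_threads spelling is an assumption.
from caffe2.python import workspace

def _init_caffe2(num_threads=None, caffe2_logging_print_net_summary=0):
    init_args = [
        "caffe2",
        f"--caffe2_logging_print_net_summary={caffe2_logging_print_net_summary}",
    ]
    if num_threads is not None:
        init_args.append(f"--caffe2_omp_num_threads={num_threads}")
    workspace.GlobalInit(init_args)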

def cli(args):
    parser = basic_argument_parser(requires_output_dir=False)
    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    run_with_cmdline_args(parser.parse_args(args))
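
# Hedged sketch of how a cli(args) entry point like the one above is usually
# invoked; the sys.argv wiring is an assumption, not shown in the original.
if __name__ == "__main__":
    import sys

    cli(sys.argv[1:])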

    print_metrics_table(metrics)

    return {
        "accuracy": metrics,
        "metrics": metrics,
    }


def run_with_cmdline_args(args):
    cfg, output_dir, runner = prepare_for_launch(args)
    launch(
        post_mortem_if_fail_for_main(main),
        num_processes_per_machine=args.num_processes,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        backend=args.dist_backend,
        args=(cfg, output_dir, runner, args.is_train),
    )


if __name__ == "__main__":
    parser = basic_argument_parser(requires_output_dir=True)
    parser.add_argument(
        "--is-train",
        type=bool,
        default=True,
        help="data loader is train",
    )
    run_with_cmdline_args(parser.parse_args())
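
# Note on --is-train above: argparse's type=bool calls bool() on the raw
# string, so any non-empty value (including "False") parses as True. A common
# workaround is an explicit converter; _str2bool is an illustrative helper,
# not part of the original file.
import argparse

def _str2bool(value):
    if value.lower() in ("true", "1", "yes"):
        return True
    if value.lower() in ("false", "0", "no"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")

# Usage: parser.add_argument("--is-train", type=_str2bool, default=True, ...)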

        always_spawn=False,
        args=(
            cfg,
            output_dir,
            runner,
            # binary specific optional arguments
            args.predictor_path,
            args.num_threads,
            args.caffe2_engine,
            args.caffe2_logging_print_net_summary,
        ),
    )


if __name__ == "__main__":
    parser = basic_argument_parser()
    parser.add_argument(
        "--predictor-path",
        type=str,
        help="Path (a directory) to the exported model that will be evaluated",
    )
    # === performance config ====================================================
    parser.add_argument(
        "--num-threads",
        type=int,
        default=None,
        help="Number of omp/mkl threads (per process) to use in Caffe2's GlobalInit",
    )
    parser.add_argument(
        "--caffe2-engine",
        type=str,
        default=None,
        help="If set, engine of all ops will be set by this value",
    )
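
# Hedged sketch of the main() signature implied by the launch args tuple above;
# the parameter names are assumptions that mirror the tuple order (cfg,
# output_dir, runner, then the binary-specific optional arguments).
def _sketch_main(
    cfg,
    output_dir,
    runner,
    predictor_path,
    num_threads=None,
    caffe2_engine=None,
    caffe2_logging_print_net_summary=0,
):
    ...  # load the exported predictor from predictor_path and evaluate it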