def debug_parser():
    """Build the argument parser for debug runs.

    Extends the project's default parser with a ``--ckpt-file`` option
    pointing at the checkpoint to inspect.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    p = default_argument_parser()
    p.add_argument(
        "--ckpt-file",
        type=str,
        default=None,
        help="path of debug checkpoint file",
    )
    return p
def train_argument_parser():
    """Build the argument parser for training runs.

    Extends the project's default parser with:
      * ``--dir``     — directory holding the config and network definition
                        (defaults to the working directory when omitted).
      * ``--clearml`` — flag enabling ClearML experiment tracking.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    p = default_argument_parser()
    p.add_argument(
        "--dir",
        type=str,
        default=None,
        help="path of dir that contains config and network, default to working dir",
    )
    p.add_argument("--clearml", action="store_true", help="use clearml or not")
    return p
def debug_parser():
    """Build the argument parser for debug runs.

    Extends the project's default parser with:
      * ``--dir``       — directory holding the config and network definition
                          (defaults to the working directory when omitted).
      * ``--ckpt-file`` — path to the checkpoint to debug.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    p = default_argument_parser()
    p.add_argument(
        "--dir",
        type=str,
        default=None,
        help="path of dir that contains config and network, default to working dir",
    )
    p.add_argument(
        "--ckpt-file",
        type=str,
        default=None,
        help="path of debug checkpoint file",
    )
    return p
def test_argument_parser():
    """Build the argument parser for evaluation/test runs.

    Extends the project's default parser with:
      * ``--start-iter`` — first checkpoint iteration to test.
      * ``--end-iter``   — last checkpoint iteration to test.
      * ``--debug``      — flag enabling debug mode.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    p = default_argument_parser()
    p.add_argument(
        "--start-iter",
        type=int,
        default=None,
        help="start iter used to test",
    )
    p.add_argument(
        "--end-iter",
        type=int,
        default=None,
        help="end iter used to test",
    )
    p.add_argument("--debug", action="store_true", help="use debug mode or not")
    return p
def test_argument_parser():
    """Build the argument parser for evaluation/test runs.

    Extends the project's default parser with:
      * ``--dir``        — directory holding the config and network definition
                           (defaults to the working directory when omitted).
      * ``--start-iter`` — first checkpoint iteration to test.
      * ``--end-iter``   — last checkpoint iteration to test.
      * ``--debug``      — flag enabling debug mode.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    p = default_argument_parser()
    p.add_argument(
        "--dir",
        type=str,
        default=None,
        help="path of dir that contains config and network, default to working dir",
    )
    p.add_argument(
        "--start-iter",
        type=int,
        default=None,
        help="start iter used to test",
    )
    p.add_argument(
        "--end-iter",
        type=int,
        default=None,
        help="end iter used to test",
    )
    p.add_argument("--debug", action="store_true", help="use debug mode or not")
    return p
with open(save_path, "wb") as f: pkl.dump(res, f) def main(args): if isinstance(config, list): assert isinstance(build_model, list) and len(config) == len(build_model) for cfg, build in zip(config, build_model): stage_main(args, cfg, build) else: stage_main(args, config, build_model) if __name__ == "__main__": args = default_argument_parser().parse_args() if isinstance(config, list): assert len(config) > 0 print("soft link first config in list to {}".format(config[0].OUTPUT_DIR)) config[0].link_log() else: print("soft link to {}".format(config.OUTPUT_DIR)) config.link_log() print("Command Line Args:", args) launch( main, args.num_gpus, num_machines=args.num_machines, machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,),
for _ in range(5): # warmup model(dummy_data[0]) max_iter = 400 timer = Timer() with tqdm.tqdm(total=max_iter) as pbar: for idx, d in enumerate(f()): if idx == max_iter: break model(d) pbar.update() logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) if __name__ == "__main__": parser = default_argument_parser() parser.add_argument("--task", choices=["train", "eval", "data"], required=True) args = parser.parse_args() assert not args.eval_only if args.task == "data": f = benchmark_data elif args.task == "train": """ Note: training speed may not be representative. The training cost of a R-CNN model varies with the content of the data and the quality of the model. """ f = benchmark_train