        res.update(Trainer.test_with_TTA(cfg, model))
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop or subclassing the trainer.
    """
    trainer = Trainer(cfg, model)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("soft link to {}".format(config.OUTPUT_DIR))
    config.link_log()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
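

# ---------------------------------------------------------------------------
# Illustrative sketch only: one way to act on the note above ("consider ...
# subclassing the trainer") is to move the TTA evaluation hook into a Trainer
# subclass so main() stays minimal. The class and its name are hypothetical;
# it only reuses calls that already appear above (register_hooks,
# hooks.EvalHook, test_with_TTA, resume_or_load, train).
class TTATrainer(Trainer):
    """Trainer that always registers a test-time-augmentation EvalHook."""

    def __init__(self, cfg, model):
        super().__init__(cfg, model)
        # Register the TTA evaluation hook here instead of in main();
        # an EvalHook with period 0 typically runs the callable only after
        # the final iteration.
        self.register_hooks([
            hooks.EvalHook(0, lambda: self.test_with_TTA(cfg, self.model))
        ])


# Hypothetical usage, mirroring main() above:
#     trainer = TTATrainer(cfg, model)
#     trainer.resume_or_load(resume=args.resume)
#     trainer.train()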
logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) if __name__ == "__main__": parser = default_argument_parser() parser.add_argument("--task", choices=["train", "eval", "data"], required=True) args = parser.parse_args() assert not args.eval_only if args.task == "data": f = benchmark_data elif args.task == "train": """ Note: training speed may not be representative. The training cost of a R-CNN model varies with the content of the data and the quality of the model. """ f = benchmark_train elif args.task == "eval": f = benchmark_eval # only benchmark single-GPU inference. assert args.num_gpus == 1 and args.num_machines == 1 launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args, ))