import os

import torch
import torch.backends.cudnn as cudnn

# get_args, get_cfg_defaults, update_inference_cfg, save_all_cfg and Trainer are
# project-specific helpers and are assumed to be available in this script's scope.


def main():
    r"""Main function."""
    # arguments
    args = get_args()
    print("Command line arguments:")
    print(args)

    # configurations
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    if args.inference:
        update_inference_cfg(cfg)
    cfg.freeze()
    print("Configuration details:")
    print(cfg)

    # create the output directory and save the merged config for reproducibility
    if not os.path.exists(cfg.DATASET.OUTPUT_PATH):
        print('Output directory: ', cfg.DATASET.OUTPUT_PATH)
        os.makedirs(cfg.DATASET.OUTPUT_PATH)
        save_all_cfg(cfg, cfg.DATASET.OUTPUT_PATH)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device: ", device)
    cudnn.enabled = True
    cudnn.benchmark = True

    # run regular training/inference, or chunk-based processing if configured
    mode = 'test' if args.inference else 'train'
    trainer = Trainer(cfg, device, mode, args.checkpoint)
    if cfg.DATASET.DO_CHUNK_TITLE == 0:
        if args.inference:
            trainer.test()
        else:
            trainer.train()
    else:
        trainer.run_chunk(mode)
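
# A minimal sketch of the command-line parser that main() relies on. The project's
# actual get_args() may differ; only the attributes main() reads (config_file, opts,
# inference, checkpoint) are covered, and the flag names and defaults below are
# assumptions rather than the project's definitive CLI.
import argparse


def get_args():
    r"""Parse the command-line arguments consumed by main()."""
    parser = argparse.ArgumentParser(description="Model training and inference.")
    parser.add_argument('--config-file', type=str, required=True,
                        help="Path to the YAML configuration file.")
    parser.add_argument('--inference', action='store_true',
                        help="Run inference (test mode) instead of training.")
    parser.add_argument('--checkpoint', type=str, default=None,
                        help="Path to a model checkpoint to restore.")
    parser.add_argument('opts', nargs=argparse.REMAINDER,
                        help="Additional KEY VALUE pairs overriding config options.")
    return parser.parse_args()


# Standard entry point so the script can be run directly.
if __name__ == '__main__':
    main()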