return lines if __name__ == '__main__': test_args = TestOptions().parse() test_args.thread = 1 test_args.batchsize = 1 merge_cfg_from_file(test_args) # load model model = MetricDepthModel() model.eval() # load checkpoint if test_args.load_ckpt: load_ckpt(test_args, model) model.cuda() model = torch.nn.DataParallel(model) out_dir = os.path.join(test_args.dataroot, 'VNL_Monocular') if not os.path.exists(out_dir): os.mkdir(out_dir) file_path = test_args.dataroot + "path_list.txt" with open(file_path, "r") as file: for path in tqdm(file, total=get_num_lines(file_path)): img_path = path.strip() dir, file = os.path.split(img_path) out_dir = dir.replace('leftImg8bit', 'VNL_Monocular') out_path = os.path.join( out_dir, file.replace('leftImg8bit', 'VNL_Monocular'))
optimizer = ModelOptimizer(model)
# Loss function
loss_func = ModelLoss()

# Running validation-error record; updated in place by load_ckpt/train.
val_err = [{'abs_rel': 0, 'silog': 0}]
ignore_step = -1

# Learning-rate strategy: polynomial decay, lr_factor = (1 - iter/total)^0.9.
lr_optim_lambda = lambda iter: (1.0 - iter / (float(total_iters))) ** 0.9
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer.optimizer, lr_lambda=lr_optim_lambda)

# Resume model/optimizer/scheduler state from a checkpoint when requested.
if train_args.load_ckpt:
    load_ckpt(train_args, model, optimizer.optimizer, scheduler, val_err)
    # Steps already consumed inside the partially finished epoch; these are
    # skipped when training resumes.
    ignore_step = train_args.start_step - train_args.start_epoch * math.ceil(
        train_datasize / train_args.batchsize)

if gpu_num != -1:
    model = torch.nn.DataParallel(model)

try:
    for epoch in range(train_args.start_epoch, cfg.TRAIN.EPOCH[-1]):
        # training
        train(train_dataloader, model, epoch, loss_func, optimizer,
              scheduler, training_stats, val_dataloader, val_err, ignore_step)
        # Only the first (resumed) epoch skips steps.
        ignore_step = -1
except (RuntimeError, KeyboardInterrupt):
    logger.info('Save ckpt on exception ...')
    # NOTE(review): the handler is truncated in this chunk — the actual
    # checkpoint save presumably follows below this view.