def main():
    global args
    args = parser.parse_args()
    if args.num_samples == 0:
        args.num_samples = None
    if args.val_batch_size is None:
        args.val_batch_size = args.batch_size
    if args.seed:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        # torch.backends.cudnn.deterministic = True
        # warnings.warn('You have chosen to seed training. '
        #               'This will turn on the CUDNN deterministic setting, '
        #               'which can slow down your training considerably! '
        #               'You may see unexpected behavior when restarting from checkpoints.')

    # For distributed training
    # init_distributed_mode(args)

    if not args.no_cuda and not torch.cuda.is_available():
        raise Exception("No GPU available for usage")
    torch.backends.cudnn.benchmark = args.cudnn

    # Init model
    channels_in = 1 if args.input_type == 'depth' else 4
    model = Models.define_model(mod=args.mod, in_channels=channels_in,
                                thres=args.thres)
    define_init_weights(model, args.weight_init)

    # Load on gpu before passing params to optimizer
    if not args.no_cuda:
        if not args.multi:
            model = model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
            # model.cuda()
            # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
            # model = model.module

    save_id = '{}_{}_{}_{}_{}_batch{}_pretrain{}_wlid{}_wrgb{}_wguide{}_wpred{}_patience{}_num_samples{}_multi{}'.\
        format(args.mod, args.optimizer, args.loss_criterion,
               args.learning_rate, args.input_type, args.batch_size,
               args.pretrained, args.wlid, args.wrgb, args.wguide,
               args.wpred, args.lr_decay_iters, args.num_samples, args.multi)

    # INIT optimizer/scheduler/loss criterion
    optimizer = define_optim(args.optimizer, model.parameters(),
                             args.learning_rate, args.weight_decay)
    scheduler = define_scheduler(optimizer, args)

    # Optional to use different losses
    criterion_local = define_loss(args.loss_criterion)
    criterion_lidar = define_loss(args.loss_criterion)
    criterion_rgb = define_loss(args.loss_criterion)
    criterion_guide = define_loss(args.loss_criterion)

    # INIT dataset
    dataset = Datasets.define_dataset(args.dataset, args.data_path,
                                      args.input_type, args.side_selection)
    dataset.prepare_dataset()
    train_loader, valid_loader, valid_selection_loader = get_loader(args, dataset)

    # Resume training
    best_epoch = 0
    lowest_loss = np.inf
    args.save_path = os.path.join(args.save_path, save_id)
    mkdir_if_missing(args.save_path)
    log_file_name = 'log_train_start_0.txt'
    args.resume = first_run(args.save_path)
    if args.resume and not args.test_mode and not args.evaluate:
        path = os.path.join(
            args.save_path,
            'checkpoint_model_epoch_{}.pth.tar'.format(int(args.resume)))
        if os.path.isfile(path):
            log_file_name = 'log_train_start_{}.txt'.format(args.resume)
            # stdout
            sys.stdout = Logger(os.path.join(args.save_path, log_file_name))
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(path)
            args.start_epoch = checkpoint['epoch']
            lowest_loss = checkpoint['loss']
            best_epoch = checkpoint['best epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log_file_name = 'log_train_start_0.txt'
            # stdout
            sys.stdout = Logger(os.path.join(args.save_path, log_file_name))
            print("=> no checkpoint found at '{}'".format(path))

    # Only evaluate
    elif args.evaluate:
        print("Evaluate only")
        best_file_lst = glob.glob(os.path.join(args.save_path, 'model_best*'))
        if len(best_file_lst) != 0:
            best_file_name = best_file_lst[0]
            print(best_file_name)
            if os.path.isfile(best_file_name):
                sys.stdout = Logger(os.path.join(args.save_path, 'Evaluate.txt'))
                print("=> loading checkpoint '{}'".format(best_file_name))
                checkpoint = torch.load(best_file_name)
                model.load_state_dict(checkpoint['state_dict'])
            else:
                print("=> no checkpoint found at '{}'".format(best_file_name))
        else:
            print("=> no checkpoint found due to empty list in folder {}".
                  format(args.save_path))
        validate(valid_selection_loader, model, criterion_lidar, criterion_rgb,
                 criterion_local, criterion_guide)
        return

    # Start training from clean slate
    else:
        # Redirect stdout
        sys.stdout = Logger(os.path.join(args.save_path, log_file_name))

    # INIT MODEL
    print(40 * "=" + "\nArgs:{}\n".format(args) + 40 * "=")
    print("Init model: '{}'".format(args.mod))
    print("Number of parameters in model {} is {:.3f}M".format(
        args.mod.upper(),
        sum(tensor.numel() for tensor in model.parameters()) / 1e6))

    # Load pretrained state for cityscapes in GLOBAL net
    if args.pretrained and not args.resume:
        if not args.load_external_mod:
            if not args.multi:
                target_state = model.depthnet.state_dict()
            else:
                target_state = model.module.depthnet.state_dict()
            check = torch.load('erfnet_pretrained.pth')
            for name, val in check.items():
                # Exclude multi GPU prefix
                mono_name = name[7:]
                if mono_name not in target_state:
                    continue
                try:
                    target_state[mono_name].copy_(val)
                except RuntimeError:
                    continue
            print('Successfully loaded pretrained model')
        else:
            check = torch.load('external_mod.pth.tar')
            lowest_loss_load = check['loss']
            target_state = model.state_dict()
            for name, val in check['state_dict'].items():
                if name not in target_state:
                    continue
                try:
                    target_state[name].copy_(val)
                except RuntimeError:
                    continue
            print("=> loaded EXTERNAL checkpoint with best rmse {}".format(
                lowest_loss_load))

    # Start training
    for epoch in range(args.start_epoch, args.nepochs):
        print("\n => Start EPOCH {}".format(epoch + 1))
        print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        print(args.save_path)

        # Adjust learning rate
        if args.lr_policy is not None and args.lr_policy != 'plateau':
            scheduler.step()
            lr = optimizer.param_groups[0]['lr']
            print('lr is set to {}'.format(lr))

        # Define container objects
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        score_train = AverageMeter()
        score_train_1 = AverageMeter()
        metric_train = Metrics(max_depth=args.max_depth,
                               disp=args.use_disp, normal=args.normal)

        # Train model for args.nepochs
        model.train()

        # compute timing
        end = time.time()

        # Load dataset
        for i, (input, gt) in tqdm(enumerate(train_loader)):
            # Time dataloader
            data_time.update(time.time() - end)

            # Put inputs on gpu if possible
            if not args.no_cuda:
                input, gt = input.cuda(), gt.cuda()
            prediction, lidar_out, precise, guide = model(input, epoch)

            loss = criterion_local(prediction, gt)
            loss_lidar = criterion_lidar(lidar_out, gt)
            loss_rgb = criterion_rgb(precise, gt)
            loss_guide = criterion_guide(guide, gt)
            loss = args.wpred * loss + args.wlid * loss_lidar + \
                args.wrgb * loss_rgb + args.wguide * loss_guide

            losses.update(loss.item(), input.size(0))
            metric_train.calculate(prediction[:, 0:1].detach(), gt.detach())
            score_train.update(metric_train.get_metric(args.metric),
                               metric_train.num)
            score_train_1.update(metric_train.get_metric(args.metric_1),
                                 metric_train.num)

            # Clip gradients (useful for instabilities or mistakes in ground truth)
            if args.clip_grad_norm != 0:
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)

            # Setup backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Time training iteration
            batch_time.update(time.time() - end)
            end = time.time()

            # Print info
            if (i + 1) % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Metric {score.val:.4f} ({score.avg:.4f})'.format(
                          epoch + 1, i + 1, len(train_loader),
                          batch_time=batch_time, loss=losses, score=score_train))

        print("===> Average RMSE score on training set is {:.4f}".format(
            score_train.avg))
        print("===> Average MAE score on training set is {:.4f}".format(
            score_train_1.avg))

        # Evaluate model on validation set
        print("=> Start validation set")
        score_valid, score_valid_1, losses_valid = validate(
            valid_loader, model, criterion_lidar, criterion_rgb,
            criterion_local, criterion_guide, epoch)
        print("===> Average RMSE score on validation set is {:.4f}".format(
            score_valid))
        print("===> Average MAE score on validation set is {:.4f}".format(
            score_valid_1))

        # Evaluate model on selected validation set
        if args.subset is None:
            print("=> Start selection validation set")
            score_selection, score_selection_1, losses_selection = validate(
                valid_selection_loader, model, criterion_lidar, criterion_rgb,
                criterion_local, criterion_guide, epoch)
            total_score = score_selection
            print("===> Average RMSE score on selection set is {:.4f}".format(
                score_selection))
            print("===> Average MAE score on selection set is {:.4f}".format(
                score_selection_1))
        else:
            total_score = score_valid

        print("===> Last best score was RMSE of {:.4f} in epoch {}".format(
            lowest_loss, best_epoch))

        # Adjust lr if loss plateaued
        if args.lr_policy == 'plateau':
            scheduler.step(total_score)
            lr = optimizer.param_groups[0]['lr']
            print('LR plateaued, hence set to {}'.format(lr))

        # File to keep latest epoch
        with open(os.path.join(args.save_path, 'first_run.txt'), 'w') as f:
            f.write(str(epoch))

        # Save model
        to_save = False
        if total_score < lowest_loss:
            to_save = True
            best_epoch = epoch + 1
            lowest_loss = total_score
        save_checkpoint({
            'epoch': epoch + 1,
            'best epoch': best_epoch,
            'arch': args.mod,
            'state_dict': model.state_dict(),
            'loss': lowest_loss,
            'optimizer': optimizer.state_dict()
        }, to_save, epoch)

    if not args.no_tb:
        writer.close()
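

# --- Illustrative helpers (not part of the original script) ---
# The training loop above relies on AverageMeter and Metrics. Below is a
# minimal sketch of both: AverageMeter follows the common PyTorch-examples
# pattern, while Metrics is an assumption reconstructed from the interface
# used above (calculate / get_metric / num), limited to 'rmse' and 'mae'.
# The repository's actual implementations may differ.
class AverageMeter(object):
    """Tracks the latest value and the running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # n is the number of samples the value was averaged over
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


class Metrics(object):
    """Sketch of the metric container: RMSE/MAE over valid GT pixels."""

    def __init__(self, max_depth=85.0, disp=False, normal=False):
        # disp/normal flags accepted for interface compatibility, ignored here
        self.max_depth = max_depth
        self.rmse, self.mae, self.num = 0.0, 0.0, 0

    def calculate(self, prediction, gt):
        # Only non-zero GT pixels within the evaluation range count
        valid = (gt > 0) & (gt <= self.max_depth)
        diff = prediction[valid] - gt[valid]
        self.num = int(valid.sum().item())
        self.mae = diff.abs().mean().item()
        self.rmse = diff.pow(2).mean().sqrt().item()

    def get_metric(self, name):
        # name is e.g. 'rmse' or 'mae' (args.metric / args.metric_1 above)
        return getattr(self, name)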
def main():
    global args
    args = parser.parse_args()
    if args.num_samples == 0:
        args.num_samples = None
    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU available for usage")

    # Init model
    channels_in = 1 if args.input_type == 'depth' else 4
    model = Models.define_model(mod=args.mod, in_channels=channels_in)
    if args.mod == 'mod':
        define_init_weights(model, args.weight_init)

    # Load on gpu before passing params to optimizer
    if args.cuda:
        model = model.cuda()

    save_id = '{}_{}_{}_{}_batch{}_pretrain{}_wlid{}_wrgb{}_wguide{}_wpred{}_num_samples{}'.\
        format(args.mod, args.loss_criterion_source, args.learning_rate,
               args.input_type, args.batch_size, args.load_path != '',
               args.wlid, args.wrgb, args.wguide, args.wpred,
               args.num_samples)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
                                 weight_decay=args.weight_decay)

    # Optional to use different losses
    criterion_source = define_loss(args.loss_criterion_source)
    criterion_target = define_loss(args.loss_criterion_target)

    # INIT KITTI dataset
    print('Load KITTI')
    dataset = Datasets.define_dataset('kitti', args.data_path_target,
                                      args.input_type)
    dataset.prepare_dataset()
    train_loader = get_loader(args, dataset, only_train=True)

    # INIT Carla dataset
    print('Load Carla')
    dataset = Datasets.define_dataset('carla', args.data_path_source,
                                      args.input_type)
    dataset.prepare_dataset()
    # The sparsification of the data and the projection from the LiDAR
    # reference frame to the RGB camera explained in the paper happen in
    # the dataloader
    train_loader_carla = get_loader(args, dataset, is_carla=True,
                                    only_train=True)
    train_loader_iter = iter(train_loader)

    # Resume training
    if args.save_name == '':
        args.save_path = os.path.join(args.save_path, save_id)
    else:
        args.save_path = os.path.join(args.save_path, args.save_name)
    if os.path.exists(args.save_path):
        raise Exception('Save path already exists')
    mkdir_if_missing(args.save_path)

    # INIT MODEL
    print(40 * "=" + "\nArgs:{}\n".format(args) + 40 * "=")
    print("Init model: '{}'".format(args.mod))
    print("Number of parameters in model {} is {:.3f}M".format(
        args.mod.upper(),
        sum(tensor.numel() for tensor in model.parameters()) / 1e6))

    # Load pretrained state
    if args.load_path != '':
        print("=> loading checkpoint {:s}".format(args.load_path))
        check = torch.load(args.load_path,
                           map_location=lambda storage, loc: storage)['state_dict']
        model.load_state_dict(check)

    if args.use_image_translation:
        image_trans_net = ResnetGeneratorCycle(3, 3, 64, n_blocks=9)
        state_dict = torch.load('./image_translation_weights.pth')
        image_trans_net.load_state_dict(state_dict)
        image_trans_net.eval()
        if args.cuda:
            image_trans_net = image_trans_net.cuda()

    # Start training
    global_step = 0
    for epoch in range(args.start_epoch, args.nepochs):
        print("\n => Start EPOCH {}".format(epoch + 1))

        # Define container objects
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        score_train_rmse = AverageMeter()
        score_train_mae = AverageMeter()
        metric_train = Metrics(max_depth=args.max_depth)

        # Train model for args.nepochs
        model.train()

        # compute timing
        end = time.time()
        for i, (input, gt, filepath) in tqdm(enumerate(train_loader_carla)):
            # Time dataloader
            data_time.update(time.time() - end)

            # Put inputs on gpu if possible
            if args.cuda:
                input, gt = input.cuda(), gt.cuda()

            # The LiDAR depths have large regions where no input depth is
            # given. We remove all of the GT in the synthetic data where no
            # input information is given in an NxN window around the GT point
            # (we set N=41) to avoid the model trying to estimate depth for
            # areas without any input guidance.
            input_depth = input[:, 0:1]
            input_depth, gt = filter_data(input_depth, gt,
                                          max_depth=args.max_depth)
            input[:, 0:1] = input_depth

            ### Load target set (KITTI) data
            if args.train_target:
                try:
                    input_target, gt_target, filepath_t = next(train_loader_iter)
                except StopIteration:
                    train_loader_iter = iter(train_loader)
                    input_target, gt_target, filepath_t = next(train_loader_iter)
                if args.cuda:
                    input_target, gt_target = input_target.cuda(), gt_target.cuda()

            if args.use_image_translation:
                # The CycleGAN model was trained with inputs in the range of [-1, 1]
                with torch.no_grad():
                    rgb_trans = image_trans_net(input[:, 1:] / 128.5 - 1)
                    rgb_trans = 128.5 * (rgb_trans + 1)
                    rgb_trans = rgb_trans.clamp(0, 255)
                    input = torch.cat([input[:, :1], rgb_trans], 1)

            if args.train_target:
                input_joint = torch.cat([input, input_target])
                prediction, lidar_out, precise, guide = model(input_joint, epoch)
                # We separate predictions from the target domain and source domain
                bs = args.batch_size
                prediction_target = prediction[bs:]
                lidar_out_target = lidar_out[bs:]
                precise_target = precise[bs:]
                guide_target = guide[bs:]
                prediction, lidar_out, precise, guide = \
                    prediction[:bs], lidar_out[:bs], precise[:bs], guide[:bs]
            else:
                prediction, lidar_out, precise, guide = model(input, epoch)

            # We compute the loss for the source domain data
            loss = criterion_source(prediction, gt)
            loss_lidar = criterion_source(lidar_out, gt)
            loss_rgb = criterion_source(precise, gt)
            loss_guide = criterion_source(guide, gt)
            loss = args.wpred * loss + args.wlid * loss_lidar + \
                args.wrgb * loss_rgb + args.wguide * loss_guide

            if args.train_target:
                loss_target = 0
                # We filter the input data for supervision as explained in the paper
                filtered_sparse_data = filter_sparse_guidance(
                    input_target[:, :1], args.filter_window, args.filter_th)
                # We compute the loss for the target domain data
                loss_target += args.wpred * criterion_target(
                    prediction_target, filtered_sparse_data)
                loss_target += args.wlid * criterion_target(
                    lidar_out_target, filtered_sparse_data)
                loss_target += args.wrgb * criterion_target(
                    precise_target, filtered_sparse_data)
                loss_target += args.wguide * criterion_target(
                    guide_target, filtered_sparse_data)
                loss = loss + loss_target

            metric_train.calculate(prediction[:, 0:1].detach(), gt.detach())
            score_train_rmse.update(metric_train.get_metric('rmse'),
                                    metric_train.num)
            score_train_mae.update(metric_train.get_metric('mae'),
                                   metric_train.num)
            losses.update(loss.item(), input.size(0))

            # Optimization step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()
            global_step += 1

            # Print info
            if (i + 1) % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'RMSE Train {score.val:.4f} ({score.avg:.4f})'.format(
                          epoch + 1, i + 1, len(train_loader_carla),
                          batch_time=batch_time, loss=losses,
                          score=score_train_rmse))

            if global_step == args.n_training_iterations:
                dict_save = {
                    'epoch': epoch + 1,
                    'arch': args.mod,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()
                }
                save_checkpoint(dict_save, False, epoch + 1, global_step)
                return 1

        print("===> Average RMSE score on training set is {:.4f}".format(
            score_train_rmse.avg))
        print("===> Average MAE score on training set is {:.4f}".format(
            score_train_mae.avg))

        dict_save = {
            'epoch': epoch + 1,
            'arch': args.mod,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }
        save_checkpoint(dict_save, False, epoch + 1)
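

# --- Illustrative sketch (not from the original script) ---
# The comment in the loop above describes removing GT at pixels that have no
# input LiDAR point within an NxN window (N = 41). The function below is a
# hypothetical stand-in for filter_data that realizes this with a max-pool
# dilation of the input-validity mask, assuming missing input depth is
# encoded as 0; the repository's real filter_data may differ.
import torch
import torch.nn.functional as F


def filter_data(input_depth, gt, max_depth=85.0, window=41):
    # Binary mask of pixels carrying an input LiDAR point
    valid_input = (input_depth > 0).float()
    # max_pool2d with stride 1 and symmetric padding dilates the mask:
    # a GT pixel survives iff some input point lies within its window
    support = F.max_pool2d(valid_input, kernel_size=window, stride=1,
                           padding=window // 2)
    gt = gt * (support > 0).float()
    # Also drop GT beyond the maximum evaluation depth
    gt = gt * (gt <= max_depth).float()
    return input_depth, gt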
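

# A similar sketch for filter_sparse_guidance, which the loop uses to clean
# the sparse KITTI input before using it as self-supervision. The occlusion
# heuristic below (drop points much farther away than the closest point in
# their local window) is an assumption about what filter_window / filter_th
# control, not the paper's confirmed implementation.
def filter_sparse_guidance(sparse_depth, window=9, th=1.0):
    valid = sparse_depth > 0
    # Invalid pixels are set to +inf so they never win the min-pool
    masked = torch.where(valid, sparse_depth,
                         torch.full_like(sparse_depth, float('inf')))
    # Min-pool implemented as a negated max-pool
    local_min = -F.max_pool2d(-masked, kernel_size=window, stride=1,
                              padding=window // 2)
    keep = (sparse_depth - local_min) < th
    return sparse_depth * (keep & valid).float()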