def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')
    train_loader = None
    val_loader = None

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)

    # Path-based NYU loading, disabled in favor of the .mat-file variant below:
    '''
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)
    '''
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        # train_lists / val_lists are expected to be defined at module level
        if not args.evaluate:
            train_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='train',
                modality=args.modality, sparsifier=sparsifier, lists=train_lists)
        val_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='val',
            modality=args.modality, sparsifier=sparsifier, lists=val_lists)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)

    else:
        raise RuntimeError('Dataset not found. '
                           'The dataset must be either nyudepthv2 or kitti.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

    # put construction of train loader here, for those who are interested in testing only
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True, sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
        # worker_init_fn ensures different sampling patterns for each data loading thread

    print("=> data loaders created.")
    return train_loader, val_loader

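# Usage sketch (illustrative, not part of the original file): create_data_loaders()
# expects the parsed command-line namespace and returns (train_loader, val_loader);
# in evaluate-only runs train_loader comes back as None.
#
#     train_loader, val_loader = create_data_loaders(args)
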
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)

    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        train_dataset = NYUDataset(traindir, type='train',
            modality=args.modality, sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)
    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        train_dataset = KITTIDataset(traindir, type='train',
            modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)
    else:
        raise RuntimeError('Dataset not found. '
                           'The dataset must be either nyudepthv2 or kitti.')

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=None)
    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=1, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        best_model_filename = os.path.join(output_directory, 'model_best.pth.tar')
        assert os.path.isfile(best_model_filename), \
            "=> no best model found at '{}'".format(best_model_filename)
        print("=> loading best model '{}'".format(best_model_filename))
        checkpoint = torch.load(best_model_filename)
        args.start_epoch = checkpoint['epoch']
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
        return

    # optionally resume from a checkpoint
    elif args.resume:
        assert os.path.isfile(args.resume), \
            "=> no checkpoint found at '{}'".format(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))

    # create new model
    else:
        # define model
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'resnet50':
            model = ResNet(layers=50, decoder=args.decoder,
                output_size=train_dataset.output_size,
                in_channels=in_channels, pretrained=args.pretrained)
        elif args.arch == 'resnet18':
            model = ResNet(layers=18, decoder=args.decoder,
                output_size=train_dataset.output_size,
                in_channels=in_channels, pretrained=args.pretrained)
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
            momentum=args.momentum, weight_decay=args.weight_decay)

        # create new csv files with only header
        # fieldnames is defined at module level (the metric names used as csv columns)
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # model = torch.nn.DataParallel(model).cuda()  # for multi-gpu training
    model = model.cuda()
    # print(model)
    print("=> model transferred to GPU.")

    for epoch in range(args.start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer, epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model, epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\n"
                    "mae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n".format(
                        epoch, result.mse, result.rmse, result.absrel, result.lg10,
                        result.mae, result.delta1, result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint({
            'args': args,
            'epoch': epoch,
            'arch': args.arch,
            'model': model,
            'best_result': best_result,
            'optimizer': optimizer,
        }, is_best, epoch, output_directory)
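
# Entry-point sketch (an assumption, not shown in this section): the module-level
# globals used above (args, fieldnames, and an initial best_result) are expected to
# be defined elsewhere in this file, after which the script would end with the
# usual guard:
#
#     if __name__ == '__main__':
#         main()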