Example #1

# Imports assumed by this excerpt (the sparsifier classes follow Example #4's imports):
import os
import numpy as np
import torch
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo

def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')
    train_loader = None
    val_loader = None

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        # train_lists / val_lists are index lists into the labeled .mat file and are
        # expected to be defined at module scope in this variant of the loader
        if not args.evaluate:
            train_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='train',
                modality=args.modality, sparsifier=sparsifier, lists=train_lists)
        val_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='val',
            modality=args.modality, sparsifier=sparsifier, lists=val_lists)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)

    else:
        raise RuntimeError('Dataset not found. '
                           'The dataset must be either nyudepthv2 or kitti.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

    # put construction of train loader here, for those who are interested in testing only
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True, sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
            # worker_init_fn ensures different sampling patterns for each data-loading worker

    print("=> data loaders created.")
    return train_loader, val_loader
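
To make the sparsifier comment concrete: a sparsifier maps dense ground-truth depth to a sparse input. Below is a minimal sketch of what a UniformSampling-style sparsifier computes, assuming dense depth arrives as a 2-D numpy array; the function name is illustrative, not the project's API.

import numpy as np

def uniform_sparse_depth(dense_depth, num_samples, max_depth=np.inf):
    # Pixels are valid if they have depth and fall below the max_depth cutoff.
    valid = (dense_depth > 0) & (dense_depth <= max_depth)
    n_valid = np.count_nonzero(valid)
    if n_valid == 0:
        return np.zeros_like(dense_depth)
    # Keep each valid pixel with probability num_samples / n_valid, so the
    # expected number of retained samples is num_samples.
    keep = np.random.uniform(0.0, 1.0, dense_depth.shape) < (float(num_samples) / n_valid)
    return np.where(valid & keep, dense_depth, 0.0)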
Example #2

# This excerpt assumes module-level imports (os, torch) plus the script's
# global argparse `args` and its demo() routine.
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    valdir = os.path.join('..', 'data', args.data, 'val')
    traindir = os.path.join('..', 'data', args.data, 'train')
    sparsifier = None  # the KITTI branch below expects these; this demo uses no sparsifier

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir,
                                         type='train',
                                         modality=args.modality,
                                         sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir,
                                   type='val',
                                   modality=args.modality,
                                   sparsifier=sparsifier)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")


    assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
    print("=> loading model '{}'".format(args.evaluate))
    checkpoint = torch.load(args.evaluate)
    if isinstance(checkpoint, dict):
        args.start_epoch = checkpoint['epoch']
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
    else:
        model = checkpoint
        args.start_epoch = 0
    output_directory = os.path.dirname(args.evaluate)
    demo(val_loader, model, args.start_epoch, write_to_file=False)
    return
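
A caveat on the bare torch.load above: a checkpoint saved on a GPU machine will fail to load on a CPU-only machine. A hedged variant using torch.load's standard map_location argument:

# Remap GPU-saved tensors onto whatever device is actually available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load(args.evaluate, map_location=device)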
Example #3

# This excerpt assumes the script's module-level imports (os, csv, numpy, torch,
# utils, criteria, ResNet, the dataloaders) and its train()/validate() helpers.
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples,
                                     max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples,
                                     max_depth=max_depth)

    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        train_dataset = NYUDataset(traindir,
                                   type='train',
                                   modality=args.modality,
                                   sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir,
                                 type='val',
                                 modality=args.modality,
                                 sparsifier=sparsifier)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        train_dataset = KITTIDataset(traindir,
                                     type='train',
                                     modality=args.modality,
                                     sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir,
                                   type='val',
                                   modality=args.modality,
                                   sparsifier=sparsifier)

    else:
        raise RuntimeError(
            'Dataset not found. '
            'The dataset must be either nyudepthv2 or kitti.')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        best_model_filename = os.path.join(output_directory,
                                           'model_best.pth.tar')
        assert os.path.isfile(best_model_filename), \
            "=> no best model found at '{}'".format(best_model_filename)
        print("=> loading best model '{}'".format(best_model_filename))
        checkpoint = torch.load(best_model_filename)
        args.start_epoch = checkpoint['epoch']
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
        return

    # optionally resume from a checkpoint
    elif args.resume:
        assert os.path.isfile(args.resume), \
            "=> no checkpoint found at '{}'".format(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))

    # create new model
    else:
        # define model
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'resnet50':
            model = ResNet(layers=50,
                           decoder=args.decoder,
                           output_size=train_dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)
        elif args.arch == 'resnet18':
            model = ResNet(layers=18,
                           decoder=args.decoder,
                           output_size=train_dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)
        print("=> model created.")

        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        # create new csv files with only a header row
        # (fieldnames is assumed to be the module-level list of metric column names)
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
    model = model.cuda()
    # print(model)
    print("=> model transferred to GPU.")

    for epoch in range(args.start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model,
                                     epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = os.path.join(output_directory, 'comparison_best.png')
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)
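
Example #3 delegates the learning-rate schedule to utils.adjust_learning_rate. Below is a minimal step-decay sketch of what such a helper commonly does; the decay factor and interval are assumptions, not necessarily the project's actual schedule.

def adjust_learning_rate(optimizer, epoch, lr_init):
    # Step decay: divide the learning rate by 10 every 5 epochs (assumed schedule).
    lr = lr_init * (0.1 ** (epoch // 5))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr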
Example #4
from dataloaders.nyu_dataloader import NYUDataset
from dataloaders.kitti_dataloader import KITTIDataset
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo
import numpy as np
import random
import torch
import cv2

if __name__ == '__main__':
    # AdvanceOptions is the project's option parser; its import is not shown in this excerpt
    train_opt = AdvanceOptions().parse(True)

    # The SimulatedStereo class is also provided to subsample to stereo points
    sparsifier = UniformSampling(train_opt.nP, max_depth=np.inf)

    train_dataset = KITTIDataset(train_opt.train_path,
                                 type='train',
                                 modality='rgbdm',
                                 sparsifier=sparsifier)
    test_dataset = KITTIDataset(train_opt.test_path,
                                type='val',
                                modality='rgbdm',
                                sparsifier=sparsifier)
    ## Use these dataloaders instead if you want to train on NYU:
    # train_dataset = NYUDataset(train_opt.train_path, type='train',
    #                modality='rgbdm', sparsifier=sparsifier)
    # test_dataset = NYUDataset(train_opt.test_path, type='val',
    #            modality='rgbdm', sparsifier=sparsifier)

    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=train_opt.batch_size)
    # (further DataLoader options are truncated in the original excerpt)

Example #5

# Same create_data_loaders helper as Example #1, from a variant of the project
# that adds camera-noise sparsifiers. Assumes the imports listed in Example #1.
def create_data_loaders(args):
	# Data loading code
	print("=> creating data loaders ...")
	traindir = os.path.join('data', args.data, 'train')
	valdir = os.path.join('data', args.data, 'val')

	train_loader = None
	val_loader = None

	# sparsifier is a class for generating random sparse depth input from the ground truth
	sparsifier = None
	max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf

	# SimulatedCameraNoise and AlgorithmicNoise are custom sparsifiers defined in this variant
	if args.sparsifier == SimulatedCameraNoise.name:
		sparsifier = SimulatedCameraNoise(args.sparsemodel, 100)
	elif args.sparsifier == AlgorithmicNoise.name:
		sparsifier = AlgorithmicNoise()
	elif args.sparsifier == UniformSampling.name:
		sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
	elif args.sparsifier == SimulatedStereo.name:
		sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)

	if args.data == 'nyudepthv2' or args.data == 'gt_nyudepthv2':
		from dataloaders.DepthNoise_DataLoader import DepthNoiseDataset
		if not args.evaluate:
			train_dataset = DepthNoiseDataset(traindir, type='train',
				sparsifier=sparsifier, num_augmented=args.numaugmented,
				sample_cap=args.samplecap, scene_cap=args.scenecap,
				sim_offline=args.simoffline)

		val_dataset = DepthNoiseDataset(valdir, type='val',
			sparsifier=None, num_augmented=0, sample_cap=1, scene_cap=-1)

	elif args.data == 'kitti':
		from dataloaders.kitti_dataloader import KITTIDataset
		if not args.evaluate:
			train_dataset = KITTIDataset(traindir, type='train',
				modality=args.modality, sparsifier=sparsifier)
		val_dataset = KITTIDataset(valdir, type='val',
			modality=args.modality, sparsifier=sparsifier)

	else:
		print("Uknown dataset. Attempting to load as D415 or D435.")
		from dataloaders.DepthNoise_DataLoader import DepthNoiseDataset
		if not args.evaluate:
			train_dataset = DepthNoiseDataset(traindir, type='train',
				sparsifier=sparsifier, num_augmented=args.numaugmented,
				sample_cap=args.samplecap, scene_cap=args.scenecap,
				sim_offline=args.simoffline)

		val_dataset = DepthNoiseDataset(valdir, type='val',
			sparsifier=None, num_augmented=0, sample_cap=1, scene_cap=-1)

	# set batch size to be 1 for validation
	val_loader = torch.utils.data.DataLoader(val_dataset,
		batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

	# put construction of train loader here, for those who are interested in testing only
	if not args.evaluate:
		train_loader = torch.utils.data.DataLoader(
			train_dataset, batch_size=args.batch_size, shuffle=True,
			num_workers=args.workers, pin_memory=True, sampler=None,
			worker_init_fn=lambda work_id: np.random.seed(work_id))
			# worker_init_fn ensures different sampling patterns for each data-loading worker

	print("=> data loaders created.")
	return train_loader, val_loader
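
A note on the worker_init_fn lambdas in Examples #1 and #5: seeding numpy with the bare worker id gives each worker a distinct pattern, but the same pattern every epoch. The recipe from the PyTorch documentation derives the numpy seed from the per-worker torch seed, which changes each epoch:

import numpy as np
import torch

def seed_worker(worker_id):
    # torch.initial_seed() is already distinct per worker and per epoch;
    # fold it into numpy so random sparsifiers vary across both.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)

# usage: torch.utils.data.DataLoader(..., worker_init_fn=seed_worker)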