Example #1
def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')
    train_loader = None
    val_loader = None

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)
    '''
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)
    '''
    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        # train_lists / val_lists (the train/val split lists for the labeled .mat file)
        # are expected to be defined at module level.
        if not args.evaluate:
            train_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='train',
                modality=args.modality, sparsifier=sparsifier, lists=train_lists)
        val_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type='val',
            modality=args.modality, sparsifier=sparsifier, lists=val_lists)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)

    else:
        raise RuntimeError('Dataset not found. '
                           'The dataset must be either nyudepthv2 or kitti.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

    # construct the train loader only when training, so evaluation-only runs skip the training set
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True, sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
            # worker_init_fn gives each data-loading worker its own NumPy seed,
            # so the random sparsification differs across workers

    print("=> data loaders created.")
    return train_loader, val_loader
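
Like the other examples on this page, this function is excerpted from a larger module, so its imports are not shown. A minimal sketch of what it assumes, based on the import list in Example #4 below:

import os
import numpy as np
import torch
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo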
Example #2
def create_data_loaders(data_path,
                        data_type='visim',
                        loader_type='val',
                        arch='',
                        sparsifier_type='uar',
                        num_samples=500,
                        modality='rgb-fd',
                        depth_divisor=1,
                        max_depth=-1,
                        max_gt_depth=-1,
                        batch_size=8,
                        workers=8):
    # Data loading code
    print("=> creating data loaders ...")

    # legacy compatibility with the sparse-to-dense data folder layout
    subfolder = os.path.join(data_path, loader_type)
    # if os.path.exists(subfolder):
    #     data_path = subfolder

    if not os.path.exists(data_path):
        raise RuntimeError('Data source does not exist: {}'.format(data_path))

    loader = None
    dataset = None
    max_depth = max_depth if max_depth >= 0.0 else np.inf
    max_gt_depth = max_gt_depth if max_gt_depth >= 0.0 else np.inf

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None

    if sparsifier_type == UniformSampling.name:  #uar
        sparsifier = UniformSampling(num_samples=num_samples,
                                     max_depth=max_depth)
    elif sparsifier_type == SimulatedStereo.name:  #sim_stereo
        sparsifier = SimulatedStereo(num_samples=num_samples,
                                     max_depth=max_depth)

    if data_type == 'kitti':
        from dataloaders.kitti_loader import KittiDepth

        dataset = KittiDepth(data_path,
                             split=loader_type,
                             depth_divisor=depth_divisor)

    elif data_type == 'visim':
        from dataloaders.visim_dataloader import VISIMDataset

        dataset = VISIMDataset(data_path,
                               type=loader_type,
                               modality=modality,
                               sparsifier=sparsifier,
                               depth_divider=depth_divisor,
                               is_resnet=('resnet' in arch),
                               max_gt_depth=max_gt_depth)

    elif data_type == 'visim_seq':
        from dataloaders.visim_dataloader import VISIMSeqDataset
        dataset = VISIMSeqDataset(data_path,
                                  type=loader_type,
                                  modality=modality,
                                  sparsifier=sparsifier,
                                  depth_divider=depth_divisor,
                                  is_resnet=('resnet' in arch),
                                  max_gt_depth=max_gt_depth)
    else:
        raise RuntimeError(
            'Data type not found. '
            'The dataset must be one of kitti, visim or visim_seq.')

    if loader_type == 'val':
        # set batch size to be 1 for validation
        loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=workers,
                                             pin_memory=True)
        print("=> Val loader:{}".format(len(dataset)))
    elif loader_type == 'train':
        loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=workers,
            pin_memory=True,
            sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
        print("=> Train loader:{}".format(len(dataset)))
        # worker_init_fn ensures different sampling patterns for each data loading thread

    print("=> data loaders created.")
    return loader, dataset
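
A hypothetical call that builds a validation loader with the defaults above (the data path is a placeholder, not from the original source):

val_loader, val_dataset = create_data_loaders(
    'data/visim',            # placeholder path
    data_type='visim',
    loader_type='val',
    sparsifier_type='uar',   # UniformSampling
    num_samples=500,
    modality='rgb-fd',
    workers=8)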
Example #3
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # create the results folder if it does not already exist
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples,
                                     max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples,
                                     max_depth=max_depth)

    # Data loading code
    print("=> creating data loaders ...")
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu_dataloader import NYUDataset
        train_dataset = NYUDataset(traindir,
                                   type='train',
                                   modality=args.modality,
                                   sparsifier=sparsifier)
        val_dataset = NYUDataset(valdir,
                                 type='val',
                                 modality=args.modality,
                                 sparsifier=sparsifier)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        train_dataset = KITTIDataset(traindir,
                                     type='train',
                                     modality=args.modality,
                                     sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir,
                                   type='val',
                                   modality=args.modality,
                                   sparsifier=sparsifier)

    else:
        raise RuntimeError(
            'Dataset not found. '
            'The dataset must be either nyudepthv2 or kitti.')

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=None)

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        best_model_filename = os.path.join(output_directory,
                                           'model_best.pth.tar')
        assert os.path.isfile(best_model_filename), \
            "=> no best model found at '{}'".format(best_model_filename)
        print("=> loading best model '{}'".format(best_model_filename))
        checkpoint = torch.load(best_model_filename)
        args.start_epoch = checkpoint['epoch']
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
        return

    # optionally resume from a checkpoint
    elif args.resume:
        assert os.path.isfile(args.resume), \
            "=> no checkpoint found at '{}'".format(args.resume)
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))

    # create new model
    else:
        # define model
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'resnet50':
            model = ResNet(layers=50,
                           decoder=args.decoder,
                           output_size=train_dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)
        elif args.arch == 'resnet18':
            model = ResNet(layers=18,
                           decoder=args.decoder,
                           output_size=train_dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)
        print("=> model created.")

        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
            momentum=args.momentum, weight_decay=args.weight_decay)

        # create new csv files with only header
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
    model = model.cuda()
    # print(model)
    print("=> model transferred to GPU.")

    for epoch in range(args.start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model,
                                     epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = os.path.join(output_directory, 'comparison_best.png')
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)
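
The checkpoint dictionary written by utils.save_checkpoint above can be restored the same way the resume branch does; a minimal sketch, with a hypothetical filename:

checkpoint = torch.load('checkpoint-10.pth.tar')  # hypothetical checkpoint file
start_epoch = checkpoint['epoch'] + 1
best_result = checkpoint['best_result']
model = checkpoint['model']          # the full model object is stored, not just a state_dict
optimizer = checkpoint['optimizer']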
Example #4
from models import create_model
from util.visualizer import Visualizer
#from util.util import confusion_matrix, getScores
from dataloaders.nyu_dataloader import NYUDataset
from dataloaders.kitti_dataloader import KITTIDataset
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo
import numpy as np
import random
import torch
import cv2
import os    # used below for CUDA_VISIBLE_DEVICES
import time  # used below for epoch/iteration timing
# AdvanceOptions, OwnDataset and ORBSampling come from elsewhere in the original project (not shown here)

if __name__ == '__main__':
    train_opt = AdvanceOptions().parse(True)

    # The SimulatedStereo class is also provided to subsample to stereo points
    sparsifier = UniformSampling(train_opt.nP, max_depth=np.inf)

    train_dataset = KITTIDataset(train_opt.train_path,
                                 type='train',
                                 modality='rgbdm',
                                 sparsifier=sparsifier)
    test_dataset = KITTIDataset(train_opt.test_path,
                                type='val',
                                modality='rgbdm',
                                sparsifier=sparsifier)
    ## Please use this dataloder if you want to use NYU
    # train_dataset = NYUDataset(train_opt.train_path, type='train',
    #                modality='rgbdm', sparsifier=sparsifier)
    ## Please use this dataloder if you want to use NYU
    # test_dataset = NYUDataset(train_opt.test_path, type='val',
    #            modality='rgbdm', sparsifier=sparsifier)
def main():
	## manual single gpu training
	os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"

	#specify device ID (GPU 0 is 1, GPU 1 is 0)
	os.environ["CUDA_VISIBLE_DEVICES"] = "1"
	train_opt = AdvanceOptions().parse(True)

	# The SimulatedStereo class is also provided to subsample to stereo points
	if train_opt.sD_sampler == "uniform":
		sparsifier = UniformSampling(train_opt.nP, max_depth=np.inf)
	if train_opt.sD_sampler == "orb":
		sparsifier = ORBSampling(max_depth=np.inf)
	if train_opt.sD_sampler == "stereo":
		sparsifier = SimulatedStereo(num_samples=train_opt.nP, max_depth=np.inf)


	torch.backends.cudnn.benchmark = True

	#train_dataset = KITTIDataset(train_opt.train_path, type='train',
   #             modality='rgbdm', sparsifier=sparsifier)
	#test_dataset = KITTIDataset(train_opt.test_path, type='val',
    #        modality='rgbdm', sparsifier=sparsifier)
	## Please use this dataloder if you want to use NYU
	#train_dataset = NYUDataset(train_opt.train_path, type='train',
    #             modality='rgbdm', sparsifier=sparsifier)

	# raw strings avoid invalid "\U..." escape sequences in the Windows paths
	train_dataset = OwnDataset(images_root=r"C:\Users\student\Documents\Drone_Dataset_update\Img",
            depth_root=r"C:\Users\student\Documents\Drone_Dataset_update\RefinedDepth_GT", split='train', modality='rgbdm', sparsifier=sparsifier)
	test_dataset = OwnDataset(images_root=r"C:\Users\student\Documents\Drone_Dataset_update\Img",
            depth_root=r"C:\Users\student\Documents\Drone_Dataset_update\RefinedDepth_GT",
                 split='val', modality='rgbdm', sparsifier=sparsifier)
	# Please use this dataloder if you want to use NYU
	#test_dataset = NYUDataset(train_opt.test_path, type='val',
     #       modality='rgbdm', sparsifier=sparsifier)
#

	train_data_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=train_opt.batch_size, shuffle=True,
            num_workers=8, pin_memory=True)
	test_opt = AdvanceOptions().parse(True)
	test_opt.phase = 'val'
	test_opt.batch_size = 1
	test_opt.num_threads = 1
	test_opt.serial_batches = True
	test_opt.no_flip = True

	test_data_loader = torch.utils.data.DataLoader(test_dataset,
        batch_size=test_opt.batch_size, shuffle=False, num_workers=8, pin_memory=True)

	train_dataset_size = len(train_data_loader)  # len(DataLoader) is the number of batches, not images
	print('#training images = %d' % train_dataset_size)
	test_dataset_size = len(test_data_loader)
	print('#test images = %d' % test_dataset_size)

	model = create_model(train_opt, train_dataset)
	model.setup(train_opt)
	visualizer = Visualizer(train_opt)
	total_steps = 0
	for epoch in range(train_opt.epoch_count, train_opt.niter + 1):
		model.train()
		epoch_start_time = time.time()
		iter_data_time = time.time()
		epoch_iter = 0
		model.init_eval()
		iterator = iter(train_data_loader)
		#start = timeit.timeit()
		while True:
			try:
				nn = next(iterator)
			except IndexError: # Some images couldn't sample more than defined nP points under Stereo sampling
				print("Catch and Skip!")
				continue
			except StopIteration:
				break
			data, target = nn[0], nn[1]
			#end = timeit.timeit()
			#print("Data Loading Time", end - start)
			#data = data.float().cuda()
			#target = target.cuda()

			iter_start_time = time.time()
			if total_steps % train_opt.print_freq == 0:
				t_data = iter_start_time - iter_data_time
			total_steps += train_opt.batch_size
			epoch_iter += train_opt.batch_size
			model.set_new_input(data.float(),target)
			model.optimize_parameters()

			if total_steps % train_opt.print_freq == 0:
				losses = model.get_current_losses()
				loss_mse, loss_dcca, loss_total, depth_est, rgb_image= model.get_log()
				# writer.add_graph(model,self.depth_est)
				#writer.add_scalar('Loss MSE', loss_mse, total_steps)
				#writer.add_scalar('Loss DCAA', loss_dcca, total_steps)
				#writer.add_scalar('Loss Total', loss_total, total_steps)
				#if epoch_iter % 1200 == 0:
				#	grid = torchvision.utils.make_grid(depth_est)
				#	grid3 = torchvision.utils.make_grid(rgb_image)
				#	writer.add_image('Input', grid3, total_steps)
				#	writer.add_image('predictions', grid, total_steps)
					#grid2 = torchvision.utils.make_grid(conf)
					#writer.add_image('conf', grid2, total_steps)

				t = (time.time() - iter_start_time) / train_opt.batch_size
				visualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)
				message = model.print_depth_evaluation()
				visualizer.print_current_depth_evaluation(message)
				print()

			iter_data_time = time.time()

		model.lr_schedule(epoch)
		print('End of epoch %d / %d \t Time Taken: %d sec' %   (epoch, train_opt.niter, time.time() - epoch_start_time))
		#model.update_learning_rate()
		if epoch  and epoch % train_opt.save_epoch_freq == 0:
			print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
			model.save_networks('latest')
			model.save_networks(epoch)

			model.eval()
			test_loss_iter = []
			gts = None
			preds = None
			epoch_iter = 0
			model.init_test_eval()
			with torch.no_grad():
				iterator = iter(test_data_loader)
				while True:
					try:
						nn = next(iterator)
					except IndexError:
						print("Catch and Skip!")
						continue
					except StopIteration:
						break

					data, target = nn[0], nn[1]

					model.set_new_input(data.float(),target)
					model.forward()
					model.test_depth_evaluation()
					model.get_loss()
					epoch_iter += test_opt.batch_size
					losses = model.get_current_losses()
					test_loss_iter.append(model.loss_dcca.item())
					print('test epoch {0:}, iters: {1:}/{2:} '.format(epoch, epoch_iter, len(test_dataset) * test_opt.batch_size), end='\r')
					message = model.print_test_depth_evaluation()
					visualizer.print_current_depth_evaluation(message)
					print(
                  'RMSE={result.rmse:.4f}({average.rmse:.4f}) '
                  'MAE={result.mae:.4f}({average.mae:.4f}) '
                  'Delta1={result.delta1:.4f}({average.delta1:.4f}) '
                  'REL={result.absrel:.4f}({average.absrel:.4f}) '
                  'Lg10={result.lg10:.4f}({average.lg10:.4f}) '.format(
                 result=model.test_result, average=model.test_average.average()))
			avg_test_loss = np.mean(np.asarray(test_loss_iter))
def create_data_loaders(args):
	# Data loading code
	print("=> creating data loaders ...")
	traindir = os.path.join('data', args.data, 'train')
	valdir = os.path.join('data', args.data, 'val')

	train_loader = None
	val_loader = None

	# sparsifier is a class for generating random sparse depth input from the ground truth
	sparsifier = None
	max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf

	if args.sparsifier == SimulatedCameraNoise.name:
		sparsifier = SimulatedCameraNoise(args.sparsemodel, 100)
	elif args.sparsifier == AlgorithmicNoise.name:
		sparsifier = AlgorithmicNoise()
	elif args.sparsifier == UniformSampling.name:
		sparsifier = UniformSampling(num_samples=args.num_samples,
		max_depth=max_depth)
	elif args.sparsifier == SimulatedStereo.name:
		sparsifier = SimulatedStereo(num_samples=args.num_samples,
		max_depth=max_depth)

	if args.data == 'nyudepthv2' or args.data == 'gt_nyudepthv2':
		from dataloaders.DepthNoise_DataLoader import DepthNoiseDataset
		if not args.evaluate:
			train_dataset = DepthNoiseDataset(traindir, type='train',
				sparsifier=sparsifier, num_augmented=args.numaugmented,
				sample_cap=args.samplecap, scene_cap=args.scenecap,
				sim_offline=args.simoffline)

		val_dataset = DepthNoiseDataset(valdir, type='val',
			sparsifier=None, num_augmented=0, sample_cap=1, scene_cap=-1)

	elif args.data == 'kitti':
		from dataloaders.kitti_dataloader import KITTIDataset
		if not args.evaluate:
			train_dataset = KITTIDataset(traindir, type='train',
				modality=args.modality, sparsifier=sparsifier)
		val_dataset = KITTIDataset(valdir, type='val',
			modality=args.modality, sparsifier=sparsifier)

	else:
		print("Uknown dataset. Attempting to load as D415 or D435.")
		from dataloaders.DepthNoise_DataLoader import DepthNoiseDataset
		if not args.evaluate:
			train_dataset = DepthNoiseDataset(traindir, type='train',
				sparsifier=sparsifier, num_augmented=args.numaugmented,
				sample_cap=args.samplecap, scene_cap=args.scenecap,
				sim_offline=args.simoffline)

		val_dataset = DepthNoiseDataset(valdir, type='val',
			sparsifier=None, num_augmented=0, sample_cap=1, scene_cap=-1)

	# set batch size to be 1 for validation
	val_loader = torch.utils.data.DataLoader(val_dataset,
		batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)

	# construct the train loader only when training, so evaluation-only runs skip the training set
	if not args.evaluate:
		train_loader = torch.utils.data.DataLoader(
			train_dataset, batch_size=args.batch_size, shuffle=True,
			num_workers=args.workers, pin_memory=True, sampler=None,
			worker_init_fn=lambda work_id:np.random.seed(work_id))
			# worker_init_fn ensures different sampling patterns for each data loading thread

	print("=> data loaders created.")
	return train_loader, val_loader
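
The worker_init_fn lambda used throughout these examples reseeds NumPy in every DataLoader worker so that the sparsifiers draw different random samples per worker. A stand-alone sketch of the same idea (names such as train_dataset and args are placeholders taken from the surrounding examples):

def worker_init_fn(worker_id):
    # give each data-loading worker its own NumPy seed so random
    # sparsification is not identical across workers
    np.random.seed(worker_id)

train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True,
    worker_init_fn=worker_init_fn)

Note that seeding with the worker id alone repeats the same per-worker sequence every epoch; mixing in torch.initial_seed() is a common variant.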