Example #1
def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    datasets = '/content/drive/MyDrive'
    #traindir = os.path.join(datasets,'Datasets', args.data, 'train')
    #valdir = os.path.join(datasets,'Datasets', args.data, 'val')
    traindir = os.path.join(datasets, 'Datasets', 'Nyudepthv2Previous',
                            args.data, 'train')
    valdir = os.path.join(datasets, 'Datasets', 'Nyudepthv2Previous',
                          args.data, 'val')
    train_loader = None
    val_loader = None

    if args.data == 'nyudepthv2':
        if not args.evaluate:
            train_dataset = NYUDataset(traindir,
                                       split='train',
                                       modality=args.modality)

        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir,
                                         type='train',
                                         modality=args.modality)
        val_dataset = KITTIDataset(valdir, type='val', modality=args.modality)

    else:
        raise RuntimeError(
            'Dataset not found. '
            'The dataset must be either nyudepthv2 or kitti.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # put construction of train loader here, for those who are interested in testing only
    if not args.evaluate:

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True,
            sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
        # worker_init_fn ensures different sampling patterns for each data loading thread

    print("=> data loaders created.")
    return train_loader, val_loader
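A minimal sketch of how create_data_loaders above could be driven from a training script. The argparse flags and defaults below are assumptions for illustration (they mirror the attributes the function reads: data, evaluate, modality, workers, batch_size); they are not part of the original code.

import argparse

def parse_args():
    # Hypothetical argument parser; only the fields used by create_data_loaders are defined.
    parser = argparse.ArgumentParser(description='depth estimation data loading')
    parser.add_argument('--data', default='nyudepthv2', choices=['nyudepthv2', 'kitti'])
    parser.add_argument('--modality', default='rgb')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--workers', type=int, default=4)
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=8)
    return parser.parse_args()

if __name__ == '__main__':
    args = parse_args()
    train_loader, val_loader = create_data_loaders(args)
    if train_loader is not None:
        print('train batches per epoch:', len(train_loader))
    print('val batches:', len(val_loader))
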
def load_dataset(params):
    print("Loading the dataset...")

    if params['nyu_dataset']:
        dataset = NYUDataset("../data/nyudepthv2/train", split='train')
        test_dataset = NYUDataset("../data/nyudepthv2/val", split='val')
    else:
        dataset = Datasets.FastDepthDataset(
            params["training_dataset_paths"],
            split='train',
            depth_min=params["depth_min"],
            depth_max=params["depth_max"],
            input_shape_model=(224, 224),
            disparity=params["predict_disparity"],
            random_crop=params["random_crop"])

        test_dataset = Datasets.FastDepthDataset(
            params["test_dataset_paths"],
            split='val',
            depth_min=params["depth_min"],
            depth_max=params["depth_max"],
            input_shape_model=(224, 224),
            disparity=params["predict_disparity"],
            random_crop=False)

    # Make training/validation split
    train_val_split_lengths = utils.get_train_val_split_lengths(
        params["train_val_split"], len(dataset))
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, train_val_split_lengths)
    params["num_training_examples"] = len(train_dataset)
    params["num_validation_examples"] = len(val_dataset)

    # DataLoaders
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=params["batch_size"],
        shuffle=True,
        num_workers=params["num_workers"],
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=params["batch_size"],
                                             shuffle=True,
                                             num_workers=params["num_workers"],
                                             pin_memory=True)

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=params["batch_size"],
        shuffle=False,
        num_workers=params["num_workers"],
        pin_memory=True)

    return train_loader, val_loader, test_loader
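For reference, a sketch of the params dictionary that load_dataset above reads. The keys are taken from the function body; the paths and numeric values are placeholders, not values from the original project.

# Hypothetical params dict for load_dataset; only keys the function reads are listed.
params = {
    "nyu_dataset": False,                      # True -> use the hard-coded NYUDataset paths above
    "training_dataset_paths": ["/path/to/train_data"],
    "test_dataset_paths": ["/path/to/test_data"],
    "depth_min": 0.1,                          # placeholder depth range
    "depth_max": 10.0,
    "predict_disparity": False,
    "random_crop": True,
    "train_val_split": [90, 10],               # format depends on utils.get_train_val_split_lengths
    "batch_size": 8,
    "num_workers": 4,
}

train_loader, val_loader, test_loader = load_dataset(params)
# load_dataset also fills in params["num_training_examples"] and params["num_validation_examples"].
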
Example #3
def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders...")
    traindir = os.path.join('..','data', args.data, 'train')
    valdir = os.path.join('..','data', args.data, 'val')
    train_loader = None
    val_loader = None

    # sparsifier is a class for generating random sparse depth input from the ground truth
    sparsifier = None
    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
    if args.sparsifier == UniformSampling.name:
        sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
    elif args.sparsifier == SimulatedStereo.name:
        sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset(traindir, split='train', modality=args.modality)
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir, type='train',
                modality=args.modality, sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir, type='val',
            modality=args.modality, sparsifier=sparsifier)
      
    else:
        raise RuntimeError('Dataset not found. '
                           'The dataset must be either nyudepthv2 or kitti.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
    #print("=> data loaders created.")

    # put construction of train loader here
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True, sampler=None,
            worker_init_fn=lambda work_id:np.random.seed(work_id))
            # worker_init_fn ensures different sampling patterns for each data loading thread

    print("=> data loaders created.")
    return train_loader, val_loader
Example #4
def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    home_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    traindir = os.path.join(home_path, 'data', args.data, 'train')
    valdir = os.path.join(home_path, 'data', args.data, 'val')

    train_loader = None

    max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        if not args.evaluate:
            train_dataset = NYUDataset(traindir,
                                       split='train',
                                       modality=args.modality)
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
    else:
        raise RuntimeError(
            'Dataset not found. '
            'The dataset must be nyudepthv2.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # put construction of train loader here, for those who are interested in testing only
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.workers,
            pin_memory=True,
            sampler=None,
            worker_init_fn=lambda work_id: np.random.seed(work_id))
        # worker_init_fn ensures different sampling patterns for each data loading thread

    print("=> data loaders created.")
    return train_loader, val_loader
def main(args):

    print("Loading config file: ", args.config)
    params = utils.load_config_file(args.config)
    params["dataset_paths"] = utils.format_dataset_path(
        params["dataset_paths"])
    if "nyu" not in params:
        params["nyu"] = False

    # Data loading code
    print("Creating data loaders...")
    if params["nyu"]:
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(params["dataset_paths"], split='val')
    else:
        val_dataset = Datasets.FastDepthDataset(params["dataset_paths"],
                                                split='val',
                                                depth_min=params["depth_min"],
                                                depth_max=params["depth_max"],
                                                input_shape_model=(224, 224),
                                                random_crop=False)

    # set batch size to be 1 for validation
    data_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=params["num_workers"],
        pin_memory=True)

    # Set GPU
    params["device"] = torch.device(
        "cuda:{}".format(params["device"])
        if params["device"] >= 0 and torch.cuda.is_available() else "cpu")
    print("Using device", params["device"])

    print("Loading models...")
    models = []
    model_names = []
    for model_dict in params["models"]:
        model_names.append(Path(model_dict["model_path"]).stem)
        model, _ = utils.load_model(model_dict, model_dict["model_path"],
                                    params["device"])
        model.to(params["device"])
        models.append(model)

    # Create output directory
    output_directory = os.path.join(params["save_folder"],
                                    ".".join(model_names))
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    params["output_directory"] = output_directory
    print("Saving results to " + output_directory)

    compare_models(params, data_loader, models)
Example #6
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    valdir = os.path.join('..', 'data', args.data, 'val')
    traindir = os.path.join('..', 'data', args.data, 'train')
    sparsifier = None  # no sparse-depth sampling; the KITTI branch below expects this name

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        from dataloaders.kitti_dataloader import KITTIDataset
        if not args.evaluate:
            train_dataset = KITTIDataset(traindir,
                                         type='train',
                                         modality=args.modality,
                                         sparsifier=sparsifier)
        val_dataset = KITTIDataset(valdir,
                                   type='val',
                                   modality=args.modality,
                                   sparsifier=sparsifier)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")


    assert os.path.isfile(args.evaluate), \
    "=> no model found at '{}'".format(args.evaluate)
    print("=> loading model '{}'".format(args.evaluate))
    checkpoint = torch.load(args.evaluate)
    if type(checkpoint) is dict:
        args.start_epoch = checkpoint['epoch']
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
    else:
        model = checkpoint
        args.start_epoch = 0
    output_directory = os.path.dirname(args.evaluate)
    demo(val_loader, model, args.start_epoch, write_to_file=False)
    return
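Several of these examples load a checkpoint that is either a bare model object or a dict with 'epoch', 'best_result' and 'model' entries. Below is a minimal sketch of a save side that would produce such a dict; the helper name and file path are assumptions (the later examples wrap this in a utils.save_checkpoint helper).

def save_checkpoint_dict(model, epoch, best_result, path='checkpoint.pth.tar'):
    # Hypothetical helper: pickles the whole module (not just its state_dict),
    # which is what the loading code above expects from checkpoint['model'].
    torch.save({
        'epoch': epoch,
        'best_result': best_result,
        'model': model,
    }, path)
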
Example #7
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    # TODO: Test - change later
    valdir = os.path.join('..', 'data', args.data, 'val')
    testdir = args.test_path

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
    elif args.data == 'sun3d':
        from dataloaders.sun3d import Sun3DDataset
        val_dataset = Sun3DDataset(testdir, split='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location='cpu')
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return

    # Train from start
    if args.train:
        assert os.path.isfile(args.train), \
        "=> no model found at '{}'".format(args.train)
        print("=> loading model '{}'".format(args.train))
        model = models.MobileNetSkipAdd(output_size=10)
        args.start_epoch = 0
        output_directory = os.path.dirname(args.train)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return
Example #8
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    valdir = "/mnt/06e9a677-7e53-4230-9208-c934654a74eb/nyu-depth/preprocessed/nyudepthv2/val"
    #os.path.join('..', 'data', args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        # load on CPU when CUDA is unavailable
        if not torch.cuda.is_available():
            checkpoint = torch.load(args.evaluate, map_location='cpu')
        else:
            checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = "results"  #os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return
Example #9
    def __init__(self, model, input_data_shape, dataset_path, finetune_lr=1e-3):
        '''
            Initialize:
                (1) network definition 'network_def'
                (2) num of simplifiable blocks 'num_simplifiable_blocks'. 
                (3) loss function 'criterion'
                (4) data loaders for the training/validation sets 'train_loader' and 'val_loader',
                (5) optimizer 'optimizer'
                
            Need to be implemented:
                (1) finetune/evaluation data loader
                (2) loss function
                (3) optimizer
                
            Input: 
                `model`: model from which we will get network_def.
                `input_data_shape`: (list) [C, H, W].
                `dataset_path`: (string) path to dataset.
                `finetune_lr`: (float) short-term fine-tune learning rate.
        '''
        
        super().__init__()

        # Set the shape of the input data.
        self.input_data_shape = input_data_shape
        # Set network definition (conv & fc)
        network_def = self.get_network_def_from_model(model)        
        # Set num_simplifiable_blocks.
        self.num_simplifiable_blocks = 0
        for layer_name, layer_properties in network_def.items():
            if not layer_properties[KEY_IS_DEPTHWISE]:
                self.num_simplifiable_blocks += 1                
        # We cannot reduce the number of filters in the output layer (1).
        self.num_simplifiable_blocks -= 1 

        '''
            The following variables need to be defined depending on tasks:
                (1) finetune/evaluation data loader
                (2) loss function
                (3) optimizer
        '''
        # Data loaders for fine tuning and evaluation.
        self.batch_size = 8
        self.num_workers = 4
        self.momentum = 0.9
        self.weight_decay = 1e-4
        self.finetune_lr = finetune_lr

        # use the provided dataset_path rather than a hard-coded location
        train_dataset = NYUDataset(root=os.path.join(dataset_path, "train"), split="train")
        val_dataset = NYUDataset(root=os.path.join(dataset_path, "val"), split="val")

        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=self.batch_size,
            num_workers=self.num_workers, pin_memory=True, shuffle=True)
        self.train_loader = train_loader

        val_loader = torch.utils.data.DataLoader(
            val_dataset, batch_size=self.batch_size, shuffle=False,
            num_workers=self.num_workers, pin_memory=True)
        self.val_loader = val_loader
        
        self.criterion = torch.nn.L1Loss()
        self.optimizer = torch.optim.SGD(model.parameters(),
                                         finetune_lr, momentum=self.momentum, weight_decay=self.weight_decay)
Example #10
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # evaluation mode
    if args.evaluate:

        # Data loading code
        print("=> creating data loaders...")
        valdir = os.path.join('..', 'data', args.data, 'val')

        if args.data == 'nyudepthv2':
            from dataloaders.nyu import NYUDataset
            val_dataset = NYUDataset(valdir,
                                     split='val',
                                     modality=args.modality)
        else:
            raise RuntimeError('Dataset not found.')

        # set batch size to be 1 for validation
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)
        print("=> data loaders created.")

        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)
        return

    start_epoch = 0
    if args.train:
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))

        model = models.MobileNetSkipAdd(
            output_size=train_loader.dataset.output_size)
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # create results folder, if it does not already exist
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

        for epoch in range(start_epoch, args.epochs):
            utils.adjust_learning_rate(optimizer, epoch, args.lr)
            train(train_loader, model, criterion, optimizer,
                  epoch)  # train for one epoch
            result, img_merge = validate(val_loader, model,
                                         epoch)  # evaluate on validation set

            # remember best rmse and save checkpoint
            is_best = result.rmse < best_result.rmse
            if is_best:
                best_result = result
                with open(best_txt, 'w') as txtfile:
                    txtfile.write(
                        "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                        .format(epoch, result.mse, result.rmse, result.absrel,
                                result.lg10, result.mae, result.delta1,
                                result.gpu_time))
                if img_merge is not None:
                    img_filename = output_directory + '/comparison_best.png'
                    utils.save_image(img_merge, img_filename)

            utils.save_checkpoint(
                {
                    'args': args,
                    'epoch': epoch,
                    'arch': args.arch,
                    'model': model,
                    'best_result': best_result,
                    'optimizer': optimizer,
                }, is_best, epoch, output_directory)
def main(args):

    print("Loading config file: ", args.config)
    params = utils.load_config_file(args.config)
    params["test_dataset_paths"] = utils.format_dataset_path(
        params["test_dataset_paths"])

    if args.existing_experiment:
        experiment = ExistingExperiment(
            api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
            previous_experiment=args.existing_experiment)
    else:
        experiment = Experiment(api_key="jBFVYFo9VUsy0kb0lioKXfTmM",
                                project_name="fastdepth")

    # Data loading code
    print("Creating data loaders...")
    if args.nyu:
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(params["test_dataset_paths"], split='val')
    else:
        val_dataset = Datasets.FastDepthDataset(params["test_dataset_paths"],
                                                split='val',
                                                depth_min=params["depth_min"],
                                                depth_max=params["depth_max"],
                                                input_shape_model=(224, 224))

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=params["num_workers"],
                                             pin_memory=True)

    # Set GPU
    params["device"] = torch.device(
        "cuda:{}".format(params["device"])
        if params["device"] >= 0 and torch.cuda.is_available() else "cpu")
    print("Using device", params["device"])

    print("Loading model '{}'".format(args.model))
    if not args.nyu:
        model, _ = utils.load_model(params, args.model, params["device"])
    else:
        # Maintain compatibility for fastdepth NYU model format
        state_dict = torch.load(args.model, map_location=params["device"])
        model = models.MobileNetSkipAdd(output_size=(224, 224),
                                        pretrained=True)
        model.load_state_dict(state_dict)
        params["start_epoch"] = 0

    model.to(params["device"])

    # Create output directory
    output_directory = os.path.join(os.path.dirname(args.model), "images")
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    params["experiment_dir"] = output_directory
    print("Saving results to " + output_directory)

    evaluate(params, val_loader, model, experiment)
Example #12
def main():
    global args, best_result, output_directory, train_csv, test_csv
    # Random seed setting
    torch.manual_seed(16)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Data loading code
    print("=> creating data loaders...")
    data_dir = '/media/vasp/Data2/Users/vmhp806/depth-estimation'
    valdir = os.path.join(data_dir, 'data', args.data, 'val')
    traindir = os.path.join(data_dir, 'data', args.data, 'train')

    if args.data == 'nyu' or args.data == 'uow_dataset':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)
        #val_dataset = nc.SafeDataset(val_dataset)
        train_dataset = NYUDataset(traindir,
                                   split='train',
                                   modality=args.modality)
        #train_dataset = nc.SafeDataset(train_dataset)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True,
                                             collate_fn=my_collate)
    if not args.evaluate:
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=False,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   collate_fn=my_collate)
    print("=> data loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        if args.predict:
            predict(val_loader, model, output_directory)
        else:
            validate(val_loader, model, args.start_epoch, write_to_file=False)
        return
        # optionally resume from a checkpoint
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(chkpt_path), \
            "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint " "'{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        #args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = torch.optim.SGD(model.parameters(), lr=0.9)
        optimizer.load_state_dict(checkpoint['optimizer'])
        #optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        args.resume = True
    else:
        print("=> creating Model ({} - {}) ...".format(args.arch,
                                                       args.decoder))
        #in_channels = len(args.modality)
        if args.arch == 'mobilenet-skipconcat':
            model = models.MobileNetSkipConcat(
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'mobilenet-skipadd':
            model = models.MobileNetSkipAdd(
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'resnet18-skipconcat':
            model = models.ResNetSkipConcat(
                layers=18,
                decoder=args.decoder,
                output_size=train_loader.dataset.output_size)
        elif args.arch == 'resnet18-skipadd':
            model = models.ResNetSkipAdd(
                layers=18, output_size=train_loader.dataset.output_size)
        else:
            raise Exception('Invalid architecture')
        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()
        start_epoch = 0
    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
    #start_epoch = 0
    for epoch in range(start_epoch, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(
            val_loader, model, epoch,
            write_to_file=True)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                #'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            epoch,
            output_directory)
Example #13
def main():
    global args, best_result, output_directory, train_csv, test_csv
    print(args)
    start = 0

    # evaluation mode
    if args.evaluate:
        datasets = configuration_file.datasets_path
        valdir = os.path.join(datasets, args.data, 'val')
        print("Validation directory ", valdir)
        if args.data == 'nyudepthv2':
            from dataloaders.nyu import NYUDataset
            val_dataset = NYUDataset(valdir,
                                     split='val',
                                     modality=args.modality)
        else:
            raise RuntimeError('Dataset not found.')

        #set batch size to be 1 for validation
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)
        print("=> validation loaders created.")
        assert os.path.isfile(args.evaluate), \
            "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)

        return

    # resume from a particular check point
    elif args.resume:
        chkpt_path = args.resume
        assert os.path.isfile(
            chkpt_path), "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1  # load epoch number
        start = start_epoch  # resume from the checkpoint epoch
        best_result = checkpoint['best_result']  # load best result
        model = checkpoint['model']  # load model
        optimizer = checkpoint['optimizer']  # load optimizer
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(
            args)  # create data loader
        args.resume = True

    # create new model if checkpoint does not exist
    elif args.train:
        train_loader, val_loader = create_data_loaders(
            args)  # load train and validation data
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'MobileNet':  # if encoder is MobileNet
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size
            )  # MobileNet model is created
        else:
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size
            )  # by default MobileNet

        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
                                    momentum=args.momentum, weight_decay=args.weight_decay) # configure optimizer

        if configuration_file.GPU == True:
            if configuration_file.MULTI_GPU == True:  # training on multiple GPU
                model = torch.nn.DataParallel(model).cuda()
            else:  # training on single GPU
                model = model.cuda()
        else:
            pass

    # define loss function  and optimizer
    if args.criterion == 'l2':
        if configuration_file.GPU == True:
            criterion = MaskedMSELoss().cuda()
        else:
            criterion = MaskedMSELoss()
    elif args.criterion == 'l1':
        if configuration_file.GPU == True:
            criterion = MaskedL1Loss().cuda()
        else:
            criterion = MaskedL1Loss()

    # create results folder, if not already exists
    output_directory = utils.get_output_directory(args)

    if not os.path.exists(output_directory):  # create new directory
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory,
                             'train.csv')  # store training result
    test_csv = os.path.join(output_directory, 'test.csv')  # store test result
    best_txt = os.path.join(output_directory, 'best.txt')  # store best result

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # training starts from here
    for epoch in range(start, args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer,
              epoch)  # train for one epoch
        result, img_merge = validate(val_loader, model,
                                     epoch)  # evaluate on validation set

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse  # compare result of the current epoch and best result
        if is_best:
            best_result = result
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)
Example #14
def main():
    global args, best_result, output_directory, train_csv, test_csv
    # Data loading code
    datasets = '/content/drive/MyDrive'
    #valdir = os.path.join(datasets, 'Datasets', args.data, 'val')
    #valdir = '/content/drive/MyDrive/Datasets/Nyudepthv2Previous/nyudepthv2/val/official/'
    valdir = os.path.join(datasets, 'Datasets', 'Nyudepthv2Previous',
                          args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        val_dataset = NYUDataset(valdir, split='val', modality=args.modality)

    elif args.data == 'kitti':
        from dataloaders.kitti import KITTIDataset
        val_dataset = KITTIDataset(valdir, type='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> validation loaders created.")

    # evaluation mode
    if args.evaluate:
        assert os.path.isfile(args.evaluate), \
        "=> no model found at '{}'".format(args.evaluate)
        print("=> loading model '{}'".format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        if type(checkpoint) is dict:
            args.start_epoch = checkpoint['epoch']
            best_result = checkpoint['best_result']
            model = checkpoint['model']
            print("=> loaded best model (epoch {})".format(
                checkpoint['epoch']))
        else:
            model = checkpoint
            args.start_epoch = 0
        output_directory = os.path.dirname(args.evaluate)
        validate(val_loader, model, args.start_epoch, write_to_file=False)

        return

    # training mode
    # resume from a check point
    elif args.resume:
        print("Resume")
        chkpt_path = args.resume
        assert os.path.isfile(
            chkpt_path), "=> no checkpoint found at '{}'".format(chkpt_path)
        print("=> loading checkpoint '{}'".format(chkpt_path))
        checkpoint = torch.load(chkpt_path)
        args = checkpoint['args']
        start_epoch = checkpoint['epoch'] + 1
        best_result = checkpoint['best_result']
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        output_directory = os.path.dirname(os.path.abspath(chkpt_path))
        print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
        train_loader, val_loader = create_data_loaders(args)
        args.resume = True

    # create new model
    elif args.train:
        print("Inside Train 1----------->")
        train_loader, val_loader = create_data_loaders(args)
        print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
        in_channels = len(args.modality)
        if args.arch == 'MobileNet':
            #model = models.MobileNetSkipAdd(output_size=train_loader.dataset.output_size)
            model = ResNetSkipAdd(layers=50,
                                  output_size=train_loader.dataset.output_size,
                                  in_channels=in_channels,
                                  pretrained=args.pretrained)
            #print("Mobile Net model ",str(train_loader.dataset.output_size)
        elif args.arch == 'resnet50':
            model = ResNet(layers=50,
                           decoder=args.decoder,
                           output_size=train_loader.dataset.output_size,
                           in_channels=in_channels,
                           pretrained=args.pretrained)

        else:
            model = models.MobileNetSkipAdd(
                output_size=train_loader.dataset.output_size)  # by default MobileNet

        print("=> model created.")
        optimizer = torch.optim.SGD(model.parameters(), args.lr, \
                                    momentum=args.momentum, weight_decay=args.weight_decay)

        # model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
        model = model.cuda()

    # define loss function (criterion) and optimizer
    if args.criterion == 'l2':
        criterion = MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = MaskedL1Loss().cuda()

    # create results folder, if it does not already exist
    print("Arguments ")
    print(args)
    output_directory = utils.get_output_directory(args)

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    train_csv = os.path.join(output_directory, 'train.csv')
    test_csv = os.path.join(output_directory, 'test.csv')
    best_txt = os.path.join(output_directory, 'best.txt')

    # create new csv files with only header
    if not args.resume:
        with open(train_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
        with open(test_csv, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()

    # Training starts from here
    if args.train == True:
        print("Training...........(args.train)", args.train)
        start = 0
        for epoch in range(start, args.epochs):
            utils.adjust_learning_rate(optimizer, epoch, args.lr)
            train(train_loader, model, criterion, optimizer,
                  epoch)  # train for one epoch
            result, img_merge = validate(val_loader, model,
                                         epoch)  # evaluate on validation set

            # remember best rmse and save checkpoint
            is_best = result.rmse < best_result.rmse
            if is_best:
                best_result = result
                with open(best_txt, 'w') as txtfile:
                    txtfile.write(
                        "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                        .format(epoch, result.mse, result.rmse, result.absrel,
                                result.lg10, result.mae, result.delta1,
                                result.gpu_time))
                if img_merge is not None:
                    img_filename = output_directory + '/comparison_best.png'
                    utils.save_image(img_merge, img_filename)

            utils.save_checkpoint(
                {
                    'args': args,
                    'epoch': epoch,
                    'arch': args.arch,
                    'model': model,
                    'best_result': best_result,
                    'optimizer': optimizer,
                }, is_best, epoch, output_directory)

    elif args.resume == True:
        print("Resume......................")
        start = start_epoch
        for epoch in range(start, args.epochs):
            print("Epoch inside resume ", epoch)
            utils.adjust_learning_rate(optimizer, epoch, args.lr)
            train(train_loader, model, criterion, optimizer,
                  epoch)  # train for one epoch
            result, img_merge = validate(val_loader, model,
                                         epoch)  # evaluate on validation set

            # remember best rmse and save checkpoint
            is_best = result.rmse < best_result.rmse
            if is_best:
                best_result = result
                with open(best_txt, 'w') as txtfile:
                    txtfile.write(
                        "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                        .format(epoch, result.mse, result.rmse, result.absrel,
                                result.lg10, result.mae, result.delta1,
                                result.gpu_time))
                if img_merge is not None:
                    img_filename = output_directory + '/comparison_best.png'
                    utils.save_image(img_merge, img_filename)

            utils.save_checkpoint(
                {
                    'args': args,
                    'epoch': epoch,
                    'arch': args.arch,
                    'model': model,
                    'best_result': best_result,
                    'optimizer': optimizer,
                }, is_best, epoch, output_directory)
Example #15
def main():
    global args, best_result, output_directory, train_csv, test_csv

    # Data loading code
    print("=> creating data loaders...")
    # valdir = os.path.join('..', 'data', args.data, 'val')
    # valdir ="/home/titan-nano/Documents/DLProject/data/rgbd/val/img"

    data_dir = '/p300/dataset'
    train_dir = os.path.join(data_dir, 'data', args.data, 'train')
    val_dir = os.path.join(data_dir, 'data', args.data, 'val')

    if args.data == 'nyudepthv2':
        from dataloaders.nyu import NYUDataset
        train_dataset = NYUDataset(train_dir,
                                   split='train',
                                   modality=args.modality)
        val_dataset = NYUDataset(val_dir,
                                 split='val',
                                 modality=args.modality)
    elif args.data == 'rgbd':
        from dataloaders.sist import RGBDDataset
        train_dataset = RGBDDataset(train_dir,
                                    split='train',
                                    modality=args.modality)
        val_dataset = RGBDDataset(val_dir, split='val', modality=args.modality)
    else:
        raise RuntimeError('Dataset not found.')

    # set batch size to be 1 for validation
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    print("=> data loaders created.")

    ############################## Resume Mode ##############################
    # loading pretrained model
    print("=> loading model '{}'".format(args.evaluate))
    args.start_epoch = 0
    checkpoint = torch.load(args.evaluate)
    if type(checkpoint) is dict:
        # loading pretrained model
        model = checkpoint['model']
        print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
    else:
        model = checkpoint

    ############################## Training Setting ##############################
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # define loss function (criterion) and optimizer
    criterion = None
    if args.criterion == 'l2':
        criterion = criteria.MaskedMSELoss().cuda()
    elif args.criterion == 'l1':
        criterion = criteria.MaskedL1Loss().cuda()

    output_directory = os.path.dirname(args.evaluate)
    best_txt = os.path.join(output_directory, 'best.txt')

    ############################## Training ##############################
    for epoch in range(args.epochs):
        utils.adjust_learning_rate(optimizer, epoch, args.lr)
        train(train_loader, model, criterion, optimizer, epoch)
        result, img_merge = validate(val_loader, model, epoch)

        # remember best rmse and save checkpoint
        is_best = result.rmse < best_result.rmse
        if is_best:
            best_result = result
            best_model = model
            with open(best_txt, 'w') as txtfile:
                txtfile.write(
                    "epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n"
                    .format(epoch, result.mse, result.rmse, result.absrel,
                            result.lg10, result.mae, result.delta1,
                            result.gpu_time))
            if img_merge is not None:
                img_filename = output_directory + '/comparison_best.png'
                utils.save_image(img_merge, img_filename)

        utils.save_checkpoint(
            {
                'args': args,
                'epoch': epoch,
                'arch': args.arch,
                'model': model,
                'best_result': best_result,
                'optimizer': optimizer,
            }, is_best, epoch, output_directory)

    # save loss file
    loss_file = np.array(history_loss)
    np.savetxt(output_directory + '/loss.txt', loss_file)

    torch.save(best_model.state_dict(), output_directory + '/best_model.pkl')