예제 #1
0
def prepare_dataloader(data_directory, mode, augment_parameters,
                       do_augmentation, batch_size, size, num_workers,
                       filenames=None, load_gt=False, load_predition=False,
                       args=None):
    """Build a KITTI dataset and its DataLoader.

    When ``filenames`` is None, each subdirectory of ``data_directory``
    becomes its own KittiLoader and the results are merged with
    ConcatDataset; otherwise a single KittiLoader is built directly from
    ``data_directory`` using the explicit ``filenames`` list.

    Args:
        data_directory: Root directory of the data.
        mode: 'train' enables shuffling; any other value disables it.
        augment_parameters: Forwarded to ``image_transforms``.
        do_augmentation: Forwarded to ``image_transforms``.
        batch_size: DataLoader batch size.
        size: Target image size, forwarded to ``image_transforms``.
        num_workers: Number of DataLoader worker processes.
        filenames: Optional explicit file list for a single KittiLoader.
        load_gt: Forwarded to KittiLoader (presumably loads ground truth
            — TODO confirm against KittiLoader).
        load_predition: Forwarded to KittiLoader. Keyword kept as spelled
            (sic) because KittiLoader expects this exact name.
        args: Extra options forwarded to KittiLoader.

    Returns:
        Tuple ``(n_img, loader)``: dataset length and the DataLoader.
    """
    data_transform = image_transforms(
        mode=mode,
        augment_parameters=augment_parameters,
        do_augmentation=do_augmentation,
        size=size)
    if filenames is None:
        # One KittiLoader per subdirectory, merged into a single dataset.
        data_dirs = os.listdir(data_directory)
        datasets = [KittiLoader(os.path.join(data_directory, data_dir),
                                mode, transform=data_transform)
                    for data_dir in data_dirs]
        dataset = ConcatDataset(datasets)
    else:
        dataset = KittiLoader(data_directory, mode, transform=data_transform,
                              filenames=filenames, load_gt=load_gt,
                              load_predition=load_predition, args=args)

    n_img = len(dataset)
    print('Use a dataset with', n_img, 'images')
    # Shuffle only during training; pin memory in both cases for faster
    # host-to-GPU transfers. (Collapses the original duplicated branches.)
    loader = DataLoader(dataset, batch_size=batch_size,
                        shuffle=(mode == 'train'), num_workers=num_workers,
                        pin_memory=True)
    return n_img, loader
예제 #2
0
def prepare_dataloader(data_directory, mode, augment_parameters,
                       do_augmentation, batch_size, size, num_workers):
    """Build one shuffled DataLoader over all subdirectories of *data_directory*.

    Each subdirectory becomes its own KittiLoader; ConcatDataset merges
    them into a single dataset.

    Args:
        data_directory: Root directory containing one subdirectory per split.
        mode: Forwarded to ``image_transforms`` and KittiLoader.
        augment_parameters: Forwarded to ``image_transforms``.
        do_augmentation: Forwarded to ``image_transforms``.
        batch_size: DataLoader batch size.
        size: Target image size, forwarded to ``image_transforms``.
        num_workers: Number of DataLoader worker processes.

    Returns:
        Tuple ``(n_img, loader)``: dataset length and the DataLoader.

    NOTE(review): the loader always shuffles, even when ``mode`` is not
    'train' — confirm this is intended for evaluation use.
    """
    data_dirs = os.listdir(data_directory)
    data_transform = image_transforms(mode=mode,
                                      augment_parameters=augment_parameters,
                                      do_augmentation=do_augmentation,
                                      size=size)
    datasets = [
        KittiLoader(os.path.join(data_directory, data_dir),
                    mode,
                    transform=data_transform) for data_dir in data_dirs
    ]

    dataset = ConcatDataset(datasets)
    n_img = len(dataset)
    print('Use a dataset with', n_img, 'images')
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers)

    return n_img, loader
def prepare_dataloader(data_directory, mode, augment_parameters,
                       do_augmentation, batch_size, size, num_workers,
                       split, filename):
    """Build a DataLoader for either the 'kitti' or 'eigen' split.

    Args:
        data_directory: Root directory of the data.
        mode: 'train' enables shuffling; any other value disables it.
        augment_parameters: Forwarded to ``image_transforms``.
        do_augmentation: Forwarded to ``image_transforms``.
        batch_size: DataLoader batch size.
        size: Target image size, forwarded to ``image_transforms``.
        num_workers: Number of DataLoader worker processes.
        split: Either 'kitti' (one KittiLoader per subdirectory) or
            'eigen' (a single KittiLoader_Eigen driven by *filename*).
        filename: File list used only by the 'eigen' split.

    Returns:
        Tuple ``(n_img, loader)``: dataset length and the DataLoader.

    Raises:
        ValueError: if ``split`` is neither 'kitti' nor 'eigen'.
    """
    data_dirs = os.listdir(data_directory)
    data_transform = image_transforms(
        mode=mode,
        augment_parameters=augment_parameters,
        do_augmentation=do_augmentation,
        size=size)
    if split == 'kitti':
        # datasets is a list of per-subdirectory loaders; ConcatDataset
        # merges them into one combined dataset.
        datasets = [KittiLoader(os.path.join(data_directory,
                                data_dir), mode, transform=data_transform)
                                for data_dir in data_dirs]
        dataset = ConcatDataset(datasets)
        n_img = len(dataset)
        print('KITTI: Use a dataset with', n_img, 'images')
    elif split == 'eigen':
        dataset = KittiLoader_Eigen(root_dir=data_directory,
                                    root_filename=filename,
                                    mode=mode, transform=data_transform)
        n_img = len(dataset)
        print('EIGEN: Use a dataset with', n_img, 'images')
    else:
        # Originally only printed 'Wrong split' and fell through to a
        # NameError on `dataset`; fail fast with a clear exception instead.
        raise ValueError("Wrong split: expected 'kitti' or 'eigen', got %r"
                         % (split,))
    # Shuffle only during training; pin memory in both cases.
    loader = DataLoader(dataset, batch_size=batch_size,
                        shuffle=(mode == 'train'), num_workers=num_workers,
                        pin_memory=True)
    return n_img, loader
예제 #4
0
def prepare_dataloader(data_directory, mode, do_augmentation, batch_size,
                       input_channels, size, num_workers):
    """Build a DataLoader over one KittiLoader per subdirectory.

    Args:
        data_directory: Root directory containing one subdirectory per split.
        mode: Forwarded to KittiLoader (also compared against 'train' below).
        do_augmentation: Forwarded to KittiLoader.
        batch_size: DataLoader batch size.
        input_channels: Forwarded to KittiLoader.
        size: (height, width) pair; ``size[0]``/``size[1]`` go to KittiLoader.
        num_workers: Number of DataLoader worker processes.

    Returns:
        Tuple ``(n_img, loader)``: dataset length and the DataLoader.
    """
    data_dirs = os.listdir(data_directory)
    datasets = [
        KittiLoader(os.path.join(data_directory, data_dir), mode,
                    input_channels, size[0], size[1], do_augmentation)
        for data_dir in data_dirs
    ]
    dataset = ConcatDataset(datasets)
    n_img = len(dataset)
    print('Use a dataset with', n_img, 'samples')
    # The original if/else built byte-identical loaders for both modes
    # (shuffle=False even for 'train'); collapsed to one call, preserving
    # that behavior. NOTE(review): training usually wants shuffle=True —
    # confirm the unshuffled training loader is intentional.
    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=num_workers,
                        pin_memory=True)
    return n_img, loader
예제 #5
0
def prepare_dataloader_kitty(
    data_directory,
    mode,
    augment_parameters,
    do_augmentation,
    batch_size,
    size,
    num_workers,
):
    """Build a KITTI dataset and its DataLoader.

    Each subdirectory of *data_directory* becomes its own KittiLoader;
    ConcatDataset merges them into a single dataset.

    Args:
        data_directory: Root directory containing one subdirectory per split.
        mode: 'train' enables shuffling; any other value disables it.
        augment_parameters: Forwarded to ``image_transforms``.
        do_augmentation: Forwarded to ``image_transforms``.
        batch_size: DataLoader batch size.
        size: Target image size, forwarded to ``image_transforms``.
        num_workers: Number of DataLoader worker processes.

    Returns:
        Tuple ``(n_img, loader)``: dataset length and the DataLoader.
    """
    data_dirs = os.listdir(data_directory)
    data_transform = image_transforms(
        mode=mode,
        augment_parameters=augment_parameters,
        do_augmentation=do_augmentation,
        size=size,
    )
    datasets = [
        KittiLoader(os.path.join(data_directory, data_dir),
                    mode,
                    transform=data_transform) for data_dir in data_dirs
    ]
    dataset = ConcatDataset(datasets)
    n_img = len(dataset)
    print("Use a dataset with", n_img, "images")
    # Shuffle only during training; pin memory in both cases. (Collapses
    # the original duplicated branches differing only in `shuffle`.)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=(mode == "train"),
        num_workers=num_workers,
        pin_memory=True,
    )
    return n_img, loader
예제 #6
0
 def __init__(self, args):
     """Configure data loading, model, loss and optimizer from *args*.

     ``args.mode`` selects one of two setups:
       * 'train': a shuffled DataLoader over every subdirectory of
         ``args.data_dir``, a MonodepthLoss, and an Adam optimizer.
       * 'test': a batch-size-1 unshuffled DataLoader over
         ``args.data_dir`` with weights loaded from ``args.model_path``.

     NOTE(review): if ``args.model`` is neither 'resnet50_md' nor
     'resnet18_md', ``self.model`` is never assigned and later attribute
     access will fail — confirm callers only pass those two values.
     NOTE(review): any other ``args.mode`` value leaves the instance
     mostly unconfigured; verify upstream validation.
     """
     self.args = args
     if args.mode == 'train':
         # Load data
         data_dirs = os.listdir(args.data_dir)
         data_transform = image_transforms(
             mode=args.mode,
             tensor_type=args.tensor_type,
             augment_parameters=args.augment_parameters,
             do_augmentation=args.do_augmentation)
         # One KittiLoader per subdirectory, merged via ConcatDataset.
         train_datasets = [
             KittiLoader(os.path.join(args.data_dir, data_dir),
                         True,
                         transform=data_transform) for data_dir in data_dirs
         ]
         train_dataset = ConcatDataset(train_datasets)
         self.n_img = len(train_dataset)
         print('Use a dataset with', self.n_img, 'images')
         self.train_loader = DataLoader(train_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=True)
         # Set up model
         # Use CUDA only when available AND the configured tensor type is CUDA.
         self.device = torch.device(
             ('cuda:0' if torch.cuda.is_available()
              and args.tensor_type == 'torch.cuda.FloatTensor' else 'cpu'))
         self.loss_function = MonodepthLoss(n=4,
                                            SSIM_w=0.85,
                                            disp_gradient_w=0.1,
                                            lr_w=1).to(self.device)
         if args.model == 'resnet50_md':
             self.model = models_resnet.resnet50_md(3)
         elif args.model == 'resnet18_md':
             self.model = models_resnet.resnet18_md(3)
         self.model = self.model.to(self.device)
         self.optimizer = optim.Adam(self.model.parameters(),
                                     lr=args.learning_rate)
         if args.tensor_type == 'torch.cuda.FloatTensor':
             torch.cuda.synchronize()
     elif args.mode == 'test':
         # Load data
         self.output_directory = args.output_directory
         self.input_height = args.input_height
         self.input_width = args.input_width
         data_transform = image_transforms(mode=args.mode,
                                           tensor_type=args.tensor_type)
         test_dataset = ImageLoader(args.data_dir,
                                    False,
                                    transform=data_transform)
         self.num_test_examples = len(test_dataset)
         # batch_size=1: test images are processed one at a time.
         self.test_loader = DataLoader(test_dataset,
                                       batch_size=1,
                                       shuffle=False)
         # Set up model
         self.device = torch.device(
             ('cuda:0' if torch.cuda.is_available()
              and args.tensor_type == 'torch.cuda.FloatTensor' else 'cpu'))
         if args.model == 'resnet50_md':
             self.model = models_resnet.resnet50_md(3)
         elif args.model == 'resnet18_md':
             self.model = models_resnet.resnet18_md(3)
         # Weights are loaded before moving the model to the target device.
         self.model.load_state_dict(torch.load(args.model_path))
         self.model = self.model.to(self.device)
예제 #7
0
from data_loader import KittiLoader
from laser_odometry import Odometry
from laser_mapping import Mapper
import argparse
import json
import numpy as np

# Command-line interface. Both options default to None; the main block
# only reads the config file when --config_path is supplied.
parser = argparse.ArgumentParser()
parser.add_argument('--kitti_path', type=str, help='Input folder of KITTI .bin files')
parser.add_argument('--config_path', type=str, help='Configuration file')

if __name__== '__main__':
    args = parser.parse_args()
    loader = KittiLoader(path=args.kitti_path, name='Kitti dataset')

    config = None
    if args.config_path is not None:
        with open(args.config_path) as config_file:
            config_data = config_file.read()
            config = json.loads(config_data)
    
    odometry = Odometry(config=config)
    mapper = Mapper(config=config)
    skip_frame = 5
    res_mapped = []
    res_odom = []

    for i in range(len(loader)):
        cloud = loader[i]
        surf_pts, corner_pts, odom = odometry.grab_frame(cloud)
        res_odom.append(odom[0:3, 3].reshape(3))