예제 #1
0
 def __init__(self, dataset, *args, **kwargs):
     """Create a paired set of pose datasets: a 'real' one and a GT one.

     Both underlying datasets are built with skip_images=True, which
     presumably makes them pose-only (no image loading) — the flag is
     handled inside the dataset classes, outside this view.

     :param dataset: '7Scenes' or 'RobotCar'; anything else raises
         NotImplementedError
     :param args, kwargs: forwarded verbatim to the dataset constructor
     """
     # force the pose-only flag without mutating the caller's dict
     kwargs = dict(kwargs, skip_images=True)
     if dataset == '7Scenes':
         from seven_scenes import SevenScenes as dset_class
     elif dataset == 'RobotCar':
         from robotcar import RobotCar as dset_class
     else:
         raise NotImplementedError
     # identical construction for both, differing only in the real flag
     self.real_dset = dset_class(*args, real=True, **kwargs)
     self.gt_dset = dset_class(*args, real=False, **kwargs)
예제 #2
0
    transforms.Resize(256),            # shorter image side -> 256 px
    transforms.RandomCrop(crop_size),  # random spatial crop to crop_size
    transforms.ToTensor()              # PIL image -> tensor
])

# dataset loader: pick the loader class for the requested dataset and
# build a single train-split dataset with ground-truth poses
data_dir = osp.join('..', 'data', 'deepslam_data', args.dataset)
kwargs = {'sequence': args.scene,
          'data_path': data_dir,
          'train': True,
          'real': False,
          'transform': data_transform}
if args.dataset == '7Scenes':
    dset_class = SevenScenes
elif args.dataset == 'RobotCar':
    dset_class = RobotCar
else:
    raise NotImplementedError
dset = dset_class(**kwargs)

# accumulate
# Per-pixel running sum (and sum of squares) over the whole dataset —
# presumably to derive mean/std statistics afterwards; the sq_acc update
# is outside this chunk (TODO confirm).
batch_size = 8
num_workers = batch_size  # one worker per sample in a batch
loader = DataLoader(dset,
                    batch_size=batch_size,
                    num_workers=num_workers,
                    collate_fn=safe_collate)  # NOTE(review): safe_collate is a project helper — semantics not visible here
acc = np.zeros((3, crop_size[0], crop_size[1]))     # running sum, C x H x W
sq_acc = np.zeros((3, crop_size[0], crop_size[1]))  # running sum of squares
for batch_idx, (imgs, _) in enumerate(loader):
    imgs = imgs.numpy()
    acc += np.sum(imgs, axis=0)  # collapse the batch dimension
예제 #3
0
File: eval.py  Project: zjudzl/geomapnet
                  steps=steps,
                  skip=skip,
                  real=real,
                  variable_skip=variable_skip,
                  include_vos=args.pose_graph,  # append VOs to poses in pose-graph mode
                  vo_func=vo_func,
                  no_duplicates=False,
                  **kwargs)
    # MF-style datasets wrap an inner dataset; presumably the per-frame
    # count comes from the wrapped dset — TODO confirm
    L = len(data_set.dset)
elif args.dataset == '7Scenes':
    from dataset_loaders.seven_scenes import SevenScenes
    data_set = SevenScenes(**kwargs)
    L = len(data_set)
elif args.dataset == 'RobotCar':
    from dataset_loaders.robotcar import RobotCar
    data_set = RobotCar(**kwargs)
    L = len(data_set)
else:
    raise NotImplementedError

# loader (batch_size MUST be 1)
batch_size = 1
assert batch_size == 1  # guard against accidental edits above
loader = DataLoader(data_set,
                    batch_size=batch_size,
                    num_workers=5,
                    shuffle=False,
                    pin_memory=True)

# activate GPUs
CUDA = torch.cuda.is_available()
예제 #4
0
# datasets
data_dir = osp.join('..', 'data', 'deepslam_data', args.dataset)
# constructor arguments shared by the train and val splits
kwargs = {'scene': args.scene,
          'data_path': data_dir,
          'transform': data_transform,
          'target_transform': target_transform,
          'seed': seed}
if args.model == 'posenet':
    # single-frame model: pick the loader class, then build both splits
    if args.dataset == '7Scenes':
        from dataset_loaders.seven_scenes import SevenScenes
        dset_class = SevenScenes
    elif args.dataset == 'RobotCar':
        from dataset_loaders.robotcar import RobotCar
        dset_class = RobotCar
    else:
        raise NotImplementedError
    train_set = dset_class(train=True, **kwargs)
    val_set = dset_class(train=False, **kwargs)
elif args.model.find('mapnet') >= 0:
    # mapnet variants consume frame sequences: extend the shared kwargs
    # with the temporal-sampling parameters
    kwargs = dict(kwargs,
                  dataset=args.dataset,
                  skip=skip,
                  steps=steps,
                  variable_skip=variable_skip)
    if args.model.find('++') >= 0:
        # mapnet++: presumably an online dataset that fuses VO (or GPS
        # when vo_lib == 'gps') with absolute poses — TODO confirm
        train_set = MFOnline(vo_lib=vo_lib,
                             gps_mode=(vo_lib == 'gps'),
                             **kwargs)
        val_set = None  # NOTE(review): no validation split in ++ mode — confirm intent
    else:  # plain mapnet (body continues past this chunk)
예제 #5
0
                                             'images')
parser.add_argument('--scene', type=str, required=True)  # RobotCar scene name
parser.add_argument('--n_cores', type=int, default=4)    # also used as the loader batch size below
parser.add_argument('--val', action='store_true')        # process the val (test) split instead of train
args = parser.parse_args()
# report which split will be processed
if args.val:
    print('processing VAL data using {:d} cores'.format(args.n_cores))
else:
    print('processing TRAIN data using {:d} cores'.format(args.n_cores))

# create data loader
batch_size = args.n_cores
data_dir = osp.join('..', 'data', 'deepslam_data', 'RobotCar')
# transforms.Scale was a deprecated alias of Resize (removed in
# torchvision >= 0.9); use Resize, consistent with the rest of the project
transform = transforms.Compose([transforms.Resize(256),
                                transforms.Lambda(lambda x: np.asarray(x))])
dset = RobotCar(scene=args.scene, data_path=data_dir, train=(not args.val),
                transform=transform, undistort=True)
loader = DataLoader(dset, batch_size=batch_size, num_workers=args.n_cores,
                    collate_fn=safe_collate)

# gather information about output filenames
base_dir = osp.join(data_dir, args.scene)
if args.val:
    split_filename = osp.join(base_dir, 'test_split.txt')
else:
    split_filename = osp.join(base_dir, 'train_split.txt')
# sequence list: one name per line, '#' lines are comments
with open(split_filename, 'r') as f:
    seqs = [l.rstrip() for l in f if not l.startswith('#')]

# collect per-sequence image paths (loop body continues past this chunk)
im_filenames = []
for seq in seqs:
    seq_dir = osp.join(base_dir, seq)
예제 #6
0
    def __init__(self,
                 dataset,
                 include_vos=False,
                 no_duplicates=False,
                 *args,
                 **kwargs):
        """
        Wrap a single-frame pose dataset so sequences of frames are returned.

        :param dataset: '7Scenes', 'InLoc', 'InLocRes' or 'RobotCar';
        anything else raises NotImplementedError
        :param steps: Number of frames to return on every call
        :param skip: Number of frames to skip
        :param variable_skip: If True, skip = [1, ..., skip]
        :param include_vos: True if the VOs have to be appended to poses. If real
        and include_vos are both on, it gives absolute poses from GT and VOs from
        the SLAM / DSO
        :param no_duplicates: if True, does not duplicate frames when len(self) is
        not a multiple of skip*steps

        steps / skip / variable_skip / real / vo_func are consumed from
        kwargs; the remaining kwargs (which must include 'train') are
        forwarded to the wrapped dataset's constructor.
        """
        # sequence-sampling parameters, popped so they are not forwarded
        self.steps = kwargs.pop('steps', 2)
        self.skip = kwargs.pop('skip', 1)
        self.variable_skip = kwargs.pop('variable_skip', False)
        self.real = kwargs.pop('real', False)
        self.include_vos = include_vos
        # 'train' is read but deliberately left in kwargs — the wrapped
        # dataset needs it as well
        self.train = kwargs['train']
        self.vo_func = kwargs.pop('vo_func', calc_vos_simple)
        self.no_duplicates = no_duplicates

        if dataset == '7Scenes':
            from dataset_loaders.seven_scenes import SevenScenes
            self.dset = SevenScenes(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                # GT twin supplies absolute poses only, so skip its images
                self.gt_dset = SevenScenes(*args,
                                           skip_images=True,
                                           real=False,
                                           **kwargs)
        elif dataset == 'InLoc':
            from dataset_loaders.inloc import InLoc
            self.dset = InLoc(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                # This might not work
                self.gt_dset = InLoc(*args,
                                     skip_images=True,
                                     real=False,
                                     **kwargs)
        elif dataset == 'InLocRes':
            # BUGFIX: InLoc was referenced below without a branch-local
            # import (every other branch imports locally), risking a
            # NameError when include_vos and real are both set — import
            # it alongside InLocQuery
            from dataset_loaders.inloc import InLoc, InLocQuery
            self.dset = InLocQuery(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                # This might not work
                # NOTE(review): gt_dset is InLoc while dset is InLocQuery —
                # confirm the asymmetry is intentional
                self.gt_dset = InLoc(*args,
                                     skip_images=True,
                                     real=False,
                                     **kwargs)
        elif dataset == 'RobotCar':
            from dataset_loaders.robotcar import RobotCar
            self.dset = RobotCar(*args, real=self.real, **kwargs)
            if self.include_vos and self.real:
                self.gt_dset = RobotCar(*args,
                                        skip_images=True,
                                        real=False,
                                        **kwargs)
        else:
            raise NotImplementedError

        # one returned sequence spans steps * skip frames of the inner dataset
        self.L = self.steps * self.skip