output_types=output_types, train_split=train_split, #concatenate_inputs=True ) if args.dataset == 'AachenDayNight': kwargs['night_augmentation'] = args.use_augmentation if (args.model.find('mapnet') >= 0) or (args.model.find('semantic') >= 0) or (args.model.find('multitask') >= 0): vo_func = calc_vos_safe_fc if fc_vos else calc_vos_safe data_set = MF(dataset=args.dataset, steps=steps, skip=skip, real=real, variable_skip=variable_skip, include_vos=False, vo_func=vo_func, no_duplicates=False, **kwargs) L = len(data_set.dset) elif args.dataset == '7Scenes': from dataset_loaders.seven_scenes import SevenScenes data_set = SevenScenes(**kwargs) L = len(data_set) elif args.dataset == 'DeepLoc': from dataset_loaders.deeploc import DeepLoc data_set = DeepLoc(**kwargs) L = len(data_set) elif args.dataset == 'RobotCar': from dataset_loaders.robotcar import RobotCar data_set = RobotCar(**kwargs) L = len(data_set) elif args.dataset == 'AachenDayNight': from dataset_loaders.aachen import AachenDayNight data_set = AachenDayNight(**kwargs) L = len(data_set) elif args.dataset == 'CambridgeLandmarks': from dataset_loaders.cambridge import Cambridge
# training split kwargs = dict(scene=args.scene, data_path=data_dir, train=True, real=False, seed=7, train_split=20) if args.dataset in ['DeepLoc', 'AachenDayNight', 'CambridgeLandmarks']: kwargs['input_types'] = [] else: kwargs['skip_images'] = True if args.dataset == '7Scenes': from dataset_loaders.seven_scenes import SevenScenes dset = SevenScenes(**kwargs) elif args.dataset == 'DeepLoc': from dataset_loaders.deeploc import DeepLoc dset = DeepLoc(**kwargs) elif args.dataset == 'RobotCar': from dataset_loaders.robotcar import RobotCar dset = RobotCar(**kwargs) elif args.dataset == 'AachenDayNight': from dataset_loaders.aachen import AachenDayNight dset = AachenDayNight(**kwargs) elif args.dataset == 'CambridgeLandmarks': from dataset_loaders.cambridge import Cambridge dset = Cambridge(**kwargs) elif args.dataset == 'stylized_localization': from dataset_loaders.stylized_loader import StylizedCambridge dset = StylizedCambridge(**kwargs)
tforms.append(transforms.ToTensor())
# stats[0] / stats[1] hold per-channel mean and variance; Normalize expects a
# standard deviation, hence the sqrt.
tforms.append(transforms.Normalize(mean=stats[0], std=np.sqrt(stats[1])))
data_transform = transforms.Compose(tforms)
# targets arrive as numpy arrays; convert to float tensors
target_transform = transforms.Lambda(lambda x: torch.from_numpy(x).float())

# datasets
data_dir = osp.join('..', 'data', 'deepslam_data', args.dataset)
kwargs = dict(scene=args.scene, data_path=data_dir, transform=data_transform,
              target_transform=target_transform, seed=seed)
if args.model == 'posenet':
    # single-frame model: load the raw dataset directly
    if args.dataset == '7Scenes':
        from dataset_loaders.seven_scenes import SevenScenes
        train_set = SevenScenes(train=True, **kwargs)
        val_set = SevenScenes(train=False, **kwargs)
    elif args.dataset == 'RobotCar':
        from dataset_loaders.robotcar import RobotCar
        train_set = RobotCar(train=True, **kwargs)
        val_set = RobotCar(train=False, **kwargs)
    else:
        raise NotImplementedError
elif args.model.find('mapnet') >= 0:
    # sequence model: extend kwargs with temporal sampling parameters
    kwargs = dict(kwargs, dataset=args.dataset, skip=skip, steps=steps,
                  variable_skip=variable_skip)
    if args.model.find('++') >= 0:
        # mapnet++ trains against visual-odometry estimates (vo_lib)
        # (chunk ends mid-call; arguments continue past this view)
        train_set = MFOnline(vo_lib=vo_lib,
def __init__(self, dataset, include_vos=False, no_duplicates=False, *args,
             **kwargs):
    """
    Wrap a single-frame dataset so each item is a short sequence of frames
    (as consumed by MapNet-style sequence models).

    :param dataset: underlying dataset name
        ('7Scenes' | 'InLoc' | 'InLocRes' | 'RobotCar')
    :param steps: Number of frames to return on every call
    :param skip: Number of frames to skip
    :param variable_skip: If True, skip = [1, ..., skip]
    :param include_vos: True if the VOs have to be appended to poses. If real
      and include_vos are both on, it gives absolute poses from GT and VOs
      from the SLAM / DSO
    :param no_duplicates: if True, does not duplicate frames when len(self)
      is not a multiple of skip*steps
    :raises NotImplementedError: for an unrecognised dataset name
    """
    # temporal-sampling options are popped so the remaining kwargs can be
    # forwarded untouched to the underlying dataset constructor
    self.steps = kwargs.pop('steps', 2)
    self.skip = kwargs.pop('skip', 1)
    self.variable_skip = kwargs.pop('variable_skip', False)
    self.real = kwargs.pop('real', False)
    self.include_vos = include_vos
    # 'train' is read but deliberately left in kwargs for the wrapped dataset
    self.train = kwargs['train']
    self.vo_func = kwargs.pop('vo_func', calc_vos_simple)
    self.no_duplicates = no_duplicates

    if dataset == '7Scenes':
        from dataset_loaders.seven_scenes import SevenScenes
        self.dset = SevenScenes(*args, real=self.real, **kwargs)
        if self.include_vos and self.real:
            # ground-truth poses come from a second, image-less copy
            self.gt_dset = SevenScenes(*args, skip_images=True, real=False,
                                       **kwargs)
    elif dataset == 'InLoc':
        from dataset_loaders.inloc import InLoc
        self.dset = InLoc(*args, real=self.real, **kwargs)
        if self.include_vos and self.real:
            # This might not work
            self.gt_dset = InLoc(*args, skip_images=True, real=False,
                                 **kwargs)
    elif dataset == 'InLocRes':
        # BUGFIX: import InLoc alongside InLocQuery. Previously only
        # InLocQuery was imported here, while InLoc was bound in the
        # mutually-exclusive 'InLoc' branch above — so the gt_dset line
        # below raised UnboundLocalError on the include_vos+real path.
        from dataset_loaders.inloc import InLoc, InLocQuery
        self.dset = InLocQuery(*args, real=self.real, **kwargs)
        if self.include_vos and self.real:
            # This might not work
            self.gt_dset = InLoc(*args, skip_images=True, real=False,
                                 **kwargs)
    elif dataset == 'RobotCar':
        from dataset_loaders.robotcar import RobotCar
        self.dset = RobotCar(*args, real=self.real, **kwargs)
        if self.include_vos and self.real:
            self.gt_dset = RobotCar(*args, skip_images=True, real=False,
                                    **kwargs)
    else:
        raise NotImplementedError

    # total temporal footprint (in frames) of one returned sample
    self.L = self.steps * self.skip