Example #1
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)
        self.folders = list()
        with open(self.root_path + 'TrainSplit.txt', 'r') as f:
            for line in f:
                fold = 'seq-{:02d}/'.format(int(re.search(r'(?<=sequence)\d+', line).group(0)))
                self.folders.append(self.root_path + fold)

        self.load_data()

        if sparse_pruning:
            step = round(1 / (1-pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
        else:
            self.data = self.data[round(len(self.data)*pruning):]
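Both branches keep roughly a 1 - pruning fraction of the data; they differ in which entries survive. Sparse pruning keeps every step-th frame, spreading the retained samples across the whole sequence, while the dense branch simply drops the leading pruning fraction. A minimal sketch of the arithmetic on a toy list:

data = list(range(100))
pruning = 0.9

# Sparse: keep every step-th item, spread evenly across the sequence.
step = round(1 / (1 - pruning))                    # 10
sparse = [d for i, d in enumerate(data) if i % step == 0]

# Dense: drop the first 90% and keep only the tail.
dense = data[round(len(data) * pruning):]

print(len(sparse), sparse[:3])                     # 10 [0, 10, 20]
print(len(dense), dense[:3])                       # 10 [90, 91, 92]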
Example #2
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_train.txt'
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if sparse_pruning:
            step = round(1 / (1-pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            indexor = np.zeros(len(self.data))
            for i in range(len(self.data)):
                if i % step == 0:
                    indexor[i] = 1
            self.data = self.data[indexor.astype(bool)]  # NumPy needs a bool mask; a float array raises IndexError
        else:
            self.data = self.data[round(len(self.data)*pruning):]
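The indexor built above is a float array, so the indexing needs the bool cast shown (Example #5 below applies the same cast): NumPy only accepts boolean or integer arrays as indices. A self-contained sketch of the masking, including a loop-free equivalent:

import numpy as np

arr = np.arange(20).reshape(10, 2)        # stand-in for self.data
step = 3

mask = np.zeros(len(arr))                 # float zeros, as above
mask[::step] = 1
kept = arr[mask.astype(bool)]             # without the cast: IndexError

# Loop-free equivalent of the index-building loop:
kept2 = arr[np.arange(len(arr)) % step == 0]
assert (kept == kept2).all()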
Example #3
    def __init__(self, root, file, modalities, **kwargs):
        self.root = root
        self.transform = kwargs.pop('transform', 'default')
        self.bearing = kwargs.pop('bearing', True)
        self.panorama_split = kwargs.pop('panorama_split', {
            'v_split': 3,
            'h_split': 2,
            'offset': 0
        })

        if kwargs:
            raise TypeError('Unexpected **kwargs: %r' % kwargs)

        if self.transform == 'default':
            self.transform = {'first': (tf.Resize((224, 224)), tf.ToTensor())}

        self.data = pd.read_csv(self.root + file, skiprows=2, sep=';')
        self.modalities = modalities
        self.used_mod = self.modalities
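Popping every recognised option off kwargs and then raising on whatever remains turns misspelled keyword arguments into immediate errors instead of silently ignored defaults. A minimal sketch of the pattern, using a hypothetical configure helper:

def configure(**kwargs):
    bearing = kwargs.pop('bearing', True)
    offset = kwargs.pop('offset', 0)
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    return bearing, offset

print(configure(bearing=False))           # (False, 0)
try:
    configure(bearnig=False)              # the typo is caught, not ignored
except TypeError as e:
    print(e)                              # Unexpected **kwargs: {'bearnig': False}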
Example #4
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        self.folders = list()
        with open(self.root_path + 'TestSplit.txt', 'r') as f:
            for line in f:
                fold = 'seq-{:02d}/'.format(int(re.search(r'(?<=sequence)\d+', line).group(0)))
                self.folders.append(self.root_path + fold)

        self.load_data()

        if light:
            step = 10
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
Example #5
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_test.txt'
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if light:
            step = 10
            indexor = np.zeros(len(self.data))
            for i in range(len(self.data)):
                if i % step == 0:
                    indexor[i] = 1

            self.data = self.data[indexor.astype(bool)]
Example #6
                for row in b['pose']['T'].squeeze(0).numpy():
                    for num in row:
                        f.write("%16.7e\t" % num)
                    f.write('\n')


if __name__ == '__main__':
    import datasets.SevenScene as SevenS
    aug_tf = {
        'first': (tf.CenterCrop(480),),
        'rgb': (tf.ToTensor(), ),
        'depth': (tf.ToTensor(), tf.DepthTransform())
    }

    std_tf = {
        'first': (tf.Resize(256),  tf.RandomCrop(224),),
        'rgb': (tf.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.05),
                tf.ToTensor(),
                tf.Normalize(mean=[0.4684, 0.4624, 0.4690], std=[0.2680, 0.2659, 0.2549])),
        'depth': (tf.Resize(56), tf.ToTensor(), tf.DepthTransform())
    }

    for room in ['pumpkin/', 'chess/', 'red_kitchen/']:
        print(room)
        root = os.environ['SEVENSCENES'] + room

        train_aug_dataset = SevenS.AugmentedTrain(root=root,
                                                  transform=aug_tf,
                                                  final_depth_size=256,
                                                  reduce_fact=1.85,
                                                  zoom_percentage=0.15)
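The pose dump at the top of this example writes each row of the 4x4 transform as tab-separated %16.7e fields, one matrix row per line. The same loop run against an identity matrix, buffered in memory for illustration:

import io
import numpy as np

T = np.eye(4)                             # stand-in for b['pose']['T']
buf = io.StringIO()
for row in T:
    for num in row:
        buf.write("%16.7e\t" % num)
    buf.write('\n')
print(buf.getvalue())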
Example #7
                    spamwriter.writerow(list(zip(id, ranked[i])))

        return ranked


if __name__ == '__main__':
    logger.setLevel('INFO')
    modtouse = {'rgb': 'dataset.txt', 'depth': 'mono_depth_dataset.txt'}
    transform = {
        'first': (tf.RandomResizedCrop(224), ),
        'rgb': (tf.ToTensor(), ),
        'depth': (tf.ToTensor(), )
    }
    transform_eval = {
        'first': (
            tf.Resize((224, 224)),
            tf.ToTensor(),
        ),
    }

    query_data = Robotcar.VBLDataset(root=os.environ['ROBOTCAR'] +
                                     'Robotcar_D1/Query/',
                                     modalities={'rgb': 'query.txt'},
                                     coord_file='coordxIm.txt',
                                     transform=transform_eval,
                                     bearing=False)
    data = Robotcar.VBLDataset(root=os.environ['ROBOTCAR'] +
                               'Robotcar_D1/Dataset/',
                               modalities={'rgb': 'dataset.txt'},
                               coord_file='coordxIm.txt',
                               transform=transform_eval,
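These datasets take per-modality transform dicts with a shared 'first' stage. A hedged sketch of how such a dict is presumably consumed (the apply_transforms helper is hypothetical, not from the source): the 'first' pipeline runs on every modality, then the modality-specific one follows.

import torchvision.transforms as tf
from PIL import Image

def apply_transforms(img, modality, transform):
    for t in transform.get('first', ()):
        img = t(img)
    for t in transform.get(modality, ()):
        img = t(img)
    return img

transform_eval = {
    'first': (tf.Resize((224, 224)),),
    'rgb': (tf.ToTensor(),),
}
img = Image.new('RGB', (640, 480))
print(apply_transforms(img, 'rgb', transform_eval).shape)   # torch.Size([3, 224, 224])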
Example #8
        #q2[1:] *= -1 # Inverse computation

        w3 = np.abs(np.dot(q1, q2))
        if w3 > 1:
            logger.warning('Improper quaternion q1 = {}, q2 = {}'.format(
                q1, q2))
            w3 = 0.5
        angle = 2 * np.arccos(w3)

        return np.rad2deg(angle)


if __name__ == '__main__':
    test_tf = {
        'first': (
            tf.Resize(240),
            tf.RandomResizedCrop(224),
        ),
        'rgb': (tf.ColorJitter(), tf.ToTensor())
    }
    val_tf = {'first': (tf.Resize((224, 224)), ), 'rgb': (tf.ToTensor(), )}
    root = os.environ['SEVENSCENES'] + 'heads/'

    train_dataset = SevenScene.Train(root=root,
                                     transform=test_tf,
                                     used_mod=('rgb', ))

    val_dataset = SevenScene.Val(root=root,
                                 transform=val_tf,
                                 used_mod=('rgb', ))
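The helper at the top of this example computes the angular distance between two unit quaternions as 2 * arccos(|<q1, q2>|), in degrees; the absolute value makes q and -q (the same rotation) compare as identical. A worked example:

import numpy as np

q1 = np.array([1.0, 0.0, 0.0, 0.0])       # identity rotation, w first
half = np.deg2rad(90.0) / 2
q2 = np.array([np.cos(half), np.sin(half), 0.0, 0.0])   # 90 deg about x

w = np.abs(np.dot(q1, q2))                # cos(45 deg) ~ 0.7071
print(np.rad2deg(2 * np.arccos(w)))       # ~90.0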
Example #9
    grid = torchvis.utils.make_grid(torch.cat([batched['rgb'] for batched in sample_batched]), nrow=2)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


def show_batch_mono(sample_batched, n_row=4):
    """Show image with landmarks for a batch of samples."""
    depth = sample_batched['depth']  # /torch.max(sample_batched['depth'])
    grid = torchvis.utils.make_grid(depth, nrow=n_row)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':

    logger.setLevel('INFO')
    test_tf = {
            'first': (tf.Resize(256), tf.CenterCrop(256), ),
            'rgb': (tf.ToTensor(), ),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
    test_tf_wo_tf = {
            'first': (tf.Resize(240),),
            'rgb': (tf.ToTensor(),),
        }
    root = os.environ['SEVENSCENES'] + 'heads/'
    '''
    train_dataset = Train(root=root,
                          transform=test_tf)

    train_dataset_wo_tf = Train(root=root,
                                transform=test_tf_wo_tf,
                                used_mod=('rgb',))
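The grid-display helpers above are thin wrappers around torchvision's make_grid. A minimal usage sketch with a random batch in place of real dataset output (make_grid tiles a single-channel batch into a displayable 3-channel image):

import torch
import torchvision as torchvis
import matplotlib.pyplot as plt

batch = {'depth': torch.rand(8, 1, 56, 56)}       # fake depth batch
grid = torchvis.utils.make_grid(batch['depth'], nrow=4)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.show()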
Example #10
    grid = torchvis.utils.make_grid(torch.cat([batched['rgb'] for batched in sample_batched]), nrow=2)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


def show_batch_mono(sample_batched, n_row=4):
    """Show image with landmarks for a batch of samples."""
    depth = sample_batched['depth']  # /torch.max(sample_batched['depth'])
    grid = torchvis.utils.make_grid(depth, nrow=n_row)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':

    logger.setLevel('INFO')
    test_tf = {
            'first': (tf.Resize(140), tf.RandomCrop((112, 224))),
            'rgb': (tf.ToTensor(), ),
        }
    test_tf_wo_tf = {
            'first': (tf.Resize(240),),
            'rgb': (tf.ToTensor(),),
        }
    root = os.environ['CAMBRIDGE']
    train_dataset = TrainSequence(root=root, folders='Street/',
                                  transform=test_tf, spacing=1, num_samples=8, random=False)

    print(len(train_dataset))

    dataloader = data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=2)
    plt.figure(1)
Example #11
    images_batch = torch.cat(buffer, 0)
    grid = torchvis.utils.make_grid(images_batch, nrow=4)

    plt.imshow(grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':
    #root_to_folders = os.environ['PLATINUM'] + 'data/'
    root_to_folders = '/private/anakim/data/mboussaha/data/imori/session_575/section_3/'

    modtouse = [
        'rgb',
    ]
    transform = {
        'first': (tf.Resize(224), ),
        'rgb': (
            tf.RandomVerticalFlip(p=1),
            tf.ToTensor(),
        ),
        'depth': (tf.ToTensor(), ),
        'sem': (tf.ToTensor(), )
    }

    dataset = Platinum(
        root=root_to_folders,
        file='session-575-3.csv',
        modalities=modtouse,
        transform=transform,
        panorama_split=None)  #{'v_split': 3, 'h_split': 2, 'offset': 0})
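The panorama_split option suggests panoramas are cut into a grid of sub-views before use. A hedged sketch of one plausible reading of {'v_split': 3, 'h_split': 2, 'offset': 0} (split_panorama is a hypothetical helper, not from the source): roll the panorama horizontally by offset pixels, then tile it into h_split rows and v_split columns.

import torch

def split_panorama(pano, v_split, h_split, offset=0):
    c, h, w = pano.shape
    pano = torch.roll(pano, shifts=offset, dims=2)   # horizontal wrap-around shift
    tiles = []
    for i in range(h_split):
        for j in range(v_split):
            tiles.append(pano[:,
                              i * h // h_split:(i + 1) * h // h_split,
                              j * w // v_split:(j + 1) * w // v_split])
    return tiles

tiles = split_panorama(torch.rand(3, 512, 1024), v_split=3, h_split=2)
print(len(tiles), tiles[0].shape)         # 6 torch.Size([3, 256, 341])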