Exemple #1
0
    def __init__(self, **kwargs):
        """Load the training split and prune it down to a fraction of samples.

        Keyword Args:
            pruning (float): fraction of the dataset to discard (default 0.9).
            sparse_pruning (bool): if True, keep every k-th sample spread over
                the whole split instead of dropping the leading fraction
                (default False).
            transform (dict): per-modality transform pipelines; defaults to
                resize + tensor conversion (+ DepthTransform for depth).

        Remaining kwargs are forwarded to ``Base.__init__``.
        """
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_train.txt'
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if sparse_pruning:
            step = round(1 / (1 - pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            # Keep every `step`-th row. The mask must be boolean: indexing a
            # numpy array with a float array (as the old code did) is invalid
            # and raises IndexError on modern NumPy.
            indexor = np.zeros(len(self.data), dtype=bool)
            indexor[::step] = True
            self.data = self.data[indexor]
        else:
            self.data = self.data[round(len(self.data)*pruning):]
Exemple #2
0
    def __init__(self, **kwargs):
        """Load the sequences listed in ``TrainSplit.txt`` and prune them.

        Keyword Args:
            pruning (float): fraction of the dataset to discard (default 0.9).
            sparse_pruning (bool): if True, keep every k-th sample spread over
                the whole split instead of dropping the leading fraction
                (default False).
            transform (dict): per-modality transform pipelines.

        Remaining kwargs are forwarded to ``Base.__init__``.
        """
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)
        self.folders = list()
        with open(self.root_path + 'TrainSplit.txt', 'r') as f:
            for line in f:
                # Raw string for the regex: '\d' inside a plain literal is an
                # invalid escape (SyntaxWarning, slated to become an error).
                seq_num = int(re.search(r'(?<=sequence)\d', line).group(0))
                fold = 'seq-{:02d}/'.format(seq_num)
                self.folders.append(self.root_path + fold)

        self.load_data()

        if sparse_pruning:
            step = round(1 / (1 - pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
        else:
            self.data = self.data[round(len(self.data)*pruning):]
Exemple #3
0
    def __init__(self, **kwargs):
        """Training variant that center-crops instead of resizing.

        Keyword Args:
            zoom_percentage (float): zoom augmentation amount (default 0.2).
            tilte_angle (float): tilt augmentation angle in radians
                (default ~pi/16; kwarg name kept for caller compatibility).
            reduce_fact (float): input/output scale factor (default 480/224).
            final_depth_size (int): target depth-map side length (default 56).

        Remaining kwargs are forwarded to ``Train.__init__``.
        """
        self.zoom_percentage = kwargs.pop('zoom_percentage', 0.2)
        self.tilte_angle = kwargs.pop('tilte_angle', 3.1415 / 16)
        self.reduce_fact = kwargs.pop('reduce_fact', 480/224)
        self.final_depth_size = kwargs.pop('final_depth_size', 56)
        Train.__init__(self, **kwargs)

        # Deep-copy before mutating so the parent's transform dict (possibly
        # shared with other dataset instances) is left untouched.
        own_tf = copy.deepcopy(self.transform)
        own_tf['first'] = (tf.CenterCrop(480), )
        own_tf['depth'] = (tf.ToTensor(), tf.DepthTransform())
        self.transform = own_tf
Exemple #4
0
    def __init__(self, **kwargs):
        """Load the sequences listed in ``TestSplit.txt``.

        Keyword Args:
            light (bool): if True keep only every 10th sample (default False).
            transform (dict): per-modality transform pipelines.

        Remaining kwargs are forwarded to ``Base.__init__``.
        """
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        self.folders = list()
        with open(self.root_path + 'TestSplit.txt', 'r') as f:
            for line in f:
                # Raw string for the regex: '\d' inside a plain literal is an
                # invalid escape (SyntaxWarning, slated to become an error).
                seq_num = int(re.search(r'(?<=sequence)\d', line).group(0))
                fold = 'seq-{:02d}/'.format(seq_num)
                self.folders.append(self.root_path + fold)

        self.load_data()

        if light:
            step = 10
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
Exemple #5
0
    def __init__(self, **kwargs):
        """Load the test split; optionally keep only every 10th sample.

        Keyword Args:
            light (bool): if True keep one sample out of 10 (default False).
            transform (dict): per-modality transform pipelines.

        Remaining kwargs are forwarded to ``Base.__init__``.
        """
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_test.txt'
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if light:
            # Same selection as the former element-wise mask loop
            # (keep rows where i % 10 == 0), vectorised as a stride slice.
            step = 10
            self.data = self.data[::step]
Exemple #6
0
    def __init__(self, root, file, modalities, **kwargs):
        """Index a dataset described by a ';'-separated file under *root*.

        Args:
            root: base directory containing the index file.
            file: index file name, appended to *root*.
            modalities: modality names available in the dataset.

        Keyword Args:
            transform: per-modality transforms, or 'default' for
                Resize(224) + ToTensor on the shared 'first' stage.
            bearing: whether bearing information is used (default True).
            panorama_split: panorama slicing spec
                (default {'v_split': 3, 'h_split': 2, 'offset': 0}).

        Raises:
            TypeError: if any unrecognised keyword argument remains.
        """
        self.root = root
        self.transform = kwargs.pop('transform', 'default')
        self.bearing = kwargs.pop('bearing', True)
        default_split = {'v_split': 3, 'h_split': 2, 'offset': 0}
        self.panorama_split = kwargs.pop('panorama_split', default_split)

        # Anything left over was not recognised: fail fast.
        if kwargs:
            raise TypeError('Unexpected **kwargs: %r' % kwargs)

        if self.transform == 'default':
            self.transform = {'first': (tf.Resize((224, 224)), tf.ToTensor())}

        # Index file: first two rows skipped, ';'-separated columns.
        self.data = pd.read_csv(self.root + file, skiprows=2, sep=';')
        self.modalities = modalities
        self.used_mod = self.modalities
Exemple #7
0

def show_batch_mono(sample_batched):
    """Show image with landmarks for a batch of samples."""
    # Normalisation (e.g. /torch.max(depth)) intentionally left disabled.
    batch_depth = sample_batched['depth']
    image_grid = torchvision.utils.make_grid(batch_depth)
    plt.imshow(image_grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':

    logger.debug('Beginning main')
    logger.info('Root logging')

    # NOTE(review): this rebinds the name `tf` (used elsewhere in the file as
    # the transforms namespace) to a composed pipeline — confirm the
    # shadowing is intentional.
    tf = torchvision.transforms.Compose(
        (tf.RandomResizedCrop(224), tf.ColorJitter(), tf.ToTensor()))

    root = '/media/nathan/Data/7_Scenes/chess/'
    # root = '/private/anakim/data/7_scenes/chess/'

    # Training split of the 7-Scenes 'chess' scene with the composed transform.
    dataset = datasets.SevenScene.SevenSceneTrain(root_path=root, transform=tf)

    dataloader = data.DataLoader(dataset,
                                 batch_size=8,
                                 shuffle=True,
                                 num_workers=2)

    # Visualise batches (loop body continues beyond this excerpt).
    for b in dataloader:
        plt.figure(1)
        show_batch(b)
        plt.figure(2)
Exemple #8
0
            # Excerpt: the enclosing loop/definition is not visible here.
            # RGB tensor -> '<base>.color.png'.
            im = tv.transforms.functional.to_pil_image(b['rgb'].squeeze(0))
            im.save(folder + file_base_name + ".color.png", "PNG")
            # Depth scaled by 1e3 (presumably metres -> millimetres — confirm)
            # and written as a 32-bit integer ('I' mode) PNG.
            depth = tv.transforms.functional.to_pil_image((b['depth'].squeeze(0)*1e3).int(), mode='I')
            depth.save(folder + file_base_name + ".depth.png", "PNG", bytes=8)
            # Pose matrix: one row per line, tab-separated %16.7e values.
            with open(folder + file_base_name + '.pose.txt', 'w') as f:
                for l in b['pose']['T'].squeeze(0).numpy():
                    for num in l:
                        f.write("%16.7e\t" % num)
                    f.write('\n')


if __name__ == '__main__':
    import datasets.SevenScene as SevenS
    # Augmentation pipeline: centre-crop only; depth gets DepthTransform
    # after tensor conversion.
    aug_tf = {
        'first': (tf.CenterCrop(480),),
        'rgb': (tf.ToTensor(), ),
        'depth': (tf.ToTensor(), tf.DepthTransform())
    }

    # Standard pipeline: resize + random crop, colour jitter + normalisation
    # for RGB (mean/std presumably dataset statistics — confirm), 56px depth.
    std_tf = {
        'first': (tf.Resize(256),  tf.RandomCrop(224),),
        'rgb': (tf.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.05),
                tf.ToTensor(),
                tf.Normalize(mean=[0.4684, 0.4624, 0.4690], std=[0.2680, 0.2659, 0.2549])),
        'depth': (tf.Resize(56), tf.ToTensor(), tf.DepthTransform())
    }

    # Iterate selected 7-Scenes rooms (loop body continues beyond this excerpt).
    for room in ['pumpkin/', 'chess/', 'red_kitchen/']:
        print(room)
        root = os.environ['SEVENSCENES'] + room
Exemple #9
0
        # Excerpt: the enclosing method is not fully visible here.
        if res_file:
            # Dump the ranking as CSV, pairing each query id with its ranked ids.
            with open('res_file.csv', 'w') as csvfile:
                spamwriter = csv.writer(csvfile, delimiter=',')
                for i, id in enumerate(idx):
                    spamwriter.writerow(list(zip(id, ranked[i])))

        return ranked


if __name__ == '__main__':
    logger.setLevel('INFO')
    # Modality name -> index file listing its samples.
    modtouse = {'rgb': 'dataset.txt', 'depth': 'mono_depth_dataset.txt'}
    # Training-time (stochastic) transforms per modality.
    transform = {
        'first': (tf.RandomResizedCrop(224), ),
        'rgb': (tf.ToTensor(), ),
        'depth': (tf.ToTensor(), )
    }
    # Deterministic evaluation transform.
    transform_eval = {
        'first': (
            tf.Resize((224, 224)),
            tf.ToTensor(),
        ),
    }

    # RobotCar query split, bearing disabled
    # (script continues beyond this excerpt).
    query_data = Robotcar.VBLDataset(root=os.environ['ROBOTCAR'] +
                                     'Robotcar_D1/Query/',
                                     modalities={'rgb': 'query.txt'},
                                     coord_file='coordxIm.txt',
                                     transform=transform_eval,
                                     bearing=False)
Exemple #10
0
        # Excerpt: the start of the enclosing function is not visible here.
        # w3 > 1 is outside arccos's domain — presumably caused by
        # unnormalised quaternions. The code falls back to w3 = 0.5 (~120 deg);
        # NOTE(review): clamping to 1.0 (0 deg) might be intended — confirm.
        if w3 > 1:
            logger.warning('Unproper quaternion q1 = {}, q2 = {}'.format(
                q1, q2))
            w3 = 0.5
        # Rotation angle from the scalar part, converted to degrees.
        angle = 2 * np.arccos(w3)

        return np.rad2deg(angle)


if __name__ == '__main__':
    # Training transforms: resize, random crop, colour jitter for RGB.
    test_tf = {
        'first': (
            tf.Resize(240),
            tf.RandomResizedCrop(224),
        ),
        'rgb': (tf.ColorJitter(), tf.ToTensor())
    }
    # Validation: deterministic resize only.
    val_tf = {'first': (tf.Resize((224, 224)), ), 'rgb': (tf.ToTensor(), )}
    root = os.environ['SEVENSCENES'] + 'heads/'

    # RGB-only train/val datasets on the 7-Scenes 'heads' scene.
    train_dataset = SevenScene.Train(root=root,
                                     transform=test_tf,
                                     used_mod=('rgb', ))

    val_dataset = SevenScene.Val(root=root,
                                 transform=val_tf,
                                 used_mod=('rgb', ))

    dtload = utils.data.DataLoader(train_dataset, batch_size=24)

    # Pose network (script continues beyond this excerpt).
    net = Pose.Main()
Exemple #11
0
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


def show_batch_mono(sample_batched, n_row=4):
    """Show image with landmarks for a batch of samples."""
    # Normalisation (e.g. /torch.max(depth)) intentionally left disabled.
    batch_depth = sample_batched['depth']
    depth_grid = torchvis.utils.make_grid(batch_depth, nrow=n_row)
    plt.imshow(depth_grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':

    logger.setLevel('INFO')
    # Deterministic transforms including the depth modality.
    test_tf = {
            'first': (tf.Resize(256), tf.CenterCrop(256), ),
            'rgb': (tf.ToTensor(), ),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
    # RGB-only variant without depth handling.
    test_tf_wo_tf = {
            'first': (tf.Resize(240),),
            'rgb': (tf.ToTensor(),),
        }
    root = os.environ['SEVENSCENES'] + 'heads/'
    '''
    train_dataset = Train(root=root,
                          transform=test_tf)

    train_dataset_wo_tf = Train(root=root,
                                transform=test_tf_wo_tf,
                                used_mod=('rgb',))
    '''
Exemple #12
0
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


def show_batch_mono(sample_batched, n_row=4):
    """Show image with landmarks for a batch of samples."""
    depth_maps = sample_batched['depth']  # /torch.max(sample_batched['depth'])
    # Tile the depth maps into a single image, n_row per grid row.
    tiled = torchvis.utils.make_grid(depth_maps, nrow=n_row)
    plt.imshow(tiled.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':

    logger.setLevel('INFO')
    # Non-square random crop (112x224) after resizing the shorter side to 140.
    test_tf = {
            'first': (tf.Resize(140), tf.RandomCrop((112, 224))),
            'rgb': (tf.ToTensor(), ),
        }
    # RGB-only resize variant (unused below in this excerpt).
    test_tf_wo_tf = {
            'first': (tf.Resize(240),),
            'rgb': (tf.ToTensor(),),
        }
    root = os.environ['CAMBRIDGE']
    # 8-frame sequences from the Cambridge 'Street' scene, fixed order.
    train_dataset = TrainSequence(root=root, folders='Street/',
                                  transform=test_tf, spacing=1, num_samples=8, random=False)

    print(len(train_dataset))

    dataloader = data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=2)
    plt.figure(1)

    # Interactive plotting (script continues beyond this excerpt).
    plt.ion()
Exemple #13
0
    plt.imshow(grid.numpy().transpose((1, 2, 0)))


if __name__ == '__main__':
    #root_to_folders = os.environ['PLATINUM'] + 'data/'
    root_to_folders = '/private/anakim/data/mboussaha/data/imori/session_575/section_3/'

    modtouse = [
        'rgb',
    ]
    transform = {
        'first': (tf.Resize((224)), ),
        'rgb': (
            tf.RandomVerticalFlip(p=1),
            tf.ToTensor(),
        ),
        'depth': (tf.ToTensor(), ),
        'sem': (tf.ToTensor(), )
    }

    dataset = Platinum(
        root=root_to_folders,
        file='session-575-3.csv',
        modalities=modtouse,
        transform=transform,
        panorama_split=None)  #{'v_split': 3, 'h_split': 2, 'offset': 0})

    dataloader = utils.data.DataLoader(dataset,
                                       batch_size=4,
                                       shuffle=True,