Example #1
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)
        self.folders = list()
        with open(self.root_path + 'TrainSplit.txt', 'r') as f:
            for line in f:
                # Map e.g. 'sequence1' in the split file to the folder 'seq-01/'
                # (raw string and \d+ so multi-digit sequence numbers also match)
                seq_num = int(re.search(r'(?<=sequence)\d+', line).group(0))
                self.folders.append(self.root_path + 'seq-{:02d}/'.format(seq_num))

        self.load_data()

        if sparse_pruning:
            # Keep every step-th sample, spread uniformly over the sequence
            step = round(1 / (1 - pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
        else:
            # Keep only the last (1 - pruning) fraction as a contiguous block
            self.data = self.data[round(len(self.data) * pruning):]
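Both pruning modes keep roughly the same fraction of samples but distribute them differently: with the default pruning=0.9, the sparse branch keeps every 10th frame spread across the whole sequence, while the dense branch keeps only the final 10% as one contiguous block. A minimal sketch of the difference on a toy list (frames is a hypothetical stand-in for self.data):

pruning = 0.9
frames = list(range(20))          # stand-in for self.data

step = round(1 / (1 - pruning))   # -> 10
sparse = [f for i, f in enumerate(frames) if i % step == 0]
dense = frames[round(len(frames) * pruning):]

print(sparse)  # [0, 10]: every 10th frame, spread over the sequence
print(dense)   # [18, 19]: the last 10% as a contiguous block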
Example #2
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        pruning = kwargs.pop('pruning', 0.9)
        sparse_pruning = kwargs.pop('sparse_pruning', False)

        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_train.txt'
        # header=1: skip the file's first line; column names are on the second
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if sparse_pruning:
            # Keep every step-th sample via a boolean mask (a float mask, as in
            # the original loop, would raise an IndexError on a NumPy array)
            step = round(1 / (1 - pruning))
            logger.info('Computed step for pruning: {}'.format(step))
            indexor = np.zeros(len(self.data), dtype=bool)
            indexor[::step] = True
            self.data = self.data[indexor]
        else:
            # Keep only the last (1 - pruning) fraction as a contiguous block
            self.data = self.data[round(len(self.data) * pruning):]
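Since self.data is a NumPy array here, the boolean mask built above is equivalent to plain stride slicing, which is shorter and needs no temporary array. A quick check of that equivalence (data is a toy stand-in):

import numpy as np

data = np.arange(50)              # stand-in for self.data
step = 10
mask = np.zeros(len(data), dtype=bool)
mask[::step] = True
assert (data[mask] == data[::step]).all()  # identical selections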
Example #3
    def __init__(self, **kwargs):
        self.zoom_percentage = kwargs.pop('zoom_percentage', 0.2)
        self.tilt_angle = kwargs.pop('tilt_angle', 3.1415 / 16)  # ~pi/16 radians
        self.reduce_fact = kwargs.pop('reduce_fact', 480/224)
        self.final_depth_size = kwargs.pop('final_depth_size', 56)
        Train.__init__(self, **kwargs)

        # Deep-copy so the overrides below do not mutate the transform dict
        # shared with the parent Train dataset
        self.transform = copy.deepcopy(self.transform)
        self.transform['first'] = (tf.CenterCrop(480),)
        self.transform['depth'] = (tf.ToTensor(), tf.DepthTransform())
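The deepcopy is what keeps this override local: self.transform may be a dict shared with the parent Train dataset or other instances, and assigning into a mere alias would silently change their pipelines too. A minimal illustration with plain dicts:

import copy

shared = {'first': ('resize',), 'depth': ('to_tensor',)}

alias = shared                    # no copy: both names point to one dict
alias['first'] = ('crop',)
print(shared['first'])            # ('crop',) -- the shared config changed too

shared = {'first': ('resize',), 'depth': ('to_tensor',)}
independent = copy.deepcopy(shared)
independent['first'] = ('crop',)
print(shared['first'])            # ('resize',) -- original left intact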
Example #4
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        self.folders = list()
        with open(self.root_path + 'TestSplit.txt', 'r') as f:
            for line in f:
                # Same mapping as the train split: 'sequenceN' -> 'seq-0N/'
                seq_num = int(re.search(r'(?<=sequence)\d+', line).group(0))
                self.folders.append(self.root_path + 'seq-{:02d}/'.format(seq_num))

        self.load_data()

        if light:
            # 'light' mode: keep only every 10th sample
            step = 10
            self.data = [dat for i, dat in enumerate(self.data) if i % step == 0]
Example #5
    def __init__(self, **kwargs):
        default_tf = {
            'first': (tf.Resize((224, 224)),),
            'rgb': (tf.ToTensor(),),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
        light = kwargs.pop('light', False)
        Base.__init__(self,
                      transform=kwargs.pop('transform', default_tf),
                      **kwargs)

        data_file_name = self.root_path + 'dataset_test.txt'
        self.data = pd.read_csv(data_file_name, header=1, sep=' ').values

        if light:
            # 'light' mode: keep only every 10th sample via a boolean mask
            step = 10
            indexor = np.zeros(len(self.data), dtype=bool)
            indexor[::step] = True
            self.data = self.data[indexor]
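All of these constructors pass the same keyed transform dict to Base, which suggests one convention: the 'first' chain runs on every modality, followed by each modality's own chain. Base's implementation is not shown here, so the sketch below is only a hedged reading of that convention; apply_transforms is hypothetical, not part of the code above:

def apply_transforms(sample, transform):
    """sample maps modality name -> raw input, e.g. {'rgb': ..., 'depth': ...}."""
    out = {}
    for mod, value in sample.items():
        for t in transform.get('first', ()):   # shared transforms, e.g. Resize
            value = t(value)
        for t in transform.get(mod, ()):       # modality-specific chain
            value = t(value)
        out[mod] = value
    return out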
Example #6
            im.save(folder + file_base_name + ".color.png", "PNG")
            # Store depth in millimetres as a 32-bit integer image (PIL mode 'I');
            # the original 'bytes=8' argument is not a PNG save option and is dropped
            depth = tv.transforms.functional.to_pil_image((b['depth'].squeeze(0) * 1e3).int(), mode='I')
            depth.save(folder + file_base_name + ".depth.png", "PNG")
            # Write the 4x4 pose matrix, one row per line, tab-separated
            with open(folder + file_base_name + '.pose.txt', 'w') as f:
                for row in b['pose']['T'].squeeze(0).numpy():
                    for num in row:
                        f.write("%16.7e\t" % num)
                    f.write('\n')


if __name__ == '__main__':
    import datasets.SevenScene as SevenS
    aug_tf = {
        'first': (tf.CenterCrop(480),),
        'rgb': (tf.ToTensor(), ),
        'depth': (tf.ToTensor(), tf.DepthTransform())
    }

    std_tf = {
        'first': (tf.Resize(256),  tf.RandomCrop(224),),
        'rgb': (tf.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.05),
                tf.ToTensor(),
                tf.Normalize(mean=[0.4684, 0.4624, 0.4690], std=[0.2680, 0.2659, 0.2549])),
        'depth': (tf.Resize(56), tf.ToTensor(), tf.DepthTransform())
    }

    for room in ['pumpkin/', 'chess/', 'red_kitchen/']:
        print(room)
        root = os.environ['SEVENSCENES'] + room

        train_aug_dataset = SevenS.AugmentedTrain(root=root,
                                                  transform=aug_tf)  # assumed completion of the truncated call, using aug_tf from above
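The pose files written earlier in this example are plain whitespace-separated 4x4 matrices, so they round-trip through np.loadtxt. A minimal read-back sketch (the frame path is hypothetical, following the naming scheme above):

import numpy as np

T = np.loadtxt('seq-01/frame-000000.pose.txt')
print(T.shape)  # (4, 4): homogeneous pose matrix, one row per line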
Example #7

def show_batch_mono(sample_batched, n_row=4):
    """Show a grid of depth maps for a batch of samples."""
    depth = sample_batched['depth']  # optionally scale by /torch.max(sample_batched['depth'])
    grid = torchvis.utils.make_grid(depth, nrow=n_row)
    plt.imshow(grid.numpy().transpose((1, 2, 0)))  # CHW -> HWC for matplotlib


if __name__ == '__main__':

    logger.setLevel('INFO')
    test_tf = {
            'first': (tf.Resize(256), tf.CenterCrop(256), ),
            'rgb': (tf.ToTensor(), ),
            'depth': (tf.ToTensor(), tf.DepthTransform())
        }
    test_tf_wo_tf = {
            'first': (tf.Resize(240),),
            'rgb': (tf.ToTensor(),),
        }
    root = os.environ['SEVENSCENES'] + 'heads/'
    '''
    train_dataset = Train(root=root,
                          transform=test_tf)

    train_dataset_wo_tf = Train(root=root,
                                transform=test_tf_wo_tf,
                                used_mod=('rgb',))
    '''
    test_dataset = Test(root=root, light=True)
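A minimal usage sketch to close the loop, assuming torch is importable and the dataset yields dicts of tensors (so the default collate applies); the batch size is illustrative:

from torch.utils.data import DataLoader

loader = DataLoader(test_dataset, batch_size=16, shuffle=False)
batch = next(iter(loader))        # dict with 'rgb', 'depth', ... tensors
show_batch_mono(batch)
plt.show()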