Example #1
0
 def AlbumentationTrainTransform(self):
     """Build the training transform: random horizontal flip, one 16x16
     cutout hole, then tensor conversion with 0.5 mean/std normalization.

     Returns a callable that maps a PIL image (or array-like) to a tensor.
     """
     pipeline = tc.Compose([
         ta.HorizontalFlip(),
         ta.Cutout(num_holes=1, max_h_size=16, max_w_size=16),
         tp.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
     ])

     def _apply(img):
         # Albumentations takes a named ndarray argument and returns a dict.
         return pipeline(image=np.array(img))["image"]

     return _apply
Example #2
0
 def AlbumentationTestTransform(self):
     """Build the evaluation transform: tensor conversion with the
     dataset's per-channel mean/std normalization (no augmentation).

     Returns a callable that maps a PIL image (or array-like) to a tensor.
     """
     normalization = dict(mean=(0.4802, 0.4481, 0.3975),
                          std=(0.2302, 0.2265, 0.2262))
     pipeline = tc.Compose([tp.ToTensor(normalization)])

     def _apply(img):
         # Albumentations takes a named ndarray argument and returns a dict.
         return pipeline(image=np.array(img))["image"]

     return _apply
Example #3
0
 def AlbumentationTrainTransform(self):
     """Build the training transform: pad, random 32x32 crop, one 8x8
     cutout hole, random horizontal flip, then tensor conversion with
     0.5 mean/std normalization.

     NOTE(review): PadIfNeeded(4, 4) only enforces a minimum size of
     4x4, which is a no-op for typical 32x32 inputs; the usual
     pad-4-then-crop-32 recipe would need min_height/min_width of 40.
     Behavior is preserved here — confirm the intended value.
     """
     steps = [
         ta.PadIfNeeded(4, 4, always_apply=True),
         ta.RandomCrop(height=32, width=32, always_apply=True),
         ta.Cutout(num_holes=1, max_h_size=8, max_w_size=8, always_apply=True),
         ta.HorizontalFlip(),
         tp.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
     ]
     pipeline = tc.Compose(steps)

     def _apply(img):
         # Albumentations takes a named ndarray argument and returns a dict.
         return pipeline(image=np.array(img))["image"]

     return _apply
Example #4
0
 def AlbumentationTrainTransform(self):
     """Build the training transform: random horizontal flip, small random
     rotation, a 30x30 random crop, ImageNet mean/std normalization, and
     tensor conversion.

     Returns a callable that maps a PIL image (or array-like) to a tensor.
     """
     tf = tc.Compose([
         ta.HorizontalFlip(p=0.5),
         ta.Rotate(limit=(-20, 20)),
         # Fix: p must be a probability in [0, 1]. The original p=5.0 was
         # invalid but acted as "always apply" (albumentations compares
         # random() < p), so p=1.0 keeps the exact same behavior.
         ta.RandomCrop(height=30, width=30, p=1.0),
         ta.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
         tp.ToTensor(),
     ])
     # Albumentations takes a named ndarray argument and returns a dict.
     return lambda img: tf(image=np.array(img))["image"]
Example #5
0
File: aachen.py  Project: a1302z/geomapnet
                           night_augmentation=None,
                           output_types=['pose', 'label'])
     tf = transforms.Compose([
         transforms.Resize(224),
         transforms.CenterCrop(224),
         transforms.ToTensor()
     ])
     L = len(test)
     print('Dataset has {:d} entries'.format(L))
     for i in tqdm.tqdm(range(len(test)),
                        total=L,
                        desc='All there?',
                        leave=False):
         test[i]
     imgs = torch.stack(
         [tf(test[i][0]) for i in range(L // 16 - 7, L // 16 + 7)]
         #+ [tf(test[i][0]) for i in range(2*(L//3)-7, 2*(L//3)+7)]
     )
     grid = utils.make_grid(imgs, 7)
     plt.imshow(grid.permute(1, 2, 0))
     plt.show()
 elif test_name == 'synthetic':
     test = AachenDayNight('../data/deepslam_data/AachenDayNight/',
                           True,
                           224,
                           use_synthetic=True,
                           use_stylization=16,
                           output_types=['pose'])
     print('Length synthetic dataset: {:d}'.format(len(test)))
 """
 loader = AachenDayNight('../data/deepslam_data/AachenDayNight/', True, verbose=True)
Example #6
0
 def AlbumentationTestTransform(self):
     """Build the evaluation transform: ImageNet mean/std normalization
     followed by tensor conversion (no augmentation).

     Returns a callable that maps a PIL image (or array-like) to a tensor.
     """
     pipeline = tc.Compose([
         ta.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
         tp.ToTensor(),
     ])

     def _apply(img):
         # Albumentations takes a named ndarray argument and returns a dict.
         return pipeline(image=np.array(img))["image"]

     return _apply