Example #1
def get_transformations(p):
    """ Return transformations for training and evaluating """
    import torch
    from torchvision import transforms
    from data import custom_transforms as tr

    db_name = p['train_db_name']  # dataset name (unused below)

    # AlexNet-style PCA color-augmentation statistics (defined but unused below)
    __imagenet_pca = {
        'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
        'eigvec': torch.Tensor([
            [-0.5675, 0.7192, 0.4009],
            [-0.5808, -0.0045, -0.8140],
            [-0.5836, -0.6948, 0.4203],
        ])
    }

    # Training transformations

    # Horizontal flips with probability of 0.5
    transforms_tr = [tr.RandomHorizontalFlip()]

    # Fixed Resize to input resolution
    transforms_tr.extend([
        tr.FixedResize(
            resolutions={x: tuple(p.TRAIN.SCALE) for x in p.ALL_TASKS.FLAGVALS},
            flagvals={x: p.ALL_TASKS.FLAGVALS[x] for x in p.ALL_TASKS.FLAGVALS})
    ])

    transforms_tr.extend([
        tr.AddIgnoreRegions(),
        tr.ToTensor(),
        tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    transforms_tr = transforms.Compose(transforms_tr)

    # Testing transformations (used for evaluation during training)
    transforms_ts = []
    transforms_ts.extend([
        tr.FixedResize(
            resolutions={x: tuple(p.TRAIN.SCALE) for x in p.ALL_TASKS.FLAGVALS},
            flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})
    ])
    transforms_ts.extend([
        tr.AddIgnoreRegions(),
        tr.ToTensor(),
        tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    transforms_ts = transforms.Compose(transforms_ts)

    return transforms_tr, transforms_ts
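The `p` argument in Examples #1 and #2 is a nested, attribute-accessible config. A minimal sketch of one, using easydict.EasyDict; the key names are inferred from the snippet above, not taken from the repo's actual config files, and the final call only runs inside the repo where data.custom_transforms is importable:

import cv2
from easydict import EasyDict

p = EasyDict()
p.train_db_name = 'NYUD'              # read via p['train_db_name']
p.TRAIN = EasyDict(SCALE=[512, 512])  # training input resolution
p.TEST = EasyDict(SCALE=[512, 512])   # evaluation input resolution
# Per-task OpenCV interpolation flags; the keys double as the task list.
p.ALL_TASKS = EasyDict(FLAGVALS={'image': cv2.INTER_CUBIC,
                                 'semseg': cv2.INTER_NEAREST,
                                 'depth': cv2.INTER_LINEAR})
p.TASKS = EasyDict(FLAGVALS={'image': cv2.INTER_CUBIC,
                             'semseg': cv2.INTER_NEAREST})

transforms_tr, transforms_ts = get_transformations(p)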
Example #2
def get_transformations(p):
    """ Return transformations for training and evaluating """
    from torchvision import transforms
    from data import custom_transforms as tr

    # Training transformations

    # Horizontal flips with probability of 0.5
    transforms_tr = [tr.RandomHorizontalFlip()]

    # Rotations and scaling
    transforms_tr.extend([
        tr.ScaleNRotate(
            rots=(-20, 20), scales=(.75, 1.25),
            flagvals={x: p.ALL_TASKS.FLAGVALS[x] for x in p.ALL_TASKS.FLAGVALS})
    ])
    # Fixed Resize to input resolution
    transforms_tr.extend([
        tr.FixedResize(
            resolutions={x: tuple(p.TRAIN.SCALE) for x in p.ALL_TASKS.FLAGVALS},
            flagvals={x: p.ALL_TASKS.FLAGVALS[x] for x in p.ALL_TASKS.FLAGVALS})
    ])
    transforms_tr.extend([
        tr.AddIgnoreRegions(),
        tr.ToTensor(),
        tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    transforms_tr = transforms.Compose(transforms_tr)

    # Testing transformations (used for evaluation during training)
    transforms_ts = []
    transforms_ts.extend([
        tr.FixedResize(
            resolutions={x: tuple(p.TEST.SCALE) for x in p.TASKS.FLAGVALS},
            flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})
    ])
    transforms_ts.extend([
        tr.AddIgnoreRegions(),
        tr.ToTensor(),
        tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    transforms_ts = transforms.Compose(transforms_ts)

    return transforms_tr, transforms_ts
Example #3
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(),
            tr.ToTensor(),
        ])
        return composed_transforms(sample)
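Note the ordering in Examples #3 through #10: these dict-based custom transforms normalize the raw numpy sample first and convert to tensors last. With stock torchvision transforms the order reverses, because transforms.Normalize expects a tensor. A self-contained single-image equivalent, using standard torchvision only (not the repo's dict-style samples):

from torchvision import transforms

# ToTensor comes first here: transforms.Normalize operates on tensors,
# not on PIL images.
single_image_ts = transforms.Compose([
    transforms.Resize(400),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
])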
Example #4
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=self.source_dist['mean'], std=self.source_dist['std']),
            tr.ToTensor(),
        ])
        return composed_transforms(sample)
Example #5
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=self.args.crop_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #6
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=self.mean_std[0],
                         std=self.mean_std[1]),  # tr.Normalize(),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #7
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=(0.2382, 0.2741, 0.3068),
                         std=(0.1586, 0.1593, 0.1618)),
            # tr.Normalize(),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #8
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=(0.2709, 0.3400, 0.3707),
                         std=(0.1403, 0.1570, 0.1658)),
            # tr.Normalize(),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #9
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=(0.1420, 0.2116, 0.2823),
                         std=(0.0899, 0.1083, 0.1310)),
            # tr.Normalize(),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #10
    def transform_ts(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedResize(size=400),
            tr.Normalize(mean=(0.3441, 0.3809, 0.4014),
                         std=(0.1883, 0.2039, 0.2119)),
            # tr.Normalize(),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
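Examples #6 through #10 pass dataset-specific channel statistics instead of the ImageNet defaults used earlier. Such numbers are normally precomputed once over the training images; a minimal sketch of that computation (the helper and its loading loop are illustrative, not repo code):

import numpy as np
from PIL import Image

def compute_channel_stats(image_paths):
    """Per-channel mean/std over a set of RGB images scaled to [0, 1]."""
    pixel_sum = np.zeros(3)
    pixel_sq_sum = np.zeros(3)
    n_pixels = 0
    for path in image_paths:
        img = np.asarray(Image.open(path).convert('RGB'), dtype=np.float64) / 255.0
        pixel_sum += img.sum(axis=(0, 1))
        pixel_sq_sum += (img ** 2).sum(axis=(0, 1))
        n_pixels += img.shape[0] * img.shape[1]
    mean = pixel_sum / n_pixels
    std = np.sqrt(pixel_sq_sum / n_pixels - mean ** 2)  # std = sqrt(E[x^2] - E[x]^2)
    return mean, std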
Example #11
def test_mt():
    import cv2
    import numpy as np
    import torch
    import data.custom_transforms as tr
    import matplotlib.pyplot as plt
    from torchvision import transforms
    transform = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-2, 2),
                        scales=(.75, 1.25),
                        flagvals={
                            'image': cv2.INTER_CUBIC,
                            'edge': cv2.INTER_NEAREST,
                            'semseg': cv2.INTER_NEAREST,
                            'normals': cv2.INTER_LINEAR,
                            'depth': cv2.INTER_LINEAR
                        }),
        tr.FixedResize(
            resolutions={
                'image': (512, 512),
                'edge': (512, 512),
                'semseg': (512, 512),
                'normals': (512, 512),
                'depth': (512, 512)
            },
            flagvals={
                'image': cv2.INTER_CUBIC,
                'edge': cv2.INTER_NEAREST,
                'semseg': cv2.INTER_NEAREST,
                'normals': cv2.INTER_LINEAR,
                'depth': cv2.INTER_LINEAR
            }),
        tr.AddIgnoreRegions(),
        tr.ToTensor()
    ])
    dataset = NYUD_MT(split='train',
                      transform=transform,
                      retname=True,
                      do_edge=True,
                      do_semseg=True,
                      do_normals=True,
                      do_depth=True)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=5,
                                             shuffle=False,
                                             num_workers=5)

    for i, sample in enumerate(dataloader):
        print(i)
        for j in range(sample['image'].shape[0]):
            f, ax_arr = plt.subplots(5)
            for k in range(len(ax_arr)):
                ax_arr[k].cla()
            ax_arr[0].imshow(np.transpose(sample['image'][j], (1, 2, 0)))
            ax_arr[1].imshow(sample['edge'][j, 0])
            ax_arr[2].imshow(sample['semseg'][j, 0] / 40)
            ax_arr[3].imshow(np.transpose(sample['normals'][j], (1, 2, 0)))
            max_depth = torch.max(
                sample['depth'][j, 0][sample['depth'][j, 0] != 255]).item()
            ax_arr[4].imshow(
                sample['depth'][j, 0] /
                max_depth)  # Not ideal. Better is to show inverse depth.

            plt.show()
        break
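The depth panel above normalizes by the maximum, which the snippet's own comment flags as not ideal. A small sketch of the inverse-depth display it suggests instead (the helper is hypothetical; the ignore value of 255 matches the masking in the snippet):

import torch

def inverse_depth_for_display(depth, ignore_val=255):
    """Return inverse depth, zeroing ignore-region and non-positive pixels."""
    valid = (depth != ignore_val) & (depth > 0)
    inv = torch.zeros_like(depth)
    inv[valid] = 1.0 / depth[valid]
    return inv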
Example #12
def test_all():
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import torch
    import data.custom_transforms as tr
    from torchvision import transforms
    from utils.custom_collate import collate_mil  # unused here

    transform = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-90, 90),
                        scales=(1., 1.),
                        flagvals={
                            'image': cv2.INTER_CUBIC,
                            'edge': cv2.INTER_NEAREST,
                            'semseg': cv2.INTER_NEAREST,
                            'human_parts': cv2.INTER_NEAREST,
                            'normals': cv2.INTER_CUBIC,
                            'sal': cv2.INTER_NEAREST
                        }),
        tr.FixedResize(
            resolutions={
                'image': (512, 512),
                'edge': (512, 512),
                'semseg': (512, 512),
                'human_parts': (512, 512),
                'normals': (512, 512),
                'sal': (512, 512)
            },
            flagvals={
                'image': cv2.INTER_CUBIC,
                'edge': cv2.INTER_NEAREST,
                'semseg': cv2.INTER_NEAREST,
                'human_parts': cv2.INTER_NEAREST,
                'normals': cv2.INTER_CUBIC,
                'sal': cv2.INTER_NEAREST
            }),
        tr.AddIgnoreRegions(),
        tr.ToTensor()
    ])
    dataset = PASCALContext(split='train',
                            transform=transform,
                            retname=True,
                            do_edge=True,
                            do_semseg=True,
                            do_human_parts=True,
                            do_normals=True,
                            do_sal=True)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=2,
                                             shuffle=False,
                                             num_workers=0)

    for i, sample in enumerate(dataloader):
        print(i)
        for j in range(sample['image'].shape[0]):
            f, ax_arr = plt.subplots(2, 3)

            for k in range(len(ax_arr)):
                for l in range(len(ax_arr[k])):
                    ax_arr[k][l].cla()

            ax_arr[0][0].imshow(np.transpose(sample['image'][j], (1, 2, 0)))
            ax_arr[0][1].imshow(
                np.transpose(sample['edge'][j], (1, 2, 0))[:, :, 0])
            ax_arr[0][2].imshow(
                np.transpose(sample['semseg'][j], (1, 2, 0))[:, :, 0] / 20.)
            ax_arr[1][0].imshow(
                np.transpose(sample['human_parts'][j],
                             (1, 2, 0))[:, :, 0] / 6.)
            ax_arr[1][1].imshow(np.transpose(sample['normals'][j], (1, 2, 0)))
            ax_arr[1][2].imshow(
                np.transpose(sample['sal'][j], (1, 2, 0))[:, :, 0])

            plt.show()
        break