# Example 1
def get_augmentation(mode, input_size):
    """Return the training augmentation pipeline for the given modality.

    Args:
        mode: one of ``'RGB'``, ``'Flow'``, or ``'RGBDiff'``.
        input_size: target crop size forwarded to ``GroupMultiScaleCrop``.

    Returns:
        A ``torchvision.transforms.Compose`` wrapping a multi-scale crop.

    Raises:
        ValueError: if *mode* is not a recognised modality.
    """
    # RGB keeps the extra .66 scale; Flow and RGBDiff share the shorter list.
    scales_by_mode = {
        'RGB': [1, .875, .75, .66],
        'Flow': [1, .875, .75],
        'RGBDiff': [1, .875, .75],
    }
    if mode not in scales_by_mode:
        # Previously an unknown mode silently fell through and returned None,
        # which only surfaced later at the call site; fail fast instead.
        raise ValueError(f"Unknown modality: {mode!r}")
    return torchvision.transforms.Compose(
        [GroupMultiScaleCrop(input_size, scales_by_mode[mode])])
    def __init__(self, cfg: DictConfig):
        """Store gulp directories and build the train/test transform pipelines.

        Args:
            cfg: configuration object exposing ``modality`` and a ``data``
                section (gulp dirs, preprocessing, augmentation settings).
        """
        super().__init__()
        self.cfg = cfg
        self.train_gulp_dir = Path(cfg.data.train_gulp_dir)
        self.val_gulp_dir = Path(cfg.data.val_gulp_dir)
        self.test_gulp_dir = Path(cfg.data.test_gulp_dir)

        modality = self.cfg.modality
        preprocessing = self.cfg.data.preprocessing

        # RGB clips carry 3 channels; otherwise each segment frame
        # contributes a 2-channel (u, v) pair.
        if modality == "RGB":
            channel_count = 3
        else:
            channel_count = 2 * self.cfg.data.segment_length

        # Steps shared by both the training and the test pipelines.
        shared_steps = [
            Stack(bgr=modality == "RGB" and preprocessing.get("bgr", False)),
            ToTorchFormatTensor(div=preprocessing.rescale),
            GroupNormalize(
                mean=list(preprocessing.mean),
                std=list(preprocessing.std),
            ),
            ExtractTimeFromChannel(channel_count),
        ]
        common_transform = Compose(shared_steps)

        self.train_transform = Compose([
            GroupMultiScaleCrop(
                preprocessing.input_size,
                self.cfg.data.train_augmentation.multiscale_crop_scales,
            ),
            GroupRandomHorizontalFlip(is_flow=modality == "Flow"),
            common_transform,
        ])
        self.test_transform = Compose([
            GroupScale(self.cfg.data.test_augmentation.rescale_size),
            GroupCenterCrop(preprocessing.input_size),
            common_transform,
        ])
# Example 3
    def get_augmentation(self):
        """Build one training augmentation per stream: iframe, motion
        vector, and residual.

        Returns:
            Tuple ``(transform_i, transform_m, transform_r)`` of
            ``torchvision.transforms.Compose`` pipelines.
        """
        crop_scales = {
            'i': [1, .875, .75, .66],
            'm': [1, .875, .75],
            'r': [1, .875, .75],
        }

        print('Augmentation scales_i:', crop_scales['i'])
        print('Augmentation scales_m:', crop_scales['m'])
        print('Augmentation scales_r:', crop_scales['r'])

        def build(scales, flip_is_mv):
            # Multi-scale crop followed by a flip that is motion-vector
            # aware only for the 'm' stream.
            return torchvision.transforms.Compose([
                GroupMultiScaleCrop(self._input_size, scales),
                GroupRandomHorizontalFlip(is_mv=flip_is_mv),
            ])

        return (build(crop_scales['i'], False),
                build(crop_scales['m'], True),
                build(crop_scales['r'], False))
# Example 4
    def get_augmentation(self):
        """Build the training augmentation for the current representation.

        Returns:
            A ``torchvision.transforms.Compose`` with a multi-scale crop and
            a horizontal flip (motion-vector aware when representation is 'mv').
        """
        is_mv = self._representation == 'mv'
        # Motion vectors and residuals get one fewer crop scale than iframes.
        if self._representation not in ('mv', 'residual'):
            scales = [1, .875, .75, .66]
        else:
            scales = [1, .875, .75]

        print('Augmentation scales:', scales)
        steps = [
            GroupMultiScaleCrop(self._input_size, scales),
            GroupRandomHorizontalFlip(is_mv=is_mv),
        ]
        return torchvision.transforms.Compose(steps)
# Example 5
 def get_augmentation(self):
     """Return the RGB training augmentation: multi-scale crop plus a
     plain (non-motion-vector) horizontal flip."""
     crop_scales = [1, .875, .75, .66]
     steps = [
         GroupMultiScaleCrop(self._input_size, crop_scales),
         GroupRandomHorizontalFlip(is_mv=False),
     ]
     return torchvision.transforms.Compose(steps)