Example #1
 def transform_tr(self, sample):
     # if (sample['image'].width>self.args.base_size*2) and (sample['image'].height>self.args.base_size*2):
     #     composed_transforms = transforms.Compose([
     #         tr.RandomHorizontalFlip(),
     #         tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
     #         tr.RandomGaussianBlur(),
     #         tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
     #         tr.ToTensor()])
     # else:
     #     composed_transforms = transforms.Compose([
     #         # tr.FixScaleCrop(crop_size=self.args.crop_size),
     #         tr.RandomHorizontalFlip(),
     #         tr.RandomGaussianBlur(),
     #         tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
     #         tr.ToTensor()])
     composed_transforms = transforms.Compose([
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=self.args.base_size,
                            crop_size=self.args.crop_size),
         tr.RandomGaussianBlur(),
         tr.Normalize(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225)),
         tr.ToTensor()
     ])
     return composed_transforms(sample)
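
For context, a minimal sketch of how a `transform_tr` method like the one above is usually invoked: the dataset's `__getitem__` bundles the image and its label into one sample dict so the spatial transforms stay synchronized between them. All names below (dataset class, constructor arguments, file lists) are illustrative assumptions, not taken from the examples on this page.

# Minimal sketch (all names are assumptions): calling transform_tr from a Dataset.
from PIL import Image
from torch.utils.data import Dataset


class SegmentationDataset(Dataset):  # hypothetical dataset class
    def __init__(self, args, image_paths, label_paths, split='train'):
        self.args = args
        self.image_paths = image_paths
        self.label_paths = label_paths
        self.split = split

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, index):
        # image and label travel together so spatial transforms stay in sync
        sample = {'image': Image.open(self.image_paths[index]).convert('RGB'),
                  'label': Image.open(self.label_paths[index])}
        if self.split == 'train':
            return self.transform_tr(sample)  # e.g. the method from Example #1
        return sample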
Example #2
    def transform_tr(self, sample):
        """Transformations for images
        sample: {image:img, annotation:ann}

        Note: the mean and std are from ImageNet
        """
        if self.args.no_flip:
            composed_transforms = transforms.Compose([
                tr.RandomScaleCrop(base_size=self.args.base_size,
                                   crop_size=self.args.crop_size,
                                   scale_ratio=self.args.scale_ratio,
                                   fill=0),
                tr.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225)),
                tr.ToTensor()
            ])
            return composed_transforms(sample)
        else:
            composed_transforms = transforms.Compose([
                tr.RandomHorizontalFlip(),
                tr.RandomScaleCrop(base_size=self.args.base_size,
                                   crop_size=self.args.crop_size,
                                   scale_ratio=self.args.scale_ratio,
                                   fill=0),
                tr.Normalize(mean=(0.485, 0.456, 0.406),
                             std=(0.229, 0.224, 0.225)),
                tr.ToTensor()
            ])
            return composed_transforms(sample)
Example #3
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #4
def create_transforms(relax_crop, zero_crop):
    # Preparation of the data loaders
    first = [
        tr.CropFromMask(crop_elems=('image', 'gt'),
                        relax=relax_crop,
                        zero_pad=zero_crop),
        tr.FixedResize(resolutions={
            'crop_image': (512, 512),
            'crop_gt': (512, 512)
        })
    ]
    second = [
        tr.ToImage(norm_elem='extreme_points'),
        tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
        tr.ToTensor()
    ]
    train_tf = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)), *first,
        tr.ExtremePoints(sigma=10, pert=5, elem='crop_gt'), *second
    ])
    test_tf = transforms.Compose(
        [*first,
         tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'), *second])
    return train_tf, test_tf
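
A brief usage note: `create_transforms` returns separate train/test pipelines because the extreme-point perturbation (`pert=5`) is only applied during training. A hypothetical call site might look like the following; the dataset class name and its keyword arguments are assumptions, not part of the example above.

# Hypothetical call site (dataset class and keyword names are assumptions):
train_tf, test_tf = create_transforms(relax_crop=50, zero_crop=False)
train_set = VOCSegmentation(split='train', transform=train_tf)  # any dataset accepting a transform callable
val_set = VOCSegmentation(split='val', transform=test_tf)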
Example #5
 def transform_tr(self, sample):
      composed_transforms = transforms.Compose([
          tr.RandomHorizontalFlip(),
          tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
          tr.RandomGaussianBlur(),
          tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
          tr.ToTensor()])

     return composed_transforms(sample)
Example #6
    def transform_tr(self, sample):

        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            #tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
            #tr.FixScaleCrop(crop_size=self.args.crop_size),
            #tr.FixedResize(size=self.args.crop_size),
            #tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
        ])

        img = composed_transforms(sample)

        data = img['image']
        label = img['label']

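        # randomly rotate both image and label by 0, 90, 180, or 270 degrees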
        p = np.random.rand(1)[0]

        if p < 0.25:
            data = np.rot90(data, 1, (0, 1)).copy()
            label = np.rot90(label, 1, (0, 1)).copy()
        elif p >= 0.25 and p < 0.5:
            data = np.rot90(data, 2, (0, 1)).copy()
            label = np.rot90(label, 2, (0, 1)).copy()
        elif p >= 0.5 and p < 0.75:
            data = np.rot90(data, 3, (0, 1)).copy()
            label = np.rot90(label, 3, (0, 1)).copy()

        data = torch.from_numpy(data.transpose(2, 0, 1))
        label = torch.from_numpy(label)

        return {'image': data, 'label': label}
Example #7
 def transform_tr(self, sample):
     composed_transforms = transforms.Compose([
         tr.RandomCrop(self.par.base_size, self.par.crop_size, fill=255),
         tr.RandomColorJitter(),
         tr.RandomHorizontalFlip(),
         tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
         tr.ToTensor()])
     return composed_transforms(sample)
Example #8
    def transform_tr(self, sample):  # applied when self.split == 'train'
        composed_transforms = transforms.Compose([  # define the training transform pipeline
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),  # base_size and crop_size are calculated from the argparse arguments
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)  # apply the composed transforms to the sample
Example #9
 def transform_train(self):
     temp = []
     temp.append(tr.Resize(self.args.input_size))
     temp.append(tr.RandomHorizontalFlip())
     temp.append(tr.RandomRotate(15))
     temp.append(tr.RandomCrop(self.args.input_size))
     temp.append(tr.ToTensor())
     composed_transforms = transforms.Compose(temp)
     return composed_transforms
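
Unlike most of the other examples, this variant returns the Compose object itself rather than an already-transformed sample, so the caller applies it, for instance (hypothetical names):

# Hypothetical usage (dataset instance and raw_sample are assumptions):
train_transform = dataset.transform_train()  # build the Compose pipeline once
sample = train_transform(raw_sample)         # apply it to each raw sample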
Example #10
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.FixedResize(size=self.args['crop_size']),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #11
    def transform(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.cfg.DATASET.BASE_SIZE, crop_size=self.cfg.DATASET.CROP_SIZE),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #12
 def transform_tr(self, sample: dict):
     sample_transforms = transforms.Compose([
         ctr.RandomCrop(size=self.settings['rnd_crop_size']),
         ctr.RandomHorizontalFlip(p=0.5),
         ctr.ToTensor(),
         ctr.Normalize(**self.settings['normalize_params'],
                       apply_to=['image']),
         ctr.Squeeze(apply_to=['label']),
     ])
     return sample_transforms(sample)
Example #13
    def transform_tr(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixedCrop(x1=280, x2=1000, y1=50, y2=562),
            tr.RandomHorizontalFlip(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor()
        ])
        return composed_transforms(sample)
Example #14
 def transform_tr(self, sample):
     composed_transforms_tr = transforms.Compose([
         tr.RandomHorizontalFlip(),
         tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
         tr.CropFromMask(crop_elems=('image', 'gt'), relax=20, zero_pad=True),
         tr.FixedResize(resolutions={'crop_image': (256, 256), 'crop_gt': (256, 256)}),
         tr.Normalize(elems='crop_image'),
         tr.ToTensor()
     ])
     return composed_transforms_tr(sample)
Example #15
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Resize_normalize_train(mean=(0.5, 0.5, 0.5),
                                      std=(0.5, 0.5, 0.5))
        ])

        return composed_transforms(sample)
Example #16
 def transform_tr(self, sample):
     composed_transforms = transforms.Compose([
         tr.FixedResize(size=(1024, 2048)),
         tr.ColorJitter(),
         tr.RandomGaussianBlur(),
         tr.RandomMotionBlur(),
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
         tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
         tr.ToTensor()])
     return composed_transforms(sample)
Example #17
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.FixedNoMaskResize(size=self.args.crop_size),
            tr.RandomColorJeter(0.3, 0.3, 0.3, 0.3),
            tr.RandomHorizontalFlip(),
            # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #18
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            #tr.FixScaleCrop(crop_size=self.args['crop_size']),
            tr.FixedResize(size=self.args['crop_size']),
            #tr.RandomScaleCrop(base_size=self.args['base_size'], crop_size=self.args['crop_size'], min_scale = 1.0, max_scale = 1.5),
            #tr.RandomGaussianBlur(),
            tr.Normalize(mean=self.mean, std=self.std),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #19
    def transform_tr_part1_1(self, sample):
        if self.args.use_small:
            composed_transforms = transforms.Compose(
                [tr.FixScaleCrop(crop_size=self.args.crop_size)])
        else:
            composed_transforms = transforms.Compose([
                tr.RandomHorizontalFlip(),
                tr.RandomScaleCrop(base_size=self.args.base_size,
                                   crop_size=self.args.crop_size)
            ])  # Zhiwei

        return composed_transforms(sample)
Example #20
    def transform_train(self, sample):

        composed_transforms = transforms.Compose([
            self.scalecrop,
            tr.RandomHorizontalFlip(),
            #tr.RandomScaleCrop(base_size=self.base_size, crop_size=self.crop_size, fill=255),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #21
    def transform_train(self, sample):

        composed_transforms = transforms.Compose([
            tr.FixScaleCrop(crop_size=self.crop_size),
            tr.RandomHorizontalFlip(),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=[0.4911], std=[0.1658]),
            tr.ToTensor()
        ])
        transformed = composed_transforms(sample)
        transformed['image'] = transformed['image'].unsqueeze(0)
        return transformed
Example #22
 def transforms_train_esp(self, sample):
     composed_transforms = transforms.Compose([
         tr.RandomVerticalFlip(),
         tr.RandomHorizontalFlip(),
         tr.RandomAffine(degrees=40, scale=(.9, 1.1), shear=30),
         tr.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
         tr.FixedResize(size=self.input_size),
         tr.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                       0.225]),
         tr.ToTensor()
     ])
     return composed_transforms(sample)
Example #23
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.base_size,
                               crop_size=self.crop_size,
                               fill=255),
            tr.RandomDarken(self.cfg, self.darken),
            #tr.RandomGaussianBlur(), #TODO Not working for depth channel
            tr.Normalize(mean=self.data_mean, std=self.data_std),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #24
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            # transforms.ColorJitter(brightness=(-1,1),contrast=(-1, 1),saturation=(-0.3, 0.3), hue=(-0.3, 0.3)),
            # transforms.ColorJitter(brightness=0.4, contrast=0.4,saturation=0.4),
            tr.RandomHorizontalFlip(),
            tr.GaussianNoise(),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.PatchToTensor()
        ])

        return composed_transforms(sample)
Example #25
    def transform_tr(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),  # horizontally flip the given PIL Image randomly with a given probability
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #26
    def transform_tr(self, sample):
        #print(sample)
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.FixScaleCrop(crop_size=self.args.crop_size),
            #tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor(),
            tr.Lablize(high_confidence=self.args.high_confidence)
        ])

        return composed_transforms(sample)
Example #27
    def transform_train(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size,
                               fill=255),
            tr.RandomRotate(degree=10),
            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor()
        ])

        return composed_transforms(sample)
Example #28
    def transform_train(self, sample):
        composed_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomVerticalFlip(),
            # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
            # tr.FixedResize(size=self.args.crop_size),
            tr.RandomRotate(),
            tr.RandomGammaTransform(),
            tr.RandomGaussianBlur(),
            tr.RandomNoise(),
            tr.Normalize(mean=(0.544650, 0.352033, 0.384602, 0.352311), std=(0.249456, 0.241652, 0.228824, 0.227583)),
            tr.ToTensor()])

        return composed_transforms(sample)
Example #29
 def transform_tr(self, sample):
     composed_transforms = transforms.Compose([
         tr.RandomHorizontalFlip(),
         tr.RandomScaleCrop(base_size=513, crop_size=513),
         tr.ColorJitter(brightness=0.3,
                        contrast=0.3,
                        saturation=0.3,
                        hue=0.3,
                        gamma=0.3),
         tr.Normalize(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225)),
         tr.ToTensor()
     ])
     return composed_transforms(sample)
Example #30
    def transform_tr(self, sample):
        if self.csplit == 'all':
            ignores = []
        elif self.csplit == 'seen':
            ignores = classes['unseen']
        else:
            raise RuntimeError("Training on unseen data is not allowed.")
        composed_transforms = transforms.Compose([
            tr.MaskIgnores(ignores=ignores, mask=255),
            tr.RandomHorizontalFlip(),
            #            tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            #            tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         seg=True),
            tr.ToTensor()
        ])

        return composed_transforms(sample)