Example #1
    def __init__(
            self,
            df,
            batch_size,
            num_classes,
            data_dir: str = "/media/hdd/Datasets/asl/",
            img_size=(256, 256),
    ):
        super().__init__()
        self.df = df
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.train_transform = A.Compose(
            [
                A.RandomResizedCrop(img_size[0], img_size[1], p=1.0),  # img_size is a (height, width) tuple
                A.Transpose(p=0.5),
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.ShiftScaleRotate(p=0.5),
                A.HueSaturationValue(hue_shift_limit=0.2,
                                     sat_shift_limit=0.2,
                                     val_shift_limit=0.2,
                                     p=0.5),
                A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                           contrast_limit=(-0.1, 0.1),
                                           p=0.5),
                A.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0,
                ),
                A.CoarseDropout(p=0.5),
                A.Cutout(p=0.5),
                ToTensorV2(p=1.0),
            ],
            p=1.0,
        )

        self.valid_transform = A.Compose(
            [
                A.CenterCrop(img_size[0], img_size[1], p=1.0),
                A.Resize(img_size[0], img_size[1]),
                A.Normalize(
                    mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225],
                    max_pixel_value=255.0,
                    p=1.0,
                ),
                ToTensorV2(p=1.0),
            ],
            p=1.0,
        )
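A minimal usage sketch (not part of the original snippet): Albumentations pipelines are called with keyword arguments and return a dict, so a Dataset behind this DataModule might apply train_transform as below. The "path" and "label" column names and the helper itself are hypothetical.

import cv2

def load_and_transform(dm, idx):
    # dm is an instance of the DataModule above
    row = dm.df.iloc[idx]
    img = cv2.imread(dm.data_dir + row["path"])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)      # Albumentations expects RGB numpy arrays
    img = dm.train_transform(image=img)["image"]    # ToTensorV2 -> (C, H, W) torch tensor
    return img, row["label"]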
Example #2
def get_training_augmentation_both():
    train_transform = [
        albu.VerticalFlip(p=0.6),
        albu.HorizontalFlip(p=0.6),
        albu.ShiftScaleRotate(
            scale_limit=0.3, rotate_limit=15, shift_limit=0.1, p=0.6, border_mode=0
        ),
        albu.GridDistortion(p=0.6),
        albu.OpticalDistortion(p=0.6, distort_limit=0.1, shift_limit=0.2),
        albu.RandomBrightnessContrast(p=0.6),
    ]
    return train_transform
Example #3
def get_training_augmentation():
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0.25,
                              scale_limit=0.25,
                              rotate_limit=90,
                              p=0.5),
        albu.GridDistortion(p=0.5),
        albu.Resize(384, 576)
    ]
    return albu.Compose(train_transform)
Example #4
def light_training_transforms_xray(crop_size=256):
    return A.Compose([
        A.RandomResizedCrop(height=crop_size, width=crop_size),
        A.OneOf([
            A.Transpose(),
            A.VerticalFlip(),
            A.HorizontalFlip(),
            A.RandomRotate90(),
            A.NoOp()
        ],
                p=1.0),
    ])
Example #5
    def __init__(self,
                 img_paths,
                 basepath_2_ohe=None,
                 img_size=1024,
                 transform=None,
                 return_label=True,
                 is_trainset=True,
                 in_channels=4,
                 crop_size=0,
                 random_crop=False,
                 cherrypicked_mitotic_spindle_df=None,
                 cherrypicked_aggresome_df=None,
                 mitotic_img_prob=None,
                 max_num_mitotic_cells_per_img=None,
                 aggresome_img_prob=None,
                 max_num_aggresome_cells_per_img=None
                 ):

        self.is_trainset = is_trainset
        self.img_size = img_size
        self.return_label = return_label
        self.in_channels = in_channels
        self.transform = transform
        self.crop_size = crop_size
        self.random_crop = random_crop

        self.img_paths = img_paths
        if is_trainset:
            self.basepath_2_ohe = basepath_2_ohe

        self.mitotic_aggresome_balancing = cherrypicked_aggresome_df is not None
        if self.mitotic_aggresome_balancing:
            assert cherrypicked_mitotic_spindle_df is not None, 'when balancing minor classes, both Aggresome and Mitotic must be included'
            self.cherrypicked_aggresome_df = cherrypicked_aggresome_df
            self.cherrypicked_mitotic_spindle_df = cherrypicked_mitotic_spindle_df

            self.minority_aug = A.Compose([
                A.HorizontalFlip(p=0.7),
                A.VerticalFlip(p=0.7),
                A.RandomRotate90(p=0.5),
                A.ShiftScaleRotate(p=0.8, rotate_limit=0),
                A.GaussianBlur(p=0.2)])
            self.indices_of_mitotic_cells = list(range(len(cherrypicked_mitotic_spindle_df)))
            self.indices_of_aggresome_cells = list(range(len(cherrypicked_aggresome_df)))
            self.mitotic_img_prob = 0.1 if mitotic_img_prob is None else mitotic_img_prob
            self.aggresome_img_prob = 0.1 if aggresome_img_prob is None else aggresome_img_prob
            self.max_num_mitotic_cells_per_img = 4 if max_num_mitotic_cells_per_img is None else max_num_mitotic_cells_per_img
            self.max_num_aggresome_cells_per_img = 4 if max_num_aggresome_cells_per_img is None else max_num_aggresome_cells_per_img

            self.img_paths_len = len(self.img_paths)
            self.num = self.img_paths_len
        else:
            self.num = len(self.img_paths)
Example #6
def get_cc_training_augmentation():
    train_transform = [
        A.CropNonEmptyMaskIfExists(height=640, width=640, always_apply=True),
        A.Resize(height=256, width=256),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.PadIfNeeded(min_height=256,
                      min_width=256,
                      always_apply=True,
                      border_mode=0),
    ]
    return A.Compose(train_transform)
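Note that A.CropNonEmptyMaskIfExists only has an effect when a mask is passed in the same call; a hedged usage sketch:

aug = get_cc_training_augmentation()
sample = aug(image=image, mask=mask)  # the mask target is required here
image_crop, mask_crop = sample["image"], sample["mask"]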
Example #7
def get_light_augmentations(image_size):
    min_size = min(image_size[0], image_size[1])
    return A.Compose([
        A.RandomSizedCrop(min_max_height=(int(min_size * 0.85), min_size),
                          height=image_size[0],
                          width=image_size[1], p=1.0),

        A.OneOf([A.HorizontalFlip(),
                 A.VerticalFlip(),
                 A.RandomRotate90()])
    ])
Example #8
def get_training_augmentation():
    # for train
    train_transform = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(train_transform)
Example #9
    def __init__(self, crop_size=(256, 256)):
        self.transform = alb.Compose([
            alb.Rotate(limit=90, p=0.5),
            alb.RandomSizedCrop(min_max_height=(int(0.8 * crop_size[0]),
                                                int(1.2 * crop_size[0])),
                                height=crop_size[0],
                                width=crop_size[1]),
            alb.RandomRotate90(p=0.5),
            alb.VerticalFlip(p=0.5),
            alb.HorizontalFlip(p=0.5),
            alb.Cutout(num_holes=8, max_h_size=8, max_w_size=8)
        ])
Example #10
def get_transform(size):
    transform = A.Compose([
        A.RandomResizedCrop(size,
                            size,
                            scale=(0.7, 1.0),
                            ratio=(0.9, 1.1),
                            always_apply=True),
        A.RandomRotate90(p=0.3),
        A.HorizontalFlip(p=0.3),
        A.VerticalFlip(p=0.1)
    ])
    return transform
Example #11
    def __random_transform(self, img, masks):
        composition = albu.Compose([
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            albu.ShiftScaleRotate(rotate_limit=30, shift_limit=0.1)
        ])

        composed = composition(image=img, mask=masks)
        aug_img = composed['image']
        aug_masks = composed['mask']

        return aug_img, aug_masks
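Passing image and mask in one call makes Compose apply the same spatial parameters to both. One caveat, assuming `masks` here is a single stacked H×W×C array: a Python list of separate masks would instead go through the plural `masks` target, e.g.:

composed = composition(image=img, masks=[mask_a, mask_b])  # list input
aug_img, aug_mask_list = composed['image'], composed['masks']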
Example #12
    def __init__(self, image_list):
        self.image_list = image_list

        self.aug = A.Compose([  # a list, not a set: transform order must be deterministic
            A.Resize(224, 224),
            # A.CenterCrop(100, 100),
            # A.RandomCrop(80, 80),
            A.HorizontalFlip(p=0.5),
            A.Rotate(limit=(-90, 90)),
            A.VerticalFlip(p=0.5),
            A.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
Example #13
def pw_transforms(image_size=IMAGE_SIZE):  # TODO: extend
    result = albu.Compose([
        albu.RandomBrightnessContrast(brightness_limit=0.2,
                                      contrast_limit=0.2,
                                      p=0.3),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomRotate90(p=0.5),
        albu.ShiftScaleRotate(shift_limit=0, scale_limit=(-0.5, 0.5), rotate_limit=0)
    ])

    return result
Example #14
def case1_cls_train_augs(name, **kwargs):
    return [
        A.Compose([
            A.HorizontalFlip(),
            A.VerticalFlip(),
            A.RandomRotate90(),
            A.ShiftScaleRotate(
                rotate_limit=30, border_mode=cv2.BORDER_CONSTANT, value=0),
            A.Normalize()
        ],
                  p=1.0),
    ]
Example #15
def get_augumentation(phase,
                      width=512,
                      height=512,
                      min_area=0.,
                      min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.augmentations.transforms.LongestMaxSize(max_size=width,
                                                         always_apply=True),
            albu.PadIfNeeded(min_height=height,
                             min_width=width,
                             always_apply=True,
                             border_mode=0,
                             value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(height=height,
                                                            width=width,
                                                            p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20,
                              b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if (phase == 'test' or phase == 'valid'):
        list_transforms.extend([albu.Resize(height=height, width=width)])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        ToTensor()
    ])
    if (phase == 'test'):
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(
                            format='coco',
                            min_area=min_area,
                            min_visibility=min_visibility,
                            label_fields=['category_id']))
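A hedged usage sketch for the train phase: with format='coco' the boxes are (x_min, y_min, width, height) in pixels, and the declared category_id label field must be supplied alongside them. The box values below are illustrative only.

aug = get_augumentation('train', width=512, height=512)
sample = aug(image=img,
             bboxes=[[30, 40, 100, 120]],  # COCO format: x_min, y_min, w, h (pixels)
             category_id=[1])
img_t, boxes_t = sample['image'], sample['bboxes']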
Example #16
def get_tfms_faster(ds):
    """Return the transforms to be applied to both loaders
    (training, validation).

    Arguments:
    - ds: dataset name, either "datamatrix" or "coco"
    """

    if ds == "datamatrix":
        train_tfms = albu.Compose([
            albu.OneOf([
                albu.augmentations.transforms.RandomSizedBBoxSafeCrop(
                    480, 640, p=0.2),
                albu.augmentations.transforms.RandomSizedBBoxSafeCrop(
                    960, 1280, p=0.2),
                albu.augmentations.transforms.Resize(750, 1000, p=0.6),
            ],
                       p=1),
            albu.augmentations.transforms.RandomBrightness(limit=0.5),
            albu.augmentations.transforms.RandomContrast(limit=0.5),
            albu.HorizontalFlip(),
            ToTensor(),
        ],
                                  bbox_params=albu.BboxParams(
                                      format='pascal_voc',
                                      min_area=0.,
                                      min_visibility=0.,
                                      label_fields=['labels']))

    elif ds == "coco":
        train_tfms = albu.Compose([
            albu.augmentations.transforms.RandomBrightness(limit=0.5),
            albu.augmentations.transforms.RandomContrast(limit=0.5),
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            ToTensor(),
        ],
                                  bbox_params=albu.BboxParams(
                                      format='pascal_voc',
                                      min_area=0.,
                                      min_visibility=0.,
                                      label_fields=['labels']))

    else:
        raise ValueError(f"unknown dataset: {ds}")

    val_tfms = albu.Compose([
        ToTensor(),
    ],
                            bbox_params=albu.BboxParams(
                                format='pascal_voc',
                                min_area=0.,
                                min_visibility=0.,
                                label_fields=['labels']))

    return train_tfms, val_tfms
Example #17
def pet_augmentation():
    transform_list = [
        albu.Resize(320, 320),
        albu.RandomRotate90(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.HorizontalFlip(p=0.5),
        albu.MultiplicativeNoise(p=0.7,
                                 multiplier=(0.5, 1.5),
                                 elementwise=True),
        albu.GaussianBlur(p=0.5, blur_limit=3)
    ]
    return albu.Compose(transform_list)
Example #18
def light_training_transforms():
    return A.Compose([
        A.OneOf([
            A.Transpose(),
            A.VerticalFlip(),
            A.HorizontalFlip(),
            A.RandomRotate90(),
            A.NoOp()
        ],
                p=1.0),
        A.Normalize()
    ])
Example #19
def parse_albu_short(config, always_apply=False):
    if isinstance(config, str):
        if config == 'hflip':
            return A.HorizontalFlip(always_apply=always_apply)
        if config == 'vflip':
            return A.VerticalFlip(always_apply=always_apply)
        if config == 'transpose':
            return A.Transpose(always_apply=always_apply)

        raise Exception(f'Unknown augmentation {config}')
    assert isinstance(config, dict)
    return parse_albu([config])
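parse_albu itself is not shown in this example. A minimal sketch of what it might look like, assuming each config dict stores the transform class name under a hypothetical 'name' key and the remaining keys as constructor kwargs:

def parse_albu(configs):
    transforms = []
    for cfg in configs:
        cfg = dict(cfg)                 # copy so the caller's dict is not mutated
        name = cfg.pop('name')          # hypothetical key
        transforms.append(getattr(A, name)(**cfg))
    return A.Compose(transforms)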
Example #20
    def __build_augmentator(self):
        return albu.Compose([
            albu.CropNonEmptyMaskIfExists(height=self.height, width=self.width, p=1.0),
            albu.ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, p=0.6),
            albu.PadIfNeeded(256, 256),
            albu.OneOf([
                albu.VerticalFlip(p=0.5),
                albu.HorizontalFlip(p=0.5),
            ], p=0.5),
            albu.RandomBrightnessContrast(0.1, 0.1),
            # albu.RandomGamma()
        ], p=self.p)
Example #21
def data_augmentation(batch_images, batch_masks):
    images = []
    masks = []
    for image, mask in zip(batch_images, batch_masks):
        #Rotations
        for i in range(0, 4):
            transform_image, transform_mask = A.RandomRotate90(p=1).apply(
                image, factor=i), A.RandomRotate90(p=1).apply(mask, factor=i)
            images.append(transform_image)
            masks.append(transform_mask)
        #Horizontal Flip
        transform_image, transform_mask = A.HorizontalFlip(
            p=1).apply(image), A.HorizontalFlip(p=1).apply(mask)
        images.append(transform_image)
        masks.append(transform_mask)

        #Vertical Flip
        transform_image, transform_mask = A.VerticalFlip(
            p=1).apply(image), A.VerticalFlip(p=1).apply(mask)
        images.append(transform_image)
        masks.append(transform_mask)

        #Diagonal flip 1
        transform_image, transform_mask = A.Transpose(
            p=1).apply(image), A.Transpose(p=1).apply(mask)
        images.append(transform_image)
        masks.append(transform_mask)

        #Diagonal flip 2
        transform_image, transform_mask = A.RandomRotate90(p=1).apply(
            image, factor=2), A.RandomRotate90(p=1).apply(mask, factor=2)
        transform_image, transform_mask = A.Transpose(
            p=1).apply(transform_image), A.Transpose(p=1).apply(transform_mask)
        images.append(transform_image)
        masks.append(transform_mask)

    all_images = np.array(images)
    all_masks = np.array(masks)

    return all_images, all_masks
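Taken together, the four rotations, two flips, and two transpose variants above enumerate the eight symmetries of the square (the dihedral group D4). A compact numpy sketch producing the same eight variants (possibly in a different order), assuming H×W or H×W×C arrays:

import numpy as np

def d4_variants(arr):
    rotations = [np.rot90(arr, k) for k in range(4)]      # 0°, 90°, 180°, 270°
    reflections = [r.swapaxes(0, 1) for r in rotations]   # transpose of each rotation
    return rotations + reflections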
Example #22
def generate_transforms2(img_size):
    train_transform = Compose([
        # A.RandomCrop(p=1, height=img_size, width=img_size),
        A.Resize(height=img_size, width=img_size),
        A.RandomSunFlare(p=1),
        A.RandomFog(p=1),
        A.RandomBrightness(p=1),
        A.Rotate(p=1, limit=90),
        A.RGBShift(p=1),
        A.RandomSnow(p=1),
        A.HorizontalFlip(p=1),
        A.VerticalFlip(p=1),
        A.RandomContrast(limit=0.5, p=1),
        A.HueSaturationValue(p=1,
                             hue_shift_limit=20,
                             sat_shift_limit=30,
                             val_shift_limit=50),
        # A.Cutout(p=1),
        # A.Transpose(p=1),
        A.JpegCompression(p=1),
        A.CoarseDropout(p=1),
        A.IAAAdditiveGaussianNoise(loc=0,
                                   scale=(2.55, 12.75),  # 1% to 5% of 255
                                   per_channel=False,
                                   p=1),
        A.IAAAffine(scale=1.0,
                    translate_percent=None,
                    translate_px=None,
                    rotate=0.0,
                    shear=0.0,
                    order=1,
                    cval=0,
                    mode='reflect',
                    p=1),
        A.IAAAffine(rotate=90., p=1),
        A.IAAAffine(rotate=180., p=1),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2()
    ])
    val_transform = Compose([
        Resize(height=img_size, width=img_size),
        Normalize(mean=(0.485, 0.456, 0.406),
                  std=(0.229, 0.224, 0.225),
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(),
    ])

    return {"train": train_transform, "val": val_transform}
Example #23
File: prep.py  Project: bleamer/eva5
    def __init__(self, **kwargs):

        self.transforms = []
        h_flip = kwargs.get('h_flip', 0.)
        v_flip = kwargs.get('v_flip', 0.)
        g_blur = kwargs.get('g_blur', 0.)
        rot = kwargs.get('rotation', 0.)
        cutout = kwargs.get('cutout', 0.)
        cutout_dimen = kwargs.get('cutout_dimen', (0, 0))
        cutout_wd = kwargs.get('cutout_wd', 0.)
        # padding and crop default to (0, 0) tuples: padding[0] and crop[0]
        # are indexed below, so a scalar default of 0. would raise a TypeError
        padding = kwargs.get('padding', (0, 0))
        crop = kwargs.get('crop', (0, 0))
        crop_prob = kwargs.get('crop_prob', 0.)

        mean = kwargs.get('mean', (.5, .5, .5))
        std = kwargs.get('std', (.5, .5, .5))

        train = kwargs.get('train', True)

        print(padding, padding[0], padding[1])

        print('Transformations')
        print(kwargs)
        if train:
            if padding[0] > 0 or padding[1] > 0:
                self.transforms += [A.PadIfNeeded(min_height=padding[0],
                                                  min_width=padding[1],
                                                  border_mode=0,  # constant border so the fill value is used
                                                  value=tuple([x * 255.0 for x in mean]),
                                                  always_apply=True)]
            if crop_prob > 0:
                self.transforms += [A.RandomCrop(height = crop[0], width=crop[1], always_apply=True)]
            if h_flip > 0:  # Horizontal Flip
                self.transforms += [A.HorizontalFlip(p=h_flip)]
            if v_flip > 0:  # Vertical Flip
                self.transforms += [A.VerticalFlip(p=v_flip)]
            if g_blur > 0:  # Patch Gaussian Augmentation
                self.transforms += [A.GaussianBlur(p=g_blur)]
            if rot > 0:  # Rotate image
                self.transforms += [A.Rotate(limit=rot)]
            if cutout > 0:  # CutOut
                self.transforms += [A.CoarseDropout(
                    p=cutout, max_holes=1, fill_value=tuple([x * 255.0 for x in mean]),
                    max_height=cutout_dimen[0], max_width=cutout_dimen[1], min_height=1, min_width=1
                )]
        self.transforms += [
            A.Normalize(mean=mean, std=std, always_apply=True),
            # convert the data to torch.FloatTensor
            # with values within the range [0.0, 1.0]
            ToTensor()
        ]
        self.transform = A.Compose(self.transforms)
Example #24
    def __init__(self, data_dir, img_size, class_names=None, bbox_sizes=None):
        """
        Arguments
        ---------
        data_dir : str
            path to the data directory (which has pickled files)
        img_size : tuple
            Desired output image size
            IMPORTANT : (WIDTH, HEIGHT)
        class_names : list
            list of classes to extract data other than mouse
        bbox_sizes : list
            Will draw a box which is centered at the class's point,
            as the size defined.
            The order of class names and bbox_sizes should match.
            [(half_width, half_height)]
            i.e. x_min = ctr_x - half_width
        """
        self.class_names = ['mouse']
        if not (class_names is None):
            self.class_names.extend(class_names)
        self.bbox_sizes = bbox_sizes
        self.img_size = img_size
        self.data_dir = Path(data_dir)
        self.raw_data = []
        for pk_name in os.listdir(self.data_dir):
            with open(self.data_dir / pk_name, 'rb') as f:
                self.raw_data.extend(pickle.load(f))

        self.n = len(self.raw_data)
        self.output_size = img_size
        self.aug = A.Compose(
            [
                # A.OneOf([
                #     A.RandomGamma((40,200),p=1),
                #     A.RandomBrightness(limit=0.5, p=1),
                #     A.RandomContrast(limit=0.5,p=1),
                #     A.RGBShift(40,40,40,p=1),
                #     A.Downscale(scale_min=0.25,scale_max=0.5,p=1),
                #     A.ChannelShuffle(p=1),
                # ], p=0.8),
                # A.InvertImg(p=0.5),
                A.VerticalFlip(p=0.5),
                A.HorizontalFlip(p=0.5),
                # A.RandomRotate90(p=1),
                A.Resize(img_size[1], img_size[0]),
                # A.Cutout(8,img_size[0]//12,img_size[1]//12)
            ],
            # (x1, y1, x2, y2) format, all normalized
            bbox_params=A.BboxParams(format='albumentations',
                                     label_fields=['bbox_labels']),
        )
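A hedged usage sketch for this pipeline: the 'albumentations' bbox format expects (x_min, y_min, x_max, y_max) normalized to [0, 1], and the declared bbox_labels field must be passed along. The box values are illustrative only.

sample = self.aug(image=img,
                  bboxes=[[0.10, 0.20, 0.45, 0.60]],  # normalized x1, y1, x2, y2
                  bbox_labels=['mouse'])
img_t, boxes_t = sample['image'], sample['bboxes']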
Example #25
def augment(img_size, mean=[0.5] * 3, std=[0.5] * 3):
    aug_seq1 = A.OneOf([
        A.HorizontalFlip(p=1.0),
        A.VerticalFlip(p=1.0),
        A.Transpose(p=1.0),
    ],
                       p=1.0)
    aug_seq2 = A.OneOf(
        [
            # A.RandomGamma(always_apply=False, p=1.0, gamma_limit=(80, 120), eps=1e-07),
            A.RandomBrightnessContrast(always_apply=False,
                                       p=1.0,
                                       brightness_limit=(-0.2, 0.2),
                                       contrast_limit=(-0.2, 0.2),
                                       brightness_by_max=True),
        ],
        p=1.0)
    aug_seq3 = A.OneOf([
        A.RGBShift(always_apply=False,
                   p=1.0,
                   r_shift_limit=(-10, 10),
                   g_shift_limit=(-10, 10),
                   b_shift_limit=(-10, 10)),
        A.HueSaturationValue(always_apply=False,
                             p=1.0,
                             hue_shift_limit=(-4, 4),
                             sat_shift_limit=(-30, 30),
                             val_shift_limit=(-20, 20)),
    ],
                       p=1.0)
    aug_seq4 = A.OneOf([
        A.MultiplicativeNoise(
            always_apply=False,
            p=1.0,
            multiplier=(0.9, 1.1),
            per_channel=True,
            elementwise=True),
        A.MotionBlur(always_apply=False, p=1.0, blur_limit=(3, 7)),
        A.GaussNoise(always_apply=False, p=1.0, var_limit=(10.0, 50.0)),
        A.Blur(always_apply=False, p=1.0, blur_limit=(3, 7)),
    ],
                       p=1.0)
    aug_seq = A.Compose([
        A.Resize(img_size, img_size),
        aug_seq1,
        aug_seq2,
        aug_seq3,
        aug_seq4,
        A.Normalize(mean=mean, std=std),
        # A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    return aug_seq
Example #26
    def __init__(self, train=True, **transform_args):

        ## ARGS

        horizontal_flip_prob = transform_args['horizontal_flip_prob']
        vertical_flip_prob = transform_args['vertical_flip_prob']
        gaussian_blur_prob = transform_args['gaussian_blur_prob']
        rotate_degree = transform_args['rotate_degree']
        cutout = transform_args['cutout']
        cutout_height = transform_args['cutout_height']
        cutout_width = transform_args['cutout_width']

        mean = (0.5, 0.5, 0.5)
        std = (0.5, 0.5, 0.5)

        # Train phase transformations

        transforms_list = []

        if train:
            if horizontal_flip_prob > 0:  # Horizontal Flip
                transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]
            if vertical_flip_prob > 0:  # Vertical Flip
                transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]
            if gaussian_blur_prob > 0:  # Patch Gaussian Augmentation
                transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]
            if rotate_degree > 0:  # Rotate image
                transforms_list += [A.Rotate(limit=rotate_degree)]
            if cutout > 0:  # CutOut
                transforms_list += [
                    A.CoarseDropout(p=cutout,
                                    max_holes=1,
                                    fill_value=tuple([x * 255.0
                                                      for x in mean]),
                                    max_height=cutout_height,
                                    max_width=cutout_width,
                                    min_height=1,
                                    min_width=1)
                ]

        transforms_list += [
            # normalize the data with mean and standard deviation to keep values in range [-1, 1]
            # since there are 3 channels for each image,
            # we have to specify mean and std for each channel
            A.Normalize(mean=mean, std=std, always_apply=True),

            # convert the data to torch.FloatTensor
            # with values within the range [0.0, 1.0]
            ToTensor()
        ]

        self.transform = A.Compose(transforms_list)
Example #27
def get_training_augmentation():
    train_transform = [
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.5),
        A.ShiftScaleRotate(scale_limit=0.0,
                           rotate_limit=360,
                           shift_limit=0.1,
                           p=1,
                           border_mode=0),
        A.PadIfNeeded(min_height=256,
                      min_width=256,
                      always_apply=True,
                      border_mode=0),
        A.RandomCrop(height=256, width=256, always_apply=True),
        # To prevent overfitting
        A.IAAAdditiveGaussianNoise(p=0.3),
        ### We do not use the transformations below, since our validation data
        # does not differ in hue, contrast, brightness or saturation,
        # and is very uniform ###

        # Satellite pictures always have the same perspective,
        # Therefore we do not want our model to learn the concept of perspective
        # A.IAAPerspective(p=0.5),

        # A.OneOf(
        #     [
        #         A.CLAHE(p=1),
        #         A.RandomBrightness(p=1),
        #         A.RandomGamma(p=1),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.IAASharpen(p=1),
        #         A.Blur(blur_limit=3, p=1),
        #         A.MotionBlur(blur_limit=3, p=1),
        #     ],
        #     p=0.9,
        # ),
        #
        # A.OneOf(
        #     [
        #         A.RandomContrast(p=1),
        #         A.HueSaturationValue(p=1),
        #     ],
        #     p=0.9,
        # ),
        # A.Lambda(mask=round_clip_0_1)
    ]
    return A.Compose(train_transform)
Example #28
def main(opt):
    device = torch.device("cuda")

    current_checkpoint = [
            os.path.join("./artifacts_train", "checkpoints", opt.version, name)
            for name in os.listdir(os.path.join("./artifacts_train", "checkpoints", opt.version))
            if not name.startswith(".")
        ]
    checkpoint = sorted(current_checkpoint, key=lambda x: int(x.split("/")[-1].split("_")[0]), reverse=True)[0]
    print(f"Checkpoint: {checkpoint}")

    path_to_img = [os.path.join(opt.data_dir, name) for name in os.listdir(opt.data_dir) if not name.startswith(".")]

    val_trans = albu.Compose([
            albu.RandomResizedCrop(*opt.input_shape),
            albu.VerticalFlip(),
            albu.HorizontalFlip(),
            albu.ShiftScaleRotate(p=0.5),
            albu.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
            albu.RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
            albu.Normalize()
        ])

    model = timm.create_model(opt.model_arch, pretrained=False)
    model.classifier = nn.Linear(model.classifier.in_features, 5)
    model.load_state_dict(torch.load(checkpoint, map_location=device)["model_state_dict"])
    model.to(device)
    model.eval()

    predicts = []
    for path in path_to_img:
        img = cv2.imread(path)
        tta_preds = []
        for _ in range(opt.tta):
            img_inp = val_trans(image=img)["image"]
            img_inp = img_inp.transpose(2, 0, 1)
            img_inp = torch.from_numpy(img_inp)[None, ...]
            img_inp = img_inp.to(device)

            with torch.no_grad():
                logit = model(img_inp)
            
            tta_preds.append(F.softmax(logit, dim=-1)[None, ...])
        
        tta_pred = torch.cat(tta_preds, dim=0).mean(dim=0)

        _, predicted = torch.max(tta_pred.data, 1)
        predicts += list(predicted.cpu().numpy())

    submission_df = pd.DataFrame(zip(path_to_img, predicts), columns=["image_id", "label"])
    submission_df["image_id"] = submission_df["image_id"].apply(lambda x: x.split("/")[-1])
    submission_df.to_csv("submission.csv", index=False)
Example #29
    def __init__(
        self, horizontal_flip_prob=0.0, vertical_flip_prob=0.0, gaussian_blur_prob=0.0,
        rotate_degree=0.0, cutout=0.0, cutout_height=0, cutout_width=0,
        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), train=True
    ):
        """Create data transformation pipeline
        
        Args:
            horizontal_flip_prob: Probability of an image being horizontally flipped.
                Defaults to 0.
            vertical_flip_prob: Probability of an image being vertically flipped.
                Defaults to 0.
            gaussian_blur_prob: Probability of applying Gaussian blur.
                Defaults to 0.
            rotate_degree: Angle of rotation for image augmentation.
                Defaults to 0.
            cutout: Probability that cutout will be performed.
                Defaults to 0.
            cutout_height: Max height of the cutout box.
                Defaults to 0.
            cutout_width: Max width of the cutout box.
                Defaults to 0.
            mean: Mean value. Defaults to 0.5 for each channel.
            std: Standard deviation value. Defaults to 0.5 for each channel.
        """
        transforms_list = []

        if train:
            if horizontal_flip_prob > 0:  # Horizontal Flip
                transforms_list += [A.HorizontalFlip(p=horizontal_flip_prob)]
            if vertical_flip_prob > 0:  # Vertical Flip
                transforms_list += [A.VerticalFlip(p=vertical_flip_prob)]
            if gaussian_blur_prob > 0:  # Patch Gaussian Augmentation
                transforms_list += [A.GaussianBlur(p=gaussian_blur_prob)]
            if rotate_degree > 0:  # Rotate image
                transforms_list += [A.Rotate(limit=rotate_degree)]
            if cutout > 0:  # CutOut
                transforms_list += [A.CoarseDropout(
                    p=cutout, max_holes=1, fill_value=tuple([x * 255.0 for x in mean]),
                    max_height=cutout_height, max_width=cutout_width, min_height=1, min_width=1
                )]
        
        transforms_list += [
            # normalize the data with mean and standard deviation to keep values in range [-1, 1]
            # since there are 3 channels for each image,
            # we have to specify mean and std for each channel
            A.Normalize(mean=mean, std=std, always_apply=True),
            
            # convert the data to torch.FloatTensor
            # with values within the range [0.0, 1.0]
            ToTensor()
        ]

        self.transform = A.Compose(transforms_list)
Example #30
def get_spacenet6_augmentation(config, is_train):
    """
    """
    if is_train:
        augmentation = [
            # random flip
            albu.HorizontalFlip(
                p=config.TRANSFORM.TRAIN_HORIZONTAL_FLIP_PROB
            ),
            albu.VerticalFlip(
                p=config.TRANSFORM.TRAIN_VERTICAL_FLIP_PROB
            ),
            # random rotate
            albu.ShiftScaleRotate(
                scale_limit=0.0,
                rotate_limit=config.TRANSFORM.TRAIN_RANDOM_ROTATE_DEG,
                shift_limit=0.0,
                p=config.TRANSFORM.TRAIN_RANDOM_ROTATE_PROB,
                border_mode=0),
            # random crop
            albu.RandomCrop(
                width=config.TRANSFORM.TRAIN_RANDOM_CROP_SIZE[0],
                height=config.TRANSFORM.TRAIN_RANDOM_CROP_SIZE[1],
                always_apply=True
            ),
            # speckle noise
            albu.Lambda(
                image=functools.partial(
                    _random_speckle_noise,
                    speckle_std=config.TRANSFORM.TRAIN_SPECKLE_NOISE_STD,
                    p=config.TRANSFORM.TRAIN_SPECKLE_NOISE_PROB
                )
            ),
            # random brightness
            albu.Lambda(
                image=functools.partial(
                    _random_brightness,
                    brightness_std=config.TRANSFORM.TRAIN_RANDOM_BRIGHTNESS_STD,
                    p=config.TRANSFORM.TRAIN_RANDOM_BRIGHTNESS_PROB
                )
            ),
        ]
    else:
        augmentation = [
            albu.PadIfNeeded(
                min_width=config.TRANSFORM.TEST_SIZE[0],
                min_height=config.TRANSFORM.TEST_SIZE[1],
                always_apply=True,
                border_mode=0
            )
        ]
    return albu.Compose(augmentation)
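The Lambda callbacks _random_speckle_noise and _random_brightness are defined elsewhere in the project. A minimal sketch of what the speckle-noise helper might look like, assuming albu.Lambda passes the image as the first argument (plus extra **kwargs) and that the probability check lives inside the callback:

import numpy as np

def _random_speckle_noise(image, speckle_std, p, **kwargs):
    # hypothetical implementation: multiplicative Gaussian ("speckle") noise
    if np.random.rand() >= p:
        return image
    noise = np.random.normal(loc=1.0, scale=speckle_std, size=image.shape)
    return (image.astype(np.float32) * noise).astype(image.dtype)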