Example #1
def hard_transform(image_size: int = 256, p: float = 0.5, **kwargs):
    """Hard augmentations (on training)"""
    _add_transform_default_params(kwargs)

    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5,
                                 p=p),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
    ], **kwargs)
    return transforms
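Every example on this page builds an albumentations Compose pipeline. As a minimal, self-contained sketch (it uses only HorizontalFlip, so it does not depend on the _add_transform_default_params helper assumed above), a pipeline is applied by calling it with keyword targets and reading the result dict:

import numpy as np
from albumentations import Compose, HorizontalFlip

pipeline = Compose([HorizontalFlip(p=1.0)])
image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
augmented = pipeline(image=image)   # returns a dict of transformed targets
flipped = augmented["image"]        # same shape and dtype as the input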
Example #2
 def strong_aug(self):
     color_r = random.randint(0, 256)
     color_g = random.randint(0, 256)
     color_b = random.randint(0, 256)
     num_holes = random.randint(1, 2)
     if num_holes == 2:
         max_h_size = random.randint(15, 30)
         max_w_size = random.randint(15, 30)
     else:
         max_h_size = random.randint(30, 60)
         max_w_size = random.randint(30, 60)
     return Compose([
         OneOf([
             OneOf([
                 MultiplicativeNoise(multiplier=[0.5, 1.5], elementwise=True, per_channel=True, p=0.2),
                 IAAAdditiveGaussianNoise(),
                 GaussNoise()]),
             OneOf([
                 InvertImg(),
                 ToSepia()]),
             OneOf([
                 ChannelDropout(channel_drop_range=(1, 1), fill_value=0),
                 ChannelShuffle()]),
             HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.1)],
             p=0.25),
         Cutout(num_holes=num_holes, max_h_size=max_h_size, max_w_size=max_w_size,
                fill_value=[color_r, color_g, color_b], p=0.9),
     ])
 def box_segmentation_aug():
     return Compose([
         OneOf([
             RandomBrightnessContrast(brightness_limit=0.2, p=0.5),
             RandomGamma(gamma_limit=50, p=0.5),
             ChannelShuffle(p=0.5)
         ]),
         OneOf([
             ImageCompression(quality_lower=0, quality_upper=20, p=0.5),
             MultiplicativeNoise(multiplier=(0.3, 0.8),
                                 elementwise=True,
                                 per_channel=True,
                                 p=0.5),
             Blur(blur_limit=(15, 15), p=0.5)
         ]),
         OneOf([
             CenterCrop(height=1000, width=1000, p=0.1),
             RandomGridShuffle(grid=(3, 3), p=0.2),
             CoarseDropout(max_holes=20,
                           max_height=100,
                           max_width=100,
                           fill_value=53,
                           p=0.2)
         ]),
         OneOf([
             GridDistortion(p=0.5, num_steps=2, distort_limit=0.2),
             ElasticTransform(alpha=157, sigma=80, alpha_affine=196, p=0.5),
             OpticalDistortion(distort_limit=0.5, shift_limit=0.5, p=0.5)
         ]),
         OneOf([
             VerticalFlip(p=0.5),
             HorizontalFlip(p=0.5),
             Rotate(limit=44, p=0.5)
         ])
     ])
Example #4
def medium_aug(p=1.0):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(p=0.75,
                             shift_limit=0.1,
                             scale_limit=0.2,
                             rotate_limit=45,
                             border_mode=cv2.BORDER_CONSTANT),
            RandomBrightnessContrast(
                brightness_limit=0.6, contrast_limit=0.6, p=0.5),
            OneOf([
                HueSaturationValue(p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0)
            ],
                  p=0.5),
            OneOf([
                Blur(p=1.0),
                MedianBlur(p=1.0),
                MotionBlur(p=1.0),
            ], p=0.3),
            OneOf([GridDistortion(p=1.0),
                   ElasticTransform(p=1.0)], p=0.3),
            OneOf([
                CLAHE(p=1.0),
                IAASharpen(p=1.0),
            ], p=0.3),
            IAAAdditiveGaussianNoise(p=0.5)
            # ToGray(p=1.0),
        ],
        p=p)
Example #5
def hard_transform(image_size: int = 256, p: float = 0.5):
    """Hard augmentations"""
    transforms = Compose([
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
    return transforms
Example #6
def strong_aug(p=.5):
    return Compose([
        HorizontalFlip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.1),
            Blur(blur_limit=3, p=.1),
        ],
              p=0.3),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=.1),
            IAAPiecewiseAffine(p=0.3),
        ],
              p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            RandomContrast(),
            RandomBrightness(),
        ],
              p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(),
        Cutout(num_holes=20, max_h_size=16, max_w_size=16)
    ],
                   p=p)
def hard_transform(image_size=224, p=0.5):
    transforms = [
        Cutout(
            num_holes=4,
            max_w_size=image_size // 4,
            max_h_size=image_size // 4,
            p=p
        ),
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf(
            [
                HueSaturationValue(p=p),
                ToGray(p=p),
                RGBShift(p=p),
                ChannelShuffle(p=p),
            ]
        ),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ]
    transforms = Compose(transforms)
    return transforms
Example #8
    def __getitem__(self, index):
        '''
            Get one sample from the dataset.
            index: int, the index of the sample in the dataset.
        '''
        #get data from dataset
        img_path = self.img_paths[index]
        mask_path = self.mask_paths[index]
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        #binarize the mask: values greater than 1 become 0, all other values become 1
        if not os.path.exists(mask_path):
            mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.float32)
        else:
            mask = cv2.imread(mask_path, 0)
            mask[mask > 1] = 2
            mask[mask < 1] = 1
            mask[mask > 1] = 0

        if isinstance(self.img_size, tuple):
            height = self.img_size[1]
            width = self.img_size[0]
        elif isinstance(self.img_size, int):
            width = height = self.img_size

        if self.augumentation:
            #augmentation methods
            task = Compose([
                RandomBrightnessContrast(),
                RandomGamma(),
                HorizontalFlip(),
                VerticalFlip(),
                ChannelShuffle(),
                PadIfNeeded(height, width),
            ])
            #augmentation
            augument_data = task(image=img, mask=mask)
            img = augument_data["image"]
            mask = augument_data["mask"]
        resize = Compose([
            Resize(height=height, width=width, always_apply=True),
            Normalize(mean=(0.5, 0.5, 0.5),
                      std=(0.5, 0.5, 0.5),
                      always_apply=True)
        ])
        #resize data
        resize_data = resize(image=img, mask=mask)
        img, mask = resize_data["image"], resize_data["mask"]
        if img.ndim > 2:
            img = np.transpose(img, axes=[2, 0, 1])
        elif img.ndim == 2:
            img = np.expand_dims(img, axis=0)
        return torch.from_numpy(img.astype(np.float32)), torch.from_numpy(
            mask.astype(np.float32))
Example #9
 def __init__(self):
     self.albTrainTransforms = Compose([  # Resize(256, 256),
         Rotate((-10.0, 10.0)),
         HorizontalFlip(p=0.5),
         ChannelShuffle(p=0.5),
         PadIfNeeded(min_height=36, min_width=36),
         RandomCrop(height=32, width=32, p=1.0),
         Cutout(num_holes=1, max_h_size=8, max_w_size=8),
         Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
         ToTensor()
     ])
Example #10
def augument():
    augm = Compose([
        RGBShift(),
        RandomBrightness(),
        RandomContrast(),
        HueSaturationValue(p=0.2),
        ChannelShuffle(),
        CLAHE(),
        Blur(),
        ToGray(),
        CoarseDropout()
    ],
                   p=0.5)
    return augm
Example #11
def augment(image, mask):
    original_height, original_width = image.shape[:2]
    aug = PadIfNeeded(p=1, min_height=128, min_width=128)
    augmented = aug(image=image, mask=mask)

    image_padded = augmented['image']
    mask_padded = augmented['mask']

    aug = Compose([
        OneOf([RandomSizedCrop(p=0.5, min_max_height=(int(original_height/4), int(original_height/2)),
                              height=original_height, width=original_width),
               PadIfNeeded(min_height=original_height,
                           min_width=original_width, p=0.5)], p=1),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        HorizontalFlip(p=0.5),
        OneOf([
            ElasticTransform(p=0.5, alpha=120, sigma=120 * random.uniform(0.07,0.2),
                             alpha_affine=120 * random.uniform(0.03,0.5)),
            GridDistortion(p=0.5),
            OpticalDistortion(p=1, distort_limit=0.2, shift_limit=0.2)
        ], p=0.8),
        CLAHE(p=0.8),
        RandomContrast(p=0.8),
        RandomBrightness(p=0.8),
        RandomGamma(p=0.8),
        RGBShift(p=0.1),
        HueSaturationValue(p=0.1),
        ChannelShuffle(p=0.1),
        Blur(p=0.3),
        MedianBlur(p=0.3),
        JpegCompression(p=0.8)
    ])

    augmented = aug(image=image_padded, mask=mask_padded)
    image_v = augmented['image']
    mask_v = augmented['mask']

    aug = PadIfNeeded(p=1, min_height=1024, min_width=1024)
    augmented = aug(image=image_v, mask=mask_v)

    image_v = augmented['image']
    mask_v = augmented['mask']

    # image_v = cv2.resize(image_v, (64, 64))
    # mask_v = cv2.resize(image_v, (64, 64))
    return image_v, mask_v
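A usage sketch for the augment function above, assuming cv2, random, and the albumentations transforms are imported as in the original script; the file paths are hypothetical:

import cv2

image = cv2.imread("frame.png")             # hypothetical input image
mask = cv2.imread("frame_mask.png", 0)      # hypothetical single-channel mask
image_aug, mask_aug = augment(image, mask)  # both are padded to at least 1024x1024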
Example #12
def aug_train_heavy(p=1):
    return Compose([
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        RandomBrightnessContrast(p=0.3),
        RandomGamma(p=0.3),
        OneOf([
            HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=0.1,
                               val_shift_limit=0.1,
                               p=0.3),
            ChannelShuffle(p=0.3)
        ])
    ],
                   p=p)
Example #13
 def __getitem__(self, index):
     '''
         Magic method to get one sample from the dataset.
         args:
             index: int, the index of the sample
         return:
             the specified sample from the dataset
     '''
     #get sample from the dataset
     img_path = self.image_paths[index]
     label = np.array(self.labels[index], dtype=np.float32)
     assert os.path.exists(
          img_path), "Cannot find the image file {}".format(img_path)
     #read image
     img = cv2.imread(img_path)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
     if isinstance(self.img_size, tuple):
         height = self.img_size[1]
         width = self.img_size[0]
     elif isinstance(self.img_size, int):
         width = height = self.img_size
     #data augmentation
     if self.augmentation:
          #data augmentation method
         task = Compose([
             RandomBrightnessContrast(),
             RandomGamma(),
             HorizontalFlip(),
             VerticalFlip(),
             ChannelShuffle(),
             PadIfNeeded(height, width),
         ])
         aug_data = task(image=img)
         img = aug_data["image"]
     #resize the image
     resize = Compose([
         Resize(height=height, width=width, always_apply=True),
         Normalize(mean=self.mean, std=self.std, always_apply=True)
     ])
     resize_data = resize(image=img)
     img = resize_data["image"]
     if img.ndim > 2:
         img = np.transpose(img, axes=[2, 0, 1])
     else:
         img = np.expand_dims(img, axis=0)
     return torch.from_numpy(img), torch.from_numpy(label)
Example #14
    def __init__(self):
        self.random_brightness_contrast = RandomBrightnessContrast()
        self.hue_saturation_value = HueSaturationValue()
        self.random_gamma = RandomGamma()
        self.clahe = CLAHE()

        self.blur = Blur()
        self.gauss_noise = GaussNoise()

        self.channel_shuffle = ChannelShuffle()
        self.rgb_shift = RGBShift()
        self.channel_dropout = ChannelDropout()

        self.random_fog = RandomFog(fog_coef_upper=0.4)
        self.random_rain = RandomRain()
        self.random_snow = RandomSnow()
        self.random_shadow = RandomShadow()
        self.random_sunflare = RandomSunFlare(angle_upper=0.2)
Example #15
 def strong_aug(p=0.9):
     return Compose(
         [
             OneOf([
                 IAAAdditiveGaussianNoise(scale=(0.01 * 255, 0.05 * 255),
                                          p=1.0),
                 GaussNoise(var_limit=(20, 120), p=1.0),
                 RandomGamma(gamma_limit=(80, 120), p=1.0),
             ],
                   p=0.9),
             RandomBrightnessContrast(p=1.0),
             OneOf(
                 [
                     # MotionBlur(p=1.0),
                     # MedianBlur(blur_limit=3, p=1.0),
                     Blur(blur_limit=5, p=1.0),
                     IAASharpen(p=1.0),
                     # IAAEmboss(p=1.0),
                     # IAASuperpixels(n_segments=10, p_replace=0.05, p=1.0),
                 ],
                 p=0.9),
             OneOf(
                 [
                     CLAHE(clip_limit=8, p=1.0),
                     RGBShift(p=1.0),
                     ChannelShuffle(p=1.0),
                     HueSaturationValue(p=1.0),
                     # ToGray(p=1.0),
                 ],
                 p=0.9),
             # OneOf([
             #     OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
             #     # GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
             #     IAAPiecewiseAffine(nb_rows=4, nb_cols=4, p=1.0),
             #     IAAPerspective(scale=(0.05, 0.075), p=1.0),
             #     # IAAAffine(mode='constant', p=1.0),
             #     ElasticTransform(alpha=alpha, sigma=sigma, alpha_affine=alpha_affine,
             #                      border_mode=cv2.BORDER_CONSTANT,
             #                      p=1.0),
             # ], p=0.9),
         ],
         p=p)
Example #16
def hard_transform():
    transforms = [
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.1,
                         rotate_limit=15,
                         border_mode=cv2.BORDER_REFLECT,
                         p=0.5),
        IAAPerspective(scale=(0.02, 0.05), p=0.3),
        RandomBrightnessContrast(brightness_limit=0.2,
                                 contrast_limit=0.2,
                                 p=0.3),
        RandomGamma(gamma_limit=(85, 115), p=0.3),
        HueSaturationValue(p=0.3),
        ChannelShuffle(p=0.5),
        ToGray(p=0.2),
        CLAHE(p=0.3),
        RGBShift(p=0.3),
        JpegCompression(quality_lower=50),
    ]
    transforms = Compose(transforms)
    return transforms
Example #17
def strong_aug(p=0.8):
    return Compose([
        # RandomRotate90(),
        # Flip(),
        # Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([  # blur
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
            JpegCompression(p=1,quality_lower=7,quality_upper=40)
        ], p=1),
        # ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        OneOf([ 
            IAAPiecewiseAffine(p=1,scale=(0.005, 0.01), nb_rows=4, nb_cols=4),
            IAAPerspective(p=1,scale=(random.uniform(0.01,0.03),random.uniform(0.01, 0.03))),
            ElasticTransform(p=1,alpha=random.randint(50,100), sigma=random.randint(8,13), alpha_affine=0,border_mode=3),
        ], p=0.2),
        OneOf([ 
            ElasticTransform(p=1,alpha=random.randint(50,100), sigma=random.randint(8,13), alpha_affine=0,border_mode=3),
        ], p=0.6),
        OneOf([ 
            OpticalDistortion(p=1,distort_limit=0.2,border_mode=3),
            # GridDistortion(p=1,distort_limit=0.1,border_mode=3),
        ], p=0.1),        
        OneOf([
            CLAHE(clip_limit=2,p=0.5),
            # IAASharpen(),
            IAAEmboss(p=0.5),
            RandomBrightnessContrast(p=1),  # random brightness/contrast adjustment (how does this differ from the next one?)
            HueSaturationValue(p=1),  # random HSV shift
            RGBShift(p=0.5),  # random RGB shift
            ChannelShuffle(p=0.5),  # shuffle the RGB channels
            InvertImg(p=0.1),  # invert the image (255 - pixel value)
        ], p=0.5),    

    ], p=p) 
Example #18
def aug_train(p=1):
    return Compose([
        Resize(224, 224),
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=.75),
        OpticalDistortion(),
        GridDistortion(),
        RandomBrightnessContrast(p=0.3),
        RandomGamma(p=0.3),
        OneOf([
            HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=0.1,
                               val_shift_limit=0.1,
                               p=0.3),
            ChannelShuffle(p=0.3),
            CLAHE(p=0.3)
        ])
    ],
                   p=p)
Example #19
def augment_data(images, masks, save_path, augment=True):
    """ Performing data augmentation. """
    size = (512, 512)
    crop_size = (448, 448)

    for idx, (x, y) in tqdm(enumerate(zip(images, masks)), total=len(images)):
        image_name = x.split("/")[-1].split(".")[0]
        mask_name = y.split("/")[-1].split(".")[0]

        x = cv2.imread(x, cv2.IMREAD_COLOR)
        y = cv2.imread(y, cv2.IMREAD_COLOR)

        if x.shape[0] >= size[0] and x.shape[1] >= size[1]:
            if augment == True:
                ## Crop
                x_min = 0
                y_min = 0
                x_max = x_min + size[0]
                y_max = y_min + size[1]

                aug = Crop(p=1, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max)
                augmented = aug(image=x, mask=y)
                x1 = augmented['image']
                y1 = augmented['mask']

                # Random Rotate 90 degree
                aug = RandomRotate90(p=1)
                augmented = aug(image=x, mask=y)
                x2 = augmented['image']
                y2 = augmented['mask']

                ## ElasticTransform
                aug = ElasticTransform(p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03)
                augmented = aug(image=x, mask=y)
                x3 = augmented['image']
                y3 = augmented['mask']

                ## Grid Distortion
                aug = GridDistortion(p=1)
                augmented = aug(image=x, mask=y)
                x4 = augmented['image']
                y4 = augmented['mask']

                ## Optical Distortion
                aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
                augmented = aug(image=x, mask=y)
                x5 = augmented['image']
                y5 = augmented['mask']

                ## Vertical Flip
                aug = VerticalFlip(p=1)
                augmented = aug(image=x, mask=y)
                x6 = augmented['image']
                y6 = augmented['mask']

                ## Horizontal Flip
                aug = HorizontalFlip(p=1)
                augmented = aug(image=x, mask=y)
                x7 = augmented['image']
                y7 = augmented['mask']

                ## Grayscale
                x8 = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
                y8 = y

                ## Grayscale Vertical Flip
                aug = VerticalFlip(p=1)
                augmented = aug(image=x8, mask=y8)
                x9 = augmented['image']
                y9 = augmented['mask']

                ## Grayscale Horizontal Flip
                aug = HorizontalFlip(p=1)
                augmented = aug(image=x8, mask=y8)
                x10 = augmented['image']
                y10 = augmented['mask']

                # aug = RandomBrightnessContrast(p=1)
                # augmented = aug(image=x, mask=y)
                # x11 = augmented['image']
                # y11 = augmented['mask']
                #
                # aug = RandomGamma(p=1)
                # augmented = aug(image=x, mask=y)
                # x12 = augmented['image']
                # y12 = augmented['mask']
                #
                # aug = HueSaturationValue(p=1)
                # augmented = aug(image=x, mask=y)
                # x13 = augmented['image']
                # y13 = augmented['mask']

                aug = RGBShift(p=1)
                augmented = aug(image=x, mask=y)
                x14 = augmented['image']
                y14 = augmented['mask']

                # aug = RandomBrightness(p=1)
                # augmented = aug(image=x, mask=y)
                # x15 = augmented['image']
                # y15 = augmented['mask']
                #
                # aug = RandomContrast(p=1)
                # augmented = aug(image=x, mask=y)
                # x16 = augmented['image']
                # y16 = augmented['mask']

                aug = ChannelShuffle(p=1)
                augmented = aug(image=x, mask=y)
                x17 = augmented['image']
                y17 = augmented['mask']

                aug = CoarseDropout(p=1, max_holes=10, max_height=32, max_width=32)
                augmented = aug(image=x, mask=y)
                x18 = augmented['image']
                y18 = augmented['mask']

                aug = GaussNoise(p=1)
                augmented = aug(image=x, mask=y)
                x19 = augmented['image']
                y19 = augmented['mask']

                # aug = MotionBlur(p=1, blur_limit=7)
                # augmented = aug(image=x, mask=y)
                # x20 = augmented['image']
                # y20 = augmented['mask']
                #
                # aug = MedianBlur(p=1, blur_limit=11)
                # augmented = aug(image=x, mask=y)
                # x21 = augmented['image']
                # y21 = augmented['mask']
                #
                # aug = GaussianBlur(p=1, blur_limit=11)
                # augmented = aug(image=x, mask=y)
                # x22 = augmented['image']
                # y22 = augmented['mask']

                ##
                aug = CenterCrop(256, 256, p=1)
                augmented = aug(image=x, mask=y)
                x23 = augmented['image']
                y23 = augmented['mask']

                aug = CenterCrop(384, 384, p=1)
                augmented = aug(image=x, mask=y)
                x24 = augmented['image']
                y24 = augmented['mask']

                aug = CenterCrop(448, 448, p=1)
                augmented = aug(image=x, mask=y)
                x25 = augmented['image']
                y25 = augmented['mask']

                ## x23 Vertical Flip
                aug = VerticalFlip(p=1)
                augmented = aug(image=x23, mask=y23)
                x26 = augmented['image']
                y26 = augmented['mask']

                ## x23 Horizontal Flip
                aug = HorizontalFlip(p=1)
                augmented = aug(image=x23, mask=y23)
                x27 = augmented['image']
                y27 = augmented['mask']

                ## x24 Vertical Flip
                aug = VerticalFlip(p=1)
                augmented = aug(image=x24, mask=y24)
                x28 = augmented['image']
                y28 = augmented['mask']

                ## x24 Horizontal Flip
                aug = HorizontalFlip(p=1)
                augmented = aug(image=x24, mask=y24)
                x29 = augmented['image']
                y29 = augmented['mask']

                ## x25 Vertical Flip
                aug = VerticalFlip(p=1)
                augmented = aug(image=x25, mask=y25)
                x30 = augmented['image']
                y30 = augmented['mask']

                ## x25 Horizontal Flip
                aug = HorizontalFlip(p=1)
                augmented = aug(image=x25, mask=y25)
                x31 = augmented['image']
                y31 = augmented['mask']

                images = [
                    x, x1, x2, x3, x4, x5, x6, x7, x8, x9,
                    x10,
                    # x11, x12, x13,
                    x14,
                    # x15, x16,
                    x17, x18, x19,
                    # x20, x21, x22,
                    x23, x24, x25, x26, x27, x28, x29,
                    x30, x31
                ]
                masks  = [
                    y, y1, y2, y3, y4, y5, y6, y7, y8, y9,
                    y10,
                    # y11, y12, y13,
                    y14,
                    # y15, y16,
                    y17, y18, y19,
                    # y20, y21, y22,
                    y23, y24, y25, y26, y27, y28, y29,
                    y30, y31
                ]

            else:
                images = [x]
                masks  = [y]

            idx = 0
            # keep the save loop inside the size check so images/masks hold the
            # augmented arrays rather than the original path lists
            for i, m in zip(images, masks):
                i = cv2.resize(i, size)
                m = cv2.resize(m, size)

                if len(images) == 1:
                    tmp_image_name = f"{image_name}.jpg"
                    tmp_mask_name  = f"{mask_name}.jpg"
                else:
                    tmp_image_name = f"{image_name}_{idx}.jpg"
                    tmp_mask_name  = f"{mask_name}_{idx}.jpg"

                image_path = os.path.join(save_path, "image/", tmp_image_name)
                mask_path  = os.path.join(save_path, "mask/", tmp_mask_name)

                cv2.imwrite(image_path, i)
                cv2.imwrite(mask_path, m)

                idx += 1
Example #20
File: main.py Project: bofei5675/AutoDrive
def main():
    args = parse_args()
    current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
    model_name = 'model_{}_stack_{}_feat_{}_g_{}_{}_' if args.prob <= 0 else 'model_aug_{}_stack_{}_feat_{}_g_{}_{}_'
    model_name += 'pt_' if args.pre_train else ''
    model_name += 'cbam_' if args.use_cbam else ''
    model_name += 'unsup_' if args.unsupervise != 0 else ''
    model_name += 'norm_' if args.normalized else ''
    save_dir = args.save_dir + model_name.format(args.model_type, args.num_stacks, args.num_features, args.gamma, args.loss_type)\
               + current_time + '/'
    train_images_dir = PATH + 'train_images/{}.jpg'
    train = pd.read_csv(PATH + 'train_fixed.csv')  # .sample(n=20).reset_index()
    train = remove_out_image_cars(train)

    if args.debug:
        train = train.iloc[:50, :]
    df_train, df_dev = train_test_split(train, test_size=args.val_size, random_state=42)
    df_dev.to_csv('val.csv', index=False)
    # Augmentation
    albu_list = [RandomBrightnessContrast(brightness_limit=(-0.3, 0.3), contrast_limit=(-0.3, 0.3), p=0.3),
                 RandomGamma(p=0.2), HueSaturationValue(p=0.3), RGBShift(p=0.3), MotionBlur(p=0.1), Blur(p=0.1),
                 GaussNoise(var_limit=(20, 100), p=0.2),
                 ChannelShuffle(p=0.2),
                 #Normalize(mean=[145.3834, 136.9748, 122.7390], std=[95.1996, 94.6686, 85.9170])
                 ]

    transform = Compose(albu_list, p=args.prob)

    # Create dataset objects
    train_dataset = CarDataset(df_train, train_images_dir, sigma=args.sigma, training=True, transform=transform,
                               normalized=args.normalized)
    dev_dataset = CarDataset(df_dev, train_images_dir, sigma=args.sigma, training=False, normalized=args.normalized)
    BATCH_SIZE = args.batch_size

    # Create data generators - they will produce batches
    # transform not used yet
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
    dev_loader = DataLoader(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
    # Gets the GPU if there is one, otherwise the cpu
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    print('Running on', torch.cuda.get_device_name(), 'x', torch.cuda.device_count())

    if args.checkpoint:
        model, start_epoch = load_checkpoints(args)
        save_dir = args.checkpoint + '/'
        n_epochs = start_epoch + args.epoch
        history = pd.read_csv(os.path.join(save_dir, 'history.csv'))
        history = history.iloc[: start_epoch + 1]
        args.lr = args.lr / 10
    else:
        start_epoch = 0
        n_epochs = args.epoch
        if args.model_type == 'UNet':
            model = MyUNet(args.num_classes).to(device)
        elif args.model_type == 'HG':
            model = HourglassNet(nStacks=args.num_stacks, nModules=1, nFeat=args.num_features, nClasses=args.num_classes)
            model.cuda()
        elif args.model_type == 'HG2':
            model = PoseNet(nstack=args.num_stacks, inp_dim=args.num_features,
                            oup_dim=args.num_classes, use_cbam=args.use_cbam)
            model = model.cuda()
            if args.num_stacks <= 2 and args.pre_train:
                save = torch.load('./weights/checkpoint_2hg.pt')
            elif args.pre_train:
                save = torch.load('./weights/checkpoint_8hg.pt')
            save = save['state_dict']
            # print(model)
            #  print(list(save.keys()))
            # print(model.state_dict().keys())
            load_my_state_dict(model, save)
            del save

        elif args.model_type == 'LHG':
            heads = {'hm': 8}
            model = create_model('hourglass', heads, 256)
            model = model.cuda()
            if args.pre_train:
                model_dir = './weights/ctdet_coco_hg.pth'
                load_model(model, model_dir)
        elif args.model_type in ['res_34', 'res_50', 'res_101', 'res_152']:
            heads = {'hm': 8}
            model = create_model(args.model_type, heads, 0)
            model = model.cuda()

        history = pd.DataFrame()

    if torch.cuda.device_count() > 1 and not isinstance(model, nn.DataParallel):
        model = nn.DataParallel(model)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[5, 10, 15, 20, 25, 30, 35, 40, 45, 50], gamma=0.5)
    # exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=max(n_epochs, 10) * len(train_loader) // 3, gamma=0.1)
    best_loss = 1e6
    # save configuration
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
        with open(save_dir + 'config.txt', 'w') as f:
            f.write(str(args))
    # unsupervise part
    test_images_dir = PATH + 'test_images/{}.jpg'
    test = pd.read_csv(PATH + 'sample_submission.csv')
    test = test.sample(n=train.shape[0], replace=True)#.reset_index()
    transform_test = Compose(albu_list, p=1)
    test_dataset = CarDatasetUnsup(test, test_images_dir, sigma=args.sigma, training= args.unsupervise != 0, transform=transform_test,
                                   normalized=args.normalized)
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)

    for epoch in range(start_epoch + 1, n_epochs):
        torch.cuda.empty_cache()
        gc.collect()
        train_loss, train_final_loss = train_model(save_dir, model, epoch, train_loader, test_loader, device,
                                                   optimizer, history,
                                                    args)
        best_loss, eval_loss, clf_losses, regr_losses = evaluate_model(model, epoch, dev_loader, device, best_loss, save_dir, history, args)
        cur_lr = optimizer.state_dict()['param_groups'][0]['lr']
        with open(save_dir + 'log.txt', 'a+') as f:
            line = 'Epoch: {}; Train total loss: {:.3f}; Train final loss: {:.3f}; Eval final loss: {:.3f}; Clf loss: {:.3f}; Regr loss: {:.3f}; Best eval loss: {:.3f}; LR: {}\n' \
                .format(epoch,
                        train_loss,
                        train_final_loss,
                        eval_loss,
                        clf_losses,
                        regr_losses,
                        best_loss,
                        cur_lr)
            f.write(line)
        history.to_csv(save_dir + 'history.csv', index=False)
        scheduler.step()
Example #21
# BlurringTfms and StyleTfms are defined above this excerpt (their definitions are cut off)

WeatherTfms = RandomSunFlare(src_radius=80, p=0.1)

NoiseTfms = OneOf(
    [
        GaussNoise(p=0.6),
        IAAAdditiveGaussianNoise(p=0.4),  # stronger
        JpegCompression(quality_lower=25, quality_upper=55, p=0.2)
    ],
    p=0.25)

ColorTonesTfms = OneOf([ToSepia(), ToGray()], p=0.3)

ColorChannelTfms = OneOf(
    [ChannelShuffle(),
     HueSaturationValue(val_shift_limit=5),
     RGBShift()],
    p=0.3)

LightingTfms = OneOf(
    [RandomContrast(p=0.1),
     RandomBrightness(p=0.1),
     CLAHE(p=0.8)], p=0.3)

OtherTfms = FancyPCA(alpha=0.4, p=0.4)

# Cell
Tfms = Compose([
    BlurringTfms, StyleTfms, WeatherTfms, NoiseTfms, ColorTonesTfms,
    ColorChannelTfms, LightingTfms, OtherTfms
])
Example #22
import cv2
import torch
import os
from torch.nn import functional as F
from torch.utils.data.sampler import SubsetRandomSampler
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
import segmentation_models_pytorch as smp
from albumentations import CLAHE, HorizontalFlip, Compose, RandomBrightnessContrast, RandomGamma, Resize, \
    ChannelShuffle, ShiftScaleRotate, VerticalFlip, Normalize
from albumentations.pytorch.transforms import ToTensorV2
from pytorch_lightning.callbacks import LearningRateLogger, ModelCheckpoint

aug = Compose([
    Resize(480, 640),
    ChannelShuffle(p=0.5),
    HorizontalFlip(p=0.5),
    VerticalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.1,
                     scale_limit=0.1,
                     rotate_limit=45,
                     p=0.7,
                     border_mode=cv2.BORDER_CONSTANT),
    CLAHE(p=0.1),
    RandomBrightnessContrast(p=0.8),
    RandomGamma(p=0.5),
    Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ToTensorV2()
])
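A sketch of how the aug pipeline above might be applied to a single image (the file path is hypothetical; cv2 is already imported at the top of this snippet):

img = cv2.imread("sample.jpg")              # hypothetical path
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
sample = aug(image=img)
tensor = sample["image"]                    # float32 tensor of shape (3, 480, 640)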

Example #23
    def __init__(
        self,
        prob=0,
        Flip_prob=0,
        HueSaturationValue_prob=0,
        RandomBrightnessContrast_prob=0,
        crop_prob=0,
        randomrotate90_prob=0,
        elastictransform_prob=0,
        gridistortion_prob=0,
        opticaldistortion_prob=0,
        verticalflip_prob=0,
        horizontalflip_prob=0,
        randomgamma_prob=0,
        CoarseDropout_prob=0,
        RGBShift_prob=0,
        MotionBlur_prob=0,
        MedianBlur_prob=0,
        GaussianBlur_prob=0,
        GaussNoise_prob=0,
        ChannelShuffle_prob=0,
        ColorJitter_prob=0,
    ):
        super().__init__()

        self.prob = prob
        self.randomrotate90_prob = randomrotate90_prob
        self.elastictransform_prob = elastictransform_prob

        self.transforms = al.Compose(
            [
                transforms.RandomRotate90(p=randomrotate90_prob),
                transforms.Flip(p=Flip_prob),
                transforms.HueSaturationValue(p=HueSaturationValue_prob),
                transforms.RandomBrightnessContrast(
                    p=RandomBrightnessContrast_prob),
                transforms.Transpose(),
                OneOf(
                    [
                        transforms.RandomCrop(220, 220, p=0.5),
                        transforms.CenterCrop(220, 220, p=0.5),
                    ],
                    p=crop_prob,
                ),
                ElasticTransform(
                    p=elastictransform_prob,
                    alpha=120,
                    sigma=120 * 0.05,
                    alpha_affine=120 * 0.03,
                ),
                GridDistortion(p=gridistortion_prob),
                OpticalDistortion(p=opticaldistortion_prob,
                                  distort_limit=2,
                                  shift_limit=0.5),
                VerticalFlip(p=verticalflip_prob),
                HorizontalFlip(p=horizontalflip_prob),
                RandomGamma(p=randomgamma_prob),
                RGBShift(p=RGBShift_prob),
                MotionBlur(p=MotionBlur_prob, blur_limit=7),
                MedianBlur(p=MedianBlur_prob, blur_limit=9),
                GaussianBlur(p=GaussianBlur_prob, blur_limit=9),
                GaussNoise(p=GaussNoise_prob),
                ChannelShuffle(p=ChannelShuffle_prob),
                CoarseDropout(p=CoarseDropout_prob,
                              max_holes=8,
                              max_height=32,
                              max_width=32),
                ColorJitter(p=ColorJitter_prob)
                # transforms.Resize(352, 352),
                # transforms.Normalize(
                #     mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                # ),
            ],
            p=self.prob,
        )
Example #24
def augment_data(image_paths, mask_paths, output_dir):

    if not os.path.exists(output_dir + '/images_aug2'):
        os.makedirs(output_dir + '/images_aug2')
    if not os.path.exists(output_dir + '/masks_aug2'):
        os.makedirs(output_dir + '/masks_aug2')

    for image, mask in tqdm(zip(image_paths, mask_paths),
                            total=len(image_paths)):
        images_aug = []
        masks_aug = []
        image_name = Path(image).stem
        mask_name = Path(mask).stem

        x, y = read_single(image, mask)
        mask_density = np.count_nonzero(y)

        ## Augmenting only images with Gloms
        if (mask_density > 0):

            try:
                h, w, c = x.shape
            except Exception as e:
                image = image[:-1]
                x, y = read_single(image, mask)
                h, w, c = x.shape

            aug = CLAHE(clip_limit=1.0,
                        tile_grid_size=(8, 8),
                        always_apply=False,
                        p=1)
            augmented = aug(image=x, mask=y)
            x0 = augmented['image']
            y0 = augmented['mask']

            ## ElasticTransform
            aug = ElasticTransform(p=1,
                                   alpha=120,
                                   sigma=512 * 0.05,
                                   alpha_affine=512 * 0.03)
            augmented = aug(image=x, mask=y)
            x1 = augmented['image']
            y1 = augmented['mask']

            ## Grid Distortion
            aug = GridDistortion(p=1)
            augmented = aug(image=x, mask=y)
            x2 = augmented['image']
            y2 = augmented['mask']

            ## Optical Distortion
            aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            augmented = aug(image=x, mask=y)
            x3 = augmented['image']
            y3 = augmented['mask']

            ## Horizontal Flip
            aug = HorizontalFlip(p=1)
            augmented = aug(image=x, mask=y)
            x4 = augmented['image']
            y4 = augmented['mask']

            ## Random Brightness and Contrast
            aug = RandomBrightnessContrast(p=1)
            augmented = aug(image=x, mask=y)
            x5 = augmented['image']
            y5 = augmented['mask']

            aug = RandomGamma(p=1)
            augmented = aug(image=x, mask=y)
            x6 = augmented['image']
            y6 = augmented['mask']

            aug = HueSaturationValue(p=1)
            augmented = aug(image=x, mask=y)
            x7 = augmented['image']
            y7 = augmented['mask']

            aug = RGBShift(p=1)
            augmented = aug(image=x, mask=y)
            x8 = augmented['image']
            y8 = augmented['mask']

            aug = MedianBlur(p=1, blur_limit=5)
            augmented = aug(image=x, mask=y)
            x9 = augmented['image']
            y9 = augmented['mask']

            aug = GaussianBlur(p=1, blur_limit=3)
            augmented = aug(image=x, mask=y)
            x10 = augmented['image']
            y10 = augmented['mask']

            aug = GaussNoise(p=1)
            augmented = aug(image=x, mask=y)
            x11 = augmented['image']
            y11 = augmented['mask']

            aug = ChannelShuffle(p=1)
            augmented = aug(image=x, mask=y)
            x12 = augmented['image']
            y12 = augmented['mask']

            aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
            augmented = aug(image=x, mask=y)
            x13 = augmented['image']
            y13 = augmented['mask']

            images_aug.extend(
                [x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13])

            masks_aug.extend(
                [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11, y12, y13])

            idx = 0
            for i, m in zip(images_aug, masks_aug):
                tmp_image_name = f"{image_name}_{idx}.png"
                tmp_mask_name = f"{mask_name}_{idx}.png"

                image_path = os.path.join(output_dir + "/images_aug2/",
                                          tmp_image_name)
                mask_path = os.path.join(output_dir + "/masks_aug2/",
                                         tmp_mask_name)

                cv2.imwrite(image_path, i)
                cv2.imwrite(mask_path, m)

                idx += 1

    return images_aug, masks_aug
def transform(image, mask, image_name, mask_name):

    x, y = image, mask

    rand = random.uniform(0, 1)
    if (rand > 0.5):

        images_name = [f"{image_name}"]
        masks_name = [f"{mask_name}"]
        images_aug = [x]
        masks_aug = [y]

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

        return imagedict, masksdict

    mask_density = np.count_nonzero(y)

    ## Augmenting only images with Gloms
    if (mask_density > 0):
        try:
            h, w, c = x.shape
        except Exception as e:
            image = image[:-1]
            x, y = image, mask
            h, w, c = x.shape

        aug = Blur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x0 = augmented['image']
        y0 = augmented['mask']

        #    aug = CenterCrop(p=1, height=32, width=32)
        #    augmented = aug(image=x, mask=y)
        #    x1 = augmented['image']
        #    y1 = augmented['mask']

        ## Horizontal Flip
        aug = HorizontalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x2 = augmented['image']
        y2 = augmented['mask']

        aug = VerticalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x3 = augmented['image']
        y3 = augmented['mask']

        #      aug = Normalize(p=1)
        #      augmented = aug(image=x, mask=y)
        #      x4 = augmented['image']
        #      y4 = augmented['mask']

        aug = Transpose(p=1)
        augmented = aug(image=x, mask=y)
        x5 = augmented['image']
        y5 = augmented['mask']

        aug = RandomGamma(p=1)
        augmented = aug(image=x, mask=y)
        x6 = augmented['image']
        y6 = augmented['mask']

        ## Optical Distortion
        aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        augmented = aug(image=x, mask=y)
        x7 = augmented['image']
        y7 = augmented['mask']

        ## Grid Distortion
        aug = GridDistortion(p=1)
        augmented = aug(image=x, mask=y)
        x8 = augmented['image']
        y8 = augmented['mask']

        aug = RandomGridShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x9 = augmented['image']
        y9 = augmented['mask']

        aug = HueSaturationValue(p=1)
        augmented = aug(image=x, mask=y)
        x10 = augmented['image']
        y10 = augmented['mask']

        #        aug = PadIfNeeded(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x11 = augmented['image']
        #        y11 = augmented['mask']

        aug = RGBShift(p=1)
        augmented = aug(image=x, mask=y)
        x12 = augmented['image']
        y12 = augmented['mask']

        ## Random Brightness
        aug = RandomBrightness(p=1)
        augmented = aug(image=x, mask=y)
        x13 = augmented['image']
        y13 = augmented['mask']

        ## Random  Contrast
        aug = RandomContrast(p=1)
        augmented = aug(image=x, mask=y)
        x14 = augmented['image']
        y14 = augmented['mask']

        #aug = MotionBlur(p=1)
        #augmented = aug(image=x, mask=y)
        #   x15 = augmented['image']
        #  y15 = augmented['mask']

        aug = MedianBlur(p=1, blur_limit=5)
        augmented = aug(image=x, mask=y)
        x16 = augmented['image']
        y16 = augmented['mask']

        aug = GaussianBlur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x17 = augmented['image']
        y17 = augmented['mask']

        aug = GaussNoise(p=1)
        augmented = aug(image=x, mask=y)
        x18 = augmented['image']
        y18 = augmented['mask']

        aug = GlassBlur(p=1)
        augmented = aug(image=x, mask=y)
        x19 = augmented['image']
        y19 = augmented['mask']

        aug = CLAHE(clip_limit=1.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=1)
        augmented = aug(image=x, mask=y)
        x20 = augmented['image']
        y20 = augmented['mask']

        aug = ChannelShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x21 = augmented['image']
        y21 = augmented['mask']

        aug = ToGray(p=1)
        augmented = aug(image=x, mask=y)
        x22 = augmented['image']
        y22 = augmented['mask']

        aug = ToSepia(p=1)
        augmented = aug(image=x, mask=y)
        x23 = augmented['image']
        y23 = augmented['mask']

        aug = JpegCompression(p=1)
        augmented = aug(image=x, mask=y)
        x24 = augmented['image']
        y24 = augmented['mask']

        aug = ImageCompression(p=1)
        augmented = aug(image=x, mask=y)
        x25 = augmented['image']
        y25 = augmented['mask']

        aug = Cutout(p=1)
        augmented = aug(image=x, mask=y)
        x26 = augmented['image']
        y26 = augmented['mask']

        #       aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
        #       augmented = aug(image=x, mask=y)
        #       x27 = augmented['image']
        #       y27 = augmented['mask']

        #       aug = ToFloat(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x28 = augmented['image']
        #       y28 = augmented['mask']

        aug = FromFloat(p=1)
        augmented = aug(image=x, mask=y)
        x29 = augmented['image']
        y29 = augmented['mask']

        ## Random Brightness and Contrast
        aug = RandomBrightnessContrast(p=1)
        augmented = aug(image=x, mask=y)
        x30 = augmented['image']
        y30 = augmented['mask']

        aug = RandomSnow(p=1)
        augmented = aug(image=x, mask=y)
        x31 = augmented['image']
        y31 = augmented['mask']

        aug = RandomRain(p=1)
        augmented = aug(image=x, mask=y)
        x32 = augmented['image']
        y32 = augmented['mask']

        aug = RandomFog(p=1)
        augmented = aug(image=x, mask=y)
        x33 = augmented['image']
        y33 = augmented['mask']

        aug = RandomSunFlare(p=1)
        augmented = aug(image=x, mask=y)
        x34 = augmented['image']
        y34 = augmented['mask']

        aug = RandomShadow(p=1)
        augmented = aug(image=x, mask=y)
        x35 = augmented['image']
        y35 = augmented['mask']

        aug = Lambda(p=1)
        augmented = aug(image=x, mask=y)
        x36 = augmented['image']
        y36 = augmented['mask']

        aug = ChannelDropout(p=1)
        augmented = aug(image=x, mask=y)
        x37 = augmented['image']
        y37 = augmented['mask']

        aug = ISONoise(p=1)
        augmented = aug(image=x, mask=y)
        x38 = augmented['image']
        y38 = augmented['mask']

        aug = Solarize(p=1)
        augmented = aug(image=x, mask=y)
        x39 = augmented['image']
        y39 = augmented['mask']

        aug = Equalize(p=1)
        augmented = aug(image=x, mask=y)
        x40 = augmented['image']
        y40 = augmented['mask']

        aug = Posterize(p=1)
        augmented = aug(image=x, mask=y)
        x41 = augmented['image']
        y41 = augmented['mask']

        aug = Downscale(p=1)
        augmented = aug(image=x, mask=y)
        x42 = augmented['image']
        y42 = augmented['mask']

        aug = MultiplicativeNoise(p=1)
        augmented = aug(image=x, mask=y)
        x43 = augmented['image']
        y43 = augmented['mask']

        aug = FancyPCA(p=1)
        augmented = aug(image=x, mask=y)
        x44 = augmented['image']
        y44 = augmented['mask']

        #       aug = MaskDropout(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x45 = augmented['image']
        #       y45 = augmented['mask']

        aug = GridDropout(p=1)
        augmented = aug(image=x, mask=y)
        x46 = augmented['image']
        y46 = augmented['mask']

        aug = ColorJitter(p=1)
        augmented = aug(image=x, mask=y)
        x47 = augmented['image']
        y47 = augmented['mask']

        ## ElasticTransform
        aug = ElasticTransform(p=1,
                               alpha=120,
                               sigma=512 * 0.05,
                               alpha_affine=512 * 0.03)
        augmented = aug(image=x, mask=y)
        x50 = augmented['image']
        y50 = augmented['mask']

        aug = CropNonEmptyMaskIfExists(p=1, height=22, width=32)
        augmented = aug(image=x, mask=y)
        x51 = augmented['image']
        y51 = augmented['mask']

        aug = IAAAffine(p=1)
        augmented = aug(image=x, mask=y)
        x52 = augmented['image']
        y52 = augmented['mask']

        #        aug = IAACropAndPad(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x53 = augmented['image']
        #        y53 = augmented['mask']

        aug = IAAFliplr(p=1)
        augmented = aug(image=x, mask=y)
        x54 = augmented['image']
        y54 = augmented['mask']

        aug = IAAFlipud(p=1)
        augmented = aug(image=x, mask=y)
        x55 = augmented['image']
        y55 = augmented['mask']

        aug = IAAPerspective(p=1)
        augmented = aug(image=x, mask=y)
        x56 = augmented['image']
        y56 = augmented['mask']

        aug = IAAPiecewiseAffine(p=1)
        augmented = aug(image=x, mask=y)
        x57 = augmented['image']
        y57 = augmented['mask']

        aug = LongestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x58 = augmented['image']
        y58 = augmented['mask']

        aug = NoOp(p=1)
        augmented = aug(image=x, mask=y)
        x59 = augmented['image']
        y59 = augmented['mask']

        #       aug = RandomCrop(p=1, height=22, width=22)
        #       augmented = aug(image=x, mask=y)
        #       x61 = augmented['image']
        #       y61 = augmented['mask']

        #      aug = RandomResizedCrop(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x63 = augmented['image']
        #      y63 = augmented['mask']

        aug = RandomScale(p=1)
        augmented = aug(image=x, mask=y)
        x64 = augmented['image']
        y64 = augmented['mask']

        #      aug = RandomSizedCrop(p=1, height=22, width=20, min_max_height = [32,32])
        #      augmented = aug(image=x, mask=y)
        #      x66 = augmented['image']
        #      y66 = augmented['mask']

        #      aug = Resize(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x67 = augmented['image']
        #      y67 = augmented['mask']

        aug = Rotate(p=1)
        augmented = aug(image=x, mask=y)
        x68 = augmented['image']
        y68 = augmented['mask']

        aug = ShiftScaleRotate(p=1)
        augmented = aug(image=x, mask=y)
        x69 = augmented['image']
        y69 = augmented['mask']

        aug = SmallestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x70 = augmented['image']
        y70 = augmented['mask']

        images_aug.extend([
            x, x0, x2, x3, x5, x6, x7, x8, x9, x10, x12, x13, x14, x16, x17,
            x18, x19, x20, x21, x22, x23, x24, x25, x26, x29, x30, x31, x32,
            x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x46,
            x47, x50, x51, x52, x54, x55, x56, x57, x58, x59, x64, x68, x69,
            x70
        ])

        masks_aug.extend([
            y, y0, y2, y3, y5, y6, y7, y8, y9, y10, y12, y13, y14, y16, y17,
            y18, y19, y20, y21, y22, y23, y24, y25, y26, y29, y30, y31, y32,
            y33, y34, y35, y36, y37, y38, y39, y40, y41, y42, y43, y44, y46,
            y47, y50, y51, y52, y54, y55, y56, y57, y58, y59, y64, y68, y69,
            y70
        ])

        idx = -1
        images_name = []
        masks_name = []
        for i, m in zip(images_aug, masks_aug):
            if idx == -1:
                tmp_image_name = f"{image_name}"
                tmp_mask_name = f"{mask_name}"
            else:
                tmp_image_name = f"{image_name}_{smalllist[idx]}"
                tmp_mask_name = f"{mask_name}_{smalllist[idx]}"
            images_name.append(tmp_image_name)  # append the whole name; extend() would add single characters
            masks_name.append(tmp_mask_name)
            idx += 1

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

    return imagedict, masksdict
    VerticalFlip(p=0.1),
    Blur(blur_limit=16, p=0.1),
    RandomGamma(gamma_limit=(60, 140), p=0.1),
    Rotate(limit=35, p=0.15),
    ShiftScaleRotate(rotate_limit=35, p=0.2),
    OpticalDistortion(distort_limit=1.0, shift_limit=1.0, p=0.2),
    HueSaturationValue(hue_shift_limit=20,
                       sat_shift_limit=30,
                       val_shift_limit=20,
                       p=0.2),
    RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=0.15),
    RandomBrightnessContrast(p=0.2),
    MotionBlur(blur_limit=7, p=0.2),
    GaussianBlur(blur_limit=7, p=0.15),
    CLAHE(p=0.05),
    ChannelShuffle(p=0.05),
    ToGray(p=0.1),
    ImageCompression(quality_lower=10, quality_upper=100, p=0.15),
    CoarseDropout(max_holes=32, max_height=12, max_width=12, p=0.05),
    Downscale(p=0.3),
    FancyPCA(alpha=0.4, p=0.1),
    Posterize(num_bits=4, p=0.03),
    Equalize(p=0.05),
    ISONoise(color_shift=(0.1, 0.5), p=0.07),
    RandomFog(p=0.03)
]

BACKGROUNDS_PATHS = glob(BACKGROUNDS_WILDRCARD)
BACKGROUNDS = [
    load_image(path, cv.COLOR_BGR2RGB) for path in BACKGROUNDS_PATHS
]
Example #27

@pytest.mark.parametrize(
    "transforms",
    [OneOf([Sequential([HorizontalFlip(p=1)])], p=1), SomeOf([Sequential([HorizontalFlip(p=1)])], n=1, p=1)],
)
def test_choice_inner_compositions(transforms):
    """Check that the inner composition is selected without errors."""
    image = np.empty([10, 10, 3], dtype=np.uint8)
    transforms(image=image)


@pytest.mark.parametrize(
    "transforms",
    [
        Compose([ChannelShuffle(p=1)], p=1),
        Compose([ChannelShuffle(p=0)], p=0),
    ],
)
def test_contiguous_output(transforms):
    image = np.empty([3, 24, 24], dtype=np.uint8).transpose(1, 2, 0)
    mask = np.empty([3, 24, 24], dtype=np.uint8).transpose(1, 2, 0)

    # check preconditions
    assert not image.flags["C_CONTIGUOUS"]
    assert not mask.flags["C_CONTIGUOUS"]

    # pipeline always outputs contiguous results
    data = transforms(image=image, mask=mask)

    # confirm output contiguous
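    # (completing the truncated snippet) both outputs should come back C-contiguous
    assert data["image"].flags["C_CONTIGUOUS"]
    assert data["mask"].flags["C_CONTIGUOUS"]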
예제 #28
0
def augment_data(images, masks, save_path, augment=True):
    """
    Apply a fixed set of augmentations to each image/mask pair and save the results.
    """

    crop_size = (256, 256)
    size = (2018, 2006)
    # Pair each image with its corresponding mask
    for image, mask in tqdm(zip(images, masks), total=len(images)):
        image_name = image.split("/")[-1].split(".")[0]
        mask_name = mask.split("/")[-1].split(".")[0]

        x, y = read_data(image, mask)
        # If reading the shape fails (e.g. a stray trailing character in the path),
        # strip it and retry
        try:
            h, w, c = x.shape   # image height, width and channel count
        except Exception as e:
            image = image[:-1]
            x, y = read_data(image, mask)
            h, w, c = x.shape

        # Apply the augmentations
        if augment:
            # Center Crop
            aug = CenterCrop(p=1, height=crop_size[1], width=crop_size[0])
            augmented = aug(image=x, mask=y)
            x1 = augmented['image']
            y1 = augmented['mask']

            # Crop
            x_min = 0
            y_min = 0
            x_max = x_min + crop_size[0]
            y_max = y_min + crop_size[1]

            aug = Crop(p=1, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max)
            augmented = aug(image=x, mask=y)
            x2 = augmented['image']
            y2 = augmented['mask']

            # Random Rotate 90 degree
            aug = RandomRotate90(p=1)
            augmented = aug(image=x, mask=y)
            x3 = augmented['image']
            y3 = augmented['mask']

            # Transpose
            aug = Transpose(p=1)
            augmented = aug(image=x, mask=y)
            x4 = augmented['image']
            y4 = augmented['mask']

            # ElasticTransform
            aug = ElasticTransform(p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03)
            augmented = aug(image=x, mask=y)
            x5 = augmented['image']
            y5 = augmented['mask']

            # Grid Distortion
            aug = GridDistortion(p=1)
            augmented = aug(image=x, mask=y)
            x6 = augmented['image']
            y6 = augmented['mask']

            # Optical Distortion
            aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
            augmented = aug(image=x, mask=y)
            x7 = augmented['image']
            y7 = augmented['mask']

            # Vertical Flip
            aug = VerticalFlip(p=1)
            augmented = aug(image=x, mask=y)
            x8 = augmented['image']
            y8 = augmented['mask']

            # Horizontal Flip
            aug = HorizontalFlip(p=1)
            augmented = aug(image=x, mask=y)
            x9 = augmented['image']
            y9 = augmented['mask']

            # Grayscale
            x10 = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
            y10 = y
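            # Note: COLOR_RGB2GRAY drops the channel axis, so x10 is a single-channel (H, W) array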

            # Grayscale Vertical Flip
            aug = VerticalFlip(p=1)
            augmented = aug(image=x10, mask=y10)
            x11 = augmented['image']
            y11 = augmented['mask']

            # Grayscale Horizontal Flip
            aug = HorizontalFlip(p=1)
            augmented = aug(image=x10, mask=y10)
            x12 = augmented['image']
            y12 = augmented['mask']

            # Grayscale Center Crop
            aug = CenterCrop(p=1, height=crop_size[1], width=crop_size[0])
            augmented = aug(image=x10, mask=y10)
            x13 = augmented['image']
            y13 = augmented['mask']

            # Random Brightness Contrast
            aug = RandomBrightnessContrast(p=1)
            augmented = aug(image=x, mask=y)
            x14 = augmented['image']
            y14 = augmented['mask']

            # Random Gamma
            aug = RandomGamma(p=1)
            augmented = aug(image=x, mask=y)
            x15 = augmented['image']
            y15 = augmented['mask']

            aug = HueSaturationValue(p=1)
            augmented = aug(image=x, mask=y)
            x16 = augmented['image']
            y16 = augmented['mask']

            aug = RGBShift(p=1)
            augmented = aug(image=x, mask=y)
            x17 = augmented['image']
            y17 = augmented['mask']

            aug = RandomBrightness(p=1)
            augmented = aug(image=x, mask=y)
            x18 = augmented['image']
            y18 = augmented['mask']

            aug = RandomContrast(p=1)
            augmented = aug(image=x, mask=y)
            x19 = augmented['image']
            y19 = augmented['mask']

            aug = MotionBlur(p=1, blur_limit=7)
            augmented = aug(image=x, mask=y)
            x20 = augmented['image']
            y20 = augmented['mask']

            aug = MedianBlur(p=1, blur_limit=10)
            augmented = aug(image=x, mask=y)
            x21 = augmented['image']
            y21 = augmented['mask']

            aug = GaussianBlur(p=1, blur_limit=10)
            augmented = aug(image=x, mask=y)
            x22 = augmented['image']
            y22 = augmented['mask']

            aug = GaussNoise(p=1)
            augmented = aug(image=x, mask=y)
            x23 = augmented['image']
            y23 = augmented['mask']

            aug = ChannelShuffle(p=1)
            augmented = aug(image=x, mask=y)
            x24 = augmented['image']
            y24 = augmented['mask']

            aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
            augmented = aug(image=x, mask=y)
            x25 = augmented['image']
            y25 = augmented['mask']

            aug_images = [
                x, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,
                x11, x12, x13, x14, x15, x16, x17, x18, x19, x20,
                x21, x22, x23, x24, x25
            ]
            aug_masks = [
                y, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10,
                y11, y12, y13, y14, y15, y16, y17, y18, y19, y20,
                y21, y22, y23, y24, y25
            ]

        else:
            aug_images = [x]
            aug_masks = [y]

        idx = 0
        # Resize and save every augmented image/mask pair
        for i, m in zip(aug_images, aug_masks):
            i = cv2.resize(i, size)
            m = cv2.resize(m, size)

            tmp_image_name = f"{image_name}_{idx}.jpg"
            tmp_mask_name = f"{mask_name}_{idx}.jpg"

            image_path = os.path.join(save_path, "image/", tmp_image_name)
            mask_path = os.path.join(save_path, "mask/", tmp_mask_name)

            # Save the pair (note: JPEG is lossy; PNG is usually preferred for masks)
            cv2.imwrite(image_path, i)
            cv2.imwrite(mask_path, m)

            idx += 1
    def __init__(self, config):
        super(AugmentedPair2, self).__init__(config)
        self.use_appearance_augmentation = config.get("data_augment_appearance", False)
        self.use_shape_augmentation = config.get("data_augment_shape", False)
        additional_targets = {
            "image{}".format(i): "image" for i in range(1, self.n_images)
        }
        p = 0.9
        appearance_augmentation = Compose(
            [
                OneOf(
                    [
                        MedianBlur(blur_limit=3, p=0.1),
                        Blur(blur_limit=3, p=0.1),
                    ],
                    p=0.5,
                ),
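                # Note: the following color-jitter OneOf block is repeated three times
                # in a row, so up to three color transforms can be stacked per call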
                OneOf(
                    [
                        RandomBrightnessContrast(p=0.3),
                        RGBShift(p=0.3),
                        HueSaturationValue(p=0.3),
                    ],
                    p=0.8,
                ),    
                OneOf(
                    [
                        RandomBrightnessContrast(p=0.3),
                        RGBShift(p=0.3),
                        HueSaturationValue(p=0.3),
                    ],
                    p=0.8,
                ),     
                OneOf(
                    [
                        RandomBrightnessContrast(p=0.3),
                        RGBShift(p=0.3),
                        HueSaturationValue(p=0.3),
                    ],
                    p=0.8,
                ),
                ToGray(p=0.1),  
                ChannelShuffle(p=0.3),
            ],
            p=p,
            additional_targets=additional_targets,
        )
        self.appearance_augmentation = appearance_augmentation  

        p = 0.9
        shape_augmentation = Compose(
            [
                HorizontalFlip(p=0.3),
                ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.25, rotate_limit=25, p=0.3, border_mode=cv2.BORDER_REPLICATE),
                OneOf([
                    # OpticalDistortion(p=0.3),
                    # GridDistortion(p=0.1),
                    IAAPiecewiseAffine(p=0.5),
                    ElasticTransform(p=0.5, border_mode=cv2.BORDER_REPLICATE)
                ], p=0.3),
            ],
            p=p,
            additional_targets=additional_targets,
        )
        self.shape_augmentation = shape_augmentation
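
# A minimal sketch of how a Compose built with additional_targets (as above) is
# typically invoked so that every extra image receives the same sampled parameters.
# The names paired_aug, frame0 and frame1 are illustrative, not from the snippet.
import numpy as np
from albumentations import Compose, HorizontalFlip

# "image1" is treated exactly like "image", so both frames get the same flip
paired_aug = Compose([HorizontalFlip(p=1.0)],
                     additional_targets={"image1": "image"})

frame0 = np.zeros((8, 8, 3), dtype=np.uint8)
frame1 = np.zeros((8, 8, 3), dtype=np.uint8)
out = paired_aug(image=frame0, image1=frame1)
aug0, aug1 = out["image"], out["image1"]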
예제 #30
0
# reconstructed import (the snippet begins mid-statement; names inferred from usage below)
from albumentations import (
    Compose, OneOf, Resize, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
    ShiftScaleRotate, RandomGamma, RandomBrightness, RandomContrast, Blur, MotionBlur,
    MedianBlur, GaussNoise, CenterCrop, IAAAdditiveGaussianNoise, OpticalDistortion,
    GridDistortion, ElasticTransform, RandomSizedCrop, ChannelShuffle, ToFloat)

albumentations_transform = Compose([
    Resize(256, 256),
    HorizontalFlip(p=0.5),
    RandomRotate90(p=0.5),
    OneOf([
        RandomContrast(),
        RandomGamma(),
        RandomBrightness(),
    ], p=0.3),
    Rotate(limit=45, p=0.5),
    VerticalFlip(p=0.5),
    ChannelShuffle(p=0.3),
    ShiftScaleRotate(shift_limit=0.0625,
                     scale_limit=0.1,
                     rotate_limit=45,
                     p=0.5),
    OneOf([
        ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
        GridDistortion(),
        OpticalDistortion(distort_limit=2, shift_limit=0.5),
    ],
          p=0.3),
    RandomSizedCrop(min_max_height=(128, 256), height=256, width=256, p=0.5),
    ToFloat(max_value=1)
])
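
# A brief usage sketch for the pipeline above (the `sample` array is illustrative,
# not from the original snippet). Note that ToFloat(max_value=1) divides by 1, so
# the output is float32 but the pixel values stay in the 0-255 range.
import numpy as np

sample = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
augmented = albumentations_transform(image=sample)["image"]
print(augmented.shape, augmented.dtype)  # (256, 256, 3) float32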