def __init__(
    self,
    prob=0.7,
    blur_prob=0.7,
    jitter_prob=0.7,
    rotate_prob=0.7,
    flip_prob=0.7,
):
    super().__init__()
    self.prob = prob
    self.blur_prob = blur_prob
    self.jitter_prob = jitter_prob
    self.rotate_prob = rotate_prob
    self.flip_prob = flip_prob
    self.transforms = al.Compose(
        [
            transforms.RandomRotate90(),
            transforms.Flip(),
            transforms.HueSaturationValue(),
            transforms.RandomBrightnessContrast(),
            transforms.Transpose(),
            OneOf(
                [
                    transforms.RandomCrop(220, 220, p=0.5),
                    transforms.CenterCrop(220, 220, p=0.5),
                ],
                p=0.5,
            ),
            # transforms.Resize(352, 352),
            # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ],
        p=self.prob,
    )
def get_presize_combine_transforms_V4():
    transforms_presize = A.Compose([
        transforms.PadIfNeeded(600, 800),
        geometric.Perspective(
            scale=[0, .1],
            pad_mode=cv2.BORDER_REFLECT,
            interpolation=cv2.INTER_AREA, p=.3),
        transforms.Flip(),
        geometric.ShiftScaleRotate(interpolation=cv2.INTER_LANCZOS4, p=0.95, scale_limit=0.0),
        crops.RandomResizedCrop(
            TARGET_SIZE, TARGET_SIZE,
            scale=(config['rrc_scale_min'], config['rrc_scale_max']),
            ratio=(.70, 1.4),
            interpolation=cv2.INTER_CUBIC, p=1.0),
        transforms.Transpose()
        # rotate.Rotate(interpolation=cv2.INTER_LANCZOS4, p=0.99),
    ])

    transforms_postsize = A.Compose([
        # imgaug.IAAPiecewiseAffine(),
        transforms.CoarseDropout(),
        transforms.CLAHE(p=.1),
        transforms.RandomToneCurve(scale=.1, p=0.2),
        transforms.RandomBrightnessContrast(
            brightness_limit=.1,
            contrast_limit=0.4,
            p=.8),
        transforms.HueSaturationValue(
            hue_shift_limit=20,
            sat_shift_limit=50,
            val_shift_limit=0,
            p=0.5),
        transforms.Equalize(p=0.05),
        transforms.FancyPCA(p=0.05),
        transforms.RandomGridShuffle(p=0.1),
        A.OneOf([
            transforms.MotionBlur(blur_limit=(3, 9)),
            transforms.GaussianBlur(),
            transforms.MedianBlur()
        ], p=0.1),
        transforms.ISONoise(p=.2),
        transforms.GaussNoise(var_limit=127., p=.3),
        A.OneOf([
            transforms.GridDistortion(interpolation=cv2.INTER_AREA, distort_limit=[0.7, 0.7], p=0.5),
            transforms.OpticalDistortion(interpolation=cv2.INTER_AREA, p=.3),
        ], p=.3),
        geometric.ElasticTransform(alpha=4, sigma=4, alpha_affine=4,
                                   interpolation=cv2.INTER_AREA, p=0.3),
        transforms.CoarseDropout(),
        transforms.Normalize(),
        ToTensorV2()
    ])
    return transforms_presize, transforms_postsize
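# Usage sketch (an assumption, not taken from the original repository): the two Compose
# pipelines returned by get_presize_combine_transforms_V4 are meant to be applied in
# sequence -- the presize stage on the raw uint8 image, the postsize stage on its output.
# Because the postsize stage ends with Normalize + ToTensorV2, the final result is a
# torch.Tensor. `apply_combined_transforms` and its `image` argument are hypothetical names.
import numpy as np

def apply_combined_transforms(image: np.ndarray):
    # `image` is an HxWx3 uint8 RGB array (e.g. cv2.imread followed by cv2.cvtColor).
    presize, postsize = get_presize_combine_transforms_V4()
    resized = presize(image=image)["image"]   # geometric / resizing stage
    return postsize(image=resized)["image"]   # photometric stage + normalization + tensor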
def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0., ft='coco'):
    # Albumentations pipeline shared by the detection and segmentation tasks.
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            transforms.LongestMaxSize(max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            transforms.RandomResizedCrop(height=height, width=width, p=0.3),
            transforms.Flip(),
            transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15, g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if phase == 'test':
        list_transforms.extend([
            albu.Resize(height=height, width=width)
        ])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensorV2()
    ])
    if phase == 'test':
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(format=ft,
                                                    min_area=min_area,
                                                    min_visibility=min_visibility,
                                                    label_fields=['category_id']))
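# Usage sketch (an assumption, not part of the original file): since the train pipeline is
# built with BboxParams and label_fields=['category_id'], it must be called with the boxes
# and their labels alongside the image, and the transformed boxes/labels come back under the
# same keys. Box layout follows the chosen `ft` format ('coco' = [x_min, y_min, w, h]).
# `apply_detection_augmentation` is a hypothetical helper name.
def apply_detection_augmentation(image, bboxes, category_ids):
    train_tfms = get_augumentation('train', width=512, height=512, ft='coco')
    augmented = train_tfms(image=image, bboxes=bboxes, category_id=category_ids)
    return augmented['image'], augmented['bboxes'], augmented['category_id']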
def get_presize_combine_tune_transforms():
    transforms_presize = A.Compose([
        transforms.Transpose(),
        transforms.Flip(),
        # transforms.PadIfNeeded(600, 800),
        crops.RandomResizedCrop(
            TARGET_SIZE, TARGET_SIZE,
            scale=(.75, 1),
            interpolation=cv2.INTER_CUBIC, p=1.0),
        rotate.Rotate(interpolation=cv2.INTER_LANCZOS4, p=0.99),
    ])

    transforms_postsize = A.Compose([
        transforms.CoarseDropout(),
        # transforms.CLAHE(p=.1),
        transforms.RandomToneCurve(scale=.1),
        transforms.RandomBrightnessContrast(
            brightness_limit=.1,
            contrast_limit=0.2,
            p=.7),
        transforms.HueSaturationValue(
            hue_shift_limit=20,
            sat_shift_limit=60,
            val_shift_limit=0,
            p=0.6),
        # transforms.Equalize(p=0.1),
        # transforms.FancyPCA(p=0.05),
        # transforms.RandomGridShuffle(p=0.1),
        # A.OneOf([
        #     transforms.MotionBlur(blur_limit=(3, 9)),
        #     transforms.GaussianBlur(),
        #     transforms.MedianBlur()
        # ], p=0.2),
        transforms.ISONoise(p=.3),
        transforms.GaussNoise(var_limit=255., p=.3),
        # A.OneOf([
        #     transforms.GridDistortion(interpolation=cv2.INTER_AREA, distort_limit=[0.7, 0.7], p=0.5),
        #     transforms.OpticalDistortion(interpolation=cv2.INTER_AREA, p=.3),
        # ], p=.3),
        geometric.ElasticTransform(alpha=4, sigma=100, alpha_affine=100,
                                   interpolation=cv2.INTER_AREA, p=0.3),
        transforms.CoarseDropout(),
        transforms.Normalize(),
        ToTensorV2()
    ])
    return transforms_presize, transforms_postsize
def get_valid_transforms():
    return A.Compose([
        transforms.Transpose(),
        transforms.PadIfNeeded(600, 800),
        rotate.Rotate(interpolation=cv2.INTER_LANCZOS4, p=0.90),
        crops.RandomResizedCrop(
            TARGET_SIZE_VALID, TARGET_SIZE_VALID,
            scale=(.75, 1),
            interpolation=cv2.INTER_CUBIC, p=1.0),
        transforms.Flip(),
        # transforms.RandomToneCurve(scale=.1),
        # transforms.RandomBrightnessContrast(brightness_limit=0.0, contrast_limit=0.3, p=.7),
        # transforms.HueSaturationValue(hue_shift_limit=10,
        #                               sat_shift_limit=10,
        #                               val_shift_limit=5, p=0.6),
        transforms.Normalize(),
        ToTensorV2()
    ])
def get_train_transforms():
    return A.Compose([
        transforms.PadIfNeeded(600, 800),
        geometric.ShiftScaleRotate(interpolation=cv2.INTER_LANCZOS4, p=0.99, scale_limit=0.8),
        geometric.Perspective(pad_mode=cv2.BORDER_REFLECT, interpolation=cv2.INTER_AREA),
        crops.RandomResizedCrop(
            TARGET_SIZE, TARGET_SIZE,
            scale=(config['rrc_scale_min'], config['rrc_scale_max']),
            interpolation=cv2.INTER_CUBIC, p=1.0),
        transforms.Transpose(),
        transforms.Flip(),
        transforms.CoarseDropout(),
        transforms.CLAHE(p=.1),
        transforms.RandomToneCurve(scale=.1),
        transforms.RandomBrightnessContrast(
            brightness_limit=.1,
            contrast_limit=0.3,
            p=.7),
        transforms.HueSaturationValue(
            hue_shift_limit=20,
            sat_shift_limit=60,
            val_shift_limit=0,
            p=0.6),
        transforms.RandomGridShuffle(p=0.1),
        A.OneOf([
            transforms.MotionBlur(blur_limit=(3, 9)),
            transforms.GaussianBlur(),
            transforms.MedianBlur()
        ], p=0.2),
        transforms.ISONoise(p=.3),
        transforms.GaussNoise(var_limit=255., p=.3),
        A.OneOf([
            transforms.GridDistortion(interpolation=cv2.INTER_AREA, distort_limit=[0.7, 0.7], p=0.5),
            transforms.OpticalDistortion(interpolation=cv2.INTER_AREA, p=.3),
        ], p=.3),
        geometric.ElasticTransform(alpha=4, sigma=100, alpha_affine=100,
                                   interpolation=cv2.INTER_AREA, p=0.3),
        transforms.CoarseDropout(),
        transforms.Normalize(),
        ToTensorV2()
    ])
def augmentation(in_path='./train/raw_images/', out_path='./train/aug_images/'):
    for filename in os.listdir(in_path):
        hf = transforms.HorizontalFlip(always_apply=True)
        vf = transforms.VerticalFlip(always_apply=True)
        tp = transforms.Transpose(always_apply=True)
        rt = transforms.Rotate(limit=80, always_apply=True)

        image = np.array(Image.open(in_path + filename))
        hf_image = hf(image=image)['image']
        vf_image = vf(image=image)['image']
        tp_image = tp(image=image)['image']
        rt_image = rt(image=image)['image']

        count = 1
        for img in [image, hf_image, vf_image, tp_image, rt_image]:
            if len(img.shape) == 2:
                # Grayscale input: convert() returns a new image, so reassign it
                # (the original discarded the return value).
                img = Image.fromarray(img).convert(mode='RGB')
            else:
                img = Image.fromarray(img, mode='RGB')
            img.save(out_path + filename.replace('.jpg', '_' + str(count) + '.jpg'))
            count += 1
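# Usage sketch (hypothetical paths): augmentation() writes the original image plus four
# variants (horizontal flip, vertical flip, transpose, random rotation within +/-80 degrees)
# into out_path, which is assumed to already exist -- the function does not create it.
import os

os.makedirs('./train/aug_images/', exist_ok=True)
augmentation(in_path='./train/raw_images/', out_path='./train/aug_images/')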
def __init__(
    self,
    prob=0,
    Flip_prob=0,
    HueSaturationValue_prob=0,
    RandomBrightnessContrast_prob=0,
    crop_prob=0,
    randomrotate90_prob=0,
    elastictransform_prob=0,
    gridistortion_prob=0,
    opticaldistortion_prob=0,
    verticalflip_prob=0,
    horizontalflip_prob=0,
    randomgamma_prob=0,
    CoarseDropout_prob=0,
    RGBShift_prob=0,
    MotionBlur_prob=0,
    MedianBlur_prob=0,
    GaussianBlur_prob=0,
    GaussNoise_prob=0,
    ChannelShuffle_prob=0,
    ColorJitter_prob=0,
):
    super().__init__()
    self.prob = prob
    self.randomrotate90_prob = randomrotate90_prob
    self.elastictransform_prob = elastictransform_prob
    self.transforms = al.Compose(
        [
            transforms.RandomRotate90(p=randomrotate90_prob),
            transforms.Flip(p=Flip_prob),
            transforms.HueSaturationValue(p=HueSaturationValue_prob),
            transforms.RandomBrightnessContrast(p=RandomBrightnessContrast_prob),
            transforms.Transpose(),
            OneOf(
                [
                    transforms.RandomCrop(220, 220, p=0.5),
                    transforms.CenterCrop(220, 220, p=0.5),
                ],
                p=crop_prob,
            ),
            ElasticTransform(
                p=elastictransform_prob,
                alpha=120,
                sigma=120 * 0.05,
                alpha_affine=120 * 0.03,
            ),
            GridDistortion(p=gridistortion_prob),
            OpticalDistortion(p=opticaldistortion_prob, distort_limit=2, shift_limit=0.5),
            VerticalFlip(p=verticalflip_prob),
            HorizontalFlip(p=horizontalflip_prob),
            RandomGamma(p=randomgamma_prob),
            RGBShift(p=RGBShift_prob),
            MotionBlur(p=MotionBlur_prob, blur_limit=7),
            MedianBlur(p=MedianBlur_prob, blur_limit=9),
            GaussianBlur(p=GaussianBlur_prob, blur_limit=9),
            GaussNoise(p=GaussNoise_prob),
            ChannelShuffle(p=ChannelShuffle_prob),
            CoarseDropout(p=CoarseDropout_prob, max_holes=8, max_height=32, max_width=32),
            ColorJitter(p=ColorJitter_prob),
            # transforms.Resize(352, 352),
            # transforms.Normalize(
            #     mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
            # ),
        ],
        p=self.prob,
    )
train_img_paths = []
train_mask_paths = []
train_data_path = ["data/kvasir-seg/TrainDataset"]
for i in train_data_path:
    train_img_paths.extend(glob(os.path.join(i, "images", "*")))
    train_mask_paths.extend(glob(os.path.join(i, "masks", "*")))
train_img_paths.sort()
train_mask_paths.sort()

# Note: this assignment shadows the imported `transforms` module used on the right-hand side.
transforms = al.Compose(
    [
        transforms.RandomRotate90(),
        transforms.Flip(),
        transforms.HueSaturationValue(),
        transforms.RandomBrightnessContrast(),
        transforms.Transpose(),
        OneOf(
            [
                transforms.RandomCrop(220, 220, p=0.5),
                transforms.CenterCrop(220, 220, p=0.5),
            ],
            p=1,
        ),
        # transforms.Resize(352,352),
        # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ],
    p=1,
)

dataset = KvasirDataset(train_img_paths, train_mask_paths, 352,