def get_augmentations(p=1.0):
    """Build a heavy training-augmentation pipeline.

    Crops/resizes to 224x224, then applies noise, blur, affine and
    photometric jitter. `p` is the probability of applying the whole
    pipeline.
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=1)
    blur = OneOf([
        MotionBlur(p=0.6),
        MedianBlur(blur_limit=3, p=0.6),
        Blur(blur_limit=3, p=0.6),
    ], p=1)
    distort = OneOf([
        OpticalDistortion(p=0.5),
        GridDistortion(p=0.4),
        IAAPiecewiseAffine(p=0.5),
    ], p=0.8)
    photometric = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.9)
    return Compose([
        RandomSizedCrop((250, 600), 224, 224),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.0825, scale_limit=0.3,
                         rotate_limit=30, p=1),
        distort,
        photometric,
        HueSaturationValue(p=0.3),
    ], p=p)
def aug_daniel(prob=0.8):
    """Flip/rotate plus light noise and sharpening/contrast jitter.

    `prob` is the probability that the whole pipeline is applied.
    """
    return Compose([
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        Flip(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            OneOf([
                RandomContrast(),
                RandomBrightness(),
            ]),
        ], p=0.5),
        HueSaturationValue(p=0.5),
    ], p=prob)
def strong_aug(p=0.5):
    """Standard 'strong' augmentation set (flips, noise, blur, affine,
    distortions, photometric), each sub-group applied with low probability.
    """
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
def img_augment(p=1.):
    """Resize to the module-level `img_sz` and apply mild photometric
    augmentation.

    NOTE(review): relies on a global `img_sz` defined elsewhere in the
    module — confirm it is set before this is called.
    """
    return Compose([
        Resize(img_sz, img_sz),
        HorizontalFlip(p=0.8),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        RandomBrightnessContrast(p=0.2),
    ], p=p)
def alb_transform_train(imsize = 256, p=1):
    """Training-time augmentation ending with ImageNet normalization.

    NOTE(review): `imsize` is currently unused (the crop step was
    disabled); kept for interface compatibility.
    """
    pipeline = Compose([
        Flip(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ], p=p)
    return pipeline
def transform(config, image, mask):
    """Apply the default augmentation pipeline to an image/mask pair.

    Reads the pipeline probability from config["train"]["dap"]["p"],
    defaulting to 1 when the key is absent.

    Raises:
        ValueError: if the configured probability is outside [0, 1].
    """
    # Inspired by: https://albumentations.readthedocs.io/en/latest/examples.html
    try:
        p = config["train"]["dap"]["p"]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` — only "key missing" or
        # "config is not subscriptable" should fall back to the default.
        p = 1
    if not 0 <= p <= 1:
        # Explicit check instead of `assert`, which is stripped under -O.
        raise ValueError("augmentation probability must be in [0, 1], got %r" % (p,))
    pipeline = Compose([
        Flip(),
        Transpose(),
        OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)  # BUG FIX: `p` was previously passed to __call__ as a data key
             # (where Compose ignores it) instead of to the constructor.
    return pipeline(image=image, mask=mask)
def make(p=0.5):
    """Noise/blur/affine/photometric pipeline with constant-border shifts."""
    return Compose([
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
            ISONoise(),
        ], p=0.9),
        MotionBlur(p=0.3),
        ShiftScaleRotate(shift_limit=0.0925, scale_limit=0.4,
                         rotate_limit=7, border_mode=cv2.BORDER_CONSTANT,
                         value=0, p=0.6),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
        RGBShift(40, 40, 40),
    ], p=p)
def get_transforms():
    """Return the default training augmentation pipeline (always applied)."""
    return Compose([
        RandomRotate90(p=0.5),
        Flip(p=0.5),
        Transpose(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ])
def strong_aug(config, aug_prob):
    """Build a fully config-driven augmentation pipeline.

    Every probability and limit is read from attributes of `config`;
    `aug_prob` gates the whole pipeline.
    """
    min_height = int(config.image_height * config.min_max_height)
    return Compose([
        RandomSizedCrop(
            p=config.random_sized_crop_prob,
            min_max_height=(min_height, config.image_height),
            height=config.image_height,
            width=config.image_width,
            w2h_ratio=config.image_width / config.image_height),
        HorizontalFlip(p=config.horizontal_flip_prob),
        RandomGamma(p=config.random_gamma_prob),
        RandomContrast(p=config.random_contrast_prob,
                       limit=config.random_contrast_limit),
        RandomBrightness(p=config.random_brightness_prob,
                         limit=config.random_brightness_limit),
        OneOf([
            MotionBlur(p=config.motion_blur_prob),
            MedianBlur(blur_limit=config.median_blur_limit,
                       p=config.median_blur_prob),
            Blur(blur_limit=config.blur_limit, p=config.blur_prob),
        ], p=config.one_of_blur_prob),
        CLAHE(clip_limit=config.clahe_limit, p=config.clahe_prob),
        IAAEmboss(p=config.iaaemboss_prob),
        HueSaturationValue(p=config.hue_saturation_value_prob,
                           hue_shift_limit=config.hue_shift_limit,
                           sat_shift_limit=config.sat_shift_limit,
                           val_shift_limit=config.val_shift_limit),
    ], p=aug_prob)
def train_aug(self, image, label):
    """Augment an image/mask pair, convert to grayscale, and random-crop
    to 256x256. Returns the (image, label) pair.
    """
    pipeline = Compose([
        OneOf([CLAHE(), IAASharpen(), IAAEmboss()], p=0.5),
        RandomContrast(),
        RandomBrightness(),
        RandomRotate90(),
        Flip(),
        OneOf([
            ElasticTransform(),
            OpticalDistortion(),
            GridDistortion(),
            IAAPiecewiseAffine(),
        ], p=0.5),
    ], p=0.9)
    out = pipeline(image=image, mask=label)
    # Force grayscale, then take a fixed-size random crop.
    out = ToGray(p=1)(image=out['image'], mask=out['mask'])
    out = RandomCrop(256, 256)(image=out['image'], mask=out['mask'])
    return out['image'], out['mask']
def get_photometric(self):
    """Photometric-only augmentations scaled by `self.strength`.

    The blur kernel size is the largest odd number <= 3 * strength
    (minimum 1).
    """
    raw = int(3 * self.strength)
    kernel = max(1, raw if raw % 2 else raw - 1)
    return Compose([
        OneOf([
            IAASharpen(p=0.5),
            IAAEmboss(p=0.5),
        ], p=0.2),
        OneOf([
            IAAAdditiveGaussianNoise(p=0.3),
            GaussNoise(p=0.7),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=kernel, p=0.3),
            Blur(blur_limit=kernel, p=0.5),
        ], p=0.2),
        OneOf([
            RandomContrast(),
            RandomBrightness(),
        ], p=0.2),
    ])
def strong_aug(p=1):
    """Augmentations for small (~101px) inputs: flip, crop-or-shift,
    elastic-style warps, and mild photometric jitter.
    """
    return Compose([
        HorizontalFlip(p=0.5),
        OneOf([
            RandomCrop(94, 94, p=0.6),
            ShiftScaleRotate(shift_limit=(0.1, 0.1),
                             scale_limit=(0.05, 0.05),
                             rotate_limit=10, p=0.4),
        ], p=0.6),
        OneOf([
            ElasticTransform(p=0.2, alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            IAAPiecewiseAffine(p=0.4),
            GridDistortion(p=0.4),
        ], p=0.4),
        OneOf([
            RandomGamma((90, 110)),
            IAAEmboss((0.1, 0.4), (0.1, 0.6)),
            RandomContrast(0.1),
            RandomBrightness(0.1),
        ], p=0.5),
    ], p=p)
def get_train_transforms():
    """Return a callable mapping a PIL-style image to an augmented,
    normalized tensor (resize 236 -> augment -> center-crop 224).
    """
    pipeline = Compose([
        Resize(236, 236),
        Flip(),
        OneOf([
            IAAAdditiveGaussianNoise(p=0.5),
            GaussNoise(p=0.4),
        ], p=0.4),
        OneOf([
            MotionBlur(p=0.6),
            Blur(blur_limit=3, p=0.2),
        ], p=0.4),
        ShiftScaleRotate(shift_limit=0.0725, scale_limit=0.2,
                         rotate_limit=45, p=0.6),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.4),
            IAAPiecewiseAffine(p=0.2),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.25),
        HueSaturationValue(p=0.3),
        CenterCrop(224, 224),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
        ToTensor(),
    ])

    def apply(img):
        # Albumentations expects a numpy array keyed as `image`.
        return pipeline(image=np.array(img))

    return apply
def aug_train(resolution, p=1):
    """Resize to `resolution` square, augment, and normalize."""
    return Compose([
        Resize(resolution, resolution),
        OneOf([
            HorizontalFlip(),
            VerticalFlip(),
            RandomRotate90(),
            Transpose(),
        ], p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.5),
        HueSaturationValue(p=0.3),
        Normalize(),
    ], p=p)
def aug_with_crop(width=640, height=480, crop_prob=1):
    """Segmentation-style augmentation pipeline (always applied).

    NOTE(review): `width`, `height`, and `crop_prob` are currently unused —
    the crop step was disabled; parameters kept for interface compatibility.
    """
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        ShiftScaleRotate(shift_limit=0.01, scale_limit=0.04,
                         rotate_limit=0, p=0.25),
        RandomBrightnessContrast(p=0.5),
        RandomGamma(p=0.25),
        IAAEmboss(p=0.25),
        Blur(p=0.01, blur_limit=3),
        OneOf([
            ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GridDistortion(p=0.5),
            OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
        ], p=0.8),
    ], p=1)
def __call__(self, original_image):
    """Augment and normalize an image, returning the resulting tensor.

    NOTE(review): the pipeline is rebuilt on every call and stored on
    `self.augmentation_pipeline`; consider constructing it once in
    __init__ if this becomes a hot path.
    """
    self.augmentation_pipeline = Compose([
        Resize(650, 650, always_apply=True),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(rotate_limit=25.0, p=0.7),
        OneOf([IAAEmboss(p=1), IAASharpen(p=1), Blur(p=1)], p=0.5),
        IAAPiecewiseAffine(p=0.5),
        Resize(self.height, self.width, always_apply=True),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
            always_apply=True
        ),
        ToTensor(),
    ])
    result = self.augmentation_pipeline(image=original_image)
    return result["image"]
def train_transform(p=1):
    """Augmentation for 101x101 inputs: flip, crop-or-shift, warps, and
    gentle photometric jitter (incl. a project-local ShiftBrightness).
    """
    return Compose([
        HorizontalFlip(p=0.5),
        OneOf([
            RandomSizedCrop((92, 98), 101, 101, p=0.6),
            ShiftScaleRotate(shift_limit=(0, 0.1),
                             scale_limit=(0, 0.05),
                             rotate_limit=10, p=0.4),
        ], p=0.6),
        OneOf([
            ElasticTransform(p=0.2, alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            IAAPiecewiseAffine(p=0.4),
            GridDistortion(p=0.4),
        ], p=0.4),
        OneOf([
            RandomGamma((90, 110)),
            ShiftBrightness((5, 20)),
            IAAEmboss((0.1, 0.4), (0.1, 0.6)),
            RandomContrast(0.08),
            RandomBrightness(0.08),
        ], p=0.5),
    ], p=p)
def strong_aug(p=1):
    """Flips/rotations plus low-probability noise, blur, and photometric
    jitter (no geometric distortion).
    """
    return Compose([
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
    ], p=p)
def strong_aug(p=1.0):
    """CIFAR-style augmentation: cutout filled with the dataset mean,
    flip, shift/scale/rotate, photometric jitter, then normalize to tensor.
    """
    # Fill value is the per-channel dataset mean scaled to 0-255.
    cifar_mean_255 = [0.4914 * 255, 0.4822 * 255, 0.4465 * 255]
    return Compose([
        Cutout(num_holes=1, max_h_size=16, max_w_size=16,
               fill_value=cifar_mean_255, p=1.),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=20, p=1.),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        ToTensor(normalize={
            'mean': (0.4914, 0.4822, 0.4465),
            'std': (0.2023, 0.1994, 0.2010)
        }),
    ], p=p)
def strong_aug(p=0.5):
    """Dropout/cutout plus low-probability noise, affine, blur,
    distortion, and photometric jitter.
    """
    return Compose([
        OneOf([
            CoarseDropout(p=0.5),
            Cutout(p=0.5),
        ], p=0.3),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45, p=0.2),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        OneOf([
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.2),
    ], p=p)
def strong_aug(p=1):
    """Aggressive pipeline where every group fires (p=1) and only the
    per-transform probabilities vary.
    """
    return Compose([
        OneOf([
            RandomRotate90(p=1),
            Flip(p=1),
        ], p=1),
        # border_mode=2 reflects pixels at the border; value=0 pads shifts.
        ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=45,
                         p=1, value=0, border_mode=2),
        OneOf([
            IAAAdditiveGaussianNoise(p=0.7),
            GaussNoise(p=0.7),
        ], p=1),
        OneOf([
            MotionBlur(p=0.7),
            MedianBlur(blur_limit=3, p=0.7),
            Blur(blur_limit=3, p=0.7),
        ], p=1),
        RandomBrightnessContrast(p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(p=0.7),
        ], p=1),
    ], p=p)
def get_corrupter(self):
    """Return a corruption pipeline: one transform chosen from each of
    four groups (distortion, effects, misc, blur/noise), all always fired.
    """
    distortion = OneOf([
        OpticalDistortion(p=1),
        GridDistortion(p=1),
    ], p=1)
    effects = OneOf([
        IAASharpen(p=1),
        IAAEmboss(p=1),
        IAAPiecewiseAffine(p=1),
        IAAPerspective(p=1),
        CLAHE(p=1),
    ], p=1)
    misc = OneOf([
        ShiftScaleRotate(p=1),
        HueSaturationValue(p=1),
        RandomBrightnessContrast(p=1),
    ], p=1)
    blur = OneOf([
        Blur(p=1),
        MotionBlur(p=1),
        MedianBlur(p=1),
        GaussNoise(p=1),
    ], p=1)
    return Compose([distortion, effects, misc, blur])
def strong_aug(p=0.8):
    """Shift/rotate with constant borders, light noise, median blur,
    and a photometric OneOf.

    Probability semantics (see https://github.com/albu/albumentations):
    the outer `p` gates the whole pipeline, each transform's own `p`
    gates that transform, and a OneOf's `p` gates the group — effective
    probabilities multiply.

    NOTE(review): the final OneOf has no explicit `p` and therefore uses
    the library default (0.5) — confirm that is intended.
    """
    return Compose([
        ShiftScaleRotate(shift_limit=0.2, scale_limit=0.3, rotate_limit=45,
                         p=0.8, border_mode=cv2.BORDER_CONSTANT),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.3),
        MedianBlur(blur_limit=3, p=0.7),
        OneOf([
            CLAHE(clip_limit=2, p=0.4),
            IAASharpen(p=0.4),
            IAAEmboss(p=0.4),
            RandomBrightnessContrast(p=0.6),
            HorizontalFlip(p=0.5),
        ]),
    ], p=p)
def strong_aug(p=.5):
    """JPEG compression artifacts plus flips, noise, blur, affine, and
    photometric jitter.
    """
    return Compose([
        JpegCompression(p=0.9),
        HorizontalFlip(p=0.5),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.5),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=15, p=0.5),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.5),
        HueSaturationValue(p=0.5),
    ], p=p)
def strong_aug(p=0.5, crop_size=(512, 512)):
    """Very heavy pipeline: random resized crop to `crop_size`, flips,
    noise, blur, strong affine, warps, photometric jitter, and simulated
    weather (rain/snow/shadow/fog).
    """
    crop_h, crop_w = crop_size
    return Compose([
        RandomResizedCrop(crop_h, crop_w, scale=(0.3, 1.0),
                          ratio=(0.75, 1.3), interpolation=4, p=1.0),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.8),
        OneOf([
            MotionBlur(p=0.5),
            MedianBlur(blur_limit=3, p=0.5),
            Blur(blur_limit=3, p=0.5),
        ], p=0.3),
        ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5,
                         rotate_limit=180, p=0.8),
        OneOf([
            OpticalDistortion(p=0.5),
            GridDistortion(p=0.5),
            IAAPiecewiseAffine(p=0.5),
            ElasticTransform(p=0.5),
        ], p=0.3),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        OneOf([
            GaussNoise(),
            RandomRain(p=0.2, brightness_coefficient=0.9,
                       drop_width=1, blur_value=5),
            RandomSnow(p=0.4, brightness_coeff=0.5,
                       snow_point_lower=0.1, snow_point_upper=0.3),
            RandomShadow(p=0.2, num_shadows_lower=1, num_shadows_upper=1,
                         shadow_dimension=5, shadow_roi=(0, 0.5, 1, 1)),
            RandomFog(p=0.5, fog_coef_lower=0.3, fog_coef_upper=0.5,
                      alpha_coef=0.1),
        ], p=0.3),
        RGBShift(),
        HueSaturationValue(p=0.9),
    ], p=p)
def _transform_generic_np(self, npimages, prob):
    """Augment a batch of uint8 RGB images and resize each to
    `self.outsz` x `self.outsz`.

    `npimages` must be a (batch, height, width, 3) uint8 array of square
    images with values in 0..255; `prob` gates the augmentation stage
    (the resize always runs).
    """
    batchsz, height, width, channels = npimages.shape
    assert height == width, "We assume squares as inputs."
    assert channels == 3, "We assume RGB images."
    assert npimages.dtype == np.uint8
    assert 0 <= npimages.min() and npimages.max() <= 255

    augment = Compose([
        OneOf([
            IAAAdditiveGaussianNoise(p=1.0),
            GaussNoise(p=1.0),
        ], p=0.5),
        OneOf([
            MotionBlur(p=1.0),
            MedianBlur(blur_limit=3, p=1.0),
            Blur(blur_limit=3, p=1.0),
        ], p=0.5),
        RandomGamma(p=0.5),
        Rotate(limit=45, interpolation=cv2.INTER_CUBIC, p=0.5),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                         rotate_limit=45,
                         interpolation=cv2.INTER_CUBIC, p=0.5),
        OneOf([
            OpticalDistortion(interpolation=cv2.INTER_CUBIC, p=1.0),
            GridDistortion(interpolation=cv2.INTER_CUBIC, p=1.0),
            IAAPiecewiseAffine(p=1.0),
        ], p=0.5),
        OneOf([
            CLAHE(clip_limit=2, p=1.0),
            IAASharpen(p=1.0),
            IAAEmboss(p=1.0),
            RandomContrast(p=1.0),
            RandomBrightness(p=1.0),
        ], p=0.5),
        HueSaturationValue(p=0.5),
    ], p=prob)
    # The gated augmentations are wrapped with an unconditional resize.
    ops = Compose([
        augment,
        Resize(self.outsz, self.outsz, interpolation=cv2.INTER_CUBIC),
    ], p=1.0)

    # Each call returns a dict; the transformed image lives under "image".
    outputs = np.zeros((batchsz, self.outsz, self.outsz, channels),
                       dtype=np.uint8)
    for idx, img in enumerate(npimages):
        outputs[idx] = ops(image=img)["image"]
    return outputs
def get_bboxes_augmentations(strength=1.):
    """Bounding-box-aware augmentation pipeline (COCO-format boxes).

    The blur kernel size is the largest odd number <= 3 * strength
    (minimum 1).

    Raises:
        ValueError: if `strength` is not strictly positive.
    """
    # Explicit validation instead of `assert` (stripped under -O); the
    # old assert message also claimed "positive int" though floats work.
    if strength <= 0:
        raise ValueError('`strength` must be a positive number')
    coeff = int(3 * strength)
    k = max(1, coeff if coeff % 2 else coeff - 1)  # odd kernel size >= 1
    return Compose(
        [
            # Geometric stage: only horizontal flips are box-safe here.
            Compose(
                [
                    HorizontalFlip(),
                ], p=1.),
            # Photometric/noise stage (does not move boxes).
            Compose(
                [
                    OneOf([
                        CLAHE(clip_limit=2, p=.4),
                        IAASharpen(p=.3),
                        IAAEmboss(p=.3),
                    ], p=0.5),
                    OneOf([
                        IAAAdditiveGaussianNoise(p=.3),
                        GaussNoise(p=.7),
                    ], p=.5),
                    OneOf([
                        MotionBlur(p=.2),
                        MedianBlur(blur_limit=k, p=.3),
                        Blur(blur_limit=k, p=.5),
                    ], p=.4),
                    OneOf([
                        RandomContrast(),
                        RandomBrightness(),
                    ], p=.4),
                ], p=0.9)
        ],
        bbox_params={
            'format': 'coco',
            'min_area': 22,
            'min_visibility': .1,
            'label_fields': ['category_id']
        })
def augmentTest(self):
    """Very mild test-time augmentation ending in tensor conversion."""
    return Compose([
        OpticalDistortion(distort_limit=0.02, shift_limit=0.02,
                          border_mode=0, value=0, p=0.1),
        GridDistortion(num_steps=9, distort_limit=0.1,
                       border_mode=0, value=0, p=0.1),
        ShiftScaleRotate(shift_limit=0.03125, scale_limit=0.05,
                         rotate_limit=4, p=0.15),
        OneOf([
            IAASharpen(),
            IAAEmboss(),
        ], p=0.05),
        HorizontalFlip(p=0.6),
        ToTensor(),
    ], p=0.9)
def strong_aug(p=1):
    """Pipeline whose sub-steps are all gated by the same probability
    `p` (the outer Compose always applies).
    """
    return Compose([
        Flip(p=p),
        OneOf([
            IAASharpen(alpha=(0.1, 0.3), lightness=(0.1, 0.3), p=1),
            IAAEmboss(p=1),
            CLAHE(clip_limit=2, p=1),
        ], p=p),
        OneOf([
            Blur(blur_limit=(1, 3), p=1),
            GaussNoise(var_limit=(10, 30), p=1),
        ], p=p),
        ShiftScaleRotate(shift_limit=0.01, scale_limit=(-0.1, 0.5),
                         rotate_limit=90, p=p),
        RandomBrightnessContrast(brightness_limit=(-0.01, 0.05),
                                 contrast_limit=(-0.01, 0.01), p=p),
    ], p=1)
def _get_train_data_loader(args, **kwargs):
    """Build the training DataLoader (and its distributed sampler, if any).

    Uses a distributed sampler when `args.multigpus_distributed` is set;
    otherwise shuffles. Extra `kwargs` are forwarded to DataLoader.

    Returns:
        (train_dataloader, train_sampler) — sampler is None when not
        running distributed.
    """
    transform = Compose([
        RandomResizedCrop(args.height, args.width),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        VerticalFlip(p=0.5),
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomBrightnessContrast(),
        ], p=0.3),
        HueSaturationValue(p=0.3),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ], p=1.0)
    dataset = AlbumentationImageDataset(
        image_path=os.path.join(args.data_dir, 'train'),
        transform=transform,
        args=args,
        check_img=True)
    # Model-parallel runs drop the ragged final batch.
    drop_last = args.model_parallel
    # Dead `train_sampler = None` / `train_dataloader = None` pre-assignments
    # removed — both were unconditionally overwritten below.
    train_sampler = data.distributed.DistributedSampler(
        dataset,
        num_replicas=int(args.world_size),
        rank=int(args.rank)) if args.multigpus_distributed else None
    # Shuffle only when no sampler drives the ordering.
    train_dataloader = data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=train_sampler is None,
        sampler=train_sampler,
        drop_last=drop_last,
        **kwargs)
    return train_dataloader, train_sampler