def __init__(self, dataset_path, image_size, transform=None):
    """
    BreastPathQ dataset: supervised fine-tuning on downstream task.

    Args:
        dataset_path: directory containing the ``*.h5`` patch files.
        image_size: target side length (pixels) for the resize/crop transforms.
        transform: optional extra transform applied by the caller.
    """
    self.image_size = image_size
    self.transform = transform

    # Resize images
    self.transform1 = Compose([Resize(image_size, image_size, interpolation=2)])  # 256

    # Data augmentations
    self.transform4 = Compose([Rotate(limit=(-90, 90), interpolation=2),
                               CenterCrop(image_size, image_size)])
    self.transform5 = Compose(
        [Rotate(limit=(-90, 90), interpolation=2),
         RandomScale(scale_limit=(0.8, 1.2), interpolation=2),
         Resize(image_size + 20, image_size + 20, interpolation=2),
         RandomCrop(image_size, image_size)])

    self.datalist = []
    data_paths = glob.glob(dataset_path + "*.h5")
    with tqdm(enumerate(sorted(data_paths)), disable=True) as t:
        for wj, data_path in t:
            # BUG FIX: the original `h5py.File(data_path)` opened each file
            # without an explicit mode and never closed it, leaking one open
            # handle per file. Open read-only and close promptly.
            with h5py.File(data_path, 'r') as data:
                data_patches = data['x'][:]
                cls_id = data['y'][:]
            for idx in range(len(data_patches)):
                self.datalist.append((data_patches[idx], cls_id[idx]))
def data_augmentation(original_image, original_mask, mode):
    """Apply random augmentation to a sample image and its mask.

    Args:
        original_image: the original image.
        original_mask: the original mask.
        mode: 'train' applies the heavy augmentation pipeline,
            'validation' applies the light one.

    Returns:
        (image_aug, mask_aug): the augmented image and mask.
        NOTE(review): any other mode value falls through and implicitly
        returns None -- confirm callers only pass 'train'/'validation'.
    """
    original_height, original_width = original_image.shape[:2]
    augmentations = Compose([
        RandomRotate90(p=0.3),
        HorizontalFlip(p=0.3),
        Rotate(limit=15, p=0.3),
        CLAHE(p=0.3),
        HueSaturationValue(20, 5, 5, p=0.7),
        # brightness / contrast
        RandomGamma(gamma_limit=(80, 120), p=0.4),
        RandomBrightnessContrast(p=0.4),
        # # # blur
        # OneOf([
        #     # MotionBlur(p=0.1),
        #     MedianBlur(blur_limit=3, p=0.1),
        #     Blur(blur_limit=3, p=0.1),
        # ], p=0.3),
        #
        # OneOf([
        #     IAAAdditiveGaussianNoise(),
        #     GaussNoise(),
        # ], p=0.2)
    ])
    augmentations2 = Compose([
        # HorizontalFlip(p=0.2),
        # HueSaturationValue(p=1),
        Rotate(limit=15, p=0.2),
        # CenterCrop(p=0.3, height=original_height, width=original_width),
        # histogram equalization
        # CLAHE(p=0.4),
    ])
    if mode == 'train':
        augmented = augmentations(image=original_image, mask=original_mask)
        image_aug = augmented['image']
        mask_aug = augmented['mask']
        return image_aug, mask_aug
    elif mode == 'validation':
        augmented = augmentations2(image=original_image, mask=original_mask)
        image_aug = augmented['image']
        mask_aug = augmented['mask']
        return image_aug, mask_aug
def __init__(self, data_path, json_path, image_size, transform=None):
    """
    Camelyon16 dataset: supervised fine-tuning on downstream task.
    """
    self.transform = transform
    self.data_path = data_path
    self.json_path = json_path
    self._preprocess()

    # Data augmentations: both pipelines rotate first, then bring the
    # image back to `image_size`; the second additionally jitters scale.
    crop_steps = [
        Rotate(limit=(-90, 90), interpolation=2),
        CenterCrop(image_size, image_size),
    ]
    scale_steps = [
        Rotate(limit=(-90, 90), interpolation=2),
        RandomScale(scale_limit=(0.8, 1.2), interpolation=2),
        Resize(image_size + 20, image_size + 20, interpolation=2),
        RandomCrop(image_size, image_size),
    ]
    self.transform1 = Compose(crop_steps)
    self.transform2 = Compose(scale_steps)
def train_transform(p=1):
    """Training augmentation: rotation plus both flips, each gated by `p`,
    with the whole pipeline also applied with probability `p`."""
    steps = [
        Rotate(90, p=p),
        VerticalFlip(p=p),
        HorizontalFlip(p=p),
        # RandomCrop(32, 32)
    ]
    return Compose(steps, p=p)
def albumentations_transforms(p=1.0, is_train=False):
    """Build an albumentations pipeline for CIFAR-style 32x32 images.

    Args:
        p: probability with which the composed pipeline is applied.
        is_train: when True, prepend the training-time augmentations.

    Returns:
        A callable mapping a PIL image (or array-like) to the transformed
        tensor.
    """
    # Mean and standard deviation of train dataset
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])
    transforms_list = []
    # Use data aug only for train data
    if is_train:
        transforms_list.extend([
            HueSaturationValue(p=0.25),
            HorizontalFlip(p=0.5),
            Rotate(limit=15),
            # Single mean-colored hole, CutOut style.
            CoarseDropout(max_holes=1, max_height=16, max_width=16,
                          min_height=4, min_width=4,
                          fill_value=mean * 255.0, p=0.75),
        ])
    transforms_list.extend([
        Normalize(
            mean=mean,
            std=std,
            max_pixel_value=255.0,
            p=1.0
        ),
        ToTensor()
    ])
    transforms = Compose(transforms_list, p=p)
    # BUG FIX: the original return ended with `["image"` (unclosed
    # subscript), a SyntaxError; close the bracket.
    return lambda img: transforms(image=np.array(img))["image"]
def get_train_transform(smallest_max_size: int, size: int):
    """Training augmentation: resize so the smallest side equals
    `smallest_max_size`, jitter scale and rotation, crop to a `size`
    square, flip, and apply photometric jitter (contrast/gamma/brightness).

    Args:
        smallest_max_size: target length of the image's smallest side.
        size: final square crop side length.
    """
    return Compose([
        SmallestMaxSize(smallest_max_size),
        RandomScale(scale_limit=0.125),
        # PadIfNeeded(256, 256, border_mode=cv2.BORDER_CONSTANT., value=0, p=1.),
        # ShiftScaleRotate(
        #     shift_limit=0.0625, scale_limit=0.1, rotate_limit=30,
        #     border_mode=cv2.BORDER_REFLECT_101, p=1.),
        Rotate(limit=20, border_mode=cv2.BORDER_REFLECT_101, p=1.),
        # Random crop most of the time, center crop occasionally.
        OneOf([
            RandomCrop(size, size, p=0.9),
            CenterCrop(size, size, p=0.1),
        ], p=1.),
        HorizontalFlip(p=0.5),
        RandomContrast(limit=0.2, p=0.5),
        RandomGamma(gamma_limit=(80, 120), p=0.5),
        RandomBrightness(limit=0.2, p=0.5),
        # HueSaturationValue(hue_shift_limit=5, sat_shift_limit=20,
        #                    val_shift_limit=10, p=1.),
        # OneOf([
        #     OpticalDistortion(p=0.3),
        #     GridDistortion(p=0.1),
        #     IAAPiecewiseAffine(p=0.3),
        # ], p=0.2),
        # OneOf([
        #     IAAAdditiveGaussianNoise(
        #         loc=0, scale=(1., 6.75), per_channel=False, p=0.3),
        #     GaussNoise(var_limit=(5.0, 20.0), p=0.6),
        # ], p=0.5),
        # Cutout(num_holes=4, max_h_size=30, max_w_size=50, p=0.75),
        # JpegCompression(quality_lower=50, quality_upper=100, p=0.5)
    ])
def box_segmentation_aug():
    """Heavy augmentation for box segmentation: one transform is drawn
    from each of five groups (color jitter, degradation/noise,
    occlusion/shuffle, non-rigid distortion, flips/rotation).

    Returns:
        An albumentations Compose of five OneOf groups.
    """
    return Compose([
        # Color / intensity jitter.
        OneOf([
            RandomBrightnessContrast(brightness_limit=0.2, p=0.5),
            # NOTE(review): gamma_limit is conventionally a (low, high)
            # pair such as (80, 120); a bare 50 may not behave as
            # intended -- verify against the albumentations docs.
            RandomGamma(gamma_limit=50, p=0.5),
            ChannelShuffle(p=0.5)
        ]),
        # Image degradation / noise.
        OneOf([
            ImageCompression(quality_lower=0, quality_upper=20, p=0.5),
            MultiplicativeNoise(multiplier=(0.3, 0.8), elementwise=True,
                                per_channel=True, p=0.5),
            Blur(blur_limit=(15, 15), p=0.5)
        ]),
        # Occlusion / spatial shuffling.
        OneOf([
            CenterCrop(height=1000, width=1000, p=0.1),
            RandomGridShuffle(grid=(3, 3), p=0.2),
            CoarseDropout(max_holes=20, max_height=100, max_width=100,
                          fill_value=53, p=0.2)
        ]),
        # Non-rigid geometric distortion.
        OneOf([
            GridDistortion(p=0.5, num_steps=2, distort_limit=0.2),
            ElasticTransform(alpha=157, sigma=80, alpha_affine=196, p=0.5),
            OpticalDistortion(distort_limit=0.5, shift_limit=0.5, p=0.5)
        ]),
        # Flips and rotation.
        OneOf([
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            Rotate(limit=44, p=0.5)
        ])
    ])
def initialize_elements(self):
    """Configure resizing and augmentation pipelines from ``self.params``.

    Reads params.roi_crop (presence only), params.random_crop_scale /
    random_crop_ratio, params.roi_height / roi_width and
    params.data_augmentation; sets self.using_roi, self.resizer,
    self.roi_resize (ROI mode only) and self.aug.
    """
    self.using_roi = hasattr(self.params, "roi_crop")
    # Default to plain resizing; switch to random resized crops when the
    # config provides a crop scale.
    self.resizer = self.plain_resize
    if hasattr(self.params, "random_crop_scale"):
        self.resizer = RandomResizedCrop(
            height=self.params.default_height,
            width=self.params.default_width,
            scale=self.params.random_crop_scale,
            ratio=self.params.random_crop_ratio)
    if self.using_roi:
        self.roi_resize = Resize(height=self.params.roi_height,
                                 width=self.params.roi_width)
    # Light base augmentation, optionally extended with heavier
    # elastic/noise/blur transforms.
    starting_aug = [Rotate(limit=15), HorizontalFlip(p=0.5)]
    heavy_aug = [
        # RandomGamma(p=0.1),
        ElasticTransform(p=0.1, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GaussNoise(p=0.05),
        GaussianBlur(p=0.05)
    ]
    if self.params.data_augmentation == constants.heavy_augmentation:
        starting_aug.extend(heavy_aug)
    self.aug = Compose(starting_aug)
def transforms_train(aug_proba=1.):
    """Training pipeline: flip/rotate, one noise transform, one color
    shift, optional contrast/brightness, optical distortion, then resize
    and normalize.

    Args:
        aug_proba: probability of applying the whole composed pipeline.

    The 'trimap' target is registered as a mask so geometric transforms
    stay aligned with the image.
    """
    return Compose(transforms=[
        HorizontalFlip(p=0.5),
        Rotate(limit=25, p=0.5, border_mode=cv2.BORDER_CONSTANT, value=0,
               interpolation=cv2.INTER_CUBIC),
        # NOTE(review): IAAAdditiveGaussianNoise is deprecated/removed in
        # newer albumentations releases -- confirm the pinned version.
        OneOf([
            IAAAdditiveGaussianNoise(p=1),
            GaussNoise(p=1),
        ], p=0.2),
        OneOf([
            HueSaturationValue(hue_shift_limit=10, sat_shift_limit=15,
                               val_shift_limit=10, p=1),
            RGBShift(r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1)
        ]),
        OneOf([RandomContrast(p=1), RandomBrightness(p=1)], p=0.3),
        OpticalDistortion(p=0.1),
        Resize(*SIZE),
        Normalize()
    ], p=aug_proba, additional_targets={'trimap': 'mask'})
def __init__(self, num_classes, input_size=None):
    """HSR in-hand objects data transform.

    Args:
        num_classes: number of classes passed to ToTensor.
        input_size: optional (height, width); when given, a Resize is
            prepended to both the train and validation pipelines.
    """
    super(HSRInhandObjectsDataTransform, self).__init__()
    # NOTE(review): this reseeds the *global* random module for the
    # whole process -- confirm that side effect is intended.
    random.seed(1000)
    if input_size is not None:
        height, width = input_size
        self._train_transform_list.append(Resize(height, width))
        self._val_transform_list.append(Resize(height, width))
    # Train: geometric + photometric augmentation before normalization.
    self._train_transform_list = self._train_transform_list + [
        HorizontalFlip(p=0.5),
        Rotate(p=0.5, limit=(-15, 15)),
        GaussNoise(p=0.5),
        RandomBrightnessContrast(p=0.5),
        RandomShadow(p=0.5),
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensor(num_classes=num_classes),
    ]
    # Validation: normalization + tensor conversion only.
    self._val_transform_list = self._val_transform_list + [
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensor(num_classes=num_classes),
    ]
    self._initialize_transform_dict()
class ImgAugTTATransform(AugmentationFactoryBase):
    """Test-time augmentation factory: builds a test pipeline around one
    transform selected from TRANSFORM_LIST (identity, flips, or a fixed
    180-degree rotation).

    CURR_TRANSFORM is class-level state: reassign it to switch which TTA
    variant build_test() uses.
    """

    TRANSFORM_LIST = (
        NoOp(p=1),
        HorizontalFlip(p=1),
        VerticalFlip(p=1),
        Rotate(limit=(180, 180), p=1),
        # Lambda(RollingY(shift=(-20, 20)).transform),
        # Lambda(RollingY(shift=(-20, 20)).transform),
    )
    # Defaults to the identity transform.
    CURR_TRANSFORM = TRANSFORM_LIST[0]

    def __init__(self, image_size=(126, 32)):
        # (height, width) of the resized output.
        self.image_size = image_size

    def build_train(self):
        # TTA-only factory: no training pipeline is provided.
        pass

    def build_test(self):
        """Min-max normalize, apply the current TTA transform, resize to
        self.image_size, normalize and convert to tensor."""
        print("Using {}".format(self.CURR_TRANSFORM))
        return Compose([
            Lambda(minmax_norm),
            self.CURR_TRANSFORM,
            Resize(
                self.image_size[0],
                self.image_size[1],
                interpolation=cv2.INTER_CUBIC,
            ),
            Lambda(normalize),
            ToTensor(),
        ])
def udf_transformer(phase):
    """Return the albumentations pipeline for `phase`.

    'train' adds flips, a small rotation and grid distortion; every
    phase ends with Normalize + ToTensor (the albumentations ToTensor is
    required when the rest of the pipeline is albumentations-based).

    Previously-explored steps (CropNonEmptyMaskIfExists, ElasticTransform,
    OpticalDistortion, GaussNoise) remain disabled.
    """
    steps = []
    if phase == 'train':
        steps += [
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Rotate(limit=20, p=0.5),
            GridDistortion(distort_limit=(-0.2, 0.2), p=0.6),
        ]
    # Normalization is the important step; ToTensor must come from
    # albumentations to match the rest of the pipeline.
    steps += [Normalize([0.5], [0.5]), ToTensor()]
    return Compose(steps)
def __init__(self, is_train: bool, to_pytorch: bool):
    """Build the augmentation pipeline.

    Args:
        is_train: training adds flip/noise/rotation on top of the shared
            preprocessing; evaluation uses preprocessing only.
        to_pytorch: stored as self._need_to_pytorch; tensor conversion is
            handled elsewhere.
    """
    # Preprocessing: either a direct resize to the target size, or an
    # oversized (x1.2) resize followed by a random crop back down.
    preprocess = OneOf([
        Resize(height=DATA_HEIGHT, width=DATA_WIDTH),
        Compose([
            Resize(height=int(DATA_HEIGHT * 1.2), width=int(DATA_WIDTH * 1.2)),
            RandomCrop(height=DATA_HEIGHT, width=DATA_WIDTH)
        ])
    ], p=1)
    if is_train:
        self._aug = Compose(
            [
                preprocess,
                HorizontalFlip(p=0.5),
                GaussNoise(p=0.3),
                # OneOf([
                #     RandomBrightnessContrast(),
                #     RandomGamma(),
                # ], p=0.3),
                # OneOf([
                #     ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                #     GridDistortion(),
                #     OpticalDistortion(distort_limit=2, shift_limit=0.5),
                # ], p=0.3),
                Rotate(limit=20),
            ], p=1)
    else:
        self._aug = preprocess
    self._need_to_pytorch = to_pytorch
def Rotate_Crop(img, v):  # [-90, 90]
    """Flip, rotate within +/-|v| degrees, then center-crop to a square.

    Args:
        img: image array (H, W, ...); the crop uses img.shape[1] (width)
            for both sides, producing a width x width square.
        v: rotation limit in degrees; must lie in [-90, 90].

    Returns:
        The raw albumentations result dict (the augmented image is under
        key 'image'), not the image itself.
    """
    assert -90 <= v <= 90
    # NOTE(review): with a scalar limit albumentations Rotate already
    # samples from a symmetric range, so this random sign flip is likely
    # redundant -- confirm against the albumentations Rotate docs.
    if random.random() < 0.5:
        v = -v
    transform = Compose([Flip(), Rotate(limit=v, interpolation=2),
                         CenterCrop(img.shape[1], img.shape[1])])
    Aug_img = transform(image=img)
    return Aug_img
def __build_augmentator(self):
    """Build the augmentation pipeline used by this loader.

    Shift, or one of (small scale jitter | small rotation), pad back up
    to (self.height, self.width), random-crop to that size, then a 50%
    vertical flip; the whole pipeline fires with probability self.p.
    """
    return Compose(
        [
            ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, p=0.3),
            OneOf([
                RandomScale(scale_limit=0.05, interpolation=1, p=0.5),
                Rotate(limit=7, interpolation=1,
                       border_mode=cv2.BORDER_CONSTANT, value=0, p=0.5)
            ], p=0.5),
            # Scaling/rotation can shrink the image below the target
            # size, so pad before cropping.
            PadIfNeeded(always_apply=True, min_width=self.width,
                        min_height=self.height),
            RandomCrop(width=self.width, height=self.height),
            OneOf(
                [
                    VerticalFlip(),
                    # HorizontalFlip(p=0.2),
                ], p=0.5),
            # OneOf([
            #     RandomBrightness(limit=0.2, always_apply=False, p=0.5),
            #     RandomContrast(),
            #     RandomGamma()
            # ], p=0.7),
        ], p=self.p)
def augmentation(img, n):
    """
    Make random augmentations with image n times.

    :rtype: ndarray
    :param img: image in matrix form
    :param n: how many augmentations need to apply
    :return: object array of n augmented versions of the image
    """
    # Wrap each candidate transform in its own always-applied Compose.
    methods = [
        Compose([m], p=1)
        for m in (
            ElasticTransform(**elastic_params),
            RandomGamma(**gamma_params),
            GridDistortion(**all_other),
            RGBShift(**r_shift_params),
            Rotate(**rotate_params),
            RandomBrightness(**brightness_params),
        )
    ]
    # Pick n distinct augmentation methods.
    chosen = np.random.choice(methods, replace=False, size=n)
    # BUG FIX: np.object was removed in NumPy 1.24; the builtin `object`
    # is the supported spelling for an object-dtype array.
    augmented = np.empty((n,), dtype=object)
    for i, method in enumerate(chosen):
        transformed = transform_image(method(image=img)["image"])
        if to_normalize:
            transformed = normalize(transformed)
        augmented[i] = transformed
    return augmented
def __init__(self, means, stddev, settype):
    """Build the albumentations pipeline for a data split.

    Args:
        means: per-channel means (assumed in [0, 1] -- fill values are
            scaled by 255; TODO confirm).
        stddev: per-channel standard deviations.
        settype: 'train' builds the augmented pipeline, 'test' builds a
            normalize-only one. Other values leave the transform unset.
    """
    self.settype = settype
    self.means = np.array(means)
    self.stddev = np.array(stddev)
    if self.settype == 'train':
        print("Train set")
        self.albumentation_transform = Compose([
            # Pad 64x64 inputs to 72x72 so the random crop jitters position.
            PadIfNeeded(min_height=72, min_width=72, border_mode=1,
                        value=list(255 * self.means), p=1.0),
            # RandomBrightnessContrast(always_apply=False, p=0.5, brightness_limit=(-0.40, 0.82), contrast_limit=(-0.40, 0.82), brightness_by_max=True),
            RandomCrop(height=64, width=64, always_apply=True, p=1.0),
            HorizontalFlip(always_apply=False, p=0.5),
            Rotate(limit=15, interpolation=1, border_mode=4, value=None,
                   mask_value=None, always_apply=False, p=0.5),
            # Cutout(always_apply=True, p=1.0, num_holes=1, max_h_size=8, max_w_size=8, fill_value=list(255 * self.means)),
            GaussNoise(always_apply=False, p=1.0, var_limit=(60, 100)),
            # Drop one patch, filled with the dataset mean color.
            CoarseDropout(max_holes=1, max_height=16, max_width=16,
                          min_holes=1, min_height=8, min_width=8,
                          fill_value=list(255 * self.means),
                          always_apply=False, p=1.0),
            Normalize(
                mean=list(self.means),
                std=list(self.stddev),
            ),
            ToTensor()
        ])
    elif self.settype == 'test':
        print("Test set")
        self.albumentation_transform = Compose([
            Normalize(
                mean=list(self.means),
                std=list(self.stddev),
            ),
            ToTensor()
        ])
def albumentations_transforms(p=1.0, is_train=False):
    """64x64 pipeline: pad/crop/flip/rotate/RGB-shift augmentation for
    training, then Normalize + ToTensor for every split.

    Args:
        p: probability with which the composed pipeline is applied.
        is_train: when True, prepend the training-time augmentations.

    Returns:
        A callable mapping a PIL image (or array-like) to the tensor.
    """
    # Mean and standard deviation of train dataset
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])

    steps = []
    if is_train:
        steps.extend([
            PadIfNeeded(min_height=72, min_width=72, p=1.0),
            RandomCrop(height=64, width=64, p=1.0),
            HorizontalFlip(p=0.25),
            Rotate(limit=15, p=0.25),
            RGBShift(r_shift_limit=20, g_shift_limit=20,
                     b_shift_limit=20, p=0.25),
            # CoarseDropout(max_holes=1, max_height=32, max_width=32,
            #               min_height=8, min_width=8,
            #               fill_value=mean*255.0, p=0.5),
        ])
    steps.extend([
        Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
        ToTensor(),
    ])
    data_transforms = Compose(steps, p=p)

    def _apply(img):
        return data_transforms(image=np.array(img))["image"]

    return _apply
def __init__(self, root_dir, annotation_lines, class_number, transform=None,
             loader=default_loader):
    """Dataset over annotation lines with a strong augmentation pipeline.

    Args:
        root_dir: root directory for image paths.
        annotation_lines: iterable of annotation records.
        class_number: number of classes.
        transform: optional user transform applied by the caller.
        loader: image-loading callable (defaults to default_loader).
    """
    self.annotation_lines = annotation_lines
    self.class_number = class_number
    self.transform = transform
    self.loader = loader
    self.root_dir = root_dir

    # Strong augmentation: random crop covering 50-100% of a 512 side,
    # one of transpose/flips/rotate, then an elastic deformation.
    curr_size = 512
    min_max_height = (curr_size - curr_size // 2, curr_size - 1)
    self.transform_strong = Compose([
        RandomSizedCrop(min_max_height=min_max_height,
                        height=curr_size, width=curr_size, p=1.0),
        OneOf([
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Rotate(p=0.5),
        ], p=1.0),
        ElasticTransform(alpha=curr_size, sigma=curr_size * 0.05,
                         alpha_affine=10, p=1.0)
    ])
def augment(patch_size=patch_size):
    """Flip / hue / rotation / brightness / elastic / grid-distortion
    augmentation for training patches.

    NOTE(review): the default binds the module-level `patch_size` at
    definition time, and the parameter is otherwise unused in the body --
    confirm whether it was meant to size any of the transforms.
    """
    return Compose([
        VerticalFlip(p=.5),
        HorizontalFlip(p=.5),
        # Hue-only jitter, then combined hue+saturation jitter.
        HueSaturationValue(hue_shift_limit=(-15, 15), sat_shift_limit=0,
                           val_shift_limit=0, p=.5),
        HueSaturationValue(hue_shift_limit=(-10, 10),
                           sat_shift_limit=(-20, 20),
                           val_shift_limit=0, p=.5),
        Rotate(limit=(0, 359), p=.5, border_mode=cv2.BORDER_CONSTANT),
        RandomBrightnessContrast(brightness_limit=0.15, contrast_limit=0.1,
                                 always_apply=False, p=0.5),
        ElasticTransform(always_apply=True, approximate=True, alpha=20,
                         sigma=10, alpha_affine=0,
                         border_mode=cv2.BORDER_CONSTANT),
        GridDistortion(num_steps=16, distort_limit=0.5,
                       border_mode=cv2.BORDER_CONSTANT,
                       always_apply=False, p=0.5),
    ])
def generate_mask(w=256, h=256, d1=96 / 224, d2=1., rotate=45, ratio=.3):
    """Generate a grid-style binary mask of shape (h, w).

    A periodic grid of stripes (period d sampled from [d1, d2) after
    scaling both bounds by min(h, w)) is zeroed on an oversized 1.5x
    canvas; the canvas is randomly rotated, center-cropped back to
    (h, w), and finally inverted (1 - mask).

    Args:
        w, h: output width and height.
        d1, d2: grid period bounds as fractions of min(h, w).
        rotate: rotation limit in degrees for the random rotation.
        ratio: fraction of each period that is striped; ratio == 1
            short-circuits to an all-ones mask.

    Returns:
        torch.Tensor of shape (h, w) with values in {0., 1.}.
    """
    if ratio == 1.:
        return torch.tensor(np.ones([h, w]) * 1.)
    # h, w = img.shape[0], img.shape[1]
    d1, d2 = int(d1 * min(h, w)), int(d2 * min(h, w))
    # Oversized canvas so the rotation does not expose the borders.
    hh, ww = int(1.5 * h), int(1.5 * w)
    d = np.random.randint(d1, d2)
    # Stripe thickness within each period of length d.
    l = int(d * ratio + 0.5)
    mask = np.ones((hh, ww), np.float32)
    # Random phase offsets for the row and column stripes.
    st_h = np.random.randint(d)
    st_w = np.random.randint(d)
    # Zero horizontal stripes every d rows ...
    for i in range(-1, hh // d + 1):
        s = d * i + st_h
        t = s + l
        s = max(min(s, hh), 0)
        t = max(min(t, hh), 0)
        mask[s:t, :] *= 0
    # ... and vertical stripes every d columns.
    for i in range(-1, ww // d + 1):
        s = d * i + st_w
        t = s + l
        s = max(min(s, ww), 0)
        t = max(min(t, ww), 0)
        mask[:, s:t] *= 0
    # NOTE(review): `r` is never used -- Rotate samples its own angle
    # from `limit`; this call only advances the NumPy RNG.
    r = np.random.randint(rotate)
    mask = Rotate(limit=rotate, border_mode=cv2.BORDER_CONSTANT,
                  p=1)(image=mask)['image']
    # Center-crop the oversized canvas back to (h, w), then invert.
    mask = torch.tensor(mask[(hh - h) // 2:(hh - h) // 2 + h,
                             (ww - w) // 2:(ww - w) // 2 + w])
    mask = 1 - mask
    return mask
def strong_aug(p=0.6, im_height=700, im_width=1200):
    """Strong augmentation pipeline applied with probability `p`.

    Args:
        p: probability the whole pipeline is applied.
        im_height, im_width: expected input dimensions, used to size the
            random crop and dropout holes.
    """
    # NOTE(review): dropout_w is derived from the width and dropout_h
    # from the height, but below they are passed as max_height=dropout_w
    # and max_width=dropout_h -- this looks swapped; confirm the
    # intended hole geometry.
    dropout_w = int(im_width / 82)
    dropout_h = int(im_height / 9.)
    return Compose([
        Rotate(limit=2, p=0.5),
        RandomCrop(
            height=int(im_height * 0.95), width=int(im_width * 0.9), p=0.3),
        ElasticTransform(p=0.8),
        HorizontalFlip(p=0.5),
        CoarseDropout(max_holes=8, max_height=dropout_w, max_width=dropout_h,
                      min_holes=1, min_height=5, min_width=5, fill_value=0,
                      always_apply=False, p=0.85),
        OneOf([
            MotionBlur(p=0.8),
            Blur(blur_limit=20, p=0.8),
        ], p=0.35),
        # Final resize to the model input size.
        Resize(height=256, width=256, p=1)
    ], p=p)
def init_augmentations(self):
    """Build the three augmentation pipelines used by this object.

    Creates:
        self.crop_aug: random resized crop + common augmentations.
        self.resize_aug: plain resize + common augmentations.
        self.just_resize: resize only (no augmentation).

    The two augmented pipelines are built via self.get_aug with
    min_visibility=0.5.
    """
    # Shared photometric/geometric augmentations.
    common = [
        HorizontalFlip(),
        Rotate(limit=10),
        RandomBrightnessContrast(),
        ToGray(p=0.05)
    ]
    random_crop_aug = [
        RandomResizedCrop(height=self.params.input_height,
                          width=self.params.input_width,
                          scale=(0.35, 1.0))
    ]
    random_crop_aug.extend(common)
    simple_resize_aug = [
        Resize(height=self.params.input_height, width=self.params.input_width)
    ]
    simple_resize_aug.extend(common)
    crop = self.get_aug(random_crop_aug, min_visibility=0.5)
    resize = self.get_aug(simple_resize_aug, min_visibility=0.5)
    just_resize = self.get_aug([
        Resize(height=self.params.input_height, width=self.params.input_width)
    ])
    self.crop_aug = crop
    self.resize_aug = resize
    self.just_resize = just_resize
def __init__(self, is_train: bool, to_pytorch: bool, preprocess):
    """Wrap `preprocess` with training-time augmentation.

    Args:
        is_train: when True, follow `preprocess` with one randomly chosen
            augmentation branch; otherwise use `preprocess` alone.
        to_pytorch: stored as self._need_to_pytorch; tensor conversion
            happens elsewhere.
        preprocess: base albumentations transform applied first.
    """
    if is_train:
        # NOTE(review): the OneOf picks either (a) a Compose that applies
        # the whole flip/noise/rotate/compress/... sequence, or (b) a
        # lone HorizontalFlip. If the intent was "one of these
        # augmentations", the inner Compose should itself be a OneOf --
        # confirm.
        self._aug = Compose([
            preprocess,
            OneOf([
                Compose([
                    HorizontalFlip(p=0.5),
                    GaussNoise(p=0.5),
                    OneOf([
                        RandomBrightnessContrast(),
                        RandomGamma(),
                    ], p=0.5),
                    Rotate(limit=20, border_mode=cv2.BORDER_CONSTANT),
                    ImageCompression(),
                    CLAHE(),
                    Downscale(scale_min=0.2, scale_max=0.9, p=0.5),
                    ISONoise(p=0.5),
                    MotionBlur(p=0.5)
                ]),
                HorizontalFlip(p=0.5)
            ])
        ], p=1)
    else:
        self._aug = preprocess
    self._need_to_pytorch = to_pytorch
def oneof_always_apply_crash():
    """Regression check: composing a OneOf(p=1) alongside other
    transforms must run without crashing and return a result."""
    pipeline = Compose(
        [HorizontalFlip(), Rotate(), OneOf([Blur(), MedianBlur()], p=1)],
        p=1)
    result = pipeline(image=np.ones((8, 8)))
    assert result
def _prepare_data(self):
    """Build the CIFAR-10 train/test datasets with albumentations
    pipelines (augmentation on the train split only).

    Returns:
        (train_dataset, test_dataset) tuple of datasets.CIFAR10.
    """
    # Augmentation used only while training
    train_transforms_album = Compose([
        HueSaturationValue(p=0.25),
        HorizontalFlip(p=0.5),
        Rotate(limit=15),
        # Single mean-colored hole, CutOut style.
        CoarseDropout(
            max_holes=1, max_height=16, max_width=16,
            min_height=4, min_width=4,
            fill_value=np.array(self._train_mean()) * 255.0,
            p=0.75,
        ),
        Normalize(mean=self._train_mean(), std=self._train_std()),
        ToTensor(),
    ])

    # torchvision passes a PIL image; albumentations needs an ndarray
    # and returns a dict keyed by 'image'.
    def train_transforms(img):
        return train_transforms_album(image=np.array(img))["image"]

    # Previous torchvision pipeline, kept for reference:
    # train_transforms = transforms.Compose(
    #     [
    #         transforms.RandomCrop(32, padding=4),
    #         transforms.RandomHorizontalFlip(),
    #         transforms.ToTensor(),
    #         transforms.Normalize(self._train_mean(), self._train_std()),
    #     ]
    # )
    train_dataset = datasets.CIFAR10(root="./data", train=True,
                                     download=True,
                                     transform=train_transforms)

    # No augmentation while testing
    test_transforms_album = Compose([
        Normalize(mean=self._test_mean(), std=self._test_std()),
        ToTensor()
        # transforms.ToTensor(),
        # transforms.Normalize(self._test_mean(), self._test_std()),
    ])

    def test_transforms(img):
        return test_transforms_album(image=np.array(img))["image"]

    # Pytorch default approach, kept for reference:
    # test_transforms = transforms.Compose(
    #     [
    #         transforms.ToTensor(),
    #         transforms.Normalize(self._test_mean(), self._test_std()),
    #     ]
    # )
    test_dataset = datasets.CIFAR10(root="./data", train=False,
                                    transform=test_transforms)
    return train_dataset, test_dataset
def train_transform(p=1):
    """Rotate/flip augmentation followed by ImageNet-statistics
    normalization, each step (and the whole pipeline) gated by `p`."""
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    return Compose(
        [
            Rotate(90, p=p),
            VerticalFlip(p=p),
            HorizontalFlip(p=p),
            Normalize(mean=imagenet_mean, std=imagenet_std, p=p),
            # RandomCrop(32, 32)
        ],
        p=p)
def my_transforms(hor_flip=0.5, ver_flip=0.2, rotate=120):
    """Compose optional horizontal/vertical flips and a rotation.

    A step is included only when its argument is positive; the rotation,
    when enabled, is always applied (p=1) with `rotate` as its limit.
    """
    steps = []
    if hor_flip > 0:
        steps.append(HorizontalFlip(p=hor_flip))
    if ver_flip > 0:
        steps.append(VerticalFlip(p=ver_flip))
    if rotate > 0:
        steps.append(Rotate(limit=rotate, p=1))
    return Compose(steps)
def test_rotate_interpolation(interpolation):
    """Rotate must apply the requested interpolation to the image while
    always rotating the mask with nearest-neighbour interpolation."""
    img = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
    msk = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)

    result = Rotate(limit=(45, 45), interpolation=interpolation, p=1)(
        image=img, mask=msk)

    want_img = F.rotate(img, 45, interpolation=interpolation,
                        border_mode=cv2.BORDER_REFLECT_101)
    want_msk = F.rotate(msk, 45, interpolation=cv2.INTER_NEAREST,
                        border_mode=cv2.BORDER_REFLECT_101)
    assert np.array_equal(result['image'], want_img)
    assert np.array_equal(result['mask'], want_msk)
def __init__(self):
    """Training-time transform pipeline (albumentations): small random
    rotation, flips, Cutout holes, then normalization and tensor
    conversion."""
    self.albTrainTransforms = Compose([
        # Resize(256, 256),
        Rotate((-10.0, 10.0)),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        Cutout(num_holes=8, max_h_size=8, max_w_size=8),
        # Normalize with mean=std=0.5 maps [0, 1] pixels to [-1, 1].
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ToTensor()
    ])  # this is train transforms