def __init__(self):
    """Build the pool of albumentations transforms used by this augmenter."""
    super(Augmentation, self).__init__()

    # Geometric flips / rotations, each drawn with probability 0.5.
    self._hflip = HorizontalFlip(p=0.5)
    self._vflip = VerticalFlip(p=0.5)
    self._rotate = RandomRotate90(p=0.5)
    self._transpose = Transpose(p=0.5)

    # Photometric adjustments.
    self._clahe = CLAHE(p=0.5)
    self._brightness = RandomBrightnessContrast(p=0.5)
    self._gamma = RandomGamma(p=0.5)

    # Non-rigid and affine deformations; the elastic warp always fires.
    elastic_alpha = 120
    self._elastic = ElasticTransform(p=1,
                                     alpha=elastic_alpha,
                                     sigma=elastic_alpha * 0.05,
                                     alpha_affine=elastic_alpha * 0.03)
    self._distort = GridDistortion(p=0.5)
    self._affine = ShiftScaleRotate(shift_limit=0.0625,
                                    scale_limit=0.1,
                                    rotate_limit=45,
                                    p=0.5)
# Example #2
def get_augmentations_train():
    """Return the training-time augmentation pipeline.

    NOTE(review): relies on module-level ``h`` and ``w`` for the crop
    size — confirm they are defined where this is called.
    """
    color_jitter = OneOf([
        RandomContrast(),
        RandomGamma(),
        RandomBrightness(),
    ], p=0.3)
    warp = OneOf([
        ElasticTransform(alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(),
        OpticalDistortion(distort_limit=2, shift_limit=0.5),
    ], p=0.3)
    AUGMENTATIONS_TRAIN = Compose([
        HorizontalFlip(p=0.5),
        color_jitter,
        warp,
        RandomSizedCrop(min_max_height=(176, 256), height=h, width=w, p=0.25),
        ToFloat(max_value=1),
    ], p=1)
    return AUGMENTATIONS_TRAIN
# Example #3
def create_train_transforms(conf):
    """Build the training pipeline from a config dict.

    Reads ``conf['crop_height']`` / ``conf['crop_width']``; the same
    pipeline is also applied to an extra target named ``image1``.
    """
    height = conf['crop_height']
    width = conf['crop_width']

    # Crop around the bbox at 80%-120% of the target height, square-ish.
    crop = RandomSizedCropAroundBbox(
        min_max_height=(int(height * 0.8), int(height * 1.2)),
        w2h_ratio=1.,
        height=height,
        width=width,
        p=1)

    steps = [
        crop,
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        Lighting(alphastd=0.3),
        RandomBrightnessContrast(p=0.2),
        RandomGamma(p=0.2),
        RGBShift(p=0.2),
    ]
    return Compose(steps, additional_targets={'image1': 'image'})
# Example #4
def aug_baseline_CLAHE_Sharpen(image, mask):
    """Apply the baseline augmentation set plus CLAHE and sharpening to an
    (image, mask) pair and return the augmented pair."""
    color = OneOf([RandomContrast(),
                   RandomGamma(),
                   RandomBrightness()], p=0.3)
    warp = OneOf([
        ElasticTransform(alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(),
        OpticalDistortion(distort_limit=2, shift_limit=0.5),
    ], p=0.3)
    pipeline = Compose([
        HorizontalFlip(),
        VerticalFlip(),
        color,
        warp,
        ShiftScaleRotate(),
        CLAHE(),
        IAASharpen(),
    ])
    result = pipeline(image=image, mask=mask)
    return result['image'], result['mask']
# Example #5
 def __init__(self):
     """Assemble the CIFAR-style training transform pipeline."""
     steps = [
         HorizontalFlip(),
         Rotate((-10.0, 10.0)),
         # Normalize with ImageNet statistics before the spatial crop.
         Normalize(mean=[0.485, 0.456, 0.406],
                   std=[0.229, 0.224, 0.225]),
         # Pad to 40x40 (border_mode=4 == cv2.BORDER_REFLECT_101), then
         # the classic random 32x32 crop plus a small cutout.
         PadIfNeeded(min_height=40, min_width=40, border_mode=4,
                     always_apply=False, value=0, p=1.0),
         RandomCrop(height=32, width=32, always_apply=True, p=1.0),
         Cutout(num_holes=1, max_h_size=6, max_w_size=6, p=0.5),
         ToTensor(),
     ]
     self.train_transform = Compose(steps)
# Example #6
    def __init__(self):
        """Define the per-stage transform lists (ImageNet mean/std)."""
        self.mean = np.array([0.485, 0.456, 0.406])
        self.std = np.array([0.229, 0.224, 0.225])

        # Pad/cutout fill uses the dataset mean color in 0-255 scale.
        fill = self.mean * 255.0

        # Train-time spatial augmentations.
        self.transforms_elist = [
            PadIfNeeded(min_height=72, min_width=72, value=fill),
            RandomCrop(height=64, width=64, p=1.0),
            HorizontalFlip(p=0.5),
            Rotate(7, p=0.5),
            Cutout(num_holes=2, max_h_size=8, max_w_size=8,
                   fill_value=fill, p=0.5),
        ]

        # Evaluation-time resize only.
        self.transforms_test = [
            Resize(32, 32),
        ]

        # Applied in every stage: normalize, then convert to tensor.
        self.transforms_main = [
            Normalize(mean=self.mean, std=self.std,
                      max_pixel_value=255.0, p=1.0),
            ToTensor(),
        ]
# Example #7
    def get_augmentations(self):
        """Return the composed train-time augmentations, or ``None`` when
        no augmentation strength is configured."""
        if self.strength is None:
            return None

        flips = Compose([
            HorizontalFlip(),
        ], p=1.)
        jitter = Compose([
            self.get_photometric(),
            self.get_geoometric(),
        ], p=.95)

        # Pascal-VOC boxes; keep every box regardless of area/visibility.
        box_cfg = {
            'format': 'pascal_voc',
            'min_area': 0.,
            'min_visibility': 0.,
            'label_fields': ['category_id'],
        }
        return Compose([flips, jitter], bbox_params=box_cfg)
# Example #8
def aug_with_crop(image_size=256, crop_prob=1):
    """Crop-then-augment pipeline for float64 images.

    The MAX_VALUES_BY_DTYPE patch teaches albumentations that float64
    images live in [0, 1].
    """
    # Monkey-patch lol
    albu.augmentations.functional.MAX_VALUES_BY_DTYPE[np.dtype('float64')] = 1.0

    distortions = OneOf([
        ElasticTransform(p=0.1, alpha=120, sigma=120 * 0.05,
                         alpha_affine=120 * 0.03),
        GridDistortion(p=0.1),
        OpticalDistortion(p=0.05, distort_limit=2, shift_limit=0.5),
    ], p=0.5)

    return albu.Compose([
        RandomCrop(width=image_size, height=image_size, p=crop_prob),
        HorizontalFlip(p=0.3),
        VerticalFlip(p=0.3),
        RandomRotate90(p=0.3),
        Transpose(p=0.3),
        ShiftScaleRotate(shift_limit=0.01, scale_limit=0.04,
                         rotate_limit=0, p=0.1),
        RandomBrightnessContrast(p=0.3),
        RandomGamma(p=0.1),
        IAAEmboss(p=0.1),
        Blur(p=0.001, blur_limit=3),
        distortions,
    ], p=1)
# Example #9 — file: data.py, project: mike112223/sdd
def get_transforms(phase, mean, std):
    """Build the transform pipeline for ``phase``.

    'train' prepends a horizontal flip; every phase gets
    normalization followed by tensor conversion.
    """
    steps = []
    if phase == "train":
        # Only horizontal flip as of now; vertical flip, brightness,
        # gamma and shift/scale/rotate were tried and disabled.
        steps.append(HorizontalFlip())
    steps += [
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
 def __init__(self, train=True):
     """Select the train (augmented) or test (normalize-only) pipeline."""
     normalize = Normalize(
         mean=[0.49139968, 0.48215841, 0.44653091],
         std=[0.24703223, 0.24348513, 0.26158784],
     )
     if train:
         steps = [
             Rotate(limit=20, p=0.5),
             HorizontalFlip(),
             Cutout(num_holes=3, max_h_size=8, max_w_size=8, p=0.5),
             normalize,
             ToTensor(),
         ]
     else:
         steps = [normalize, ToTensor()]
     self.albumentations_transform = Compose(steps)
# Example #11
def strong_aug(p=.5):
    """A strong general-purpose augmentation mix, applied with overall
    probability ``p``."""
    steps = [
        RandomRotate90(p=0.2),
        Transpose(p=0.2),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        OneOf([
            GaussNoise(),
        ], p=0.2),
        ShiftScaleRotate(p=0.2),
        # One of several contrast/sharpness enhancers.
        OneOf([
            CLAHE(clip_limit=2),
            Sharpen(),
            Emboss(),
            RandomBrightnessContrast(),
        ], p=0.2),
        HueSaturationValue(p=0.2),
    ]
    return Compose(steps, p=p)
# Example #12
    def __getitem__(self, idx):
        """Load image ``idx`` from ``<root>/train_images``, augment, and
        return ``(image_tensor, label_tensor)``.

        NOTE(review): the Compose pipeline is rebuilt on every call —
        consider hoisting it into ``__init__``.
        """
        # First dataframe column is the file name; the rest are labels.
        file = self.train_df.iloc[idx].values[0]
        file_path = os.path.join((self.root + 'train_images'), file)
        image = cv2.imread(file_path)

        # ImageNet normalization statistics.
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
        train_aug = Compose([
            # PadIfNeeded(min_height=256, min_width=1600, p=1),
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            Normalize(mean=mean, std=std, p=1),
            ToTensor()
        ])

        augmented = train_aug(image=image)
        image = augmented['image']
        # Remaining columns form the float multi-label target vector.
        label = torch.tensor(
            np.array(self.train_df.iloc[idx].values[1:], dtype=np.float32))
        return image, label
# Example #13
    def get_compose(self):
        """Assemble the enabled augmentations into a Compose applied with
        overall probability 0.75.

        Each augmentation is included only when its configured flag or
        threshold enables it.
        """
        augments = []
        if self.h_flip:
            augments.append(HorizontalFlip())

        if self.v_flip:
            augments.append(Flip())

        if (self.luminance >= 0 or self.contrast >= 0):
            augments.append(
                RandomBrightnessContrast(self.luminance, self.contrast))

        # BUG FIX: was ``self.sel.rot_range`` — no ``sel`` attribute is
        # referenced anywhere else, and the body below reads
        # ``self.rot_range``. Also, ShiftScaleRotate's keyword is
        # ``rotate_limit``; ``rot_range`` would raise a TypeError.
        if (self.rot_range >= 0 or self.scale >= 0
                or self.shift_range >= 0):
            augments.append(
                ShiftScaleRotate(shift_limit=self.shift_range,
                                 rotate_limit=self.rot_range,
                                 scale_limit=self.scale))

        return Compose(augments, p=0.75)
# Example #14
def get_augmentations(augmentation, p, image_size=256):
    """Return a Compose for phase 'train' or 'valid'.

    Raises ValueError for any other phase name.
    """
    if augmentation == 'train':
        steps = [
            # RandomScale(scale_limit=0.125),
            HorizontalFlip(p=0.5),
            RandomBrightnessContrast(p=0.5),
            RandomGamma(p=0.3),
            ShiftScaleRotate(shift_limit=0.1625, scale_limit=0.6,
                             rotate_limit=15, p=0.6),
            # ShiftScaleRotate(rotate_limit=20, p=0.6),
            Resize(image_size, image_size),
        ]
    elif augmentation == 'valid':
        steps = [Resize(image_size, image_size)]
    else:
        raise ValueError("Unknown Augmentations")

    return Compose(steps, p=p)
# Example #15
def train_transform(p=1.0):
    """Return an ``img -> augmented img`` closure around an albumentations
    pipeline in which every step shares probability ``p``."""
    pipeline = Compose([
        FlipChannels(),
        VerticalFlip(p=p),
        HorizontalFlip(p=p),
        RandomRotate90(p=p),
        RandomGamma(p=p, gamma_limit=(90, 350)),
        # Constant border so warped edges stay blank instead of reflecting.
        OpticalDistortion(p=p, border_mode=cv2.BORDER_CONSTANT),
        GridDistortion(p=p, border_mode=cv2.BORDER_CONSTANT),
        ShiftScaleRotate(p=p, scale_limit=0.2,
                         border_mode=cv2.BORDER_CONSTANT),
    ], p=p)

    # @contrast_norm
    def transform_fun(img):
        return pipeline(image=img)['image']

    return transform_fun
# Example #16
def undo_tta(imgs, TTA):
    """Undo test-time augmentation and average the predictions.

    Center-crops each prediction back to ``args.resize_size``, resizes to
    ``args.initial_size``, un-flips the second prediction when
    ``TTA == 'flip'``, and returns the element-wise mean.

    Relies on a module-level ``args`` namespace for the sizes.
    """
    # Loop-invariant hoist: build the crop once instead of per image.
    crop = CenterCrop(height=args.resize_size,
                      width=args.resize_size,
                      p=1.0)

    part = []
    for img in imgs:
        prob = crop(image=img)["image"]
        prob = cv2.resize(prob, (args.initial_size, args.initial_size))
        part.append(prob)

    if TTA == 'flip':
        # The second prediction was made on a flipped input; flip it back.
        part[1] = HorizontalFlip(p=1)(image=part[1])['image']

    return np.mean(np.array(part), axis=0)
# Example #17
def img_augment(p=1.):
    """Strong augmentation for 350x525 crops, applied with probability ``p``."""
    return Compose([
        RandomSizedCrop((280, 345), 350, 525, p=0.9, w2h_ratio=1.5),
        # BUG FIX: ``HorizontalFlip(.5)`` / ``VerticalFlip(.5)`` passed 0.5
        # positionally as ``always_apply`` (truthy -> flip every time);
        # the intended flip probability is p=.5.
        HorizontalFlip(p=.5),
        VerticalFlip(p=.5),
        OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
                RandomContrast(),
                RandomBrightness(),
            ], p=0.3),
        #
        ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=20,
                         p=.75, border_mode=cv2.BORDER_REFLECT),
        Blur(blur_limit=3, p=.33),
        GaussNoise(p=0.8),
        OpticalDistortion(p=.33),
        GridDistortion(p=.33),
        HueSaturationValue(p=.33)
    ], p=p)
# Example #18
def strong_aug(p=0.5):
    """Grid-shuffle-based strong augmentation, applied with probability ``p``."""
    geometry = OneOf([
        ShiftScaleRotate(shift_limit=0.125),
        Transpose(),
        RandomRotate90(),
        VerticalFlip(),
        HorizontalFlip(),
        IAAAffine(shear=0.1),
    ])
    noise_or_blur = OneOf([
        GaussNoise(),
        GaussianBlur(),
        MedianBlur(),
        MotionBlur(),
    ])
    contrast = OneOf([
        RandomBrightnessContrast(),
        CLAHE(),
        IAASharpen(),
    ])
    return Compose([
        RandomGridShuffle((2, 2), p=0.75),
        geometry,
        noise_or_blur,
        contrast,
        # Ten 2x2 gray (value 127) holes.
        Cutout(10, 2, 2, 127),
    ], p=p)
def train(cls_model='b2', shape=(320,320)):
    """Train ``cls_model`` with 4-fold stratified cross-validation.

    cls_model: identifier passed to ``get_model`` (e.g. 'b2').
    shape: (height, width) the data generators resize images to.

    NOTE(review): ``history_0`` is unused and nothing is returned —
    presumably checkpointing happens inside ``PrAucCallback``; confirm.
    """

    # Stratify on the sorted class-set string so multi-label class
    # combinations are balanced across folds.
    kfold = StratifiedKFold(n_splits=4, random_state=133, shuffle=True)
    train_df, img_2_vector = preprocess()

    albumentations_train = Compose([
        VerticalFlip(), HorizontalFlip(), Rotate(limit=20), GridDistortion()
    ], p=1)

    for n_fold, (train_indices, val_indices) in enumerate(kfold.split(train_df['Image'].values, train_df['Class'].map(lambda x: str(sorted(list(x)))))):
        train_imgs = train_df['Image'].values[train_indices]
        val_imgs = train_df['Image'].values[val_indices]
        # Augmented, shuffled generator used for fitting.
        data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train,
                                               resized_height=shape[0], resized_width=shape[1],
                                               img_2_ohe_vector=img_2_vector)

        # Un-augmented, unshuffled copy of the train split for metric eval.
        data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False,
                                                    resized_height=shape[0], resized_width=shape[1],
                                                    img_2_ohe_vector=img_2_vector)

        data_generator_val = DataGenenerator(val_imgs, shuffle=False,
                                             resized_height=shape[0], resized_width=shape[1],
                                             img_2_ohe_vector=img_2_vector)

        model = get_model(cls_model, shape=shape)

        model.compile(optimizer=RAdam(), loss='binary_crossentropy',
                      metrics=['accuracy'])

        # PR-AUC tracked on both splits; the val callback also saves
        # checkpoints under ``<model>_<fold>``.
        train_metric_callback = PrAucCallback(data_generator_train_eval)
        checkpoint_name = cls_model + '_' + str(n_fold)
        val_callback = PrAucCallback(data_generator_val, stage='val', checkpoint_name=checkpoint_name)

        history_0 = model.fit_generator(generator=data_generator_train,
                                        validation_data=data_generator_val,
                                        epochs=20,
                                        callbacks=[train_metric_callback, val_callback],
                                        workers=42,
                                        verbose=1
                                        )
def generate_transforms(image_size):
    """Return ``{'train_transforms', 'val_transforms'}`` Compose pipelines
    for ``image_size`` = (height, width)."""
    height, width = image_size[0], image_size[1]

    def _resize():
        return Resize(height=height, width=width)

    def _normalize():
        # ImageNet statistics on 0-255 inputs.
        return Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225),
                         max_pixel_value=255.0,
                         p=1.0)

    train_transform = Compose([
        _resize(),
        OneOf([RandomBrightness(limit=0.1, p=1),
               RandomContrast(limit=0.1, p=1)]),
        OneOf([
            MotionBlur(blur_limit=3),
            MedianBlur(blur_limit=3),
            GaussianBlur(blur_limit=3),
        ], p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(shift_limit=0.2,
                         scale_limit=0.2,
                         rotate_limit=20,
                         interpolation=cv2.INTER_LINEAR,
                         border_mode=cv2.BORDER_REFLECT_101,
                         p=1),
        _normalize(),
    ])

    val_transform = Compose([_resize(), _normalize()])

    return {
        "train_transforms": train_transform,
        "val_transforms": val_transform
    }
# Example #21
def main():
    """Offline augmentation: write flipped/transposed copies of every
    training image and mask into the ``*_aug`` directories."""
    # Input globs (images are .tif, labels are .png).
    train_path = base_train_path + 'image/*.tif'
    mask_path = base_train_path + 'label/*.png'

    # Output directories for the augmented results.
    augtrain_path = base_train_path + 'image_aug/'
    augmask_path = base_train_path + 'label_aug/'

    train_img, masks = data_num(train_path, mask_path)
    for data in range(len(train_img)):
        # NOTE(review): splitting on '\\' assumes Windows-style paths.
        file_name = train_img[data].split('\\')[1].split('.')[0]
        image = cv2.imread(train_img[data])
        mask = np.array(Image.open(masks[data]))

        # Horizontal flip (image and mask transformed together).
        augmented_1 = HorizontalFlip(p=1)(image=image, mask=mask)
        aug_image_1 = augmented_1['image']
        aug_mask_1 = Image.fromarray(augmented_1['mask'])
        cv2.imwrite(augtrain_path + "/{}_{}.tif".format(file_name, 1),
                    aug_image_1)
        aug_mask_1.save(augmask_path + "/{}_{}.png".format(file_name, 1))

        # Vertical flip.
        augmented_2 = VerticalFlip(p=1)(image=image, mask=mask)
        aug_image_2 = augmented_2['image']
        aug_mask_2 = Image.fromarray(augmented_2['mask'])
        cv2.imwrite(augtrain_path + "/{}_{}.tif".format(file_name, 2),
                    aug_image_2)
        aug_mask_2.save(augmask_path + "/{}_{}.png".format(file_name, 2))

        # Transpose (swap axes). The original comment called this
        # "horizontal + vertical flip", but Transpose is not equivalent
        # to that combination.
        augmented_3 = Transpose(p=1)(image=image, mask=mask)
        aug_image_3 = augmented_3['image']
        aug_mask_3 = Image.fromarray(augmented_3['mask'])
        cv2.imwrite(augtrain_path + "/{}_{}.tif".format(file_name, 3),
                    aug_image_3)
        aug_mask_3.save(augmask_path + "/{}_{}.png".format(file_name, 3))

        # Progress log every 1000 files.
        if data % 1000 == 0:
            print(data)
# Example #22 — file: loader.py, project: chicm/inature
def img_augment(p=.8):
    """Moderate augmentation pipeline, applied with probability ``p``."""
    return Compose(
        [
            # BUG FIX: ``HorizontalFlip(.5)`` passed 0.5 positionally as
            # ``always_apply`` (truthy -> flip every time); the intended
            # flip probability is p=.5.
            HorizontalFlip(p=.5),
            OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
                RandomContrast(),
                RandomBrightness(),
            ],
                  p=0.3),
            #
            ShiftScaleRotate(
                shift_limit=0.0625, scale_limit=0.1, rotate_limit=20, p=.75),
            Blur(blur_limit=3, p=.33),
            OpticalDistortion(p=.33),
            GridDistortion(p=.33),
            #HueSaturationValue(p=.33)
        ],
        p=p)
# Example #23
    def __init__(self, train=True):
        """Build the transform: train adds rotate/flip/cutout ahead of the
        (-1, 1) normalization; test normalizes only."""
        transformsList = []
        channel_means = (0.5, 0.5, 0.5)
        channel_stdevs = (0.5, 0.5, 0.5)

        if train:
            # BUG FIX: ``Rotate(-10.0, 10, 0)`` passed 10 as
            # ``interpolation`` and 0 as ``border_mode``; the intent
            # (cf. the ``Rotate((-10.0, 10.0))`` usage elsewhere in this
            # file) is a +/-10 degree rotation range.
            transformsList += [Rotate(limit=(-10.0, 10.0))]
            # BUG FIX: ``HorizontalFlip(0.5)`` passed 0.5 positionally as
            # ``always_apply``; the intended flip probability is p=0.5.
            transformsList += [HorizontalFlip(p=0.5)]
            # transformsList += [RandomCrop(height = 2, width = 2, p=0.5)]
            transformsList += [
                Cutout(num_holes=1, max_h_size=8, max_w_size=8, p=0.5)
            ]

        transformsList += [
            Normalize(mean=channel_means,
                      std=channel_stdevs,
                      always_apply=True),
            ToTensor()
        ]

        self.transform = Compose(transformsList)
# Example #24
def downloading_data(data_set):
  """Download the train/test splits of ``data_set`` with their transforms
  and return ``(trainset, testset)``.

  ``data_set`` is a torchvision-style dataset class accepting
  ``root/train/download/transform`` keyword arguments.
  """
  # BUG FIX: the original placed albumentations' ``HorizontalFlip()`` in a
  # torchvision ``transforms.Compose``. Torchvision calls each step as
  # ``t(img)``, while albumentations transforms expect ``t(image=...)``
  # and return a dict, so the pipeline would fail at runtime. Use
  # torchvision's RandomHorizontalFlip instead (same intent, p=0.5).
  train_transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.RandomHorizontalFlip(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

  test_transform = transforms.Compose(
    [transforms.ToTensor(),
    #  transforms.RandomRotation((-11.0, 11.0)),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

  trainset = data_set(root='./data', train=True,
                                        download=True, transform=train_transform)
  testset = data_set(root='./data', train=False,
                                       download=True, transform=test_transform)

  print('No.of images in train set are',len(trainset))
  print('No.of images in test set are',len(testset))
  return trainset,testset
# Example #25
def get_train_transforms():
    """Training pipeline: random resized crop to 75% of the global H x W,
    horizontal flip, ImageNet normalization, occlusion dropout, tensor
    conversion.

    NOTE(review): depends on module-level ``H`` and ``W``.
    """
    steps = [
        RandomResizedCrop(int(H * 0.75), int(W * 0.75)),
        HorizontalFlip(p=0.5),
        Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225],
                  max_pixel_value=255.0,
                  p=1.0),
        # Two flavors of random-erasing style occlusion.
        CoarseDropout(p=0.5),
        Cutout(p=0.5),
        ToTensorV2(p=1.0),
    ]
    return Compose(steps, p=1.)
def train_transformations(prob=1.0):
    """Road-scene training augmentations (pad to 384x1280, flip/rotate,
    photometric jitter, shadows), applied with probability ``prob``."""
    photometric = OneOf([
        ToGray(p=0.3),
        RandomBrightnessContrast(p=0.5),
        CLAHE(p=0.5),
        IAASharpen(p=0.45),
    ], p=0.5)
    return Compose([
        # Zero-pad to a fixed canvas so batches have a uniform size.
        PadIfNeeded(min_height=384,
                    min_width=1280,
                    border_mode=cv2.BORDER_CONSTANT,
                    value=(0, 0, 0),
                    always_apply=True),
        OneOf([HorizontalFlip(p=0.5),
               Rotate(limit=20, p=0.3)], p=0.5),
        photometric,
        RandomShadow(p=0.4),
        HueSaturationValue(p=0.3),
        Normalize(always_apply=True),
    ], p=prob)
# Example #27
def get_train_dataloader(file_list, opt):
    """Create the training DataLoader over ``file_list``.

    Crop size, batch size, worker count and problem type come from ``opt``.
    """
    # Pad up to the crop size first so RandomCrop always has room.
    transform = Compose([
        PadIfNeeded(min_height=opt.train_crop_height,
                    min_width=opt.train_crop_width,
                    p=1),
        RandomCrop(height=opt.train_crop_height,
                   width=opt.train_crop_width,
                   p=1),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        Normalize(p=1),
    ], p=1)

    dataset = RoboticsDataset(file_names=file_list,
                              transform=transform,
                              problem_type=opt.problem_type)
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=opt.batch_size,
                                       shuffle=True,
                                       num_workers=opt.workers,
                                       pin_memory=True)
def get_augmentations(cfg):
    """Compose the augmentations enabled in ``cfg.augmentation``.

    Each config entry carries ``is_applied`` and ``params`` keys; order of
    application matches the table below.
    """
    spec = [
        ('random_scale', RandomScale),
        ('random_crop', RandomCrop),
        ('LRflip', HorizontalFlip),
        ('brightness_shift', RandomBrightness),
    ]

    processes = []
    for key, transform_cls in spec:
        entry = cfg.augmentation[key]
        if entry['is_applied']:
            processes.append(transform_cls(**entry['params']))

    return Compose(processes)
# Example #29
def train_transform_regr(sz, downscale=1, p=1):
    """Return an ``(img_path, n) -> normalized augmented image`` closure
    for regression training."""
    pipeline = Compose([
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        RandomRotate90(p=0.5),
        RandomGamma(p=0.9, gamma_limit=(80, 150)),
        HueSaturationValue(p=0.9, hue_shift_limit=10,
                           sat_shift_limit=20, val_shift_limit=10),
    ], p=p)

    def transform_fun(img_path, n=None):
        # Read at the requested size, augment, then normalize.
        img = read_img(img_path, sz, downscale=downscale)
        img = pipeline(image=img)["image"]
        return norm_fun(img)

    return transform_fun
def even_more_transform(height, width, mappings, p=2 / 3):
    """Heavy degradation pipeline (quality drops, noise, color jitter);
    ``mappings`` adds extra named targets.

    NOTE: the downscale factor for the Resize branch is drawn once when
    the pipeline is built, not per call, and the ``p`` parameter is not
    used by the Compose (it applies with p=0.9).
    """
    scale = random.randint(2, 4)
    quality_drop = OneOf([
        JpegCompression(quality_lower=20, quality_upper=70, p=0.5),
        Downscale(scale_min=0.25, scale_max=0.50, interpolation=1, p=0.5),
        Resize(height // scale, width // scale, interpolation=1, p=1.0),
    ], p=0.6)
    return Compose([
        quality_drop,
        HorizontalFlip(p=0.5),
        A.augmentations.transforms.GaussNoise(p=0.2),
        A.RandomBrightnessContrast(p=0.2),
        A.RandomGamma(p=0.2),
        A.CLAHE(p=0.2),
        A.ChannelShuffle(p=0.2),
        A.MultiplicativeNoise(multiplier=[0.5, 1.5],
                              elementwise=True, p=0.1),
        A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10,
                             val_shift_limit=10, p=0.2),
    ], p=0.9, additional_targets=mappings)