Beispiel #1
0
def img_augment(p=1.):
    """Build the image-only augmentation pipeline.

    Args:
        p: probability of applying the whole pipeline.

    Returns:
        An albumentations ``Compose`` transform.
    """
    # Exactly one photometric tweak fires, 30% of the time.
    photometric = OneOf(
        [
            CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ],
        p=0.3,
    )
    steps = [
        RandomSizedCrop((280, 345), 350, 525, p=0.9, w2h_ratio=1.5),
        HorizontalFlip(0.5),
        VerticalFlip(0.5),
        photometric,
        ShiftScaleRotate(shift_limit=0.15, scale_limit=0.15, rotate_limit=20,
                         p=0.75, border_mode=cv2.BORDER_REFLECT),
        Blur(blur_limit=3, p=0.33),
        GaussNoise(p=0.8),
        OpticalDistortion(p=0.33),
        GridDistortion(p=0.33),
        HueSaturationValue(p=0.33),
    ]
    return Compose(steps, p=p)
Beispiel #2
0
def train_transform(p=1.0):
    """Return a callable that augments a single image array.

    Args:
        p: probability used both per-transform and for the whole pipeline.

    Returns:
        Function mapping an image array to its augmented version.
    """
    pipeline = Compose(
        [
            FlipChannels(),
            VerticalFlip(p=p),
            HorizontalFlip(p=p),
            RandomRotate90(p=p),
            RandomGamma(p=p, gamma_limit=(90, 350)),
            OpticalDistortion(p=p, border_mode=cv2.BORDER_CONSTANT),
            GridDistortion(p=p, border_mode=cv2.BORDER_CONSTANT),
            ShiftScaleRotate(p=p, scale_limit=0.2,
                             border_mode=cv2.BORDER_CONSTANT),
        ],
        p=p,
    )

    # @contrast_norm
    def transform_fun(img):
        return pipeline(image=img)['image']

    return transform_fun
Beispiel #3
0
def create_train_transforms(conf):
    """Create the training pipeline sized from ``conf``.

    ``additional_targets`` applies the same (random) transform to a second
    image passed under the key ``image1``.
    """
    crop_h = conf['crop_height']
    crop_w = conf['crop_width']
    min_h, max_h = int(crop_h * 0.8), int(crop_h * 1.2)

    transforms = [
        RandomSizedCropAroundBbox(min_max_height=(min_h, max_h),
                                  w2h_ratio=1.,
                                  height=crop_h,
                                  width=crop_w,
                                  p=1),
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        Transpose(),
        Lighting(alphastd=0.3),
        RandomBrightnessContrast(p=0.2),
        RandomGamma(p=0.2),
        RGBShift(p=0.2),
    ]
    return Compose(transforms, additional_targets={'image1': 'image'})
Beispiel #4
0
def aug_baseline_CLAHE_Sharpen(image, mask):
    """Apply the baseline augmentation plus CLAHE and sharpening, jointly
    to an image and its segmentation mask.

    Returns:
        Tuple ``(augmented_image, augmented_mask)``.
    """
    color_jitter = OneOf(
        [RandomContrast(), RandomGamma(), RandomBrightness()], p=0.3)
    warp = OneOf(
        [
            ElasticTransform(alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GridDistortion(),
            OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
        p=0.3,
    )
    pipeline = Compose([
        HorizontalFlip(),
        VerticalFlip(),
        color_jitter,
        warp,
        ShiftScaleRotate(),
        CLAHE(),
        IAASharpen(),
    ])
    out = pipeline(image=image, mask=mask)
    return out['image'], out['mask']
Beispiel #5
0
def strong_aug(p=.5):
    """Moderately strong image augmentation pipeline.

    Args:
        p: probability of applying the whole pipeline.
    """
    noise = OneOf([GaussNoise()], p=0.2)
    texture = OneOf(
        [
            CLAHE(clip_limit=2),
            Sharpen(),
            Emboss(),
            RandomBrightnessContrast(),
        ],
        p=0.2,
    )
    return Compose(
        [
            RandomRotate90(p=0.2),
            Transpose(p=0.2),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            noise,
            ShiftScaleRotate(p=0.2),
            texture,
            HueSaturationValue(p=0.2),
        ],
        p=p,
    )
def generate_transforms(image_size):
    """Build the train/val pipelines for a target (height, width).

    Returns:
        Dict with keys ``train_transforms`` and ``val_transforms``.
    """
    height, width = image_size[0], image_size[1]
    normalize = Normalize(mean=(0.485, 0.456, 0.406),
                          std=(0.229, 0.224, 0.225),
                          max_pixel_value=255.0,
                          p=1.0)

    train_transform = Compose([
        Resize(height=height, width=width),
        # brightness OR contrast jitter
        OneOf([RandomBrightness(limit=0.1, p=1),
               RandomContrast(limit=0.1, p=1)]),
        # one blur variant, half of the time
        OneOf([MotionBlur(blur_limit=3),
               MedianBlur(blur_limit=3),
               GaussianBlur(blur_limit=3)], p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(shift_limit=0.2,
                         scale_limit=0.2,
                         rotate_limit=20,
                         interpolation=cv2.INTER_LINEAR,
                         border_mode=cv2.BORDER_REFLECT_101,
                         p=1),
        normalize,
    ])

    val_transform = Compose([
        Resize(height=height, width=width),
        normalize,
    ])

    return {
        "train_transforms": train_transform,
        "val_transforms": val_transform,
    }
Beispiel #7
0
def main():
    """Offline-augment every training image/mask pair with three flips.

    Reads ``image/*.tif`` and ``label/*.png`` under ``base_train_path`` and
    writes three augmented variants per sample (suffixes 1-3) into
    ``image_aug/`` and ``label_aug/``.
    """
    # Input globs for training images and their label masks.
    train_path = base_train_path + 'image/*.tif'
    mask_path = base_train_path + 'label/*.png'

    # Output directories for the augmented results.
    augtrain_path = base_train_path + 'image_aug/'
    augmask_path = base_train_path + 'label_aug/'

    train_img, masks = data_num(train_path, mask_path)

    # Deterministic augmentations (p=1), keyed by the output-name suffix.
    flips = [
        (1, HorizontalFlip(p=1)),  # horizontal flip
        (2, VerticalFlip(p=1)),    # vertical flip
        (3, Transpose(p=1)),       # transpose (horizontal + vertical flip)
    ]

    for data in range(len(train_img)):
        # Extract the file-name stem. The original split('\\')[1] assumed a
        # Windows path with exactly one backslash and raised IndexError on
        # POSIX paths; normalising separators handles both.
        path = train_img[data].replace('\\', '/')
        file_name = path.rsplit('/', 1)[-1].split('.')[0]

        image = cv2.imread(train_img[data])
        mask = np.array(Image.open(masks[data]))

        for suffix, aug in flips:
            augmented = aug(image=image, mask=mask)
            cv2.imwrite(augtrain_path + "/{}_{}.tif".format(file_name, suffix),
                        augmented['image'])
            Image.fromarray(augmented['mask']).save(
                augmask_path + "/{}_{}.png".format(file_name, suffix))

        # Coarse progress indicator.
        if data % 1000 == 0:
            print(data)
def produce_dataset(data_dirs, save_dir, times=1):
    """Generate an augmented dataset from cropped image/label .npy files.

    Each epoch iterates over every file in the image-crop folder, augments
    the (image, label) pair jointly, and saves the result as ``<i>.npy``
    under the save directory.  Samples whose label is all zeros are kept
    only ~50% of the time.  Uses module-level ``img_width``/``img_height``.
    """
    # NOTE(review): '\i' and '\l' are literal backslashes (not escapes), so
    # these concatenated paths are Windows-only — os.path.join would be
    # portable.
    image_dir = data_dirs + '\image_crop'
    label_dir = data_dirs + '\label_crop'

    aug = Compose([
        # NOTE(review): min_height is fed img_width and min_width img_height —
        # this looks swapped; confirm (harmless if the crops are square).
        PadIfNeeded(min_height=img_width, min_width=img_height, p=1),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        OneOf([
            ElasticTransform(
                p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            GridDistortion(p=0.5),
            OpticalDistortion(p=0.5, distort_limit=1, shift_limit=0.5)
        ],
              p=0.8)
    ])
    i = 0  # running index used as the output file name
    for epoch in range(times):
        for file in os.listdir(image_dir):
            ori_image = np.load(os.path.join(image_dir, file))
            ori_label = np.load(os.path.join(label_dir, file))

            assert ori_image.dtype == 'uint8'
            assert ori_label.dtype == 'int'

            # Apply the same spatial transform to image and mask.
            augmented = aug(image=ori_image, mask=ori_label)

            image = augmented['image']
            label = augmented['mask']

            # Randomly drop about half of the samples with an empty label.
            if label.max() == 0:
                a = np.random.rand()
                if a < 0.5:
                    continue

            np.save(os.path.join(save_dir + '\image', str(i) + '.npy'), image)
            np.save(os.path.join(save_dir + '\label', str(i) + '.npy'), label)

            i += 1
        print('epoch = ' + str(epoch))
    print(' Load over!\n i = ' + str(i))
def train(cls_model='b2', shape=(320,320)):
    """Train classifier *cls_model* with 4-fold cross-validation.

    Folds are stratified on the stringified sorted class set of each image.
    Each fold trains a fresh model for 20 epochs with PR-AUC callbacks on
    a non-augmented train generator and the validation generator.  Nothing
    is returned; ``checkpoint_name`` is passed to the validation callback
    (presumably for model saving — confirm in PrAucCallback).
    """

    kfold = StratifiedKFold(n_splits=4, random_state=133, shuffle=True)
    train_df, img_2_vector = preprocess()

    # Light augmentation applied only to the training generator.
    albumentations_train = Compose([
        VerticalFlip(), HorizontalFlip(), Rotate(limit=20), GridDistortion()
    ], p=1)

    for n_fold, (train_indices, val_indices) in enumerate(kfold.split(train_df['Image'].values, train_df['Class'].map(lambda x: str(sorted(list(x)))))):
        train_imgs = train_df['Image'].values[train_indices]
        val_imgs = train_df['Image'].values[val_indices]
        data_generator_train = DataGenenerator(train_imgs, augmentation=albumentations_train,
                                               resized_height=shape[0], resized_width=shape[1],
                                               img_2_ohe_vector=img_2_vector)

        # Un-augmented, un-shuffled view of the training data, used only for
        # metric evaluation by the train callback.
        data_generator_train_eval = DataGenenerator(train_imgs, shuffle=False,
                                                    resized_height=shape[0], resized_width=shape[1],
                                                    img_2_ohe_vector=img_2_vector)

        data_generator_val = DataGenenerator(val_imgs, shuffle=False,
                                             resized_height=shape[0], resized_width=shape[1],
                                             img_2_ohe_vector=img_2_vector)

        model = get_model(cls_model, shape=shape)

        model.compile(optimizer=RAdam(), loss='binary_crossentropy',
                      metrics=['accuracy'])

        train_metric_callback = PrAucCallback(data_generator_train_eval)
        checkpoint_name = cls_model + '_' + str(n_fold)
        val_callback = PrAucCallback(data_generator_val, stage='val', checkpoint_name=checkpoint_name)

        # NOTE(review): history_0 is overwritten each fold and never used.
        history_0 = model.fit_generator(generator=data_generator_train,
                                        validation_data=data_generator_val,
                                        epochs=20,
                                        callbacks=[train_metric_callback, val_callback],
                                        workers=42,
                                        verbose=1
                                        )
Beispiel #10
0
def crazy_custom_aug(targets):
    """Build a heavy custom augmentation over *targets*, combining cage and
    info-text overlays with standard geometric/photometric transforms."""
    overlays = [
        RandomCageOverlay(default_cage_maker, p=0.15),
        RandomInfoOverlay(image_dict, max_overlay_num=10, p=0.5),
    ]
    standard = [
        ShiftScaleRotate(),
        RandomCrop(256, 256),
        ElasticTransform(p=0.3),
        MedianBlur(p=0.3),
        RandomBrightness(p=0.3),
        HueSaturationValue(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.5),
        RandomGamma(p=1),
    ]
    return targets_aug(overlays + standard, targets)
Beispiel #11
0
def get_transforms(phase, mean, std):
    """Compose transforms for *phase*; 'train' adds flips and
    shift/scale/rotate before the shared normalise + to-tensor tail."""
    ops = []
    if phase == "train":
        ops.append(Compose([
            HorizontalFlip(p=0.5),  # only horizontal flip as of now
            VerticalFlip(p=0.5)], p=1.0))
        ops.append(ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1,
                                    rotate_limit=20, p=0.5))
        # GaussNoise() intentionally disabled
    ops.append(Normalize(mean=mean, std=std, p=1))
    ops.append(ToTensor())
    return Compose(ops)
Beispiel #12
0
def aug_baseline_randCrop(image, mask):
    """Baseline augmentation ending in a random resized crop, applied jointly
    to an image and its mask.  Crop size comes from ``cfg.TRAIN_CROP_SIZE``.

    Returns:
        Tuple ``(augmented_image, augmented_mask)``.
    """
    jitter = OneOf([RandomContrast(), RandomGamma(), RandomBrightness()],
                   p=0.3)
    warp = OneOf(
        [
            ElasticTransform(alpha=120, sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GridDistortion(),
            OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
        p=0.3,
    )
    pipeline = Compose([
        HorizontalFlip(),
        VerticalFlip(),
        jitter,
        warp,
        RandomResizedCrop(width=cfg.TRAIN_CROP_SIZE[0],
                          height=cfg.TRAIN_CROP_SIZE[1],
                          scale=[0.5, 1.0]),
        RandomRotate90(),
    ])
    out = pipeline(image=image, mask=mask)
    return out['image'], out['mask']
Beispiel #13
0
def get_train_transforms(config):
    """Training pipeline: random crop/flips, colour jitter, normalisation,
    cutout and conversion to a torch tensor."""
    size = config.image_size
    return Compose(
        [
            RandomResizedCrop(size, size),
            HorizontalFlip(p=0.4),
            VerticalFlip(p=0.4),
            Transpose(p=0.3),
            ShiftScaleRotate(p=0.3),
            HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2,
                               val_shift_limit=0.2, p=0.3),
            RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                     contrast_limit=(-0.1, 0.1), p=0.3),
            Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225],
                      max_pixel_value=255.0,
                      p=1.0),
            Cutout(p=0.3),
            ToTensorV2(p=1.0),
        ],
        p=1.,
    )
Beispiel #14
0
def get_train_dataloader(file_list, opt):
    """Build the training DataLoader: pad/crop to the configured size,
    random flips, normalise, then wrap in a shuffled torch DataLoader."""
    crop_h, crop_w = opt.train_crop_height, opt.train_crop_width
    transform = Compose(
        [
            PadIfNeeded(min_height=crop_h, min_width=crop_w, p=1),
            RandomCrop(height=crop_h, width=crop_w, p=1),
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            Normalize(p=1),
        ],
        p=1,
    )
    dataset = RoboticsDataset(file_names=file_list,
                              transform=transform,
                              problem_type=opt.problem_type)
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=opt.batch_size,
                                       shuffle=True,
                                       num_workers=opt.workers,
                                       pin_memory=True)
def train_transform_regr(sz, downscale=1, p=1):
    """Return a function that reads an image from disk, augments it for
    regression training, and normalises it via ``norm_fun``."""
    pipeline = Compose(
        [
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.5),
            RandomGamma(p=0.9, gamma_limit=(80, 150)),
            HueSaturationValue(p=0.9, hue_shift_limit=10,
                               sat_shift_limit=20, val_shift_limit=10),
        ],
        p=p,
    )

    def transform_fun(img_path, n=None):
        img = read_img(img_path, sz, downscale=downscale)
        augmented = pipeline(image=img)["image"]
        return norm_fun(augmented)

    return transform_fun
Beispiel #16
0
def generate_transforms_resnext(img_size):
    """Build the ResNeXt train/val augmentation pipelines.

    Returns:
        Dict with 'train' and 'val' ``Compose`` pipelines for square inputs
        of side *img_size*.
    """
    # Shared tail: ImageNet normalisation then tensor conversion.
    tail = [
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ]
    augment = [
        RandomResizedCrop(img_size, img_size),
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(p=0.5),
    ]
    return {
        "train": Compose(augment + tail),
        "val": Compose([Resize(height=img_size, width=img_size)] + tail),
    }
def argument(x_train, msk_train, y_train):
    """Offline augmentation of (image, mask, label) training arrays.

    For each (ratio, transform) pair below, a random subset of ``ratio * N``
    samples is augmented jointly (image + mask, transform applied with p=1)
    and the results are appended to the originals.  NOTE(review): the name
    looks like a typo for "augment"; kept for caller compatibility.

    Returns:
        The extended ``(x_train, msk_train, y_train)`` arrays.
    """
    aug_img = []
    aug_msk = []
    aug_y = []
    # (selection ratio, deterministic transform)
    augments = [
        (1, HorizontalFlip(p=1)),
        (0.25, VerticalFlip(p=1)),
        (0.25, RandomRotate90(p=1)),
        (0.25, Transpose(p=1)),
        (0.25, ElasticTransform(p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03)),
        (0.25, GridDistortion(p=1)),
        (0.25, OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)),
#         (0.5, RandomSizedCrop(p=1, min_max_height=(int(img_size_ori / 2), img_size_ori), height=img_size_ori, width=img_size_ori)),
    ]

    for ratio, aug in tqdm(augments):
        # Pick a random subset (without replacement) for this transform.
        selidx = np.random.choice(x_train.shape[0], int(x_train.shape[0] * ratio), replace=False)
        for idx in tqdm(selidx):
            augmented = aug(image=x_train[idx], mask=msk_train[idx])
            aimg = augmented['image']
            amsk = augmented['mask']
            # Restore the channel axis if the transform returned a 2-D array.
            if len(aimg.shape) < 3:
                aimg = aimg[...,np.newaxis]
            if len(amsk.shape) < 3:
                amsk = amsk[...,np.newaxis]
            aug_img.append(aimg)
            aug_msk.append(amsk)
            aug_y.append(y_train[idx])

    # Append all augmented samples to the original arrays.
    aug_img = np.asarray(aug_img)
    aug_msk = np.asarray(aug_msk)
    aug_y = np.asarray(aug_y)
    x_train = np.append(x_train, aug_img, axis=0)
    msk_train = np.append(msk_train, aug_msk, axis=0)
    y_train = np.append(y_train, aug_y, axis=0)
    print(x_train.shape)
    print(msk_train.shape)
    print(y_train.shape)
    
    return x_train, msk_train, y_train
Beispiel #18
0
def aug_train_heavy(resolution, p=1.0):
    """Heavy training augmentation: resize, one mandatory flip/rotation, then
    optional noise, blur, distortion and colour ops, ending in Normalize."""
    geometric = OneOf(
        [RandomRotate90(), Flip(), Transpose(), HorizontalFlip(),
         VerticalFlip()],
        p=1.0,
    )
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.5)
    blur = OneOf(
        [MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1),
         Blur(blur_limit=3, p=0.1)],
        p=0.2,
    )
    distortion = OneOf(
        [OpticalDistortion(p=0.3), GridDistortion(p=0.1),
         IAAPiecewiseAffine(p=0.3)],
        p=0.1,
    )
    contrast = OneOf(
        [CLAHE(clip_limit=2), IAASharpen(), IAAEmboss(),
         RandomBrightnessContrast()],
        p=0.3,
    )
    return Compose(
        [
            Resize(resolution, resolution),
            geometric,
            noise,
            blur,
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2,
                             rotate_limit=45, p=0.2),
            distortion,
            contrast,
            HueSaturationValue(p=0.3),
            Normalize(),
        ],
        p=p,
    )
Beispiel #19
0
def load_batch(chip_coords, all_density, all_gsd, image, gsd_train_mean,
               gsd_train_std, args):
    """Cut chips out of *image*, augment them, and assemble a training batch.

    Args:
        chip_coords: sequence of (x1, y1, x2, y2) chip boxes in pixel coords.
        all_density: per-chip density targets (indexed like chip_coords).
        all_gsd: per-chip (gsd_x, gsd_y) values.
        image: source image array; only the first 3 channels are used.
        gsd_train_mean, gsd_train_std: per-axis stats to standardise |GSD|.
        args: must provide ``model_image_size`` as (height, width).

    Returns:
        Tuple ``(images, densities, gsds)``; images are passed through
        ``imagenet_utils.preprocess_input`` and scaled by 1/255.
    """
    images = np.zeros((len(chip_coords), args.model_image_size[0],
                       args.model_image_size[1], 3))
    densities = np.zeros((len(chip_coords), 1))
    gsds = np.zeros((len(chip_coords), 2))

    for i in range(len(chip_coords)):
        x1, y1, x2, y2 = chip_coords[i]
        density = all_density[i]
        gsd = all_gsd[i]
        sub_image = image[y1:y2, x1:x2, :3]
        sub_image = cv2.resize(sub_image, args.model_image_size)

        # Stochastic augmentation; rebuilt per chip (could be hoisted).
        aug = Compose([
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            HorizontalFlip(p=0.5),
            Transpose(p=0.5),
            CLAHE(p=0.2),
            RandomBrightness(limit=0.2, p=0.2),
            RandomGamma(p=0.2),
            HueSaturationValue(hue_shift_limit=20,
                               sat_shift_limit=10,
                               val_shift_limit=10,
                               p=0.2),
            RandomContrast(limit=0.4, p=0.3)
        ])

        sub_image = aug(image=sub_image)['image']

        images[i, :, :, :] = sub_image

        densities[i, 0] = density
        # Standardise the absolute GSD with the training statistics.
        gsds[i, 0] = (abs(gsd[0]) - gsd_train_mean[0]) / gsd_train_std[0]
        gsds[i, 1] = (abs(gsd[1]) - gsd_train_mean[1]) / gsd_train_std[1]

    images = imagenet_utils.preprocess_input(images) / 255.0

    return images, densities, gsds
Beispiel #20
0
def get_transform(model_name):
    """Return a (train, valid) pair of Compose pipelines for *model_name*.

    'TAPNet' models keep the training transform deterministic (pad,
    normalise, resize) because transforming image sequences is tricky;
    other models additionally get random flips and a random crop.  Sizes
    come from the module-level ``args``.
    """
    h, w = args.input_height, args.input_width

    if 'TAPNet' in model_name:
        # TODO: more transforms should be adopted for better results
        train_ops = [
            PadIfNeeded(min_height=h, min_width=w, p=1),
            Normalize(p=1),
            Resize(height=h, width=w, p=1),
        ]
    else:
        train_ops = [
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            PadIfNeeded(min_height=h, min_width=w, p=1),
            Normalize(p=1),
            RandomCrop(height=h, width=w, p=1),
        ]

    valid_ops = [
        Normalize(p=1),
        PadIfNeeded(min_height=h, min_width=w, p=1),
        Resize(height=h, width=w, p=1),
    ]
    return Compose(train_ops, p=1), Compose(valid_ops, p=1)
Beispiel #21
0
def get_transforms(phase):
    """Transforms for 1400x2100 images; training adds crops/flips/distortions
    plus CLAHE and brightness/gamma jitter before the shared tail."""
    height, width = 1400, 2100
    ops = []
    if phase == "train":
        ops += [
            OneOf([
                RandomSizedCrop(min_max_height=(50, 101), height=height,
                                width=width, p=0.5),
                PadIfNeeded(min_height=height, min_width=width, p=0.5),
            ], p=1),
            VerticalFlip(p=0.5),
            # RandomRotate90(p=0.5),
            OneOf([
                ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
            ], p=0.8),
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8),
        ]
    # Shared tail: resize back, normalise, convert to tensor.
    ops += [
        Resize(height=height, width=width,
               interpolation=cv2.INTER_NEAREST),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]
    return Compose(ops)
 def __call__(self, x):
     """Augment image *x* (flips, rotations, blur, colour jitter) and return
     the transformed image array; the pipeline fires with probability
     ``self.p``."""
     return Compose([
         HorizontalFlip(),
         VerticalFlip(),
         Transpose(),
         RandomRotate90(),
         ShiftScaleRotate(
             shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
         OneOf([
             MotionBlur(p=.2),
             MedianBlur(blur_limit=3, p=0.1),
             Blur(blur_limit=3, p=0.1),
         ],
               p=0.2),
         OneOf([
             RandomBrightnessContrast(),
             RandomGamma(),
             CLAHE(),
             HueSaturationValue(p=0.3),
         ])
     ],
                    p=self.p)(image=x)['image']
def get_transforms(*, data):
    """Return the transform pipeline for 'train' or 'valid' data."""
    assert data in ('train', 'valid')

    # Shared tail: ImageNet normalisation then tensor conversion.
    tail = [
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    if data == 'train':
        return Compose([HorizontalFlip(p=0.5), VerticalFlip(p=0.5)] + tail)
    return Compose(tail)
Beispiel #24
0
def strong_aug(p=0.75):
    """Strong detection augmentation with pascal_voc bounding boxes; boxes
    with post-transform visibility below 0.5 are dropped."""
    transforms = [
        ShiftScaleRotate(scale_limit=0.1, rotate_limit=90),
        Transpose(),
        Cutout(num_holes=20, max_h_size=8, max_w_size=8),
        HorizontalFlip(),
        VerticalFlip(),
        GaussNoise(),
        JpegCompression(),
        OneOf([MotionBlur(), GaussianBlur()]),
        OneOf([ToGray(), ToSepia()]),
        RandomBrightnessContrast(brightness_limit=0.75, p=0.75),
    ]
    bbox_params = BboxParams("pascal_voc",
                             label_fields=["category_id"],
                             min_area=0.0,
                             min_visibility=0.5)
    return Compose(transforms, bbox_params=bbox_params, p=p)
    def __init__(self, dataframe, vertical_flip, horizontal_flip,
                 is_train=True):
        """Dataset wrapper with per-split augmentation.

        Args:
            dataframe: source dataframe holding the samples.
            vertical_flip: vertical-flip probability (train pipeline only).
            horizontal_flip: horizontal-flip probability (train pipeline only).
            is_train: selects the augmenting pipeline vs. plain resize.
        """
        self.dataframe, self.is_train = dataframe, is_train
        self.vertical_flip, self.horizontal_flip = vertical_flip, horizontal_flip

        # Data Augmentation (custom for each dataset type)
        if is_train:
            self.transform = Compose([RandomResizedCrop(height=224, width=224, scale=(0.7, 1.0)),
                                      ShiftScaleRotate(rotate_limit=90, scale_limit = [0.7, 1]),
                                      HorizontalFlip(p = self.horizontal_flip),
                                      VerticalFlip(p = self.vertical_flip),
                                      HueSaturationValue(sat_shift_limit=[0.7, 1.3],
                                                         hue_shift_limit=[-0.1, 0.1]),
                                      RandomBrightnessContrast(brightness_limit=[0.01, 0.1],
                                                               contrast_limit= [0.01, 0.1]),
                                      #Normalize(),
                                      ToTensor()])

        else:
            self.transform = Compose([  # Normalize(),
            Resize(height=224, width=224),
            ToTensor()])
Beispiel #26
0
def make_seq_transf(seq_len, NUM_CHAN, image_preproc=None):
    """Create a transform over an image sequence of *seq_len* frames with
    *NUM_CHAN* channels each, applying the same random augmentation to every
    frame/channel via albumentations' ``additional_targets`` mechanism."""
    additional_targets = OrderedDict()
    for frame in range(seq_len):
        for chan in range(NUM_CHAN):
            if frame == 0 and chan == 0:
                continue  # frame 0 / channel 0 is the primary 'image'
            additional_targets['image_{}_{}'.format(frame, chan)] = 'image'

    aug = Compose(
        [
            VerticalFlip(p=0.4),
            RandomRotate90(p=0.4),
            Transpose(p=0.4),
            RandomBrightnessContrast(p=0.5, brightness_limit=0.3,
                                     contrast_limit=0.3),
        ],
        additional_targets=additional_targets,
    )

    def transf(imgs):
        return seq_transformer(aug, imgs, additional_targets, seq_len,
                               image_preproc)

    return transf
    def __call__(self, original_image):
        """Augment *original_image* and return the resulting tensor.

        Upsamples to 650x650, applies flips and a rotation, resizes to
        (self.height, self.width), then normalises with ImageNet stats.
        The pipeline is rebuilt (and stored on self) on every call.
        """
        self.augmentation_pipeline = Compose([
            Resize(650, 650, always_apply=True),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(rotate_limit=25.0, p=0.7),
            Resize(self.height, self.width, always_apply=True),
            Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225],
                      always_apply=True),
            ToTensor(),
        ])
        return self.augmentation_pipeline(image=original_image)["image"]
    def augmentation(image, mask, noise=False, transform=False, clahe=True, r_bright=True, r_gamma=True):
        """Jointly augment *image* and *mask*.

        Flips/rotations are always included; brightness/contrast, gamma,
        CLAHE, Gaussian noise and elastic deformation are toggled by the
        keyword flags (appended in that fixed order).

        Returns:
            Tuple ``(image_heavy, mask_heavy)``.
        """
        ops = [VerticalFlip(p=0.5), HorizontalFlip(p=0.5),
               RandomRotate90(p=0.5)]
        if r_bright:
            ops.append(RandomBrightnessContrast(p=0.5))
        if r_gamma:
            ops.append(RandomGamma(p=0.5))
        if clahe:
            ops.append(CLAHE(p=1., always_apply=True))
        if noise:
            ops.append(GaussNoise(p=0.5, var_limit=1.))
        if transform:
            ops.append(ElasticTransform(p=0.5, sigma=1., alpha_affine=20,
                                        border_mode=0))

        result = Compose(ops)(image=image, mask=mask)
        return result['image'], result['mask']
Beispiel #29
0
def image_loader(path, batch_size, augmentation=None,
                 samples_per_class=64, val_samples_per_class=64, random_state=24):
    """Create supervised train/val DataLoaders rooted at *path*.

    Args:
        path: dataset root containing ``supervised/train`` and
            ``supervised/val`` folders.
        batch_size: batch size for both loaders.
        augmentation: key into the module-level ``AUG`` dict selecting the
            per-transform probabilities; ``None`` disables augmentation.
        samples_per_class: per-class subsample size for training.
        val_samples_per_class: per-class subsample size for validation.
        random_state: seed forwarded to the datasets.

    Returns:
        Tuple ``(train_loader, val_loader)``.
    """
    if augmentation is None:
        transform = Compose([
            Normalize(mean=config.img_means, std=config.img_stds),
            ToTensor()
        ])
    else:
        # Probabilities for each stage are looked up in AUG[augmentation].
        transform = Compose([
            HorizontalFlip(p=AUG[augmentation]['p_fliph']),
            VerticalFlip(p=AUG[augmentation]['p_flipv']),
            OneOf([RandomSizedCrop((64, 90), 96, 96, p=1.0,
                                   interpolation=cv2.INTER_LANCZOS4)], p=AUG[augmentation]['p_crop']),
            ShiftScaleRotate(p=AUG[augmentation]['p_ssr'],
                             interpolation=cv2.INTER_LANCZOS4),
            OneOf([RGBShift(p=1.0),
                   RandomBrightness(p=1.0, limit=0.35),
                   Blur(p=1.0, blur_limit=4),
                   OpticalDistortion(p=1.0),
                   GridDistortion(p=1.0)], p=AUG[augmentation]['p_aug']),
            Cutout(num_holes=2, max_h_size=24, max_w_size=24,
                   p=AUG[augmentation]['p_cut']),
            Normalize(mean=config.img_means, std=config.img_stds),
            ToTensor()
        ])
    # Validation is never augmented.
    transform_val = Compose([
        Normalize(mean=config.img_means, std=config.img_stds),
        ToTensor()
    ])
    sup_train_data = AlbumentationsDataset(
        f'{path}/supervised/train', transform=transform, samples_per_class=samples_per_class, random_state=random_state)
    sup_val_data = AlbumentationsDataset(
        f'{path}/supervised/val', transform=transform_val, samples_per_class=val_samples_per_class, random_state=random_state)
    data_loader_sup_train = torch.utils.data.DataLoader(
        sup_train_data, batch_size=batch_size, shuffle=True, num_workers=0)
    data_loader_sup_val = torch.utils.data.DataLoader(
        sup_val_data, batch_size=batch_size, shuffle=False, num_workers=0)
    return data_loader_sup_train, data_loader_sup_val
Beispiel #30
0
def data_augmentation(original_image,
                      original_mask,
                      crop=False,
                      height=None,
                      width=None):
    """Jointly augment an image and its mask; optionally random-crop first.

    Args:
        original_image: input image array.
        original_mask: matching segmentation mask.
        crop: when True, random-crop both to (height, width) before the
            stochastic pipeline; *height* and *width* must then be given.
        height: crop height (only used when crop=True).
        width: crop width (only used when crop=True).

    Returns:
        Tuple ``(image_aug, mask_aug)``.
    """
    if crop:
        assert height and width
        cropped = RandomCrop(height=height, width=width,
                             always_apply=True)(image=original_image,
                                                mask=original_mask)
        original_image = cropped['image']
        original_mask = cropped['mask']

    blur = OneOf(
        [MotionBlur(p=0.1), MedianBlur(blur_limit=3, p=0.1),
         Blur(blur_limit=3, p=0.1)],
        p=0.3,
    )
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    pipeline = Compose([
        HorizontalFlip(p=0.4),
        VerticalFlip(p=0.4),
        ShiftScaleRotate(shift_limit=0.07, rotate_limit=0, p=0.4),
        CLAHE(p=0.3),
        RandomGamma(gamma_limit=(80, 120), p=0.1),
        RandomBrightnessContrast(p=0.1),
        blur,
        noise,
    ])

    augmented = pipeline(image=original_image, mask=original_mask)
    return augmented['image'], augmented['mask']