Example #1
    def __init__(self, dataset_path, image_size, transform=None):

        """
        BreastPathQ dataset: supervised fine-tuning on downstream task
        """

        self.image_size = image_size
        self.transform = transform

        # Resize images
        self.transform1 = Compose([Resize(image_size, image_size, interpolation=2)])  # e.g. 256x256, bicubic

        # Data augmentations
        self.transform4 = Compose([Rotate(limit=(-90, 90), interpolation=2), CenterCrop(image_size, image_size)])
        self.transform5 = Compose(
            [Rotate(limit=(-90, 90), interpolation=2), RandomScale(scale_limit=(0.8, 1.2), interpolation=2),
             Resize(image_size + 20, image_size + 20, interpolation=2),
             RandomCrop(image_size, image_size)])

        self.datalist = []
        data_paths = glob.glob(dataset_path + "*.h5")
        with tqdm(enumerate(sorted(data_paths)), disable=True) as t:
            for wj, data_path in t:
                data = h5py.File(data_path, 'r')  # explicit read-only mode
                data_patches = data['x'][:]
                cls_id = data['y'][:]
                for idx in range(len(data_patches)):
                    self.datalist.append((data_patches[idx], cls_id[idx]))
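Note: albumentations' RandomScale treats scale_limit as an offset from 1.0, so scale_limit=(0.8, 1.2) samples scale factors of roughly 1.8x-2.2x rather than 0.8x-1.2x (compare Example #20, which passes (-0.78, -0.5) to shrink images to 22%-50%). A minimal standalone sketch of how pipelines like self.transform5 above are applied, with a random patch standing in for one entry of data['x']:

import numpy as np
from albumentations import Compose, Rotate, RandomScale, Resize, RandomCrop

image_size = 224
patch = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # stand-in patch

# Same pipeline as self.transform5 above, rebuilt standalone.
transform5 = Compose([
    Rotate(limit=(-90, 90), interpolation=2),
    RandomScale(scale_limit=(0.8, 1.2), interpolation=2),  # factors ~1.8-2.2, see note above
    Resize(image_size + 20, image_size + 20, interpolation=2),
    RandomCrop(image_size, image_size),
])
augmented = transform5(image=patch)["image"]  # numpy array of shape (224, 224, 3)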
Example #2
def get_train_transform(smallest_max_size: int, size: int):
    return Compose([
        SmallestMaxSize(smallest_max_size),
        RandomScale(scale_limit=0.125),
        # PadIfNeeded(256, 256, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.),
        # ShiftScaleRotate(
        #     shift_limit=0.0625, scale_limit=0.1, rotate_limit=30,
        #     border_mode=cv2.BORDER_REFLECT_101, p=1.),
        Rotate(limit=20, border_mode=cv2.BORDER_REFLECT_101, p=1.),
        OneOf([
            RandomCrop(size, size, p=0.9),
            CenterCrop(size, size, p=0.1),
        ],
              p=1.),
        HorizontalFlip(p=0.5),
        RandomContrast(limit=0.2, p=0.5),
        RandomGamma(gamma_limit=(80, 120), p=0.5),
        RandomBrightness(limit=0.2, p=0.5),
        # HueSaturationValue(hue_shift_limit=5, sat_shift_limit=20,
        #                    val_shift_limit=10, p=1.),
        # OneOf([
        #     OpticalDistortion(p=0.3),
        #     GridDistortion(p=0.1),
        #     IAAPiecewiseAffine(p=0.3),
        # ], p=0.2),
        # OneOf([
        #     IAAAdditiveGaussianNoise(
        #         loc=0, scale=(1., 6.75), per_channel=False, p=0.3),
        #     GaussNoise(var_limit=(5.0, 20.0), p=0.6),
        # ], p=0.5),
        # Cutout(num_holes=4, max_h_size=30, max_w_size=50, p=0.75),
        # JpegCompression(quality_lower=50, quality_upper=100, p=0.5)
    ])
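A hedged usage sketch for the factory above (the file path and sizes are assumptions, not from the source); albumentations pipelines are called with keyword arguments and return a dict:

import cv2

transform = get_train_transform(smallest_max_size=256, size=224)
image = cv2.imread("sample.jpg")             # assumed path; any HxWxC uint8 array works
augmented = transform(image=image)["image"]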
Example #3
    def __build_augmentator(self):
        return Compose(
            [
                ShiftScaleRotate(
                    shift_limit=0.0625, scale_limit=0.0, rotate_limit=0,
                    p=0.3),
                OneOf([
                    RandomScale(scale_limit=0.05, interpolation=1, p=0.5),
                    Rotate(limit=7,
                           interpolation=1,
                           border_mode=cv2.BORDER_CONSTANT,
                           value=0,
                           p=0.5)
                ], p=0.5),
                PadIfNeeded(always_apply=True,
                            min_width=self.width,
                            min_height=self.height),
                RandomCrop(width=self.width, height=self.height),
                OneOf([
                    VerticalFlip(),
                    # HorizontalFlip(p=0.2),
                ], p=0.5),
                # OneOf([
                #     RandomBrightness(limit=0.2, always_apply=False, p=0.5),
                #     RandomContrast(),
                #     RandomGamma()
                # ], p=0.7),
            ],
            p=self.p)
Example #4
def build_transforms(cfg, mode='train', norm_image=True):
    assert mode in ['train', 'test', 'val']
    min_size = cfg.SCALES[0]
    max_size = cfg.SCALES[1]
    assert min_size <= max_size

    if mode == 'train':
        flip_prob = cfg.TRAIN.FLIP_PROB
    elif mode == 'test':
        flip_prob = cfg.TEST.FLIP_PROB
    else:
        flip_prob = cfg.VAL.FLIP_PROB

    to_bgr255 = True

    normalize_transform = T.Normalize(
        mean=cfg.NETWORK.PIXEL_MEANS, std=cfg.NETWORK.PIXEL_STDS, to_bgr255=to_bgr255
    )

    # transform = T.Compose(
    #     [
    #         T.Resize(min_size, max_size),
    #         T.RandomHorizontalFlip(flip_prob),
    #         T.ToTensor(),
    #         normalize_transform,
    #         T.FixPadding(min_size, max_size, pad=0)
    #     ]
    # )
    bbox_params = BboxParams(
        format='pascal_voc',
        min_area=0, 
        min_visibility=0.2,
        label_fields=['fake_label'])
    album_augs = [
        HorizontalFlip(p=0.5),
        # RandomBrightness(limit=0.2, p=0.5),
        # RandomContrast(limit=0.2, p=0.5),
        RandomScale(scale_limit=(-0.3, 0.0), p=0.3),
        # MedianBlur(blur_limit=5, p=0.3),
        # Rotate(limit=30, p=0.25),
    ]
    album_augs = Compose(album_augs, bbox_params=bbox_params)

    if mode == 'train':
        all_augs = [
            T.Resize(min_size, max_size),
            T.ToTensor(),
            album_augs,
        ]
    else:
        all_augs = [
            T.Resize(min_size, max_size),
            T.ToTensor(),
        ]
    if norm_image:
        all_augs.append(normalize_transform)
    transform = T.Compose(all_augs)
    return transform
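For reference, a Compose built with BboxParams and label_fields=['fake_label'], like album_augs above, expects the boxes and labels as keyword arguments. A minimal sketch with made-up values:

import numpy as np
from albumentations import BboxParams, Compose, HorizontalFlip

aug = Compose(
    [HorizontalFlip(p=0.5)],
    bbox_params=BboxParams(format='pascal_voc', label_fields=['fake_label']))
out = aug(
    image=np.zeros((480, 640, 3), dtype=np.uint8),  # dummy image
    bboxes=[[10, 20, 120, 180]],                    # pascal_voc: x_min, y_min, x_max, y_max
    fake_label=[0],                                 # one label per box
)
image, boxes = out["image"], out["bboxes"]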
Example #5
def get_augmentation_fcn(mode, interpolation_method=cv.INTER_LINEAR):
    augmentation_dict = {
        # 'all': Compose([RandomRotate90(p=1.),
        #                 Flip(p=1.),
        #                 Rotate(p=1., interpolation=interpolation_method),
        #                 RandomScale(p=1., interpolation=interpolation_method),
        #                 ShiftScaleRotate(p=1., interpolation=interpolation_method)]),
        # 'any': OneOf([RandomRotate90(p=1.),
        #               Flip(p=1.),
        #               Rotate(p=1., interpolation=interpolation_method),
        #               RandomScale(p=1., interpolation=interpolation_method),
        #               ShiftScaleRotate(p=1., interpolation=interpolation_method)]),
        'no_interpolation_necessary':
        OneOf([RandomRotate90(p=1.), Flip(p=1.)]),
        'interpolation_necessary':
        OneOf([
            Rotate(p=1., interpolation=interpolation_method),
            RandomScale(p=1., interpolation=interpolation_method),
            ShiftScaleRotate(p=1., interpolation=interpolation_method)
        ]),
        'affine':
        Compose([
            ShiftScaleRotate(p=1., interpolation=interpolation_method),
            HorizontalFlip(p=0.5)
        ]),
        'rot':
        Rotate(p=1., interpolation=interpolation_method),
        'rot90':
        RandomRotate90(p=1.),
        'flip':
        Flip(p=1.),
        'hflip':
        HorizontalFlip(p=1.),
        'vflip':
        VerticalFlip(p=1.),
        'scale':
        RandomScale(p=1., interpolation=interpolation_method),
        'ssr':
        ShiftScaleRotate(p=1., interpolation=interpolation_method),
        'none':
        None
    }
    return augmentation_dict[mode]
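Callers of this helper have to handle the 'none' entry, which maps to None rather than an identity transform. A minimal sketch:

import numpy as np

image = np.zeros((64, 64, 3), dtype=np.uint8)  # dummy input
aug = get_augmentation_fcn('affine')
result = image if aug is None else aug(image=image)['image']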
Example #6
def augment_flips_color(h=None, w=None, interpolation=0):
    t = [
        RandomRotate90(always_apply=True),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        Transpose(p=0.5),
        Rotate(limit=(-45, 45), interpolation=interpolation, border_mode=0, always_apply=True),
        RandomScale(scale_limit=(-0.2, 0.5), interpolation=interpolation, always_apply=True),
        PadIfNeeded(h, w, border_mode=0, always_apply=True),
        Resize(h, w, interpolation=interpolation, always_apply=True),
    ]
    return Compose(t, p=1)
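Despite the None defaults, h and w must be concrete integers here, since PadIfNeeded and Resize receive them directly. A hedged call sketch with assumed sizes:

import numpy as np

pipeline = augment_flips_color(h=512, w=512)
image = np.zeros((300, 400, 3), dtype=np.uint8)  # dummy input
out = pipeline(image=image)['image']             # padded/resized to (512, 512, 3)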
Example #7
    def __init__(self, data_path, json_path, image_size, transform=None):

            """ Camelyon16 dataset: supervised fine-tuning on downstream task """

            self.transform = transform
            self.data_path = data_path
            self.json_path = json_path
            self._preprocess()

            # Data augmentations
            self.transform1 = Compose([Rotate(limit=(-90, 90), interpolation=2), CenterCrop(image_size, image_size)])
            self.transform2 = Compose([Rotate(limit=(-90, 90), interpolation=2), RandomScale(scale_limit=(0.8, 1.2), interpolation=2),
                                       Resize(image_size + 20, image_size + 20, interpolation=2), RandomCrop(image_size, image_size)])
Example #8
def pre_transforms(image_size=224, crop_from_gray=False, circle_crop=False, ben_preprocess=10,
                   random_scale=0.3, random_scale_p=0.75, brightness=0.2, contrast=0.2, color_p=0.5):
    transforms = [Resize(image_size, image_size)]
    if crop_from_gray is True:
        transforms = [Crop_From_Gray()] + transforms
    if (brightness > 0) or (contrast > 0):
        transforms.append(RandomBrightnessContrast(brightness_limit=brightness, contrast_limit=contrast, p=color_p))
    if random_scale > 0:
        transforms.append(RandomScale((0.0, random_scale), p=random_scale_p))
        transforms.append(CenterCrop(image_size, image_size))
    if ben_preprocess > 0:
        transforms.append(Ben_preprocess(ben_preprocess))
    if circle_crop is True:
        transforms.append(Circle_Crop())
    return Compose(transforms)
Example #9
def get_augmentations(img_size):
    return Compose([
        Resize(height=int(img_size * 1.5), width=int(img_size * 1.5), p=1),
        RandomSizedCrop(min_max_height=(int(img_size * 0.9), img_size),
                        height=img_size,
                        width=img_size,
                        always_apply=True,
                        p=1),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.4),
        OneOf([
            GlassBlur(p=1),
            GaussianBlur(p=1),
            MotionBlur(p=1),
            MedianBlur(blur_limit=3, p=1),
            Blur(blur_limit=3, p=1),
        ],
              p=0.4),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=45,
                         p=0.2),
        OneOf([
            OpticalDistortion(p=1),
            ElasticTransform(),
            GridDistortion(p=1),
            IAAPiecewiseAffine(p=1),
        ],
              p=0.4),
        OneOf(
            [
                CLAHE(clip_limit=2),  # Histogram Equalization
                IAASharpen(),
                IAAEmboss(),
                RandomBrightnessContrast(),
                RGBShift()
            ],
            p=0.4),
        HueSaturationValue(p=0.3),
        ToSepia(p=0.2),
        Cutout(p=0.2),
        RandomScale(p=0.2)
    ])
Example #10
    def init_augmentations(self):
        # TODO: change this
        width, height = 724, 1024
        wanted_size = 256
        aug = Compose([
            Resize(height=height, width=width),
            RandomScale(scale_limit=0.5, always_apply=True),
            RandomCrop(height=wanted_size, width=wanted_size),
            PadIfNeeded(min_height=wanted_size, min_width=wanted_size, p=0.5),
            Rotate(limit=4, p=0.5),
            VerticalFlip(p=0.5),
            GridDistortion(p=0.5),
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8)
        ])
        return aug
Example #11
    def __getitem__(self, i):
        idx = self.ids[i]
        mask_file = glob(self.masks_dir + idx + '*')
        img_file = glob(self.imgs_dir + idx + '*')

        assert len(mask_file) == 1, \
            f'Either no mask or multiple masks found for the ID {idx}: {mask_file}'
        assert len(img_file) == 1, \
            f'Either no image or multiple images found for the ID {idx}: {img_file}'
        # albumentations operates on numpy arrays, not PIL Images
        mask = np.array(Image.open(mask_file[0]))
        img = np.array(Image.open(img_file[0]))

        # random image augmentation
        aug = Compose([
            OneOf([
                ElasticTransform(p=0.5,
                                 alpha=120,
                                 sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                RandomGamma(gamma_limit=(50, 130), p=0.5),
                CLAHE(clip_limit=2.0, p=0.5),
                RandomBrightnessContrast(brightness_limit=0.4, p=0.5),
                Rotate(limit=20),
                RandomScale(scale_limit=0.2)
            ],
                  p=0.8)
        ])
        augmented = aug(image=img, mask=mask)
        img = augmented['image']
        mask = augmented['mask']

        assert img.shape[:2] == mask.shape[:2], \
            f'Image and mask {idx} should be the same size, but are {img.shape} and {mask.shape}'

        img = self.preprocess(img)
        # mask = np.array(mask)
        # mask[mask > 1] = 0
        mask = self.mask_preprocess(mask)

        return {'image': torch.from_numpy(img), 'mask': torch.from_numpy(mask)}
Example #12
def get_augmentations(cfg):

    processes = []

    if cfg.augmentation['random_scale']['is_applied']:
        processes.append(
            RandomScale(**cfg.augmentation['random_scale']['params']))

    if cfg.augmentation['random_crop']['is_applied']:
        processes.append(
            RandomCrop(**cfg.augmentation['random_crop']['params']))

    if cfg.augmentation['LRflip']['is_applied']:
        processes.append(
            HorizontalFlip(**cfg.augmentation['LRflip']['params']))

    if cfg.augmentation['brightness_shift']['is_applied']:
        processes.append(
            RandomBrightness(**cfg.augmentation['brightness_shift']['params']))

    return Compose(processes)
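A sketch of the config shape this function assumes; the key names come from the lookups above, while the concrete values are illustrative only:

class DummyCfg:
    # params dicts are forwarded verbatim to the corresponding transforms
    augmentation = {
        'random_scale':     {'is_applied': True,  'params': {'scale_limit': 0.2, 'p': 0.5}},
        'random_crop':      {'is_applied': True,  'params': {'height': 512, 'width': 512}},
        'LRflip':           {'is_applied': True,  'params': {'p': 0.5}},
        'brightness_shift': {'is_applied': False, 'params': {}},
    }

pipeline = get_augmentations(DummyCfg)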
Example #13
    def __init__(self, dataset_path, image_size):

        """
        Kather dataset: supervised fine-tuning on downstream task
        """

        self.image_size = image_size

        # Resize images
        self.transform1 = Compose([Resize(image_size, image_size, interpolation=2)])

        # Data augmentations
        self.transform4 = Compose([Rotate(limit=(-90, 90), interpolation=2), CenterCrop(image_size, image_size)])
        self.transform5 = Compose([Rotate(limit=(-90, 90), interpolation=2), RandomScale(scale_limit=(0.8, 1.2), interpolation=2),
                                   Resize(image_size + 20, image_size + 20, interpolation=2), RandomCrop(image_size, image_size)])

        self.datalist = []
        cls_paths = glob.glob('{}/*/'.format(dataset_path))
        with tqdm(enumerate(sorted(cls_paths)), disable=True) as t:
            for wj, cls_path in t:
                cls_id = str(os.path.split(os.path.dirname(cls_path))[-1])
                patch_pths = glob.glob('{}/*.tif'.format(cls_path))
                for pth in patch_pths:
                    self.datalist.append((pth, cls_id))
Example #14
def MedT_preprocess_v2_image(img, train, mean=None, std=None) -> torch.Tensor:
    if std is None:
        std = [0.5, 0.5, 0.5]
    if mean is None:
        mean = [0.5, 0.5, 0.5]

    degrees = int(random.random() * 360)
    n, m = random.randint(1, 4), random.randint(2, 15)
    ShearTranslateAug = ShearTranslate(n, m)

    augmentations = [
        RandomHorizontalFlip(),
        RandomRotation(degrees),
        RandomVerticalFlip(),
        RandomPerspective(),
        RandomBrightness(),
        RandomContrast(),
        RandomScale(),
        GaussianBlur(),
        RandomResizedCrop(), ShearTranslateAug
    ]

    augs_num_to_apply = random.randint(1, len(augmentations))
    augs = random.sample(augmentations, augs_num_to_apply)

    if train:
        augment = Compose([Image.fromarray, *augs])
        normalize = Compose([ToTensor(), Normalize(mean=mean, std=std)])
        augmented = augment(img)
        preprocessed = normalize(augmented).unsqueeze(0)

        return preprocessed, augmented

    preprocessing = Compose([ToTensor(), Normalize(mean=mean, std=std)])

    return preprocessing(img).unsqueeze(0), None
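A hedged call sketch, assuming ShearTranslate and the torchvision-style transforms used above are importable as in the source: with train=True it returns the normalized 1xCxHxW tensor plus the augmented PIL image, otherwise the tensor and None.

import numpy as np

img = np.zeros((224, 224, 3), dtype=np.uint8)  # dummy HxWxC uint8 input
tensor, aug_img = MedT_preprocess_v2_image(img, train=True)
val_tensor, _ = MedT_preprocess_v2_image(img, train=False)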
Example #15
                       p=.9),
    # CLAHE(p=1.0, clip_limit=2.0),
    # ShiftScaleRotate(
    #     shift_limit=0.0625, scale_limit=0.1,
    #     rotate_limit=15, border_mode=cv2.BORDER_REFLECT_101, p=0.8),
    ToFloat(max_value=255)
])

# Reference
# https://github.com/diceroll/kmnist/blob/master/dataloader.py
AUGMENTATIONS_KMNIST = Compose([
    Rotate(p=0.8, limit=5),
    PadIfNeeded(p=0.5, min_height=Height + 2, min_width=Width),
    PadIfNeeded(p=0.5, min_height=Height, min_width=Width + 2),
    Resize(p=1.0, height=Height, width=Width),
    RandomScale(p=1.0, scale_limit=0.1),
    PadIfNeeded(p=1.0, min_height=Height + 4, min_width=Width + 4),
    RandomCrop(p=1.0, height=Height, width=Width),
    Cutout(p=0.5, num_holes=4, max_h_size=4, max_w_size=4),
    ToFloat(max_value=255)
])

AUGMENTATIONS_VALID = Compose([
    # CLAHE(p=1.0, clip_limit=2.0),
    ToFloat(max_value=255)
])


class MobileAppImageSequence(Sequence):
    def __init__(self, image_path, y, batch_size, target_size, augmentations):
        self.image_paths = image_path
Example #16
def Scale_Resize_Crop(img):
    transform = Compose([Rotate(limit=(-90, 90), interpolation=2), RandomScale(scale_limit=(0.8, 1.2), interpolation=2), Resize(img.shape[1] + 20, img.shape[1] + 20, interpolation=2),
                         RandomCrop(img.shape[1], img.shape[1])])
    Aug_img = transform(image=img)
    return Aug_img
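Note that Scale_Resize_Crop returns albumentations' result dict rather than the augmented array itself, so callers index 'image'. A minimal sketch:

import numpy as np

img = np.zeros((256, 256, 3), dtype=np.uint8)  # dummy square image
aug_img = Scale_Resize_Crop(img)['image']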
Example #17
def make_augmentation(data_shape,
                      resize=None,
                      hflip=0,
                      vflip=0,
                      scale=None,
                      rotate=None,
                      color=None,
                      deform=None,
                      rand_crop=None,
                      windows=('soft_tissue', ),
                      windows_force_rgb=True,
                      max_value=1.0):
    transforms = []

    if resize == 'auto':
        resize = data_shape
    if resize:
        transforms.append(Resize(*resize))

    if hflip:
        transforms.append(HorizontalFlip(p=hflip))

    if vflip:
        transforms.append(VerticalFlip(p=vflip))

    if scale:
        if not isinstance(scale, dict):
            scale = {'scale_limit': scale}
        transforms.append(RandomScale(**scale))

    if rotate:
        if not isinstance(rotate, dict):
            rotate = {'limit': rotate}
        transforms.append(Rotate(**rotate))

    if deform:
        oneof = []
        deform_p = deform.get('p', .3)

        elastic = deform.get('elastic', None)
        grid = deform.get('grid', None)
        optical = deform.get('optical', None)

        if elastic:
            oneof.append(ElasticTransform(**elastic))

        if grid:
            oneof.append(GridDistortion(**grid))

        if optical:
            oneof.append(OpticalDistortion(**optical))

        transforms.append(OneOf(oneof, p=deform_p))

    transforms.append(
        PadIfNeeded(min_height=data_shape[1], min_width=data_shape[0]))

    if rand_crop:
        if not isinstance(rand_crop, dict):
            rand_crop = {'p': rand_crop}
        rand_crop.setdefault('p', 1.0)
        r_crop = RandomCrop(height=data_shape[1],
                            width=data_shape[0],
                            **rand_crop)
        transforms.append(r_crop)

        # rand_crop.setdefault('scale', (0, 0))
        # rand_crop.setdefault('ratio', (1.0, 1.0))
        # r_crop = RandomResizedCrop(height=data_shape[1], width=data_shape[0], **rand_crop)
        # transforms.append(r_crop)

    c_crop = CenterCrop(height=data_shape[1], width=data_shape[0])
    transforms.append(
        PadIfNeeded(min_height=data_shape[1], min_width=data_shape[0]))
    transforms.append(c_crop)

    if color:
        oneof = []
        color_p = color.get('p', .3)
        contrast = color.get('contrast', None)
        gamma = color.get('gamma', None)
        brightness = color.get('brightness', None)

        if contrast:
            oneof.append(RandomContrast(**contrast))

        if gamma:
            oneof.append(RandomGamma(**gamma))

        if brightness:
            oneof.append(RandomBrightness(**brightness))

        transforms.append(OneOf(oneof, p=color_p))

    transforms.append(
        ChannelWindowing(
            windows=windows,
            force_rgb=windows_force_rgb,
        ))

    transforms.append(ToFloat(max_value=max_value))

    return Compose(transforms)
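An illustrative call, assuming the custom ChannelWindowing transform used above is importable; every parameter value here is made up:

train_aug = make_augmentation(
    data_shape=(512, 512),  # (width, height), per the pad/crop calls above
    resize='auto',
    hflip=0.5,
    scale={'scale_limit': 0.1, 'p': 0.5},
    rotate=15,
    color={'p': 0.3, 'contrast': {'limit': 0.2}, 'gamma': {'gamma_limit': (80, 120)}},
    rand_crop=1.0,
)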
Example #18
def albumentations_transforms(
    crop_size,
    shorter_side,
    low_scale,
    high_scale,
    img_mean,
    img_std,
    img_scale,
    ignore_label,
    num_stages,
    dataset_type,
):
    from albumentations import (
        Normalize,
        HorizontalFlip,
        RandomRotate90,  # my addition
        RandomBrightnessContrast,  # my addition
        CLAHE,  # my addition
        RandomGamma,  # my addition
        ElasticTransform,  # my addition
        GridDistortion,  # my addition
        MotionBlur,  # my addition
        RandomCrop,
        PadIfNeeded,
        RandomScale,
        LongestMaxSize,
        SmallestMaxSize,
        OneOf,
    )
    from albumentations.pytorch import ToTensorV2 as ToTensor
    from densetorch.data import albumentations2densetorch

    if dataset_type == "densetorch":
        wrapper = albumentations2densetorch
    elif dataset_type == "torchvision":
        wrapper = albumentations2torchvision
    else:
        raise ValueError(f"Unknown dataset type: {dataset_type}")

    common_transformations = [
        Normalize(max_pixel_value=1.0 / img_scale, mean=img_mean, std=img_std),
        ToTensor(),
    ]
    train_transforms = []
    for stage in range(num_stages):
        train_transforms.append(
            wrapper([
                ChangeBackground("../backgrounds", p=0.5),  # my addition
                MotionBlur(p=0.5),
                OneOf([
                    RandomScale(scale_limit=(low_scale[stage],
                                             high_scale[stage])),
                    LongestMaxSize(max_size=shorter_side[stage]),
                    SmallestMaxSize(max_size=shorter_side[stage]),
                ]),
                PadIfNeeded(
                    min_height=crop_size[stage],
                    min_width=crop_size[stage],
                    border_mode=cv2.BORDER_CONSTANT,
                    value=np.array(img_mean) / img_scale,
                    mask_value=ignore_label,
                ),
                HorizontalFlip(p=0.5, ),
                RandomRotate90(p=0.5),
                RandomBrightnessContrast(
                    p=.8),  # only applies to images, not masks
                RandomGamma(p=0.8),  # only applies to images
                OneOf(
                    [
                        ElasticTransform(p=0.5,
                                         alpha=120,
                                         sigma=500 * 0.05,
                                         alpha_affine=500 * 0.03),
                        GridDistortion(p=0.5),
                        #         A.OpticalDistortion(distort_limit=1, shift_limit=0.5, p=1),
                    ],
                    p=.5),
                RandomCrop(
                    height=crop_size[stage],
                    width=crop_size[stage],
                ),
            ] + common_transformations))
    val_transforms = wrapper(common_transformations)
    return train_transforms, val_transforms
Example #19
def transform(image, mask, image_name, mask_name):

    x, y = image, mask

    rand = random.uniform(0, 1)
    if (rand > 0.5):

        images_name = [f"{image_name}"]
        masks_name = [f"{mask_name}"]
        images_aug = [x]
        masks_aug = [y]

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

        return imagedict, masksdict

    # Fallback so the final return is always defined; without this, images whose
    # mask is empty would hit the return below with unbound names.
    imagedict = {image_name: x}
    masksdict = {mask_name: y}

    mask_density = np.count_nonzero(y)

    ## Augmenting only images with Gloms
    if (mask_density > 0):
        try:
            h, w, c = x.shape
        except Exception as e:
            image = image[:-1]
            x, y = image, mask
            h, w, c = x.shape

        aug = Blur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x0 = augmented['image']
        y0 = augmented['mask']

        #    aug = CenterCrop(p=1, height=32, width=32)
        #    augmented = aug(image=x, mask=y)
        #    x1 = augmented['image']
        #    y1 = augmented['mask']

        ## Horizontal Flip
        aug = HorizontalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x2 = augmented['image']
        y2 = augmented['mask']

        aug = VerticalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x3 = augmented['image']
        y3 = augmented['mask']

        #      aug = Normalize(p=1)
        #      augmented = aug(image=x, mask=y)
        #      x4 = augmented['image']
        #      y4 = augmented['mask']

        aug = Transpose(p=1)
        augmented = aug(image=x, mask=y)
        x5 = augmented['image']
        y5 = augmented['mask']

        aug = RandomGamma(p=1)
        augmented = aug(image=x, mask=y)
        x6 = augmented['image']
        y6 = augmented['mask']

        ## Optical Distortion
        aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        augmented = aug(image=x, mask=y)
        x7 = augmented['image']
        y7 = augmented['mask']

        ## Grid Distortion
        aug = GridDistortion(p=1)
        augmented = aug(image=x, mask=y)
        x8 = augmented['image']
        y8 = augmented['mask']

        aug = RandomGridShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x9 = augmented['image']
        y9 = augmented['mask']

        aug = HueSaturationValue(p=1)
        augmented = aug(image=x, mask=y)
        x10 = augmented['image']
        y10 = augmented['mask']

        #        aug = PadIfNeeded(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x11 = augmented['image']
        #        y11 = augmented['mask']

        aug = RGBShift(p=1)
        augmented = aug(image=x, mask=y)
        x12 = augmented['image']
        y12 = augmented['mask']

        ## Random Brightness
        aug = RandomBrightness(p=1)
        augmented = aug(image=x, mask=y)
        x13 = augmented['image']
        y13 = augmented['mask']

        ## Random  Contrast
        aug = RandomContrast(p=1)
        augmented = aug(image=x, mask=y)
        x14 = augmented['image']
        y14 = augmented['mask']

        #aug = MotionBlur(p=1)
        #augmented = aug(image=x, mask=y)
        #   x15 = augmented['image']
        #  y15 = augmented['mask']

        aug = MedianBlur(p=1, blur_limit=5)
        augmented = aug(image=x, mask=y)
        x16 = augmented['image']
        y16 = augmented['mask']

        aug = GaussianBlur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x17 = augmented['image']
        y17 = augmented['mask']

        aug = GaussNoise(p=1)
        augmented = aug(image=x, mask=y)
        x18 = augmented['image']
        y18 = augmented['mask']

        aug = GlassBlur(p=1)
        augmented = aug(image=x, mask=y)
        x19 = augmented['image']
        y19 = augmented['mask']

        aug = CLAHE(clip_limit=1.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=1)
        augmented = aug(image=x, mask=y)
        x20 = augmented['image']
        y20 = augmented['mask']

        aug = ChannelShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x21 = augmented['image']
        y21 = augmented['mask']

        aug = ToGray(p=1)
        augmented = aug(image=x, mask=y)
        x22 = augmented['image']
        y22 = augmented['mask']

        aug = ToSepia(p=1)
        augmented = aug(image=x, mask=y)
        x23 = augmented['image']
        y23 = augmented['mask']

        aug = JpegCompression(p=1)
        augmented = aug(image=x, mask=y)
        x24 = augmented['image']
        y24 = augmented['mask']

        aug = ImageCompression(p=1)
        augmented = aug(image=x, mask=y)
        x25 = augmented['image']
        y25 = augmented['mask']

        aug = Cutout(p=1)
        augmented = aug(image=x, mask=y)
        x26 = augmented['image']
        y26 = augmented['mask']

        #       aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
        #       augmented = aug(image=x, mask=y)
        #       x27 = augmented['image']
        #       y27 = augmented['mask']

        #       aug = ToFloat(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x28 = augmented['image']
        #       y28 = augmented['mask']

        aug = FromFloat(p=1)
        augmented = aug(image=x, mask=y)
        x29 = augmented['image']
        y29 = augmented['mask']

        ## Random Brightness and Contrast
        aug = RandomBrightnessContrast(p=1)
        augmented = aug(image=x, mask=y)
        x30 = augmented['image']
        y30 = augmented['mask']

        aug = RandomSnow(p=1)
        augmented = aug(image=x, mask=y)
        x31 = augmented['image']
        y31 = augmented['mask']

        aug = RandomRain(p=1)
        augmented = aug(image=x, mask=y)
        x32 = augmented['image']
        y32 = augmented['mask']

        aug = RandomFog(p=1)
        augmented = aug(image=x, mask=y)
        x33 = augmented['image']
        y33 = augmented['mask']

        aug = RandomSunFlare(p=1)
        augmented = aug(image=x, mask=y)
        x34 = augmented['image']
        y34 = augmented['mask']

        aug = RandomShadow(p=1)
        augmented = aug(image=x, mask=y)
        x35 = augmented['image']
        y35 = augmented['mask']

        aug = Lambda(p=1)
        augmented = aug(image=x, mask=y)
        x36 = augmented['image']
        y36 = augmented['mask']

        aug = ChannelDropout(p=1)
        augmented = aug(image=x, mask=y)
        x37 = augmented['image']
        y37 = augmented['mask']

        aug = ISONoise(p=1)
        augmented = aug(image=x, mask=y)
        x38 = augmented['image']
        y38 = augmented['mask']

        aug = Solarize(p=1)
        augmented = aug(image=x, mask=y)
        x39 = augmented['image']
        y39 = augmented['mask']

        aug = Equalize(p=1)
        augmented = aug(image=x, mask=y)
        x40 = augmented['image']
        y40 = augmented['mask']

        aug = Posterize(p=1)
        augmented = aug(image=x, mask=y)
        x41 = augmented['image']
        y41 = augmented['mask']

        aug = Downscale(p=1)
        augmented = aug(image=x, mask=y)
        x42 = augmented['image']
        y42 = augmented['mask']

        aug = MultiplicativeNoise(p=1)
        augmented = aug(image=x, mask=y)
        x43 = augmented['image']
        y43 = augmented['mask']

        aug = FancyPCA(p=1)
        augmented = aug(image=x, mask=y)
        x44 = augmented['image']
        y44 = augmented['mask']

        #       aug = MaskDropout(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x45 = augmented['image']
        #       y45 = augmented['mask']

        aug = GridDropout(p=1)
        augmented = aug(image=x, mask=y)
        x46 = augmented['image']
        y46 = augmented['mask']

        aug = ColorJitter(p=1)
        augmented = aug(image=x, mask=y)
        x47 = augmented['image']
        y47 = augmented['mask']

        ## ElasticTransform
        aug = ElasticTransform(p=1,
                               alpha=120,
                               sigma=512 * 0.05,
                               alpha_affine=512 * 0.03)
        augmented = aug(image=x, mask=y)
        x50 = augmented['image']
        y50 = augmented['mask']

        aug = CropNonEmptyMaskIfExists(p=1, height=22, width=32)
        augmented = aug(image=x, mask=y)
        x51 = augmented['image']
        y51 = augmented['mask']

        aug = IAAAffine(p=1)
        augmented = aug(image=x, mask=y)
        x52 = augmented['image']
        y52 = augmented['mask']

        #        aug = IAACropAndPad(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x53 = augmented['image']
        #        y53 = augmented['mask']

        aug = IAAFliplr(p=1)
        augmented = aug(image=x, mask=y)
        x54 = augmented['image']
        y54 = augmented['mask']

        aug = IAAFlipud(p=1)
        augmented = aug(image=x, mask=y)
        x55 = augmented['image']
        y55 = augmented['mask']

        aug = IAAPerspective(p=1)
        augmented = aug(image=x, mask=y)
        x56 = augmented['image']
        y56 = augmented['mask']

        aug = IAAPiecewiseAffine(p=1)
        augmented = aug(image=x, mask=y)
        x57 = augmented['image']
        y57 = augmented['mask']

        aug = LongestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x58 = augmented['image']
        y58 = augmented['mask']

        aug = NoOp(p=1)
        augmented = aug(image=x, mask=y)
        x59 = augmented['image']
        y59 = augmented['mask']

        #       aug = RandomCrop(p=1, height=22, width=22)
        #       augmented = aug(image=x, mask=y)
        #       x61 = augmented['image']
        #       y61 = augmented['mask']

        #      aug = RandomResizedCrop(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x63 = augmented['image']
        #      y63 = augmented['mask']

        aug = RandomScale(p=1)
        augmented = aug(image=x, mask=y)
        x64 = augmented['image']
        y64 = augmented['mask']

        #      aug = RandomSizedCrop(p=1, height=22, width=20, min_max_height = [32,32])
        #      augmented = aug(image=x, mask=y)
        #      x66 = augmented['image']
        #      y66 = augmented['mask']

        #      aug = Resize(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x67 = augmented['image']
        #      y67 = augmented['mask']

        aug = Rotate(p=1)
        augmented = aug(image=x, mask=y)
        x68 = augmented['image']
        y68 = augmented['mask']

        aug = ShiftScaleRotate(p=1)
        augmented = aug(image=x, mask=y)
        x69 = augmented['image']
        y69 = augmented['mask']

        aug = SmallestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x70 = augmented['image']
        y70 = augmented['mask']

        images_aug.extend([
            x, x0, x2, x3, x5, x6, x7, x8, x9, x10, x12, x13, x14, x16, x17,
            x18, x19, x20, x21, x22, x23, x24, x25, x26, x29, x30, x31, x32,
            x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x46,
            x47, x50, x51, x52, x54, x55, x56, x57, x58, x59, x64, x68, x69,
            x70
        ])

        masks_aug.extend([
            y, y0, y2, y3, y5, y6, y7, y8, y9, y10, y12, y13, y14, y16, y17,
            y18, y19, y20, y21, y22, y23, y24, y25, y26, y29, y30, y31, y32,
            y33, y34, y35, y36, y37, y38, y39, y40, y41, y42, y43, y44, y46,
            y47, y50, y51, y52, y54, y55, y56, y57, y58, y59, y64, y68, y69,
            y70
        ])

        idx = -1
        images_name = []
        masks_name = []
        for i, m in zip(images_aug, masks_aug):
            if idx == -1:
                tmp_image_name = f"{image_name}"
                tmp_mask_name = f"{mask_name}"
            else:
                tmp_image_name = f"{image_name}_{smalllist[idx]}"
                tmp_mask_name = f"{mask_name}_{smalllist[idx]}"
            # append, not extend: extend would add the string character by character
            images_name.append(tmp_image_name)
            masks_name.append(tmp_mask_name)
            idx += 1

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

    return imagedict, masksdict
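A hedged driver sketch for the function above. With an empty mask (or with probability 0.5) the originals come back unchanged; otherwise every augmentation is applied and returned under suffixed names, which assumes the module-level smalllist of name suffixes referenced above is defined.

import numpy as np

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # dummy image
mask = np.zeros((512, 512), dtype=np.uint8)                       # empty mask

imagedict, masksdict = transform(image, mask, "img_001", "mask_001")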
Example #20
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--train-image-dirs', required=True, nargs='+')
    parser.add_argument('--train-mask-dirs', required=True, nargs='+')
    parser.add_argument('--train-dataset-types', nargs='+')

    parser.add_argument('--validation-image-dirs', required=True, nargs='+')
    parser.add_argument('--validation-mask-dirs', required=True, nargs='+')
    parser.add_argument('--validation-dataset-types', nargs='+')

    parser.add_argument('--save-dir', default='./runs')
    parser.add_argument('--cpu', action='store_true')
    parser.add_argument('--category-type', default='binary', choices=['binary', 'simple'])
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--input-size', type=int, nargs=2, default=(640, 640))
    parser.add_argument('--jaccard-weight', type=float, default=0.3)

    available_networks = ['unet11', 'unet16']
    parser.add_argument('--model-name', type=str, default='unet11', choices=available_networks)

    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--device-id', type=int, default=0)
    parser.add_argument('--run-name', type=str, required=True)

    args = parser.parse_args()
    print(args)

    device = torch_tools.get_device(args.cpu, args.device_id)
    torch_tools.set_seeds(args.seed, device)

    train_image_dirs = [Path(p) for p in args.train_image_dirs]
    train_mask_dirs = [Path(p) for p in args.train_mask_dirs]
    validation_image_dirs = [Path(p) for p in args.validation_image_dirs]
    validation_mask_dirs = [Path(p) for p in args.validation_mask_dirs]

    for data_dir in\
            train_image_dirs +\
            train_mask_dirs +\
            validation_image_dirs +\
            validation_mask_dirs:
        assert data_dir.exists(), f'{str(data_dir)} does not exist.'

    input_size = args.input_size
    w, h = input_size
    rate = 0.9

    '''
    train_transform = Compose([
        HorizontalFlip(p=0.5),
        RandomCrop(*input_size),
    ])
    '''
    # Transforms
    transforms = {}

    # Basic transform
    transforms['base'] = Compose([
        HorizontalFlip(p=0.5),
        #IAAPerspective(scale=(0.05, 0.1), p=0.3),
        Rotate(5, p=0.5),
        RandomGamma(p=0.5),
        HueSaturationValue(
            hue_shift_limit=10,
            sat_shift_limit=15,
            val_shift_limit=10,
            p=0.5
        ),
        RandomBrightnessContrast(p=0.5),
        OneOf([
            RandomSizedCrop((int(h * rate), int(w * rate)), h, w, p=1.0),
            RandomCrop(h, w, p=1.0),
        ], p=1.0)
    ])

    # BDD dataset
    transforms['bdd'] = transforms['base']

    # Always shrink to 22% - 50%
    transforms['walk'] = Compose([
        HorizontalFlip(p=0.5),
        Rotate(5, p=0.5),
        RandomGamma(p=0.5),
        HueSaturationValue(
            hue_shift_limit=10,
            sat_shift_limit=15,
            val_shift_limit=10,
            p=0.5
        ),
        RandomBrightnessContrast(p=0.5),
        RandomScale((-0.78, -0.5), p=1.0),
        RandomCrop(h, w, p=1.0),
    ])

    # MISC dataset transform
    transforms['misc'] = Compose([
        HorizontalFlip(p=0.5),
        Rotate(5, p=0.5),
        RandomGamma(p=0.5),
        HueSaturationValue(
            hue_shift_limit=10,
            sat_shift_limit=15,
            val_shift_limit=10,
            p=0.5
        ),
        RandomBrightnessContrast(p=0.5),
        Resize(h, w, p=1.0),
    ])

    validation_transform = Compose([
        CenterCrop(h, w),
    ])

    category_type = datasets.surface_types.from_string(args.category_type)

    # Logger
    log_dir = _get_log_dir(args)
    logger = logging.Logger(log_dir, n_save=16, image_size=256, category_type=category_type)
    logger.writer.add_text('args', str(args))

    train_datasets = []
    for image_dir, mask_dir, dataset_type in zip(train_image_dirs, train_mask_dirs, args.train_dataset_types):
        _dataset = datasets.create_dataset(
            dataset_type,
            [image_dir],
            [mask_dir],
            category_type,
            transforms[dataset_type],
        )
        train_datasets.append(_dataset)

    validation_datasets = []
    for image_dir, mask_dir, dataset_type in zip(validation_image_dirs, validation_mask_dirs, args.validation_dataset_types):
        _dataset = datasets.create_dataset(
            dataset_type,
            [image_dir],
            [mask_dir],
            category_type,
            validation_transform,
        )
        validation_datasets.append(_dataset)

    # Merge datasets
    train_dataset = ConcatDataset(train_datasets)
    validation_dataset = ConcatDataset(validation_datasets)

    train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
    validation_loader = DataLoader(validation_dataset, args.batch_size, shuffle=False)
    net = models.load_model(args.model_name, category_type).to(device)

    criterion = models.loss.get_criterion(category_type, args.jaccard_weight)
    optimizer = torch.optim.Adam(net.parameters())

    for epoch in range(1, args.epochs + 1):
        print(f'epoch: {epoch:03d}')
        sys.stdout.flush()
        train(net, train_loader, epoch, optimizer, criterion, device, logger)
        evaluate(net, validation_loader, epoch, criterion, device, logger, 'validation')
Example #21
import cv2
from albumentations import (
    Compose, HorizontalFlip, Rotate, HueSaturationValue, RandomBrightness,
    RandomContrast, RandomGamma, JpegCompression, GaussNoise, Cutout,
    MedianBlur, Blur, OneOf, IAAAdditiveGaussianNoise, OpticalDistortion,
    GridDistortion, IAAPiecewiseAffine, ShiftScaleRotate, CenterCrop,
    RandomCrop, Resize, PadIfNeeded, RandomScale, SmallestMaxSize)
from albumentations.pytorch.transforms import ToTensor

cv2.setNumThreads(0)

train_transform = Compose([
    SmallestMaxSize(224),
    RandomScale(scale_limit=0.125),
    # PadIfNeeded(256, 256, border_mode=cv2.BORDER_CONSTANT, value=0, p=1.),
    # ShiftScaleRotate(
    #     shift_limit=0.0625, scale_limit=0.1, rotate_limit=30,
    #     border_mode=cv2.BORDER_REFLECT_101, p=1.),
    Rotate(limit=20, border_mode=cv2.BORDER_REFLECT_101, p=1.),
    OneOf([
        RandomCrop(192, 192, p=0.9),
        CenterCrop(192, 192, p=0.1),
    ], p=1.),
    HorizontalFlip(p=0.5),
    RandomContrast(limit=0.2, p=0.5),
    RandomGamma(gamma_limit=(80, 120), p=0.5),
    RandomBrightness(limit=0.2, p=0.5),
    # HueSaturationValue(hue_shift_limit=5, sat_shift_limit=20,
    #                    val_shift_limit=10, p=1.),
    # OneOf([
    #     OpticalDistortion(p=0.3),
Example #22
'''4. CutOut Augmentation'''
max_hole_size = int(IMG_SIZE/5)
aug4 = Cutout(p=1, max_h_size=max_hole_size, max_w_size=max_hole_size, num_holes=8)  # default num_holes=8

'''5. SunFlare Augmentation'''
aug5 = RandomSunFlare(src_radius=max_hole_size,
                      num_flare_circles_lower=10,
                      num_flare_circles_upper=20,
                      p=1)  # default flare_roi=(0, 0, 1, 0.5)

# Build the training-time data augmentation pipeline
train_transform = Compose([
    aug1,
    aug2,
    # aug3,
    RandomScale(),
    RandomGamma(),
    aug4,
    aug5,
    Resize(IMG_SIZE, IMG_SIZE),
], p=1)

"""Compose([
  #  RandomCrop(288),
    HorizontalFlip(),
    Rotate((-ROTATE, ROTATE)),
    RandomBrightnessContrast(),
#    HueSaturationValue(),
    RandomScale(),
    RandomGamma(),
  #  Resize(width=IMG_SIZE,height=IMG_SIZE),
Example #23
def Scale_Resize_Crop(img, v):  # [0.8, 1.2]
    assert 0.8 <= v <= 1.2
    transform = Compose([RandomScale(scale_limit=v, interpolation=2), Resize(img.shape[1] + 20, img.shape[1] + 20, interpolation=2),
                         RandomCrop(img.shape[1], img.shape[1])])
    Aug_img = transform(image=img)
    return Aug_img
Example #24
def albumentations_transforms(
    crop_size,
    shorter_side,
    low_scale,
    high_scale,
    img_mean,
    img_std,
    img_scale,
    ignore_label,
    num_stages,
    dataset_type,
):
    from albumentations import (
        Normalize,
        HorizontalFlip,
        RandomCrop,
        PadIfNeeded,
        RandomScale,
        LongestMaxSize,
        SmallestMaxSize,
        OneOf,
    )
    from albumentations.pytorch import ToTensorV2 as ToTensor
    from densetorch.data import albumentations2densetorch

    if dataset_type == "densetorch":
        wrapper = albumentations2densetorch
    elif dataset_type == "torchvision":
        wrapper = albumentations2torchvision
    else:
        raise ValueError(f"Unknown dataset type: {dataset_type}")

    common_transformations = [
        Normalize(max_pixel_value=1.0 / img_scale, mean=img_mean, std=img_std),
        ToTensor(),
    ]
    train_transforms = []
    for stage in range(num_stages):
        train_transforms.append(
            wrapper(
                [
                    OneOf(
                        [
                            RandomScale(
                                scale_limit=(low_scale[stage], high_scale[stage])
                            ),
                            LongestMaxSize(max_size=shorter_side[stage]),
                            SmallestMaxSize(max_size=shorter_side[stage]),
                        ]
                    ),
                    PadIfNeeded(
                        min_height=crop_size[stage],
                        min_width=crop_size[stage],
                        border_mode=cv2.BORDER_CONSTANT,
                        value=np.array(img_mean) / img_scale,
                        mask_value=ignore_label,
                    ),
                    HorizontalFlip(p=0.5,),
                    RandomCrop(height=crop_size[stage], width=crop_size[stage],),
                ]
                + common_transformations
            )
        )
    val_transforms = wrapper(common_transformations)
    return train_transforms, val_transforms