Пример #1
0
def cifar10_albumentations(mean, std):
    """Build CIFAR-10 train/test albumentations pipelines.

    Note: Normalize must run before ToTensor in both pipelines — the
    order matters.
    """
    train_steps = [
        A.GridDistortion(distort_limit=(-0.3, 0.3), p=0.5),
        A.Rotate(limit=(-10, 10), p=0.5),
        A.HorizontalFlip(p=0.25),
        A.Cutout(num_holes=1, max_h_size=12, max_w_size=12),
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    test_steps = [
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    train_tf = Albumentation_Transforms(A.Compose(train_steps))
    test_tf = Albumentation_Transforms(A.Compose(test_steps))
    return train_tf, test_tf
def albumentation():
    """Training pipeline picking one transform from each augmentation
    family (noise, blur, color, contrast, distortion), then
    ImageNet-normalized ToTensor."""
    noise = albumentations.OneOf([
        albumentations.GaussNoise(),
        albumentations.IAAAdditiveGaussianNoise(),
    ])
    blur = albumentations.OneOf([
        albumentations.MotionBlur(blur_limit=3, p=0.2),
        albumentations.MedianBlur(blur_limit=3, p=0.1),
        albumentations.Blur(blur_limit=2, p=0.1),
    ])
    color = albumentations.OneOf([
        albumentations.RandomBrightness(limit=(0.1, 0.4)),
        albumentations.HueSaturationValue(hue_shift_limit=(0, 128),
                                          sat_shift_limit=(0, 60),
                                          val_shift_limit=(0, 20)),
        albumentations.RGBShift(r_shift_limit=30,
                                g_shift_limit=30,
                                b_shift_limit=30),
    ])
    contrast = albumentations.OneOf([
        albumentations.CLAHE(),
        albumentations.ChannelShuffle(),
        albumentations.IAASharpen(),
        albumentations.IAAEmboss(),
        albumentations.RandomBrightnessContrast(),
    ])
    distortion = albumentations.OneOf([
        albumentations.RandomGamma(gamma_limit=(35, 255)),
        albumentations.OpticalDistortion(),
        albumentations.GridDistortion(),
        albumentations.IAAPiecewiseAffine(),
    ])
    to_tensor = A_torch.ToTensor(normalize={
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225]})
    return albumentations.Compose(
        [noise, blur, color, contrast, distortion, to_tensor])
Пример #3
0
 def build_train(self):
     """Training pipeline: normalize, convert to tensor, then replicate
     the single channel into three identical channels via a Lambda."""
     steps = [
         A.Normalize(mean=self.mean, std=self.std),
         AT.ToTensor(),
         # grayscale -> 3-channel tensor
         A.Lambda(lambda x: torch.cat([x, x, x], 0), always_apply=True),
     ]
     return A.Compose(steps)
Пример #4
0
 def AlbumentationTrainTransform(self):
     """Train transform: horizontal flip + one 16x16 cutout, then
     normalize/ToTensor; returns a callable taking a PIL image."""
     pipeline = tc.Compose([
         ta.HorizontalFlip(),
         ta.Cutout(num_holes=1, max_h_size=16, max_w_size=16),
         tp.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
     ])

     def apply(img):
         return pipeline(image=np.array(img))["image"]

     return apply
Пример #5
0
 def AlbumentationTestTransform(self):
     """Test transform: normalize + ToTensor only (TinyImageNet stats);
     returns a callable taking a PIL image."""
     pipeline = tc.Compose([
         tp.ToTensor(dict(mean=(0.4802, 0.4481, 0.3975),
                          std=(0.2302, 0.2265, 0.2262))),
     ])

     def apply(img):
         return pipeline(image=np.array(img))["image"]

     return apply
Пример #6
0
def model10_resnet_train_transforms():
  """Model-10 ResNet train transform: flip + 16x16 cutout + ToTensor
  with 0.5/0.5 normalization; returns a PIL-image callable."""
  pipeline = C.Compose([
      A.HorizontalFlip(),
      A.Cutout(num_holes=1, max_h_size=16, max_w_size=16),
      P.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
  ])
  return lambda img: pipeline(image=np.array(img))["image"]
Пример #7
0
 def AlbumentationTrainTransform(self):
     """Train transform: pad, 32x32 random crop, one 8x8 cutout, flip,
     then normalize/ToTensor; returns a callable taking a PIL image."""
     pipeline = tc.Compose([
         ta.PadIfNeeded(4, 4, always_apply=True),
         ta.RandomCrop(height=32, width=32, always_apply=True),
         ta.Cutout(num_holes=1, max_h_size=8, max_w_size=8,
                   always_apply=True),
         ta.HorizontalFlip(),
         tp.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
     ])

     def apply(img):
         return pipeline(image=np.array(img))["image"]

     return apply
Пример #8
0
def cifar10_s11_albumentations(mean, std):
    """Session-11 CIFAR-10 pipelines: pad-to-36, crop-32, flip, cutout.

    Note: Normalize must run before ToTensor in both pipelines — the
    order matters.
    """
    train_steps = [
        A.PadIfNeeded(36, 36, p=1),
        A.RandomCrop(32, 32, p=1),
        A.HorizontalFlip(p=0.5),
        A.Cutout(num_holes=2, max_h_size=8, max_w_size=8),
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    test_steps = [
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    train_tf = Albumentation_Transforms(A.Compose(train_steps))
    test_tf = Albumentation_Transforms(A.Compose(test_steps))
    return train_tf, test_tf
Пример #9
0
 def test_augmentation(self):
     """Test-set augmentation: CIFAR-10 normalization then ToTensor."""
     steps = [
         A.Normalize((0.4914, 0.4822, 0.4465),
                     (0.2023, 0.1994, 0.2010)),
         AT.ToTensor(),
     ]
     return AlbumentationTransforms(A.Compose(steps))
Пример #10
0
 def test_augmentation(self):
     """Test-set augmentation: TinyImageNet normalization then ToTensor."""
     steps = [
         A.Normalize(mean=[0.4802, 0.4481, 0.3975],
                     std=[0.2302, 0.2265, 0.2262]),
         AT.ToTensor(),
     ]
     return AlbumentationTransforms(A.Compose(steps))
Пример #11
0
    def build_train(self):
        """Training pipeline: small rotation, flip, normalize with the
        instance's stats, 4-hole cutout, then ToTensor."""
        steps = [
            A.Rotate((-15.0, 15.0), p=0.3),
            A.HorizontalFlip(),
            A.Normalize(mean=self.mean, std=self.std),
            A.Cutout(num_holes=4),
            AT.ToTensor(),
        ]
        return AlbumentationTransforms(A.Compose(steps))
Пример #12
0
def model12_train_transforms():
  """Model-12 train transform: pad to 70, random 64x64 crop, flip,
  one 32x32 cutout, then ToTensor with TinyImageNet stats."""
  pipeline = C.Compose([
      A.PadIfNeeded(min_height=70, min_width=70,
                    border_mode=cv2.BORDER_CONSTANT, value=0.5),
      A.RandomCrop(height=64, width=64),
      A.HorizontalFlip(p=0.5),
      A.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=1),
      P.ToTensor(dict(mean=(0.4802, 0.4481, 0.3975),
                      std=(0.2302, 0.2265, 0.2262))),
  ])
  return lambda img: pipeline(image=np.array(img))["image"]
Пример #13
0
def model11_davidnet_train_transforms():
  """DavidNet-style CIFAR train transform: pad to 36, random 32x32 crop,
  flip, one 8x8 cutout, then ToTensor with 0.5/0.5 normalization."""
  pipeline = C.Compose([
      A.PadIfNeeded(min_height=36, min_width=36,
                    border_mode=cv2.BORDER_CONSTANT, value=0.5),
      A.RandomCrop(height=32, width=32, p=1),
      A.HorizontalFlip(p=0.5),
      A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, p=1),
      P.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
  ])
  return lambda img: pipeline(image=np.array(img))["image"]
Пример #14
0
    def build_train(self):
        """Training pipeline: pad to 36, random 32x32 crop, flip,
        normalize with the instance's stats, 4-hole cutout, ToTensor."""
        steps = [
            A.PadIfNeeded(min_height=36, min_width=36),
            A.RandomCrop(height=32, width=32),
            A.HorizontalFlip(),
            A.Normalize(mean=self.mean, std=self.std),
            A.Cutout(num_holes=4),
            AT.ToTensor(),
        ]
        return AlbumentationTransforms(A.Compose(steps))
Пример #15
0
def tinyimagenet_albumentations(mean, std):
    """TinyImageNet pipelines: pad-to-68, crop-64, rotate, flip, grid
    distortion, 3-hole cutout for training; normalize-only for test.

    Note: Normalize must run before ToTensor in both pipelines — the
    order matters.
    """
    train_steps = [
        A.PadIfNeeded(68, 68, p=1),
        A.RandomCrop(64, 64, p=1),
        A.Rotate(limit=(-10, 10), p=0.5),
        A.HorizontalFlip(p=0.5),
        A.GridDistortion(distort_limit=(-0.3, 0.3), p=0.5),
        A.Cutout(num_holes=3, max_h_size=8, max_w_size=8),
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    test_steps = [
        A.Normalize(mean=mean, std=std),
        APT.ToTensor(),
    ]
    train_tf = Albumentation_Transforms(A.Compose(train_steps))
    test_tf = Albumentation_Transforms(A.Compose(test_steps))
    return train_tf, test_tf
Пример #16
0
 def AlbumentationTrainTransform(self):
     """Train transform: flip, rotate, always-applied 30x30 random crop,
     ImageNet normalization, ToTensor; returns a PIL-image callable.

     Fix: the random crop used p=5.0, which is not a valid probability
     (albumentations expects p in [0, 1]). Since p=5.0 effectively meant
     "always apply", it is written explicitly as p=1.0 — same behavior,
     valid parameter. Dead commented-out transforms removed.
     """
     tf = tc.Compose([ta.HorizontalFlip(p=0.5),
                      ta.Rotate(limit=(-20, 20)),
                      ta.RandomCrop(height=30, width=30, p=1.0),
                      ta.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
                      tp.ToTensor()
                      ])
     return lambda img: tf(image=np.array(img))["image"]
Пример #17
0
 def train_augmentation(self):
     """Training augmentation: pad to 36, random 32x32 crop, flip,
     CIFAR-10 normalization, 4-hole cutout, then ToTensor."""
     steps = [
         A.PadIfNeeded(min_height=36, min_width=36),
         A.RandomCrop(32, 32),
         A.HorizontalFlip(),
         A.Normalize((0.4914, 0.4822, 0.4465),
                     (0.2023, 0.1994, 0.2010)),
         A.Cutout(num_holes=4),
         AT.ToTensor(),
     ]
     return AlbumentationTransforms(A.Compose(steps))
Пример #18
0
 def train_augmentation(self):
     """Training augmentation: random 64x64 crop, rotate, flip,
     TinyImageNet normalization, 4-hole cutout, then ToTensor."""
     steps = [
         A.RandomCrop(64, 64),
         A.Rotate((-30.0, 30.0)),
         A.HorizontalFlip(),
         A.Normalize(mean=[0.4802, 0.4481, 0.3975],
                     std=[0.2302, 0.2265, 0.2262]),
         A.Cutout(num_holes=4),
         AT.ToTensor(),
     ]
     return AlbumentationTransforms(A.Compose(steps))
Пример #19
0
def get_Dataset(dframe, train_dir):
    """Build a GetFeatureDataset over (dframe, train_dir) with an
    8-hole cutout followed by ToTensor.

    To extract features instead, replace the transform with
    Compose([transforms.ToTensor(), ToFeatures()]).
    """
    cutout = A.augmentations.transforms.Cutout(num_holes=8,
                                               max_h_size=64,
                                               max_w_size=64,
                                               p=0.5)
    composed = Compose([cutout, transforms.ToTensor()])
    return GetFeatureDataset(dframe, train_dir, composed)
Пример #20
0
 def AlbumentationTestTransform(self):
     """Test transform: ImageNet normalization then ToTensor; returns a
     callable taking a PIL image."""
     pipeline = tc.Compose([
         ta.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
         tp.ToTensor(),
     ])

     def apply(img):
         return pipeline(image=np.array(img))["image"]

     return apply
Пример #21
0
def run():
    """Entry point: seed RNGs, build train/val augmentations, create the
    datasets, then either evaluate or train a VesselTraining instance."""
    if args.seed is not None:
        from csl_common.utils.common import init_random
        init_random(args.seed)

    # native image size per retina dataset
    size_per_dataset = {
        'drive': 512,
        'stare': 512,
        'chase': 1024,
        'hrf': 2560,
    }
    base_size = size_per_dataset[args.dataset_train[0]]

    # training: rotation, scale-jittered crop to input_size, photometric
    # jitter, both flips, then normalized tensor conversion
    transform_train = alb.Compose([
        alb.Rotate(60, border_mode=cv2.BORDER_CONSTANT),
        alb.RandomSizedCrop(
            min_max_height=(int(base_size * 0.25), int(base_size * 0.5)),
            height=args.input_size,
            width=args.input_size,
            p=1.0),
        alb.RGBShift(p=0.5),
        alb.RandomBrightnessContrast(brightness_limit=0.5,
                                     contrast_limit=0.5,
                                     p=0.5),
        alb.RandomGamma(),
        alb.HorizontalFlip(p=0.5),
        alb.VerticalFlip(p=0.5),
        alb_torch.ToTensor(
            normalize=dict(mean=[0.518, 0.418, 0.361], std=[1, 1, 1])),
    ])

    # validation: same crop (default probability), fixed resize, tensor
    transform_val = alb.Compose([
        alb.RandomSizedCrop(
            min_max_height=(int(base_size * 0.25), int(base_size * 0.5)),
            height=args.input_size,
            width=args.input_size),
        alb.Resize(args.input_size, args.input_size, always_apply=True),
        alb_torch.ToTensor(
            normalize=dict(mean=[0.518, 0.418, 0.361], std=[1, 1, 1])),
    ])

    torch.backends.cudnn.benchmark = True

    datasets = {
        VAL: retinadataset.create_dataset_multi(
            args.dataset_val,
            transform_val,
            num_samples=args.val_count,
            repeat_factor=5,
            train=False),
    }

    if args.eval:
        VesselTraining(datasets, args).evaluate()
    else:
        datasets[TRAIN] = retinadataset.create_dataset_multi(
            args.dataset_train,
            transform_train,
            num_samples=args.train_count,
            train=True,
            repeat_factor=args.n_dataset_repeats)
        VesselTraining(datasets, args).train(num_epochs=args.epochs)
Пример #22
0
import os
import torch
import time
import cv2
import numpy as np
import matplotlib.pyplot as plt

import albumentations as alb
from albumentations.pytorch import transforms as alb_torch

from csl_common.utils.nn import to_numpy
from csl_common.vis import vis

# Module-level pipeline turning an image crop into a normalized tensor
# (per-channel mean subtraction; std of 1 leaves the scale unchanged).
_crop_to_tensor = alb.Compose([
    alb_torch.ToTensor(
        normalize=dict(mean=[0.518, 0.418, 0.361], std=[1, 1, 1]))
])


def _predict_center_crop(net, image, crop_size=544, gpu=True):
    # NOTE(review): this function is truncated in the visible chunk;
    # documented as-is, code unchanged.
    h, w, c = image.shape
    # full-resolution probability map to be filled from the centered crop
    image_probs = torch.zeros((h, w))

    # top-left corner of a centered crop_size x crop_size window
    x = (w - crop_size) // 2
    y = (h - crop_size) // 2
    image_crop = image[y:y + crop_size, x:x + crop_size]

    # convert the crop to a normalized tensor via the module-level pipeline
    input = _crop_to_tensor(image=image_crop)['image']
    if gpu:
        input = input.cuda()
Пример #23
0
def model12_test_transforms():
  """Model-12 test transform: ToTensor with TinyImageNet normalization
  only; returns a PIL-image callable."""
  pipeline = C.Compose([
      P.ToTensor(dict(mean=(0.4802, 0.4481, 0.3975),
                      std=(0.2302, 0.2265, 0.2262))),
  ])
  return lambda img: pipeline(image=np.array(img))["image"]
Пример #24
0
    def build_test(self):
        """Test pipeline: normalize with the instance's stats, ToTensor."""
        steps = [
            A.Normalize(mean=self.mean, std=self.std),
            AT.ToTensor(),
        ]
        return AlbumentationTransforms(A.Compose(steps))
Пример #25
0
def model9_resnet_test_transforms():
  """Model-9 ResNet test transform: ToTensor with 0.5/0.5 normalization
  only; returns a PIL-image callable."""
  pipeline = C.Compose([
      P.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))),
  ])
  return lambda img: pipeline(image=np.array(img))["image"]