def get_transforms(phase, size, mean, std):
    """Build the albumentations pipeline for one dataset phase.

    Training adds a mild scale/rotate augmentation; every phase resizes to
    a square of side ``size``, normalizes with the supplied statistics and
    converts to a tensor.
    """
    steps = []
    if phase == "train":
        # Small geometric jitter only; flips/noise are deliberately off.
        steps.append(
            ShiftScaleRotate(
                shift_limit=0,  # no translation
                scale_limit=0.1,
                rotate_limit=10,
                p=0.5,
                border_mode=cv2.BORDER_CONSTANT,
            )
        )
    steps += [
        Resize(size, size),
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
def get_transforms(phase):
    """Compose segmentation augmentations for *phase*.

    NOTE(review): relies on module-level ``original_height``/``original_width``
    being defined elsewhere — confirm they are in scope before use.
    """
    transforms = []
    if phase == "train":
        transforms += [
            # Either crop-and-resize or pad back to the original canvas.
            OneOf([
                RandomSizedCrop(min_max_height=(50, 101),
                                height=original_height,
                                width=original_width,
                                p=0.5),
                PadIfNeeded(min_height=original_height,
                            min_width=original_width,
                            p=0.5),
            ], p=1),
            VerticalFlip(p=0.5),
            # Non-rigid deformations: at most one is applied per sample.
            OneOf([
                ElasticTransform(p=0.5, alpha=120, sigma=120 * 0.05,
                                 alpha_affine=120 * 0.03),
                GridDistortion(p=0.5),
                OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
            ], p=0.8),
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8),
        ]
    transforms += [
        # Quarter-resolution output; nearest interpolation keeps mask labels intact.
        Resize(height=int(original_height / 4),
               width=int(original_width / 4),
               interpolation=cv2.INTER_NEAREST),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]
    return Compose(transforms)
def get_transforms(phase, size, mean, std):
    """Assemble the augmentation pipeline for one dataset phase.

    Train-time: horizontal flip, brightness/contrast jitter and a mild
    rotate/scale. All phases: normalize, resize to ``size`` x ``size`` and
    convert to a tensor.
    """
    pipeline = []
    if phase == "train":
        pipeline += [
            HorizontalFlip(p=0.5),
            RandomBrightness(p=0.2, limit=0.2),
            RandomContrast(p=0.1, limit=0.2),
            ShiftScaleRotate(
                shift_limit=0,  # no translation
                scale_limit=0.1,
                rotate_limit=10,
                p=0.5,
                border_mode=cv2.BORDER_CONSTANT,
            ),
        ]
    pipeline += [
        Normalize(mean=mean, std=std, p=1),
        Resize(size, size),
        ToTensor(),
    ]
    return Compose(pipeline)
Beispiel #4
0
def get_augmetation(phase):
    """Return the albumentations pipeline for *phase*.

    NOTE(review): depends on module-level ``IMAGE_SIZE``, ``IMAGE_RGB_MEAN``
    and ``IMAGE_RGB_STD`` being defined elsewhere in the file.
    """
    transforms = []
    if phase == 'train':
        transforms += [
            albumentations.HorizontalFlip(),
            albumentations.VerticalFlip(),
            albumentations.ShiftScaleRotate(shift_limit=0.03,
                                            scale_limit=0,
                                            rotate_limit=(-3, 3),
                                            border_mode=0),
            # Pad first so the subsequent random crop always fits.
            albumentations.PadIfNeeded(min_height=IMAGE_SIZE[0],
                                       min_width=IMAGE_SIZE[1],
                                       border_mode=0),
            albumentations.RandomCrop(*IMAGE_SIZE),
            albumentations.RandomBrightness(limit=(-0.25, 0.25)),
            albumentations.RandomContrast(limit=(-0.15, 0.4)),
            albumentations.RGBShift(r_shift_limit=10,
                                    g_shift_limit=10,
                                    b_shift_limit=10),
        ]
    transforms += [
        albumentations.Normalize(mean=IMAGE_RGB_MEAN, std=IMAGE_RGB_STD),
        ToTensor(),
    ]
    return albumentations.Compose(transforms, p=1)
Beispiel #5
0
 def __init__(self,
              root_path,
              file_list,
              is_test=False,
              is_val=False,
              augment=False):
     """Remember dataset location/flags and prebuild the transform pipelines."""
     self.is_test = is_test
     self.augment = augment
     self.root_path = root_path
     self.file_list = file_list
     # Deterministic pad-to-square applied to every sample.
     self.pad = Compose([
         PadIfNeeded(p=1, min_height=PAD_SIZE, min_width=PAD_SIZE),
         ToTensor(),
     ])
     full_height, full_width = 101, 101  # native tile size of this dataset
     # Stochastic train-time augmentation.
     self.augmentation = Compose([
         RandomSizedCrop(min_max_height=(50, 101),
                         height=full_height,
                         width=full_width,
                         p=0.9),
         HorizontalFlip(p=0.5),
         GridDistortion(p=0.8),
         RandomContrast(p=0.8),
         RandomBrightness(p=0.8),
         RandomGamma(p=0.8),
     ])
    def Auger(self):
        """Build and return the composed transform for this dataset.

        Training adds color/flip/affine jitter; every sample is randomly
        cropped to ``self.shape`` x ``self.shape`` and converted to a tensor.
        """
        steps = []
        if self.is_train:
            steps += [
                HueSaturationValue(10, 10, 10, p=0.3),
                HorizontalFlip(0.3),
                VerticalFlip(p=0.3),
                # NOTE(review): rescales [0, 255] -> [0.0, 1.0]; confirm the
                # model expects inputs in that range.
                ToFloat(always_apply=True),
                ShiftScaleRotate(
                    shift_limit=0.1,
                    scale_limit=0.1,
                    rotate_limit=3,
                    p=0.5,
                    border_mode=cv2.BORDER_REFLECT),
                PadIfNeeded(self.padshape, self.padshape),
            ]
        steps += [
            RandomCrop(self.shape, self.shape),
            ToTensor(),
        ]
        return Compose(steps)
 def __getitem__(self, idx):
     """Load, augment and tensorize sample *idx*; return ``(image, label)``."""
     name = self.files[idx]
     img_path = os.path.join(os.path.join(PATH, self.mode + '_images'), name)
     image = cv2.imread(img_path)
     mean = (0.485, 0.456, 0.406)  # ImageNet statistics
     std = (0.229, 0.224, 0.225)
     pipeline = Compose([
         OneOf([
             VerticalFlip(),
             HorizontalFlip(),
         ], p=0.5),
         OneOf([
             MotionBlur(p=0.4),
             MedianBlur(p=0.4, blur_limit=3),
             Blur(p=0.5, blur_limit=3),
         ], p=0.4),
         OneOf([
             IAAAdditiveGaussianNoise(),
             GaussNoise(),
         ], p=0.4),
         Normalize(mean=mean, std=std, p=1),
         ToTensor(),
     ])
     image = pipeline(image=image)['image']
     label = torch.tensor(np.array(self.labels[idx]), dtype=torch.float32)
     return (image, label)
Beispiel #8
0
 def __init__(self, image_name, image_folder):
     """Single-image dataset: remember the file and prebuild the transform.

     NOTE(review): albumentations ``Resize`` takes (height, width); passing
     ``WIDTH`` first is only correct for a square target — confirm.
     """
     self.image_name = image_name
     self.image_folder = image_folder
     self.num_samples = 1
     self.transform = Compose([
         Resize(WIDTH, HEIGHT),
         Normalize(),
         ToTensor(),
     ])
 def __init__(self, root, df, mean, std):
     """Index the unique image ids found in ``df`` and build the transform."""
     self.root = root
     # 'ImageId_ClassId' looks like '<file>_<class>'; keep only the file part.
     df['ImageId'] = df['ImageId_ClassId'].apply(lambda x: x.split('_')[0])
     self.fnames = df['ImageId'].unique().tolist()
     self.num_samples = len(self.fnames)
     self.transform = Compose([
         Normalize(mean=mean, std=std, p=1),
         ToTensor(),
     ])
Beispiel #10
0
 def predict_on_batch(self, model, batch):
     """Run ``model`` over ``batch`` and yield per-image predictions.

     :param model: torch module; moved to ``self.device`` and set to eval.
     :param batch: ``np.ndarray`` of images (HWC stack expected by ToTensor).
     :raises TypeError: if ``batch`` is not an ``np.ndarray``. The original
         code left ``data`` unbound in that case and crashed later with a
         confusing ``UnboundLocalError``; fail fast with a clear message.
     :yields: each prediction as a channels-last numpy array.
     """
     model = model.to(self.device).eval()
     if isinstance(batch, np.ndarray):
         data = ToTensor()(image=batch)['image'].permute(1, 0, 2, 3)
     else:
         raise TypeError(
             f"predict_on_batch expects np.ndarray, got {type(batch).__name__}")
     with torch.no_grad():
         images = data.to(self.device)
         predictions = model(images)
         for prediction in predictions:
             # channels-first -> channels-last for downstream consumers
             prediction = np.moveaxis(self.to_numpy(prediction), 0, -1)
             yield prediction
 def __init__(self, root, df, size, mean, std, tta=4):
     """Inference dataset: record file names and the eval-time transform."""
     self.root = root
     self.size = size
     self.fnames = list(df["ImageId"])
     self.num_samples = len(self.fnames)
     # Normalize first, then resize to a square of side ``size``.
     self.transform = Compose([
         Normalize(mean=mean, std=std, p=1),
         Resize(size, size),
         ToTensor(),
     ])
Beispiel #12
0
def get_transforms(phase):
    """Minimal pipeline: horizontal flip for training, then normalize/tensorize."""
    steps = [HorizontalFlip()] if phase == "train" else []
    steps += [Normalize(), ToTensor()]
    return Compose(steps)
 def __init__(self, root, df, size, mean, std, tta=4):
     """Inference dataset over the ids listed in ``df['ImageId']``."""
     self.root = root
     self.size = size
     self.fnames = list(df["ImageId"])
     self.num_samples = len(self.fnames)
     self.transform = Compose([
         Normalize(mean=mean, std=std, p=1),
         Resize(size, size),
         ToTensor(),
     ])
Beispiel #14
0
def get_transforms(phase, mean, std):
    """Horizontal-flip-only augmentation for training; normalize everywhere."""
    steps = [HorizontalFlip(p=0.5)] if phase == "train" else []
    steps += [Normalize(mean=mean, std=std, p=1), ToTensor()]
    return Compose(steps)
Beispiel #15
0
 def load(self, path="/model"):
     """Load the scripted model and tag mapping from *path*.

     Prepares a 224px letterbox transform (longest side scaled, then padded),
     loads the TorchScript model and builds tag<->class lookup tables.
     """
     self.transform = Compose([
         LongestMaxSize(max_size=224),
         PadIfNeeded(224, 224, border_mode=cv2.BORDER_CONSTANT),
         Normalize(),
         ToTensor(),
     ])
     self.model = torch.jit.load(os.path.join(path, "model.pth"))
     with open(os.path.join(path, "tag2class.json")) as fin:
         self.tag2class = json.load(fin)
     self.class2tag = {cls: tag for tag, cls in self.tag2class.items()}
     logging.debug(f"class2tag: {self.class2tag}")
    def get_transforms(stage: str = None, mode: str = None, size: int = 224):
        """Return train or eval transforms resized to ``size`` x ``size``.

        ``stage`` is accepted for interface compatibility but unused here.
        """
        if mode == 'train':
            transform_list = [
                Resize(size, size),
                OneOf([
                    RandomBrightnessContrast(brightness_limit=0.5,
                                             contrast_limit=0.5),
                    RandomGamma(),
                    CLAHE(),
                ], p=0.5),
                ShiftScaleRotate(rotate_limit=45),
                OneOf([Blur(), MotionBlur()], p=0.5),
                # Heavy JPEG artefacts harden the model against compression noise.
                JpegCompression(quality_lower=10),
                Normalize(),
                ToTensor(),
            ]
        else:
            transform_list = [Resize(size, size), Normalize(), ToTensor()]
        return Compose(transforms=transform_list)
Beispiel #17
0
    def __getitem__(self, index):
        """Return ``{'image': tensor, 'mask': one-hot label}`` for *index*."""
        if isinstance(index, torch.Tensor):
            index = index.item()
        row = self.ids.iloc[index]

        image = self.transform(image=cv2.imread(row['path']))['image']

        raw_label = np.array([self.mapping[row['label']]])
        label = ToTensor()(image=raw_label)['image']
        label = onehot(label, self.num_classes)

        return {'image': image, 'mask': label}
Beispiel #18
0
def prepare_image(frame):
    """Transform one raw video frame into a normalized 224x224 tensor.

    :param frame: one raw frame from video
    :return: processed image (transformed for NN input)
    """
    pipeline = Compose([
        Resize(224, 224),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensor(),
    ])
    return pipeline(image=frame)['image']
Beispiel #19
0
 def __init__(self,
              root,
              df,
              mean=(0.485, 0.456, 0.406),
              std=(0.229, 0.224, 0.225),
              TTA=False):
     """Dataset over png ids derived from ``df['ID']``.

     :param TTA: when truthy, prepend a horizontal flip for test-time
         augmentation. (The original compared ``TTA == True``; plain
         truthiness is the idiomatic check and accepts equivalent truthy
         values, while ``TTA=False`` behaves exactly as before.)
     """
     self.root = root
     # 'ID' looks like '<study>_<slice>_<class>'; keep '<study>_<slice>.png'.
     df['imageid'] = df['ID'].apply(
         lambda x: x.split('_')[0] + '_' + x.split('_')[1] + '.png')
     self.fnames = df['imageid'].unique().tolist()
     self.num_samples = len(self.fnames)
     # Build the pipeline once; only the optional flip differs with TTA.
     steps = [HorizontalFlip()] if TTA else []
     steps += [
         albu.Resize(512, 512),
         Normalize(mean=mean, std=std, p=1),
         ToTensor(),
     ]
     self.transform = Compose(steps)
Beispiel #20
0
    def __getitem__(self, index):
        """Return one (possibly mixup-blended) sample as {'image', 'mask'}.

        With mixup enabled, a second sample with a *different* label is drawn
        via ``self.sampler`` and both image and one-hot label are blended with
        a Beta(alpha, alpha) coefficient.
        """
        # DataLoader may hand us a tensor index; unwrap to a plain int.
        if isinstance(index, torch.Tensor):
            index = index.item()

        line_1 = self.ids.iloc[index]
        label_1 = np.array([self.mapping[line_1['label']]])
        image_1 = cv2.imread(line_1['path'])

        # NOTE(review): mixup fires when the draw EXCEEDS mixup_p, i.e. with
        # probability 1 - mixup_p. Confirm this direction is intended; the
        # conventional formulation is `< self.mixup_p`.
        if self.mixup and np.random.uniform(0, 1) > self.mixup_p:
            # Resample until the partner has a different class label.
            # NOTE(review): array != array comparison — fine only because the
            # labels are single-element arrays.
            while True:
                idx = next(iter(
                    self.sampler)).item()  # generate idx with self.sampler
                line_2 = self.ids.iloc[idx]
                label_2 = np.array([self.mapping[line_2['label']]])
                if label_1 != label_2:
                    break

            image_2 = cv2.imread(line_2['path'])
            image_1 = self.transform(image=image_1)['image']
            image_2 = self.transform(image=image_2)['image']

            label_1 = ToTensor()(image=label_1)['image']
            label_2 = ToTensor()(image=label_2)['image']
            label_1 = onehot(label_1, self.num_classes)
            label_2 = onehot(label_2, self.num_classes)

            # Convex combination of both images and both one-hot labels.
            _lambda = np.random.beta(self.alpha, self.alpha)
            images = _lambda * image_1 + (1 - _lambda) * image_2
            labels = _lambda * label_1 + (1 - _lambda) * label_2

        else:
            # Plain path: transform the single sample and one-hot its label.
            images = self.transform(image=image_1)['image']
            label_1 = ToTensor()(image=label_1)['image']
            labels = onehot(label_1, self.num_classes)

        return {'image': images, 'mask': labels}
Beispiel #21
0
def get_transforms(phase, mean, std):
    """Flips for training plus a fixed 256x320 random crop for every phase."""
    steps = []
    if phase == "train":
        steps += [
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
        ]
    steps += [
        albu.RandomCrop(256, 320, always_apply=True),
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
Beispiel #22
0
def create_transforms(transform_list):
    """Parse transform configurations and compose them with albumentations.

    :param transform_list: sequence of single-item mappings
        ``{transform_type: transform_params}``
    :return: an ``albu.Compose`` of the parsed transforms ending in ``ToTensor()``
    """
    parsed = []
    for config in transform_list:
        # Strict unpack: each config must hold exactly one (type, params) pair.
        (trans_type, trans_params), = config.items()
        parsed.append(parse_transform(trans_type, trans_params))
    parsed.append(ToTensor())
    return albu.Compose(parsed)
Beispiel #23
0
 def load(self, path="/model"):
     """Load the TorchScript model and tag maps; build a 512px letterbox transform."""
     image_size = 512
     self.transform = Compose([
         LongestMaxSize(max_size=image_size),
         PadIfNeeded(image_size,
                     image_size,
                     border_mode=cv2.BORDER_CONSTANT),
         Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225),
                   always_apply=True),
         ToTensor(),
     ])
     self.model = torch.jit.load(os.path.join(path, "model.pth"))
     with open(os.path.join(path, "tag2class.json")) as fin:
         self.tag2class = json.load(fin)
     self.class2tag = {cls: tag for tag, cls in self.tag2class.items()}
     logging.debug(f"class2tag: {self.class2tag}")
Beispiel #24
0
 def __getitem__(self, idx):
     """Read image *idx*, apply flips + normalization, return (image, label)."""
     fname = self.files[idx]
     full_path = os.path.join(
         ('G:\\SteelDetection\\' + self.mode + '_images'), fname)
     image = cv2.imread(full_path)
     pipeline = Compose([
         VerticalFlip(p=0.5),
         HorizontalFlip(p=0.5),
         # ImageNet statistics
         Normalize(mean=(0.485, 0.456, 0.406),
                   std=(0.229, 0.224, 0.225),
                   p=1),
         ToTensor(),
     ])
     image = pipeline(image=image)['image']
     return (image, self.labels[idx])
Beispiel #25
0
def make_transforms(phase,
                    mean=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225)):
    """Flip/rotate/contrast jitter for training; 512px resize for all phases."""
    steps = []
    if phase == "train":
        steps += [
            albu.HorizontalFlip(p=0.5),
            albu.Rotate(limit=15),
            albu.RandomContrast(limit=0.2),
            albu.RandomBrightness(limit=0.2),
        ]
    steps += [
        albu.Resize(512, 512),
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
Beispiel #26
0
def get_transforms(phase, mean, std):
    """Training augmentation: flip, always-on shift/scale, mild noise and one
    photometric transform; all phases normalize and tensorize."""
    steps = []
    if phase == "train":
        steps += [
            HorizontalFlip(p=0.5),
            albu.ShiftScaleRotate(scale_limit=0.5,
                                  rotate_limit=0,
                                  shift_limit=0.1,
                                  p=1,
                                  border_mode=0),
            albu.IAAAdditiveGaussianNoise(p=0.2),
            # At most one photometric transform, applied 90% of the time.
            albu.OneOf(
                [
                    albu.CLAHE(p=1),
                    albu.RandomBrightnessContrast(p=1),
                    albu.RandomGamma(p=1),
                ],
                p=0.9,
            ),
        ]
    steps += [
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
Beispiel #27
0
def get_transforms(phase, mean, std):
    """Independent horizontal/vertical flips plus shift/scale/rotate for training."""
    steps = []
    if phase == "train":
        steps += [
            # Inner Compose applies both flips independently at p=0.5 each.
            Compose([
                HorizontalFlip(p=0.5),
                VerticalFlip(p=0.5),
            ], p=1.0),
            ShiftScaleRotate(shift_limit=0.0625,
                             scale_limit=0.1,
                             rotate_limit=20,
                             p=0.5),
        ]
    steps += [
        Normalize(mean=mean, std=std, p=1),
        ToTensor(),
    ]
    return Compose(steps)
Beispiel #28
0
def post_transform():
    """Normalize with the module-level ImageNet statistics and tensorize."""
    steps = [
        Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ToTensor(),
    ]
    return Compose(steps)
Beispiel #29
0
def post_transforms():
    """Default-normalize and tensorize; no augmentation."""
    steps = [Normalize(), ToTensor()]
    return Compose(steps)
def test_torch_to_tensor_augmentations(image, mask):
    """ToTensor must emit float32 torch tensors for both image and mask."""
    transformed = ToTensor()(image=image, mask=mask)
    assert transformed['image'].dtype == torch.float32
    assert transformed['mask'].dtype == torch.float32