Example #1
0
def get_transforms(phase, mean, std):
    """Build an albumentations pipeline for the given phase.

    Training adds random flips plus a shift/scale/rotate; every phase
    ends with normalization and tensor conversion.
    """
    transforms = []
    if phase == "train":
        flips = Compose([
            HorizontalFlip(p=0.5),  # only horizontal flip as of now
            VerticalFlip(p=0.5),
        ], p=1.0)
        transforms.append(flips)
        transforms.append(
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1,
                             rotate_limit=20, p=0.5))
        # GaussNoise() was left disabled in the original.
    transforms.append(Normalize(mean=mean, std=std, p=1))
    transforms.append(ToTensor())
    return Compose(transforms)
Example #2
0
def get_transforms(phase, size, mean, std):
    """Build an albumentations pipeline for the given phase.

    NOTE(review): `size` is currently unused — the Resize step is
    commented out below; confirm this is intentional.
    """
    transforms = []
    if phase == "train":
        # HorizontalFlip() was left disabled in the original.
        transforms.append(
            ShiftScaleRotate(
                shift_limit=0,  # no resizing
                scale_limit=0.1,
                rotate_limit=20,  # rotate
                p=0.5,
                border_mode=cv2.BORDER_CONSTANT))
        # GaussNoise() was left disabled in the original.
    transforms.append(Normalize(mean=mean, std=std, p=1))
    # Resize(size, size) was left disabled in the original.
    transforms.append(ToTensor())

    return Compose(transforms)
Example #3
0
def get_transforms(phase):
    """Segmentation pipeline for 1400x2100 images.

    Training applies heavy spatial/intensity augmentation; every phase
    then resizes back to the original resolution, normalizes with
    ImageNet statistics, and converts to a tensor.
    """
    original_height = 1400
    original_width = 2100
    transforms = []
    if phase == "train":
        # Either random-crop-and-resize or pad to full resolution.
        crop_or_pad = OneOf([
            RandomSizedCrop(min_max_height=(50, 101),
                            height=original_height,
                            width=original_width,
                            p=0.5),
            PadIfNeeded(min_height=original_height,
                        min_width=original_width,
                        p=0.5),
        ], p=1)
        # One of three geometric distortions, applied 80% of the time.
        distortion = OneOf([
            ElasticTransform(p=0.5,
                             alpha=120,
                             sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GridDistortion(p=0.5),
            OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
        ], p=0.8)
        transforms += [
            crop_or_pad,
            VerticalFlip(p=0.5),
            # RandomRotate90(p=0.5),
            distortion,
            CLAHE(p=0.8),
            RandomBrightnessContrast(p=0.8),
            RandomGamma(p=0.8),
        ]
    transforms += [
        Resize(height=original_height,
               width=original_width,
               interpolation=cv2.INTER_NEAREST),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]
    return Compose(transforms)
Example #4
0
def post_transform():
    """Normalize with ImageNet statistics, then convert to tensor."""
    steps = [
        Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ToTensor(),
    ]
    return Compose(steps)
Example #5
0
def post_transforms():
    """Normalize with library defaults, then convert to tensor."""
    steps = [Normalize(), ToTensor()]
    return Compose(steps)
def test_torch_to_tensor_augmentations(image, mask):
    """Check ToTensor produces float32 image and mask tensors."""
    transform = ToTensor()
    result = transform(image=image, mask=mask)
    assert result['image'].dtype == torch.float32
    assert result['mask'].dtype == torch.float32
Example #7
0
def post_transform():
    """Normalize with ImageNet mean/std, then convert to tensor."""
    pipeline = [
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensor(),
    ]
    return Compose(pipeline)
Example #8
0
    def __getitem__(self, idx):
        """Load one sample by index.

        Train mode: slices the image/mask into `num_crop` horizontal
        crops, augments each crop, and returns (crops, per-crop labels).
        Val mode: returns (normalized image tensor, label).
        NOTE(review): any other mode falls through and returns None.
        """
        file = self.files[idx]
        # BUG FIX: the original condition was `self.mode == 'train' or 'val'`,
        # which is always truthy ('val' is a non-empty string), so even
        # test-mode samples were read from the train_images directory.
        if self.mode in ('train', 'val'):
            file_name = 'train'
        else:
            file_name = 'test'
        file_path = os.path.join(os.path.join(PATH, file_name + '_images'),
                                 file)

        image = cv2.imread(file_path)
        mask = rleToMask(self.masks[idx])
        # ImageNet channel statistics.
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
        train_aug = Compose([
            OneOf([
                ShiftScaleRotate(),
                VerticalFlip(p=0.8),
                HorizontalFlip(p=0.8),
            ],
                  p=0.6),
            OneOf([
                RandomBrightnessContrast(),
                MotionBlur(p=0.5),
                MedianBlur(blur_limit=3, p=0.5),
                Blur(blur_limit=3, p=0.5),
            ],
                  p=0.6),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.6),
            Normalize(mean=mean, std=std, p=1)
            # ToTensor()
        ])
        val_aug = Compose([Normalize(mean=mean, std=std, p=1), ToTensor()])
        if self.mode == 'train':
            crop_images, crop_masks, new_crop_images = [], [], []
            # One 4-class presence label per crop.
            crop_labels = np.zeros((num_crop, 4))
            # Slice the 1600-px-wide image (and mask) into vertical strips.
            # NOTE(review): image is sliced on axis 1 but mask on axis 2 —
            # mask appears to be channel-first; confirm against rleToMask.
            for i in range(num_crop):
                crop_images.append(image[:, i * (1600 // num_crop):(i + 1) *
                                         (1600 // num_crop), :])
                crop_masks.append(mask[:, :, i * (1600 // num_crop):(i + 1) *
                                       (1600 // num_crop)])
            # A class is "present" in a crop iff its mask channel is non-zero.
            for i in range(num_crop):
                for ch in range(4):
                    if (crop_masks[i][ch, :, :] == np.zeros(
                        (crop_masks[i][ch, :, :].shape))).all():
                        crop_labels[i][ch] = 0
                    else:
                        crop_labels[i][ch] = 1
            for num in range(len(crop_images)):
                if self.mode == 'train':
                    augmented = train_aug(image=crop_images[num])
                    new_crop_images.append(augmented['image'])
                crop_labels[num] = np.array(crop_labels[num])
                crop_labels[num] = torch.tensor(crop_labels[num],
                                                dtype=torch.float32)
            # HWC -> CHW for each crop.
            new_crop_images = np.transpose(np.array(new_crop_images),
                                           (0, 3, 1, 2))
            return new_crop_images, crop_labels
        elif self.mode == 'val':
            augmented = val_aug(image=image)
            image = augmented['image']
            label = np.array(self.labels[idx])
            label = torch.tensor(label, dtype=torch.float32)
            return (image, label)
Example #9
0
def post_transform():
    """Single-channel ImageNet-style normalization, then tensor conversion."""
    # (0.485) / (0.229) in the original are plain floats, not tuples.
    steps = [Normalize(mean=0.485, std=0.229), ToTensor()]
    return Compose(steps)
# Training-time augmentation: flips/rotations, mild shift-scale-rotate,
# one random photometric tweak, then normalize/resize/to-tensor.
# NOTE(review): depends on a module-level `args.image_size` — defined elsewhere.
augment_transform = Compose([
    HorizontalFlip(p=.5),
    VerticalFlip(p=.5),
    RandomRotate90(p=.5),
    ShiftScaleRotate(shift_limit=0, scale_limit=.1, rotate_limit=0, p=.5),
    OneOf([
        RandomContrast(limit=.2),
        RandomGamma(gamma_limit=(80, 120)),
        RandomBrightness(limit=.2)
    ],
          p=.5),
    Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
    # CenterCrop(IMG_CROP_SIZE, IMG_CROP_SIZE, p=.5),
    Resize(args.image_size, args.image_size),
    ToTensor()
])

# Lighter pipeline: flips/rotations only, plus normalize/resize/to-tensor.
base_transform = Compose([
    HorizontalFlip(p=.5),
    VerticalFlip(p=.5),
    RandomRotate90(p=.5),
    Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
    Resize(args.image_size, args.image_size),
    ToTensor()
])


class APTOSDataset(Dataset):
    def __init__(self,
                 df,
def post_transforms(normalize=True):
    """Build the final conversion pipeline.

    Args:
        normalize: when truthy, prepend a default Normalize step.

    Returns:
        A Compose of [Normalize?, ToTensor].
    """
    transforms = [ToTensor()]
    # PEP 8: don't compare a boolean flag with `is True` — test truthiness.
    if normalize:
        transforms = [Normalize()] + transforms
    return Compose(transforms)