Example #1
    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.fg_fns = os.listdir(Path(root_dir) / 'fg')
        self.bg_fns = os.listdir(Path(root_dir) / 'bg')
        self.rc320 = RandomCrop(height=320, width=320, always_apply=True)
        self.rc480 = RandomCrop(height=480, width=480, always_apply=True)
        self.rc640 = RandomCrop(height=640, width=640, always_apply=True)
        self.resize = Resize(height=320, width=320, always_apply=True)
        self.flip = Flip(p=.75)
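A brief, hypothetical usage sketch (not the original __getitem__): each albumentations transform built above is called with keyword arguments and returns a dict, so one of the pre-built crops and the flip would typically be applied like this. The dummy array stands in for an image loaded via fg_fns.

import numpy as np
from albumentations import Flip, RandomCrop

rc320 = RandomCrop(height=320, width=320, always_apply=True)
flip = Flip(p=.75)

fg = np.random.randint(0, 256, (600, 800, 3), dtype=np.uint8)  # stand-in for a loaded 'fg' image
fg = rc320(image=fg)['image']  # -> (320, 320, 3)
fg = flip(image=fg)['image']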
Example #2
    def __im_preproc(self, image: np.ndarray) -> np.ndarray:
        image = cv2.resize(image, tuple(self.im_shape))
        blur = Blur(p=0.9, blur_limit=7)
        hsv = HueSaturationValue(p=0.9)
        crop = RandomCrop(p=1, height=self.im_shape[1], width=self.im_shape[0])

        image = blur.apply(image, **blur.get_params())
        image = hsv.apply(image, **hsv.get_params())
        image = crop.apply(image, **crop.get_params())
        if self.grayscale:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # cvtColor drops the channel axis, so restore it only for grayscale output
            image = image[:, :, np.newaxis]
        return image
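For comparison, a minimal sketch of the more common calling convention: wrapping the same transforms in a Compose and invoking it on a keyword dict instead of calling apply()/get_params() by hand. The 256x256 crop size and the dummy image are placeholders for self.im_shape and the real input.

import numpy as np
from albumentations import Blur, Compose, HueSaturationValue, RandomCrop

preproc = Compose([
    Blur(p=0.9, blur_limit=7),
    HueSaturationValue(p=0.9),
    RandomCrop(height=256, width=256, p=1),  # placeholder for self.im_shape
], p=1.0)

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # dummy BGR image
image = preproc(image=image)['image']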
Example #3
    def augment(self):  # p=.5
        return Compose([
            RandomRotate90(),
            Flip(),
            Transpose(),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=.2),
            OneOf([
                OpticalDistortion(p=0.3),
                GridDistortion(p=.1),
                IAAPiecewiseAffine(p=0.3),
            ], p=0.2),
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
            ], p=0.2),
            RandomCrop(256, 256, p=1),
            OneOf([
                MedianBlur(blur_limit=3, p=.1),
                Blur(blur_limit=3, p=.1),
            ], p=0.2),
            OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
                RandomContrast(),
                RandomBrightness(),
            ], p=0.3),
            HueSaturationValue(p=0.7),
        ], p=1)
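A small self-contained sketch (simplified from the pipeline above) of how such a Compose is consumed with an image/mask pair, as done later in Example #9: passing both keys keeps the spatial transforms synchronized between image and mask.

import numpy as np
from albumentations import Compose, HorizontalFlip, RandomCrop

aug = Compose([RandomCrop(256, 256, p=1), HorizontalFlip(p=0.5)], p=1)
img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # dummy image
mask = np.random.randint(0, 2, (512, 512), dtype=np.uint8)      # dummy binary mask

out = aug(image=img, mask=mask)
img_aug, mask_aug = out['image'], out['mask']  # identically cropped and flipped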
Example #4
    def __init__(self):
        self.transforms = Compose([
            PadIfNeeded(40, 40),
            RandomCrop(32, 32),
            HorizontalFlip(p=.5),
            Cutout(8, 8),
            Normalize(mean=(0.4914, 0.4822, 0.4465),
                      std=(0.2023, 0.1994, 0.2010),
                      max_pixel_value=255.0,
                      always_apply=True,
                      p=1.0),
            ToTensor()
        ])
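The mean/std above are the commonly used CIFAR-10 statistics, and PadIfNeeded(40, 40) followed by RandomCrop(32, 32) reproduces the classic pad-and-crop CIFAR augmentation. A self-contained sketch of the same pipeline applied to a dummy image (module-level names here are illustrative only):

import numpy as np
from albumentations import Compose, Cutout, HorizontalFlip, Normalize, PadIfNeeded, RandomCrop
from albumentations.pytorch.transforms import ToTensor

cifar_transforms = Compose([
    PadIfNeeded(40, 40),   # pad 32x32 inputs up to 40x40 ...
    RandomCrop(32, 32),    # ... then randomly crop back to 32x32
    HorizontalFlip(p=.5),
    Cutout(8, 8),
    Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),
    ToTensor(),
])

img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # dummy CIFAR-sized image
tensor = cifar_transforms(image=img)['image']                 # float tensor of shape (3, 32, 32)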
Example #5
def rand_aug():
    return Compose([
        RandomRotate90(p=0.2),
        GaussNoise(p=0.2),
        HorizontalFlip(p=0.2),
        RandomCrop(height=224, width=224, p=0.2),  # RandomCrop requires height and width; 224 is an assumed placeholder
        HueSaturationValue(p=0.2),
        RandomBrightness(p=0.2),
        RandomContrast(p=0.2),
        RandomGamma(p=0.2),
        GaussianBlur(p=0.2),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0),
    ])
Example #6
def get_transform(image, size=512, crop_size=256):
    transform = Compose([
        Resize(size, size, interpolation=cv2.INTER_AREA),
        RandomCrop(crop_size, crop_size),
        Cutout(num_holes=4),
        HueSaturationValue(hue_shift_limit=0.2,
                           sat_shift_limit=0.2,
                           val_shift_limit=0.2),
        RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
        Flip(),
        Normalize(mean=[0.3835, 0.3737, 0.3698], std=[1.0265, 1.0440, 1.0499]),
        ToTensorV2()
    ])
    image_transform = transform(image=image)['image']

    return image_transform
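A brief usage sketch of get_transform above with a dummy input (sizes left at their defaults):

import numpy as np

img = np.random.randint(0, 256, (768, 1024, 3), dtype=np.uint8)  # dummy BGR image
tensor = get_transform(img)  # float tensor of shape (3, 256, 256) after resize, crop and normalization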
Example #7
    def __init__(self,
                 df,
                 df_controls,
                 stats_experiments,
                 img_dir,
                 mode,
                 verbose=True,
                 channels=[1, 2, 3, 4, 5, 6]):

        self.records = deepcopy(df).to_records(index=False)

        df_conts = deepcopy(df_controls)
        mask = (df_conts['well_type'] == 'negative_control') & \
               (df_conts['well'] == 'B02')
        df_neg_conts = df_conts[mask]
        self.records_neg_conts = df_neg_conts.to_records(index=False)
        mask = (df_conts['well_type'] == 'positive_control')
        df_pos_conts = df_conts[mask]
        self.records_pos_conts = df_pos_conts.to_records(index=False)

        self.stats_exps = stats_experiments
        self.mode = mode
        self.channels = channels
        self.img_dir = img_dir
        self.len = df.shape[0]
        self.transform_train = Compose([
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            ShiftScaleRotate(
                shift_limit=0, scale_limit=0, rotate_limit=180, p=1.0),
            RandomCrop(height=364, width=364, p=1.0)
        ],
                                       p=1.0)
        self.transform_val = Compose(
            [CenterCrop(height=364, width=364, p=1.0)], p=1.0)

        if verbose:
            print()
        self.imgs = self._load_imgs(self.records,
                                    desc='Images',
                                    verbose=verbose)
        self.imgs_neg_conts = self._load_imgs(self.records_neg_conts,
                                              desc='Negative controls',
                                              verbose=verbose)
        self.imgs_pos_conts = self._load_imgs(self.records_pos_conts,
                                              desc='Positive controls',
                                              verbose=verbose)
Example #8
    def transform(self, audio: Audio) -> t.Tuple[t.Any, t.Any]:
        raw = audio.spectrogram.copy()
        noised = Noise()(raw.copy())
        if self.mode == "train":
            _, w = raw.shape
            if w < self.resolution[1]:
                repeat = self.resolution[1] // w + 1
                noised = np.concatenate([noised] * repeat, axis=1)
                raw = np.concatenate([raw] * repeat, axis=1)

            resized = RandomCrop(height=self.resolution[0],
                                 width=self.resolution[1])(image=noised,
                                                           mask=raw)
            noised, raw = resized["image"], resized["mask"]
            noised, raw = HFlip1d(p=0.5)(noised, raw)
            noised, raw = VFlip1d(p=0.5)(noised, raw)
        return noised, raw
Example #9
    def __getitem__(self, i):
        img = self.image_loader(self.img_files[i])

        if self.mode in ['train', 'val']:
            mask = self.mask_loader(self.mask_files[i])
            data = {"image": img, "mask": mask}
            if self.augment:
                augmented = self.transforms.augment()(**data)
            else:
                augmented = RandomCrop(256, 256, p=1)(**data)
            img, mask = augmented["image"], augmented["mask"]
            #mask = np.asarray(self.transforms.resize_to_final(mask))
            #print(np.unique(mask))

            #img = np.asarray(self.transforms.resize_to_final(img))
            img = img_to_tensor(img)
            img = self.transforms.normalize(img)

            return img, torch.from_numpy(np.expand_dims(mask, 0)).float()
        else:
            #img = np.asarray(self.transforms.resize_to_final(img))
            img = img_to_tensor(img)
            img = self.transforms.normalize(img)
            return img
Example #10
from albumentations import Compose  # Compose is exported from the package root, not from augmentations.transforms
from albumentations.pytorch.transforms import ToTensor
from albumentations.augmentations.transforms import (RandomRotate90, GaussNoise, HorizontalFlip, RandomCrop, HueSaturationValue, RandomBrightness, RandomContrast, RandomGamma, GaussianBlur, Normalize)

# Data Upload
print('\n[Phase 1] : Data Preparation')
transform_train = Compose([
    Compose([
        RandomRotate90(p=0.2),
        GaussNoise(p=0.2),
        HorizontalFlip(p=0.2),
        RandomCrop(height=224, width=224, p=0.2),  # RandomCrop requires height and width; 224 is an assumed placeholder
        HueSaturationValue(p=0.2),
        RandomBrightness(p=0.2),
        RandomContrast(p=0.2),
        RandomGamma(p=0.2),
        GaussianBlur(p=0.2)
    ], p=1.0),
    Normalize(mean=mean[dataset], std=std[dataset], p=1.0),  # normalize while the image is still a numpy array
    ToTensor()
], p=1.0)

transform_test = Compose([
    Normalize(mean=mean[dataset], std=std[dataset], p=1.0),
    ToTensor(),
])
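A brief usage sketch (assuming mean[dataset] and std[dataset] are defined earlier in the script): both pipelines are called with keyword arguments and return the converted tensor under the 'image' key. The dummy image size is a placeholder large enough for the 224x224 crop used above.

import numpy as np

img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # dummy image
train_img = transform_train(image=img)['image']  # augmented, normalized CHW tensor
test_img = transform_test(image=img)['image']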