def get_transforms(phase, mean, std):
    """Build the albumentations pipeline for one phase.

    :param phase: "train" adds flips and brightness/contrast jitter;
        any other value yields normalize + tensor conversion only.
    :param mean: per-channel mean passed to Normalize
    :param std: per-channel std passed to Normalize
    :return: composed transform pipeline
    """
    ops = []
    if phase == "train":
        # Only flip-based geometric augmentation as of now.
        ops.append(Compose([HorizontalFlip(p=0.5), VerticalFlip(p=0.5)], p=1.0))
        ops.append(
            RandomBrightnessContrast(brightness_limit=0.05,
                                     contrast_limit=0.1,
                                     p=0.7))
    ops.append(Normalize(mean=mean, std=std, p=1))
    ops.append(ToTensor())
    return Compose(ops)
def get_transforms():
    """Quick utility function to return the augmentations for the
    training/validation generators.

    :return: tuple (train_pipeline, validation_pipeline)
    """
    photometric = OneOf(
        [RandomContrast(), RandomGamma(), RandomBrightness()], p=0.3)
    geometric = OneOf(
        [
            ElasticTransform(alpha=120,
                             sigma=120 * 0.05,
                             alpha_affine=120 * 0.03),
            GridDistortion(),
            OpticalDistortion(distort_limit=2, shift_limit=0.5),
        ],
        p=0.3)
    aug_train = Compose(
        [HorizontalFlip(p=0.5), photometric, geometric,
         ToFloat(max_value=1)],
        p=1)
    aug_val = Compose([ToFloat(max_value=1)], p=1)
    return aug_train, aug_val
def __init__(self, x_dir, y_dir):
    """Instantiate .npy file dataset.

    :param x_dir: (str) observation directory
    :param y_dir: (str) label directory
    """
    self.x_dir = x_dir
    self.y_dir = y_dir
    self.x_format = check_for_file_with_supported_format(self.x_dir)
    self.y_format = check_for_file_with_supported_format(self.y_dir)
    # Sorting keeps observations and labels aligned by filename order.
    self.x_list = np.sort(os.listdir(x_dir))
    self.y_list = np.sort(os.listdir(y_dir))
    flip_and_rotate = [
        VerticalFlip(p=.33),
        HorizontalFlip(p=.33),
        Rotate(p=.33),
    ]
    self.augmenter = Augmenter(list_of_transforms=flip_and_rotate, p=.9)
def __init__(self, num_classes, input_size=None):
    """Set up CamVid train/val transform lists.

    :param num_classes: number of segmentation classes, forwarded to ToTensor
    :param input_size: optional (height, width); when given, a Resize is
        prepended to both the train and the validation pipelines
    """
    super(CamvidDataTransform, self).__init__()
    random.seed(1000)  # fixed seed for reproducible augmentation
    if input_size is not None:
        height, width = input_size
        self._train_transform_list.append(Resize(height, width))
        self._val_transform_list.append(Resize(height, width))
    train_extra = [
        HorizontalFlip(p=0.5),
        Rotate(p=0.5, limit=(-10, 10)),
        GaussNoise(p=0.5),
        RandomBrightnessContrast(p=0.5),
        ToTensor(num_classes=num_classes),
    ]
    # Rebind (rather than extend) to mirror the base-class list handling.
    self._train_transform_list = self._train_transform_list + train_extra
    self._val_transform_list = self._val_transform_list + [
        ToTensor(num_classes=num_classes),
    ]
    self._initialize_transform_dict()
def train_augments(augments_config, mean, std, image_size):
    """Compose the training augmentations from a configuration dict.

    :param augments_config: dict of probabilities/limits keyed by transform
    :param mean: per-channel mean for Normalize
    :param std: per-channel std for Normalize
    :param image_size: (height, width) of the input images
    :return: composed pipeline, applied with probability 1
    """
    box_scale = min(image_size)
    aspect = image_size[1] / image_size[0]
    rescale = RandomResizedCrop(
        image_size[0],
        image_size[1],
        scale=(1, 1 + augments_config["RESCALE_PERCENTAGE"]),
        ratio=(aspect, aspect),  # fixed ratio keeps the original aspect
        interpolation=INTER_LINEAR,
        p=augments_config["RESCALE_PR"])
    elastic = ElasticTransform(alpha=box_scale,
                               sigma=box_scale * 0.05,
                               alpha_affine=box_scale * 0.03,
                               p=augments_config["ELASTIC_TRANSFORM_PR"])
    return Compose([
        HorizontalFlip(p=augments_config["HORIZONTAL_FLIP_PR"]),
        VerticalFlip(p=augments_config["VERTICAL_FLIP_PR"]),
        rescale,
        elastic,
        RandomGamma(gamma_limit=(80, 120), p=augments_config["GAMMA_PR"]),
        Normalize(max_pixel_value=1.0,
                  mean=mean,
                  std=std,
                  p=augments_config["NORMALIZE_PR"]),
    ], p=1)
def get_transforms(*, data):
    """Return the pipeline for the requested split.

    :param data: either 'train' (flips + normalize + tensor) or
        'valid' (normalize + tensor only)
    """
    assert data in ('train', 'valid')
    common = [
        Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
        ToTensorV2(),
    ]
    if data == 'train':
        return Compose([HorizontalFlip(p=0.5), VerticalFlip(p=0.5)] + common)
    return Compose(common)
def prepare_data(self):
    """Load the PANDA training csv, drop images without a mask, split into
    train/validation, and build the two PandaDataset instances.

    NOTE(review): the dataset root is hard-coded to a local path — consider
    making it configurable.
    """
    # Plain string: the original used an f-string with no placeholders.
    root_path = '/home/nvme/Kaggle/prostate-cancer-grade-assessment/'
    df = pd.read_csv(root_path + 'train.csv')
    # Only about 100 images in the dataset have no mask, so just ignore
    # them for training.
    mask_present = [
        os.path.isfile(
            os.path.join(root_path, 'train_label_masks', idx + '_mask.tiff'))
        for idx in df['image_id']
    ]
    df = df[mask_present]
    train_df, validation_df = train_test_split(df, test_size=0.1)
    # Augmentation is applied to the training set only.
    transforms = Compose([
        Transpose(p=0.5),
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(p=0.5, border_mode=cv2.BORDER_CONSTANT, value=255)
    ])
    self.train_set = PandaDataset(root_path,
                                  train_df,
                                  level=self.level,
                                  patch_size=self.patch_size,
                                  num_patches=self.num_patches,
                                  use_mask=True,
                                  transforms=transforms)
    self.validation_set = PandaDataset(root_path,
                                       validation_df,
                                       level=self.level,
                                       patch_size=self.patch_size,
                                       num_patches=self.num_patches,
                                       use_mask=True)
def __init__(self, opt, img_mask_dirs, train=True, loader=default_loader):
    """Segmentation dataset: random-crop + flip/rotate augmentation, with
    separate tensor conversion for images (normalized float) and ground
    truth (long tensor with a leading channel axis).

    :param opt: options namespace (uses debug, img_size, crop_sizes)
    :param img_mask_dirs: list of (image, mask) annotation entries
    :param train: whether this is the training split
    :param loader: callable used to load images
    """
    self.annotations = img_mask_dirs
    self.train = train
    self.loader = loader
    if opt.debug:
        # Tiny subset for fast debug runs.
        self.annotations = self.annotations[:8]
    crop_size = opt.img_size
    crop_height_range = opt.crop_sizes
    self.transform_basic = Compose([
        RandomSizedCrop(min_max_height=crop_height_range,
                        height=crop_size,
                        width=crop_size,
                        always_apply=True,
                        p=1.0),
        OneOf([
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            Rotate(p=0.5),
        ], p=1.0),
    ])
    # Masks become (1, H, W) long tensors; values are kept as-is.
    self.gt_transform = transforms.Compose([
        lambda img: np.array(img)[np.newaxis, ...],
        #lambda nd: nd / 255,  # max <= 1
        lambda nd: torch.tensor(nd, dtype=torch.long)
    ])
    # Images are scaled to [0, 1] then normalized with ImageNet stats.
    self.img_transform = transforms.Compose([
        lambda nd: torch.tensor(nd, dtype=torch.float32),
        lambda nd: nd / 255.0,
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    self.opt = opt
def strong_aug(p=.5, config=None):
    """Heavy augmentation pipeline ending in a mandatory 224x224 crop.

    NOTE(review): `p` and `config` are accepted but never used — the outer
    Compose runs with its default probability (1). Forwarding `p` would
    randomly skip the final RandomCrop and break fixed-size batching, so
    confirm intent before changing this.
    """
    noise = OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2)
    blur = OneOf([
        MotionBlur(p=.2),
        MedianBlur(blur_limit=3, p=.1),
        Blur(blur_limit=3, p=.1),
    ], p=0.2)
    distortion = Compose([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.2)
    contrast = OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(),
        IAAEmboss(),
        RandomContrast(),
        RandomBrightness(),
    ], p=0.3)
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        Transpose(p=0.5),
        noise,
        blur,
        ShiftScaleRotate(shift_limit=0.001,
                         scale_limit=0.1,
                         rotate_limit=20,
                         p=.2),
        distortion,
        contrast,
        HueSaturationValue(p=0.3),
        RandomCrop(height=224, width=224, p=1.0),
    ])
def generate_transforms(image_size):
    """Return {"train": ..., "val": ...} augmentation pipelines.

    NOTE(review): `image_size` is currently unused — the Resize entries
    are commented out in both pipelines.
    """
    normalize = Normalize(mean=(0.485, 0.456, 0.406),
                          std=(0.229, 0.224, 0.225),
                          max_pixel_value=255.0,
                          p=1.0)
    brightness_or_contrast = OneOf(
        [RandomBrightness(limit=0.1, p=1),
         RandomContrast(limit=0.1, p=1)])
    blur = OneOf([
        MotionBlur(blur_limit=3),
        MedianBlur(blur_limit=3),
        GaussianBlur(blur_limit=3)
    ], p=0.5)
    train_transform = Compose([
        #Resize(height=image_size[0], width=image_size[1]),
        brightness_or_contrast,
        blur,
        VerticalFlip(p=0.5),
        HorizontalFlip(p=0.5),
        ShiftScaleRotate(
            shift_limit=0.2,
            scale_limit=0.2,
            rotate_limit=20,
            interpolation=cv2.INTER_LINEAR,
            border_mode=cv2.BORDER_REFLECT_101,
            p=1,
        ),
        normalize,
    ])
    val_transform = Compose([
        #Resize(height=image_size[0], width=image_size[1]),
        normalize,
    ])
    return {"train": train_transform, "val": val_transform}
def __init__(self,
             image_height=224,
             image_width=224,
             padding_length=0,
             padding_ratio=0.05,
             horizontal_flip_probability=0.5,
             patch_replacement_probability=0.5,
             random_erasing_probability=0.5):
    """Build the training transformer: optional pad + random crop,
    horizontal flip, grayscale patch replacement, and random erasing.

    :param padding_length: absolute padding in pixels (max with ratio)
    :param padding_ratio: padding as a fraction of each image dimension
    """
    augmentations = []
    # Pad the image (ImageNet BGR mean fill) and crop a random window
    # back to the target size.
    if padding_length > 0 or padding_ratio > 0:
        min_height = image_height + int(
            max(padding_length, image_height * padding_ratio))
        min_width = image_width + int(
            max(padding_length, image_width * padding_ratio))
        augmentations.append(
            PadIfNeeded(min_height=min_height,
                        min_width=min_width,
                        border_mode=cv2.BORDER_CONSTANT,
                        value=(123.68, 116.779, 103.939)))
        augmentations.append(RandomCrop(height=image_height,
                                        width=image_width))
    augmentations.append(HorizontalFlip(p=horizontal_flip_probability))
    augmentations.append(
        RandomGrayscalePatchReplacement(p=patch_replacement_probability))
    augmentations.append(RandomErasing(p=random_erasing_probability))
    self.transformer = Compose(transforms=augmentations)
def __init__(self,
             horizontal_flip_probability=0.5,
             rotate_limit=0,
             image_height=224,
             image_width=224,
             padding_length=20,
             padding_ratio=0):
    """Collect the transform list: optional flip, rotate, and pad+crop.

    Only `self.transforms` is populated here; `self.transformer` is left
    as None (presumably composed elsewhere).
    """
    self.transforms = []
    self.transformer = None
    # Flip the input horizontally.
    if horizontal_flip_probability > 0:
        self.transforms.append(HorizontalFlip(p=horizontal_flip_probability))
    # Rotate by an angle drawn uniformly from [-rotate_limit, rotate_limit].
    if rotate_limit > 0:
        self.transforms.append(
            Rotate(limit=rotate_limit,
                   border_mode=cv2.BORDER_CONSTANT,
                   value=0,
                   p=1.0))
    # Pad each side, then crop a random window back to the target size.
    if padding_length > 0 or padding_ratio > 0:
        min_height = image_height + int(
            max(padding_length, image_height * padding_ratio))
        min_width = image_width + int(
            max(padding_length, image_width * padding_ratio))
        self.transforms.append(
            PadIfNeeded(min_height=min_height,
                        min_width=min_width,
                        border_mode=cv2.BORDER_CONSTANT,
                        value=0))
        self.transforms.append(RandomCrop(height=image_height,
                                          width=image_width))
def get_transform(model_name):
    """Return (train_transform, valid_transform) for the given model name.

    TAPNet operates on sequences of images, so its training transform
    avoids random geometric augmentation (pad + normalize + resize only);
    other models additionally get flips and a random crop. Target sizes
    come from the module-level `args`.
    """
    if 'TAPNet' in model_name:
        # Transform for sequences of images is very tricky.
        # TODO: more transforms should be adopted for better results.
        train_transform_ops = [
            PadIfNeeded(min_height=args.input_height,
                        min_width=args.input_width,
                        p=1),
            Normalize(p=1),
            Resize(height=args.input_height, width=args.input_width, p=1),
        ]
    else:
        train_transform_ops = [
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            PadIfNeeded(min_height=args.input_height,
                        min_width=args.input_width,
                        p=1),
            Normalize(p=1),
            RandomCrop(height=args.input_height,
                       width=args.input_width,
                       p=1),
        ]
    valid_transform_ops = [
        Normalize(p=1),
        PadIfNeeded(min_height=args.input_height,
                    min_width=args.input_width,
                    p=1),
        Resize(height=args.input_height, width=args.input_width, p=1),
    ]
    return Compose(train_transform_ops, p=1), Compose(valid_transform_ops, p=1)
def __init__(self, args, saved_weights):
    """Set up datasets, transforms, the Unet model, and training history.

    :param args: namespace providing at least `path`
    :param saved_weights: when truthy, restore weights from
        `<path>/Unet_results/training_results.pt`
    """
    self.args = args
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    self.transforms_train = Compose([
        Resize(256, 256),
        HorizontalFlip(0.5),
        VerticalFlip(0.5),
        Normalize(imagenet_mean, imagenet_std)
    ])
    self.transforms_validation = Compose([
        Resize(256, 256),
        Normalize(imagenet_mean, imagenet_std)
    ])
    self.train_dataset = DataSet('train', self.args.path,
                                 self.transforms_train)
    self.validation_dataset = DataSet('validation', self.args.path,
                                      self.transforms_validation)
    # NOTE(review): best_loss starts at 0 — if it is compared with
    # `loss < best_loss` elsewhere, it would never update; confirm usage.
    self.best_loss = 0
    self.model = Unet(3, 21)
    self.best_model_wts = copy.deepcopy(self.model.state_dict())
    ####
    if saved_weights:
        self.model.load_state_dict(
            torch.load(self.args.path + '/Unet_results' +
                       '/training_results.pt'))
    ####
    self.model.cuda()
    self.train_hist = {
        'tot_loss_validation': [],
        'tot_loss_train': [],
        'bce_loss_validation': [],
        'bce_loss_train': [],
        'dice_loss_validation': [],
        'dice_loss_train': [],
        'per_epoch_time': [],
        'total_time': []
    }
def __init__(self, settype):
    """Build the albumentations pipeline for a CIFAR-style dataset split.

    :param settype: 'train' (pad/crop, flip, cutout, normalize) or
        'test' (normalize only). Other values leave no transform set.
    """
    self.settype = settype
    self.stddev, self.means = self.dataset_calculate_mean_std()
    fill = list(255 * self.means)  # dataset mean in 0-255 range
    if self.settype == 'train':
        print("Train set")
        self.albumentation_transform = Compose([
            PadIfNeeded(min_height=40,
                        min_width=40,
                        border_mode=1,
                        value=fill,
                        p=1.0),
            # RandomBrightnessContrast(always_apply=False, p=0.5, brightness_limit=(-0.40, 0.82), contrast_limit=(-0.40, 0.82), brightness_by_max=True),
            RandomCrop(height=32, width=32, always_apply=True, p=1.0),
            # NOTE(review): flip is always applied (p=1.0) — every image
            # is mirrored; confirm this is intentional.
            HorizontalFlip(always_apply=True, p=1.0),
            Cutout(always_apply=True,
                   p=1.0,
                   num_holes=1,
                   max_h_size=8,
                   max_w_size=8,
                   fill_value=fill),
            # GaussNoise(always_apply=False, p=1.0, var_limit=(60, 100)),
            # CoarseDropout(max_holes=2, max_height=16, max_width=16, min_holes=1, min_height=8, min_width=8, fill_value=list(255 * self.means), always_apply=False, p=1.0),
            Normalize(
                mean=list(self.means),
                std=list(self.stddev),
            ),
            ToTensor()
        ])
    elif self.settype == 'test':
        print("Test set")
        self.albumentation_transform = Compose([
            Normalize(
                mean=list(self.means),
                std=list(self.stddev),
            ),
            ToTensor()
        ])
def get_train_transform(border_mode, size=320):
    """Training pipeline: flip, light photometric jitter, small
    shift/scale/rotate, additive noise, crop-if-needed at 2x, then resize.

    NOTE(review): `border_mode` is currently unused (the Rotate that
    consumed it is disabled); kept for interface compatibility.
    """
    brightness_contrast = OneOf([
        RandomBrightness(0.1, p=1),
        RandomContrast(0.1, p=1),
    ], p=0.3)
    return Compose([
        HorizontalFlip(p=0.5),
        brightness_contrast,
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.0,
                         rotate_limit=15,
                         p=0.3),
        IAAAdditiveGaussianNoise(p=0.3),
        RandomCropIfNeeded(size * 2, size * 2),
        Resize(size, size),
    ])
def strong_aug(p=0.5):
    """Photometric-heavy augmentation pipeline.

    :param p: probability the whole pipeline is applied
    """
    noise = OneOf([
        IAAAdditiveGaussianNoise(),
        GaussNoise(),
    ], p=0.2)
    blur = OneOf([
        MotionBlur(p=0.25),
        GaussianBlur(p=0.5),
        Blur(blur_limit=3, p=0.25),
    ], p=0.2)
    tone = OneOf([
        RandomBrightness(),
        IAASharpen(),
        IAAEmboss(),
        RandomBrightnessContrast(),
    ], p=0.6)
    return Compose(
        [HorizontalFlip(), noise, blur,
         HueSaturationValue(p=0.2), tone,
         ToSepia(p=0.1)],
        p=p)
def get_aug(p=1.0):
    """Geometric + color augmentation pipeline.

    :param p: probability the whole pipeline is applied
    """
    distortion = OneOf([
        OpticalDistortion(p=0.3),
        GridDistortion(p=.1),
        IAAPiecewiseAffine(p=0.3),
    ], p=0.3)
    color = OneOf([
        HueSaturationValue(10, 15, 10),
        CLAHE(clip_limit=2),
        RandomBrightnessContrast(),
    ], p=0.3)
    return Compose([
        HorizontalFlip(),
        VerticalFlip(),
        RandomRotate90(),
        ShiftScaleRotate(shift_limit=0.0625,
                         scale_limit=0.2,
                         rotate_limit=15,
                         p=0.9,
                         border_mode=cv2.BORDER_REFLECT),
        distortion,
        color,
    ], p=p)
def transform_v1(config):
    """Return (train, test) pipelines for deepfake-style classification.

    :param config: object providing `image_size` (square resize target)
    """
    side = config.image_size
    color_jitter = OneOf(
        [RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()],
        p=0.7)
    train_transforms = Compose([
        ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        Resize(side, side),
        color_jitter,
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
        ToTensor()
    ])
    test_transforms = Compose([Resize(side, side), ToTensor()])
    return train_transforms, test_transforms
def get_train_transforms():
    """Training augmentation pipeline (uses the module-level `args.img_size`)."""
    crop = RandomResizedCrop(args.img_size, args.img_size)
    jitter = [
        HueSaturationValue(hue_shift_limit=0.2,
                           sat_shift_limit=0.2,
                           val_shift_limit=0.2,
                           p=0.5),
        RandomBrightnessContrast(brightness_limit=(-0.1, 0.1),
                                 contrast_limit=(-0.1, 0.1),
                                 p=0.5),
    ]
    return Compose(
        [
            crop,
            Transpose(p=0.5),
            HorizontalFlip(p=0.5),
            VerticalFlip(p=0.5),
            ShiftScaleRotate(p=0.5),
        ] + jitter + [
            Normalize(mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225],
                      max_pixel_value=255.0,
                      p=1.0),
            CoarseDropout(p=0.5),
            ToTensorV2(p=1.0),
        ],
        p=1.)
def __init__(self, num_classes, input_size):
    """Set up TuSimple lane-detection train/val transform lists.

    :param num_classes: number of classes, forwarded to ToTensor
    :param input_size: (height, width) target resolution
    """
    super(TuSimpleDataTransform, self).__init__()
    random.seed(1000)  # fixed seed for reproducible augmentation
    height, width = input_size
    augmentations = [
        HorizontalFlip(p=0.5),
        GaussNoise(p=0.5),
        RandomBrightnessContrast(p=0.5),
        # Weather-style augmentation for road scenes.
        RandomShadow(p=0.5),
        RandomRain(rain_type="drizzle", p=0.5),
        ShiftScaleRotate(rotate_limit=10, p=0.5),
        RandomResizedCrop(height=height, width=width, scale=(0.8, 1), p=0.5),
    ]
    self._train_transform_list = self._train_transform_list + augmentations
    self._train_transform_list.append(Resize(height, width))
    self._val_transform_list.append(Resize(height, width))
    self._train_transform_list.append(ToTensor(num_classes=num_classes))
    self._val_transform_list.append(ToTensor(num_classes=num_classes))
    self._initialize_transform_dict()
def albumentations_transforms(p=1.0, is_train=False):
    """Return a callable mapping a PIL/ndarray image to a tensor.

    :param p: probability the composed pipeline is applied
    :param is_train: enables pad + random crop + horizontal flip
    """
    # Mean and standard deviation of the train dataset.
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])
    transforms_list = []
    # Use data augmentation only for train data.
    if is_train:
        transforms_list += [
            PadIfNeeded(min_height=40,
                        min_width=40,
                        border_mode=BORDER_CONSTANT,
                        value=mean * 255.0,
                        p=1.0),
            OneOf([
                RandomCrop(height=32, width=32, p=0.8),
            ], p=1.0),
            HorizontalFlip(p=0.5),
        ]
    transforms_list += [
        Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
        ToTensor(),
    ]
    transforms = Compose(transforms_list, p=p)
    return lambda img: transforms(image=np.array(img))["image"]
def get_transforms(phase, mean, std):
    """Build the pipeline for one phase.

    :param phase: "train" adds flips; other phases pad to 256x256 instead
    :param mean: per-channel mean for Normalize
    :param std: per-channel std for Normalize
    :return: composed transform pipeline
    """
    ops = []
    if phase == "train":
        # Flip-based augmentation only, as of now.
        ops.append(HorizontalFlip(p=0.5))
        ops.append(Flip(p=0.5))
    else:
        ops.append(PadIfNeeded(min_height=256, min_width=256))
    ops.append(Normalize(mean=mean, std=std, p=1))
    ops.append(ToTensor())
    return Compose(ops)
def read_train_img(images_paths):
    """Load image/ground-truth tiff pairs, augment, and preprocess them.

    :param images_paths: paths to image tiffs; each ground-truth path is
        derived by replacing 'images' with 'gt' in the image path
    :return: (images, gts) as numpy arrays; images scaled to [0, 1],
        ground truth binarized (255 -> 1) with a trailing channel axis
    """
    images = []
    gts = []
    for image_path in images_paths:
        gt_path = image_path.replace('images', 'gt')
        image = tifffile.imread(image_path)
        gt = tifffile.imread(gt_path)
        # Data augmentation (crop size depends on this image's dimensions,
        # so the pipeline is built per image).
        h, w = image.shape[0], image.shape[1]
        aug = Compose([
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            HorizontalFlip(p=0.5),
            RandomSizedCrop(min_max_height=(128, 512), height=h, width=w,
                            p=0.5)
        ])
        augmented = aug(image=image, mask=gt)
        image = augmented['image']
        gt = augmented['mask']
        # Preprocessing: scale image to [0, 1] and binarize the mask.
        image = image / 255.0
        # The boolean mask is evaluated before assignment, so no copy of
        # gt is needed here (the original made a redundant gt.copy()).
        gt[gt == 255] = 1
        gt = np.expand_dims(gt, axis=2)
        # gt = np_utils.to_categorical(gt, num_classes=1)
        images.append(image)
        gts.append(gt)
    return np.array(images), np.array(gts)
def create_train_transforms(size):
    """Training pipeline: noise/blur, flips, resize + pad, color jitter,
    occasional grayscale, shift/scale/rotate, normalize, tensor.

    :param size: (height, width) target resolution
    """
    color_jitter = OneOf(
        [RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()],
        p=0.7)
    return Compose([
        # ImageCompression(quality_lower=60, quality_upper=100, p=0.5),
        GaussNoise(p=0.1),
        GaussianBlur(blur_limit=3, p=0.05),
        HorizontalFlip(),
        RandomRotate90(),
        Resize(height=size[0], width=size[1]),
        PadIfNeeded(min_height=size[0],
                    min_width=size[1],
                    border_mode=cv2.BORDER_CONSTANT),
        color_jitter,
        ToGray(p=0.1),
        ShiftScaleRotate(shift_limit=0.1,
                         scale_limit=0.2,
                         rotate_limit=10,
                         border_mode=cv2.BORDER_CONSTANT,
                         p=0.5),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ToTensorV2()
    ])
def __init__(self):
    """Compose the fixed training transformer for 512x512 inputs."""
    distortion = OneOf([
        GridDistortion(num_steps=5, distort_limit=(-0.46, 0.40)),
        ElasticTransform(alpha=1.68, sigma=48.32, alpha_affine=44.97),
    ], p=0.3)
    self.transformer = Compose([
        HorizontalFlip(p=0.5),
        CLAHE(clip_limit=(1, 8), tile_grid_size=(10, 10), p=0.3),
        distortion,
        RandomResizedCrop(p=0.3,
                          height=512,
                          width=512,
                          scale=(0.08, 1.0),
                          ratio=(0.75, 1.33)),
        ShiftScaleRotate(p=0.3,
                         shift_limit=(-0.06, 0.06),
                         scale_limit=(-0.10, 0.10),
                         rotate_limit=(-20, 20)),
        # Dataset-specific channel statistics.
        Normalize(mean=[0.46009655, 0.43957878, 0.41827092],
                  std=[0.2108204, 0.20766491, 0.21656131],
                  max_pixel_value=255.0,
                  p=1.0),
        ToTensorV2(),
    ])
def vanilla_transform(p):
    """Baseline augmentation pipeline.

    :param p: probability the whole pipeline is applied
    """
    brightness = OneOf([
        CLAHE(p=1),
        RandomBrightness(p=1),
        RandomGamma(p=1),
    ], p=0.5)
    sharpness = OneOf([
        IAASharpen(p=1),
        Blur(blur_limit=3, p=1),
        MotionBlur(blur_limit=3, p=1),
    ], p=0.5)
    color = OneOf([
        RandomContrast(p=1),
        HueSaturationValue(p=1),
    ], p=0.5)
    return Compose([
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        ShiftScaleRotate(rotate_limit=30,
                         scale_limit=0.15,
                         border_mode=cv2.BORDER_CONSTANT,
                         value=[0, 0, 0],
                         p=0.5),
        IAAAdditiveGaussianNoise(p=0.2),
        IAAPerspective(p=0.5),
        brightness,
        sharpness,
        color,
    ], p=p)
def build_transforms(mean=(0.485, 0.456, 0.406),
                     std=(0.229, 0.224, 0.225),
                     divide_by=255.0,
                     scale_range=0.1,
                     brightness_range=0.1):
    """Return (train_transform, eval_transform) sharing one Normalize.

    :param mean: per-channel mean for Normalize
    :param std: per-channel std for Normalize
    :param divide_by: max pixel value used by Normalize
    :param scale_range: limit for the random crop-and-rescale
    :param brightness_range: limit for RandomBrightness
    """
    from src import torch_custom

    # CSAIL ResNet
    # norm = Normalize(mean=(102.9801, 115.9465, 122.7717), std=(1., 1., 1.), max_pixel_value=1, p=1.0)
    norm = Normalize(mean=mean, std=std, max_pixel_value=divide_by)
    train_transform = Compose([
        torch_custom.RandomCropThenScaleToOriginalSize(limit=scale_range,
                                                       p=1.0),
        RandomBrightness(limit=brightness_range, p=0.5),
        HorizontalFlip(p=0.5),
        norm,
    ])
    eval_transform = Compose([norm])
    return train_transform, eval_transform
def strong_aug(p=0.5):
    """Geometric-heavy augmentation pipeline.

    :param p: probability the whole pipeline is applied
    """
    photometric = OneOf(
        [
            RandomContrast(),
            RandomGamma(),
            RandomBrightness()
            # RandomBrightnessContrast(),
        ],
        p=0.3,
    )
    return Compose(
        [
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.4),
            Transpose(p=0.4),
            ShiftScaleRotate(shift_limit=0.0625,
                             scale_limit=0.2,
                             rotate_limit=45,
                             p=0.2),
            # OneOf([
            #     ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            #     GridDistortion(),
            #     OpticalDistortion(distort_limit=2, shift_limit=0.3)
            # ], p=0.2),
            photometric,
        ],
        p=p,
    )
def __init__(self,
             root,
             df,
             mean=(0.485, 0.456, 0.406),
             std=(0.229, 0.224, 0.225),
             TTA=False):
    """Test-time dataset over unique image ids derived from `df['ID']`.

    :param root: dataset root directory
    :param df: DataFrame with an 'ID' column shaped like
        '<stem>_<n>_<label>'; an 'imageid' column ('<stem>_<n>.png') is
        added IN PLACE (side effect on the caller's DataFrame).
    :param mean: per-channel mean for Normalize
    :param std: per-channel std for Normalize
    :param TTA: when truthy, prepend a HorizontalFlip for test-time
        augmentation
    """
    self.root = root
    # Derive the image file name from the first two '_'-separated parts.
    df['imageid'] = df['ID'].apply(
        lambda x: x.split('_')[0] + '_' + x.split('_')[1] + '.png')
    self.fnames = df['imageid'].unique().tolist()
    self.num_samples = len(self.fnames)
    # Idiomatic truthiness test (was `TTA == True`).
    if TTA:
        self.transform = Compose([
            HorizontalFlip(),
            albu.Resize(512, 512),
            Normalize(mean=mean, std=std, p=1),
            ToTensor(),
        ])
    else:
        self.transform = Compose([
            albu.Resize(512, 512),
            Normalize(mean=mean, std=std, p=1),
            ToTensor(),
        ])