def transform_v0(config):
    """Build the train/test augmentation pipelines.

    Reference:
        https://www.kaggle.com/nroman/melanoma-pytorch-starter-efficientnet/data?scriptVersionId=35726268

    Args:
        config: CFG

    Returns:
        train_transforms, test_transforms
    """
    # ImageNet statistics, shared by both pipelines.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(size=224, scale=(0.7, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
        # NOTE(review): scale bounds look reversed (min > max); (0.05, 0.007)
        # is copied verbatim from the reference kernel — confirm intent before
        # "fixing", since swapping them changes the augmentation strength.
        transforms.Cutout(scale=(0.05, 0.007), value=(0, 0)),
        transforms.ToTensor(),
        normalize,
    ])
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    return train_transforms, test_transforms
def __init__(self, image_size):
    """Build train/test transform pipelines keyed by name.

    Args:
        image_size: target side length for RandomResizedCrop.
    """
    # Equivalent to albumentations' Normalize (ImageNet statistics).
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_pipeline = transforms.Compose([
        transforms.RandomResizedCrop(size=image_size, scale=(0.7, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_pipeline = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    self.data_transform = {
        'train_transform': train_pipeline,
        'test_transform': test_pipeline,
    }
def get_transforms():
    """Return (train_transform, test_transform) augmentation pipelines.

    Train adds hair/microscope artifacts and geometric flips; test only
    converts to tensor and normalizes with ImageNet statistics.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        AdvancedHairAugmentation(hairs_folder=f"{HAIRS}"),
        transforms.RandomResizedCrop(size=SIZE, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        Microscope(p=0.5),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    return train_transform, test_transform
from siim_isic_melanoma_classification.config import get_config
from siim_isic_melanoma_classification.net import EfficientNetB5MLP

# %%
# Load the run configuration (image size, sanity-check flag, ...).
config = get_config()

# %%
util.initialize(config)
# When running inside a Kaggle kernel, patch timm so pretrained weights load
# from the attached dataset instead of the network.
if util.is_kaggle():
    import kaggle_timm_pretrained
    kaggle_timm_pretrained.patch()

# %%
# Training augmentation pipeline; normalization uses ImageNet statistics.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(size=config.image_size, scale=(0.8, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    # presumably simulates the circular microscope vignette — see my_transforms
    my_transforms.Microscope(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# %%
# Full training source; second return value (test set?) is discarded here.
all_source, _ = io.load_my_isic2020_csv(size=config.image_size, is_sanity_check=config.sanity_check)

# %%
# Cross-validation fold selection is driven by the environment (raises
# KeyError if the variables are unset — intentional fail-fast).
fold_index = int(os.environ["KAGGLE_TRAIN_FOLD_INDEX"])
n_fold = int(os.environ["KAGGLE_N_FOLD"])
import torchtoolbox.transform as transforms
from augmentation import *

# Transforms used by the datasets.

# Training: synthetic hair, crop/flip augmentation, microscope vignette,
# then tensor conversion + ImageNet normalization.
train_transform = transforms.Compose([
    AdvancedHairAugmentation(hairs_folder='/kaggle/input/melanoma-hairs'),
    transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    Microscope(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Evaluation: tensor conversion + ImageNet normalization only.
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
def __init__(self, df: pd.DataFrame, config, imfolder: str, split='train', meta_features=None):
    """Class initialization.

    Args:
        df (pd.DataFrame): DataFrame with data description.
        config: dict-like configuration; reads 'input_size',
            'same_sized_crop', 'hair_aug', 'microscope_aug', 'cutout',
            'cutout_length' and optionally 'scale_min', 'full_rot',
            'scale', 'shear'.
        imfolder (str): folder with images.
        split: 'train', 'val' or 'test'.
        meta_features (list): list of features with meta information,
            such as sex and age.
    """
    self.df = df
    self.imfolder = imfolder
    self.split = split
    self.meta_features = meta_features
    self.input_size = config['input_size']
    self.same_sized_crop = config['same_sized_crop']
    self.hair_aug = config['hair_aug']
    self.microscope_aug = config['microscope_aug']
    self.config = config

    # NOTE(review): augmentation is applied for split == 'test' as well
    # (test-time augmentation?); only other splits (e.g. 'val') get the
    # plain ToTensor+Normalize pipeline — confirm this is intended.
    if split in ('train', 'test'):
        steps = []
        if self.hair_aug:
            steps.append(AdvancedHairAugmentation(hairs_folder='melanoma_hair/'))
        # Either a fixed-size random crop or a random resized crop.
        if self.same_sized_crop:
            steps.append(transforms.RandomCrop(self.input_size))
        else:
            steps.append(transforms.RandomResizedCrop(
                self.input_size,
                scale=(config.get('scale_min', 0.08), 1.0)))
        steps.append(transforms.RandomHorizontalFlip())
        steps.append(transforms.RandomVerticalFlip())
        # Optional rotation; combined with scale/shear when 'scale' is set.
        if config.get('full_rot', 0) > 0:
            if config.get('scale', False):
                steps.append(transforms.RandomAffine(
                    config['full_rot'],
                    scale=config['scale'],
                    shear=config.get('shear', 0)))
            else:
                steps.append(transforms.RandomRotation(config['full_rot']))
        steps.append(transforms.ColorJitter(brightness=32. / 255., saturation=0.5))
        if self.microscope_aug:
            steps.append(Microscope(p=0.6))
        if config['cutout']:
            steps.append(Cutout_v0(n_holes=1, length=config['cutout_length']))
        steps.append(transforms.ToTensor())
        steps.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225]))
        self.composed = transforms.Compose(steps)
    else:
        self.composed = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])