Example #1
0
def transform_v0(config):
    """Build train/test transform pipelines (224px, ImageNet normalization).

    Based on https://www.kaggle.com/nroman/melanoma-pytorch-starter-efficientnet/data?scriptVersionId=35726268

    Args:
        config: CFG  # NOTE(review): currently unused inside this function

    Returns: train_transforms, test_transforms
    """
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(size=224, scale=(0.7, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
        # BUG FIX: the scale range was (0.05, 0.007), i.e. min > max.
        # Every other (min, max) range in this file is ascending
        # (e.g. RandomResizedCrop scale=(0.7, 1.0)), so order it ascending.
        transforms.Cutout(scale=(0.007, 0.05), value=(0, 0)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Test pipeline: tensor conversion + normalization only (no augmentation).
    test_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    return train_transforms, test_transforms
Example #2
0
 def __init__(self, image_size):
     """Set up train/test preprocessing pipelines for `image_size` images.

     Stores them under self.data_transform['train_transform'] /
     ['test_transform']; both normalize with the ImageNet mean/std.
     """
     train_pipeline = transforms.Compose([
         transforms.RandomResizedCrop(size=image_size, scale=(0.7, 1.0)),
         transforms.RandomHorizontalFlip(),
         transforms.RandomVerticalFlip(),
         transforms.ToTensor(),
         # Equivalent to albumentations' Normalize
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
     ])
     test_pipeline = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
     ])
     self.data_transform = {
         'train_transform': train_pipeline,
         'test_transform': test_pipeline,
     }
Example #3
0
def get_transforms():
    """Build the augmentation pipelines for training and evaluation.

    Returns:
        tuple: (train_transform, test_transform) torchvision Compose
        pipelines; both end with ImageNet mean/std normalization.
    """
    train_steps = [
        AdvancedHairAugmentation(hairs_folder=f"{HAIRS}"),
        transforms.RandomResizedCrop(size=SIZE, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        Microscope(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    eval_steps = [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(train_steps), transforms.Compose(eval_steps)
config = get_config()

# %%
# One-time setup via project utilities; on Kaggle, patch timm so pretrained
# weights resolve locally. NOTE(review): the exact side effects of
# util.initialize / kaggle_timm_pretrained.patch aren't visible here.
util.initialize(config)
if util.is_kaggle():
    import kaggle_timm_pretrained

    kaggle_timm_pretrained.patch()

# %%
# Training augmentation: random crop/flips + project Microscope effect,
# normalized with the standard ImageNet mean/std statistics.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(size=config.image_size, scale=(0.8, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    my_transforms.Microscope(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])

# %%
# Load the dataset index at the configured image size; sanity-check mode
# presumably shrinks the data — confirm in io.load_my_isic2020_csv.
all_source, _ = io.load_my_isic2020_csv(size=config.image_size,
                                        is_sanity_check=config.sanity_check)

# %%
# Fold selection and experiment naming are driven by environment variables
# set by the launcher; KAGGLE_EXPERIMENT_NAME is optional (may be None).
fold_index = int(os.environ["KAGGLE_TRAIN_FOLD_INDEX"])
n_fold = int(os.environ["KAGGLE_N_FOLD"])
experiment_name = os.environ.get("KAGGLE_EXPERIMENT_NAME")
# Kick off training for this fold (the argument list continues beyond this
# chunk of the file).
task.train_nth_fold(EfficientNetB5MLP,
                    config,
                    all_source,
Example #5
0
def main():
    """CLI entry point for inference.

    Restores the training-time CFG from the experiment's log directory,
    averages the per-fold best checkpoints' sigmoid predictions on the test
    set (optionally with 4x random-flip TTA), and writes a submission CSV
    named after the averaged fold validation loss/metric.
    """
    parser = argparse.ArgumentParser()
    # path
    parser.add_argument('--root-path', default=CFG.root_path, help="root path")
    parser.add_argument('--save-path', default=CFG.save_path, help="save path")
    parser.add_argument('--sub-name',
                        default=CFG.sub_name,
                        help="submission name")

    # learning
    parser.add_argument('--batch-size',
                        default=CFG.batch_size,
                        type=int,
                        help=f"batch size({CFG.batch_size})")
    parser.add_argument("--workers",
                        default=CFG.workers,
                        type=int,
                        help=f"number of workers({CFG.workers})")
    parser.add_argument("--seed",
                        default=CFG.seed,
                        type=int,
                        help=f"seed({CFG.seed})")

    # version
    parser.add_argument('--version', type=int)
    parser.add_argument('--exp-id', type=int)

    # etc
    parser.add_argument('--tta', action='store_true', default=False)

    args = parser.parse_args()

    # Mirror the CLI arguments onto the global CFG object.
    CFG.root_path = args.root_path
    CFG.save_path = args.save_path
    CFG.sub_name = args.sub_name

    CFG.batch_size = args.batch_size
    CFG.workers = args.workers
    CFG.seed = args.seed

    CFG.model_path = f"./model/v{args.version}/exp_{args.exp_id}/"
    CFG.log_path = f"./log/v{args.version}/exp_{args.exp_id}/"

    CFG.tta = args.tta

    # get device
    CFG.device = get_device()

    # load train environment: restore the CFG values saved during training.
    # BUG FIX: the file handle was leaked (json.load(open(...))); use a
    # context manager so it is always closed.
    with open(os.path.join(CFG.log_path, 'CFG.json'), 'r') as f:
        env = json.load(f)
    for k, v in env.items():
        setattr(CFG, k, v)

    # Average each fold's best-epoch scores (row with minimal val_loss).
    loss, metric = 0, 0
    for fold in range(CFG.n_folds):
        fn = os.path.join(CFG.log_path, f"log.fold_{fold}.csv")
        score = pd.read_csv(fn).sort_values("val_loss", ascending=True).iloc[0]
        loss += score['val_loss'] / CFG.n_folds
        metric += score['val_metric'] / CFG.n_folds

    CFG.sub_name = f"submission." \
                   f"ver_{args.version}." \
                   f"exp_{args.exp_id}." \
                   f"loss_{loss:.4f}." \
                   f"metric_{metric:.4f}.csv"

    if CFG.tta:
        CFG.sub_name = "tta." + CFG.sub_name

    pprint({k: v for k, v in dict(CFG.__dict__).items() if '__' not in k})
    print()

    ### seed all
    seed_everything(CFG.seed)

    ### Data related logic
    # load data
    print("Load Raw Data")
    _, test_df = load_data(CFG)
    print()

    # preprocess data
    print("Preprocess Data")
    test_df = preprocess_data(CFG, test_df)
    print()

    # get transform
    print("Get Transform")
    _, test_transforms = get_transform(CFG)
    print()

    # dataset
    tst_data = MelanomaDataset(CFG, test_df, test_transforms)

    # if tta: random flips make each prediction pass see a different view.
    tta_transforms = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    tta_data = MelanomaDataset(CFG, test_df, tta_transforms)

    final_preds = np.zeros(test_df.shape[0])

    # folds: each fold's best checkpoint contributes 1/n_folds of the result.
    for fold in range(CFG.n_folds):
        print(f"========== Fold: {fold} ==========")
        # load learner
        print("Load Model")
        model_name = f'model.fold_{fold}.best.pt'
        learner = Learner(CFG)
        # NOTE: was a pointless f-string (f"model_state_dict"); plain literal.
        learner.load(os.path.join(CFG.model_path, model_name),
                     "model_state_dict")

        # prediction
        if not CFG.tta:
            test_preds = torch.sigmoid(
                learner.predict(tst_data).view(-1)).numpy()

        else:
            # 4 stochastic passes, averaged.
            test_preds = np.zeros(test_df.shape[0])
            for _ in range(4):
                test_preds += torch.sigmoid(
                    learner.predict(tta_data).view(-1)).numpy() / 4

        final_preds += test_preds / CFG.n_folds
        print()

    # Re-order predictions to match the sample submission's image_name order.
    ss_df = pd.read_csv(
        os.path.join(CFG.root_path, "melanoma-external-malignant-256",
                     "sample_submission.csv"))
    test_df['target'] = final_preds
    test_df.set_index("image_name", inplace=True)
    ss_df = test_df.loc[ss_df['image_name']].reset_index()[[
        'image_name', 'target'
    ]]
    ss_df.to_csv(os.path.join(CFG.save_path, f"{CFG.sub_name}"), index=False)

    print(ss_df.head())
Example #6
0
    def __init__(self, df: pd.DataFrame, config, imfolder: str, split='train', meta_features=None):
        """Initialize the dataset and assemble its image pipeline from `config`.

        Args:
            df (pd.DataFrame): DataFrame with data description
            config: dict-like with keys 'input_size', 'same_sized_crop',
                'hair_aug', 'microscope_aug', 'cutout' (and optional
                'scale_min', 'full_rot', 'scale', 'shear', 'cutout_length')
            imfolder (str): folder with images
            split: 'train', 'val' or 'test'
            meta_features (list): list of features with meta information,
                such as sex and age
        """
        self.df = df
        self.imfolder = imfolder
        self.split = split
        self.meta_features = meta_features
        self.input_size = config['input_size']
        self.same_sized_crop = config['same_sized_crop']
        self.hair_aug = config['hair_aug']
        self.microscope_aug = config['microscope_aug']
        self.config = config

        # NOTE(review): augmentations are applied for BOTH 'train' and 'test'
        # splits (test-time augmentation?); every other split gets the plain
        # ToTensor+Normalize pipeline. Confirm this is intentional.
        if split in ('train', 'test'):
            steps = []
            if self.hair_aug:
                steps.append(AdvancedHairAugmentation(hairs_folder='melanoma_hair/'))
            # Either a fixed-size crop or a random resized crop, never both.
            if self.same_sized_crop:
                steps.append(transforms.RandomCrop(self.input_size))
            else:
                steps.append(transforms.RandomResizedCrop(
                    self.input_size,
                    scale=(config.get('scale_min', 0.08), 1.0)))

            steps.append(transforms.RandomHorizontalFlip())
            steps.append(transforms.RandomVerticalFlip())

            # Optional rotation: affine (with scaling/shear) when 'scale' is
            # configured, plain rotation otherwise.
            if config.get('full_rot', 0) > 0:
                if config.get('scale', False):
                    steps.append(transforms.RandomAffine(
                        config['full_rot'],
                        scale=config['scale'],
                        shear=config.get('shear', 0)))
                else:
                    steps.append(transforms.RandomRotation(config['full_rot']))

            steps.append(transforms.ColorJitter(brightness=32. / 255., saturation=0.5))

            if self.microscope_aug:
                steps.append(Microscope(p=0.6))
            if config['cutout']:
                steps.append(Cutout_v0(n_holes=1, length=config['cutout_length']))

            steps.append(transforms.ToTensor())
            steps.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225]))
            self.composed = transforms.Compose(steps)
        else:
            # Plain evaluation pipeline: no augmentation.
            self.composed = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
Example #7
0
        self.module = module


# Restore the denoising model from its checkpoint, then unwrap it for CPU
# inference. NOTE(review): "wapper" looks like a typo for "wrapper" — it is
# defined elsewhere in this file, so the name is kept as-is here.
model = UNet()
# model = ResConnNoise()
# model = nn.DataParallel(model)
model = wapper(model)
model.load_state_dict(
    torch.load('../param/5_gaussian.pt', map_location=torch.device('cpu')))
model = model.module.cpu()  # unwrap the inner module and move it to CPU

# Evaluation pipeline: crop both images to 256, add Gaussian noise (std=25)
# to the source only; the target stays clean.
pre_transform = RandomCrop(256, pad_if_needed=True)
source_transform = transform.Compose([
    RandomGaussianNoise(p=0.95, mean=0, std=25, fixed_distribution=False),
    # RandomTextOverlay(p=1, max_occupancy=30, length=(15, 30)),
    transform.ToTensor(),
])

test_transform = transform.ToTensor()
# Paired noisy-source / clean-target test dataset.
dt = PairDataset('/media/piston/data/Noise2Noise/test',
                 pre_transform=pre_transform,
                 source_transform=source_transform,
                 target_transform=test_transform)


def get_psnr(input, target, max_val=1.0):
    """Compute the peak signal-to-noise ratio between two tensors.

    Generalized from a hard-coded peak of 1: `max_val` defaults to 1.0, so
    existing callers (images scaled to [0, 1]) get identical results.

    Args:
        input: predicted image tensor.
        target: reference image tensor, same shape as ``input``.
        max_val: maximum possible pixel value (1.0 for [0, 1] images;
            pass 255.0 for 8-bit images).

    Returns:
        Scalar tensor with the PSNR in decibels (``inf`` if inputs match).
    """
    return 10 * torch.log10(max_val ** 2 / F.mse_loss(input, target))