Example #1
    def test_transforms(self):
        transforms_list = []
        if self.normalize:
            transforms_list.append(A.Normalize(self.mean, self.stdev))

        transforms_list.append(AP.ToTensor())
        self.transforms = A.Compose(transforms_list)
Example #2
    def __init__(self, path: str = './data', folder: str = 'dataset_crf/lab', special_folder: str = '',
                 df: object = None, datatype: object = 'train', dataset='crf',
                 img_ids: object = None,
                 transforms: object = albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
                 preprocessing: object = None,
                 use_mask: bool = False, use_corrected: bool = False, log_transform=False, load_any_mask=True) -> object:
        self.df = df
        self.datatype = datatype
        self.img_ids = img_ids
        self.transforms = transforms
        self.preprocessing = preprocessing
        self.use_mask = use_mask
        self.path = path
        self.folder = folder
        self.log_transform = log_transform
        self.use_corrected = use_corrected
        self.dataset = dataset
        self.load_any_mask = load_any_mask

        if self.use_corrected:
            self.image_names = os.listdir(f"{path}/{folder}/img_corrected_1/{special_folder}")
        else:
            if self.dataset != 'crf':
                self.image_names = os.listdir(f"{path}/{folder}/images/{special_folder}")
            else:
                self.image_names = os.listdir(f"{path}/{folder}/srgb8bit/{special_folder}")

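The constructor above only collects file names; a minimal sketch of the `__len__`/`__getitem__` pair these attributes presumably feed (the 'srgb8bit' loading path and cv2 usage are assumptions, not taken from the source):

    def __len__(self):
        # One sample per discovered image file.
        return len(self.image_names)

    def __getitem__(self, idx):
        # Hypothetical loading path; the real class may differ.
        name = self.image_names[idx]
        img = cv2.imread(os.path.join(self.path, self.folder, 'srgb8bit', name))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return self.transforms(image=img)['image']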
Example #3
 def __init__(
         self,
         images_path: str = 'C:\\Users\\Donik\\Dropbox\\Donik\\fax\\10_semestar\\Diplomski\\CubeDataset\\data\\Cube+\\',
         gt_path: str = 'C:\\Users\\Donik\\Dropbox\\Donik\\fax\\10_semestar\\Diplomski\\CubeDataset\\data\\Cube+\\cube+_gt.txt',
         df: object = None,
         datatype: object = 'train',
         transforms: object = albu.Compose(
             [albu.HorizontalFlip(), AT.ToTensor()]),
         preprocessing: object = None,
         log_transform=False,
         illumination_known=False,
         patch_width_ratio: float = 1. / 10,
         patch_height_ratio: float = 1. / 10) -> object:
     super().__init__(images_path,
                      gt_path,
                      df,
                      datatype,
                      transforms,
                      preprocessing,
                      log_transform,
                      illumination_known=illumination_known)
     self.patch_width_ratio = patch_width_ratio
     self.patch_height_ratio = patch_height_ratio
Example #4
 def __init__(
         self,
         images_path: str = 'C:\\Users\\Donik\\Dropbox\\Donik\\fax\\10_semestar\\Diplomski\\CubeDataset\\data\\Cube+\\',
         gt_path: str = 'C:\\Users\\Donik\\Dropbox\\Donik\\fax\\10_semestar\\Diplomski\\CubeDataset\\data\\Cube+\\cube+_gt.txt',
         df: object = None,
         datatype: object = 'train',
         transforms: object = albu.Compose(
             [albu.HorizontalFlip(), AT.ToTensor()]),
         preprocessing: object = None,
         log_transform=False,
         illumination_known=False) -> object:
     self.df = df
     self.datatype = datatype
     self.transforms = transforms
     self.preprocessing = preprocessing
     self.images_path = images_path
     self.gt_path = gt_path
     self.log_transform = log_transform
     self.illumination_known = illumination_known
     if datatype == 'valid':
         self.image_names = list(range(1650, 1708))
     else:
         self.image_names = list(range(1, 1650))
     self.gts = np.loadtxt(gt_path)
Example #5
 def __init__(self,
              data_folder: str,
              df: pd.DataFrame,
              im_ids: np.ndarray,
              masks_folder: str = None,
              transforms=albu.Compose(
                  [albu.HorizontalFlip(),
                   AT.ToTensor()]),
              preprocessing=None,
              mask_shape=(320, 640)):
     """
     Attributes
         data_folder (str): path to the image directory
         df (pd.DataFrame): dataframe with the labels
          im_ids (np.ndarray): array of image names.
         masks_folder (str): path to the masks directory
             assumes `use_resized_dataset == True`
         transforms (albumentations.augmentation): transforms to apply
             before preprocessing. Defaults to HFlip and ToTensor
         preprocessing: ops to perform after transforms, such as
             z-score standardization. Defaults to None.
          mask_shape (tuple): mask shape as (height, width) (numpy order, not cv2's)
     """
     self.df = df
     self.data_folder = data_folder
     self.masks_folder = masks_folder
     if isinstance(masks_folder, str):
         self.use_resized_dataset = True
         print(f"Using resized masks in {masks_folder}...")
     else:
         self.use_resized_dataset = False
     self.img_ids = im_ids
     self.transforms = transforms
     self.preprocessing = preprocessing
     self.mask_shape = mask_shape
Example #6
 def __init__(self,
              data_folder: str,
              df: pd.DataFrame,
              im_ids: np.ndarray,
              masks_folder: str = None,
              transforms=albu.Compose(
                  [albu.HorizontalFlip(),
                   AT.ToTensor()]),
              preprocessing=None,
              mask_shape=(320, 640)):
     """
     Attributes
         data_folder (str): path to the image directory
         df (pd.DataFrame): dataframe with the labels
          im_ids (np.ndarray): array of image names.
         masks_folder (str): path to the masks directory
             assumes `use_resized_dataset == True`
         transforms (albumentations.augmentation): transforms to apply
             before preprocessing. Defaults to HFlip and ToTensor
         preprocessing: ops to perform after transforms, such as
             z-score standardization. Defaults to None.
          mask_shape (tuple): mask shape as (height, width) (numpy order, not cv2's)
     """
     df["hasMask"] = ~df["EncodedPixels"].isna()
     super().__init__(data_folder=data_folder,
                      df=df,
                      im_ids=im_ids,
                      masks_folder=masks_folder,
                      transforms=transforms,
                      preprocessing=preprocessing,
                      mask_shape=mask_shape)
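The docstring leaves `preprocessing` abstract; a minimal sketch of a callable matching its z-score example, assuming it runs on the numpy image (the `get_preprocessing` helper name is hypothetical):

def get_preprocessing(mean, std):
    # Channel-wise z-score standardization, wrapped as an albumentations
    # pipeline so it composes the same way `transforms` does.
    def standardize(image, **kwargs):
        return (image - mean) / std
    return albu.Compose([albu.Lambda(image=standardize)])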
Example #7
def data_loader_mask():
    """
    Converts the images from PILImage to tensors
    so they can be accepted as input to the network.
    :return: train and test DataLoaders
    """
    print("Loading Dataset")

    default_transform = albu.Compose(
        [PadDifferentlyIfNeeded(512, 512, mask_value=0),
         AT.ToTensor()])

    transform = albu.Compose([
        albu.RandomRotate90(1.0),
        albu.HorizontalFlip(0.5),
        PadDifferentlyIfNeeded(512, 512, mask_value=0),
        AT.ToTensor()
    ])

    testset_gt = ImageDataset(root=TEST_ENHANCED_IMG_DIR,
                              transform=default_transform)

    trainset_2_gt = ImageDataset(root=ENHANCED2_IMG_DIR, transform=transform)

    testset_inp = ImageDataset(root=TEST_INPUT_IMG_DIR,
                               transform=default_transform)
    trainset_1_inp = ImageDataset(root=INPUT_IMG_DIR, transform=transform)

    train_loader_cross = torch.utils.data.DataLoader(
        ConcatDataset(trainset_1_inp, trainset_2_gt),
        num_workers=NUM_WORKERS,
        batch_size=BATCH_SIZE * GPUS_NUM,  # enlarge batch_size by a factor of len(device_ids)
        shuffle=True,
    )

    test_loader = torch.utils.data.DataLoader(
        ConcatDataset(testset_inp, testset_gt),
        num_workers=NUM_WORKERS,
        batch_size=BATCH_SIZE * GPUS_NUM,  # enlarge batch_size by a factor of len(device_ids)
        shuffle=False)
    print("Finished loading dataset")

    return train_loader_cross, test_loader
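A hedged usage sketch for the loaders built above; the (input, target) pairing assumes the custom ConcatDataset yields matched samples from the two wrapped datasets:

train_loader, test_loader = data_loader_mask()
for inputs, targets in train_loader:
    # inputs come from INPUT_IMG_DIR, targets from ENHANCED2_IMG_DIR,
    # both padded to 512x512 and converted to tensors.
    break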
Example #8
def augmentation(image_size, train=True):
    max_crop = image_size // 5
    if train:
        data_transform = A.Compose([
            A.Resize(image_size, image_size),
            A.Compose([
                A.OneOf([
                    A.RandomRain(p=0.1),
                    A.GaussNoise(mean=15),
                    A.GaussianBlur(blur_limit=10, p=0.4),
                    A.MotionBlur(p=0.2)
                ]),
                A.OneOf([
                    A.RGBShift(p=1.0,
                               r_shift_limit=(-10, 10),
                               g_shift_limit=(-10, 10),
                               b_shift_limit=(-10, 10)),
                    A.RandomBrightnessContrast(
                        brightness_limit=0.3, contrast_limit=0.1, p=1),
                    A.HueSaturationValue(hue_shift_limit=20, p=1),
                ],
                        p=0.6),
                A.OneOf([
                    A.CLAHE(clip_limit=2),
                    A.IAASharpen(),
                    A.IAAEmboss(),
                ]),
                A.OneOf([A.IAAPerspective(p=0.3),
                         A.ElasticTransform(p=0.1)]),
                A.OneOf([
                    A.Rotate(limit=25, p=0.6),
                    A.IAAAffine(
                        scale=0.9,
                        translate_px=15,
                        rotate=25,
                        shear=0.2,
                    )
                ],
                        p=1),
                A.Cutout(num_holes=1,
                         max_h_size=max_crop,
                         max_w_size=max_crop,
                         p=0.2)
            ],
                      p=1),
            A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            AT.ToTensor()
        ])
    else:
        data_transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    return data_transform
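Note that the two branches return pipelines with different call conventions: the albumentations pipeline is called with named numpy arrays and returns a dict, while the torchvision pipeline is a plain callable. A short sketch of what callers must do (the dummy image is illustrative):

import numpy as np

train_tf = augmentation(224, train=True)
test_tf = augmentation(224, train=False)

img = np.zeros((300, 300, 3), dtype=np.uint8)  # dummy RGB image
train_out = train_tf(image=img)['image']  # albumentations: keyword call, dict result
test_out = test_tf(img)                   # torchvision: direct call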
Example #9
def get_valid_augmentation():
    # for valid
    valid_transform = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(valid_transform)
Example #10
def main(model_state_path):
    # parameters setting
    params = {'batch_size': 16, 'shuffle': False, 'num_workers': 1}
    class_num = 2

    # tensorboard setting
    writer = SummaryWriter('runs/pcam_experiment_1')

    # device setting
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # print(torch.cuda.current_device())
    print(device)

    # define transform
    # transform = transforms.Compose([
    #     transforms.ToPILImage(),
    #     transforms.Resize(224),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                          std=[0.229, 0.224, 0.225]),
    # ])
    transform = albumentations.Compose([
        albumentations.Resize(224, 224),
        albumentations.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        AT.ToTensor()
    ])

    # load test data
    testset = Pcam('data/camelyonpatch_level_2_split_test_x.h5', 'data/camelyonpatch_level_2_split_test_y.h5', transform)
    testloader = data.DataLoader(testset, **params)

    # load model
    model = models.resnet50(pretrained=False)
    fc_in_feature = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Dropout(p=0.6),
        nn.Linear(fc_in_feature, 512, bias=True),
        nn.SELU(),
        nn.Dropout(p=0.8),
        nn.Linear(512, class_num, bias=True),
    )
    model.load_state_dict(torch.load(model_state_path))
    model.to(device)

    # define loss function
    criterion = nn.CrossEntropyLoss()

    # clock setting
    start = time.time()

    # test
    test(model, testloader, criterion, device, writer, 0, False)
    # testset.close()

    print('Total Time: {}'.format(time.time()-start))
Example #11
 def __init__(self, df: pd.DataFrame = None, datatype: str = 'train', img_ids: np.ndarray = None,
              transforms=albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
              preprocessing=None, path=None):
     self.df = df
     if datatype != 'test':
         self.data_folder = f"{path}/train_images"
     else:
         self.data_folder = f"{path}/test_images"
     self.img_ids = img_ids
     self.transforms = transforms
     self.preprocessing = preprocessing
Example #12
    def __init__(self, split):
        self.split = split
        self.aug = albumentations.Compose([
            albumentations.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            AT.ToTensor()
        ])

        if self.split == 'train':
            self.aug = albumentations.Compose([
                albumentations.HorizontalFlip(),
                albumentations.RandomBrightness(),
                albumentations.ShiftScaleRotate(rotate_limit=15,
                                                scale_limit=0.10),
                albumentations.HueSaturationValue(),
                albumentations.Cutout(num_holes=1, max_h_size=4, max_w_size=4, fill_value=0.5),
                albumentations.GaussNoise(),
                albumentations.ElasticTransform(),
                albumentations.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                AT.ToTensor()
            ])
Example #13
 def __init__(self, path: str = './data', folder: str = 'dataset_crf/lab', special_folder: str = '',
              df: object = None, datatype: object = 'train', dataset='crf',
              img_ids: object = None,
              transforms: object = albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
              preprocessing: object = None,
              use_mask: bool = False, use_corrected: bool = False,
              patch_width_ratio: float = 1. / 10, patch_height_ratio: float = 1. / 10,
              log_transform=False) -> object:
     super(MIPatchedDataset, self).__init__(path, folder, special_folder, df, datatype, dataset, img_ids, transforms,
                                            preprocessing, use_mask, use_corrected, log_transform)
     self.patch_width_ratio = patch_width_ratio
     self.patch_height_ratio = patch_height_ratio
Example #14
def get_training_augmentation():
    # for train
    train_transform = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(train_transform)
Example #15
 def __init__(self, files, labels, normalization, size, augmentation=None):
     if isinstance(size, list):
         size = tuple(size)
     self.size = size
     self.files = files
     self.labels = labels
     pipeline = []
     if augmentation is not None:
         pipeline.append(augmentation)
     pipeline.append(albumentations.Resize(*size))
     pipeline.append(albumtorch.ToTensor())
     self.pipeline = albumentations.Compose(pipeline)
     self.normalization = normalization
Example #16
    def train_transforms(self, before_norm=None, after_norm=None):
        transforms_list = []
        if before_norm:
            transforms_list = list(before_norm)  # copy so the caller's list is not mutated

        if self.normalize:
            transforms_list.append(A.Normalize(self.mean, self.stdev))

        if after_norm:
            transforms_list.extend(after_norm)

        transforms_list.append(AP.ToTensor())

        self.transforms = A.Compose(transforms_list)
Example #17
    def __init__(self,
                 transforms=albu.Compose(
                     [albu.HorizontalFlip(),
                      AT.ToTensor()]),
                 preprocessing=None,
                 k_folds=1):

        patch_loader = valid_PatchLoader(n_kfold=k_folds, seed=42)
        df = patch_loader.get_all_patches()

        self.len = df.shape[0]
        self.slide_path = df['slide_path']
        self.mask_path = df['mask_path']
        self.transforms = transforms
        self.preprocessing = preprocessing
Example #18
    def __init__(self, model_name="mobilenet_v2", device="cpu"):
        """
        MRI Pulse Sequence classification object.

        The implementation is built from *torchvision*.

        For using pretrained weights, initialize the classifier object by using:
        `PulseSequenceClassifier().from_pretrained()`

        The classifier supports both instance level (with `predict_instance` method)
        and study level (with `predict_study` method).

        :param model_name: str, default="mobilenet_v2"

            Load model structure from `torchvision`, default="mobilenet_v2".
            Currently supported models are: "mobilenet_v2".
        :param device: str, default="cpu"
            Move model and data to a specific device, default="cpu".
            Change `device` to `cuda` in case of GPU inference.
        """
        self.device = torch.device(device)
        self.model_name = model_name

        supported_models = list(model_urls.keys())
        if model_name not in supported_models:
            raise NotImplementedError(f"Currently supported models are: {supported_models}")

        self.label_dict = dict([
            (0, "FLAIR"),
            (1, "T1C"),
            (2, "T2"),
            (3, "ADC"),
            (4, "DWI"),
            (5, "TOF"),
            (6, "OTHER"),
        ])

        self.model = create_model(self.model_name, pretrained=False, n_classes=len(self.label_dict.keys()))
        self.model = self.model.to(self.device)
        # self.model = nn.DataParallel(self.model)
        self.model = WrappedModel(self.model)

        self.transform = A.Compose([
            A.Resize(256, 256),
            A.Normalize(),
            AT.ToTensor()
        ])
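A usage sketch following the docstring above; the inputs passed to the predict methods are assumptions, not taken from the source:

classifier = PulseSequenceClassifier(model_name='mobilenet_v2', device='cpu').from_pretrained()
label = classifier.predict_instance(slice_image)   # hypothetical single MRI slice
labels = classifier.predict_study(study_images)    # hypothetical whole study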
Example #19
 def __init__(self,
              df,
              datatype,
              img_ids,
              transforms=albu.Compose(
                  [albu.HorizontalFlip(),
                   AT.ToTensor()]),
              preprocessing=None,
              folder='../input/'):
     self.df = df
     if datatype != 'test':
         self.data_folder = f"{folder}train_images"
     else:
         self.data_folder = f"{folder}test_images"
     self.img_ids = img_ids
     self.transforms = transforms
     self.preprocessing = preprocessing
Example #20
def predict(image_path):
    word_to_idx = pickle.load(open('model/word_to_idx.pickle', 'rb'))
    idx_to_word = pickle.load(open('model/idx_to_word.pickle', 'rb'))
    model = ImageCaptioningModel.EncoderDecoder(
        embedding_dims=CONFIG.embedding_dims,
        vocab_size=len(word_to_idx),
        hidden_dims=CONFIG.hidden_dims,
        num_layers=CONFIG.num_layer,
        bidirectional=CONFIG.bidirectional,
        dropout=CONFIG.dropout)

    model.load_state_dict(torch.load('model/Image_Captioning.bin'))
    model.eval()

    mean = (0.485, 0.456, 0.406)  # ImageNet statistics
    std = (0.229, 0.224, 0.225)
    transform = alb.Compose([
        alb.Normalize(mean, std, always_apply=True),
        alb.Resize(224, 224, always_apply=True),
        AT.ToTensor()
    ])
    image = np.array(Image.open(image_path).convert('RGB'))
    image = transform(image=image)['image']

    image = image[None, :]
    sentence = []
    with torch.no_grad():
        x = model.encoder(image).unsqueeze(1)

        state = None
        for _ in range(max_len):
            x, state = model.decoder.rnn(x, state)
            predict = model.decoder.fc(x.squeeze(0))
            probs = torch.softmax(predict, dim=-1)
            word = probs.argmax(1)  # greedy decoding: most probable token
            prediction = idx_to_word.get(word.item())
            if prediction == '<EOS>':
                break
            sentence.append(prediction)
            x = model.decoder.embedding(word.unsqueeze(1))

    sentence = ' '.join(sentence)
    print(sentence)
Example #21
def get_transforms(phase, mean, std, list_transforms=None):
    if not list_transforms:
        list_transforms = []

    if phase == 'train':
        list_transforms.extend([
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
            albu.RandomBrightnessContrast(p=0.1,
                                          brightness_limit=0.1,
                                          contrast_limit=0.1)
        ])
    list_transforms.extend(
        [albu.Normalize(mean=mean, std=std),
         albu_pytorch.ToTensor()])

    list_transforms = albu.Compose(list_transforms)
    return list_transforms
Example #22
def strong_aug():
    # strong augmentation for train
    train_transform = [
        albu.Resize(height=configs.input_size, width=configs.input_size),
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.RandomRotate90(p=0.5),
        albu.OneOf([
            albu.CenterCrop(
                p=0.5, height=configs.input_size, width=configs.input_size),
            albu.ElasticTransform(
                p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            albu.GridDistortion(p=0.5),
            albu.OpticalDistortion(p=1, distort_limit=1, shift_limit=0.5),
        ],
                   p=0.8),
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225),
                       p=1),
        AT.ToTensor(),
    ]
    return albu.Compose(train_transform)
Example #23
 def __init__(self,
              data_folder: str,
              df: pd.DataFrame,
              im_ids: np.ndarray,
              transforms=albu.Compose(
                  [albu.HorizontalFlip(),
                   AT.ToTensor()]),
              preprocessing=None):
     """
     Attributes
         data_folder (str): path to the image directory
         df (pd.DataFrame): dataframe with the labels
          im_ids (np.ndarray): array of image names.
         transforms (albumentations.augmentation): transforms to apply
             before preprocessing. Defaults to HFlip and ToTensor
         preprocessing: ops to perform after transforms, such as
             z-score standardization. Defaults to None.
     """
     df["hasMask"] = ~df["EncodedPixels"].isna()
     self.df = df
     self.data_folder = data_folder
     self.img_ids = im_ids
     self.transforms = transforms
     self.preprocessing = preprocessing
Example #24
    def __init__(self, images_path: str = './data/dataset_hdr/HDR/cs/chroma/data/Nikon_D700/HDR_MATLAB_3x3/',
                 gt_path: str = './data/dataset_hdr/real_illum/real_illum',
                 df: object = None, datatype: object = 'train',
                 transforms: object = albu.Compose([albu.HorizontalFlip(), AT.ToTensor()]),
                 preprocessing: object = None,
                 log_transform=False, illumination_known=False) -> object:
        self.df = df
        self.datatype = datatype
        self.transforms = transforms
        self.preprocessing = preprocessing
        self.images_path = images_path
        self.gt_path = gt_path
        self.log_transform = log_transform
        self.illumination_known = illumination_known

        all_images = os.listdir(self.images_path)
        self.gt_names = os.listdir(self.gt_path)
        self.gt_names = list(map(lambda x: x[:-4], self.gt_names))
        self.gt_names = list(filter(lambda x: x[:-11] + '.hdr' in all_images, self.gt_names))
        self.image_names = list(map(lambda x: x[:-11] + '.hdr', self.gt_names))
        if datatype == 'train':
            self.image_names = self.image_names[0:-20]
        else:
            self.image_names = self.image_names[-19:]
Example #25
def load(train_augmentation=None,
         test_augmentation=None,
         mode='default_test_aug',
         mean=(0.5, 0.5, 0.5),
         stdev=(0.5, 0.5, 0.5),
         gpu_batch_size=128):
    import albumentations as A

    # Avoid mutable default arguments: the lists are extended below,
    # which would otherwise leak ToTensor entries across calls.
    train_augmentation = list(train_augmentation) if train_augmentation else []
    test_augmentation = list(test_augmentation) if test_augmentation else []

    channel_means = mean  # r, g, b channels
    channel_stdevs = stdev

    if mode == 'default_train_aug' or mode == 'default_aug':
        train_transform = AlbumentationTransforms([
            A.Normalize(mean=channel_means, std=channel_stdevs),
            AP.ToTensor()
        ])
    else:
        train_augmentation.extend([AP.ToTensor()])
        train_transform = AlbumentationTransforms(train_augmentation)

    # Test Phase transformations
    if mode == 'default_test_aug' or mode == 'default_aug':
        test_transform = AlbumentationTransforms([
            A.Normalize(mean=channel_means, std=channel_stdevs),
            AP.ToTensor()
        ])
    else:
        test_augmentation.extend([AP.ToTensor()])
        test_transform = AlbumentationTransforms(test_augmentation)
    # Get the train and test sets
    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=train_transform)
    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=test_transform)
    SEED = 1
    # CUDA?
    cuda = torch.cuda.is_available()
    print("CUDA Available?", cuda)

    # For reproducibility
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)

    # DataLoader arguments - typically fetched from the command line
    dataloader_args = dict(shuffle=True,
                           batch_size=gpu_batch_size,
                           num_workers=4,
                           pin_memory=True) if cuda else dict(shuffle=True,
                                                              batch_size=64)

    trainloader = torch.utils.data.DataLoader(trainset, **dataloader_args)
    testloader = torch.utils.data.DataLoader(testset, **dataloader_args)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    return classes, trainloader, testloader
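A usage sketch for load(): pass a custom training augmentation while keeping the default test pipeline (the flip augmentation and the CIFAR-10 statistics are illustrative choices, not taken from the source):

import albumentations as A

classes, trainloader, testloader = load(
    train_augmentation=[A.HorizontalFlip(p=0.5)],
    mode='default_test_aug',  # custom train aug, default test aug
    mean=(0.4914, 0.4822, 0.4465),  # commonly cited CIFAR-10 means
    stdev=(0.2470, 0.2435, 0.2616),
    gpu_batch_size=128)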
Example #26

    def __init__(self, transforms_list=None):
        # Avoid a mutable default: appending to a shared default list would
        # stack an extra ToTensor on every instantiation.
        transforms_list = list(transforms_list) if transforms_list else []
        transforms_list.append(AP.ToTensor())

        self.transforms = A.Compose(transforms_list)
Example #27

 def __init__(self, transforms_list=None):
     # The original rebound the argument to [], discarding any passed list;
     # build a fresh list but honor a passed-in prefix, avoiding the
     # mutable-default pitfall.
     transforms_list = list(transforms_list) if transforms_list else []
     transforms_list.append(A.Normalize(mean=0.5, std=0.5))
     transforms_list.append(AP.ToTensor())
     self.transforms = A.Compose(transforms_list)
Example #28
def main(batch_size, lr, seed, save_model, log_interval):
    # params setting
    params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 1}
    class_num = 2
    epoch_num = 11

    # cudnn setting
    cudnn.benchmark = True

    # tensorboard setting
    writer = SummaryWriter('runs/pcam_experiment_1')

    # random seed setting
    seed_setting(seed)

    # device setting
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    # print(torch.cuda.current_device())
    print(device)

    # define transforms
    # train_transforms = transforms.Compose([
    #     transforms.ToPILImage(),
    #     transforms.Resize(224),
    #     transforms.RandomHorizontalFlip(),
    #     transforms.RandomVerticalFlip(),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                          std=[0.229, 0.224, 0.225]),
    # ])
    # valid_transforms = transforms.Compose([
    #     transforms.ToPILImage(),
    #     transforms.Resize(224),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                          std=[0.229, 0.224, 0.225]),
    # ])
    train_transforms = albumentations.Compose([
        albumentations.Resize(224, 224),
        albumentations.RandomRotate90(p=0.5),
        albumentations.Transpose(p=0.5),
        albumentations.Flip(p=0.5),
        albumentations.OneOf([
            albumentations.CLAHE(clip_limit=2),
            albumentations.IAASharpen(),
            albumentations.IAAEmboss(),
            albumentations.RandomBrightness(),
            albumentations.RandomContrast(),
            albumentations.JpegCompression(),
            albumentations.Blur(),
            albumentations.GaussNoise()
        ],
                             p=0.5),
        albumentations.HueSaturationValue(p=0.5),
        albumentations.ShiftScaleRotate(shift_limit=0.15,
                                        scale_limit=0.15,
                                        rotate_limit=45,
                                        p=0.5),
        albumentations.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        AT.ToTensor()
    ])
    valid_transforms = albumentations.Compose([
        albumentations.Resize(224, 224),
        albumentations.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        AT.ToTensor()
    ])

    # load training set
    trainset = Pcam('data/camelyonpatch_level_2_split_train_x.h5',
                    'data/camelyonpatch_level_2_split_train_y.h5',
                    train_transforms)
    trainloader = data.DataLoader(trainset, **params)

    # load validation set
    validset = Pcam('data/camelyonpatch_level_2_split_valid_x.h5',
                    'data/camelyonpatch_level_2_split_valid_y.h5',
                    valid_transforms)
    validloader = data.DataLoader(validset, **params)

    # load pretrained model
    model = models.resnet50(pretrained=True)
    print(model)

    # freeze op
    # for param in model.parameters():
    #     param.requires_grad = False

    fc_in_feature = model.fc.in_features
    model.fc = nn.Sequential(
        nn.Dropout(p=0.6),
        nn.Linear(fc_in_feature, 512, bias=True),
        nn.SELU(),
        nn.Dropout(p=0.8),
        nn.Linear(512, class_num, bias=True),
    )
    model.to(device)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()

    # setting optimizer
    ignored_params = list(map(id, model.fc.parameters()))
    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())
    optimizer = torch.optim.Adam([{
        'params': base_params,
        'lr': 2e-4
    }, {
        'params': model.fc.parameters(),
        'lr': 4e-4
    }],
                                 weight_decay=1e-4)
    # optimizer = optim.Adam(model.fc.parameters(), lr=lr, weight_decay=1e-4)
    # lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
    # lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    # lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=1, epochs=16)

    # clock setting
    start = time.time()

    # train + valid
    for epoch in range(1, epoch_num + 1):
        print('learning rate: {}'.format(
            optimizer.state_dict()['param_groups'][0]['lr']))
        train(model, trainloader, criterion, optimizer, epoch, log_interval,
              device, writer)
        # trainset.close()
        test(model, validloader, criterion, device, writer, epoch, True)
        # step the scheduler after the optimizer has run (PyTorch >= 1.1 order)
        lr_scheduler.step()
        print('finish testing')
        # validset.close()
        if save_model:
            torch.save(model.state_dict(),
                       './checkpoints/epoch{}.pth'.format(epoch))
        print('finish saving epoch{}'.format(epoch))

    print('Finish Training, Total Time: {}'.format(time.time() - start))
Example #29
def albumentations_transforms(data_map,
                              data_stats,
                              do_img_aug=False,
                              im_size=64):
    transforms_list_map = {}
    transforms_map = {}
    for dname in data_map.keys():
        transforms_list_map[dname] = []

    # Resize
    size = (im_size, im_size)
    for k, v in transforms_list_map.items():
        v.append(
            A.Resize(height=size[0],
                     width=size[1],
                     interpolation=cv2.INTER_LANCZOS4,
                     always_apply=True))
    # Use data aug only for train data
    if do_img_aug:
        # RGB shift and Hue Saturation value
        for k, v in transforms_list_map.items():
            if k not in ("bg", "fg_bg"):
                continue
            v.append(
                A.OneOf([
                    A.RGBShift(r_shift_limit=20,
                               g_shift_limit=20,
                               b_shift_limit=20,
                               p=0.5),
                    A.HueSaturationValue(hue_shift_limit=20,
                                         sat_shift_limit=30,
                                         val_shift_limit=20,
                                         p=0.5)
                ],
                        p=0.5))
        # GaussNoise
        for k, v in transforms_list_map.items():
            if k not in ("bg", "fg_bg"):
                continue
            v.append(A.GaussNoise(p=0.5))
        # Random horizontal flipping
        if random.random() > 0.5:
            for k, v in transforms_list_map.items():
                v.append(A.HorizontalFlip(always_apply=True))
        # Random vertical flipping
        if random.random() > 0.7:
            for k, v in transforms_list_map.items():
                v.append(A.VerticalFlip(always_apply=True))
        # Random rotate
        if random.random() > 0.3:
            angle = random.uniform(-15, 15)
            for k, v in transforms_list_map.items():
                v.append(
                    A.Rotate(limit=(angle, angle),
                             interpolation=cv2.INTER_LANCZOS4,
                             always_apply=True))
        # Coarse dropout
        for k, v in transforms_list_map.items():
            if k not in ("bg", "fg_bg"):
                continue
            v.append(
                A.CoarseDropout(max_holes=2,
                                fill_value=0,
                                max_height=size[0] // 3,
                                max_width=size[1] // 4,
                                p=0.5))
            # fill_value=data_stats[k]["mean"]*255.0

    # for k, v in transforms_list_map.items():
    # 	v.append(
    # 		A.Normalize(
    # 			mean=data_stats[k]["mean"],
    # 			std=data_stats[k]["std"],
    # 			max_pixel_value=255.0,
    # 			always_apply=True
    # 		)
    # 	)
    for k, v in transforms_list_map.items():
        v.append(AP.ToTensor())

    for k, v in transforms_list_map.items():
        np_img = np.array(data_map[k])
        if np_img.ndim == 2:
            np_img = np_img[:, :, np.newaxis]
        transforms_map[k] = A.Compose(v, p=1.0)(image=np_img)["image"]

    # transforms_map["fg_bg_mask"] = torch.gt(transforms_map["fg_bg_mask"], 0.8).float()
    return transforms_map
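A hypothetical call to the function above; the keys follow the names it checks for, and the blank PIL images are placeholders:

from PIL import Image

data_map = {
    'bg': Image.new('RGB', (160, 160)),
    'fg_bg': Image.new('RGB', (160, 160)),
    'fg_bg_mask': Image.new('L', (160, 160)),
}
out = albumentations_transforms(data_map, data_stats={}, do_img_aug=True, im_size=64)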
Example #30
 def __init__(self, normalize=None):
     super(ToTensor, self).__init__(always_apply=True, p=1)
     self.alb_totensor = alb_pytorch.ToTensor()
     self.normalize = normalize
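The wrapper above stores an optional normalize step but its call path is not shown; a plausible sketch (an assumption, not the original method):

 def apply(self, img, **params):
     # Hypothetical: run the wrapped albumentations ToTensor, then the
     # stored normalization, if any.
     tensor = self.alb_totensor(image=img)['image']
     if self.normalize is not None:
         tensor = self.normalize(tensor)
     return tensor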