# Example 1
def rotationCaeAugmentationsNoJitterNoFlip(dataset_config, given_shape):
    """
    Builds the augmentation pipelines for training color restoration autoencoders.
    Pipeline: MaxDivNormalizer --> RandomCropWithResize --> Random90xRotation --> ClipByValue

    :param dataset_config: (Dictionary) The config of the dataset to train and val on.
    :param given_shape: (Array) Array containing the shape of the data from the input pipeline. E.g. [None,None,3]
    :return: dataset_augmentor_train: (DatasetAugmentationBuilder) Handles the augmentation of the training dataset.
    :return: dataset_augmentor_val: (DatasetAugmentationBuilder) Handles the augmentation of the validation datasets.
    """
    shape = dataset_config["dataShape"]
    # Leading 1 is the batch dimension expected by the crop op.
    crop_shape = [1, shape[0], shape[1], shape[2]]

    augmentor = DatasetAugmentationBuilder(
        preprocessors=[
            MaxDivNormalizer(255.0),
            RandomCropWithResize(crop_shape=crop_shape),
        ],
        generators=[Random90xRotation()],
        output_preprocessors=[ClipByValue()])

    # Validation reuses the identical pipeline.
    return augmentor, augmentor
# Example 2
def denoisingCaeAugmentationsNoJitter(dataset_config, given_shape):
    """
    Builds the augmentation pipelines for training denoising autoencoders.
    Pipeline: MaxDivNormalizer --> RandomCropWithResize --> RandomHorizontalFlip --> RandomNormalNoise
              then both the clean "image" and the generated "noisy_image" are clipped to the valid range.

    :param dataset_config: (Dictionary) The config of the dataset to train and val on.
    :param given_shape: (Array) Array containing the shape of the data from the input pipeline. E.g. [None,None,3]
    :return: dataset_augmentor_train: (DatasetAugmentationBuilder) Handles the augmentation of the training dataset.
    :return: dataset_augmentor_val: (DatasetAugmentationBuilder) Handles the augmentation of the validation datasets.
    """
    shape = dataset_config["dataShape"]
    # Leading 1 is the batch dimension expected by the crop op.
    crop_shape = [1, shape[0], shape[1], shape[2]]

    add_noise = RandomNormalNoise(input_name="image",
                                  output_name="noisy_image")
    clip_noisy = ClipByValue(input_name="noisy_image",
                             output_name="noisy_image")

    augmentor = DatasetAugmentationBuilder(
        preprocessors=[
            MaxDivNormalizer(255.0),
            RandomCropWithResize(crop_shape=crop_shape),
            RandomHorizontalFlip(),
        ],
        generators=[add_noise],
        # Clip the clean image (default names) and the noisy copy separately.
        output_preprocessors=[ClipByValue(), clip_noisy])

    # Validation reuses the identical pipeline.
    return augmentor, augmentor
# Example 3
def caeAugmentations(dataset_config, given_shape):
    """
    Builds the base augmentation pipelines for training plain autoencoders.
    Pipeline: MaxDivNormalizer --> RandomCropWithResize --> RandomHorizontalFlip --> RandomColorJitter --> ClipByValue

    :param dataset_config: (Dictionary) The config of the dataset to train and val on.
    :param given_shape: (Array) Array containing the shape of the data from the input pipeline. E.g. [None,None,3]
    :return: dataset_augmentor_train: (DatasetAugmentationBuilder) Handles the augmentation of the training dataset.
    :return: dataset_augmentor_val: (DatasetAugmentationBuilder) Handles the augmentation of the validation datasets.
    """
    shape = dataset_config["dataShape"]
    # Leading 1 is the batch dimension expected by the crop op.
    crop_shape = [1, shape[0], shape[1], shape[2]]

    steps = [
        MaxDivNormalizer(255.0),
        RandomCropWithResize(crop_shape=crop_shape),
        RandomHorizontalFlip(),
        RandomColorJitter(),
        ClipByValue(),
    ]
    augmentor = DatasetAugmentationBuilder(preprocessors=steps)

    # Validation reuses the identical pipeline.
    return augmentor, augmentor
# Example 4
def simClrAugmentations(dataset_config, given_shape):
    """
    Builds the augmentation pipelines for contrastive (SimCLR-style) training.
    Two independently augmented views are produced: "image2" is cropped from
    the source image first, then "image" is cropped in place; both views are
    flipped, color-jittered and clipped separately.

    :param dataset_config: (Dictionary) The config of the dataset to train and val on.
    :param given_shape: (Array) Array containing the shape of the data from the input pipeline. E.g. [None,None,3]
    :return: dataset_augmentor_train: (DatasetAugmentationBuilder) Handles the augmentation of the training dataset.
    :return: dataset_augmentor_val: (DatasetAugmentationBuilder) Handles the augmentation of the validation datasets.
    """
    shape = dataset_config["dataShape"]
    # Leading 1 is the batch dimension expected by the crop op.
    crop_shape = [1, shape[0], shape[1], shape[2]]

    normalizer = MaxDivNormalizer(255.0)

    # Order matters here! The crop into "image2" must run before "image" is
    # cropped in place, otherwise the second view would be a crop of a crop.
    crop_to_view2 = RandomCropWithResize(input_name="image",
                                         output_name="image2",
                                         crop_shape=crop_shape)
    crop_in_place = RandomCropWithResize(input_name="image",
                                         output_name="image",
                                         crop_shape=crop_shape)

    flip_view1 = RandomHorizontalFlip(input_name="image",
                                      output_name="image")
    flip_view2 = RandomHorizontalFlip(input_name="image2",
                                      output_name="image2")

    jitter_view1 = RandomColorJitter(input_name="image",
                                     output_name="image")
    jitter_view2 = RandomColorJitter(input_name="image2",
                                     output_name="image2")

    clip_view1 = ClipByValue(input_name="image", output_name="image")
    clip_view2 = ClipByValue(input_name="image2", output_name="image2")

    augmentor = DatasetAugmentationBuilder(
        preprocessors=[normalizer],
        generators=[
            crop_to_view2, crop_in_place, flip_view1, flip_view2,
            jitter_view1, jitter_view2
        ],
        output_preprocessors=[clip_view1, clip_view2])

    # Validation reuses the identical pipeline.
    return augmentor, augmentor
# Example 5
def prepareDataset(provider,
                   dataset_config,
                   xfold_seed=None,
                   augment_data=False,
                   normalize_data="zeroMeanUnitVariance",
                   rescale_input=None):
    """
    Prepares the given dataset for the experiment: If a xfold_seed is given the dataset will be shuffled.

    :param provider: (ADataProvider) The DataProvider to load the dataset.
    :param dataset_config: (Dictionary) The config of the dataset.
    :param xfold_seed: (Integer) The seed for the shuffle operation. None by default.
        Note: 0 is treated as a valid seed and triggers shuffling.
    :param augment_data: (Boolean) If True the data will be augmented as defined below. False by default.
    :param normalize_data: (String) Normalization mode: "zeroMeanUnitVariance" or "maxDiv".
        Any other value disables normalization. "zeroMeanUnitVariance" by default.
    :param rescale_input: (Array) If set the input data will be rescaled. None by default.
    :return: dataset: (Dictionary) The shuffled dataset in the given split.
    :return: dataset_generator: (keras.preprocessing.image.ImageDataGenerator) The keras generator for the dataset.
    """
    dataset_split = dataset_config["splitOfDataset"]

    dataset = provider.getSplittedDatasetInNumpy()
    # Explicit None check: a seed of 0 is falsy but is a legitimate seed.
    if xfold_seed is not None:
        dataset = shuffelDatasetForXFold(dataset, xfold_seed, dataset_split)

    if normalize_data == "zeroMeanUnitVariance":
        dataset = ZeroMeanUnitVarianceNormalizer().preprocessingInNumpy(
            dataset)
    elif normalize_data == "maxDiv":
        dataset = MaxDivNormalizer(255.0).preprocessingInNumpy(dataset)

    if rescale_input:
        dataset = rescaleImages(dataset, rescale_input)

    if augment_data:
        # Light geometric augmentation; values kept mild to preserve labels.
        dataset_generator = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
    else:
        dataset_generator = ImageDataGenerator()

    # Fit statistics (e.g. for featurewise options) on the training split.
    dataset_generator.fit(dataset["x_train"])

    return dataset, dataset_generator
# Example 6
def targetTaskAugmentations(dataset_config, given_shape):
    """
    Builds the base augmentation pipelines for training target models.
    Train: OneHot + MaxDivNormalizer --> RandomCropWithResize --> RandomHorizontalFlip --> RandomColorJitter --> ClipByValue
    Val:   OneHot + MaxDivNormalizer --> (CenterCropWithResize --> ClipByValue, only if the
           incoming shape differs from the configured data shape)

    :param dataset_config: (Dictionary) The config of the dataset to train and val on.
    :param given_shape: (Array) Array containing the shape of the data from the input pipeline. E.g. [None,None,3]
    :return: dataset_augmentor_train: (DatasetAugmentationBuilder) Handles the augmentation of the training dataset.
    :return: dataset_augmentor_val: (DatasetAugmentationBuilder) Handles the augmentation of the validation datasets.
    """
    data_shape = dataset_config["dataShape"]
    # Leading 1 is the batch dimension expected by the crop ops.
    crop_shape = [1, data_shape[0], data_shape[1], data_shape[2]]

    # Idiomatic lookup with default instead of `in dataset_config.keys()`.
    label_name = dataset_config.get("labelName", "label")

    max_div_images = MaxDivNormalizer(255.0)
    one_hot = OneHot(dataset_config["numClasses"],
                     input_name=label_name,
                     output_name="label")

    dataset_augmentor_train = DatasetAugmentationBuilder(preprocessors=[
        one_hot, max_div_images,
        RandomCropWithResize(crop_shape=crop_shape),
        RandomHorizontalFlip(),
        RandomColorJitter(),
        ClipByValue()
    ])

    val_preprocessors = [one_hot, max_div_images]
    # Only crop/clip validation data when the pipeline shape does not already
    # match the configured data shape.
    if list(given_shape) != list(data_shape):
        val_preprocessors.append(CenterCropWithResize(crop_shape=crop_shape))
        val_preprocessors.append(ClipByValue())

    dataset_augmentor_val = DatasetAugmentationBuilder(
        preprocessors=val_preprocessors)

    return dataset_augmentor_train, dataset_augmentor_val