def test_init_shared_label_space(numpy_data, shared_label_space):
    """Class ids are remapped per task unless the label space is shared."""
    x, y = numpy_data
    dataset = InMemoryDataset(x, y)

    incremental_trsfs = [
        [],
        [transforms.RandomAffine(degrees=[40, 50])],
        [transforms.RandomAffine(degrees=[85, 95])],
    ]

    scenario = TransformationIncremental(
        cl_dataset=dataset,
        incremental_transformations=incremental_trsfs,
        shared_label_space=shared_label_space
    )

    for task_id, taskset in enumerate(scenario):
        assert taskset.nb_classes == NB_CLASSES
        classes = taskset.get_classes()
        if shared_label_space:
            # Labels are reused across tasks: always within [0, NB_CLASSES).
            assert classes.min() == 0
            assert classes.max() == NB_CLASSES - 1
        else:
            # Each task occupies its own contiguous label range.
            assert classes.min() == NB_CLASSES * task_id
            assert classes.max() == NB_CLASSES * (task_id + 1) - 1
Esempio n. 2
0
def test_get_task_transformation(numpy_data):
    """get_task_transformation composes task-specific transforms before the base ones."""
    x, y = numpy_data
    dataset = InMemoryDataset(x, y)

    incremental_trsfs = [
        [],
        [transforms.RandomAffine(degrees=[40, 50])],
        [transforms.RandomAffine(degrees=[85, 95])],
    ]

    base_transformations = [
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ]

    scenario = TransformationIncremental(
        cl_dataset=dataset,
        incremental_transformations=incremental_trsfs,
        base_transformations=base_transformations)

    for task_id, taskset in enumerate(scenario):
        # Expected order: task transforms first, then the base transforms.
        expected = transforms.Compose(
            incremental_trsfs[task_id] + base_transformations)
        actual = scenario.get_task_transformation(task_id)

        # Compare the string representations of the two compositions.
        assert repr(expected) == repr(actual)
Esempio n. 3
0
 def __init__(self, data_root, effect):
     """Dataset over paired effect / figure images stored as '<i>.png' files.

     :param data_root: directory containing '<effect>' and '<effect>-figure' subdirectories
     :param effect: name of the effect subdirectory to load
     """
     self.data_root = os.path.join(data_root, effect)
     self.data_figure_root = os.path.join(data_root, effect + '-figure')
     self.effect = effect
     # File names are purely numeric ('0.png', '1.png', ...), so sort numerically.
     self.data_list = sorted(os.listdir(self.data_root), key=lambda x: int(x.replace('.png', '')))
     self.data_figure_list = sorted(os.listdir(self.data_figure_root), key=lambda x: int(x.replace('.png', '')))
     # (height, width) of the first image of each kind — assumes every image
     # in a directory shares its shape; TODO confirm.
     self.effect_shape = cv2.imread(os.path.join(self.data_root, self.data_list[0])).shape[:2]
     self.figure_shape = cv2.imread(os.path.join(self.data_figure_root, self.data_figure_list[0])).shape[:2]
     # self.scaled_shape = (int(self.train_shape[0] * 0.5), int(self.train_shape[1] * 0.8))
     # Half the size difference, used below to pad effect images up to the figure size.
     diff_h = (self.figure_shape[0] - self.effect_shape[0]) // 2
     diff_w = (self.figure_shape[1] - self.effect_shape[1]) // 2
     self.target_transforms = transforms.Compose([
         transforms.ToPILImage(),
         transforms.RandomAffine(degrees=(-20, 20), translate=(0.2, 0.3), scale=(0.5, 1.1), ),
     ])
     self.source_transforms = transforms.Compose([
         transforms.ToPILImage(),
         transforms.Pad((diff_w, diff_h, diff_w, diff_h)),  # left, top, right, bottom
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
     ])
     self.final_transforms = transforms.Compose([
         transforms.ToPILImage(),
         transforms.RandomAffine(degrees=(-20, 20), translate=(0.2, 0.5), scale=(0.5, 1.1), ),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
     ])
     # Last index appears reserved; usable length is len(data_list) - 1 —
     # TODO confirm against __getitem__ (not visible here).
     self.length = len(self.data_list) - 1
Esempio n. 4
0
def get_transforms(dataset, aug: Union[List, str], cutout: int):
    """Return (train_transform, test_transform) for the named dataset.

    ImageNet variants are delegated to _get_imagenet_transforms. All other
    supported datasets get dataset-specific augmentation followed by a
    ToTensor + Normalize tail; the test transform is the tail only.

    Raises ValueError for unknown dataset names.
    """
    if 'imagenet' in dataset:
        return _get_imagenet_transforms()

    if dataset == 'cifar10':
        mean, std = ([0.49139968, 0.48215827, 0.44653124],
                     [0.24703233, 0.24348505, 0.26158768])
        aug_transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
        ]
    elif dataset == 'cifar100':
        mean, std = [0.507, 0.487, 0.441], [0.267, 0.256, 0.276]
        aug_transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
        ]
    elif dataset == 'svhn':
        mean, std = [0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.20100]
        aug_transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
        ]
    elif dataset == 'mnist':
        mean, std = [0.13066051707548254], [0.30810780244715075]
        aug_transf = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1),
        ]
    elif dataset == 'fashionmnist':
        mean, std = [0.28604063146254594], [0.35302426207299326]
        aug_transf = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1),
            transforms.RandomVerticalFlip(),
        ]
    else:
        raise ValueError('dataset not recognized: {}'.format(dataset))

    normalize = [transforms.ToTensor(), transforms.Normalize(mean, std)]
    train_transform = transforms.Compose(aug_transf + normalize)
    test_transform = transforms.Compose(normalize)

    # Extra augmentation / cutout is applied in place on the train transform.
    _add_augs(train_transform, aug, cutout)

    return train_transform, test_transform
Esempio n. 5
0
def test_init_range(numpy_data):
    """TransformationIncremental accepts range-style (two-value) degree lists."""
    x, y = numpy_data
    dataset = InMemoryDataset(x, y)

    per_task_trsfs = [
        [],
        [transforms.RandomAffine(degrees=[40, 50])],
        [transforms.RandomAffine(degrees=[85, 95])],
    ]

    # Construction alone is the assertion: it must not raise.
    scenario = TransformationIncremental(
        cl_dataset=dataset, incremental_transformations=per_task_trsfs)
Esempio n. 6
0
 def get_simclr_pipeline_transform(size, s=1, num_aug=5):
     """Return a set of data augmentation transformations as described in the SimCLR paper.

     :param size: target crop size for RandomResizedCrop
     :param s: colour-jitter strength multiplier
     :param num_aug: 5 for the standard SimCLR pipeline, 7 to additionally
         apply random rotation and a random affine transform
     :raises ValueError: if num_aug is neither 5 nor 7 (the original code
         fell through and raised UnboundLocalError on return)
     """
     color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s,
                                           0.2 * s)
     if num_aug == 5:
         data_transforms = transforms.Compose([
             transforms.RandomResizedCrop(size=size),
             transforms.RandomHorizontalFlip(),
             transforms.RandomApply([color_jitter], p=0.8),
             transforms.RandomGrayscale(p=0.2),
             GaussianBlur(kernel_size=int(0.1 * size)),
             transforms.ToTensor()
         ])
     elif num_aug == 7:
         data_transforms = transforms.Compose([
             transforms.RandomResizedCrop(size=size),
             transforms.RandomHorizontalFlip(),
             transforms.RandomApply([color_jitter], p=0.8),
             transforms.RandomGrayscale(p=0.2),
             GaussianBlur(kernel_size=int(0.1 * size)),
             transforms.RandomRotation(degrees=45),
             transforms.RandomAffine(degrees=45),
             transforms.ToTensor()
         ])
     else:
         # Fail fast with a clear message instead of UnboundLocalError below.
         raise ValueError("num_aug must be 5 or 7, got {}".format(num_aug))
     return data_transforms
Esempio n. 7
0
    def get_random_region_mask(self, x):
        """
        Chooses random masks from `self.segmentation_masks`.

        :param (torch.Tensor) x: tensor image
        :return: (torch.Tensor) tensor shaped like ``x`` where each batch
            element holds one randomly chosen, affine-distorted mask
        """
        mask = torch.zeros_like(x)
        batch_size, img_height, img_width = x.shape[0], x.shape[-2], x.shape[
            -1]

        # Apply distortions in the form of affine transformations to the masks for diversification
        # NOTE(review): `fillcolor` is the legacy RandomAffine kwarg (renamed
        # `fill` in newer torchvision) — confirm the pinned version supports it.
        transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((img_height, img_width)),
            transforms.RandomAffine(degrees=(-45, 45),
                                    translate=(0.25, 0.25),
                                    shear=(-10, 10),
                                    fillcolor=0),
            transforms.ToTensor(),
        ])

        for i in range(batch_size):
            # repeat(3, 1, 1) suggests single-channel masks expanded to 3
            # channels before transforming — TODO confirm mask shape.
            segmentation_mask = random.choice(self.segmentation_masks).repeat(
                3, 1, 1)
            mask[i] = transform(segmentation_mask)

        return mask
Esempio n. 8
0
def create_dataloader(data_path,
                      target_size,
                      train_val_split,
                      batch_size,
                      verbose: bool = False) -> Dict[str, DataLoader]:
    """Create DataLoaders keyed by split name ('train', 'test', maybe 'val').

    The training split gets colour-jitter + random-affine augmentation on
    top of resizing and normalization; the test split is only resized and
    normalized. A 'val' loader is added when train_val_split < 1.0.
    """
    width, height = target_size

    train_transform = transforms.Compose([
        transforms.Resize((width, height)),
        transforms.ColorJitter(brightness=0.2,
                               contrast=0.2,
                               saturation=0.5,
                               hue=0.5),
        transforms.RandomAffine(degrees=10,
                                translate=(0.05, 0.05),
                                scale=(0.95, 1.05),
                                shear=10),
        transforms.ToTensor(),
        transforms.Normalize([0.6205, 0.6205, 0.6205],
                             [0.1343, 0.1343, 0.1343])
    ])
    test_transform = transforms.Compose([
        transforms.Resize((width, height)),
        transforms.ToTensor(),
        transforms.Normalize([0.6205, 0.6205, 0.6205],
                             [0.1343, 0.1343, 0.1343])
    ])
    data_transforms = {'train': train_transform, 'test': test_transform}

    # Dataset class is chosen from the path name ("car" anywhere -> CAR).
    dataset_cls = CAR if "car" in data_path.lower() else CVL
    dataset = dataset_cls(data_path,
                          transform=data_transforms,
                          train_val_split=train_val_split,
                          verbose=verbose)
    if verbose:
        print(dataset)

    # A validation split exists only when some training data was held out.
    loader_names = ['train', 'test']
    if train_val_split < 1.0:
        loader_names.append('val')

    return {
        name: DataLoader(dataset.subsets[name],
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=4)
        for name in loader_names
    }
Esempio n. 9
0
 def __init__(self, root):
     """Download the base dataset into *root* and build the augmentation pipeline.

     Pipeline: random affine jitter -> tensor -> Normalize(0, 1) -> additive
     Gaussian noise.
     """
     super().__init__(root, download=True)
     # NOTE(review): Normalize(0, 1) subtracts 0 and divides by 1, i.e. it is
     # a no-op — presumably a placeholder; confirm intent.
     self.my_transform = transforms.Compose(
         [transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5),
          transforms.ToTensor(),
          transforms.Normalize(0, 1),
          my_transforms.AddGaussianNoise(0, 0.1)]
     )
 def __init__(self, resize_to=(224, 224)):
     """Build a transform: full-circle random rotation, center crop, tensor
     conversion, ImageNet normalization.

     :param resize_to: size passed to CenterCrop. NOTE(review): despite the
         name this crops rather than resizes — confirm that is intentional.
     """
     list_of_transforms = [
         transforms.RandomAffine(degrees=(-180, 180)),
         transforms.CenterCrop(size=resize_to),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])
     ]
     self.transform = transforms.Compose(list_of_transforms)
Esempio n. 11
0
def lfw(stage, configs=None, augment=False, tta=False, tta_size=48):
    """Load the LFW-style image folder for *stage*.

    With augment=True the result is a ConcatDataset of four views of the
    same folder: translated+noisy, rotated right, rotated left, and the
    un-augmented base. tta / tta_size are currently unused.
    """
    # Kept for parity with the original; normalization is commented out of
    # the pipelines below.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    folder_path = os.path.join(configs["data_path"], f"{stage}")

    def folder(mid, post=()):
        # ImageFolder over Resize(224) + *mid* PIL transforms + ToTensor + *post*.
        return datasets.ImageFolder(
            folder_path,
            transforms.Compose([transforms.Resize(224), *mid,
                                transforms.ToTensor(), *post]),
        )

    base = folder([])
    if not augment:
        return base

    rot_right = folder([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation((1, 20)),
        transforms.ColorJitter(),
    ])
    rot_left = folder([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation((-20, -1)),
        transforms.ColorJitter(),
    ])
    shifted = folder(
        [
            transforms.RandomHorizontalFlip(),
            transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
            transforms.ColorJitter(),
        ],
        post=[AddGaussianNoise(0., 0.05)],
    )

    # Same concatenation order as the original implementation.
    return torch.utils.data.ConcatDataset([shifted, rot_right, rot_left, base])
Esempio n. 12
0
def gen_dataloaders(indir,
                    val_split=0.05,
                    shuffle=True,
                    batch_size=4,
                    seed=42,
                    img_size=224,
                    cuda=True):
    """Split a MaskDataset directory into (train_loader, valid_loader).

    Both loaders read the same files; disjoint index sets are enforced via
    SubsetRandomSampler, with val_split of the data held out for validation.
    """
    train_tf = transforms.Compose([
        transforms.RandomResizedCrop(img_size, scale=(0.1, 1.0)),
        transforms.RandomAffine(10.),
        transforms.RandomRotation(13.),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    valid_tf = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
    ])

    train_ds = MaskDataset(path=indir, transform=train_tf)
    valid_ds = MaskDataset(path=indir, transform=valid_tf)

    # Index split: the first `n_val` (optionally shuffled) indices go to
    # validation, the remainder to training.
    n_samples = len(train_ds)
    indices = list(range(n_samples))
    n_val = int(np.floor(val_split * n_samples))
    if shuffle:
        np.random.seed(seed)
        np.random.shuffle(indices)
    train_idx, valid_idx = indices[n_val:], indices[:n_val]

    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}

    train_loader = torch.utils.data.DataLoader(
        train_ds,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx),
        **loader_kwargs)
    valid_loader = torch.utils.data.DataLoader(
        valid_ds,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(valid_idx),
        **loader_kwargs)
    return train_loader, valid_loader
def test_init(numpy_data):
    """End-to-end check: transformed samples differ per task, raw data does not.

    Uses fixed-angle affine transforms (degrees=[45, 45] and [90, 90]) so
    applying the same transform to the raw reference data reproduces the
    scenario's output exactly.
    """
    x, y = numpy_data
    dummy = InMemoryDataset(x, y, train='train')

    Trsf_0 = []
    Trsf_1 = [transforms.RandomAffine(degrees=[45, 45])]
    Trsf_2 = [transforms.RandomAffine(degrees=[90, 90])]

    list_transf = [Trsf_0, Trsf_1, Trsf_2]

    scenario = TransformationIncremental(
        cl_dataset=dummy, incremental_transformations=list_transf
    )

    ref_data = None
    raw_ref_data = None
    for task_id, taskset in enumerate(scenario):

        samples, _, _ = taskset.get_random_samples(10)
        # we need raw data to apply same transformation as the TransformationIncremental class
        raw_samples, _, _ = taskset.get_raw_samples(range(10))

        if task_id == 0:
            # Task 0 applies no extra transform; keep it as the reference.
            ref_data = samples
            raw_ref_data = raw_samples
        else:
            # we verify that data has changed
            assert not torch.all(ref_data.eq(samples))

            assert (raw_samples == raw_ref_data
                    ).all()  # raw data should be the same in this scenario

            # we test transformation on one data point and verify if it is applied
            trsf = list_transf[task_id][0]
            raw_sample = Image.fromarray(raw_ref_data[0].astype("uint8"))
            trsf_data = trsf(raw_sample)
            trsf_data = transforms.ToTensor()(trsf_data)

            assert torch.all(trsf_data.eq(samples[0]))
0
def test_list_transforms(fake_data):
    """A transforms list whose length differs from nb_tasks must be rejected."""
    nb_tasks = 5
    # One transform list per task, deliberately one short of nb_tasks.
    trsf_lists = [[transforms.RandomAffine(degrees=[0, 90])]
                  for _ in range(nb_tasks - 1)]

    # should fail since nb_task != len(trsf_lists)
    with pytest.raises(ValueError) as e:
        scenario = ClassIncremental(
            cl_dataset=fake_data,
            increment=2,
            transformations=trsf_lists,
        )
def augment_image(img):
    """Clean the image's odd half-transparent background, then rotate randomly.

    Pixels whose alpha channel equals 63 (the 'weird half transparent'
    background artifact) are set to fully transparent black, after which a
    random rotation in [-180, 180] degrees is applied.

    :param img: RGBA PIL image
    :return: transformed PIL image
    """
    # Vectorized replacement for the original per-pixel double loop: one
    # boolean mask over the alpha channel instead of O(H*W) Python iterations.
    arr = np.array(img)
    arr[arr[..., 3] == 63] = (0, 0, 0, 0)
    img = Image.fromarray(arr)

    # Create and apply transform
    transform = transforms.RandomAffine(degrees=[-180, 180], )
    transformed_img = transform(img)

    return transformed_img
Esempio n. 16
0
    def get_transforms(self) -> tuple:
        """Return (train_transform, test_transform) for MNIST-style data.

        Training prepends a mild random affine jitter to the shared
        ToTensor + Normalize tail; testing uses the tail only.
        """
        mean = [0.13066051707548254]
        std = [0.30810780244715075]

        tail = [transforms.ToTensor(), transforms.Normalize(mean, std)]
        augmentation = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1),
        ]

        return (transforms.Compose(augmentation + tail),
                transforms.Compose(tail))
    def get_transforms(self) -> tuple:
        """Return (train_transform, test_transform) for FashionMNIST-style data.

        Training adds random affine jitter plus a vertical flip before the
        shared ToTensor + Normalize tail; testing uses the tail only.
        """
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1),
            transforms.RandomVerticalFlip()
        ]

        # Shared tail applied to both splits.
        normalize = [transforms.ToTensor(), transforms.Normalize(MEAN, STD)]

        train_transform = transforms.Compose(transf + normalize)
        test_transform = transforms.Compose(normalize)

        return train_transform, test_transform
def get_transforms(opt):
    """
    Return Composed torchvision transforms based on specified arguments.

    Returns None when "none" appears in opt.input_transforms; otherwise a
    RandomOrder over the selected transforms ("all" enables every one).
    """
    selected = opt.input_transforms
    if "none" in selected:
        return None

    every = "all" in selected

    chosen = []
    if every or "vflip" in selected:
        chosen.append(transforms.RandomVerticalFlip())
    if every or "hflip" in selected:
        chosen.append(transforms.RandomHorizontalFlip())
    if every or "affine" in selected:
        affine = transforms.RandomAffine(
            degrees=10, translate=(0.1, 0.1), scale=(0.8, 1.2), shear=20)
        chosen.append(affine)
    if every or "perspective" in selected:
        chosen.append(transforms.RandomPerspective())

    return transforms.RandomOrder(chosen)
Esempio n. 19
0
def get_transforms(config, image_size=None):
    """Build {'train': ..., 'val': ...} transform pipelines from a config.

    :param config: configuration object exposing get_dictionary()
    :param image_size: explicit size override; when None the size is looked
        up in resize_size_dict by estimator name, falling back to 32
    :return: dict with 'train' and 'val' transforms.Compose pipelines

    Fixes the original no-op ``image_size = image_size`` branch and
    flattens the nested if/else into guard-clause early returns.
    """
    config = config.get_dictionary()
    # Resolve the working image size: explicit arg > per-estimator table > 32.
    if image_size is None:
        image_size = resize_size_dict.get(config['estimator'], 32)

    val_transforms = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
    ])

    if not check_for_bool(config['aug']):
        # No augmentation: the train pipeline mirrors the validation one.
        return {
            'train':
            transforms.Compose([
                transforms.Resize(image_size),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
            ]),
            'val':
            val_transforms,
        }

    if check_for_bool(config['auto_aug']):
        # from .transforms import AutoAugment
        return {
            'train':
            transforms.Compose([
                # AutoAugment(),
                transforms.Resize(image_size),
                transforms.RandomCrop(image_size,
                                      padding=int(image_size / 8)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
            ]),
            'val':
            val_transforms,
        }

    # Manual augmentation: optional jitter/affine, then crops and flips.
    transform_list = []
    if check_for_bool(config['jitter']):
        transform_list.append(
            transforms.ColorJitter(brightness=config['brightness'],
                                   saturation=config['saturation'],
                                   hue=config['hue']))
    if check_for_bool(config['affine']):
        transform_list.append(
            transforms.RandomAffine(degrees=config['degree'],
                                    shear=config['shear']))

    transform_list.append(transforms.RandomResizedCrop(image_size))
    transform_list.append(transforms.RandomCrop(image_size, padding=4))

    if check_for_bool(config['random_flip']):
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list.append(transforms.ToTensor())

    return {
        'train': transforms.Compose(transform_list),
        'val': val_transforms
    }
Esempio n. 20
0
def create_cmnist_datasets(
    *,
    root: str,
    scale: float,
    train_pcnt: float,
    download: bool = False,
    seed: int = 42,
    rotate_data: bool = False,
    shift_data: bool = False,
    padding: int = 0,
    quant_level: int = 8,
    input_noise: bool = False,
    classes_to_keep: Optional[Sequence[_Classes]] = None,
) -> Tuple[LdTransformedDataset, LdTransformedDataset]:
    """Create and return colourised MNIST train/test pair.

    Args:
        root: Where the images are downloaded to.
        scale: The amount of 'bias' in the colour. Lower is more biased.
        train_pcnt: The fraction of data to place in the train split
            (forwarded to ``train_test_split`` as ``train_pcnt``).
        download: Whether or not to download the data.
        seed: Random seed for reproducing results.
        rotate_data: Whether or not to rotate the training images.
        shift_data: Whether or not to shift the training images.
        padding: Number of pixels of padding to add around the images; 0
            disables padding. (The previous ``bool`` annotation was wrong:
            the value is compared with ``> 0`` and passed to ``Pad``.
            ``False`` still behaves as 0 for backward compatibility.)
        quant_level: the number of bins to quantize the data into.
        input_noise: Whether or not to add noise to the training images.
        classes_to_keep: Which digit classes to keep. If None or empty then all classes will be kept.

    Returns:
        tuple of train and test data as a Dataset.
    """
    # Seed every RNG used downstream for reproducibility.
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)

    # base_aug applies to both splits; data_aug only to the training split.
    base_aug = [transforms.ToTensor()]
    data_aug = []
    if rotate_data:
        data_aug.append(transforms.RandomAffine(degrees=15))
    if shift_data:
        data_aug.append(
            transforms.RandomAffine(degrees=0, translate=(0.11, 0.11)))
    if padding > 0:
        # Pad must run before ToTensor so it operates on PIL images.
        base_aug.insert(0, transforms.Pad(padding))
    if quant_level != 8:
        base_aug.append(Quantize(int(quant_level)))
    if input_noise:
        base_aug.append(NoisyDequantize(int(quant_level)))

    mnist_train = MNIST(root=root, train=True, download=download)
    mnist_test = MNIST(root=root, train=False, download=download)

    if classes_to_keep:
        mnist_train = _filter_classes(dataset=mnist_train,
                                      classes_to_keep=classes_to_keep)
        mnist_test = _filter_classes(dataset=mnist_test,
                                     classes_to_keep=classes_to_keep)

    # Pool the original splits, then re-split according to train_pcnt.
    all_data: ConcatDataset = ConcatDataset([mnist_train, mnist_test])
    train_data, test_data = train_test_split(all_data, train_pcnt=train_pcnt)

    colorizer = LdColorizer(scale=scale,
                            background=False,
                            black=True,
                            binarize=True,
                            greyscale=False)
    train_data = DatasetWrapper(train_data, transform=base_aug + data_aug)
    train_data = LdTransformedDataset(
        dataset=train_data,
        ld_transform=colorizer,
        target_dim=10,
        label_independent=False,
        discrete_labels=True,
    )
    test_data = DatasetWrapper(test_data, transform=base_aug)
    test_data = LdTransformedDataset(
        test_data,
        ld_transform=colorizer,
        target_dim=10,
        label_independent=True,
        discrete_labels=True,
    )

    return train_data, test_data
Esempio n. 21
0
        width = int(data.size[0] / (data.size[1] / height))
        data = data.resize((width, height))
        if self.transform is not None:
            data = self.transform(data)
        label = re.search('([0-9_]+)', img_name).group(1)
        label = re.sub('\D', '', label)
        return data, label

    def __len__(self):
        """Number of files in this dataset."""
        return len(self.fileList)


# Training transform: slight affine jitter, grayscale, tensor conversion,
# then a custom post-processing hook.
# NOTE(review): scale=(1.05, 0.95) is written (max, min); torchvision's
# RandomAffine documents (min, max) — confirm this samples as intended.
# NOTE(review): resample / fillcolor are legacy RandomAffine kwargs
# (renamed interpolation / fill in newer torchvision).
myTransform = transforms.Compose([
    transforms.RandomAffine(5.0,
                            translate=None,
                            scale=(1.05, 0.95),
                            shear=0.2,
                            resample=False,
                            fillcolor=127),
    transforms.Grayscale(),
    transforms.ToTensor(),
    # Custom hook applied to the tensor (do() is defined elsewhere).
    transforms.Lambda(lambda x: do(x)),
    #transforms.Normalize((0.1307,), (0.3081,)),
])

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import torchvision

    manualSeed = random.randint(1, 10000)  # fix seed
    random.seed(manualSeed)
    np.random.seed(manualSeed)
@Time    : 2020/10/24 19:40
"""
import torch, os, cv2
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import random

ImgResize = (1024, 1280)  # image size

# %% Data augmentation
# Training image transform: heavy geometric augmentation, then tensor +
# single-channel normalization (one mean/std value — presumably grayscale
# input; confirm).
# NOTE(review): TrainLabelTransform below repeats the same random ops with
# independent RNG draws — image/label augmentations are aligned only if the
# RNG state is synchronized elsewhere; confirm.
TrainImgTransform = transforms.Compose([
    transforms.RandomAffine(degrees=(-45, 45),
                            translate=(0.2, 0.2),
                            scale=(1, 1.),
                            shear=10),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    # scale=(1., 1.) keeps the whole image; this only resizes to ImgResize.
    transforms.RandomResizedCrop(ImgResize,
                                 scale=(1., 1.),
                                 interpolation=Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.46], std=[0.10]),
])
TrainLabelTransform = transforms.Compose([
    transforms.RandomAffine(degrees=(-45, 45),
                            translate=(0.2, 0.2),
                            scale=(1, 1.),
                            shear=10),
    transforms.RandomHorizontalFlip(),
    device = torch.device("cuda:0")
else:
    torch.set_default_tensor_type(torch.FloatTensor)
    device = torch.device("cpu")

transform = dict()
# Per-channel statistics — four values each, so presumably 4-channel input
# images; confirm against the dataset.
mean = [
    0.054813755064775954, 0.0808928726780973, 0.08367144133595689,
    0.05226083561943362
]
std = [
    0.15201123862047256, 0.14087982537762958, 0.139965362113942,
    0.10123220339551285
]
# Training: affine jitter + random crop/flips, then normalize.
# NOTE(review): `resample` is the legacy RandomAffine kwarg (renamed
# `interpolation` in newer torchvision) — confirm the pinned version.
transform['train'] = transforms.Compose([
    transforms.RandomAffine(20, shear=20, resample=PIL.Image.BILINEAR),
    #transforms.RandomRotation(20),
    transforms.RandomResizedCrop(512),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])
# Validation: deterministic resize + center crop with the same normalization.
transform['val'] = transforms.Compose([
    transforms.Resize(570),
    transforms.CenterCrop(512),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

Esempio n. 24
0
from torch import classes
from torchvision.transforms import transforms


def imshow(img):
    """Undo the (0.5, 0.5)-style normalization and display a CHW tensor image."""
    unnormalized = img / 2 + 0.5
    # matplotlib expects HWC ordering; tensors are CHW.
    plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
    plt.show()


if __name__ == '__main__':
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomAffine(5,
                                translate=(2 / 32, 2 / 32),
                                scale=(0.95, 1.05),
                                resample=PIL.Image.NEAREST),
        transforms.ToTensor(),
        transforms.RandomChoice([
            transforms.Normalize((0.5, 0.5, 0.5), (0.9, 0.9, 0.9)),
            transforms.Normalize((0.5, 0.5, 0.5), (0.7, 0.7, 0.7)),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
Esempio n. 25
0
 def RandomAffine(self, **args):
     """Append a torchvision RandomAffine built from **args to this pipeline.

     Returns whatever self._add returns — presumably self, for chaining;
     confirm against _add (not visible here).
     """
     return self._add(transforms.RandomAffine(**args))
Esempio n. 26
0
        r = args.random
        train_transforms = transforms.Compose([
            transforms.ToTensor(),
            #transforms.RandomApply([
            #    transforms.GaussianBlur(3, sigma=(0.1, 2.0))
            #], p=0.2),
            transforms.RandomApply(
                [transforms.Grayscale(num_output_channels=3)], p=0.2),
            transforms.RandomApply([
                transforms.ColorJitter(brightness=r,
                                       contrast=r,
                                       saturation=r,
                                       hue=r)
            ]),
            transforms.RandomApply(
                [transforms.RandomAffine(r * 10, shear=r * 10)]),
            transforms.RandomResizedCrop((32, 32), scale=(1 - r, 1.0)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        test_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
    else:
        train_transforms = transforms.ToTensor()
        test_transforms = transforms.ToTensor()

    # LOADING DATA
    dataname = args.dataset
    if dataname == "mnist":
import cv2
from torchvision.transforms import transforms
from torchvision.transforms.transforms import RandomAffine, RandomPerspective, RandomRotation, RandomVerticalFlip, ToPILImage

# Garbage-classification label set, in model output order.
CLASSES = ("glass", "paper", "cardboard", "plastic", "metal", "trash")

# Evaluation pipeline: resize + normalize only (no augmentation).
# NOTE(review): mean/std are on the 0-255 scale while ToTensor outputs
# values in [0, 1] — confirm these statistics are intended as written.
testTransform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=(104, 117, 123), std=(57.4, 57.1, 58.4)),
])

# Geometric augmentations operating on PIL images (no tensor conversion here).
augmentTransform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomAffine(degrees=5, shear=10),
    transforms.RandomRotation(degrees=[90, 270]),
    transforms.RandomVerticalFlip(),
    transforms.RandomHorizontalFlip(),
    transforms.RandomPerspective(),
    # transforms.RandomCrop((300,300)),
])

# Training pipeline = augmentations + the test pipeline minus its leading
# ToPILImage (augmentTransform already converts to PIL).
trainTransform = transforms.Compose([
    *augmentTransform.transforms,
    *testTransform.transforms[1:]  # Exclude PIL image conversion
    # transforms.Resize((224, 224)),
    # transforms.ToTensor(),
    # transforms.Normalize(mean=(104, 117, 123), std=(57.4, 57.1, 58.4)),
])
@Author  : Xiaoqi Cheng
@Time    : 2021/1/15 19:40
"""
import torch, os, cv2
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import transforms
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import random

ImgResize = (1024, 1280)  # image size
# %% Data augmentation
# Training image transform: moderate geometric augmentation, then tensor +
# single-channel normalization (one mean/std value — presumably grayscale
# input; confirm).
# NOTE(review): TrainLabelTransform below repeats the same random ops with
# independent RNG draws — image/label augmentations are aligned only if the
# RNG state is synchronized elsewhere; confirm.
TrainImgTransform = transforms.Compose([
    transforms.RandomAffine(degrees=(-10, 10),
                            translate=(0.1, 0.1),
                            scale=(0.5, 2.),
                            shear=10),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    # scale=(1., 1.) keeps the whole image; this only resizes to ImgResize.
    transforms.RandomResizedCrop(ImgResize,
                                 scale=(1., 1.),
                                 interpolation=Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.46], std=[0.10]),
])
TrainLabelTransform = transforms.Compose([
    transforms.RandomAffine(degrees=(-10, 10),
                            translate=(0.1, 0.1),
                            scale=(0.5, 2.),
                            shear=10),
    transforms.RandomHorizontalFlip(),
val_dataset = torch.utils.data.Subset(my_dataset, mask)

mask = list(
    range(num_training + num_validation,
          num_training + num_validation + num_test))
test_dataset = torch.utils.data.Subset(my_dataset, mask)

# Optional training-time augmentations; empty when data_aug is off.
data_aug_transforms = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
if data_aug == True:

    # Candidate augmentations; several options left commented out.
    transformsList = [
        # transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.RandomAffine(degrees=0, translate=(.3, .7)),
        # transforms.ColorJitter(
        #         brightness=float(0.1*np.random.rand(1)),
        #         contrast=float(0.1*np.random.rand(1)),
        #         saturation=float(0.1*np.random.rand(1)),
        #         hue=float(0.1*np.random.rand(1))),
        #transforms.RandomGrayscale(p=0.1)
    ]

    data_aug_transforms = transformsList
    # NOTE(review): the next line immediately overwrites the assignment above,
    # replacing the list with a single RandomApply transform — confirm which
    # form the downstream Compose expects.
    data_aug_transforms = transforms.RandomApply(transformsList, p=0.5)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# norm_transform = transforms.Compose([data_aug_transforms]+[transforms.ToTensor()])
# test_transform = transforms.Compose([transforms.ToTensor()])

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,