Exemple #1
0
 def __init__(self, resize, mean, std):
     """Build per-split preprocessing pipelines keyed by 'valid'/'train'/'test'."""

     def augmented_pipeline():
         # Randomly ordered augmentations, then tensor conversion + normalization.
         return transforms.Compose([
             transforms.RandomOrder([
                 transforms.RandomRotation(20),
                 transforms.ColorJitter(brightness=0.1, contrast=0.1,
                                        saturation=0.1, hue=0.1),
                 transforms.RandomHorizontalFlip(),
                 transforms.RandomVerticalFlip(),
                 transforms.RandomResizedCrop(resize, scale=(0.8, 1.0)),
             ]),
             transforms.ToTensor(),
             transforms.Normalize(mean, std),
         ])

     # NOTE(review): 'valid' gets the same random augmentation as 'train';
     # validation usually uses the deterministic 'test' pipeline — confirm.
     self.data_transform = {
         'valid': augmented_pipeline(),
         'train': augmented_pipeline(),
         'test': transforms.Compose([
             transforms.Resize(resize),
             transforms.ToTensor(),
             transforms.Normalize(mean, std),
         ]),
     }
Exemple #2
0
 def __init__(self, split, crop_size, scaling_factor, lr_img_type,
              hr_img_type):
     """
     :param split: one of 'train' or 'test'
     :param crop_size: crop size of HR images
     :param scaling_factor: LR images will be downsampled from the HR images by this factor
     :param lr_img_type: the target format for the LR image; see convert_image() above for available formats
     :param hr_img_type: the target format for the HR image; see convert_image() above for available formats
     :raises ValueError: if split is neither 'train' nor 'test'
     """
     self.split = split.lower()
     # Validate first, with a real exception: `assert` is stripped under
     # `python -O`, and failing early avoids the setup work below.
     if self.split not in {'train', 'test'}:
         raise ValueError("split must be 'train' or 'test', got {!r}".format(split))
     self.crop_size = crop_size
     self.scaling_factor = scaling_factor
     self.lr_img_type = lr_img_type
     self.hr_img_type = hr_img_type
     # Candidate PIL resampling filters for LR downsampling; one is drawn
     # per image according to self.downsample_proba.
     self.downsample_methods = [
         Image.NEAREST, Image.BOX, Image.BILINEAR, Image.HAMMING,
         Image.BICUBIC, Image.LANCZOS
     ]
     self.downsample_proba = [0.1, 0.1, 0.2, 0.1, 0.3, 0.2]
     # JPEG quality for compression-artifact augmentation: truncated normal
     # over the valid [1, 100] quality range.
     self.jpeg_quality_dist = bounded_norm_dist(mean=50,
                                                std=25,
                                                lower=1,
                                                upper=100)
     # weights = scipy.io.loadmat(path.join('./processing/jpeg_artifacts/weights/q{}.mat'.format(40)))
     # self.denoiser = ARCNN(weights).to("cpu").eval()
     self.augmenter = transforms.RandomOrder([
         transforms.ColorJitter(brightness=0.15,
                                contrast=0.15,
                                saturation=0.15,
                                hue=0.1),
         transforms.RandomGrayscale(p=0.15),
         transforms.RandomHorizontalFlip(0.25)
     ])
Exemple #3
0
def get_batch_data(image_list, batch_size):
    """Sample a batch of augmented class images.

    Each sample picks a random class index, loads the class's reference image
    from ./standard_img, resizes it to 128x128 and applies random affine +
    color-jitter augmentation.  The label is the chosen index.

    Returns (batch_x, batch_y): a float image tensor and a tensor of labels.
    """
    # Loop-invariant: build the augmentation pipeline once, not per sample.
    compose = transforms.Compose([
        transforms.RandomOrder([
            transforms.RandomAffine(180, shear=5, resample=Image.BILINEAR),
            transforms.ColorJitter(brightness=0.6, contrast=0.6, saturation=0.6),
        ]),
        transforms.ToTensor(),
        # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    batch_x = []
    batch_y = []
    while len(batch_x) < batch_size:
        index = np.random.randint(args.num_classes)
        label = index

        image = Image.open('./standard_img/%s' % image_list[index]).convert('RGB')
        # Image.LANCZOS is the same filter as Image.ANTIALIAS, which was an
        # alias removed in Pillow 10.
        image = image.resize((128, 128), Image.LANCZOS)

        image = compose(image)

        batch_x.append(image.numpy())
        batch_y.append(label)

    batch_x = torch.Tensor(batch_x)
    # NOTE(review): torch.Tensor yields float labels; a loss such as
    # CrossEntropyLoss would need .long() — confirm against callers.
    batch_y = torch.Tensor(batch_y)
    return batch_x, batch_y
Exemple #4
0
def load_datasets():
    """Create train/valid data loaders for Cityscapes transformation sweeps.

    Training applies every transform in TRANSFORMATIONS, each randomly
    toggled and in a random order.  Validation builds one dataset per ordered
    combination of transforms (including the empty one) so each combination
    can be scored separately.
    """
    randomized_transformations = transforms.RandomOrder(
        [transforms.RandomApply([tr]) for tr in TRANSFORMATIONS])

    # Every ordered subset of the transformation list, from none up to all.
    all_transform_combinations = [
        transforms.Compose(combination)
        for size in range(len(TRANSFORMATIONS) + 1)
        for combination in itertools.permutations(TRANSFORMATIONS, size)
    ]

    train_dataset = CityscapesDataset(randomized_transformations)
    valid_datasets = [CityscapesDataset(combo)
                      for combo in all_transform_combinations]

    # Random train/validation index split.
    ids = list(range(len(train_dataset)))
    random.shuffle(ids)
    train_sampler = torchdata.sampler.SubsetRandomSampler(ids[VALIDATION_SET_SIZE:])
    valid_sampler = torchdata.sampler.SubsetRandomSampler(ids[:VALIDATION_SET_SIZE])

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=BATCH_SIZE, sampler=train_sampler,
        num_workers=NUM_WORKERS)
    valid_loaders = [
        torch.utils.data.DataLoader(
            dset, batch_size=BATCH_SIZE, sampler=valid_sampler,
            num_workers=NUM_WORKERS)
        for dset in valid_datasets
    ]

    train_size = len(train_sampler)
    # Each validation dataset is evaluated over the same sampled indices.
    valid_size = len(valid_sampler) * len(valid_datasets)

    return train_size, valid_size, train_loader, valid_loaders, train_dataset.result_to_image
Exemple #5
0
 def build_transform(self):
     """Set self.transform: augmentation pipeline for "train", plain tensor
     conversion for any other flag."""
     if self.train_eval_flag != "train":
         self.transform = transforms.Compose([
             transforms.ToTensor(),
         ])
         return

     # Each imgaug op is wrapped so it fires with its own probability; the
     # surviving ops then run in a random order.
     augmentations = [
         RandomTransWrapper(seq=iaa.GaussianBlur((0, 1.5)), p=0.09),
         RandomTransWrapper(seq=iaa.AdditiveGaussianNoise(
             loc=0, scale=(0.0, 0.05), per_channel=0.5),
                            p=0.09),
         RandomTransWrapper(seq=iaa.Dropout((0.0, 0.10),
                                            per_channel=0.5),
                            p=0.3),
         RandomTransWrapper(seq=iaa.CoarseDropout(
             (0.0, 0.10), size_percent=(0.08, 0.2),
             per_channel=0.5),
                            p=0.3),
         RandomTransWrapper(seq=iaa.Add((-20, 20), per_channel=0.5),
                            p=0.3),
         RandomTransWrapper(seq=iaa.Multiply((0.9, 1.1),
                                             per_channel=0.2),
                            p=0.4),
         RandomTransWrapper(seq=iaa.ContrastNormalization(
             (0.8, 1.2), per_channel=0.5),
                            p=0.09),
     ]
     self.transform = transforms.Compose([
         transforms.RandomOrder(augmentations),
         transforms.ToTensor()
     ])
Exemple #6
0
def train_data_loader(data_path, img_size, use_augment=False):
    """Build an ImageFolder training dataset.

    The base pipeline is RandomResizedCrop + horizontal flip + ToTensor +
    ImageNet normalization; when use_augment is True, extra color jitter and
    rotation steps (each applied with probability 0.5) are prepended.
    """
    base_steps = [
        transforms.RandomResizedCrop(img_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    if use_augment:
        augment_steps = [
            transforms.RandomOrder([
                transforms.RandomApply([transforms.ColorJitter(contrast=0.5)], .5),
                transforms.Compose([
                    transforms.RandomApply([transforms.ColorJitter(saturation=0.5)], .5),
                    transforms.RandomApply([transforms.ColorJitter(hue=0.1)], .5),
                ]),
            ]),
            transforms.RandomApply([transforms.ColorJitter(brightness=0.125)], .5),
            transforms.RandomApply([transforms.RandomRotation(15)], .5),
        ]
        data_transforms = transforms.Compose(augment_steps + base_steps)
    else:
        data_transforms = transforms.Compose(base_steps)

    return datasets.ImageFolder(data_path, data_transforms)
    def __init__(self, args):
        """Download CIFAR-10 and build an augmented train loader and a plain
        test loader; class names go to self.classes."""
        super(CifarLoader, self).__init__()

        normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        # TODO: Add data augmentations here
        train_transform = transforms.Compose([
            transforms.RandomOrder([
                transforms.RandomVerticalFlip(),
                transforms.RandomGrayscale(),
                transforms.ColorJitter(),
            ]),
            transforms.ToTensor(),
            normalize,
        ])
        test_transform = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

        trainset = torchvision.datasets.CIFAR10(
            root='./data', train=True, download=True, transform=train_transform)
        self.trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=args.batchSize, shuffle=True, num_workers=2)

        testset = torchvision.datasets.CIFAR10(
            root='./data', train=False, download=True, transform=test_transform)
        self.testloader = torch.utils.data.DataLoader(
            testset, batch_size=args.batchSize, shuffle=False, num_workers=2)

        self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',
                        'horse', 'ship', 'truck')
Exemple #8
0
def get_experiment_inception_transformations_composition(device):
    """See base_inception_experiment.

    The augmentation bundle — flips, brightness/contrast/hue jitter and a
    small rotation, in random order — is applied as a whole with probability
    0.5; every training image is then resized to 299px for Inception.
    """
    augmentation_bundle = transforms.RandomOrder(transforms=[
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.05),
        transforms.ColorJitter(contrast=0.05),
        transforms.ColorJitter(hue=0.025),
        transforms.RandomRotation(degrees=5),
    ])
    pcam_train_transform = transforms.Compose(transforms=[
        transforms.RandomApply([augmentation_bundle], p=0.5),
        transforms.Resize(299),
        transforms.ToTensor(),
    ])
    pcam_valid_transform = transforms.Compose([transforms.ToTensor()])
    pcam_test_transform = transforms.Compose([transforms.ToTensor()])

    pcam_data_transform = {
        'train': pcam_train_transform,
        'valid': pcam_valid_transform,
        'test': pcam_test_transform,
    }
    pcam_dataset = dataset.PCamDatasets(data_transforms=pcam_data_transform)
    data_dict = {
        dataset.TRAIN: pcam_dataset.train,
        dataset.VALID: pcam_dataset.valid,
        dataset.TEST: pcam_dataset.test,
    }

    return base_inception_experiment(
        experiment_name="inception_transformation_composition",
        learn_rate=1e-3,
        batch_size=32,
        validation_batch_size=128,
        num_epochs=25,
        weight_decay=0,
        pretrained=True,
        fixed_weights=False,
        data_dict=data_dict,
        device=device)
def load_data():
    """Load the data: The detail folders contain a '1 bee' folder with 78x78 grayscale PNG images
       of bees in honeycombs and a '0 notbee' folder with negative examples."""
    b_size = 200
    sigma = 15

    # Deterministic tail shared by both splits: crop, shrink, 1-channel tensor.
    deterministic_steps = [
        transforms.CenterCrop(54),
        transforms.Resize(32),
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
    ]
    transform_train = transforms.Compose([
        transforms.RandomOrder([
            transforms.RandomRotation(180),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(brightness=0.5, contrast=0.5),
        ]),
    ] + deterministic_steps)
    transform_test = transforms.Compose(deterministic_steps)

    trainset = beeimagefolder.BeeImageFolder(
        root='../Videos/detail_training', valid_classes=['0 notbee', '1 bee'])
    # MyDataset wraps the raw train set with label smoothing sigma and the
    # augmented transform.
    translated_trainset = mydataset.MyDataset(dataset=trainset,
                                              sigma=sigma,
                                              transform=transform_train)
    testset = beeimagefolder.BeeImageFolder(
        root='../Videos/detail_test',
        valid_classes=['0 notbee', '1 bee'],
        transform=transform_test)

    print('class_to_idx trainset: {}\t class_to_idx testset: {}'.format(
        trainset.class_to_idx, testset.class_to_idx))

    trainloader = torch.utils.data.DataLoader(translated_trainset,
                                              num_workers=2,
                                              batch_size=b_size,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(testset)

    classes = ('notbee', 'bee')

    return trainloader, testloader, classes, b_size, sigma
Exemple #10
0
def get_experiment_densenet_transformation_composition(device):
    """See transformations_densenet_experiment_base"""
    augmentation_bundle = transforms.RandomOrder(transforms=[
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.05),
        transforms.ColorJitter(contrast=0.05),
        transforms.ColorJitter(hue=0.025),
        transforms.RandomRotation(degrees=5),
    ])
    # The whole bundle is either applied (in a fresh random order) or
    # skipped entirely, with probability 0.5.
    train_transform = transforms.Compose(transforms=[
        transforms.RandomApply([augmentation_bundle], p=0.5),
        transforms.ToTensor(),
    ])
    return transformations_densenet_experiment_base(
        train_transformations=train_transform,
        experiment_name="densenet_transformation_composition",
        learn_rate=1e-4,
        batch_size=32,
        validation_batch_size=1024,
        num_epochs=20,
        weight_decay=0,
        pretrained=True,
        fixed_weights=False,
        device=device)
def random_transforms(aug_degree: float) -> t.RandomOrder:
    """Build a randomly ordered augmentation pipeline scaled by *aug_degree*.

    Each augmentation (hflip, fixed-scale crop, affine, color jitter) is
    independently applied with probability 0.4 + 0.1 * aug_degree, and the
    affine/jitter strengths grow linearly with aug_degree.

    :param aug_degree: augmentation intensity in [0, 3]
    :raises ValueError: if aug_degree is outside [0, 3]
    """
    # Use a real exception for input validation: `assert` is stripped
    # under `python -O`.
    if not 0 <= aug_degree <= 3:
        raise ValueError('aug_degree must be in [0, 3], got {!r}'.format(aug_degree))

    aug_list = [
        t.functional.hflip,
        t.RandomResizedCrop(size=SIZE, scale=(7 / 8, 7 / 8)),
        t.RandomAffine(degrees=aug_degree * 10,
                       translate=(0.1 * aug_degree, 0.1 * aug_degree),
                       scale=(1 - 0.1 * aug_degree, 1 + 0.1 * aug_degree),
                       shear=aug_degree * 5,
                       fillcolor=0),
        t.ColorJitter(brightness=0.1 * aug_degree,
                      contrast=0.1 * aug_degree,
                      saturation=0.1 * aug_degree)
    ]
    return t.RandomOrder(
        [t.RandomApply([aug], p=0.4 + 0.1 * aug_degree) for aug in aug_list])
Exemple #12
0
def import_transfrom(data_dir):
    """Build datasets and loaders for the train/valid/test splits under data_dir.

    Train images get random rotation/crop/flip augmentation; valid and test
    use the deterministic resize + center-crop pipeline.

    Returns (train_data, valid_data, test_data,
             trainloader, validloader, testloader).
    """
    train_dir = data_dir + '/train'
    valid_dir = data_dir + '/valid'
    test_dir = data_dir + '/test'

    train_transforms = transforms.Compose([
        transforms.RandomOrder([
            transforms.RandomRotation(30),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip()
        ]),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    valid_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    test_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
    test_data = datasets.ImageFolder(test_dir, transform=test_transforms)

    trainloader = torch.utils.data.DataLoader(train_data,
                                              batch_size=64,
                                              shuffle=True)
    validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
    # BUG FIX: the test loader previously wrapped valid_data, so the test
    # split was never actually evaluated.
    testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
    return train_data, valid_data, test_data, trainloader, validloader, testloader
Exemple #13
0
    def set_transforms(self, p: float) -> None:
        """Configure self._transforms for augmentation probability *p*.

        p > 0 enables the randomized distortion chain (each distortion fires
        independently with probability p) followed by noise, normalization
        and tensor conversion; p == 0 keeps only the deterministic
        normalize + ToTensor pipeline.

        :raises ValueError: if p is negative
        """
        if p > 0:
            transforms_list = [self._add_random_const_transform,
                               self._ampl_inversion_transform,
                               self._sync_impulse_transform,
                               self._sine_distortion_transform,
                               self._energy_absorption_transform]
            random_transforms = t.RandomOrder([t.RandomApply([transform], p=p) for transform in transforms_list])

            self._transforms = t.Compose([random_transforms,
                                          self._add_random_noise_transform,
                                          t.RandomApply([self._ampl_zero_transform,
                                                         self._high_noise_trace_transform], p=p),
                                          data_normalize_and_limiting,
                                          t.ToTensor()])
        elif p == 0:
            self._transforms = t.Compose([data_normalize_and_limiting,
                                          t.ToTensor()])
        else:
            # BUG FIX: the exception was constructed but never raised, so a
            # negative p silently left self._transforms unset.
            raise ValueError('Probability must be non-negative')
Exemple #14
0
def get_cars(augment: bool,
             train_dir: str,
             project_dir: str,
             test_dir: str,
             img_size=224):
    """Return (trainset, projectset, testset, classes, shape) for the cars data.

    The train set is optionally augmented (perspective, color jitter, flip,
    affine); the project and test sets always use the plain resize +
    normalize pipeline.
    """
    shape = (3, img_size, img_size)
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    normalize = transforms.Normalize(mean=mean, std=std)

    transform_no_augment = transforms.Compose([
        transforms.Resize(size=(img_size, img_size)),
        transforms.ToTensor(), normalize
    ])

    if not augment:
        transform = transform_no_augment
    else:
        transform = transforms.Compose([
            # Work on a slightly larger image (256x256 for the default 224)...
            transforms.Resize(size=(img_size + 32, img_size + 32)),
            transforms.RandomOrder([
                transforms.RandomPerspective(distortion_scale=0.5, p=0.5),
                transforms.ColorJitter((0.6, 1.4), (0.6, 1.4), (0.6, 1.4),
                                       (-0.4, 0.4)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomAffine(degrees=15, shear=(-2, 2)),
            ]),
            # ...then crop randomly back down to the target size.
            transforms.RandomCrop(size=(img_size, img_size)),
            transforms.ToTensor(),
            normalize,
        ])

    trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
    projectset = torchvision.datasets.ImageFolder(project_dir,
                                                  transform=transform_no_augment)
    testset = torchvision.datasets.ImageFolder(test_dir,
                                               transform=transform_no_augment)

    return trainset, projectset, testset, trainset.classes, shape
Exemple #15
0
    def test_random_order(self):
        """Statistically verify that RandomOrder shuffles its two transforms."""
        saved_state = random.getstate()
        random.seed(42)
        shuffled = transforms.RandomOrder(
            [transforms.Resize(20),
             transforms.CenterCrop(10)])
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        trials = 250
        # Reference output for the "natural" order: Resize then CenterCrop.
        reference = transforms.CenterCrop(10)(transforms.Resize(20)(img))
        natural_hits = sum(
            1 for _ in range(trials) if shuffled(img) == reference)

        # With two transforms the natural order should occur with p = 0.5.
        p_value = stats.binom_test(natural_hits, trials, p=0.5)
        random.setstate(saved_state)
        assert p_value > 0.0001

        # Checking if RandomOrder can be printed as string
        shuffled.__repr__()
Exemple #16
0
def get_birds(augment: bool,
              train_dir: str,
              project_dir: str,
              test_dir: str,
              img_size=224):
    """Return (trainset, projectset, testset, classes, shape) for the birds data.

    Train data is optionally augmented; project/test always use the plain
    resize + normalize pipeline.  Class names have their numeric prefix
    stripped (e.g. '001.Black_footed_Albatross' -> 'Black_footed_Albatross').
    """
    shape = (3, img_size, img_size)
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    normalize = transforms.Normalize(mean=mean, std=std)
    transform_no_augment = transforms.Compose([
        transforms.Resize(size=(img_size, img_size)),
        transforms.ToTensor(), normalize
    ])

    if not augment:
        transform = transform_no_augment
    else:
        transform = transforms.Compose([
            transforms.Resize(size=(img_size, img_size)),
            transforms.RandomOrder([
                transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
                transforms.ColorJitter((0.6, 1.4), (0.6, 1.4), (0.6, 1.4),
                                       (-0.02, 0.02)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomAffine(degrees=10,
                                        shear=(-2, 2),
                                        translate=[0.05, 0.05]),
            ]),
            transforms.ToTensor(),
            normalize,
        ])

    trainset = torchvision.datasets.ImageFolder(train_dir, transform=transform)
    projectset = torchvision.datasets.ImageFolder(project_dir,
                                                  transform=transform_no_augment)
    testset = torchvision.datasets.ImageFolder(test_dir,
                                               transform=transform_no_augment)

    classes = trainset.classes
    # Strip the numeric prefix in place; note this also renames
    # trainset.classes (same list object), as before.
    for i, name in enumerate(classes):
        classes[i] = name.split('.')[1]
    return trainset, projectset, testset, classes, shape
Exemple #17
0
    def prepareData(self):
        """Build train/test ImageFolder datasets and their loaders.

        torchvision datasets yield PIL images in [0, 1]; both splits are
        converted to tensors and normalized identically, while the train
        split additionally gets random crop/flip augmentation.
        """
        print('Preparing dataset...')

        # The transform function for train data
        transform_train = transforms.Compose([
            transforms.RandomOrder([
                transforms.RandomCrop(128, padding=4),
                transforms.RandomHorizontalFlip(),
            ]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1)),
            # you can apply more augment function
            # [document]: https://pytorch.org/docs/stable/torchvision/transforms.html
        ])

        # The transform function for test data
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (1, 1, 1)),
        ])

        self.trainset = torchvision.datasets.ImageFolder(
            "dataset", transform=transform_train)
        # BUG FIX: the test set previously reused transform_train, so test
        # images were randomly cropped/flipped at evaluation time.
        self.testset = torchvision.datasets.ImageFolder(
            "testset", transform=transform_test)

        self.trainloader = torch.utils.data.DataLoader(
            self.trainset, batch_size=self.batch_size, shuffle=True)
        self.testloader = torch.utils.data.DataLoader(
            self.testset, batch_size=self.batch_size, shuffle=False)
        # you can also split validation set
        # self.validloader = torch.utils.data.DataLoader(self.validset, batch_size=self.batch_size, shuffle=True)
        return
    def transform_tr(self, sample):
        """Training-time augmentation: joint geometric transforms on the whole
        sample, photometric transforms on the image only, then normalize and
        convert to tensors."""
        # Each color jitter fires independently and in a random order.
        color_jitter = transforms.RandomOrder([
            transforms.RandomApply([transforms.ColorJitter(brightness=0.1)]),
            transforms.RandomApply([transforms.ColorJitter(contrast=0.1)]),
            transforms.RandomApply([transforms.ColorJitter(saturation=0.1)]),
            transforms.RandomApply([transforms.ColorJitter(hue=0.05)]),
        ])

        # Geometric transforms applied jointly to image and label.
        joint_transforms = transforms.Compose([
            tr.RandomHorizontalFlip(),
            tr.RandomScaleCrop(base_size=self.args.base_size,
                               crop_size=self.args.crop_size,
                               fill=255),
            tr.equalize(),
            tr.RandomGaussianBlur(),
            tr.RandomRotate(degree=7)
        ])

        # Photometric transforms applied to the image only.
        image_transforms = transforms.Compose([
            color_jitter,
            transforms.RandomGrayscale(p=0.3)
        ])

        normalize_transforms = transforms.Compose([
            tr.Normalize(mean=(0.485, 0.456, 0.406),
                         std=(0.229, 0.224, 0.225)),
            tr.ToTensor()
        ])

        augmented = joint_transforms(sample)
        augmented['image'] = image_transforms(augmented['image'])
        return normalize_transforms(augmented)
Exemple #19
0
def aug_image(image, is_infer=False, augment=None):
    """Augment an image.

    Inference mode: augment[0] selects a deterministic flip
    (1=horizontal, 2=vertical, 3=both, anything else=no-op).
    Training mode: random affine followed by randomly ordered
    jitter/perspective distortions.
    """
    if is_infer:
        flip_code = augment[0]
        if flip_code == 1:
            return TF.hflip(image)
        if flip_code == 2:
            return TF.vflip(image)
        if flip_code == 3:
            return TF.hflip(TF.vflip(image))
        return image

    dist_scale = random.random() / 10 + 0.01
    aug_factor = 1.5
    transform = transforms.Compose([
        transforms.RandomAffine(degrees=(-15, 15),
                                shear=(-20, 20)),
        transforms.RandomOrder([
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.1 * aug_factor,
                                        contrast=0.1 * aug_factor)], p=0.2),
            transforms.RandomApply(
                [transforms.ColorJitter(saturation=0.1 * aug_factor,
                                        hue=0.1 * aug_factor)], p=0.5),
            transforms.RandomPerspective(
                distortion_scale=dist_scale * aug_factor, p=0.5),
        ])
    ])
    return transform(image)
Exemple #20
0
    def __init__(self, data_path, train_eval_flag, batch_size):
        """
        Args:
            data_path (string): Path to the data folder.
            train_eval_flag (string): "train" or "eval".
            batch_size (int): Recommended to be consistent with the number of processor cores
        """

        self.data_path = data_path
        self.batch_size = batch_size

        if train_eval_flag == "train":
            self.tran = transforms.Compose([
                transforms.RandomOrder([
                    # Assorted augmentation ops applied in a random order;
                    # `sometimes` wraps each one so that it only fires with
                    # the given probability.
                    sometimes(0.09, iaa.GaussianBlur(sigma=(0, 1.5))),
                    sometimes(
                        0.09,
                        iaa.AdditiveGaussianNoise(loc=0,
                                                  scale=(0.0, 0.05),
                                                  per_channel=0.5)),
                    sometimes(
                        0.09,
                        iaa.ContrastNormalization((0.8, 1.2),
                                                  per_channel=0.5)),
                    sometimes(0.3, iaa.Dropout((0.0, 0.10), per_channel=0.5)),
                    sometimes(
                        0.3,
                        iaa.CoarseDropout((0.0, 0.10),
                                          size_percent=(0.08, 0.2),
                                          per_channel=0.5)),
                    sometimes(0.3, iaa.Add((-20, 20), per_channel=0.5)),
                    sometimes(0.4, iaa.Multiply((0.9, 1.1), per_channel=0.2)),
                ]),
                transforms.ToTensor()
            ])
        else:
            # Evaluation: no augmentation, just tensor conversion.
            self.tran = transforms.Compose([transforms.ToTensor()])
Exemple #21
0
 def __init__(self, rootDir=Path('/home/zzr/Data/XinGuan/lung'),
              mode='train',
              size=(160, 240)):
     """Dataset init: 'train' mode gets flip/rotation/jitter augmentation
     before tensor conversion; other modes only normalize."""
     super().__init__(rootDir, mode, size)
     self.mean, self.std = .5536, .2696
     normalize = transforms.Normalize((self.mean, ), (self.std, ))
     if mode == 'train':
         self.to_tensorImg = transforms.Compose([
             transforms.RandomOrder([
                 transforms.RandomHorizontalFlip(),
                 transforms.RandomVerticalFlip(),
                 transforms.RandomApply(
                     [transforms.RandomRotation(45, resample=Image.BILINEAR)],
                     p=.5),
                 transforms.RandomApply(
                     [transforms.ColorJitter(.1, .1, .1, .1)], p=.5),
             ]),
             transforms.ToTensor(),
             normalize,
         ])
     else:
         self.to_tensorImg = transforms.Compose([
             transforms.ToTensor(),
             normalize,
         ])
Exemple #22
0
 def set_choice(self):
     """Populate self.randomchoice: a dict mapping augmentation names to
     lists of candidate transforms (one of which is picked at random at
     augmentation time).

     NOTE(review): several of the inline labels below do not match the
     transforms actually constructed (entries described as "noise"/"blur"
     are plain Stage2_RandomAffine instances) — confirm against the
     Stage2_* classes before relying on the labels.
     """
     choice = {
         # random_choice 1:  Blur, rotaion, Blur + rotation + scale_translate (random_order)
         self.augmentation_name[1]: [
             Stage2_RandomAffine(degrees=15, translate=(0, 0),
                                 scale=(1, 1)),
             transforms.RandomOrder([
                 Stage2_GaussianNoise(),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ])
         ],
         # random_choice 2:  noise, crop, noise + crop + rotation_scale_translate (random_order)
         self.augmentation_name[2]: [
             Stage2_GaussianNoise(),
             Stage2_RandomAffine(degrees=30,
                                 translate=(0.3, 0.3),
                                 scale=(0.8, 1.5)),
             transforms.RandomOrder([
                 Stage2_GaussianNoise(),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ])
         ],
         # random_choice 3:  noise + blur , noise + rotation ,noise + blur + rotation_scale_translate
         self.augmentation_name[3]: [
             # NOTE(review): labelled "noise + blur" but built from two
             # identical RandomAffines — possibly a copy-paste slip; verify.
             transforms.RandomOrder([
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2)),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ]),
             transforms.RandomOrder([
                 Stage2_RandomAffine(degrees=30,
                                     translate=None,
                                     scale=(1, 1)),
                 Stage2_RandomAffine(degrees=0,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ]),
             transforms.RandomOrder([
                 Stage2_GaussianNoise(),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ]),
             # NOTE(review): identical to the second entry above — confirm
             # the duplication is intentional (it doubles that option's
             # selection probability).
             transforms.RandomOrder([
                 Stage2_RandomAffine(degrees=30,
                                     translate=None,
                                     scale=(1, 1)),
                 Stage2_RandomAffine(degrees=0,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ])
         ],
         # random_choice 4:  noise + crop , blur + crop ,noise + blur + crop + rotation_scale_translate
         self.augmentation_name[4]: [
             transforms.RandomOrder([
                 Stage2_RandomAffine(degrees=15,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2)),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ]),
             # Fixed-order Compose here, unlike the RandomOrder wrappers
             # used everywhere else in this table.
             transforms.Compose([
                 Stage2_GaussianNoise(),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2)),
             ]),
             transforms.RandomOrder([
                 Stage2_RandomAffine(degrees=0,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2)),
                 Stage2_RandomAffine(degrees=30,
                                     translate=(0.3, 0.3),
                                     scale=(1, 2))
             ])
         ]
     }
     # Expose the table for the augmentation picker.
     self.randomchoice = choice
Exemple #23
0
from torchvision import transforms

# Pipeline that converts a tensor / ndarray image back to a PIL image.
to_PIL = transforms.Compose([transforms.ToPILImage()])

# Photometric + geometric augmentations, applied in a random order per sample.
data_augmentation = transforms.RandomOrder([
    transforms.RandomHorizontalFlip(0.5),
    transforms.ColorJitter(brightness=0.3,
                           contrast=0.3,
                           saturation=0.3,
                           hue=0.1),
    transforms.RandomRotation(40)
])

# Random-erasing occlusion, applied with p=0.5.
# NOTE(review): RandomErasing expects a tensor image — confirm it is placed
# after ToTensor() wherever this pipeline is composed.
random_erasing = transforms.Compose(
    [transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))])

model_transforms = {
    'reseption_v1':
    transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]),
    'reseption_v2':
    transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ]),
    'reseption_ensemble':
    transforms.Compose([
Exemple #24
0
def normalize(img):
	"""Min-max normalize a tensor image to [0, 1] and return it as float32.

	:param img: a ``torch.Tensor`` of any numeric dtype.
	:return: a new ``torch.FloatTensor`` with values scaled to [0, 1];
	    a constant image (max == min) maps to all zeros.
	"""
	# Copy before mutating: ``img.numpy()`` shares storage with ``img``, so
	# the original in-place ``-=`` / ``/=`` silently modified the caller's
	# tensor.  Converting to float32 also supports integer inputs, where
	# in-place true division would raise.
	nimg = img.numpy().astype(np.float32, copy=True)
	m, M = np.min(nimg), np.max(nimg)
	if M > m:
		nimg = (nimg - m) / (M - m)
	else:
		# Constant image: avoid division by zero (previously produced NaNs).
		nimg = np.zeros_like(nimg)
	return torch.from_numpy(nimg).float()

# Identity normalization (per-channel mean 0, std 1): leaves pixel values unchanged.
Normalize = transforms.Normalize([0.]*3, [1.]*3)

normalize_transforms = [transforms.Lambda(normalize)]
feature_build_transforms = []#[transforms.Lambda(feature_build)] #[]

## List of transforms for Data Augmentation
# Combinators: RC picks one transform, RA applies the list with prob. 0.7,
# RO applies the list in a random order.
RC = lambda ts : transforms.RandomChoice(ts)
RA = lambda ts : transforms.RandomApply(ts, p=0.7)
RO = lambda ts : transforms.RandomOrder(ts)
HFlip = transforms.RandomHorizontalFlip(p=0.7)
VFlip = transforms.RandomVerticalFlip(p=0.7)
# NOTE(review): `resample` is deprecated in newer torchvision (replaced by
# `interpolation`) — confirm the pinned torchvision version accepts it.
Rotation = transforms.RandomRotation((0, 180), resample=False, expand=False, center=None)
## White noise
# Additive Gaussian noise with amplitude 0.01; `Variable` presumably comes from
# torch.autograd, imported outside this excerpt — verify.
noise = transforms.Lambda(lambda img : img+0.01*Variable(torch.randn(img.size())))
# `shape` is a module-level value defined outside this excerpt; a value <= 0
# disables resizing — TODO confirm.
Resize = transforms.Lambda(lambda img : transforms.functional.resize(img, (shape, shape)) if (shape > 0) else img)
CJ = transforms.ColorJitter(brightness=0.4, contrast=0, saturation=0.5, hue=0)
## For custom transformations
# transforms.LinearTransformation(transformation_matrix)
# transforms.Lambda(lambd)
# The same RandomChoice pipeline listed twice: two consecutive random draws
# from {HFlip, VFlip, Rotation, RA([CJ])} per sample.
data_augmentation = [RC([HFlip, VFlip, Rotation, RA([CJ])])]*2

data_transform_train = transforms.Compose(
	[Resize]+
	[RO(data_augmentation)]+
# -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from auto_augment import AutoAugment, Cutout

import os
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from PIL import Image
"""
transforms.RandomOrder([
    transforms.RandomApply([transforms.ColorJitter(contrast=0.5)], .5),
    transforms.Compose([
        transforms.RandomApply([transforms.ColorJitter(saturation=0.5)], .5),
        transforms.RandomApply([transforms.ColorJitter(hue=0.1)], .5),
    ])
]),
transforms.RandomApply([transforms.ColorJitter(brightness=0.125)], .5),
transforms.RandomApply([transforms.RandomRotation(15)], .5),
transforms.RandomResizedCrop(img_size),
transforms.RandomHorizontalFlip(),
"""


def train_data_loader(data_path, img_size, use_augment=False):
    if use_augment:
        data_transforms = transforms.Compose([
            transforms.RandomResizedCrop(img_size),
            AutoAugment(),
            Cutout(),
Exemple #26
0
    'n02105641': 'Old English sheepdog',
    'n02088364': 'Beagle',
    'n02111889': 'Samoyed',
    'n02093754': 'Border terrier',
    'n02089973': 'English foxhound',
    'n02096294': 'Australian terrier',
    'n02115641': 'Dingo'
}
MODEL_TYPE = 'efficientnet-b0'
NUM_CLASSES = len(CLASS_NAMES)
INPUT_SHAPE = (224, 224, 3)  # (H, W, C); the transforms use only the spatial part

# Normalization statistics shared by the train and test pipelines.
NORMALIZE_MEAN = [0, 0, 0]
NORMALIZE_STD = [255, 255, 255]

# Training pipeline: tensor conversion, random resized crop, then
# flip/color/affine/blur augmentations applied in random order, then
# normalization.
TRAIN_DATA_TRANSFORMS = transforms.Compose([
    transforms.ToTensor(),
    transforms.RandomResizedCrop(INPUT_SHAPE[:2]),
    transforms.RandomOrder([
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.25, contrast=0.25, saturation=0.1, hue=0.05),
            transforms.RandomAffine(degrees=5, scale=(0.8, 1.2), fillcolor=None),
            transforms.GaussianBlur(kernel_size=3)
    ]),
    # Use the shared constants instead of duplicating the literal lists, so the
    # train and test normalization cannot silently drift apart.
    transforms.Normalize(NORMALIZE_MEAN, NORMALIZE_STD)
])

# Evaluation pipeline: deterministic resize plus the same normalization.
TEST_DATA_TRANSFORMS = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize(INPUT_SHAPE[:2]),
    transforms.Normalize(NORMALIZE_MEAN, NORMALIZE_STD)
])
Exemple #27
0
# Channel-wise normalization; `mean` / `std` are defined earlier in the module.
normalize = transforms.Normalize(mean, std)

# Deterministic preprocessing: `rescale` (defined upstream — verify) -> tensor -> normalize.
preprocess = transforms.Compose([rescale, transforms.ToTensor(), normalize])

# Pool of individual augmentations (all with their default probabilities
# unless stated otherwise).
v_flip = transforms.RandomVerticalFlip()
h_flip = transforms.RandomHorizontalFlip()
s_color = transforms.ColorJitter(saturation=0.1)
c_color = transforms.ColorJitter(contrast=0.1)
h_color = transforms.ColorJitter(hue=0.1)
grayscale = transforms.RandomGrayscale()

# Three policies over the same pool: apply the whole list (RandomApply,
# default p), pick exactly one (RandomChoice), or apply all in random order.
augment1 = transforms.RandomApply(
    [h_flip, s_color, c_color, h_color, v_flip, grayscale])
augment2 = transforms.RandomChoice(
    [h_flip, s_color, c_color, h_color, v_flip, grayscale])
augment3 = transforms.RandomOrder(
    [h_flip, s_color, c_color, h_color, v_flip, grayscale])


class TrainDataset(Dataset):
    def __init__(self, transform=None):
        """Load the list of training sample paths from the TRAIN file.

        :param transform: optional callable applied to samples later by the
            dataset (None disables it).
        """
        with open(TRAIN, 'r') as path_file:
            self.train_paths = path_file.read().splitlines()
        self.transform = transform

    def __len__(self):
        return len(self.train_paths)

    def __getitem__(self, idx):
        csv_file = self.train_paths[idx]
Exemple #28
0
 def set_choice(self):
     """Assemble the augmentation menu and store it on ``self.randomchoice``.

     ``choice`` maps each ``self.augmentation_name[i]`` (i in 1..4) to a list
     of candidate transforms/pipelines from which one is drawn elsewhere.
     """

     # Local factories: every call yields a fresh transform instance, so each
     # pipeline below owns distinct objects, exactly like the original
     # literal dict did.
     def make_noise():
         return GaussianNoise()

     def make_blur():
         return Blurfilter()

     def make_affine():
         return RandomAffine(degrees=15,
                             translate=(0.1, 0.1),
                             scale=(0.9, 1.2))

     choice = {
         # random_choice 1: noise, affine, or noise + affine in random order
         self.augmentation_name[1]: [
             make_noise(),
             make_affine(),
             transforms.RandomOrder([make_noise(), make_affine()]),
         ],
         # random_choice 2: blur, affine, or blur + affine in random order
         self.augmentation_name[2]: [
             make_blur(),
             make_affine(),
             transforms.RandomOrder([make_blur(), make_affine()]),
         ],
         # random_choice 3: noise + affine (twice), noise + blur + affine
         # NOTE(review): the first two candidates are identical, while the
         # original comment described "noise + blur" and "noise + rotation" —
         # confirm whether one of them should use Blurfilter instead.
         self.augmentation_name[3]: [
             transforms.RandomOrder([make_noise(), make_affine()]),
             transforms.RandomOrder([make_noise(), make_affine()]),
             transforms.RandomOrder([make_noise(), make_blur(), make_affine()]),
         ],
         # random_choice 4: noise + affine (random order), blur + affine
         # (fixed order via Compose), noise + blur + affine (random order)
         self.augmentation_name[4]: [
             transforms.RandomOrder([make_noise(), make_affine()]),
             transforms.Compose([make_blur(), make_affine()]),
             transforms.RandomOrder([make_noise(), make_blur(), make_affine()]),
         ],
     }
     self.randomchoice = choice
Exemple #29
0
    transforms.RandomChoice([
        transforms.RandomVerticalFlip(p=1),
        transforms.RandomHorizontalFlip(p=1)
    ]),

    # 2 RandomApply
    transforms.RandomApply([
        transforms.RandomAffine(degrees=0, shear=45, fillcolor=(255, 0, 0)),
        transforms.Grayscale(num_output_channels=3)
    ],
                           p=0.5),
    # 3 RandomOrder
    transforms.RandomOrder([
        transforms.RandomRotation(15),
        transforms.Pad(padding=32),
        transforms.RandomAffine(degrees=0,
                                translate=(0.01, 0.1),
                                scale=(0.9, 1.1))
    ]),

    # transforms.ToTensor(),
    # transforms.Normalize(norm_mean, norm_std),
])

# Deterministic validation pipeline: fixed resize, tensor conversion, and
# normalization with the module-level `norm_mean` / `norm_std` statistics.
valid_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std)
])

# 构建MyDataset实例
import tfrecord_creator	
from config import im_size, unknown_code, fg_path, bg_path, a_path, num_valid, valid_ratio, num_fgs, num_bgs
from utils import safe_crop, parse_args, maybe_random_interp

global args
args = parse_args()

# Data augmentation and normalization for training
# Just normalization for validation
if args.data_augumentation:
    data_transforms = {
        'train': transforms.Compose([
            transforms.ColorJitter(brightness=0.125, contrast=0.125, saturation=0.125),
            transforms.RandomApply([
                transforms.RandomOrder([
                    transforms.RandomApply([transforms.RandomAffine(degrees=30, scale=[0.8,1.25], shear=10)]),
                    transforms.RandomApply([transforms.RandomHorizontalFlip(0.5)]),
                    transforms.RandomApply([transforms.RandomCrop(size=(320, 320), pad_if_needed=True)])])]),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]),
        'valid': transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
else:
    data_transforms = {
    'train': transforms.Compose([
        transforms.ColorJitter(brightness=0.125, contrast=0.125, saturation=0.125),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),