Example #1
def load_CIFAR10():
    # C10+: standard pad-and-crop + horizontal flip augmentation for training
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.243, 0.261)),
                                          ])

    # Use the same per-channel statistics at test time as at train time
    transform_test = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.247, 0.243, 0.261))])

    train_set = datasets.CIFAR10(root='/data', train=True, transform=transform_train, download=True)
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True, num_workers=4)

    test_set = datasets.CIFAR10(root='/data', train=False, transform=transform_test, download=True)
    test_loader = DataLoader(test_set, batch_size=100, num_workers=4)

    return train_loader, test_loader
Example #2
def get_image_dataloader(path_to_data, batch_size=16):
    my_transforms = transforms.Compose([
        # ImageFolder yields PIL images, so the ToTensor/ToPILImage round-trip is unnecessary
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(),
        transforms.RandomRotation((-30, 30)),
        # transforms.Normalize([],[]) # fill in dataset mean and std (see the sketch after this example)
        transforms.ToTensor(),
    ])
    dataset = ImageFolder(root=path_to_data,
                          transform=my_transforms,
                          is_valid_file=check_valid)
    print(len(dataset.classes))
    dataloader = DataLoader(dataset=dataset,
                            batch_size=batch_size,
                            shuffle=True)
    return dataloader, dataset
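Note: Example #2 leaves the Normalize placeholder empty. The sketch below is one way to compute per-channel mean and std for an ImageFolder dataset so that placeholder can be filled in; the resize target, path handling, and loader settings are assumptions, not part of the original example.

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder

def compute_mean_std(path_to_data, batch_size=64):
    # Resize to a fixed shape (assumed to match the training resolution) so images can be batched,
    # and use a bare ToTensor so pixel values land in [0, 1] before measuring statistics.
    dataset = ImageFolder(root=path_to_data,
                          transform=transforms.Compose([transforms.Resize((224, 224)),
                                                        transforms.ToTensor()]))
    loader = DataLoader(dataset, batch_size=batch_size, num_workers=4)

    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    n_pixels = 0
    for images, _ in loader:
        # images: (B, C, H, W); accumulate over batch and spatial dimensions
        n_pixels += images.size(0) * images.size(2) * images.size(3)
        channel_sum += images.sum(dim=[0, 2, 3])
        channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])

    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    return mean, std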
Example #3
def get_cifar10_transform(size, train=True):
    if train:
        transform = transforms.Compose([
            transforms.RandomCrop(size, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    else:
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    return transform
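A minimal usage sketch for the factory above; the dataset root, batch sizes, and worker counts are assumptions rather than part of the original example.

from torch.utils.data import DataLoader
from torchvision import datasets

# Build train/test transforms from the factory defined above
train_transform = get_cifar10_transform(size=32, train=True)
test_transform = get_cifar10_transform(size=32, train=False)

train_set = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
test_set = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)

train_loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=2)
test_loader = DataLoader(test_set, batch_size=128, shuffle=False, num_workers=2)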
Example #4
def get_augmentation(dataset):
    if dataset in [DATA.cifar_10, DATA.cifar_100]:
        transform_train = tv_transforms.Compose([
            tv_transforms.RandomCrop(32, padding=4),
            tv_transforms.RandomHorizontalFlip(),
            tv_transforms.ToTensor(),
            tv_transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD)
        ])

        transform_test = tv_transforms.Compose([
            tv_transforms.ToTensor(),
            tv_transforms.Normalize(_CIFAR_MEAN, _CIFAR_STD),
        ])
    else:
        raise NotImplementedError(
            'Not yet implemented for dataset={}.'.format(dataset))

    return transform_train, transform_test
Example #5
def cifar10_train_augmented():
    """
    ### Augmented CIFAR 10 train dataset
    """
    from torchvision.datasets import CIFAR10
    from torchvision.transforms import transforms
    return CIFAR10(str(lab.get_data_path()),
                   train=True,
                   download=True,
                   transform=transforms.Compose([
                       # Pad and crop
                       transforms.RandomCrop(32, padding=4),
                       # Random horizontal flip
                       transforms.RandomHorizontalFlip(),
                       # Convert to a tensor and normalize to [-1, 1]
                       transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ]))
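A short usage sketch for the dataset helper above, assuming the labml `lab` module is configured; the batch size and worker count are assumptions.

from torch.utils.data import DataLoader

train_loader = DataLoader(cifar10_train_augmented(), batch_size=64, shuffle=True, num_workers=2)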
Example #6
 def __init__(
     self,
     *,
     crop_size,
     resize_size,
     mean=(0.43216, 0.394666, 0.37645),
     std=(0.22803, 0.22145, 0.216989),
     hflip_prob=0.5,
 ):
     trans = [
         ConvertBHWCtoBCHW(),
         transforms.ConvertImageDtype(torch.float32),
         transforms.Resize(resize_size),
     ]
     if hflip_prob > 0:
         trans.append(transforms.RandomHorizontalFlip(hflip_prob))
     trans.extend([transforms.Normalize(mean=mean, std=std), transforms.RandomCrop(crop_size), ConvertBCHWtoCBHW()])
     self.transforms = transforms.Compose(trans)
Example #7
 def build_data_loader(self):
     if self.train_set:
         transform = transforms.Compose([
             transforms.RandomHorizontalFlip(),
             transforms.RandomVerticalFlip(),
             transforms.ToTensor()
         ])
     else:
         transform = transforms.ToTensor()
     cifar10_dtst = CIFAR10(root='data',
                            download=True,
                            train=self.train_set,
                            transform=transform)
     cifar10_dl = data.DataLoader(cifar10_dtst,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=self.num_workers)
     return cifar10_dl
Example #8
    def __init__(self, data_path, mode):

        if mode == "train" or mode == "val":
            img_paths = sorted(glob.glob(os.path.join(data_path, '*.jpg')))
            random.shuffle(img_paths)
        else:
            img_paths = sorted(glob.glob(os.path.join(data_path, '*.jpg')),
                               key=get_name)

        split = int(len(img_paths) * 0.75)

        if mode == "train":
            self.img_paths = img_paths[:split]
        elif mode == "val":
            self.img_paths = img_paths[split:]
        else:
            self.img_paths = img_paths

        if mode == "train" or mode == "val":
            self.labels = [
                path.split("/")[-1].split(".")[0] for path in self.img_paths
            ]
            self.labels = [int(label == "cat") for label in self.labels]
        else:
            self.img_ids = [
                path.split("/")[-1].split(".")[0] for path in self.img_paths
            ]

        if mode == "train":
            transforms = [
                T.Grayscale(),
                T.Resize((RE_HEIGHT, RE_WIDTH)),
                T.RandomHorizontalFlip(),
                T.ToTensor()
            ]
        else:  # "val" and "test"
            transforms = [
                T.Grayscale(),
                T.Resize((RE_HEIGHT, RE_WIDTH)),
                T.ToTensor()
            ]

        self.transforms = T.Compose(transforms)
        self.mode = mode
Example #9
def train(args):
    bsd300_train = load_bsd300('../data', split='train')
    bsd300_test = load_bsd300('../data', split='test')

    img_transforms = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
    ])

    train_data = IsrDataset(bsd300_train,
                            output_size=200,
                            scale_factor=args.scale_factor,
                            deterministic=False,
                            base_image_transform=img_transforms,
                            transform=transforms.ToTensor(),
                            target_transform=transforms.ToTensor())
    n_train = int(len(train_data) * 0.8)
    split = [n_train, len(train_data) - n_train]
    train_data, val_data = random_split(train_data, split)
    test_data = IsrDataset(bsd300_test,
                           output_size=200,
                           scale_factor=args.scale_factor,
                           deterministic=True,
                           transform=transforms.ToTensor(),
                           target_transform=transforms.ToTensor())

    model = SubPixelSrCnn(hparams=args)
    trainer = Trainer()
    trainer.fit(
        model,
        train_dataloader=DataLoader(train_data,
                                    shuffle=True,
                                    batch_size=32,
                                    num_workers=2),
        val_dataloaders=DataLoader(val_data,
                                   shuffle=False,
                                   batch_size=32,
                                   num_workers=2),
    )
    trainer.test(model,
                 test_dataloaders=DataLoader(test_data,
                                             shuffle=False,
                                             batch_size=32,
                                             num_workers=2))
Example #10
def train_images():
    from train.images.image_utils import params
    data_args = params['data_args']
    train_args = params['train_args']
    model_args = params['model_args']

    if params['use_wandb']:
        wandb.login(key=os.environ['wanda_api_key'])
        run_wandb = wandb.init(
            project='dalle_train_vae',
            job_type='train_model',
            config=params,
            resume=train_args['checkpoint_path'] is not None
        )
    else:
        run_wandb = RunDisabled()

    model = VqVae(**model_args).to('cuda')
    print('num of trainable parameters: %d' % get_model_size(model))
    print(model)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    unnormalize = NormalizeInverse(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    training_data = ImagesDataset(
        data_args['root_dir'],
        transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ])
    )

    training_loader = torch.utils.data.DataLoader(
        training_data, batch_size=data_args['batch_size'], shuffle=True, num_workers=data_args['num_workers'])

    train_object = TrainVqVae(model=model, training_loader=training_loader, run_wandb=run_wandb,
                              unnormalize=unnormalize,
                              **train_args)
    try:
        train_object.train()
    finally:
        run_wandb.finish()
Example #11
def create_test_dataloaders(model_size, dataset_dir, batch_size):
    normalize = transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                     std=(0.5, 0.5, 0.5))

    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(model_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    dummy_config = type('dummy', (object,), {'dataset_dir': dataset_dir})()
    train_dataset = create_cifar(dummy_config, dataset_config='cifar10', is_train=True, transform=train_transforms)
    pin_memory = True
    workers = 1

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=workers,
                                               pin_memory=pin_memory)
    return train_loader, train_dataset
Example #12
class Base:
    base = ResNet
    args = list()
    kwargs = dict()
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize(32),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    transform_test = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])
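A minimal sketch of wiring the class-level transforms above into CIFAR-10 loaders; the data root and batch size are assumptions.

from torch.utils.data import DataLoader
from torchvision import datasets

train_set = datasets.CIFAR10(root='./data', train=True, download=True, transform=Base.transform_train)
test_set = datasets.CIFAR10(root='./data', train=False, download=True, transform=Base.transform_test)

train_loader = DataLoader(train_set, batch_size=128, shuffle=True)
test_loader = DataLoader(test_set, batch_size=128, shuffle=False)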
Example #13
def load_data(batch_size=32, valid_batch_size=32):
    train_transformations = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    test_transformations = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    kwargs_dl = {'root': '../data', 'download': True}
    train_set = ds.CIFAR10(train=True, transform=train_transformations, **kwargs_dl)
    test_set = ds.CIFAR10(train=False, transform=test_transformations, **kwargs_dl)
    kwargs_train = {'shuffle': True, 'batch_size': batch_size, 'num_workers': NUM_WORKER}
    kwargs_test = {'shuffle': False, 'batch_size': valid_batch_size, 'num_workers': NUM_WORKER}
    train_loader = DataLoader(train_set, **kwargs_train)
    test_loader = DataLoader(test_set, **kwargs_test)
    return train_loader, test_loader
Example #14
def load_CIFAR10(rate, batch_size=128):
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='.data/cifar10',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    L = len(trainset)
    target_array = np.asarray(trainset.targets)
    thr = 1 - rate * 10 / 9
    target_array = np.where(
        np.random.rand(L) <= thr, target_array, np.random.randint(0, 10, L))
    trainset.targets = target_array.tolist()
    idx = np.random.permutation(L)
    train_idx = idx[:int(L * 0.8)]
    val_idx = idx[int(L * 0.8):]
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=batch_size,
        sampler=SubsetRandomSampler(train_idx))
    valloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, sampler=SubsetRandomSampler(val_idx))

    testset = torchvision.datasets.CIFAR10(root='.data/cifar10',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,
                                             shuffle=False)
    return None, trainloader, valloader, testloader
Example #15
    def __init__(self, opt):
        BaseDataset.__init__(self, opt)

        self.shuffle = opt.isTrain
        self.lr_size = opt.load_size // opt.scale_factor
        self.hr_size = opt.load_size

        self.img_dir = opt.dataroot
        self.img_names = self.get_img_names()

        self.aug = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                Scale((1.0, 1.3), opt.load_size) 
                ])

        self.to_tensor = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                ])
Example #16
def data_transform(data_path, name, train=True):
    with open(data_path + 'decathlon_mean_std.pickle', 'rb') as handle:
        dict_mean_std = pickle._Unpickler(handle)
        dict_mean_std.encoding = 'latin1'
        dict_mean_std = dict_mean_std.load()

    means = dict_mean_std[name + 'mean']
    stds = dict_mean_std[name + 'std']

    if name in ['gtsrb', 'omniglot', 'svhn']:  # no horz flip
        transform_train = transforms.Compose([
            transforms.Resize(72),
            transforms.CenterCrop(72),
            transforms.ToTensor(),
            transforms.Normalize(means, stds),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.Resize(72),
            transforms.RandomCrop(72),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(means, stds),
        ])
    # The test transform is the same for every dataset: resize + center crop, no flipping
    transform_test = transforms.Compose([
        transforms.Resize(72),
        transforms.CenterCrop(72),
        transforms.ToTensor(),
        transforms.Normalize(means, stds),
    ])
    if train:
        return transform_train
    else:
        return transform_test
Example #17
def main():
    start_time = time.time()
    args = args_parser()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    write_dict(vars(args), os.path.join(args.save_dir, 'arguments.csv'))

    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device_ids

    train_transformer = transforms.Compose([
        transforms.Resize(size=(256, 256)),
        transforms.RandomCrop(size=image_size),
        transforms.RandomRotation(10),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        transforms.Lambda(normalize)
    ])
    train_dataset = ChestXRay14Dataset(args.image_dir, args.train_file,
                                       train_transformer)

    val_transformer = transforms.Compose([
        transforms.Resize(size=image_size),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        transforms.Lambda(normalize)
    ])

    val_dataset = ChestXRay14Dataset(args.image_dir, args.val_file,
                                     val_transformer)
    print(vars(args))
    if args.model == 'simple':
        model = simple_mlc_model(train_dataset.get_tag_size(),
                                 backbone=args.backbone)
    else:
        model = mlc_model(train_dataset.get_tag_size(), backbone=args.backbone)

    train(model, train_dataset, val_dataset, args)
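The pipelines above call an external normalize helper through transforms.Lambda that is not shown in the snippet. A plausible minimal stand-in, assuming it simply applies the ImageNet statistics from the commented-out Normalize line, could look like this:

from torchvision import transforms

# Hypothetical stand-in for the external `normalize`; the real helper may differ.
_imagenet_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

def normalize(tensor):
    # tensor: CHW float tensor produced by ToTensor()
    return _imagenet_normalize(tensor)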
Example #18
def build_transforms(cfg, split="train"):
    if split=="train":
        min_size = min(cfg.EXTERNAL.IMAGE.HEIGHT,cfg.EXTERNAL.IMAGE.WIDTH)
        max_size = max(cfg.EXTERNAL.IMAGE.HEIGHT,cfg.EXTERNAL.IMAGE.WIDTH)
        flip_horizontal_prob = 0.5  # cfg.INPUT.FLIP_PROB_TRAIN
        flip_vertical_prob = cfg.INPUT.VERTICAL_FLIP_PROB_TRAIN
        brightness = cfg.INPUT.BRIGHTNESS
        contrast = cfg.INPUT.CONTRAST
        saturation = cfg.INPUT.SATURATION
        hue = cfg.INPUT.HUE
    else:
        min_size = min(cfg.EXTERNAL.IMAGE.HEIGHT, cfg.EXTERNAL.IMAGE.WIDTH)
        max_size = max(cfg.EXTERNAL.IMAGE.HEIGHT, cfg.EXTERNAL.IMAGE.WIDTH)
        flip_horizontal_prob = 0.0
        flip_vertical_prob = 0.0
        brightness = 0.0
        contrast = 0.0
        saturation = 0.0
        hue = 0.0

    to_bgr255 = cfg.INPUT.TO_BGR255
    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD,
    )
    color_jitter = T.ColorJitter(
        brightness=brightness,
        contrast=contrast,
        saturation=saturation,
        hue=hue,
    )

    transform = T.Compose(
        [
            color_jitter,
            T.Resize(min_size, max_size),
            T.RandomHorizontalFlip(flip_horizontal_prob),
            T.RandomVerticalFlip(flip_vertical_prob),
            T.ToTensor(),
            normalize_transform,
        ]
    )
    return transform
Example #19
    def load_cifar_data(self):
        if self.params.transform_train:
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                self.normalize,
            ])
        else:
            transform_train = transforms.Compose([
                transforms.ToTensor(),
                self.normalize,
            ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            self.normalize,
        ])
        self.train_dataset = torchvision.datasets.CIFAR10(
            root=self.params.data_path,
            train=True,
            download=True,
            transform=transform_train)
        if self.params.poison_images:
            self.train_loader = self.remove_semantic_backdoors()
        else:
            self.train_loader = DataLoader(self.train_dataset,
                                           batch_size=self.params.batch_size,
                                           shuffle=True,
                                           num_workers=0)
        self.test_dataset = torchvision.datasets.CIFAR10(
            root=self.params.data_path,
            train=False,
            download=True,
            transform=transform_test)
        self.test_loader = DataLoader(self.test_dataset,
                                      batch_size=self.params.test_batch_size,
                                      shuffle=False,
                                      num_workers=0)

        self.classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',
                        'horse', 'ship', 'truck')
        return True
Example #20
def predict_image(image_path):
    print("prediction in progress")
    image = Image.open(image_path).convert("RGB")
    # Use deterministic transforms at inference time (no random crop or flip)
    transformation = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    image_tensor = transformation(image).unsqueeze(0)

    # Run on the same device as the model
    device = next(model.parameters()).device
    image_tensor = image_tensor.to(device)

    with torch.no_grad():
        output = model(image_tensor)

    index = output.argmax(dim=1).item()
    return index
Example #21
    def get_transforms(self) -> tuple:
        # TODO: Need to rethink the food101 transforms
        MEAN = [0.5451, 0.4435, 0.3436]
        STD = [0.2171, 0.2251,
               0.2260]  # TODO: should be [0.2517, 0.2521, 0.2573]
        train_transf = [
            transforms.Resize((32, 32)),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]

        # food101 has images of varying sizes and are ~512 each side
        test_transf = [transforms.Resize((32, 32))]

        normalize = [transforms.ToTensor(), transforms.Normalize(MEAN, STD)]

        train_transform = transforms.Compose(train_transf + normalize)
        test_transform = transforms.Compose(test_transf + normalize)

        return train_transform, test_transform
Example #22
def gen_transform(input_size):
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    return data_transforms
Example #23
    def __init__(
        self,
        *,
        crop_size,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
        interpolation=InterpolationMode.BILINEAR,
        hflip_prob=0.5,
        auto_augment_policy=None,
        random_erase_prob=0.0,
    ):
        trans = [
            transforms.RandomResizedCrop(crop_size,
                                         interpolation=interpolation)
        ]
        if hflip_prob > 0:
            trans.append(transforms.RandomHorizontalFlip(hflip_prob))
        if auto_augment_policy is not None:
            if auto_augment_policy == "ra":
                trans.append(
                    autoaugment.RandAugment(interpolation=interpolation))
            elif auto_augment_policy == "ta_wide":
                trans.append(
                    autoaugment.TrivialAugmentWide(
                        interpolation=interpolation))
            elif auto_augment_policy == "augmix":
                trans.append(autoaugment.AugMix(interpolation=interpolation))
            else:
                aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
                trans.append(
                    autoaugment.AutoAugment(policy=aa_policy,
                                            interpolation=interpolation))
        trans.extend([
            transforms.PILToTensor(),
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=mean, std=std),
        ])
        if random_erase_prob > 0:
            trans.append(transforms.RandomErasing(p=random_erase_prob))

        self.transforms = transforms.Compose(trans)
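A usage sketch for the preset above; the enclosing class name is not shown in the snippet, so TrainPreset and the image path below are hypothetical stand-ins.

from PIL import Image

preset = TrainPreset(crop_size=176, auto_augment_policy="ta_wide", random_erase_prob=0.1)  # hypothetical class name
img = Image.open("example.jpg").convert("RGB")  # hypothetical input image
tensor = preset.transforms(img)  # float CHW tensor, augmented and normalized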
Example #24
    def get_transforms(self) -> tuple:
        MEAN = [0.485, 0.456, 0.406]
        STD = [0.229, 0.224, 0.225]

        _IMAGENET_PCA = {
            'eigval': [0.2175, 0.0188, 0.0045],
            'eigvec': [
                [-0.5675, 0.7192, 0.4009],
                [-0.5808, -0.0045, -0.8140],
                [-0.5836, -0.6948, 0.4203],
            ]
        }

        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(
                224,
                scale=(
                    0.08,
                    1.0),  # TODO: these two params are normally not specified
                interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4,
                                   hue=0.2),
            transforms.ToTensor(),
            # TODO: Lighting is not used in original darts paper
            # Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            transforms.Normalize(mean=MEAN, std=STD)
        ])

        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=MEAN, std=STD)
        ])

        return transform_train, transform_test
Example #25
def load_data(data_, labels_):
    '''
    Data loader

    :param data_: input images
    :param labels_: labels of images
    :return: train and test data transformed
    '''
    # Import data, scale them to values between [0,1] and cast them to the appropriate types
    data = np.load(data_)
    labels = np.load(labels_)

    # Normalize each channel
    img_mean = np.mean(np.swapaxes(data / 255.0, 0, 1).reshape(3, -1), 1)
    img_std = np.std(np.swapaxes(data / 255.0, 0, 1).reshape(3, -1), 1)

    normalize = transforms.Normalize(mean=list(img_mean), std=list(img_std))

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        labels,
                                                        test_size=0.15,
                                                        random_state=42)

    train = MyDataset(
        x_train, y_train,
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ]))

    test = MyDataset(
        x_test, y_test,
        transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
            normalize,
        ]))

    return train, test
Example #26
def load_data(data_path, use_openfire=False, img_size=224, crop_pct=0.8):
    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(size=img_size, scale=(crop_pct, 1.0)),
        transforms.RandomRotation(degrees=5),
        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])

    val_transforms = transforms.Compose([
        transforms.Resize(size=img_size),
        transforms.CenterCrop(size=img_size),
        transforms.ToTensor(), normalize
    ])

    print("Loading data")
    if use_openfire:
        train_set = OpenFire(root=data_path,
                             train=True,
                             download=True,
                             transform=train_transforms)
        val_set = OpenFire(root=data_path,
                           train=False,
                           download=True,
                           transform=val_transforms)

    else:
        train_dir = os.path.join(data_path, 'train')
        val_dir = os.path.join(data_path, 'val')
        train_set = ImageFolder(train_dir,
                                train_transforms,
                                target_transform=target_transform)
        val_set = ImageFolder(val_dir,
                              val_transforms,
                              target_transform=target_transform)

    return train_set, val_set
Example #27
    def __init__(self, imsize, data_path, startidx=0):
        super(Dataset64, self).__init__()
        self.imsize = imsize
        self.data_path = data_path
        self.startidx = startidx
        self.transform = transforms.Compose([lambda x: Image.open(x).convert('RGB'),
                                             transforms.RandomResizedCrop(imsize),
                                             transforms.RandomHorizontalFlip(),
                                             transforms.ToTensor(),
                                             transforms.Normalize((0.485, 0.456, 0.406),
                                                                  (0.229, 0.224, 0.225))])

        self.path_images = os.path.join(data_path, 'miniimagenet', 'images')  # image path

        csvdata = self.loadCSV(os.path.join(data_path, 'miniimagenet', 'train.csv'))  # csv path
        self.data = []
        self.img2label = {}
        for i, (k, v) in enumerate(csvdata.items()):
            self.data.extend(v)
            self.img2label[k] = i + self.startidx
        self.num_classes = len(self.img2label)
Example #28
    def __getitem__(self, index):

        rgb = np.array(self.f["rgb"][index])
        label = np.array((self.f["labels"][index] - self.f["Mean"])/self.f["Variance"])
        
        t_rgb = torch.zeros(3, 224, 224)
        
        prob = random.uniform(0, 1)
        prob2 = random.uniform(0, 1)

        if self.transform is not None:
            if prob > 0.5 and not self.test:
                flip_transform = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(1.0)])
                rgb = np.array(flip_transform(rgb))
            if prob2 > 0.5 and not self.test:
                color_jitter_transform = transforms.Compose([transforms.ToPILImage(), transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.2)])
                rgb = np.array(color_jitter_transform(rgb))

            t_rgb[:,:,:] = self.transform(rgb)
        
        return t_rgb, label
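As an alternative to the manual probability checks above, the same train-time behavior can be sketched with torchvision's built-in probabilistic wrappers; this is a different construction and not part of the original dataset class.

from torchvision import transforms

# Each random op fires independently with probability 0.5, mirroring the prob/prob2 checks above
random_augment = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomApply(
        [transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.2)], p=0.5),
    transforms.ToTensor(),
])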
Example #29
def get_train_data_loader(dataset_name: str, batch_size: int, num_workers: int,
                          resize_dim: int) -> DataLoader:
    with DatasetTimer():
        dataset = ImageFolder(root=os.path.join("/datasets", dataset_name,
                                                "train"),
                              transform=transforms.Compose([
                                  transforms.RandomResizedCrop(resize_dim),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=IMAGENET_MEAN,
                                                       std=IMAGENET_STD),
                              ]))

        return DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            sampler=RandomSampler(data_source=dataset),
            num_workers=num_workers,
            collate_fn=collate_fn,
            drop_last=True,
        )
Example #30
	def __init__(self, config: BaseConfig):
		super().__init__(config)
		self.config = config
		# self.img_dir = os.path.join(self.config.dataset_config.data_root_dir, self.config.dataset_config.img_dir)
		train_attr, test_attr, self.label = get_attr_binary(self.config.dataset_config)
		train, val, test = get_img(self.config.dataset_config)

		if config.isTrain:

			# distribution: fraction of positive samples for each attribute

			self.img = train['data']
			self.img_ids = train['ids']
			self.img_attr = train_attr

			self.transforms = transforms.Compose([
				transforms.Resize(size=(288, 144)),
				transforms.RandomHorizontalFlip(),
				transforms.ToTensor(),
				transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
			])

		else:
			self.transforms = transforms.Compose([
				transforms.Resize(size=(288, 144)),
				transforms.ToTensor(),
				transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
			])
			self.img_attr = test_attr

			if self.config.isTest:
				# test set
				self.img = test['data']
				self.img_ids = test['ids']
			else:
				# val set
				self.img = val['data']
				self.img_ids = val['ids']

		self.length = len(self.img)