Code Example #1
    def __init__(self, root, n_way, k_shot, k_query,
                 resize, split, augment='0', test=None, shuffle=True):

        self.n_way = n_way
        self.k_shot = k_shot
        self.k_query = k_query
        self.resize = resize
        self.split = split
        self.shuffle = shuffle
        if test is not None:
            self.test_manner = test.manner
            self.test_ep_num = test.ep_num
            self.test_query_num = test.query_num

        if augment == '0':
            self.transform = T.Compose([
                lambda x: Image.open(x).convert('RGB'),
                T.Resize((self.resize, self.resize)),
                T.ToTensor(),
                T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
            ])
        elif augment == '1':
            if self.split == 'train':
                self.transform = T.Compose([
                    lambda x: Image.open(x).convert('RGB'),
                    T.Resize((self.resize+20, self.resize+20)),
                    T.RandomCrop(self.resize),
                    T.RandomHorizontalFlip(),
                    T.ColorJitter(brightness=.1, contrast=.1, saturation=.1, hue=.1),
                    T.ToTensor(),
                    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                ])
            else:
                self.transform = T.Compose([
                    lambda x: Image.open(x).convert('RGB'),
                    T.Resize((self.resize + 20, self.resize + 20)),
                    T.RandomCrop(self.resize),
                    T.ToTensor(),
                    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                ])

        self.path = os.path.join(root, 'images')
        csvdata = self._loadCSV(os.path.join(root, self.split + '.csv'))
        self.data = []          # store images for each class [[img1, img2, ...], [img111, img222, ...]]
        self.img2label = {}     # {"img_name[:9]": label}

        total_sample = 0
        for i, (k, v) in enumerate(csvdata.items()):
            self.data.append(v)
            self.img2label[k] = i
            total_sample += len(v)

        self.total_sample = total_sample
        self.cls_num = len(self.data)
        self.support_sz = self.n_way * self.k_shot  # num of samples per support set
        self.query_sz = self.n_way * self.k_query

        self.support_x_batch = []
        self.query_x_batch = []
        self._create_batch()
Code Example #2
def get_train_transform(data_augmentation, dataset):
    assert dataset in {'cifar', 'imagenet'}

    if data_augmentation:
        if dataset == 'cifar':
            random_crop = transforms.RandomCrop(
                base_settings.CIFAR_RANDOM_CROP_SIZE,
                padding=base_settings.CIFAR_RANDOM_CROP_PADDING)
        else:
            random_crop = transforms.RandomCrop(
                base_settings.MINI_IMAGENET_RANDOM_CROP_SIZE,
                padding=base_settings.MINI_IMAGENET_RANDOM_CROP_PADDING)
        train_transform = [random_crop, transforms.RandomHorizontalFlip()]
    else:
        train_transform = []

    if dataset == 'cifar':
        normalize_transform = transforms.Normalize(base_settings.CIFAR_MEAN,
                                                   base_settings.CIFAR_STD)
    else:
        normalize_transform = transforms.Normalize(
            base_settings.MINI_IMAGENET_MEAN, base_settings.MINI_IMAGENET_STD)

    train_transform.extend([transforms.ToTensor(), normalize_transform])
    return transforms.Compose(train_transform)
Code Example #3
def get_transformer(S_H, T_H, is_train=True):
    '''
        Because the preprocessing code saves a new dataset after resizing, every transforms.Resize is commented out.
        If train.py is run directly without preprocessing, the Resize lines need to be uncommented.
        transforms.ToTensor() --> tensor values in the range [0, 1]
        transforms.Normalize((0.5, ), (0.5, )) maps [0, 1] --> [-1, 1]
    '''
    if is_train:  # transformer for train.py
        S_transformer = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            # transforms.Resize((S_H, 1024)),
            transforms.RandomCrop((400, 400)),
            transforms.ToTensor(),
            transforms.Normalize(
                (0.5, 0.5, 0.5),
                (0.5, 0.5, 0.5))  # (mean), (std) --> normalize to the range [-1, 1]
        ])

        T_transformer = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            # transforms.Resize((T_H, 1024)),
            transforms.RandomCrop((400, 400)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        return S_transformer, T_transformer
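
The docstring above states that ToTensor() yields values in [0, 1] and that Normalize with mean 0.5 and std 0.5 maps them to [-1, 1]. A minimal, self-contained sketch verifying that range arithmetic (the dummy image and variable names are illustrative, not part of the original project):

import numpy as np
from PIL import Image
from torchvision import transforms

# Dummy 8-bit RGB image covering the full 0-255 range.
img = Image.fromarray(np.array([[[0, 128, 255]] * 3] * 2, dtype=np.uint8))

x = transforms.ToTensor()(img)                                  # values in [0, 1]
y = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(x)   # (x - 0.5) / 0.5 -> [-1, 1]
print(x.min().item(), x.max().item())  # 0.0 1.0
print(y.min().item(), y.max().item())  # -1.0 1.0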
Code Example #4
    def __init__(self, n_way, k_shot, k_query, resize,
                 labels, images,
                 augment='0', split='train', test=None):

        self.n_way = n_way
        self.k_shot = k_shot
        self.k_query = k_query
        self.resize = resize
        self.split = split
        if test is not None:
            self.test_manner = test.manner
            if self.test_manner == 'standard':
                self.test_ep_num = test.ep_num
                self.test_query_num = test.query_num

        self._label_specific = labels[0]
        self._label_general = labels[1]
        self._label_specific_str = labels[2]
        self._label_general_str = labels[3]
        self.labels_str = self._label_specific_str
        self.labels = self._label_specific
        self.images = images

        if augment == '0':
            self.transform = T.Compose([
                lambda ind: Image.fromarray(self.images[ind]).convert('RGB'),   # self.images[ind]: array, 0-255
                T.Resize((self.resize, self.resize)),
                T.ToTensor(),
                T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
            ])
        elif augment == '1':
            if self.split == 'train':
                self.transform = T.Compose([
                    lambda ind: Image.fromarray(self.images[ind]).convert('RGB'),
                    T.Resize((self.resize+20, self.resize+20)),
                    T.RandomCrop(self.resize),
                    T.RandomHorizontalFlip(),
                    T.ColorJitter(brightness=.1, contrast=.1, saturation=.1, hue=.1),
                    T.ToTensor(),
                    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                ])
            else:
                self.transform = T.Compose([
                    lambda ind: Image.fromarray(self.images[ind]).convert('RGB'),
                    T.Resize((self.resize + 20, self.resize + 20)),
                    T.RandomCrop(self.resize),
                    T.ToTensor(),
                    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                ])

        self.cls_num = len(np.unique(self.labels))
        self.support_sz = self.n_way * self.k_shot      # num of samples per support set
        self.query_sz = self.n_way * self.k_query       # num of samples per query set

        self.support_x_batch, self.query_x_batch = [], []
        self.support_y_batch, self.query_y_batch = [], []
        self._create_batch()
Code Example #5
File: data.py Project: LongJohnCoder/archai
def get_transforms(dataset, aug: Union[List, str], cutout: int):
    if 'imagenet' in dataset:
        return _get_imagenet_transforms()

    if dataset == 'cifar10':
        MEAN = [0.49139968, 0.48215827, 0.44653124]
        STD = [0.24703233, 0.24348505, 0.26158768]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
    elif dataset == 'cifar100':
        MEAN = [0.507, 0.487, 0.441]
        STD = [0.267, 0.256, 0.276]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
    elif dataset == 'svhn':
        MEAN = [0.4914, 0.4822, 0.4465]
        STD = [0.2023, 0.1994, 0.20100]
        transf = [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]
    elif dataset == 'mnist':
        MEAN = [0.13066051707548254]
        STD = [0.30810780244715075]
        transf = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1)
        ]
    elif dataset == 'fashionmnist':
        MEAN = [0.28604063146254594]
        STD = [0.35302426207299326]
        transf = [
            transforms.RandomAffine(degrees=15,
                                    translate=(0.1, 0.1),
                                    scale=(0.9, 1.1),
                                    shear=0.1),
            transforms.RandomVerticalFlip()
        ]
    else:
        raise ValueError('dataset not recognized: {}'.format(dataset))

    normalize = [transforms.ToTensor(), transforms.Normalize(MEAN, STD)]

    train_transform = transforms.Compose(transf + normalize)
    test_transform = transforms.Compose(normalize)

    # add additional aug and cutout transformations
    _add_augs(train_transform, aug, cutout)

    return train_transform, test_transform
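
_add_augs is defined elsewhere in the archai project and is not shown here. As a hedged sketch of the general pattern it implies, extra transforms such as cutout-style erasing can be appended to an already-built Compose through its .transforms list; transforms.RandomErasing below is only an illustrative stand-in, not archai's actual cutout implementation:

from torchvision import transforms

def add_cutout_sketch(pipeline: transforms.Compose, cutout: int) -> None:
    # Compose keeps its steps in the .transforms list, so tensor-space
    # transforms can be appended after ToTensor()/Normalize().
    if cutout > 0:
        pipeline.transforms.append(
            transforms.RandomErasing(p=1.0, scale=(0.02, 0.1), value=0.0))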
Code Example #6
    def __init__(self, image_file, autoaugment=False):
        super(ImageNetTrainingDataset, self).__init__()
        self.image_file = image_file
        # self.data = None
        with open(self.image_file, "r") as file:
            self.data = file.readlines()
        # shuffle the dataset
        for i in range(10):
            random.shuffle(self.data)
        self.imagenet_normalization_paramters = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # First resize to 512, then crop to 448.
        # The model input size is specified via the config.
        self.model_size = ModelSize("resnet50_448")
        model_size = self.model_size.imagesize_choice()
        self.BASE_RESIZE_SIZE = model_size["resize"]
        self.INPUT_SIZE = model_size["input"]
        self.BRIGHTNESS = 0.4
        self.HUE = 0.1
        self.CONTRAST = 0.4
        self.SATURATION = 0.4

        # autoaugment
        self.Autoaugment = autoaugment

        self.index_sampler = [i for i in range(len(self.data))]

        # Current data augmentation: random crop, random horizontal flip, random rotation, color jitter
        if self.Autoaugment:
            self.image_transforms = transforms.Compose([
                transforms.Resize(
                    (self.BASE_RESIZE_SIZE, self.BASE_RESIZE_SIZE),
                    Image.BILINEAR),
                transforms.RandomCrop(self.INPUT_SIZE),
                transforms.RandomHorizontalFlip(),
                ImageNetPolicy(),
                transforms.ToTensor(), self.imagenet_normalization_paramters
            ])
        else:
            self.image_transforms = transforms.Compose([
                # transforms.RandomResizedCrop(self.INPUT_SIZE, scale=(0.2, 1.)),
                transforms.Resize(
                    (self.BASE_RESIZE_SIZE, self.BASE_RESIZE_SIZE),
                    Image.BILINEAR),
                transforms.RandomCrop(self.INPUT_SIZE),
                transforms.RandomHorizontalFlip(),
                transforms.RandomRotation(degrees=15),
                transforms.ColorJitter(brightness=self.BRIGHTNESS,
                                       contrast=self.CONTRAST,
                                       hue=self.HUE,
                                       saturation=self.SATURATION),
                transforms.ToTensor(),
                self.imagenet_normalization_paramters
            ])
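
ModelSize is a project-specific helper that is not shown in this snippet. From its usage and the comment above ("first resize to 512, then crop to 448" for "resnet50_448"), imagesize_choice() apparently returns a dict with "resize" and "input" keys. A hypothetical sketch, for illustration only:

class ModelSize:
    # Hypothetical table; only the "resnet50_448" entry is implied by the example.
    _SIZES = {"resnet50_448": {"resize": 512, "input": 448}}

    def __init__(self, name):
        self.name = name

    def imagesize_choice(self):
        return self._SIZES[self.name]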
Code Example #7
File: cifar100.py Project: ddayzzz/fl-pytorch
def make_data(options):
    data_prefix = os.path.join(DATA, 'data')
    # May need extra handling: the default image size is 32, and the original paper does not distort (random flip) the images.
    crop_size = options['cifar100_image_size']
    # per-image standardization is what tff uses for this step
    CIFAR100_MEAN = (0.5070751592371323, 0.48654887331495095,
                     0.4409178433670343)
    CIFAR100_STD = (0.2673342858792401, 0.2564384629170883,
                    0.27615047132568404)
    train_transform = transforms.Compose([
        transforms.ToPILImage(),
        # TODO: the target size is 24, so the padding should not be triggered
        transforms.RandomCrop(crop_size, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR100_MEAN, CIFAR100_STD),
    ])

    valid_transform = transforms.Compose([
        transforms.ToPILImage(),
        # Following the experimental setup given by tff
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR100_MEAN, CIFAR100_STD),
    ])

    train_client_data = _CIFAR100TFFVersion(data_prefix=data_prefix,
                                            is_train=True)
    test_client_data = _CIFAR100TFFVersion(data_prefix=data_prefix,
                                           is_train=False)
    train_clients, train_data = create_dataset(train_client_data,
                                               trans=train_transform)
    test_clients, test_data = create_dataset(test_client_data,
                                             trans=valid_transform)
    return train_clients, train_data, test_clients, test_data
Code Example #8
    def __init__(self, root, batchsz, k_shot, k_query, resize, crop_size,
                 test_dataset_indx, dictTrainAs, dictTrainBs, dataset_num):

        self.batchsz = batchsz  # batch of set, not batch of imgs
        self.k_shot = k_shot  # k-shot
        self.k_query = k_query  # for evaluation
        self.setsz = self.k_shot  # num of samples per set
        self.querysz = self.k_query  # number of samples per set for evaluation
        self.resize = resize  # resize
        self.crop_size = crop_size  # crop size
        self.dataset_num = dataset_num
        print('shuffle b:%d, %d-shot, %d-query, resize:%d' %
              (batchsz, k_shot, k_query, resize))
        self.dictTrainAs, self.dictTrainBs = dictTrainAs, dictTrainBs

        self.transform = transforms.Compose([
            lambda x: Image.open(x).convert('RGB'),
            transforms.Resize(self.resize),
            transforms.RandomCrop(self.crop_size),
            transforms.RandomHorizontalFlip(),
            # transforms.RandomRotation(5),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        dataset_num_list = list(range(self.dataset_num))
        dataset_num_list.pop(test_dataset_indx)
        print('Meta training dataset number : {}'.format(self.dataset_num))

        self.create_batch(self.batchsz, dataset_num_list, test_dataset_indx)
Code Example #9
def get_data_loaders(batch_size, num_workers, train_images_path,
                     test_images_path):
    # Train
    train_transform = transforms.Compose([
        transforms.RandomCrop(256, pad_if_needed=True),
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), NORMALIZE_IMAGENETTE
    ])

    train_set = torchvision.datasets.ImageFolder(train_images_path,
                                                 transform=train_transform)

    train_set_loader = torch.utils.data.DataLoader(train_set,
                                                   batch_size=batch_size,
                                                   num_workers=num_workers,
                                                   shuffle=True,
                                                   pin_memory=True)

    # Test
    test_transform = transforms.Compose([
        transforms.CenterCrop(256),
        transforms.ToTensor(), NORMALIZE_IMAGENETTE
    ])
    test_set = torchvision.datasets.ImageFolder(test_images_path,
                                                transform=test_transform)
    test_set_loader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=False,
                                                  pin_memory=True)

    return train_set_loader, test_set_loader
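
NORMALIZE_IMAGENETTE is a module-level constant in that project and is not shown here; presumably it is a transforms.Normalize built from the standard ImageNet channel statistics, which Imagenette (a 10-class ImageNet subset) commonly reuses. A hedged sketch of such a definition:

from torchvision import transforms

# Assumed definition, not taken from the original source.
NORMALIZE_IMAGENETTE = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                            std=[0.229, 0.224, 0.225])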
Code Example #10
def get_data_loader(data_dir, batch_size, num_workers):
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])
    train_dataset = CIFAR10(data_dir, train=True, transform=transform_train)
    test_dataset = CIFAR10(data_dir, train=False, transform=transform_test)
    # train_dataset = CIFAR10Dataset(data_dir)
    # test_dataset = CIFAR10Dataset(data_dir, split_name='test')

    train_data_loader = DataLoader(train_dataset,
                                   batch_size,
                                   shuffle=True,
                                   num_workers=num_workers,
                                   collate_fn=collate_fn)

    test_data_loader = DataLoader(test_dataset,
                                  500,
                                  shuffle=False,
                                  num_workers=1,
                                  collate_fn=collate_fn)

    return train_data_loader, test_data_loader
Code Example #11
def load_data(batch_size=32, valid_batch_size=32):
    train_transformations = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    test_transformations = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    kwargs_dl = {'root': '../data', 'download': True}
    train_set = ds.CIFAR10(train=True,
                           transform=train_transformations,
                           **kwargs_dl)
    test_set = ds.CIFAR10(train=False,
                          transform=test_transformations,
                          **kwargs_dl)
    kwargs_train = {
        'shuffle': True,
        'batch_size': batch_size,
        'num_workers': NUM_WORKER
    }
    kwargs_test = {
        'shuffle': True,
        'batch_size': valid_batch_size,
        'num_workers': NUM_WORKER
    }
    train_set = DataLoader(train_set, **kwargs_train)
    test_set = DataLoader(test_set, **kwargs_test)
    return train_set, test_set
Code Example #12
def make_dataloaders(batch_size,
                     num_workers,
                     val_split,
                     isgpu,
                     _log):
    # if isinstance(device, list):
    #     device = device[0]
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # target_transform = transforms.Compose([transforms.ToTensor()])

    # dset = MyCifar("cifar_10", download=False,transform= transform_train)
    # test_dset = MyCifar("cifar_10", download=False, train=False,transform= transform_test)

    dset = CIFAR10("cifar_10", download=False, transform=transform_train)
    test_dset = CIFAR10("cifar_10", download=False, train=False, transform=transform_test)

    _log.info("Loaded dataset on 'cuda'")

    total = len(dset)
    train_num = int(total * (1 - val_split))
    val_num = total - train_num

    _log.info("Split dataset into {%d} train samples and {%d} \
    validation samples"%(train_num,val_num))

    train, val = torch.utils.data.dataset.random_split(dset,
                                                       [train_num, val_num])

    train_loader = torch.utils.data.DataLoader(
        train,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True,)


    val_loader = torch.utils.data.DataLoader(
        val,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=True, )

    test_loader = torch.utils.data.DataLoader(
        test_dset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False, )

    # next(iter(train_loader))

    return dset, train_loader, val_loader, test_loader
Code Example #13
def get_data_loaders(batch_size, num_workers):
    dataset_directory = "experiments/datasets"

    NORMALIZE_CIFAR10 = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                             std=[0.2023, 0.1994, 0.2010])
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), NORMALIZE_CIFAR10
    ])

    train_set = torchvision.datasets.CIFAR10(dataset_directory,
                                             download=True,
                                             transform=train_transform)
    train_set_loader = torch.utils.data.DataLoader(train_set,
                                                   batch_size=batch_size,
                                                   num_workers=num_workers,
                                                   shuffle=True,
                                                   pin_memory=True)

    test_transform = transforms.Compose(
        [transforms.ToTensor(), NORMALIZE_CIFAR10])
    test_set = torchvision.datasets.CIFAR10(dataset_directory,
                                            train=False,
                                            transform=test_transform)
    test_set_loader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=False,
                                                  pin_memory=True)

    return train_set_loader, test_set_loader
Code Example #14
def get_train_data_loader(settings_model: ModelSettings,
                          settings_data: DataSettings) -> DataLoader:
    # Define image data pre-processing transforms
    #   ToTensor() normalizes pixel values between [0, 1]
    #   Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) normalizes pixel values between [-1, 1]

    transforms_train = transforms.Compose([
        transforms.RandomCrop(size=160),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    data_loader_train = DataLoader(
        dataset=TripletFaceDataset(
            root_dir=settings_data.dataset_dir,
            csv_name=settings_data.dataset_csv_file,
            num_triplets=settings_model.num_triplets_train,
            training_triplets_path=settings_data.training_triplets_path,
            transform=transforms_train,
        ),
        batch_size=settings_model.batch_size,
        num_workers=settings_model.num_workers,
        shuffle=False,
    )
    return data_loader_train
Code Example #15
def predict_defect(image_path):
    image = cv_imread(image_path)
    # Preprocess the image
    image = preprocess(image)  # image is now a contrast-enhanced single-channel grayscale image

    # Build the transform pipeline
    transformation = transforms.Compose([
        transforms.Resize((32, 32)),  # resize the image to 32 x 32
        transforms.RandomHorizontalFlip(),  # random horizontal flip
        transforms.RandomCrop(32),  # random crop; the argument sets the crop size to 32 x 32
        transforms.ToTensor(),  # convert to a PyTorch tensor scaled to [0, 1]
        transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))  # normalize each channel so values lie in [-1, 1]
    ])

    # Convert the OpenCV image to a PIL image
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

    image_tensor = transformation(image).float()
    image_tensor = image_tensor.unsqueeze_(0)  # add a batch dimension

    inputs = Variable(image_tensor)
    output = model(inputs)  # predict the defect class in the image
    defect_kind = output.data.numpy().argmax()
    return defect_kind  # return the index of the predicted class
Code Example #16
def load_CIFAR10():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),  # random crop with padding of 4
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),  # convert to tensor so Normalize can be applied
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5)),  # per-channel mean/std normalization
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])

    train_set = datasets.CIFAR10(root="/data",
                                 train=True,
                                 transform=transform_train,
                                 download=True)
    train_loader = DataLoader(train_set,
                              batch_size=128,
                              shuffle=True,
                              num_workers=4)

    test_set = datasets.CIFAR10(root="/data",
                                train=False,
                                transform=transform_test)
    test_loader = DataLoader(test_set,
                             batch_size=100,
                             shuffle=False,
                             num_workers=4)

    return train_loader, test_loader
Code Example #17
def get_dataloader(dataset_name):
    trainloader, testloader = None, None
    if dataset_name == 'cifar10':
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])

        test_transform = transforms.Compose([transforms.ToTensor()])

        trainset = torchvision.datasets.CIFAR10(root='./data',
                                                train=True,
                                                download=True,
                                                transform=train_transform)
        trainloader = torch.utils.data.DataLoader(trainset,
                                                  batch_size=100,
                                                  shuffle=True,
                                                  num_workers=2)

        testset = torchvision.datasets.CIFAR10(root='./data',
                                               train=False,
                                               download=True,
                                               transform=test_transform)
        testloader = torch.utils.data.DataLoader(testset,
                                                 batch_size=100,
                                                 shuffle=False,
                                                 num_workers=2)

    return trainloader, testloader
Code Example #18
def get_train_transform(mean, std, size):
    """
    Data augmentation and normalization for training
    :param mean:
    :param std:
    :param size: width, height
    :return:
    """
    if isinstance(size, int):
        size = (int(size), (size))
    else:
        size = size

    train_transform = transforms.Compose([
        RelativePreservingResize((int(size[0] * (256 / 224)), int(size[1] * (256 / 224)))),
        transforms.RandomCrop(size),
        RandomShift(p=0.25, size=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.02),
        RandomFilter(blur=True),
        RandomRotate(90, 0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
    return train_transform
Code Example #19
def create_dataloader(args):
    train_transform = transforms.Compose([
        # Resize(128),
        transforms.Resize(256),  # Scale is deprecated; Resize is the current equivalent
        transforms.RandomCrop((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    ])

    test_transform = transforms.Compose([
        # Resize(128),
        transforms.Resize(256),  # Scale is deprecated; Resize is the current equivalent
        transforms.CenterCrop((224, 224)),
        transforms.ToTensor(),
        # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    train_path = os.path.join(args.data_path, 'train')
    test_path = os.path.join(args.data_path, 'test')
    train_dataset = dataset.ImageFolder(root=train_path, ground_truth=True, transform=train_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4)

    test_dataset = dataset.ImageFolder(root=test_path, ground_truth=False, transform=test_transform)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)

    return train_loader, test_loader
Code Example #20
def get_data_loaders_cifar10(marked_images_directory,
                             augment,
                             batch_size=512,
                             num_workers=1):

    cifar10_dataset_root = "experiments/datasets"  # Will download here

    # Base Training Set
    base_train_set = torchvision.datasets.CIFAR10(cifar10_dataset_root,
                                                  download=True)

    # Load marked data from Numpy img format - no transforms
    extensions = ("npy")
    marked_images = torchvision.datasets.DatasetFolder(marked_images_directory,
                                                       numpy_loader,
                                                       extensions=extensions)

    # Setup Merged Training Set: Vanilla -> Merged <- Marked
    # MergedDataset allows you to replace certain examples with marked alternatives
    merge_to_vanilla = [None] * len(marked_images)
    for i, (path, target) in enumerate(marked_images.samples):
        img_id = re.search('[0-9]+', os.path.basename(path))
        merge_to_vanilla[i] = int(img_id[0])

    merged_train_set = MergedDataset(base_train_set, marked_images,
                                     merge_to_vanilla)

    # Add Transform and Get Training set dataloader
    transforms_list = []
    if augment:
        transforms_list += [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip()
        ]

    transforms_list += [transforms.ToTensor(), NORMALIZE_CIFAR10]

    train_transform = transforms.Compose(transforms_list)
    merged_train_set.transform = train_transform

    train_set_loader = torch.utils.data.DataLoader(merged_train_set,
                                                   batch_size=batch_size,
                                                   num_workers=num_workers,
                                                   shuffle=True,
                                                   pin_memory=True)

    # Test Set (Simple)
    test_transform = transforms.Compose(
        [transforms.ToTensor(), NORMALIZE_CIFAR10])
    test_set = torchvision.datasets.CIFAR10(cifar10_dataset_root,
                                            train=False,
                                            transform=test_transform)
    test_set_loader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=False,
                                                  pin_memory=True)

    return train_set_loader, test_set_loader
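
numpy_loader and MergedDataset come from the surrounding project and are not shown in this example. Since PIL-based transforms (RandomCrop, RandomHorizontalFlip) are applied to the merged set afterwards, numpy_loader presumably reads a .npy file and wraps it as a PIL image; the following is a hypothetical sketch, not the project's actual loader:

import numpy as np
from PIL import Image

def numpy_loader(path):
    # Assumed behaviour: load the saved array and return a PIL image so the
    # usual torchvision transforms can be applied downstream.
    return Image.fromarray(np.load(path).astype(np.uint8))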
Code Example #21
def transform_cifar(is_train):

    return transforms.Compose([
        transforms.RandomCrop(size=26) if is_train else Identity(),
        transforms.RandomHorizontalFlip(p=0.5) if is_train else Identity(),
        transforms.CenterCrop(size=26) if not is_train else Identity(),
        transforms.ToTensor(),
    ])
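
Identity() above is not defined in the snippet; it has to be a no-op transform so the Compose keeps the same structure whether or not is_train is set (torch.nn.Identity would serve the same purpose, since it simply returns its input). A minimal sketch of such a helper:

class Identity:
    """No-op transform: returns its input unchanged."""
    def __call__(self, x):
        return x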
Code Example #22
    def __init__(self, resize_to=(224, 224)):
        list_of_transforms = [
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(size=resize_to),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]
        self.transform = transforms.Compose(list_of_transforms)
Code Example #23
def train_transform():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    yield transform_train
Code Example #24
    def finetune_transform(self):
        tfms = transforms.Compose([
            # transforms.Resize(32),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(self.config.sz),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        return tfms
Code Example #25
def data_loaders_imagenet_imagenette(train_images_path, test_images_path,
                                     marked_images_directory, normalizer,
                                     batch_size, num_workers, world_size,
                                     rank):

    # Base Training Set
    base_train_set = torchvision.datasets.ImageFolder(train_images_path)

    # Load marked data from Numpy img format - no transforms
    extensions = ("npy")
    marked_images = torchvision.datasets.DatasetFolder(marked_images_directory,
                                                       numpy_loader,
                                                       extensions=extensions)

    # Setup Merged Training Set: Vanilla -> Merged <- Marked
    # MergedDataset allows you to replace certain examples with marked alternatives
    merge_to_vanilla = [None] * len(marked_images)
    for i, (path, target) in enumerate(marked_images.samples):
        img_id = re.search('[0-9]+', os.path.basename(path))
        merge_to_vanilla[i] = int(img_id[0])

    merged_train_set = MergedDataset(base_train_set, marked_images,
                                     merge_to_vanilla)

    # Add Transform, sampler and get loader
    train_transform = transforms.Compose([
        transforms.RandomCrop(256, pad_if_needed=True),
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalizer
    ])

    merged_train_set.transform = train_transform

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        merged_train_set, num_replicas=world_size, rank=rank)

    train_set_loader = torch.utils.data.DataLoader(merged_train_set,
                                                   batch_size=batch_size,
                                                   num_workers=num_workers,
                                                   shuffle=False,
                                                   sampler=train_sampler,
                                                   pin_memory=True)

    # Test
    test_transform = transforms.Compose(
        [transforms.CenterCrop(256),
         transforms.ToTensor(), normalizer])
    test_set = torchvision.datasets.ImageFolder(test_images_path,
                                                transform=test_transform)
    test_set_loader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=False,
                                                  pin_memory=True)

    return train_set_loader, test_set_loader
Code Example #26
    def test_splitting_working_with_transforms(self):
        ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
        transforms_expected = {'train': transforms.RandomCrop(10), 'val': None, 'test': None}

        splitter = WildFireSplitter(ratios, transforms=transforms_expected)
        splitter.fit(self.wildfire)

        for (set_, transform_expected) in transforms_expected.items():
            self.assertIs(getattr(splitter, set_).transform, transform_expected)
Code Example #27
    def infer(self, img_file):
        tik = time.time()
        img = Image.open(img_file)

        preprocess = transforms.Compose([
            transforms.Resize(227),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        img = preprocess(img)
        img.unsqueeze_(0)

        img = img.to(self.device)

        outputs = self.model(img)
        outputs = F.softmax(outputs, dim=1)

        # get TOP-K output labels and corresponding probabilities
        topK_prob, topK_label = torch.topk(outputs, self.topK)
        prob = topK_prob.to("cpu").detach().numpy().tolist()

        _, predicted = torch.max(outputs.data, 1)
        tok = time.time()

        if prob[0][0] >= cfg['thresholds']['plant_disease_recognition']:
            return {
                'status':
                0,
                'message':
                'success',
                'elapse':
                tok - tik,
                'results': [{
                    'name':
                    self.key_type[int(topK_label[0][i].to("cpu"))],
                    'disease':
                    int(topK_label[0][i].data.to("cpu").numpy()),
                    'prob':
                    round(prob[0][i], 4)
                } for i in range(self.topK)]
            }
        else:
            return {
                'status':
                0,
                'message':
                'success',
                'elapse':
                tok - tik,
                'results': [{
                    'name': "Unknown",
                    'disease': -1,
                    'prob': round(prob[0][0], 4)
                }]
            }
Code Example #28
    def __init__(self,
                 root,
                 train=True,
                 transform=None,
                 target_transform=None,
                 download=False,
                 loader='pil'):
        self.root = os.path.expanduser(root)
        #https://github.com/dragen1860/LearningToCompare-Pytorch/blob/master/MiniImagenet.py
        self._MEAN = [0.485, 0.456, 0.406]
        self._STD = [0.229, 0.224, 0.225]
        self.transform = transform
        if train:
            self.transform = transforms.Compose([
                transforms.RandomCrop(64, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(self._MEAN, self._STD),
            ])
            # if args.cutout:
            #     train_transform.transforms.append(Cutout(args.cutout_length))
        else:
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(self._MEAN, self._STD),
            ])
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.fpath = os.path.join(root, self.download_fname)
        self.loader = loader

        if download:
            self.download()

        if not check_integrity(self.fpath, self.md5):
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        _, class_to_idx = find_classes(
            os.path.join(self.root, self.base_folder, 'wnids.txt'))
        # self.classes = classes

        if self.train:
            dirname = 'train'
        else:
            dirname = 'val'

        self.data_info = make_dataset(self.root, self.base_folder, dirname,
                                      class_to_idx)

        if len(self.data_info) == 0:
            raise (RuntimeError("Found 0 images in subfolders of: " + root +
                                "\n"
                                "Supported image extensions are: " +
                                ",".join(IMG_EXTENSIONS)))
Code Example #29
def medium_augment():
    medium_augment_ = transforms.Compose([
        transforms.CenterCrop((100, 100)),
        transforms.RandomCrop((80, 80)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomRotation(degrees=(-90, 90)),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    return medium_augment_
Code Example #30
File: model_wrapper.py Project: RuqiBai/Hawkeye
    def __init__(self, model):
        self.model = model
        self.device = next(model.parameters()).device
        self.transform = transforms.Compose([
            transforms.Pad(4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32),
            transforms.ToTensor()
        ])
        self.stat = Counter()
        self.path = "./data/models_dict/%s.ckpt" % self.model.__class__.__name__