def get_eval_loader(conf):
    """Build the DataLoader used for evaluation.

    Args:
        conf: config object providing ``input_size``, ``eval_root_path``,
            ``patch_info``, ``ft_width``, ``ft_height`` and ``batch_size``.

    Returns:
        torch.utils.data.DataLoader over the evaluation dataset.
    """
    # Evaluation must be deterministic: the original pipeline copied the
    # training augmentations (RandomResizedCrop + RandomHorizontalFlip),
    # which makes reported metrics vary from run to run. Use a fixed
    # resize instead.
    eval_transform = trans.Compose([
        trans.ToPILImage(),
        trans.Resize(tuple(conf.input_size)),
        trans.ToTensor(),
        # NOTE(review): ToTensor scales pixels to [0, 1], while
        # mean=127.5/std=128 look like 0-255-range statistics. Kept as-is
        # to match the training pipeline — confirm this is intentional.
        trans.Normalize(mean=127.5, std=128)
    ])
    root_path = '{}/{}'.format(conf.eval_root_path, conf.patch_info)
    evalset = DatasetFolderFT(root_path, eval_transform, None, conf.ft_width,
                              conf.ft_height)
    # shuffle=False: ordering does not affect aggregate metrics and a fixed
    # order keeps evaluation runs reproducible.
    eval_loader = DataLoader(evalset,
                             batch_size=conf.batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=16)
    return eval_loader
def get_train_loader(conf):
    """Create the training DataLoader with standard augmentation.

    Each sample is randomly crop-resized, color-jittered, slightly rotated
    and horizontally flipped before tensor conversion and normalization.

    Args:
        conf: config object providing ``input_size``, ``train_root_path``,
            ``patch_info``, ``ft_width``, ``ft_height`` and ``batch_size``.

    Returns:
        torch.utils.data.DataLoader yielding shuffled training batches.
    """
    augmentation_steps = [
        trans.ToPILImage(),
        trans.RandomResizedCrop(size=tuple(conf.input_size), scale=(0.9, 1.1)),
        trans.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4,
                          hue=0.1),
        trans.RandomRotation(10),
        trans.RandomHorizontalFlip(),
        trans.ToTensor(),
        trans.Normalize(mean=127.5, std=128),
    ]
    train_transform = trans.Compose(augmentation_steps)

    root_path = '{}/{}'.format(conf.train_root_path, conf.patch_info)
    trainset = DatasetFolderFT(root_path, train_transform, None,
                               conf.ft_width, conf.ft_height)

    return DataLoader(trainset,
                      batch_size=conf.batch_size,
                      shuffle=True,
                      pin_memory=True,
                      num_workers=16)
# Example #3
 def predict(self, img):
     """Run a single-image forward pass and return class probabilities.

     Args:
         img: input image accepted by ``trans.ToTensor`` — presumably an
             HWC uint8 ndarray or PIL image; TODO confirm against callers.

     Returns:
         numpy.ndarray of softmax probabilities with a leading batch
         dimension of 1.
     """
     test_transform = trans.Compose([
         trans.ToTensor(),
     ])
     img = test_transform(img)
     # Add the batch dimension and move to the model's device.
     img = img.unsqueeze(0).to(self.device)
     self.model.eval()
     with torch.no_grad():
         # Call the module directly (runs registered hooks) rather than
         # invoking .forward() by hand.
         result = self.model(img)
         # Explicit dim: F.softmax without `dim` is deprecated and emits a
         # warning; dim=1 normalizes over the class axis.
         result = F.softmax(result, dim=1).cpu().numpy()
     return result
def get_train_loader(conf):
    """Build train/validation DataLoaders over one augmented dataset.

    The dataset is split by shuffled index lists; ``conf.val_size`` is the
    fraction of samples held out for validation.

    Args:
        conf: config object providing ``input_size``, ``train_root_path``,
            ``patch_info``, ``ft_width``, ``ft_height``, ``batch_size``
            and ``val_size``.

    Returns:
        Tuple ``(train_loader, val_loader)``.
    """
    # NOTE(review): the validation subset receives the same random
    # augmentations as training — confirm this is intended.
    train_transform = trans.Compose([
        trans.ToPILImage(),
        trans.RandomResizedCrop(size=tuple(conf.input_size), scale=(0.9, 1.1)),
        trans.ColorJitter(brightness=0.4,
                          contrast=0.4,
                          saturation=0.4,
                          hue=0.1),
        trans.RandomRotation(10),
        trans.RandomHorizontalFlip(),
        trans.ToTensor()
    ])
    root_path = '{}/{}'.format(conf.train_root_path, conf.patch_info)
    dataset = DatasetFolderFT(root_path, train_transform, None, conf.ft_width,
                              conf.ft_height)

    # Creating data indices for training and validation splits.
    random_seed = 42  # fixed seed so the split is reproducible across runs

    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(conf.val_size * dataset_size))
    # Use a private RandomState instead of np.random.seed(): seeding the
    # global NumPy RNG here would silently change every other consumer of
    # np.random in the process. RandomState(seed).shuffle produces exactly
    # the same permutation as the former seed()+shuffle() pair, so the
    # resulting split is unchanged.
    np.random.RandomState(random_seed).shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Samplers draw from disjoint index sets of the same underlying dataset.
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)

    train_loader = DataLoader(dataset,
                              batch_size=conf.batch_size,
                              sampler=train_sampler,
                              num_workers=16)
    val_loader = DataLoader(dataset,
                            batch_size=conf.batch_size,
                            sampler=valid_sampler,
                            num_workers=16)

    return train_loader, val_loader