def eval():
    """Run single-pass super-resolution over every image in ``opt.input``.

    Each image file in ``opt.input`` is read as RGB, pushed through
    ``chop_forward`` (tiled inference, gradients disabled), rescaled from
    [0, 1] to [0, 255], and saved under the same filename in ``opt.Result``.

    NOTE(review): this function shadows the builtin ``eval`` and a second
    ``def eval()`` later in this file shadows it in turn — confirm which
    definition is actually meant to be live.
    """
    denoiser.eval()
    model.eval()
    # Both lists are derived from the same listdir() scan, so they pair
    # up index-for-index: same filename, different directory.
    lr_paths = [join(opt.input, name) for name in listdir(opt.input) if is_image_file(name)]
    sr_paths = [join(opt.Result, name) for name in listdir(opt.input) if is_image_file(name)]
    for idx, (lr_path, sr_path) in enumerate(zip(lr_paths, sr_paths)):
        start = time.time()
        lr_img = Image.open(lr_path).convert('RGB')
        with torch.no_grad():
            prediction = chop_forward(lr_img)
        elapsed = time.time() - start
        print("===> Processing: %s || Timer: %.4f sec." % (str(idx), elapsed))
        # Scale to 8-bit range and clamp before the uint8 cast so out-of-range
        # values do not wrap around.
        prediction = prediction * 255.0
        prediction = prediction.clamp(0, 255)
        Image.fromarray(np.uint8(prediction)).save(sr_path)
def eval():
    """Run 8-way self-ensemble super-resolution over every image in ``opt.input``.

    For each input image, predictions are made for the identity, the three
    90-degree rotations, and the left-right flip of each of those four
    orientations. Every prediction is mapped back to the original
    orientation (undoing the flip first, then the rotation), the eight
    results are averaged, rescaled to [0, 255], and saved under the same
    filename in ``opt.Result``.

    NOTE(review): this redefines ``eval`` and silently replaces the earlier
    ``def eval()`` in this file at import time — confirm that is intended.
    """
    denoiser.eval()
    model.eval()
    LR_image = [join(opt.input, x) for x in listdir(opt.input) if is_image_file(x)]
    SR_image = [join(opt.Result, x) for x in listdir(opt.input) if is_image_file(x)]
    # FIX: removed unused local `avg_psnr_predicted` (never updated or read).
    for i in range(len(LR_image)):  # FIX: len() instead of .__len__()
        t0 = time.time()
        LR = Image.open(LR_image[i]).convert('RGB')
        # Forward transforms. PIL's ROTATE_* constants rotate counter-clockwise.
        LR_90 = LR.transpose(Image.ROTATE_90)
        LR_180 = LR.transpose(Image.ROTATE_180)
        LR_270 = LR.transpose(Image.ROTATE_270)
        LR_f = LR.transpose(Image.FLIP_LEFT_RIGHT)
        LR_90f = LR_90.transpose(Image.FLIP_LEFT_RIGHT)
        LR_180f = LR_180.transpose(Image.FLIP_LEFT_RIGHT)
        LR_270f = LR_270.transpose(Image.FLIP_LEFT_RIGHT)
        with torch.no_grad():
            pred = chop_forward(LR)
            pred_90 = chop_forward(LR_90)
            pred_180 = chop_forward(LR_180)
            pred_270 = chop_forward(LR_270)
            pred_f = chop_forward(LR_f)
            pred_90f = chop_forward(LR_90f)
            pred_180f = chop_forward(LR_180f)
            pred_270f = chop_forward(LR_270f)
        # Inverse transforms: np.rot90(x, k) rotates counter-clockwise, so
        # k=3 undoes a 90-degree CCW rotation, k=2 undoes 180, k=1 undoes 270.
        # For the flipped variants, undo the flip first, then the rotation.
        pred_90 = np.rot90(pred_90, 3)
        pred_180 = np.rot90(pred_180, 2)
        pred_270 = np.rot90(pred_270, 1)
        pred_f = np.fliplr(pred_f)
        pred_90f = np.rot90(np.fliplr(pred_90f), 3)
        pred_180f = np.rot90(np.fliplr(pred_180f), 2)
        pred_270f = np.rot90(np.fliplr(pred_270f), 1)
        # Average the ensemble and scale from [0, 1] to [0, 255].
        prediction = (pred + pred_90 + pred_180 + pred_270 +
                      pred_f + pred_90f + pred_180f + pred_270f) * 255.0 / 8.0
        t1 = time.time()
        print("===> Processing: %s || Timer: %.4f sec." % (str(i), (t1 - t0)))
        # Clamp before the uint8 cast so out-of-range values do not wrap.
        prediction = prediction.clip(0, 255)
        Image.fromarray(np.uint8(prediction)).save(SR_image[i])
def load_data(mode='cifar10', batch_size=16):
    """Build train/test datasets and loaders for one of three datasets.

    Args:
        mode: one of ``'cifar10'``, ``'mnist'``, ``'faces'``.
        batch_size: batch size for both loaders.

    Returns:
        Tuple ``(trainset, trainloader, testset, testloader, classes)``;
        ``classes`` is ``None`` for the ``'faces'`` dataset.

    Raises:
        AssertionError: if ``mode`` is not a known dataset.
    """
    assert mode in ['cifar10', 'mnist', 'faces'], '未知数据集'
    if mode == 'faces':
        # NOTE(review): dataset paths are hard-coded to a local Windows drive.
        root_path = 'G:/Dataset/celebAHQ/'
        image_list = [x for x in os.listdir(root_path) if is_image_file(x)]
        # 80/20 split by listing order (not shuffled — deterministic split).
        train_list = image_list[:int(0.8 * len(image_list))]
        test_list = image_list[int(0.8 * len(image_list)):]
        assert len(train_list) > 0
        assert len(test_list) >= 0
        trainset = MyDataset(train_list, root_path, input_height=None,
                             crop_height=None, output_height=32, is_mirror=True)
        testset = MyDataset(test_list, root_path, input_height=None,
                            crop_height=None, output_height=32, is_mirror=False)
        trainloader = MyDataLoader(trainset, batch_size)
        testloader = MyDataLoader(testset, batch_size, shuffle=False)
        classes = None
        return trainset, trainloader, testset, testloader, classes
    elif mode == 'cifar10':
        root_path = 'G:/Dataset/cifar10/'
        # FIX: CIFAR-10's tenth class is 'truck' (was misspelled 'trunk').
        classes = ('plane', 'car', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck')
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        trainset = torchvision.datasets.CIFAR10(root=root_path, train=True,
                                                download=False, transform=transform)
        testset = torchvision.datasets.CIFAR10(root=root_path, train=False,
                                               download=False, transform=transform)
    elif mode == 'mnist':
        root_path = 'G:/Dataset/mnist/'
        classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
        transform = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])
        trainset = torchvision.datasets.MNIST(root=root_path, train=True,
                                              download=False, transform=transform)
        testset = torchvision.datasets.MNIST(root=root_path, train=False,
                                             download=False, transform=transform)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True,
                             pin_memory=True, drop_last=False, num_workers=2)
    # FIX: testloader was built from `trainset` with shuffle=True, so the
    # "test" loader served shuffled training data. It now wraps `testset`
    # and, consistent with the 'faces' branch, does not shuffle.
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False,
                            pin_memory=True, drop_last=False, num_workers=2)
    return trainset, trainloader, testset, testloader, classes