コード例 #1
0
def valid():
    """Evaluate the model on the validation set and print PSNR/SSIM.

    Relies on module-level globals: ``model``, ``testing_data_loader``,
    ``args``, ``device``, ``utils`` and ``sc`` (skimage.color).

    Returns:
        tuple: ``(avg_psnr, avg_ssim)`` averaged over all batches
        (0.0, 0.0 when the loader is empty).
    """
    model.eval()
    avg_psnr, avg_ssim = 0.0, 0.0
    num_batches = len(testing_data_loader)
    for i, batch in enumerate(testing_data_loader):
        lr_tensor, hr_tensor = batch[0], batch[1]
        if args.cuda:
            lr_tensor = lr_tensor.to(device)
            hr_tensor = hr_tensor.to(device)

        # Inference only -- skip autograd bookkeeping.
        with torch.no_grad():
            pre = model(lr_tensor)

        sr_img = utils.tensor2np(pre.detach()[0])
        gt_img = utils.tensor2np(hr_tensor.detach()[0])
        # Shave a `scale`-pixel border before computing metrics -- the
        # standard SR evaluation convention.
        crop_size = args.scale
        cropped_sr_img = utils.shave(sr_img, crop_size)
        cropped_gt_img = utils.shave(gt_img, crop_size)
        # Truthiness instead of `is True` so integer flags (1/0) work too.
        if args.isY:
            # Evaluate on the luminance (Y) channel only.
            im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
            im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])
        else:
            im_label = cropped_gt_img
            im_pre = cropped_sr_img

        psnr = utils.compute_psnr(im_pre, im_label)
        ssim = utils.compute_ssim(im_pre, im_label)

        avg_psnr += psnr
        avg_ssim += ssim
        print(
            f" Valid {i}/{num_batches} with PSNR = {psnr} and SSIM = {ssim}"
        )

    # Guard against an empty loader (would raise ZeroDivisionError).
    if num_batches:
        avg_psnr /= num_batches
        avg_ssim /= num_batches
    print("===> Valid. psnr: {:.4f}, ssim: {:.4f}".format(avg_psnr, avg_ssim))
    return avg_psnr, avg_ssim
コード例 #2
0
def process(imname, model):
    """Load an image, run the SR model on it, and time the forward pass.

    NOTE(review): this snippet appears truncated -- ``cropped_sr_img`` is
    computed but never returned or saved; the rest of the function is not
    visible here.

    Args:
        imname: path to the low-resolution input image.
        model: the super-resolution network (callable on a 4-D tensor).
    """
    # cv2 loads BGR; fancy-index the channels to get RGB.
    im_l = cv2.imread(imname, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    # im_l = sio.imread(opt.test_lr_folder + '/' + imname.split('/')[-1])  # RGB
    # Scale to [0, 1], move channels first, and add a batch dimension:
    # HWC -> CHW -> 1xCxHxW.
    im_input = im_l / 255.0
    im_input = np.transpose(im_input, (2, 0, 1))
    im_input = im_input[np.newaxis, ...]
    im_input = torch.from_numpy(im_input).float()

    # `cuda` and `device` are module-level globals defined elsewhere.
    if cuda:
        print("use cuda!!!")
        im_input = im_input.to(device)

    with torch.no_grad():
        # Wall-clock timing in milliseconds.
        start = time.time() * 1000
        out = model(im_input)
        # NOTE(review): synchronize() is called even when `cuda` is False;
        # on a CPU-only build this raises -- confirm intended usage.
        torch.cuda.synchronize()
        time_cost = time.time() * 1000 - start
        print(f"time cost = {time_cost} ms")

    # Convert the first (only) batch element back to a numpy image and
    # shave an `upscale_factor`-pixel border, as is standard for SR metrics.
    out_img = utils.tensor2np(out.detach()[0])
    crop_size = upscale_factor
    cropped_sr_img = utils.shave(out_img, crop_size)
コード例 #3
0
ファイル: test.py プロジェクト: zdyshine/RTC2020_EfficientSR
    # --- Loop-body fragment: the enclosing function/loop header is outside
    # this view. `i`, `start`, `end` (CUDA events), `time_list`, `psnr_list`,
    # `ssim_list`, `opt`, `im_gt` and `imname` are defined elsewhere. ---
    im_input = torch.from_numpy(im_input).float()

    if cuda:
        model = model.to(device)
        im_input = im_input.to(device)

    with torch.no_grad():
        # Time the forward pass with CUDA events; synchronize() makes sure
        # the events have actually completed before reading the elapsed time.
        start.record()
        out = model(im_input)
        end.record()
        torch.cuda.synchronize()
        time_list[i] = start.elapsed_time(end)  # milliseconds

    out_img = utils.tensor2np(out.detach()[0])
    # Shave an `upscale_factor`-pixel border from both SR output and ground
    # truth before computing metrics (standard SR evaluation convention).
    crop_size = opt.upscale_factor
    cropped_sr_img = utils.shave(out_img, crop_size)
    cropped_gt_img = utils.shave(im_gt, crop_size)
    if opt.is_y is True:
        # Evaluate on the luminance (Y) channel of the YCbCr conversion.
        im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
        im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])
    else:
        im_label = cropped_gt_img
        im_pre = cropped_sr_img
    psnr_list[i] = utils.compute_psnr(im_pre, im_label)
    ssim_list[i] = utils.compute_ssim(im_pre, im_label)

    # Build the output path as <stem>x<scale>.png inside the output folder.
    # NOTE(review): splitting on '/' is not portable to Windows paths.
    output_folder = os.path.join(
        opt.output_folder,
        imname.split('/')[-1].split('.')[0] + 'x' + str(opt.upscale_factor) +
        '.png')
コード例 #4
0
    def predict(self, data, **kwargs):
        """Run SR inference over *data*, reporting PSNR/SSIM for the model
        and for a bicubic baseline.

        Args:
            data: iterable of image bundles (dicts with at least 'x'/'origin',
                'y', 'size' and 'name' keys -- schema inferred from usage,
                confirm against the data loader).
            **kwargs: must contain 'testset' (subdirectory name for saved
                predictions) and 'upscale' (integer SR factor).

        Side effects: saves predictions under config['preds_dir']/<testset>
        and prints per-image and averaged metrics.
        """
        # eng = meng.start_matlab()
        # for name, param in self.model.named_parameters():
        #     a = param.clone().cpu().data.numpy()
        #     print(name, a.max(), a.min())
        # print('\n')
        save_dir = os.path.join(self.config['preds_dir'], kwargs['testset'])
        mkdir_if_not_exist(save_dir)
        self.model.eval()
        psnr_list = []
        ssim_list = []
        b_psnr_list = []   # bicubic-baseline PSNR per image
        b_ssim_list = []   # bicubic-baseline SSIM per image
        with torch.no_grad():
            for img_bundle in data:
                # print(img_bundle['name'])
                if "color" in self.config.keys() and self.config["color"]:
                    # Full-color evaluation: compare all channels.
                    x = img_bundle['origin']
                    y = img_bundle['y']
                    multichannel = True
                else:
                    # Y-channel evaluation: keep only the first channel of y.
                    # NOTE(review): the 3-way unpack assumes y has exactly
                    # 3 channels -- confirm against the data loader.
                    x = img_bundle['x']
                    y = img_bundle['y']
                    (rows, cols, channel) = y.shape
                    y, _, _ = np.split(y, indices_or_sections=channel, axis=2)
                    multichannel = False

                # HW(-C) numpy array -> 1xCxHxW float tensor.
                x = torch.from_numpy(x).float().view(1, -1, x.shape[0],
                                                     x.shape[1])
                if self.config['cuda']:
                    x = x.cuda()
                # print(x[:5])
                lr_size = (x.shape[2], x.shape[3])
                hr_size = img_bundle['size']
                # Progressive upscaling builds a ladder of intermediate sizes;
                # otherwise go straight to the target size.
                if self.config['progressive']:
                    inter_sizes = np_utils.interval_size(
                        lr_size, hr_size, self.config['max_gradual_scale'])
                else:
                    inter_sizes = []
                inter_sizes.append(hr_size)

                if self.config['net'] == 'wmcnn':
                    # WMCNN predicts wavelet sub-bands; reassemble the image
                    # with an inverse 2-D DWT (bior1.1 basis).
                    preds = self.model(x)
                    preds = [p.data.cpu().numpy() for p in preds]
                    # preds = [matlab.double(p.data.cpu().numpy().squeeze().tolist()) for p in preds]
                    # preds = eng.idwt2(*preds, 'bior1.1')
                    preds = pywt.idwt2((preds[0], (preds[1:])), 'bior1.1')
                else:
                    preds = self.model(x)

                if isinstance(preds, list):
                    # Y-channel's pixels are within [16, 235]
                    preds = np.clip(preds[-1].data.cpu().numpy(), 16 / 255,
                                    235 / 255).astype(np.float64)
                    # preds = np.clip(preds[-1].data.cpu().numpy(), 0, 1).astype(np.float64)
                else:
                    # `preds` may already be a numpy array (wmcnn branch);
                    # the AttributeError fallback keeps it as-is.
                    try:
                        preds = preds.data.cpu().numpy()
                    except AttributeError:
                        preds = preds
                    # preds = preds.mul(255).clamp(0, 255).round().div(255)
                    preds = np.clip(preds, 16 / 255,
                                    235 / 255).astype(np.float64)
                    # preds = np.clip(preds, 0, 1).astype(np.float64)

                # Drop batch dim; move channels last for image-style arrays.
                preds = preds.squeeze()
                if len(preds.shape) == 3:
                    preds = preds.transpose([1, 2, 0])
                # modcrop trims to a multiple of the scale; shave removes the
                # border before metric computation.
                preds = modcrop(preds.squeeze(), kwargs['upscale'])
                preds_bd = shave(preds.squeeze(), kwargs['upscale'])
                y = modcrop(y.squeeze(), kwargs['upscale'])
                y_bd = shave(y.squeeze(), kwargs['upscale'])

                # print(preds_bd.shape, y_bd.shape)
                # Bicubic baseline: upscale the LR input and clip to the
                # same Y-channel range as the predictions.
                x = x.data.cpu().numpy().squeeze()
                bic = imresize.imresize(x, scalar_scale=kwargs['upscale'])
                bic = np.clip(bic, 16 / 255, 235 / 255).astype(np.float64)
                bic = shave(bic.squeeze(), kwargs['upscale'])
                b_psnr = measure.compare_psnr(bic, y_bd)
                b_ssim = measure.compare_ssim(bic, y_bd)
                b_psnr_list.append(b_psnr)
                b_ssim_list.append(b_ssim)

                m_psnr = measure.compare_psnr(preds_bd, y_bd)
                m_ssim = measure.compare_ssim(preds_bd,
                                              y_bd,
                                              multichannel=multichannel)
                print('PSNR of image {} is {}'.format(img_bundle['name'],
                                                      m_psnr))
                print('SSIM of image {} is {}'.format(img_bundle['name'],
                                                      m_ssim))
                psnr_list.append(m_psnr)
                ssim_list.append(m_ssim)
                self.save_preds(save_dir, preds, img_bundle, True)

        print('Averaged PSNR is {}'.format(np.mean(np.array(psnr_list))))
        print('Averaged SSIM is {}'.format(np.mean(np.array(ssim_list))))
        print('Averaged BIC PSNR is {}'.format(np.mean(np.array(b_psnr_list))))
        print('Averaged BIC SSIM is {}'.format(np.mean(np.array(b_ssim_list))))
コード例 #5
0
ファイル: test.py プロジェクト: dandingbudanding/DC-SR-agora
# Validation script: super-resolve every batch, save each SR image, and
# accumulate Y-channel PSNR/SSIM (metrics computed after shaving a border,
# the usual SR evaluation convention). Relies on globals defined earlier in
# the file: `model`, `testing_data_loader`, `device`, `utils`, `sc`, `cv2`.
start = time.time()

# Bug fix: the accumulators were used below without ever being initialised.
avg_psnr, avg_ssim = 0.0, 0.0
# Make sure the output directory exists before the first imwrite.
os.makedirs("./results/", exist_ok=True)

for idx, batch in enumerate(testing_data_loader):
    lr_tensor, hr_tensor = batch[0], batch[1]
    # The original guarded this with `if 1:` -- the move is unconditional.
    lr_tensor = lr_tensor.to(device)
    hr_tensor = hr_tensor.to(device)

    with torch.no_grad():
        pre = model(lr_tensor)

    sr_img = utils.tensor2np(pre.detach()[0])
    gt_img = utils.tensor2np(hr_tensor.detach()[0])
    crop_size = 2  # border width shaved before metrics (scale factor here)
    cropped_sr_img = utils.shave(sr_img, crop_size)
    cropped_gt_img = utils.shave(gt_img, crop_size)

    # Evaluate on the luminance (Y) channel only.
    im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
    im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])

    # Bug fix: the image was transposed to CHW (`.transpose(2, 0, 1)`)
    # right before cv2.imwrite, which expects an HWC array -- the saved
    # file would have been garbled. Write the HWC image directly.
    # NOTE(review): tensor2np presumably yields RGB while cv2 writes BGR;
    # a channel swap may still be wanted -- confirm against tensor2np.
    cv2.imwrite(os.path.join("./results/", str(idx) + ".png"), sr_img)

    avg_psnr += utils.compute_psnr(im_pre, im_label)
    avg_ssim += utils.compute_ssim(im_pre, im_label)

end = time.time() - start
print(end)
print("===> Valid. psnr: {:.4f}, ssim: {:.4f}".format(avg_psnr / len(testing_data_loader), avg_ssim / len(testing_data_loader)))
コード例 #6
0
ファイル: sr_infer.py プロジェクト: boyuzz/SISR-KD-Ensemble
	def predict(self, data, **kwargs):
		"""Run SR inference over *data* with a per-architecture dispatch,
		reporting PSNR/SSIM for the model and a bicubic baseline, ensemble
		diversity, total inference time, and the images with the largest
		model-vs-bicubic PSNR gap.

		Args:
			data: iterable of image bundles (dicts with at least 'x'/'origin',
				'y', 'size' and 'name' keys -- schema inferred from usage,
				confirm against the data loader).
			**kwargs: must contain 'testset' (save subdirectory) and
				'upscale' (integer SR factor).

		Side effects: creates the prediction directory and prints metrics.
		"""
		# eng = meng.start_matlab()
		# for name, param in self.model.named_parameters():
		#     a = param.clone().cpu().data.numpy()
		#     print(name, a.max(), a.min())
		# print('\n')

		cost_time = 0
		save_dir = os.path.join(self.config['preds_dir'], kwargs['testset'])
		mkdir_if_not_exist(save_dir)
		self.model.eval()
		psnr_list = []
		ssim_list = []
		b_psnr_list = []   # bicubic-baseline PSNR per image
		b_ssim_list = []   # bicubic-baseline SSIM per image
		gap_list = {}      # maps (model PSNR - bicubic PSNR) -> image name
		diversity = 0      # accumulated ensemble-diversity score
		with torch.no_grad():
			for img_bundle in data:
				# print(img_bundle['name'])
				if "color" in self.config.keys() and self.config["color"]:
					# Full-color evaluation: compare all channels.
					x = img_bundle['origin']
					y = img_bundle['y']
					multichannel = True
				else:
					# Y-channel evaluation: keep only the first channel of y
					# when it is 3-D (assumed channel-last).
					x = img_bundle['x']
					y = img_bundle['y']
					if len(y.shape) == 3:
						(rows, cols, channel) = y.shape
						y, _, _ = np.split(y, indices_or_sections=channel, axis=2)
					else:
						(rows, cols) = y.shape

					multichannel = False

				# HW(-C) numpy array -> 1xCxHxW float tensor.
				x = torch.from_numpy(x).float().view(1, -1, x.shape[0], x.shape[1])
				if self.config['cuda']:
					x = x.cuda()
				# print(x[:5])
				lr_size = (x.shape[2], x.shape[3])
				hr_size = img_bundle['size']
				# Progressive upscaling builds a ladder of intermediate sizes;
				# otherwise go straight to the target size.
				if self.config['progressive']:
					inter_sizes = np_utils.interval_size(lr_size, hr_size, self.config['max_gradual_scale'])
				else:
					inter_sizes = []
				inter_sizes.append(hr_size)

				# Dispatch on the configured architecture; only the forward
				# pass is timed (accumulated into cost_time below).
				start_time = time.time()
				if self.config['net'] == 'rrgun':
					preds = self.model(x, y_sizes=inter_sizes)
				elif self.config['net'] == 'lapsrn':
					# step = len(inter_sizes)
					# if kwargs['upscale'] % 2 != 0:
					#     step = step + 1
					# LapSRN doubles resolution per step -> log2(upscale) steps.
					step = int(np.ceil(math.log(kwargs['upscale'], 2)))
					preds = self.model(x, step=step)[-1]

					# y_numpy = preds[-1].data.cpu().numpy().squeeze()
					# x = misc.imresize(y_numpy, size=hr_size,
					#                    interp='bicubic', mode='F')
					# x = np.array(x, dtype=np.float64)
					# preds = torch.from_numpy(x)

					# resize = tfs.Compose([tfs.ToPILImage(), tfs.Resize(hr_size, interpolation=Image.BICUBIC),
					#                       tfs.ToTensor()])
					# preds = resize(preds[-1].squeeze(0))
					# preds = F.upsample(preds[-1], size=hr_size, mode='bilinear')
					# preds = preds[-1]
				elif self.config['net'] == 'lapgun':
					preds = self.model(x, y_sizes=inter_sizes)
				elif self.config['net'] in ['lapinternet', 'lapmtnet']:
					# print(img.shape)
					preds = self.model(x, size=inter_sizes[-1], step=self.config['step'])
				elif self.config['net'] in ['ensemsr', 'stacksr', 'stacksr_back', 'stacksr_uni']:
					# Ensemble models: also accumulate a chi-square diversity
					# score across the ensemble members' outputs.
					input_list = self.em_generator(x)
					preds, parts = self.model(input_list)
					parts = torch.cat(tuple(parts), 0)
					diversity += self.chisquare(parts)
					# preds = com[-1]
				elif self.config['net'] == 'wmcnn':
					# WMCNN predicts wavelet sub-bands; reassemble the image
					# with an inverse 2-D DWT (bior1.1 basis).
					preds = self.model(x)
					preds = [p.data.cpu().numpy() for p in preds]
					# preds = [matlab.double(p.data.cpu().numpy().squeeze().tolist()) for p in preds]
					# preds = eng.idwt2(*preds, 'bior1.1')
					preds = pywt.idwt2((preds[0], (preds[1:])), 'bior1.1')
				else:
					preds = self.model(x)

				# for c in com:
				#     c = c.data.cpu().numpy()
				#     continue
				cost_time += time.time() - start_time
				if isinstance(preds, list):
					# Y-channel pixel values lie within [16, 235] (scaled to [0,1]).
					preds = np.clip(preds[-1].data.cpu().numpy(), 16/255, 235/255).astype(np.float64)
					# preds = np.clip(preds[-1].data.cpu().numpy(), 0, 1).astype(np.float64)
				else:
					# `preds` may already be a numpy array (wmcnn branch);
					# the AttributeError fallback keeps it as-is.
					try:
						preds = preds.data.cpu().numpy()
					except AttributeError:
						preds = preds
					# preds = preds.mul(255).clamp(0, 255).round().div(255)
					preds = np.clip(preds, 16/255, 235/255).astype(np.float64)
					# preds = np.clip(preds, 0, 1).astype(np.float64)

				# Drop batch dim; move channels last for image-style arrays.
				preds = preds.squeeze()
				if len(preds.shape) == 3:
					preds = preds.transpose([1, 2, 0])
				# modcrop trims to a multiple of the scale; shave removes the
				# border before metric computation.
				preds = modcrop(preds.squeeze(), kwargs['upscale'])
				preds_bd = shave(preds.squeeze(), kwargs['upscale'])
				y = modcrop(y.squeeze(), kwargs['upscale'])
				# y = np.round(y * 255).astype(np.uint8)
				y_bd = shave(y.squeeze(), kwargs['upscale'])#/ 255.

				# print(preds_bd.shape, y_bd.shape)
				# Bicubic baseline: upscale the LR input, round-trip through
				# uint8, then rescale to [0, 1] for comparison.
				x = x.data.cpu().numpy().squeeze()
				# bic = x
				bic = imresize.imresize(x, scalar_scale=kwargs['upscale'])
				# bic = np.clip(bic, 16 / 255, 235 / 255).astype(np.float64)
				bic = np.round(bic*255).astype(np.uint8)
				bic = shave(bic.squeeze(), kwargs['upscale']) / 255.

				b_psnr = measure.compare_psnr(bic, y_bd, data_range=1)
				# b_ssim = measure.compare_ssim(bic, y_bd, data_range=1)
				# SSIM computed on a 0-255 scale via the local helper.
				b_ssim = self.calculate_ssim(bic* 255, y_bd* 255)
				# b_ssim = self.vifp_mscale(bic, y_bd)
				b_psnr_list.append(b_psnr)
				b_ssim_list.append(b_ssim)

				m_psnr = measure.compare_psnr(preds_bd, y_bd)

				# m_ssim = measure.compare_ssim(preds_bd, y_bd, multichannel=multichannel)
				m_ssim = self.calculate_ssim(preds_bd* 255, y_bd* 255)
				# print('image {} PSNR: {} SSIM: {}'.format(img_bundle['name'], m_psnr, m_ssim))
				# Keyed by PSNR gap: identical gaps would overwrite each other.
				gap_list[m_psnr-b_psnr] = img_bundle['name']
				psnr_list.append(m_psnr)
				ssim_list.append(m_ssim)
				test_value = '{}_{}'.format(m_psnr, m_ssim)
				# self.save_preds(save_dir, test_value, preds, img_bundle, True)

		diversity = diversity / len(data)
		print('Averaged Diversity is {}'.format(diversity))
		print('Averaged PSNR is {}, SSIM is {}'.format(np.mean(np.array(psnr_list)), np.mean(np.array(ssim_list))))
		print('Averaged BIC PSNR is {}, SSIM is {}'.format(np.mean(np.array(b_psnr_list)), np.mean(np.array(b_ssim_list))))
		# print(self.model.module.w_output)
		# print(self.model.module.w_inter)
		# Report the two images with the largest model-vs-bicubic PSNR gap.
		# NOTE(review): `bigest` is a typo (kept: renaming is a code change),
		# and indexing [1] raises IndexError when data has fewer than 2 images.
		bigest_gap = sorted(gap_list, reverse=True)
		print(bigest_gap)
		print(gap_list[bigest_gap[0]], gap_list[bigest_gap[1]])
		print('Inference cost time {}s'.format(cost_time))