def test():
    test_path = os.path.join(os.getcwd(), TEST_IMAGE_FOLDER)
    imgs_path_array = glob.glob(test_path + "/*.jpg")
    psnr_arr = []
    for img_path in imgs_path_array:
        lr_img_data, hr_img_data = preprocessing.process_original(img_path)
        sr_img_data = rebuild(lr_img_data)
        psnr_arr.append(PSNR.cal_psnr(hr_img_data, sr_img_data))
        # Optionally save the LR/SR images back to disk:
        """
        lr_path = img_path.split('.')[0] + "_LR." + img_path.split('.')[-1]
        sr_path = img_path.split('.')[0] + "_SR." + img_path.split('.')[-1]
        lr_img_rgb_data = preprocessing.ycbcr2rgb(lr_img_data)
        sr_img_rgb_data = preprocessing.ycbcr2rgb(sr_img_data)
        lr_img_rgb = Image.fromarray(lr_img_rgb_data.astype('uint8'))
        sr_img_rgb = Image.fromarray(sr_img_rgb_data.astype('uint8'))
        lr_img_rgb.save(lr_path)
        sr_img_rgb.save(sr_path)
        """
    psnr_mean = np.mean(psnr_arr)
    print("This model's PSNR: [%.4f]" % psnr_mean)
    with open("PSNR.txt", 'a') as file:
        file.write("---------------------------------------------------\n")
        file.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '\n')
        file.write("Ratio: " + str(preprocessing.RATIO) + "\n")
        file.write("Activation Function: relu\n")
        file.write("Epoch: " + str(espcn.get_epoch()) + "\tBatch_size: " + str(espcn.get_batch_size()) + "\n")
        file.write("Test images set: " + TEST_IMAGE_FOLDER.split('/')[-1] + "\n")
        file.write("Average PSNR: [%.4f]\n" % psnr_mean)
        file.write("---------------------------------------------------\n\n\n")
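# The snippets in this section lean on a small PSNR helper module that is not
# shown. A minimal sketch of PSNR.cal_psnr, assuming 8-bit image data (peak
# value 255); the same formula presumably backs the PSNR.PSNR(img1, img2)
# calls in the later snippets:
import numpy as np

def cal_psnr(img1, img2):
    # Mean squared error over the two arrays, computed in float64.
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10 * np.log10(255.0 ** 2 / mse)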
def main():
    img = cv2.imread('../data/Test/Set14/baboon.bmp')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)[:, :, 0]  # keep only the luma channel
    m, n = img.shape
    # Convert to MATLAB-style ycbcr:
    # for i in range(m):
    #     for j in range(n):
    #         img[i, j] = 16 + 219. * (img[i, j] / 255.)
    img_lr = bicubic(img, int(m / 3), int(n / 3))  # 3x downscale
    img_hr = bicubic(img_lr, m, n)                 # upscale back to the original size
    # Comparison against OpenCV's bicubic:
    # img_cv = cv2.resize(img, (int(n/3), int(m/3)), interpolation=cv2.INTER_CUBIC)
    # img_cv = cv2.resize(img_cv, (n, m), interpolation=cv2.INTER_CUBIC)
    # cv2.imshow('bicubic', img_hr)
    # cv2.imshow('cv2', img_cv)
    # cv2.waitKey()
    print(PSNR.PSNR(img, img_hr))  # was Python 2 `print PSNR.PSNR(...)`
def main():
    args = get_args()
    Atest, Btest = data.train_dataset(args.dir, args.batch_size, args.image_size, 1)
    B_test_iter = iter(Btest)
    A_test_iter = iter(Atest)
    B_test = Variable(next(B_test_iter)[0])
    A_test = Variable(next(A_test_iter)[0])

    G_12 = model.Generator(64)
    G_21 = model.Generator(64)
    checkpoint = torch.load(args.state_dict)
    G_12.load_state_dict(checkpoint['G_12_state_dict'])
    G_21.load_state_dict(checkpoint['G_21_state_dict'])

    if torch.cuda.is_available():
        # The original moved the undefined names `test` and `noised` to the GPU;
        # the test batches are what actually need to go there.
        A_test = A_test.cuda()
        B_test = B_test.cuda()
        G_12 = G_12.cuda()
        G_21 = G_21.cuda()

    G_12.eval()
    G_21.eval()

    generate_A_image = G_21(B_test.float())
    grid = vutils.make_grid(generate_A_image, nrow=8, normalize=True)
    vutils.save_image(grid, "generate_A_image.png")

    generate_B_image = G_12(A_test.float())
    grid = vutils.make_grid(generate_B_image, nrow=8, normalize=True)  # was generate_A_image
    vutils.save_image(grid, "generate_B_image.png")

    loss = PSNR.PSNR()
    estimate_loss_generate_A = loss(generate_A_image, A_test)
    estimate_loss_generate_B = loss(generate_B_image, B_test)
    print(estimate_loss_generate_A)
    print(estimate_loss_generate_B)
def main():
    args = get_args()
    Atest, Btest = data.train_dataset(args.dir, args.batch_size, args.image_size, 1000)
    B_test_iter = iter(Btest)
    A_test_iter = iter(Atest)

    G_21 = model.Generator(args.batch_size)
    # G_21 = residual_model.Generator(1, 1)
    checkpoint = torch.load(args.state_dict)
    G_21.load_state_dict(checkpoint['G_21_state_dict'])
    if torch.cuda.is_available():
        G_21 = G_21.cuda()
    G_21.eval()  # hoisted out of the loop; it only needs to run once

    loss = PSNR.PSNR()
    estimate_loss_generate = 0
    for i in range(1000):
        B_test = Variable(next(B_test_iter)[0])
        A_test = Variable(next(A_test_iter)[0])
        grid = vutils.make_grid(B_test, nrow=8)
        vutils.save_image(grid, "B_image.png")
        if torch.cuda.is_available():
            B_test = B_test.cuda()
            A_test = A_test.cuda()
        generate_A_image = G_21(B_test.float())
        grid = vutils.make_grid(generate_A_image, nrow=8)
        vutils.save_image(grid, "generate_A_image.png")
        estimate_loss_generate = estimate_loss_generate + loss(generate_A_image, A_test)

    estimate_loss_generate = estimate_loss_generate / 1000  # average PSNR over the 1000 batches
    print(estimate_loss_generate)
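# Both generator scripts above instantiate PSNR.PSNR() and call it like a loss
# module. A minimal sketch of such a class, assuming tensors normalized to
# [0, 1] (the peak value below would change for a different range):
import torch
import torch.nn as nn

class PSNR(nn.Module):
    def forward(self, x, y):
        # PSNR = 10 * log10(peak^2 / MSE), with peak = 1.0 for [0, 1] data.
        mse = torch.mean((x - y) ** 2)
        return 10 * torch.log10(1.0 / mse)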
def do_filtering(img_name, folder, file):
    img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
    folder = folder.replace('whole_raw_image', 'bm3d')

    e1 = cv2.getTickCount()
    Basic_img = BM3D_1st_step(img)
    e2 = cv2.getTickCount()
    time = (e2 - e1) / cv2.getTickFrequency()
    print("The processing time of the first step is %f s" % time)

    Final_img = BM3D_2nd_step(Basic_img, img)
    e3 = cv2.getTickCount()
    time = (e3 - e2) / cv2.getTickFrequency()
    print("The processing time of the second step is %f s" % time)

    filename = folder + file
    print(filename)
    cv2.imwrite(filename, Final_img)

    psnr = PSNR.PSNR(img, Final_img)
    print("The PSNR between the two images after the second step is %f" % psnr)
    time = (e3 - e1) / cv2.getTickFrequency()
    print("The total processing time is %f s" % time)
def calALL():
    path = opt.folders
    ps = []
    ss = []
    file_list = sorted(os.listdir(path), key=len)

    with open(path + "average.txt", 'w') as f:
        f.write('average (PSNR,SSIM)\n')

    for Npath in file_list:
        newPath = path + Npath + '/'
        averPSNR = 0
        averSSIM = 0
        count = 0
        if not search(newPath):
            continue
        # Start fresh per-folder logs.
        open(newPath + "PSNR.txt", 'w').close()
        open(newPath + "SSIM.txt", 'w').close()
        print("*folder* = " + newPath)
        newfile_list = os.listdir(newPath)
        list_PSNR = []
        list_SSIM = []
        for i in newfile_list:
            if i[-9:] == 'epoch.jpg':
                os.remove(newPath + i)  # was os.remove(newPath + 'epoch.jpg'); remove the file that matched
                continue
            if i[-3:] == 'txt' or i[-8:] == 'SSIM.jpg' or i[-8:] == 'PSNR.jpg':
                continue
            img = i
            LR = newPath + img
            print('open ' + LR)
            HR = opt.HR + img
            print('open ' + HR)
            # biimg = cv2.imread(LR)
            # biimg = cv2.resize(biimg, None, fx=4, fy=4, interpolation=cv2.INTER_CUBIC)
            # cv2.imwrite(opt.HR+'../bicubic/'+i, biimg)
            # LR = opt.HR+'../bicubic/'+i
            # print(LR)
            try:
                one, two = PSNR.cal_PSNRandSSIM(HR, LR)
            except Exception:
                print('skip')
                continue
            print(f"{one} is PSNR, {two} is SSIM")
            with open(newPath + "PSNR.txt", 'a') as f:
                f.write(img + f" PSNR is {one}\n")
            with open(newPath + "SSIM.txt", 'a') as f:
                f.write(img + f" SSIM is {two}\n")
            list_PSNR.append(one)
            list_SSIM.append(two)
            averPSNR += one
            averSSIM += two
            count += 1
            print('=============================')
        graphing.graph(list_PSNR, [], newPath, 'PSNR', opt.picofname)
        graphing.graph(list_SSIM, [], newPath, 'SSIM', opt.picofname)
        if count:
            averPSNR /= count
            averSSIM /= count
        print("{} is average PSNR, {} is average SSIM".format(averPSNR, averSSIM))
        ps.append(averPSNR)
        ss.append(averSSIM)
        graphing.graph(ps, [], path, 'PSNR', opt.X)
        graphing.graph(ss, [], path, 'SSIM', opt.X)
        with open(path + "average.txt", 'a') as f:
            f.write("{} : {} is average PSNR, {} is average SSIM\n".format(Npath, averPSNR, averSSIM))
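# calALL() relies on PSNR.cal_PSNRandSSIM(HR, LR) returning both metrics for a
# pair of image paths. A minimal sketch, assuming scikit-image >= 0.19 and
# same-sized 8-bit images (the helper's real loading logic is not shown):
import cv2
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def cal_PSNRandSSIM(hr_path, lr_path):
    hr = cv2.imread(hr_path)
    sr = cv2.imread(lr_path)
    psnr = peak_signal_noise_ratio(hr, sr, data_range=255)
    ssim = structural_similarity(hr, sr, channel_axis=2, data_range=255)
    return psnr, ssim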
if __name__ == '__main__':
    # Enable OpenCV's optimized code paths (SSE2, AVX, etc.); some routines
    # also have unoptimized fallbacks.
    cv2.setUseOptimized(True)
    img_name = "C:/Users/admin/Desktop/BM3D-Denoise/BM3D_test_images/lw3.png"  # path to the input image
    img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)  # read the image in grayscale mode

    # Time the run: cv2.getTickCount() returns the clock ticks since a reference point.
    e1 = cv2.getTickCount()
    # if img is not None:
    #     print("success")
    Basic_img = BM3D_1st_step(img)
    e2 = cv2.getTickCount()
    time = (e2 - e1) / cv2.getTickFrequency()  # elapsed time in seconds
    print("The processing time of the first step is %f s" % time)
    cv2.imwrite("Basic3.jpg", Basic_img)
    psnr = PSNR.PSNR(img, Basic_img)
    print("The PSNR between the two images after the first step is %f" % psnr)

    # Basic_img = cv2.imread("Basic3.jpg", cv2.IMREAD_GRAYSCALE)
    Final_img = BM3D_2nd_step(Basic_img, img)
    e3 = cv2.getTickCount()
    time = (e3 - e2) / cv2.getTickFrequency()
    print("The processing time of the second step is %f s" % time)
    cv2.imwrite("Final3.jpg", Final_img)
    psnr = PSNR.PSNR(img, Final_img)
    print("The PSNR between the two images after the second step is %f" % psnr)

    time = (e3 - e1) / cv2.getTickFrequency()
    print("The total processing time is %f s" % time)
"./data/test_set_s3.h5") #h5数据集制作工具在data中,可自己制作 test_loader = Data.DataLoader(dataset=test_set, num_workers=1, batch_size=40, shuffle=False) net = torch.load("./checkpoint/model_epoch_100.pth")["model"] net.cpu() i = random.randint(0, 100) aa, bb = test_loader.dataset[i] lim1 = tran_im(aa) label = tran_im(bb) lim1.show() print("CUBIC_PSNR:", PSNR.psnr(lim1, label)) T_X = torch.unsqueeze(tran_ten(lim1), 0) prediction = net(T_X) prediction[prediction < 0] = 0 prediction[prediction > 1] = 1 lim2 = tran_im(prediction[0]) lim2.show() print("NET_PSNR:", PSNR.psnr(lim2, label)) label.show()
# Fragment of a figure script: draw per-pixel MSE maps for the two networks,
# mark two 31x31 evaluation patches, and annotate each panel with its PSNR.
data = MSE_hybnetd[j]
dd = np.ravel(data)
ivmin = min(ivmin, np.amin(dd))
ivmax = max(ivmax, np.amax(dd))
images.append(a.imshow(data, cmap=cm.viridis))
if j <= 4:
    a.add_patch(plt.Rectangle((curve1_pos_x[j] - 15, curve1_pos_y[j] - 15),
                              width=31, height=31, linewidth=1,
                              edgecolor='r', facecolor='none'))
    a.add_patch(plt.Rectangle((curve2_pos_x[j] - 15, curve2_pos_y[j] - 15),
                              width=31, height=31, linewidth=1,
                              edgecolor='lime', facecolor='none'))
else:
    # Coordinates of the cropped ICVL images are shifted 15 pixels to the right.
    a.add_patch(plt.Rectangle((curve1_pos_x[j] - 15 - 15, curve1_pos_y[j] - 15),
                              width=31, height=31, linewidth=1,
                              edgecolor='r', facecolor='none'))
    a.add_patch(plt.Rectangle((curve2_pos_x[j] - 15 - 15, curve2_pos_y[j] - 15),
                              width=31, height=31, linewidth=1,
                              edgecolor='lime', facecolor='none'))
a.text(40, 70, 'PSNR=' + '%.2f' % PSNR.mse2psnr(np.mean(dd), bitdepth=1) + 'dB',
       fontproperties=FontProperties(size=5), color='w')

if i == 2:
    a.axis('off')
    data = MSE_hsnetd[j]
    dd = np.ravel(data)
    ivmin = min(ivmin, np.amin(dd))
    ivmax = max(ivmax, np.amax(dd))
    images.append(a.imshow(data, cmap=cm.viridis))
    if j <= 4:
        a.add_patch(plt.Rectangle((curve1_pos_x[j] - 15, curve1_pos_y[j] - 15),
                                  width=31, height=31, linewidth=1,
                                  edgecolor='r', facecolor='none'))
        a.add_patch(plt.Rectangle((curve2_pos_x[j] - 15, curve2_pos_y[j] - 15),
                                  width=31, height=31, linewidth=1,
                                  edgecolor='lime', facecolor='none'))
    else:
        # Coordinates of the cropped ICVL images are shifted 15 pixels to the right.
        a.add_patch(plt.Rectangle((curve1_pos_x[j] - 15 - 15, curve1_pos_y[j] - 15),
                                  width=31, height=31, linewidth=1,
                                  edgecolor='r', facecolor='none'))
        # The snippet is cut off here in the source; the matching 'lime'
        # rectangle for curve2 presumably follows, as in the branches above.
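# The annotation above converts a mean squared error into PSNR via
# PSNR.mse2psnr(mse, bitdepth=1). A minimal sketch, assuming `bitdepth` sets
# the peak value to 2**bitdepth - 1 (so bitdepth=1 means data in [0, 1]):
import numpy as np

def mse2psnr(mse, bitdepth=1):
    peak = 2.0 ** bitdepth - 1  # 1.0 for bitdepth=1, 255.0 for bitdepth=8
    return 10 * np.log10(peak ** 2 / mse)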