def main():
    args = get_arguments()

    with open("./params.json", 'r') as f:
        params = json.load(f)

    if check_params(args, params) == False:
        return

    sess = tf.Session()

    net = EspcnNet(filters_size=params['filters_size'],
                   channels=params['channels'],
                   ratio=params['ratio'])
    lr_image = tf.placeholder(tf.uint8)
    sr_image = net.generate(lr_image)

    saver = tf.train.Saver()
    saver.restore(sess, args.checkpoint)

    lr_image_data = misc.imread(args.lr_image)
    lr_image_ycbcr_data = rgb2ycbcr(lr_image_data)
    lr_image_y_data = lr_image_ycbcr_data[:, :, 0:1]
    lr_image_cb_data = lr_image_ycbcr_data[:, :, 1:2]
    lr_image_cr_data = lr_image_ycbcr_data[:, :, 2:3]
    lr_image_batch = np.zeros((1,) + lr_image_y_data.shape)
    lr_image_batch[0] = lr_image_y_data

    sr_image_y_data = sess.run(sr_image, feed_dict={lr_image: lr_image_batch})
    sr_image_y_data = my_shuffle(sr_image_y_data[0], args.ratio)

    sr_image_ycbcr_data = misc.imresize(lr_image_ycbcr_data,
                                        params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                        'bicubic')

    edge = params['edge'] * params['ratio'] // 2  # integer division so edge can be used as a slice index

    sr_image_ycbcr_data = np.concatenate(
        (sr_image_y_data, sr_image_ycbcr_data[edge:-edge, edge:-edge, 1:3]),
        axis=2)
    sr_image_data = ycbcr2rgb(sr_image_ycbcr_data)

    misc.imsave(args.out_path + '.png', sr_image_data)

    if args.hr_image is not None:
        hr_image_data = misc.imread(args.hr_image)
        model_psnr = psnr(hr_image_data, sr_image_data, edge)
        print('PSNR of the model: {:.2f}dB'.format(model_psnr))

        sr_image_bicubic_data = misc.imresize(lr_image_data,
                                              params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                              'bicubic')
        misc.imsave(args.out_path + '_bicubic.png', sr_image_bicubic_data)
        bicubic_psnr = psnr(hr_image_data, sr_image_bicubic_data, 0)
        print('PSNR of Bicubic: {:.2f}dB'.format(bicubic_psnr))
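# A minimal sketch of the kind of psnr() helper the snippets in this section assume:
# two images plus an optional border/scale argument that is shaved off before comparison.
# This is an illustration only; the name psnr_sketch and the peak parameter are made up
# here, and the actual helpers in each repository may differ (peak value, border handling).
import numpy as np

def psnr_sketch(img1, img2, shave_border=0, peak=255.0):
    """Peak signal-to-noise ratio in dB between two images of equal shape."""
    img1 = np.asarray(img1, dtype=np.float64)
    img2 = np.asarray(img2, dtype=np.float64)
    if shave_border > 0:
        # drop a border of shave_border pixels on every side before comparing
        img1 = img1[shave_border:-shave_border, shave_border:-shave_border]
        img2 = img2[shave_border:-shave_border, shave_border:-shave_border]
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20.0 * np.log10(peak / np.sqrt(mse))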
def super_resolution(path_string_lr, path_string_truth, generator):
    pic = cv2.imread(path_string_lr)
    pic_truth = cv2.imread(path_string_truth)
    pic_norm = pic / 255.0
    orig_shape = pic_norm.shape
    decomposed_picture = test_transform.test_image_decomposition(pic_norm, input_size=16)
    decomposed_picture_4dim = f2.ndarray_to_4dim(decomposed_picture[0], image_size=16)
    pic_predicted = generator.predict(decomposed_picture_4dim)
    whole_predicted = test_transform.text_image_composition(pic_predicted,
                                                            decomposed_picture[2])
    predicted_orig_shape = cv2.resize(whole_predicted,
                                      (orig_shape[1] * 2, orig_shape[0] * 2),
                                      interpolation=cv2.INTER_LINEAR)
    psnr_item = psnr.psnr(predicted_orig_shape, pic_truth / 255.0)
    return psnr_item, pic, pic_truth, predicted_orig_shape
def yuv_diff_temporal(filename_input, start_frame_input, w, h, frame_size, block_size, scale):
    # read yuv: input
    array_input_y, array_input_cbcr = read_yuv420(filename_input, w, h, frame_size,
                                                  start_frame=start_frame_input)
    input_y = array_input_y.squeeze()

    ## make a copy of pixel domain
    # input_y_pel = input_y.copy()

    # normalization
    input_y = input_y / 255.

    # make a copy of pixel domain
    input_y_pel = input_y.copy()

    # count for each frame
    block_count = 0

    # loop based on block
    list_df_psnr = []
    list_df_ssim = []
    for frame_index in range(1, frame_size):
        list_psnr_frame = []
        list_ssim_frame = []
        for y in range(0, h, block_size):
            list_psnr_row = []
            list_ssim_row = []
            for x in range(0, w, block_size):
                # pick block from input
                sub_input_y_prev = input_y[frame_index - 1, y:y + block_size, x:x + block_size]
                sub_input_y_curr = input_y[frame_index, y:y + block_size, x:x + block_size]

                # PSNR calculation
                each_psnr = psnr(sub_input_y_prev, sub_input_y_curr, scale)
                each_ssim = ssim(sub_input_y_prev, sub_input_y_curr, data_range=1)
                list_psnr_row.append(float("{0:.4f}".format(each_psnr)))
                list_ssim_row.append(float("{0:.4f}".format(each_ssim)))

                block_count += 1

            list_psnr_frame.append(list_psnr_row)
            list_ssim_frame.append(list_ssim_row)

        # outside of the block loop
        # stat the block-level PSNR
        # list_psnr to df
        df_psnr = pd.DataFrame(list_psnr_frame)
        df_ssim = pd.DataFrame(list_ssim_frame)

        # append to list
        list_df_psnr.append(df_psnr)
        list_df_ssim.append(df_ssim)

    return list_df_psnr, list_df_ssim, input_y_pel
def computeError(img1, img2):
    img_psnr = psnr.psnr(img1, img2)
    gt_y = cv2.cvtColor(cv2.cvtColor(img1.astype(np.uint8), cv2.COLOR_RGB2BGR),
                        cv2.COLOR_BGR2YCR_CB)[:, :, 0]
    pred_y = cv2.cvtColor(cv2.cvtColor(img2.astype(np.uint8), cv2.COLOR_RGB2BGR),
                          cv2.COLOR_BGR2YCR_CB)[:, :, 0]
    img_ssim = compare_ssim(gt_y, pred_y, gaussian_weights=True)
    return img_psnr, img_ssim
def test_pVDSR_with_sess(ckpt_path, data_path, sess):
    folder_list = glob.glob(os.path.join(data_path, 'Set*'))
    print 'folder_list', folder_list

    if not os.path.exists('./output_img'):
        os.mkdir('./output_img')

    psnr_dict = {}
    for folder_path in folder_list:
        psnr_list = []
        img_list = get_img_list(folder_path)
        for i in range(len(img_list)):
            input_list, gt_list, scale_list = get_test_image(img_list, i, 1)
            input_y = input_list[0]
            gt_y = gt_list[0]

            start_t = time.time()
            img_vdsr_y = sess.run([output_tensor],
                                  feed_dict={input_tensor: np.resize(input_y, (1, input_y.shape[0], input_y.shape[1], 1))})
            img_vdsr_y = np.resize(img_vdsr_y, (input_y.shape[0], input_y.shape[1]))
            end_t = time.time()

            # save test image and results
            id = img_list[i][1].split('/')[-1].split('.')[0]
            output_id = 'output' + id + '.png'
            input_id = 'input' + id + '.png'
            misc.imsave(os.path.join('./output_img', output_id), img_vdsr_y)
            misc.imsave(os.path.join('./output_img', input_id), input_y)

            print "end_t", end_t, "start_t", start_t
            print "time consumption", end_t - start_t
            print "image_size", input_y.shape

            # calculate psnr
            psnr_bicub = psnr(input_y, gt_y, scale_list[0])
            psnr_vdsr = psnr(img_vdsr_y, gt_y, scale_list[0])
            print "PSNR: bicubic %f\tpVDSR %f" % (psnr_bicub, psnr_vdsr)
            psnr_list.append([psnr_bicub, psnr_vdsr, scale_list[0]])

        psnr_dict[os.path.basename(folder_path)] = psnr_list

    if not os.path.exists('./psnr'):
        os.mkdir('psnr')
    with open('psnr/%s' % "result", 'wb') as f:
        pickle.dump(psnr_dict, f)
def fitness(chromosome, stego, secret):
    """Computes fitness for current chromosome"""
    if len(chromosome) > 7:
        chromosome = helper_individual.packchromosome(chromosome)
    # Embed the secret sequence
    try:
        stego1 = embed(stego, secret, chromosome)
    except:
        return (0, )
    return (psnr(stego, stego1), )
def test():
    '''
    Test the model; needs model.h5 -> run the train module before testing.
    '''
    srcnn_model = model()
    srcnn_model.load_weights("./model/srcnn_model.h5")
    img = cv2.imread(IMG_NAME)
    shape = img.shape
    img = cv2.resize(img, (int(shape[1] / 2), int(shape[0] / 2)),
                     interpolation=cv2.INTER_CUBIC)
    img = cv2.resize(img, (shape[1], shape[0]),
                     interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(BICUBIC_NAME, img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    Y = numpy.zeros((1, img.shape[0], img.shape[1], 1))
    Y[0, :, :, 0] = img[:, :, 0]
    pre = srcnn_model.predict(Y, batch_size=1)
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(numpy.uint8)
    img[6:-6, 6:-6, 0] = pre[0, :, :, 0]
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    if denoise:
        img = cv2.fastNlMeansDenoisingColored(img, None, 5, 5, 7, 21)
    cv2.imwrite(OUTPUT_NAME, img)

    # PSNR and MSE calculation:
    im1 = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im2 = cv2.imread(BICUBIC_NAME, cv2.IMREAD_COLOR)
    im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im3 = cv2.imread(OUTPUT_NAME, cv2.IMREAD_COLOR)
    im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]

    print("test completed... below is the report")
    print("PSNR bicubic: ", psnr.psnr(im2, im1))
    print("PSNR SRCNN: ", psnr.psnr(im3, im1))
    print("MSE bicubic: ", psnr.mse(im2, im1))
    print("MSE srcnn: ", psnr.mse(im3, im1))
def yuv_diff_single_frame(label_y, input_y, w, h, block_size, scale):
    # normalization
    label_y = label_y / 255.
    input_y = input_y / 255.

    df_psnr = pd.DataFrame()
    df_sse = pd.DataFrame()

    # count for each frame
    block_count = 0

    # loop based on block
    list_psnr_frame = []
    list_sse_frame = []
    for y in range(0, h, block_size):
        list_psnr_row = []
        list_sse_row = []
        for x in range(0, w, block_size):
            # pick block from input
            sub_label_y = label_y[y:y + block_size, x:x + block_size]
            sub_input_y = input_y[y:y + block_size, x:x + block_size]

            # PSNR calculation
            each_psnr = psnr(sub_input_y, sub_label_y, scale)
            # each_ssim = ssim(sub_input_y, sub_label_y, data_range=256)
            list_psnr_row.append(float("{0:.4f}".format(each_psnr)))

            # SSE
            each_sse = np.sum((sub_label_y - sub_input_y) ** 2)
            list_sse_row.append(float("{0:.4f}".format(each_sse)))

            block_count += 1

        list_psnr_frame.append(list_psnr_row)
        list_sse_frame.append(list_sse_row)

    # outside of the loop-block
    # stat the block-level PSNR
    # list_psnr to df
    df_psnr_frm = pd.DataFrame(list_psnr_frame)
    df_sse_frm = pd.DataFrame(list_sse_frame)

    # append to global table
    df_psnr = df_psnr.append(df_psnr_frm)
    df_sse = df_sse.append(df_sse_frm)

    return df_psnr, df_sse
def psnr(ref, teste):
    # RFZ: need to convert to grayscale here (?)
    # NOTE: this wrapper shares its name with the psnr module it calls, so the
    # module must be importable under a different binding for the call below to resolve.
    l = plt.imread(ref)  # [:, :, 1]
    # print(l.shape)
    # f = open('arquivo.txt', 'w')
    # f.write(ref + )
    # for i in range(1, 9):
    k = plt.imread(teste)  # [:, :, 1]
    psnr_value = psnr.psnr(l, k)
    return psnr_value
def index_results(dist_filelist, ref_filelist):
    psnr_list = []
    ssim_list = []
    uiqm_list = []
    for dist_file in dist_filelist:  # iterate over the distorted files
        dist_file_dir = os.path.join(dist_path, dist_file)  # absolute path of the distorted file
        if os.path.isdir(dist_file_dir):  # skip directories
            continue
        dist_img = scipy.misc.imread(dist_file_dir, flatten=True).astype(numpy.float32)
        # filename = os.path.splitext(dist_file)[0] + os.path.splitext(dist_file)[1]  # ref filename
        filename = os.path.splitext(dist_file)[0] + '.jpg'  # ref filename
        ref_img = scipy.misc.imread(ref_path + '\\' + filename, flatten=True).astype(
            numpy.float32)  # read the reference image matching the test image
        psnr_data = psnr.psnr(ref_img, dist_img)  # PSNR metric
        ssim_data = ssim.ssim_exact(ref_img / 255, dist_img / 255)  # SSIM metric
        dist_img = cv2.imread(dist_path + '\\' + os.path.splitext(dist_file)[0] + '.jpg')  # for the UIQM metric
        uiqm_data = uqim_utils.getUIQM(dist_img)  # UIQM
        print("img:" + str(filename) + " psnr:" + str(psnr_data) + " ssim:" +
              str(ssim_data) + " UIQM:" + str(uiqm_data))
        data = str(filename) + " psnr:" + str(psnr_data) + " ssim:" + str(
            ssim_data) + " UIQM:" + str(uiqm_data)
        psnr_list.append(psnr_data)
        ssim_list.append(ssim_data)
        uiqm_list.append(uiqm_data)
    average = "psnr_average:" + str(
        sum(psnr_list) / len(psnr_list)) + " ssim_average:" + str(
            sum(ssim_list) / len(ssim_list)) + " UIQM:" + str(
                sum(uiqm_list) / len(uiqm_list))
    return average
# print(gt_gray.size, enhanced_gray.size)  # ssim needs same sized images
enhanced_gray = ImageOps.grayscale(enhanced_image)
enhanced_image = np.asanyarray(enhanced_image)
enhanced_gray = np.asanyarray(enhanced_gray)

# FR_IQA
if os.path.isfile(gt_path):
    ssim, mse = ssim_sk(gt_image, enhanced_image)
    metrics["SSIM"][0] += ssim
    metrics["SSIM"][1] += 1

    psnr_val = psnr(gt_image, enhanced_image)
    metrics["PSNR"][0] += psnr_val
    metrics["PSNR"][1] += 1

    metrics["MSE"][0] += mse
    metrics["MSE"][1] += 1

    try:
        metrics["IE"][0] += entropy_sk(gt_image, enhanced_image)
        metrics["IE"][1] += 1
    except:
        pass

    try:
        metrics["VIF"][0] += compute_vif(gt_image, enhanced_image)
        metrics["VIF"][1] += 1
    except:
dist2 = scipy.misc.imread(dist2_file, flatten=True).astype(numpy.float32)
dist3 = scipy.misc.imread(dist3_file, flatten=True).astype(numpy.float32)
dist4 = scipy.misc.imread(dist4_file, flatten=True).astype(numpy.float32)
sharp = scipy.misc.imread(sharp_file, flatten=True).astype(numpy.float32)

# print(ref_file + ' ' + dist_file + ':')
num += 1

# vifp1 = vifp.vifp_mscale(ref, dist)
ssim1 = ssim.ssim_exact(sharp / 255, dist1 / 255)
ssim2 = ssim.ssim_exact(sharp / 255, dist2 / 255)
ssim3 = ssim.ssim_exact(sharp / 255, dist3 / 255)
ssim4 = ssim.ssim_exact(sharp / 255, dist4 / 255)
psnr1 = psnr.psnr(sharp, dist1)
psnr2 = psnr.psnr(sharp, dist2)
psnr3 = psnr.psnr(sharp, dist3)
psnr4 = psnr.psnr(sharp, dist4)
# reco1 = reco.reco(ref / 255, dist / 255)

# vifp_sum += vifp1
# reco_sum += reco1
# ref_values.append(ref_file)
# dist_values.append(dist_file)
# vifp_values.append(vifp1)
# ssim_values.append(ssim1)
# psnr_values.append(psnr1)
# reco_values.append(reco1)

f.write("%s:\n" % sharp_file)
f.write("%f\t%f\t%f\t%f\n" % (ssim1, ssim2, ssim3, ssim4))
if 'compress' in mode:
    orig_img = img.copy()
    res_img = compress(img, x_01, x_01, mu, lamb, m_ratio, display, method, output)
    img = res_img

if 'decompress' in mode:
    res_img = decompress(img, x_01, x_01, mu, lamb, m_ratio, display, method, output)
    if orig_img is not None:
        psnr_val = psnr(orig_img, res_img)
        print(psnr_val)
        p_name = os.path.splitext(os.path.basename(output))[0]
        p_file = output.replace(p_name, p_name + 'psnr')
        p_file = output.replace(os.path.splitext(os.path.basename(output))[1], '.txt')
        with open(p_file, 'w') as f:
            f.write(str(psnr_val))

## img_width, img_height = img.shape[:2]
##
## img = cv2.medianBlur(img, 3)
## cv2.imshow('image orig', img.astype(np.uint8))
## cv2.waitKey(0)
##
## img = img.astype(np.float32)
def generate():
    args = get_arguments()

    with open("./params.json", 'r') as f:
        params = json.load(f)

    if check_params(args, params) == False:
        return

    sess = tf.Session()

    net = ESPCN(filters_size=params['filters_size'],
                channels=params['channels'],
                ratio=params['ratio'],
                batch_size=1,
                lr_size=params['lr_size'],
                edge=params['edge'])
    loss, images, labels = net.build_model()

    lr_image = tf.placeholder(tf.uint8)
    lr_image_data = misc.imread(args.lr_image)
    lr_image_ycbcr_data = rgb2ycbcr(lr_image_data)
    lr_image_y_data = lr_image_ycbcr_data[:, :, 0:1]
    lr_image_cb_data = lr_image_ycbcr_data[:, :, 1:2]
    lr_image_cr_data = lr_image_ycbcr_data[:, :, 2:3]
    lr_image_batch = np.zeros((1,) + lr_image_y_data.shape)
    lr_image_batch[0] = lr_image_y_data

    sr_image = net.generate(lr_image)

    saver = tf.train.Saver()
    try:
        model_loaded = net.load(sess, saver, args.checkpoint)
    except:
        raise Exception(
            "Failed to load model, does the ratio in params.json match the ratio you trained your checkpoint with?")

    if model_loaded:
        print("[*] Checkpoint load success!")
    else:
        print("[*] Checkpoint load failed/no checkpoint found")
        return

    sr_image_y_data = sess.run(sr_image, feed_dict={lr_image: lr_image_batch})
    sr_image_y_data = shuffle(sr_image_y_data[0], params['ratio'])

    sr_image_ycbcr_data = misc.imresize(lr_image_ycbcr_data,
                                        params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                        'bicubic')

    edge = params['edge'] * params['ratio'] // 2  # integer division so edge can be used as a slice index

    sr_image_ycbcr_data = np.concatenate(
        (sr_image_y_data, sr_image_ycbcr_data[edge:-edge, edge:-edge, 1:3]),
        axis=2)
    sr_image_data = ycbcr2rgb(sr_image_ycbcr_data)

    misc.imsave(args.out_path + '.png', sr_image_data)

    if args.hr_image is not None:
        hr_image_data = misc.imread(args.hr_image)
        model_psnr = psnr(hr_image_data, sr_image_data, edge)
        print('PSNR of the model: {:.2f}dB'.format(model_psnr))

        sr_image_bicubic_data = misc.imresize(lr_image_data,
                                              params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                              'bicubic')
        misc.imsave(args.out_path + '_bicubic.png', sr_image_bicubic_data)
        bicubic_psnr = psnr(hr_image_data, sr_image_bicubic_data, 0)
        print('PSNR of Bicubic: {:.2f}dB'.format(bicubic_psnr))
print "Frame=%d VIFP=%f SSIM=%f" % (frame_num, vifp_value, ssim_value) frame_num += 1 else: # Inputs are image files ref = scipy.misc.imread(ref_file, flatten=True).astype(numpy.float32) dist = scipy.misc.imread(dist_file, flatten=True).astype(numpy.float32) width, height = ref.shape[1], ref.shape[0] print "Comparing %s to %s, resolution %d x %d" % (ref_file, dist_file, width, height) vifp_value = vifp.vifp_mscale(ref, dist) print "VIFP=%f" % (vifp_value) ssim_value = ssim.ssim_exact(ref / 255, dist / 255) print "SSIM=%f" % (ssim_value) # FIXME this is buggy, disable for now # ssim_value2 = ssim.ssim(ref/255, dist/255) # print "SSIM approx=%f" % (ssim_value2) psnr_value = psnr.psnr(ref, dist) print "PSNR=%f" % (psnr_value) # niqe_value = niqe.niqe(dist/255) # print "NIQE=%f" % (niqe_value) reco_value = reco.reco(ref / 255, dist / 255) print "RECO=%f" % (reco_value)
plt.figure(figsize=(8, 8))

for quality in range(0, 101, 1):
    subprocess.check_call('gm convert %s -quality %d %s' % (ref_file, quality, dist_file),
                          shell=True)

    file_size = os.path.getsize(dist_file)
    dist = scipy.misc.imread(dist_file, flatten=True).astype(numpy.float32)

    quality_values.append(quality)
    size_values.append(int(file_size / 1024))
    vifp_values.append(vifp.vifp_mscale(ref, dist))
    ssim_values.append(ssim.ssim_exact(ref / 255, dist / 255))
    psnr_values.append(psnr.psnr(ref, dist))
    # niqe_values.append( niqe.niqe(dist/255) )
    reco_values.append(reco.reco(ref / 255, dist / 255))

plt.plot(quality_values, vifp_values, label='VIFP')
plt.plot(quality_values, ssim_values, label='SSIM')
# plt.plot(niqe_values, label='NIQE')
plt.plot(quality_values, reco_values, label='RECO')
plt.plot(quality_values, numpy.asarray(psnr_values) / 100.0, label='PSNR/100')
plt.legend(loc='lower right')
plt.xlabel('JPEG Quality')
plt.ylabel('Metric')
plt.savefig('jpg_demo_quality.png')

plt.figure(figsize=(8, 8))
import numpy as np
import cv2
from psnr import psnr

# img = cv2.imread('dataset/test/crop/070_im.png')
mask = cv2.imread('mask.png', 0)
p = 0
l2 = 0
l1 = 0
for i in range(1, 101):
    img = cv2.imread('dataset/val/paris_eval_gt/%03d_im.png' % i)
    img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_CUBIC)
    dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
    p = p + psnr(img[32:32 + 64, 32:32 + 64, :].astype(np.float32),
                 dst[32:32 + 64, 32:32 + 64, :].astype(np.float32))
    cv2.imwrite('result/test/old_all/%03d_im.png' % i, dst)
    t = img[32:32 + 64, 32:32 + 64, :].astype(np.float32) / 127.5 - \
        dst[32:32 + 64, 32:32 + 64, :].astype(np.float32) / 127.5
    l2 = l2 + np.mean(np.square(t))
    t = np.abs(t)
    l1 = l1 + np.mean(t)

print(l1 / 100.0)
print(l2 / 100.0)
print(p / 100.0)
# reconst_img = scipy.misc.imread(reconst_path, flatten=False, model='RGB').astype(numpy.float32)
# cps_path =
name = os.path.splitext(os.path.basename(reconst_path))[0]
cps_path = glob.glob(os.path.join(stream_dir, model, name + '*'))[0]
stream_file_size = os.path.getsize(cps_path)
reconst_img = cv2.imread(reconst_path).astype(numpy.float32)
# print cps_img.shape
# exit()

path_list.append(reconst_path)
size_values.append(int(stream_file_size / 1024))  ## kb
# vifp_values.append( vifp.vifp_mscale(ref_img, cps_img) )
ssim_values.append(ssim.ssim_exact(ref_img / 255, reconst_img / 255))
psnr_values.append(psnr.psnr(ref_img, reconst_img))
# reco_values.append( reco.reco(ref_img/255, cps_img/255) )

# cps_path_list = glob.glob(os.path.join(stream_dir, model, ref_name+'*'))
# # print ref_path
# for cps_path in cps_path_list:
#     file_size = os.path.getsize(cps_path)
#     # reconst_path = os.path.join(reconst_dir, model, ref_name)  ## have to be same format with reference image

# print size_values;exit()
for i in range(len(size_values)):
    # csv_writer.writerow
    output_row = [os.path.basename(ref_path), os.path.basename(path_list[i]),
                  str(ori_shape), str(ori_size), str(size_values[i]),
                  str(float(ori_size) / size_values[i]),
                  str(psnr_values[i]), str(ssim_values[i])]
def generate():
    args = get_arguments()

    with open("./params2.json", 'r') as f:
        params = json.load(f)

    if check_params(args, params) == False:
        return

    sess = tf.Session()

    net = ESPCN(filters_size=params['filters_size'],
                channels=params['channels'],
                ratio=params['ratio'],
                batch_size=1,
                lr_size=params['lr_size'],
                edge=params['edge'])
    loss, images, labels = net.build_model()

    files = [f for f in os.listdir(args.lr_image_dir)
             if os.path.isfile(os.path.join(args.lr_image_dir, f))]

    saver = tf.train.Saver()
    if net.load(sess, saver, args.checkpoint):
        print("[*] Checkpoint load success!")
    else:
        print("[*] Checkpoint load failed/no checkpoint found")
        return

    frame_range = (87, 10000)
    for fileName in files:
        try:
            ts = time()
            frame_cnt = int(fileName[5:10])
            if frame_cnt < frame_range[0] or frame_cnt > frame_range[1]:
                print('Ignoring frame ' + str(frame_cnt))
                continue
            else:
                print('start sr for frame ' + str(frame_cnt))

            input_file = os.path.join(args.lr_image_dir, fileName)
            output_file = os.path.join(args.out_path_dir, fileName)

            lr_image = tf.placeholder(tf.uint8)
            lr_image_data = misc.imread(input_file)  # pip install pillow
            lr_image_ycbcr_data = rgb2ycbcr(lr_image_data)
            lr_image_y_data = lr_image_ycbcr_data[:, :, 0:1]
            lr_image_cb_data = lr_image_ycbcr_data[:, :, 1:2]
            lr_image_cr_data = lr_image_ycbcr_data[:, :, 2:3]
            lr_image_batch = np.zeros((1,) + lr_image_y_data.shape)
            lr_image_batch[0] = lr_image_y_data
            print('preprocessed %d ms' % ((time() - ts) * 1000))
            ts = time()

            sr_image = net.generate(lr_image)
            print('network generated %d ms' % ((time() - ts) * 1000))
            ts = time()

            sr_image_y_data = sess.run(sr_image, feed_dict={lr_image: lr_image_batch})
            print('run %d ms' % ((time() - ts) * 1000))
            ts = time()

            sr_image_y_data = shuffle(sr_image_y_data[0], args.ratio)
            sr_image_ycbcr_data = misc.imresize(lr_image_ycbcr_data,
                                                params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                                'bicubic')

            edge = params['edge'] * params['ratio'] // 2  # integer division so edge can be used as a slice index

            sr_image_ycbcr_data = np.concatenate(
                (sr_image_y_data, sr_image_ycbcr_data[edge:-edge, edge:-edge, 1:3]),
                axis=2)
            print('mixed %d ms' % ((time() - ts) * 1000))
            ts = time()

            sr_image_data = ycbcr2rgb(sr_image_ycbcr_data)
            # sr_image_data = sr_image_ycbcr_data
            print('converted %d ms' % ((time() - ts) * 1000))
            ts = time()

            misc.imsave(output_file, sr_image_data)
            print(output_file + ' generated %d ms' % ((time() - ts) * 1000))
            ts = time()

            if args.hr_image_dir is not None:
                hr_image_path = os.path.join(args.hr_image_dir, fileName)
                hr_image_data = misc.imread(hr_image_path)
                model_psnr = psnr(hr_image_data, sr_image_data, edge)
                print('PSNR of the model: {:.2f}dB'.format(model_psnr))

                sr_image_bicubic_data = misc.imresize(lr_image_data,
                                                      params['ratio'] * np.array(lr_image_data.shape[0:2]),
                                                      'bicubic')
                bicubic_path = os.path.join(args.out_path_dir, fileName + '_bicubic.png')
                misc.imsave(bicubic_path, sr_image_bicubic_data)
                bicubic_psnr = psnr(hr_image_data, sr_image_bicubic_data, 0)
                print('PSNR of Bicubic: {:.2f}dB'.format(bicubic_psnr))
        except IndexError:
            print('Index error caught')
        except IOError:
            print('Cannot identify image file: ' + fileName)
        except ValueError:
            print('Cannot parse file name: ' + fileName)
        ssim_value = ssim.ssim(ref, dist)
        print "Frame=%d VIFP=%f SSIM=%f" % (frame_num, vifp_value, ssim_value)
        frame_num += 1
else:
    # Inputs are image files
    ref = scipy.misc.imread(ref_file, flatten=True).astype(numpy.float32)
    dist = scipy.misc.imread(dist_file, flatten=True).astype(numpy.float32)

    width, height = ref.shape[1], ref.shape[0]
    print "Comparing %s to %s, resolution %d x %d" % (ref_file, dist_file, width, height)

    vifp_value = vifp.vifp_mscale(ref, dist)
    print "VIFP=%f" % (vifp_value)

    ssim_value = ssim.ssim_exact(ref / 255, dist / 255)
    print "SSIM=%f" % (ssim_value)

    # FIXME this is buggy, disable for now
    # ssim_value2 = ssim.ssim(ref/255, dist/255)
    # print "SSIM approx=%f" % (ssim_value2)

    psnr_value = psnr.psnr(ref, dist)
    print "PSNR=%f" % (psnr_value)

    # niqe_value = niqe.niqe(dist/255)
    # print "NIQE=%f" % (niqe_value)

    reco_value = reco.reco(ref / 255, dist / 255)
    print "RECO=%f" % (reco_value)
                _weight = weight(i, j, i + x, j + y) * _flag
                tem_weight += _weight
                _weight = _weight.repeat(3).reshape(N1, N1, 3)  # expand to three channels
                tem_img += _n_img[i + x:i + N1 + x, j + y:j + N1 + y, :] * _weight

        tem_weight += (tem_weight == 0)  # avoid division by zero
        tem_weight = tem_weight.repeat(3).reshape(N1, N1, 3)  # expand to three channels
        final_img[i:i + N1, j:j + N1:, :] += tem_img / tem_weight
        num_img[i:i + N1, j:j + N1:, :] += tem_weight > 0

num_img += (num_img == 0)  # avoid division by zero
final_img = final_img / num_img  # each pixel was estimated several times, so take the plain average
final_img = final_img[blank:H + blank, blank:W + blank, :] + n_img

outFileName = 'result/NLM/' + noiseFile[:-4] + '_' + str(N1) + '_' + str(N2) + '_' + str(sigma) + '_' + str(h) + noiseFile[-4:]
cv2.imwrite(outFileName, final_img)
final_img = cv2.imread(outFileName)
print("\nP=%d B=%d sigma=%d h = %f" % (N1, N2, sigma, h), psnr(img, final_img))
end = time.clock()
print('time:%d' % (end - start))

# The results
# noiseFile = 'Lena_RS1_0.5.png'
# P=9 B=15 sigma=0 h = 1.000000 [33.802286078114356, 34.37252084511732, 37.22837467495257, 35.13439386606141] time:569
#
# noiseFile = 'Lena_RS1_0.3.png'
# P=15 B=21 sigma=0 h = 1.000000 [31.073116649651563, 31.0775574382444, 33.684435010928624, 31.945036366274863] time:785
def testFromH5Folder(input_dataset_dir, output_dataset_dir, upscale_factor,
                     input_field_name, model_name, cuda_flag, if_save):
    h5_file_list = [x for x in listdir(input_dataset_dir) if is_h5_file(x)]
    h5_file_list.sort()
    h5_len = len(h5_file_list)

    model_PSNR = np.zeros(h5_len)
    model_SSIM = np.zeros(h5_len)
    bicubic_PSNR = np.zeros(h5_len)
    bicubic_SSIM = np.zeros(h5_len)
    model_time = np.zeros(h5_len)

    # Load SR Model
    model = Net_SRCNN(upscale_factor=UPSCALE_FACTOR)
    model = torch.load(MODEL_NAME)
    if torch.cuda.is_available() & cuda_flag:
        model = model.cuda()
    else:
        model = model.cpu()

    if not os.path.exists(output_dataset_dir):
        os.makedirs(output_dataset_dir)

    for idx in tqdm(range(h5_len), desc='SR of upscale factor = ' + str(upscale_factor)):
        h5_file_name = h5_file_list[idx]
        h5_file = h5py.File(input_dataset_dir + '/' + h5_file_name, 'r')
        img_HR_y = h5_file['HR'].value
        img_LR_y = h5_file['LR'].value
        img_LR_bic_y = h5_file['LR_bic_y'].value
        img_LR_bic_cb = h5_file['LR_bic_cb'].value
        img_LR_bic_cr = h5_file['LR_bic_cr'].value
        img_HR_RGB = h5_file['HR_RGB'].value

        img_HR_y = img_HR_y.astype(np.float32)
        img_LR_y = img_LR_y.astype(np.float32)
        img_LR_bic_y = img_LR_bic_y.astype(np.float32)
        img_LR_bic_cb = img_LR_bic_cb.astype(np.float32)
        img_LR_bic_cr = img_LR_bic_cr.astype(np.float32)
        img_HR_RGB = img_HR_RGB.astype(np.float32)

        img_HR_y = img_HR_y / 255.0
        img_LR_y = img_LR_y / 255.0
        img_HR_RGB = img_HR_RGB / 255.0

        if input_field_name == 'LR':
            img_LR_4d = img_LR_y.reshape(1, 1, img_LR_y.shape[0], img_LR_y.shape[1])
        elif input_field_name == 'LR_bic_y':
            img_LR_4d = img_LR_bic_y.reshape(1, 1, img_LR_bic_y.shape[0], img_LR_bic_y.shape[1])

        image = Variable(torch.from_numpy(img_LR_4d))
        if torch.cuda.is_available() & cuda_flag:
            image = image.cuda()

        start = time.time()
        target = model(image)
        end = time.time()

        target = target.cpu()
        img_HR_y_net = target.data[0][0].numpy()

        if if_save:
            img_HR_ycbcr_net = np.zeros(img_HR_RGB.shape)
            img_HR_ycbcr_net[:, :, 0] = img_HR_y_net
            img_HR_ycbcr_net[:, :, 1] = img_LR_bic_cb
            img_HR_ycbcr_net[:, :, 2] = img_LR_bic_cr
            img_HR_ycbcr_net = img_HR_ycbcr_net.clip(0.0, 1.0)
            img_HR_RGB_net = ycbcr2rgb(img_HR_ycbcr_net)
            img_HR_RGB_net = img_HR_RGB_net.clip(0.0, 1.0)
            img_HR_RGB_net *= 255.0
            img_HR_BGR_net = rgb2bgr(img_HR_RGB_net)
            image_name = 'img_' + str(idx) + '_net.png'
            cv2.imwrite(output_dataset_dir + '/' + image_name, img_HR_BGR_net.astype(np.uint8))

        # Compute Stat
        model_PSNR[idx] = psnr((img_HR_y * 255.0).astype(int), (img_HR_y_net * 255.0).astype(int))
        model_SSIM[idx] = ssim((img_HR_y * 255.0).astype(int), (img_HR_y_net * 255.0).astype(int))
        bicubic_PSNR[idx] = psnr((img_HR_y * 255.0).astype(int), (img_LR_bic_y * 255.0).astype(int))
        bicubic_SSIM[idx] = ssim((img_HR_y * 255.0).astype(int), (img_LR_bic_y * 255.0).astype(int))
        model_time[idx] = (end - start)

    print("===> Test on " + input_dataset_dir +
          " Complete: Model PSNR: {:.4f} dB, Model SSIM: {:.4f} , Bicubic PSNR: {:.4f} dB, Bicubic SSIM: {:.4f} , Average time: {:.4f}"
          .format(np.average(model_PSNR), np.average(model_SSIM),
                  np.average(bicubic_PSNR), np.average(bicubic_SSIM),
                  np.average(model_time) * 1000))
    int(opt.imageSize / 4):int(opt.imageSize / 4 + opt.imageSize / 2)] = fake.data

vutils.save_image(real_cpu, 'val_real_samples.png', normalize=True)
vutils.save_image(input_cropped.data, 'val_cropped_samples.png', normalize=True)
vutils.save_image(recon_image.data, 'val_recon_samples.png', normalize=True)

p = 0
l1 = 0
l2 = 0

fake = fake.data.numpy()
real_center = real_center.data.numpy()

from psnr import psnr
import numpy as np

t = real_center - fake
l2 = np.mean(np.square(t))
l1 = np.mean(np.abs(t))

real_center = (real_center + 1) * 127.5
fake = (fake + 1) * 127.5

for i in range(opt.batchSize):
    p = p + psnr(real_center[i].transpose(1, 2, 0), fake[i].transpose(1, 2, 0))

print(l2)
print(l1)
print(p / opt.batchSize)
# loading the image
mat_f = loadmat(file_name)
true = mat_f['img']      # original image
img = mat_f['imm256']    # Cai's processed image
M, N = img.shape
img = img.reshape(1, M, N, 1)

model_file = 'sigma' + str(sigma) + 'p' + pp + '.mat'  # name of the model file to be used
load_from = os.path.join('models', model_file)

session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess = tf.Session(config=session_conf)
cnn = denoiser_net(is_load=True, filename=load_from)
global_step = tf.Variable(0, name="global_step", trainable=False)
sess.run(tf.global_variables_initializer())

out, _ = sess.run([cnn.out, global_step], feed_dict={cnn.input_x: img})
out = out.reshape(M // 2, N // 2)  # integer dimensions for reshape
out[out > 1] = 1
out[out < 0] = 0

mat_f['cnn'] = out
savemat(file_name, mat_f)  # mat_f now contains the cnn denoised image as well.

print("PSNR: ")
print(psnr(out, true, 1.0))
args = parse_args()
pred_imgs_path = os.path.join(args.output_path, args.model_flag + '_' + args.exp_num)

imgs_list_file = '/home/chenli/code/SRN-Deblur/test_datalist.txt'
imgs_datalist = open(imgs_list_file, 'rt').read().splitlines()
imgs_datalist = list(map(lambda x: x.split(' '), imgs_datalist))
imgsName = [x[1] for x in imgs_datalist]

psnr_values = []
ssim_values = []
for imgName in imgsName:
    real = scipy.misc.imread(imgName, flatten=True).astype(numpy.float32)
    split_name = imgName.split('/')
    pred_imgName = os.path.join(pred_imgs_path, split_name[-3], split_name[-2], split_name[-1])
    pred = scipy.misc.imread(pred_imgName, flatten=True).astype(numpy.float32)

    ssim_value = ssim.ssim_exact(real / 255, pred / 255)
    psnr_value = psnr.psnr(real, pred)
    print('psnr:%.5f ssim:%.5f' % (psnr_value, ssim_value))
    ssim_values.append(ssim_value)
    psnr_values.append(psnr_value)

final_psnr_value = numpy.mean(psnr_values)
final_ssim_value = numpy.mean(ssim_values)
print('final psnr:%.5f final ssim:%.5f' % (final_psnr_value, final_ssim_value))
def main():
    '''
    Training used DataParallel, so testing must be parallel as well;
    otherwise errors like the following are reported:
    Missing key(s) in state_dict: ... (e.g. conv1.weight)
    '''
    print('testing processing....')

    # load the model
    test_model = VRCNN(opt.upscale_factor)
    test_model = torch.nn.DataParallel(test_model, device_ids=gpus_list, output_device=gpus_list[1])
    test_model = test_model.cuda(gpus_list[0])

    print('---------- Networks architecture -------------')
    print_network(test_model)
    print('----------------------------------------------')

    # load the pretrained model
    model_name = os.path.join(opt.model_save_folder, opt.exp_name, opt.test_model)
    print('model_name=', model_name)
    if os.path.exists(model_name):
        pretrained_dict = torch.load(model_name, map_location=lambda storage, loc: storage)
        model_dict = test_model.state_dict()
        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict)
        test_model.load_state_dict(model_dict)
        print('Pre-trained SR model is loaded.')

    if not os.path.exists(opt.pre_result):
        os.mkdir(opt.pre_result)

    with open(opt.train_log + '/psnr_ssim-xr-200.txt', 'a') as psnr_ssim:
        with torch.no_grad():
            ave_psnr = 0
            ave_ssim = 0
            single_ave_psnr = 0
            single_ave_ssim = 0
            numb = 2
            valSet = ValidationsetLoader(opt.val_dataset_hr, opt.val_dataset_lr)
            valLoader = DataLoader(dataset=valSet, batch_size=opt.test_val_batchSize, shuffle=False)
            val_bar = tqdm(valLoader)
            for data in val_bar:
                test_model.eval()
                # dual_net.eval()
                batch_lr_y, label, SR_cb, SR_cr, idx, bicubic_restore = data
                batch_lr_y, label = Variable(batch_lr_y).cuda(gpus_list[0]), Variable(label).cuda(gpus_list[0])
                output = test_model(batch_lr_y)

                SR_ycbcr = np.concatenate((np.array(output.squeeze(0).data.cpu()), SR_cb, SR_cr),
                                          axis=0).transpose(1, 2, 0)
                SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
                SR_rgb = np.clip(SR_rgb, 0, 255)
                SR_rgb = ToPILImage()(SR_rgb.astype(np.uint8))
                # ToTensor(): image(0-255) ==> image(0-1), (H,W,C) ==> (C,H,W)
                SR_rgb = ToTensor()(SR_rgb)

                # save the given Tensor as an image file; for a mini-batch tensor,
                # make_grid builds a sprite image before saving (used with utils.make_grid())
                if not os.path.exists(opt.pre_result + '/' + opt.exp_name):
                    os.mkdir(opt.pre_result + '/' + opt.exp_name)
                utils.save_image(SR_rgb, opt.pre_result + '/' + opt.exp_name + '/' + 'my' + str(numb).rjust(3, '0') + '.png')
                numb = numb + 1

                psnr_value = psnr(np.array(torch.squeeze(label).data.cpu()) * 255,
                                  np.array(torch.squeeze(output).data.cpu()) * 255)
                ave_psnr = ave_psnr + psnr_value
                single_ave_psnr = single_ave_psnr + psnr_value
                ssim_value = calculate_ssim(np.array(torch.squeeze(label).data.cpu()) * 255,
                                            np.array(torch.squeeze(output).data.cpu()) * 255)
                ave_ssim = ave_ssim + ssim_value
                single_ave_ssim = single_ave_ssim + ssim_value

                val_bar.set_description('===>{}th video {}th frame, wsPSNR:{:.4f} dB,wsSSIM:{:.6f}'.format(
                    idx // 98 + 1, idx % 98 + 1, psnr_value, ssim_value))

                if idx == 293 or idx == 97 or idx == 195 or idx == 391:
                    print("===> {}th video Avg. wsPSNR: {:.4f} dB".format(idx // 98 + 1, single_ave_psnr / 98))
                    print("===> {}th video Avg. wsSSIM: {:.6f}".format(idx // 98 + 1, single_ave_ssim / 98))
                    psnr_ssim.write('===>{}th video avg wsPSNR:{:.4f} dB,wsSSIM:{:.6f}\n'.format(
                        idx // 98 + 1, single_ave_psnr / 98, single_ave_ssim / 98))
                    single_ave_psnr = 0
                    single_ave_ssim = 0

            print("===> All Avg. wsPSNR: {:.4f} dB".format(ave_psnr / len(valLoader)))
            print("===> ALL Avg. wsSSIM: {:.6f}".format(ave_ssim / len(valLoader)))
            psnr_ssim.write('===>all videos avg wsPSNR:{:.4f} dB,wsSSIM:{:.6f}\n'.format(
                ave_psnr / len(valLoader), ave_ssim / len(valLoader)))

    print('testing finished!')
dist_file_dir = os.path.join(dist_path, dist_file)  # absolute path of the distorted file
if os.path.isdir(dist_file_dir):  # skip directories
    continue
dist_img = scipy.misc.imread(dist_file_dir, flatten=True).astype(numpy.float32)
# filename = os.path.splitext(dist_file)[0] + os.path.splitext(dist_file)[1]  # ref filename
filename = os.path.splitext(dist_file)[0] + '.png'  # ref filename
ref_img = scipy.misc.imread(ref_path + '\\' + filename, flatten=True).astype(
    numpy.float32)  # read the reference image matching the test image

psnr_data = psnr.psnr(ref_img, dist_img)  # PSNR metric
ssim_data = ssim.ssim_exact(ref_img / 255, dist_img / 255)  # SSIM metric
dist_img = cv2.imread(dist_path + '\\' + os.path.splitext(dist_file)[0] + '.png')  # for the UIQM metric
uiqm_data = uqim_utils.getUIQM(dist_img)  # UIQM

print("img:" + str(filename) + " psnr:" + str(psnr_data) + " ssim:" +
      str(ssim_data) + " UIQM:" + str(uiqm_data))
data = str(filename) + " psnr:" + str(psnr_data) + " ssim:" + str(
    ssim_data) + " UIQM:" + str(uiqm_data)

psnr_list.append(psnr_data)
ssim_list.append(ssim_data)
uiqm_list.append(uiqm_data)
ori_shape = ref_img.shape
ori_size = 1
for dim in ori_shape:
    ori_size = ori_size * dim
ori_size = int(ori_size / 1024)

print cps_path_list
for cps_path in cps_path_list:
    file_size = os.path.getsize(cps_path)
    cps_img = scipy.misc.imread(cps_path, flatten=False, mode='RGB').astype(numpy.float32)
    print cps_img.shape
    # exit()

    size_values.append(int(file_size / 1024))  ## kb
    # vifp_values.append( vifp.vifp_mscale(ref_img, cps_img) )
    ssim_values.append(ssim.ssim_exact(ref_img / 255, cps_img / 255))
    psnr_values.append(psnr.psnr(ref_img, cps_img))
    # reco_values.append( reco.reco(ref_img/255, cps_img/255) )

# print size_values;exit()
for i in range(len(size_values)):
    # csv_writer.writerow
    output_row = [
        os.path.basename(ref_path),
        os.path.basename(cps_path_list[i]),
        str(ori_shape),
        str(ori_size),
        str(size_values[i]),
        str(float(ori_size) / size_values[i]),
        str(psnr_values[i]),
        str(ssim_values[i])
    ]
    csv_writer.writerow(output_row)
for j in range(col):
    if mask.mask[i][j] == 1:
        img_r[i][j] = img[i][j][0]
        img_g[i][j] = img[i][j][1]
        img_b[i][j] = img[i][j][2]

cv2.imwrite(data_path.image_r_path, img_r)
cv2.imwrite(data_path.image_g_path, img_g)
cv2.imwrite(data_path.image_b_path, img_b)


if __name__ == '__main__':
    color_to_grey.color_to_grey_1()
    bm3d_r()
    bm3d_g()
    bm3d_b()
    refresh()
    grey_to_color.grey_to_color()
    print("for iteration 1:")
    psnr.psnr()

    for i in range(2, iter_time + 1):
        Threshold_Hard3D = Threshold_Hard3D * 0.6
        color_to_grey.color_to_grey_2()
        bm3d_r()
        bm3d_g()
        bm3d_b()
        refresh()
        grey_to_color.grey_to_color()
        print("for iteration %d:" % i)
        psnr.psnr()
        weight_img[i: i + N1, j: j + N1:, :] += tem_weight > 0  # count how many estimates each pixel received, averaged at the end
        # Kaiser-window variant, corresponding to the two lines above:
        # final_img[i: i + N1, j: j + N1:, :] += tem_img / tem_weight * Kaiser
        # weight_img[i: i + N1, j: j + N1:, :] += Kaiser

weight_img += (weight_img == 0)  # avoid division by zero
final_img = final_img / weight_img  # each pixel was estimated several times, so take the plain average
final_img = final_img[blank: H + blank, blank: W + blank, :]
_bias = np.repeat(f, 3).reshape(H, W, 3) * bias  # pixels with all-zero RGB map to YCbCr (16, 128, 128); correct final_img
final_img = yuv_to_bgr(final_img + _bias) + n_img

outFileName = 'result/NLM/' + noiseFile[:-4] + '_' + str(N1) + '_' + str(N2) + '_' + str(sigma) + '_' + str(h) + noiseFile[-4:]
cv2.imwrite(outFileName, final_img)
final_img = cv2.imread(outFileName)
print("\nP=%d B=%d sigma=%d h = %f" % (N1, N2, sigma, h), psnr(img, final_img))
end = time.clock()
print('time:%d' % (end - start))

# The results
# noiseFile = 'Lena_RS1_0.5.png'  weights from all three YUV channels, no Kaiser window
# P=9 B=15 sigma=0 h = 1.000000 [33.93627406850448, 34.510636345073365, 37.35516440584259, 35.26735827314015] time:675
#
# noiseFile = 'Lena_RS1_0.3.png'
# P=15 B=21 sigma=0 h = 1.000000 [31.195591935621792, 31.183651976211202, 33.775171616441405, 32.05147184275813] time:685
#
# noiseFile = 'test.png'  (150*150 crop taken from Lena.png)
# weights computed from RGB
# P=9 B=15 sigma=0 h = 1.000000 [31.411476278247996, 32.27519874157173, 35.13957722424206, 32.942084081353926]
# weights computed from the Y channel only
# P=9 B=15 sigma=0 h = 1.000000 [31.325063582425457, 32.3117315982166, 35.16094564206194, 32.93258027423467]
targetRGB[1, :, :] = (targetRGB[1, :, :] * 0.5) + 0.5
targetRGB[2, :, :] = (targetRGB[2, :, :] * 0.5) + 0.5

imageRGB_rand_recon[imageRGB_rand_recon > 1] = 1
imageRGB_adap_recon[imageRGB_adap_recon > 1] = 1
corrupt_image_rand[corrupt_image_rand > 1] = 1
corrupt_image_adap[corrupt_image_adap > 1] = 1

imageRGB_rand_recon[imageRGB_rand_recon < 0] = 0
imageRGB_adap_recon[imageRGB_adap_recon < 0] = 0
corrupt_image_rand[corrupt_image_rand < 0] = 0
corrupt_image_adap[corrupt_image_adap < 0] = 0

# Compute Stat here
netE_rand_PSNR[img_idx] = psnr((targetRGB * 255.0).astype(int),
                               (imageRGB_rand_recon * 255.0).astype(int))
netE_adap_PSNR[img_idx] = psnr((targetRGB * 255.0).astype(int),
                               (imageRGB_adap_recon * 255.0).astype(int))
netE_rand_MSE[img_idx] = mse((targetRGB * 255.0).astype(int),
                             (imageRGB_rand_recon * 255.0).astype(int))
netE_adap_MSE[img_idx] = mse((targetRGB * 255.0).astype(int),
                             (imageRGB_adap_recon * 255.0).astype(int))
rand_corrupt_PSNR[img_idx] = psnr((targetRGB * 255.0).astype(int),
                                  (corrupt_image_rand * 255.0).astype(int))
adap_corrupt_PSNR[img_idx] = psnr((targetRGB * 255.0).astype(int),