Code example #1
def image_quality_evaluation(sr_filename: str, hr_filename: str, device: torch.device = "cpu"):
    """Image quality evaluation function.

    Args:
        sr_filename (str): Image file name after super resolution.
        hr_filename (str): Original high resolution image file name.
        device (torch.device, optional): Device on which to run the evaluation. (Default: ``cpu``).

    Returns:
        A tuple of `mse, rmse, psnr, ssim, msssim, niqe, sam, vifp, lpips` values for the image pair.
    """
    # Reference: https://github.com/richzhang/PerceptualSimilarity
    lpips_loss = lpips.LPIPS(net="vgg", verbose=False).to(device)
    # Evaluate performance
    sr = cv2.imread(sr_filename)
    hr = cv2.imread(hr_filename)

    # For LPIPS evaluation
    sr_tensor = opencv2tensor(sr, device)
    hr_tensor = opencv2tensor(hr, device)

    # Compute the full set of metrics.
    mse_value = mse(sr, hr)
    rmse_value = rmse(sr, hr)
    psnr_value = psnr(sr, hr)
    ssim_value = ssim(sr, hr)
    msssim_value = msssim(sr, hr)
    niqe_value = niqe(sr_filename)
    sam_value = sam(sr, hr)
    vifp_value = vifp(sr, hr)
    lpips_value = lpips_loss(sr_tensor, hr_tensor)
    return mse_value, rmse_value, psnr_value, ssim_value, msssim_value, niqe_value, sam_value, vifp_value, lpips_value
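
The `opencv2tensor` helper used for the LPIPS inputs is not part of this snippet. A minimal sketch of what it presumably does (convert an OpenCV BGR uint8 image into a 1xCxHxW float tensor on the target device) is shown below; the name is taken from the call above, but the exact normalization is an assumption.

import cv2
import torch


def opencv2tensor(image, device):
    # Assumed helper: OpenCV images are HxWxC, BGR, uint8.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    tensor = torch.from_numpy(rgb).float().div(255.0)   # scale to [0, 1]
    tensor = tensor.permute(2, 0, 1).unsqueeze(0)        # 1xCxHxW
    tensor = tensor * 2.0 - 1.0                          # LPIPS expects [-1, 1] by default
    return tensor.to(device)
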
Code example #2
def evaluate(GT, P):

    score = {'rmse': 1e9, 'psnr': 0, 'ssim': 0, 'hough': 1e9}
    for ratio in np.arange(1.0, 1.3, 0.05):
        GT_enlarge = enlarge_and_crop(GT, ratio, 256)
        score['rmse'] = min(score['rmse'], rmse(GT_enlarge, P))
        score['psnr'] = max(score['psnr'], psnr(GT_enlarge, P))
        score['ssim'] = max(score['ssim'], ssim(GT_enlarge, P)[0])
        score['hough'] = min(score['hough'], Hough_score(GT_enlarge, P))
    print(score)
    return score
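
`enlarge_and_crop` is not defined in this snippet. Judging from the call, it scales the ground-truth image by `ratio` and crops it back to a `size` x `size` patch so it can be compared against `P`; a hypothetical sketch:

import cv2


def enlarge_and_crop(img, ratio, size):
    # Hypothetical helper: upscale by `ratio`, then center-crop to size x size.
    h, w = img.shape[:2]
    enlarged = cv2.resize(img, (int(round(w * ratio)), int(round(h * ratio))),
                          interpolation=cv2.INTER_CUBIC)
    top = max((enlarged.shape[0] - size) // 2, 0)
    left = max((enlarged.shape[1] - size) // 2, 0)
    return enlarged[top:top + size, left:left + size]
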
Code example #3
def obtain_similarity_metrics(GT_img, distorted_img):
    # MEAN SQUARED ERROR
    mse_value = mse(GT_img, distorted_img)
    # STRUCTURAL SIMILARITY
    ssim_value = ssim(GT_img, distorted_img)
    # PEAK SIGNAL TO NOISE RATIO
    psnr_value = psnr(GT_img, distorted_img)
    # ROOT MEAN SQUARED ERROR
    rmse_value = rmse(GT_img, distorted_img)
    # VISUAL INFORMATION FIDELITY
    vif_value = vifp(GT_img, distorted_img)
    # UNIVERSAL IMAGE QUALITY INDEX
    uqi_value = uqi(GT_img, distorted_img)
    # MULTI-SCALE STRUCTURAL SIMILARITY INDEX
    msssim_value = msssim(GT_img, distorted_img)
    # PSNR-HVS-M  &  PSNR-HVS
    p_hvs_m, p_hvs = psnrhmam.color_psnrhma(GT_img, distorted_img)

    return mse_value, ssim_value, psnr_value, rmse_value, vif_value, uqi_value, msssim_value, p_hvs_m, p_hvs
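
The metric functions called here match the full-reference metrics exposed by the sewar package, so the imports for this snippet are presumably along these lines (psnrhmam is not part of sewar and is assumed to be a project-local PSNR-HVS / PSNR-HVS-M module):

# Presumed imports: these full-reference metrics all exist in sewar.full_ref.
from sewar.full_ref import mse, ssim, psnr, rmse, vifp, uqi, msssim
import psnrhmam  # assumed project-local module providing color_psnrhma(GT, P)
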
Code example #4
    sr = model(lr)
end_time = time.time()

vutils.save_image(lr, "lr.png")
vutils.save_image(sr, "sr.png")
vutils.save_image(hr, "hr.png")

# Evaluate performance
src_img = cv2.imread("sr.png")
dst_img = cv2.imread("hr.png")

# Reference: https://github.com/richzhang/PerceptualSimilarity
lpips_loss = lpips.LPIPS(net="vgg").to(device)

mse_value = mse(src_img, dst_img)
rmse_value = rmse(src_img, dst_img)
psnr_value = psnr(src_img, dst_img)
ssim_value = ssim(src_img, dst_img)
ms_ssim_value = msssim(src_img, dst_img)  # msssim returns a complex value such as (30.00+0j); only the real part is meaningful
niqe_value = cal_niqe("sr.png")
sam_value = sam(src_img, dst_img)
vif_value = vifp(src_img, dst_img)
lpips_value = lpips_loss(sr, hr)

print("\n")
print("====================== Performance summary ======================")
print(
    f"MSE: {mse_value:.2f}\n"
    f"RMSE: {rmse_value:.2f}\n"
    f"PSNR: {psnr_value:.2f}\n"
    f"SSIM: {ssim_value[0]:.4f}\n"
Code example #5
######################################################################
#RASE
"""calculates relative average spectral error (rase).

	:param GT: first (original) input image.
	:param P: second (deformed) input image.
	:param ws: sliding window size (default = 8).

	:returns:  float -- rase value.
	"""
RASE_img = full_ref.rase(ref_img, img, ws=8)
#print("RASE: relative average spectral error = ", RASE_img)


######################################################################
#RMSE
"""calculates root mean squared error (rmse).

	:param GT: first (original) input image.
	:param P: second (deformed) input image.

	:returns:  float -- rmse value.
	"""
rmse_img = full_ref.rmse(ref_img, img)
print("RMSE: root mean squared error = ", rmse_img)



######################################################################
#root mean squared error (rmse) using sliding window
"""calculates root mean squared error (rmse) using sliding window.

	:param GT: first (original) input image.
	:param P: second (deformed) input image.
	:param ws: sliding window size (default = 8).

	:returns:  tuple -- rmse value,rmse map.	
	"""
rmse_sw_img = full_ref.rmse_sw(ref_img, img, ws=8)
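
As the docstrings above state, full_ref.rmse is just the square root of the mean squared pixel difference. For reference, a plain-NumPy equivalent (assuming both images have the same shape) would be:

import numpy as np


def rmse_numpy(GT, P):
    # Root mean squared error over all pixels and channels, computed in float64.
    diff = GT.astype(np.float64) - P.astype(np.float64)
    return np.sqrt(np.mean(diff ** 2))
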
Code example #6
def main(sample):
    input_file_name = sys.argv[1]
    original_file_object = cv2.imread(input_file_name)
    original_file_size = original_file_object.shape[
        0] * original_file_object.shape[1] * original_file_object.shape[2]

    converted_image = cv2.cvtColor(original_file_object, cv2.COLOR_BGR2YCR_CB)
    y, cr_original, cb_original = cv2.split(converted_image)

    steps = FetchSteps(sample)

    # cr_new=np.zeros((a,b))

    cr_new = cr_original[::steps[0], ::steps[1]]
    cb_new = cb_original[::steps[0], ::steps[1]]

    # for

    compressed_file_size = y.shape[0] * y.shape[1] + cb_new.shape[
        0] * cb_new.shape[1] + cr_new.shape[0] * cr_new.shape[1]

    cr_de = np.repeat(cr_new, steps[0], axis=0)
    cr_de = np.repeat(cr_de, steps[1], axis=1)

    cb_de = np.repeat(cb_new, steps[0], axis=0)
    cb_de = np.repeat(cb_de, steps[1], axis=1)

    # Trim any excess rows/columns introduced by np.repeat so the upsampled
    # chroma planes match the luma plane again.
    while (y.shape[0] != cr_de.shape[0]):
        cr_de = np.delete(cr_de, cr_de.shape[0] - 1, 0)

    while (y.shape[0] != cb_de.shape[0]):
        cb_de = np.delete(cb_de, cb_de.shape[0] - 1, 0)

    while (cb_original.shape[1] != cb_de.shape[1]):
        cb_de = np.delete(cb_de, cb_de.shape[1] - 1, 1)

    while (cr_original.shape[1] != cr_de.shape[1]):
        cr_de = np.delete(cr_de, cr_de.shape[1] - 1, 1)

    # print(y.shape)
    # print(cb_original.shape)
    # print(cr_original.shape)
    # print(cb_de.shape)
    # print(cr_de.shape)

    combined = np.dstack((y, cr_de, cb_de))
    decoded = cv2.cvtColor(combined, cv2.COLOR_YCR_CB2BGR)
    difference = np.dstack((y - y, cr_original - cr_de, cb_original - cb_de))

    # file1=input_file_name.split[0]
    # print(type(input_file_name))
    file2 = input_file_name.split('.')[1]
    a = sample[0]
    b = sample[1]
    c = sample[2]
    add = str(a) + str(b) + str(c)

    cv2.imwrite('./output/decoded' + add + '.' + file2, decoded)
    cv2.imwrite('./output/decoded_diff_' + add + '.' + file2, difference)
    ratio = (original_file_size) / compressed_file_size
    print("Compression Ratio: " + str(ratio))
    # print("psnr")
    print("PSNR: " + str(psnr(combined, converted_image)))
    print("MSE: " + str(mse(combined, converted_image)))
    print("RMSE: " + str(rmse(combined, converted_image)))
Code example #7
File: model_evaluate.py  Project: Luoyi3819/ClawGAN
    #save

    os.makedirs("test_data/fake_visible", exist_ok = True)
    cv2.imwrite("test_data/fake_visible/" + vis_path , fake_A[0][:,:,::-1] * 255)

    totol_metric_dict_matched = {"mse":0.0,"rmse":0.0,"uqi":0.0,"ssim":0.0,"psnr":0.0,"psnrb":0.0,"vifp":0.0}  # evaluation metrics (running totals)

true_path = "test_data/visible"
fake_path = "test_data/fake_visible"

lenth = len(os.listdir(true_path))

for true_name,fake_name in zip(os.listdir(true_path),os.listdir(fake_path)):
	true = cv2.imread(os.path.join(true_path,true_name))
	fake = cv2.imread(os.path.join(fake_path,fake_name))

	metric_dict_matched = {"mse":mse(fake,true),"rmse":rmse(fake,true),"uqi":uqi(fake,true),"ssim":ssim(fake,true)[0],
	                       "psnr":psnr(fake,true),"psnrb":psnrb(fake,true),"vifp":vifp(fake,true)}
	for key,value in metric_dict_matched.items():
		totol_metric_dict_matched[key] = totol_metric_dict_matched[key]+value

for key,value in totol_metric_dict_matched.items():
	totol_metric_dict_matched[key] /= lenth
print(totol_metric_dict_matched)
#path = ["train_data/" + method + "_infrared","train_data/" + method + "_visible"]
path = [true_path,fake_path]
fid_value = fid.calculate_fid_given_paths(path, inception_path = None, low_profile=False)
print("FID: ", fid_value)  
print("done")
Code example #8
File: test.py  Project: updating00/FSRCNN-PyTorch-1
    out_image_y = out_image_y.clip(0, 255)
    out_image_y = Image.fromarray(np.uint8(out_image_y[0]), mode="L")

    out_img_cb = cb.resize(out_image_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_image_y.size, Image.BICUBIC)
    out_img = Image.merge("YCbCr",
                          [out_image_y, out_img_cb, out_img_cr]).convert("RGB")
    # save the result (now converted back to RGB)
    out_img.save(f"result/{filename}")

    # Evaluate performance
    src_img = cv2.imread(f"result/{filename}")
    dst_img = cv2.imread(f"{target}/{filename}")

    total_mse_value += mse(src_img, dst_img)
    total_rmse_value += rmse(src_img, dst_img)
    total_psnr_value += psnr(src_img, dst_img)
    total_ssim_value += ssim(src_img, dst_img)
    total_ms_ssim_value += msssim(src_img, dst_img)
    total_niqe_value += cal_niqe(f"result/{filename}")
    total_sam_value += sam(src_img, dst_img)
    total_vif_value += vifp(src_img, dst_img)

    total_file += 1

print(f"Avg MSE: {total_mse_value / total_file:.2f}\n"
      f"Avg RMSE: {total_rmse_value / total_file:.2f}\n"
      f"Avg PSNR: {total_psnr_value / total_file:.2f}\n"
      f"Avg SSIM: {total_ssim_value / total_file:.4f}\n"
      f"Avg MS-SSIM: {total_ms_ssim_value / total_file:.4f}\n"
      f"Avg NIQE: {total_niqe_value / total_file:.2f}\n"
Code example #9
def main():

    prepare()
    file = sys.argv[1]
    file = cv2.imread(file)
    file = cv2.cvtColor(file, cv2.COLOR_BGR2RGB)
    original_file_size = file.shape[0] * file.shape[1] * file.shape[2]

    r, g, b = cv2.split(file)

    symb2freq_r = defaultdict(int)
    symb2freq_g = defaultdict(int)
    symb2freq_b = defaultdict(int)
    dic_r = {}
    dic_g = {}
    dic_b = {}
    dic_r_rev = {}
    dic_g_rev = {}
    dic_b_rev = {}
    # np_r=np.zeros(256)
    # np_b=np.zeros(256)
    # np_g=np.zeros(256)

    for x in r:
        for y in x:
            symb2freq_r[y] += 1

    huff_r = HuffmanEncoder(symb2freq_r)
    # print(r)
    exp_len_r = 0
    # entropy_r=0
    # print("Symbol\tFrequency\tProbability\tHuffman Code")
    for p in huff_r:
        # print ("%s\t%s\t\t%s\t\t%s" % (p[0], symb2freq_r[p[0]],symb2freq_r[p[0]]/(r.shape[0]*r.shape[1]),len(p[1])))
        dic_r[p[0]] = p[1]
        dic_r_rev[p[1]] = p[0]

        # prob=(symb2freq_r[p[0]]/(r.shape[0]*r.shape[1]))
        # exp_len_r=exp_len_r+prob*len(p[1])
        # prob_inv=1/prob
        # entropy=entropy+(prob*math.log((prob_inv),2))
        # print((symb2freq_r[p[0]]/(r.shape[0]*r.shape[1])))
        # print(len(p[1]))
        # print(entropy)
        # print(prob_inv)
        # print(math.log((prob_inv),2))

        # np_r[p[0]]=symb2freq_r[p[0]]

    # print("Expected Length: "+str(exp_len_r))
    # print("Entropy: "+str(entropy))

    for x in g:
        for y in x:
            symb2freq_g[y] += 1

    huff_g = HuffmanEncoder(symb2freq_g)
    # print("Symbol\tWeight\tHuffman Code")
    for p in huff_g:
        # print ("%s\t%s\t%s" % (p[0], symb2freq_g[p[0]], p[1]))
        dic_g[p[0]] = p[1]
        dic_g_rev[p[1]] = p[0]

        # np_g[p[0]]=symb2freq_g[p[0]]

    for x in b:
        for y in x:
            symb2freq_b[y] += 1

    huff_b = HuffmanEncoder(symb2freq_b)
    # print("Symbol\tWeight\tHuffman Code")
    for p in huff_b:
        # print ("%s\t%s\t%s" % (p[0], symb2freq_b[p[0]], p[1]))
        dic_b[p[0]] = p[1]
        dic_b_rev[p[1]] = p[0]

        # np_b[p[0]]=symb2freq_b[p[0]]

    # print(len(dic_b))

    file_r = open("./output/r.txt", "w+")
    file_g = open("./output/g.txt", "w+")
    file_b = open("./output/b.txt", "w+")
    total = 0

    for x in r:
        for y in x:
            total = total + len(dic_r[y])
            # print(dic_r[y])
            file_r.write(dic_r[y])
            file_r.write('\n')

            # print(total)

    for x in g:
        for y in x:
            # file_r.write(str(y)+" ")
            total = total + len(dic_g[y])
            file_g.write(dic_g[y])
            file_g.write('\n')

    for x in b:
        for y in x:
            # file_r.write(str(y)+" ")
            total = total + len(dic_b[y])
            file_b.write(dic_b[y])
            file_b.write('\n')

    file_r.close()
    file_b.close()
    file_g.close()

    file_r = open("./output/r.txt", "r")
    file_g = open("./output/g.txt", "r")
    file_b = open("./output/b.txt", "r")

    red = np.zeros((r.shape[0], r.shape[1]), dtype=np.uint8)
    blue = np.zeros((b.shape[0], b.shape[1]), dtype=np.uint8)
    green = np.zeros((g.shape[0], g.shape[1]), dtype=np.uint8)

    i = 0
    j = 0

    for line in file_r:
        line = line.rstrip()
        # line=line.split()[1]
        red[i][j] = np.uint8(dic_r_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == r.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    i = 0
    j = 0

    for line in file_g:
        line = line.rstrip()
        # line=line.split()[1]
        green[i][j] = np.uint8(dic_g_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == g.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    i = 0
    j = 0

    for line in file_b:
        line = line.rstrip()
        # line=line.split()[1]
        blue[i][j] = np.uint8(dic_b_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == b.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    combined = np.dstack((red, green, blue))

    print("PSNR: " + str(psnr(combined, file)))
    print("MSE: " + str(mse(combined, file)))
    print("RMSE: " + str(rmse(combined, file)))
    ratio = (original_file_size * 8) / total
    print("Compression Ratio: " + str(ratio))

    combined = cv2.cvtColor(combined, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./output/combined.png', combined)

    combined_d = np.dstack((r - red, g - green, b - blue))
    # print(np.min(r-red))
    # print(np.max(r-red))
    # print(np.min(g-green)
    # print(b-blue)

    combined_d = cv2.cvtColor(combined_d, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./output/dif.png', combined_d)
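
`HuffmanEncoder` is not shown in this example. It is called with a symbol-to-frequency mapping and iterated as (symbol, code) pairs, so a standard heapq-based implementation with that interface (the name and return format are taken from the calls above; the body is an assumption) could look like this:

import heapq


def HuffmanEncoder(symb2freq):
    # Hypothetical implementation: build a Huffman code from a
    # symbol -> frequency mapping and return (symbol, bitstring) pairs,
    # matching how the caller reads p[0] and p[1].
    heap = [[freq, [sym, ""]] for sym, freq in symb2freq.items()]
    heapq.heapify(heap)
    while len(heap) > 1:
        lo = heapq.heappop(heap)
        hi = heapq.heappop(heap)
        for pair in lo[1:]:
            pair[1] = "0" + pair[1]   # left branch
        for pair in hi[1:]:
            pair[1] = "1" + pair[1]   # right branch
        heapq.heappush(heap, [lo[0] + hi[0]] + lo[1:] + hi[1:])
    return sorted(heapq.heappop(heap)[1:], key=lambda p: (len(p[1]), p[0]))
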
Code example #10
def main():
	k=10

	if(k<=50):
		k=50/k
	elif k==100:
		k=2-(99/50)
	else:
		k=2-(k/50)

	original_file_name=sys.argv[1]
	original_file_object=cv2.imread(original_file_name)
	original_file_object=cv2.cvtColor(original_file_object, cv2.COLOR_BGR2YCR_CB)
	
	x=original_file_object.shape[0]
	y=original_file_object.shape[1]
	if(x>8):
		x=int(x/8)
		x=x*8
	if(y>8):
		y=int(y/8)
		y=y*8

	original_file_object=cv2.resize(original_file_object,(y,x))

	y,cr,cb=cv2.split(original_file_object)

	y_dct=np.zeros(y.shape,dtype=np.int32)
	cr_dct=np.zeros(cr.shape,dtype=np.int32)
	cb_dct=np.zeros(cb.shape,dtype=np.int32)
	y_idct=np.zeros(y.shape,dtype=np.uint8)
	cr_idct=np.zeros(cr.shape,dtype=np.uint8)
	cb_idct=np.zeros(cb.shape,dtype=np.uint8)
	# print(cr_new.shape)

	for i in range(0,cb.shape[0],8):
		for j in range(0,cb.shape[1],8):
			# print(str(i)+" "+str(j))

			# if(cb.shape[0]-i<0 or cb.shape[1]-j<0)
				# break

			cr_dct[i:i+8,j:j+8]=transform(cr[i:i+8,j:j+8],k,1)
			cb_dct[i:i+8,j:j+8]=transform(cb[i:i+8,j:j+8],k,1)
			y_dct[i:i+8,j:j+8]=transform(y[i:i+8,j:j+8],k,0)

			# print(y[i:i+8,j:j+8])
			# print(y_dct[i:i+8,j:j+8])
			# print("yes")

			cr_idct[i:i+8,j:j+8]=itransform(cr_dct[i:i+8,j:j+8],k,1)
			cb_idct[i:i+8,j:j+8]=itransform(cb_dct[i:i+8,j:j+8],k,1)
			y_idct[i:i+8,j:j+8]=itransform(y_dct[i:i+8,j:j+8],k,0)

	# print(cb_dct)

	# for i in range(0,cb.shape[0],8):
	# 	for j in range(0,cb.shape[1],8):
	# 		# print(str(i)+" "+str(j))
	# 		cr_idct[i:i+8,j:j+8]=itransform(cr_dct[i:i+8,j:j+8],k,1)
	# 		cb_idct[i:i+8,j:j+8]=itransform(cb_dct[i:i+8,j:j+8],k,1)
	# 		y_idct[i:i+8,j:j+8]=itransform(y_dct[i:i+8,j:j+8],k,0)

			# print(cr_idct[i:i+8,j:j+8])
			# print(cr[i:i+8,j:j+8]-cr_idct[i:i+8,j:j+8])
	# print(y)
	# print(y-y_idct)
	# print(cr)
	# print(cr-cr_idct)
	# print(cb)
	# print(cb_idct)
	# co

	# print(y)
	# print(y_idct)
	# print(cr)
	# print(cr_idct)
	# print(cb)
	# print(cb_idct)

	# print(y_dct)
	# print(zigzag(y_dct))
	# print(runs(zigzag(y_dct)))
	# print(y.shape[0]*y.shape[1])
	# print(len(runs(zigzag(y_dct))))

	# print(cr_dct)
	# print(zigzag(cr_dct))
	# print(runs(zigzag(cr_dct)))
	# print(len(runs(zigzag(cr_dct))))

	# print(cb_dct)
	# print(zigzag(cb_dct))
	# print(runs(zigzag(cb_dct)))
	# print(len(runs(zigzag(cb_dct))))

	original_size=y.shape[0]*y.shape[1]*3*8
	run_y=runs(zigzag(y_dct))
	run_cb=runs(zigzag(cb_dct))
	run_cr=runs(zigzag(cr_dct))

	total_y=HuffmanKernel(run_y)
	total_cb=HuffmanKernel(run_cb)
	total_cr=HuffmanKernel(run_cr)

	new_file=total_y+total_cb+total_cr
	# print("After Run Length: ",end="")
	# print((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1]))
	# print("After Huffman: ",end="")
	# print(original_size/new_file)
	# print(original_size)
	# print(new_file)


	combined = np.dstack((y_idct, cr_idct,cb_idct))
	decoded = cv2.cvtColor(combined,cv2.COLOR_YCR_CB2BGR)
	# decoded=combined
	# print(original_file_object.shape)
	# print(decoded.shape)

	print("Compression Ratio after runlength: "+str((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1])))
	print("Compression Ratio after huffman: "+str(original_size/new_file))
	print("PSNR: "+str(psnr(combined,original_file_object)))
	print("MSE: "+str(mse(combined,original_file_object)))
	print("RMSE: "+str(rmse(combined,original_file_object)))

	# f=open("./output/res.txt",'w')
	# f.write(str((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1])))
	# f.write('\n')
	# f.write(str(original_size/new_file))
	# f.write('\n')
	# f.write(str(psnr(combined,original_file_object)))
	# f.write('\n')
	# f.write(str(mse(combined,original_file_object)))
	# f.write('\n')
	# f.write(str(rmse(combined,original_file_object)))
	# f.close()

	file2=original_file_name.split('.')[1]
	cv2.imwrite('./output/decoded'+'.'+file2,decoded)
	cv2.imwrite('./output/diff'+'.'+file2,original_file_object-combined)
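
`transform` and `itransform` (as well as `zigzag`, `runs` and `HuffmanKernel`) are not part of this snippet. They are applied to 8x8 blocks with a quality scale `k` and a flag selecting the luma or chroma path, so a plausible sketch built on cv2.dct/cv2.idct and the standard JPEG quantization tables is given below; the table choice and the exact use of `k` are assumptions, not the project's confirmed code.

import cv2
import numpy as np

# Standard JPEG quantization tables (an assumed choice for this sketch).
Q_LUMA = np.array([
    [16, 11, 10, 16, 24, 40, 51, 61],
    [12, 12, 14, 19, 26, 58, 60, 55],
    [14, 13, 16, 24, 40, 57, 69, 56],
    [14, 17, 22, 29, 51, 87, 80, 62],
    [18, 22, 37, 56, 68, 109, 103, 77],
    [24, 35, 55, 64, 81, 104, 113, 92],
    [49, 64, 78, 87, 103, 121, 120, 101],
    [72, 92, 95, 98, 112, 100, 103, 99],
], dtype=np.float32)
Q_CHROMA = np.array([
    [17, 18, 24, 47, 99, 99, 99, 99],
    [18, 21, 26, 66, 99, 99, 99, 99],
    [24, 26, 56, 99, 99, 99, 99, 99],
    [47, 66, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
    [99, 99, 99, 99, 99, 99, 99, 99],
], dtype=np.float32)


def transform(block, k, is_chroma):
    # Hypothetical forward step: level-shift, 8x8 DCT, quantize with the table scaled by k.
    Q = (Q_CHROMA if is_chroma else Q_LUMA) * k
    coeffs = cv2.dct(block.astype(np.float32) - 128.0)
    return np.round(coeffs / Q).astype(np.int32)


def itransform(quantized, k, is_chroma):
    # Hypothetical inverse step: dequantize, inverse DCT, undo the level shift.
    Q = (Q_CHROMA if is_chroma else Q_LUMA) * k
    pixels = cv2.idct(quantized.astype(np.float32) * Q) + 128.0
    return np.clip(np.round(pixels), 0, 255).astype(np.uint8)
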