Example #1
def image_quality_evaluation(sr_filename: str, hr_filename: str, device: torch.device = "cpu"):
    """Image quality evaluation function.

    Args:
        sr_filename (str): Image file name after super resolution.
        hr_filename (str): Original high resolution image file name.
        device (torch.device, optional): Device on which to run the LPIPS evaluation. (Default: ``cpu``).

    Returns:
        mse, rmse, psnr, ssim, msssim, niqe, sam, vifp and lpips values for the given image pair.
    """
    # Reference sources from `https://github.com/richzhang/PerceptualSimilarity`
    lpips_loss = lpips.LPIPS(net="vgg", verbose=False).to(device)
    # Evaluate performance
    sr = cv2.imread(sr_filename)
    hr = cv2.imread(hr_filename)

    # For LPIPS evaluation
    sr_tensor = opencv2tensor(sr, device)
    hr_tensor = opencv2tensor(hr, device)

    # Complete estimate.
    mse_value = mse(sr, hr)
    rmse_value = rmse(sr, hr)
    psnr_value = psnr(sr, hr)
    ssim_value = ssim(sr, hr)
    msssim_value = msssim(sr, hr)
    niqe_value = niqe(sr_filename)
    sam_value = sam(sr, hr)
    vifp_value = vifp(sr, hr)
    lpips_value = lpips_loss(sr_tensor, hr_tensor)
    return mse_value, rmse_value, psnr_value, ssim_value, msssim_value, niqe_value, sam_value, vifp_value, lpips_value
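
# A minimal usage sketch (not from the original project): it assumes the function
# above is importable together with the helpers it relies on (the sewar metrics,
# lpips and the opencv2tensor conversion); the file names below are placeholders.
if __name__ == "__main__":
    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    results = image_quality_evaluation("sr.png", "hr.png", device)  # hypothetical file names
    mse_value, rmse_value, psnr_value, *rest = results
    print(f"MSE: {mse_value:.2f}  RMSE: {rmse_value:.2f}  PSNR: {psnr_value:.2f} dB")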
Example #2
def get_metrics(GT, reconstructed, mode=False):
    if mode:
        pass  # `mode` is accepted but currently unused
    _psnr = psnr(np.array(GT), np.array(reconstructed))
    _ssim = ssim(np.array(GT), np.array(reconstructed))
    # _niqe = niqe(np.array(reconstructed))
    return _psnr, _ssim[0], 0  # trailing 0 stands in for the disabled NIQE score
Example #3
def evaluate(GT, P):

    score = {'rmse': 1e9, 'psnr': 0, 'ssim': 0, 'hough': 1e9}
    for ratio in np.arange(1.0, 1.3, 0.05):
        GT_enlarge = enlarge_and_crop(GT, ratio, 256)
        score['rmse'] = min(score['rmse'], rmse(GT_enlarge, P))
        score['psnr'] = max(score['psnr'], psnr(GT_enlarge, P))
        score['ssim'] = max(score['ssim'], ssim(GT_enlarge, P)[0])
        score['hough'] = min(score['hough'], Hough_score(GT_enlarge, P))
    print(score)
    return score
Example #4
def get_metrics(compression_model: BaseModel, original_image: np.ndarray) -> Dict[str, float]:
    compressed_image = compression_model.compress(original_image)
    decompressed_image = compression_model.decompress(compressed_image)
    return {
        "mse": mse(original_image, decompressed_image),
        "ssim": ssim(original_image, decompressed_image)[0],
        "vif-p": vifp(original_image, decompressed_image),
        "psnr-b": psnrb(original_image, decompressed_image),
        "psnr": psnr(original_image, decompressed_image),
        "original_image_size": asizeof.asizeof(original_image),
        "compressed_image_size": asizeof.asizeof(compressed_image),
        "compression_ratio": asizeof.asizeof(compressed_image) / asizeof.asizeof(original_image)
    }
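
# A toy usage sketch (an assumption, not part of the original project):
# ToyDownsampleModel is a hypothetical stand-in for a real BaseModel subclass;
# it "compresses" by 2x nearest-neighbour down/upsampling so that every metric
# in the dictionary returned by get_metrics above stays finite.
import numpy as np

class ToyDownsampleModel:
    """Hypothetical lossy codec: keep every second pixel, rebuild by nearest neighbour."""

    def compress(self, image: np.ndarray) -> np.ndarray:
        return image[::2, ::2].copy()

    def decompress(self, compressed: np.ndarray) -> np.ndarray:
        return np.repeat(np.repeat(compressed, 2, axis=0), 2, axis=1)

example_image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
print(get_metrics(ToyDownsampleModel(), example_image))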
Example #5
def plot_predict(low_reso_imgs, high_reso_imgs, srgan_model, idx, n_imgs):
    plt.figure(figsize=(12, 12))
    plt.tight_layout()
    for i in range(0, n_imgs * 3, 3):
        # idx = np.random.randint(0, low_reso_imgs.shape[0] - 1)
        plt.subplot(n_imgs, 3, i + 1)

        h_img = high_reso_imgs[idx]

        plt.imshow(h_img)
        plt.grid(False)
        plt.axis('off')
        #plt.title('Source')
        plt.title('Original')
        plt.subplot(n_imgs, 3, i + 2)
        plt.imshow(
            cv2.resize(low_reso_imgs[idx], (256, 256),
                       interpolation=cv2.INTER_LINEAR))
        plt.grid(False)
        plt.axis('off')
        #plt.title('X4 (bicubic)')
        plt.title('low resolution (bicubic)')
        img = srgan_model.generator.predict(
            np.expand_dims(low_reso_imgs[idx], axis=0) / 127.5 - 1)
        img_unnorm = (img + 1) * 127.5

        plt.subplot(n_imgs, 3, i + 3)
        plt.imshow(np.squeeze(img_unnorm, axis=0).astype(np.uint8))

        prd_val_psnr = psnr(h_img, img_unnorm[0])
        prd_val_ssim = ssim(h_img, img_unnorm[0])

        plt.title('SRGAN_result')
        plt.ylabel('PSNR/SSIM')
        plt.xlabel(str(prd_val_psnr) + '/' + str(prd_val_ssim))

        img_rgb = cv2.cvtColor(
            np.squeeze(img_unnorm, axis=0).astype(np.uint8), cv2.COLOR_BGR2RGB)
        cv2.imwrite('predict_img' + str(idx) + '.png', img_rgb)
        #plt.grid('off')
        #plt.axis('off')

    plt.savefig('predicted_' + str(idx) + '.png')
Example #6
def obtain_similarity_metrics(GT_img, distorted_img):
    # MEAN SQUARED ERROR
    mse_value = mse(GT_img, distorted_img)
    # STRUCTURAL SIMILARITY
    ssim_value = ssim(GT_img, distorted_img)
    # PEAK SIGNAL TO NOISE RATIO
    psnr_value = psnr(GT_img, distorted_img)
    # ROOT MEAN SQUARED ERROR
    rmse_value = rmse(GT_img, distorted_img)
    # VISUAL INFORMATION FIDELITY
    vif_value = vifp(GT_img, distorted_img)
    # UNIVERSAL IMAGE QUALITY INDEX
    uqi_value = uqi(GT_img, distorted_img)
    # MULTI-SCALE STRUCTURAL SIMILARITY INDEX
    msssim_value = msssim(GT_img, distorted_img)
    # PSNR-HVS-M  &  PSNR-HVS
    p_hvs_m, p_hvs = psnrhmam.color_psnrhma(GT_img, distorted_img)

    return mse_value, ssim_value, psnr_value, rmse_value, vif_value, uqi_value, msssim_value, p_hvs_m, p_hvs
Example #7
    def calculate_ssim_psnr(self, img1, img2):
        self.input1 = cv2.imread(img1)
        self.input2 = cv2.imread(img2)

        self.ssim_result.append(ssim(self.input1, self.input2))
        self.psnr_result.append(psnr(self.input1, self.input2))
Example #8
def psnr_scores():
    i = 0
    for filename in os.listdir(with_coloration_path):
        i += 1
        gt_name = filename[:filename.find(
            '_')] + '.png'  # up to the first occurrence of the underscore
        # print(gt_name)
        # print(gt_path + gt_name)
        gt_img = cv2.imread(gt_path + gt_name)
        im_path = with_coloration_path + filename
        inp_img = cv2.imread(im_path)
        predicted_path = get_path(inp_img, 117)  # threshold=117
        ws['A{}'.format(i + 1)] = filename
        ws['B{}'.format(i + 1)] = psnr(gt_img, inp_img)
        dehazenet_image = cv2.imread(
            dehazenet_coloration_path +
            '{}_finalWithoutCLAHE.jpg'.format(filename[:-4]))
        psnr_dehaze = psnr(gt_img, dehazenet_image)
        ws['C{}'.format(i + 1)] = psnr_dehaze

        if predicted_path == 1:
            ws['D{}'.format(i + 1)] = psnr(
                gt_img, apply_CLAHE(adaptive_enhance(inp_img)))
        else:
            ws['D{}'.format(i + 1)] = psnr_dehaze

        # Comparison with raw input image scores
        if ws['D{}'.format(i + 1)].value > ws['B{}'.format(i + 1)].value:
            ws['E{}'.format(i + 1)] = "Yes"
        elif ws['D{}'.format(i + 1)].value < ws['B{}'.format(i + 1)].value:
            ws['E{}'.format(i + 1)] = "No"
        else:
            ws['E{}'.format(i + 1)] = "Equal"

        # Comparison with DehazeNet scores
        if ws['D{}'.format(i + 1)].value > ws['C{}'.format(i + 1)].value:
            ws['F{}'.format(i + 1)] = "Yes"
        elif ws['D{}'.format(i + 1)].value < ws['C{}'.format(i + 1)].value:
            ws['F{}'.format(i + 1)] = "No"
        else:
            ws['F{}'.format(i + 1)] = "Equal"

        print("PSNR computed for: ", filename)

    for filename in os.listdir(without_coloration_path):
        i += 1
        gt_name = filename[:filename.find(
            '_')] + '.png'  # up to the first occurrence of the underscore
        gt_img = cv2.imread(gt_path + gt_name)
        im_path = without_coloration_path + filename
        inp_img = cv2.imread(im_path)
        predicted_path = get_path(inp_img, 117)  # threshold=117
        ws['A{}'.format(i + 1)] = filename
        ws['B{}'.format(i + 1)] = psnr(gt_img, inp_img)
        dehazenet_image = cv2.imread(
            dehazenet_without_coloration_path +
            '{}_finalWithoutCLAHE.jpg'.format(filename[:-4]))
        psnr_dehaze = psnr(gt_img, dehazenet_image)
        ws['C{}'.format(i + 1)] = psnr_dehaze

        if predicted_path == 1:
            ws['D{}'.format(i + 1)] = psnr(
                gt_img, apply_CLAHE(adaptive_enhance(inp_img)))
        else:
            ws['D{}'.format(i + 1)] = psnr_dehaze

        # Comparison with raw input image scores
        if ws['D{}'.format(i + 1)].value > ws['B{}'.format(i + 1)].value:
            ws['E{}'.format(i + 1)] = "Yes"
        elif ws['D{}'.format(i + 1)].value < ws['B{}'.format(i + 1)].value:
            ws['E{}'.format(i + 1)] = "No"
        else:
            ws['E{}'.format(i + 1)] = "Equal"

        # Comparison with DehazeNet scores
        if ws['D{}'.format(i + 1)].value > ws['C{}'.format(i + 1)].value:
            ws['F{}'.format(i + 1)] = "Yes"
        elif ws['D{}'.format(i + 1)].value < ws['C{}'.format(i + 1)].value:
            ws['F{}'.format(i + 1)] = "No"
        else:
            ws['F{}'.format(i + 1)] = "Equal"

        print("PSNR computed for: ", filename)

    wb.save(excel_path)
    print("Script complete and excel saved!")
Example #9
msssim_img=full_ref.msssim(ref_img, img, weights=[0.0448, 0.2856, 0.3001, 0.2363, 0.1333], ws=11, K1=0.01, K2=0.03, MAX=None)

print("MSSSIM: multi-scale structural similarity index = ", msssim_img)


##############################################################################
#PSNR
"""calculates peak signal-to-noise ratio (psnr).

	:param GT: first (original) input image.
	:param P: second (deformed) input image.
	:param MAX: maximum value of datarange (if None, MAX is calculated using image dtype).

	:returns:  float -- psnr value in dB.
	"""
psnr_img=full_ref.psnr(ref_img, img, MAX=None)

print("PSNR: peak signal-to-noise ratio = ", psnr_img)


##########################################################################
#PSNRB: Calculates PSNR with Blocking Effect Factor for a given pair of images (PSNR-B)
"""Calculates PSNR with Blocking Effect Factor for a given pair of images (PSNR-B)

	:param GT: first (original) input image in YCbCr format or Grayscale.
	:param P: second (corrected) input image in YCbCr format or Grayscale.
	:return: float -- psnr_b.
	"""
#psnrb_img = full_ref.psnrb(ref_img, img)

#print("PSNRB: peak signal-to-noise ratio with blocking effect = ", psnrb_img)
Example #10
        #img_temp = visuals['SR']
        sr_img = util.tensor2img(visuals['SR'])  # uint8

        #pdb.set_trace()

        if need_HR:  # load GT image and calculate psnr
            gt_img = util.tensor2img(visuals['HR'])

            crop_border = test_loader.dataset.opt['scale']
            cropped_sr_img = sr_img[crop_border:-crop_border,
                                    crop_border:-crop_border, :]
            cropped_gt_img = gt_img[crop_border:-crop_border,
                                    crop_border:-crop_border, :]
            #psnr = util.psnr(cropped_sr_img, cropped_gt_img)
            psnr = fr.psnr(cropped_sr_img, cropped_gt_img, MAX=1.0)
            #ssim = util.ssim(cropped_sr_img, cropped_gt_img, multichannel=True)
            ssim = fr.ssim(cropped_sr_img, cropped_gt_img, MAX=1.0)[0]
            #ssim = fr.uqi(cropped_sr_img, cropped_gt_img)
            #niqe = nr.niqe(cropped_sr_img)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            #test_results['niqe'].append(niqe)

            #pdb.set_trace()
            if gt_img.shape[2] == 3:  # RGB image
                cropped_sr_img_y = bgr2ycbcr(cropped_sr_img, only_y=True)
                cropped_gt_img_y = bgr2ycbcr(cropped_gt_img, only_y=True)
                #psnr_y = util.psnr(cropped_sr_img_y, cropped_gt_img_y)
                #ssim_y = util.ssim(cropped_sr_img_y, cropped_gt_img_y, multichannel=False)
                psnr_y = fr.psnr(cropped_sr_img_y, cropped_gt_img_y, MAX=1.0)
Example #11
    def run(self):
        encrypted=None
        running = True
        while running:
            data = ''
            try:
                if self.buff.count(';')>=4:
                    data=self.buff                   
                else:
                    data = self.clientSocket.recv(1024);
                    if len(data)>0:
                        data = data.decode('UTF-8')
                        data = self.buff + data
                    else:
                        data=''
                if data:                             
                    tmp = parse(data)
                    self.buff=tmp[1]
                    tmp=tmp[0]
                    err = []
                    if tmp[0]!=None:
                        if tmp[0]=='login':
                            try:
                                self.server.userLock.acquire()
                                s='logok;;;;'
                                if tmp[1] in self.server.users:
                                    s='logfail;;;;'
                                else:
                                    self.server.users[tmp[1]]=self.clientSocket
                                echodata = bytes(s,'UTF-8')
                                self.server.clientsLocks[self.clientSocket].acquire()
                                self.clientSocket.send(echodata)
                            except:
                                err.append(self.clientSocket)
                            finally:
                                self.server.userLock.release()
                                self.server.clientsLocks[self.clientSocket].release()

                            if s == 'logok;;;;':
                                self.userUpdate()

                        elif tmp[0]=='msg':
                            secret1 = lsb.hide("/Users/SalmaOssama/Desktop/1.png", tmp[3])
                            secret1.save("/Users/SalmaOssama/Desktop/hidden.png")
                            st=None

                            with open("/Users/SalmaOssama/Desktop/hidden.png", "rb") as imageFile:
                                st = base64.b64encode(imageFile.read())
                            i1 = cv2.imread("/Users/SalmaOssama/Desktop/1.png")
                            i2 = cv2.imread("/Users/SalmaOssama/Desktop/hidden.png")
                            fidelity = psnr(i1,i2)
                            print("Fidelity")
                            print(fidelity)  
                            print (os.stat('/Users/SalmaOssama/Desktop/1.png').st_size) 
                            capacity=  (os.stat('/Users/SalmaOssama/Desktop/1.png').st_size)/ ((os.stat('/Users/SalmaOssama/Desktop/1.png').st_size)*8)
                            print('Capacity')
                            print(capacity)
                            s='msg;'+tmp[1]+';'+tmp[2]+';'+tmp[3]+';' +'!##!'
                            echodata = bytes(s,'UTF-8') +st +bytes('##endoffile##','UTF-8')
                            if tmp[2]=='ALL':
                                for user in self.server.users.values():
                                    try:
                                        self.server.clientsLocks[user].acquire()
                                        user.send(echodata)                          
                                    except:
                                        err.append(user)
                                    finally:
                                        self.server.clientsLocks[user].release()
                            else:
                                try:
                                    self.server.clientsLocks[self.server.users[tmp[1]]].acquire()
                                    self.server.users[tmp[1]].send(echodata)
                                except:
                                    err.append(self.server.users[tmp[1]])
                                finally:
                                    self.server.clientsLocks[self.server.users[tmp[1]]].release()
                                if tmp[1]!=tmp[2]:                              
                                    try:
                                        self.server.clientsLocks[self.server.users[tmp[2]]].acquire()
                                        self.server.users[tmp[2]].send(echodata)
                                    except:
                                        err.append(self.server.users[tmp[2]])
                                    finally:
                                        self.server.clientsLocks[self.server.users[tmp[2]]].release()

                        elif tmp[0]=='logout':
                            running = False
                            self.server.clean_client(self.clientSocket)
                            self.userUpdate()
                            break
                        elif tmp[0]=='password':
                            key= (tmp[1][2:len(tmp[1])-1]).encode()
                            encrypted= ((tmp[2][2:]) + (tmp[3][:len(tmp[3])-1])).encode()

         

                            f = Fernet(key)
                            decrypted = f.decrypt(encrypted)

 
                                      
                    if len(err):
                        self.server.clean_clients(err)  
                        self.userUpdate()
                                    
                else:
                    running = False
                    self.server.clean_client(self.clientSocket)
                    self.userUpdate()
                    break
                    
            except:
                self.server.clean_client(self.clientSocket)
                self.userUpdate()
                running = False
                break
Example #12
def main():

    prepare()
    file = sys.argv[1]
    file = cv2.imread(file)
    file = cv2.cvtColor(file, cv2.COLOR_BGR2RGB)
    original_file_size = file.shape[0] * file.shape[1] * file.shape[2]

    r, g, b = cv2.split(file)

    symb2freq_r = defaultdict(int)
    symb2freq_g = defaultdict(int)
    symb2freq_b = defaultdict(int)
    dic_r = {}
    dic_g = {}
    dic_b = {}
    dic_r_rev = {}
    dic_g_rev = {}
    dic_b_rev = {}
    # np_r=np.zeros(256)
    # np_b=np.zeros(256)
    # np_g=np.zeros(256)

    for x in r:
        for y in x:
            symb2freq_r[y] += 1

    huff_r = HuffmanEncoder(symb2freq_r)
    # print(r)
    exp_len_r = 0
    # entropy_r=0
    # print("Symbol\tFrequency\tProbability\tHuffman Code")
    for p in huff_r:
        # print ("%s\t%s\t\t%s\t\t%s" % (p[0], symb2freq_r[p[0]],symb2freq_r[p[0]]/(r.shape[0]*r.shape[1]),len(p[1])))
        dic_r[p[0]] = p[1]
        dic_r_rev[p[1]] = p[0]

        # prob=(symb2freq_r[p[0]]/(r.shape[0]*r.shape[1]))
        # exp_len_r=exp_len_r+prob*len(p[1])
        # prob_inv=1/prob
        # entropy=entropy+(prob*math.log((prob_inv),2))
        # print((symb2freq_r[p[0]]/(r.shape[0]*r.shape[1])))
        # print(len(p[1]))
        # print(entropy)
        # print(prob_inv)
        # print(math.log((prob_inv),2))

        # np_r[p[0]]=symb2freq_r[p[0]]

    # print("Expected Length: "+str(exp_len_r))
    # print("Entropy: "+str(entropy))

    for x in g:
        for y in x:
            symb2freq_g[y] += 1

    huff_g = HuffmanEncoder(symb2freq_g)
    # print("Symbol\tWeight\tHuffman Code")
    for p in huff_g:
        # print ("%s\t%s\t%s" % (p[0], symb2freq_g[p[0]], p[1]))
        dic_g[p[0]] = p[1]
        dic_g_rev[p[1]] = p[0]

        # np_g[p[0]]=symb2freq_g[p[0]]

    for x in b:
        for y in x:
            symb2freq_b[y] += 1

    huff_b = HuffmanEncoder(symb2freq_b)
    # print("Symbol\tWeight\tHuffman Code")
    for p in huff_b:
        # print ("%s\t%s\t%s" % (p[0], symb2freq_b[p[0]], p[1]))
        dic_b[p[0]] = p[1]
        dic_b_rev[p[1]] = p[0]

        # np_b[p[0]]=symb2freq_b[p[0]]

    # print(len(dic_b))

    file_r = open("./output/r.txt", "w+")
    file_g = open("./output/g.txt", "w+")
    file_b = open("./output/b.txt", "w+")
    total = 0

    for x in r:
        for y in x:
            total = total + len(dic_r[y])
            # print(dic_r[y])
            file_r.write(dic_r[y])
            file_r.write('\n')

            # print(total)

    for x in g:
        for y in x:
            # file_r.write(str(y)+" ")
            total = total + len(dic_g[y])
            file_g.write(dic_g[y])
            file_g.write('\n')

    for x in b:
        for y in x:
            # file_r.write(str(y)+" ")
            total = total + len(dic_b[y])
            file_b.write(dic_b[y])
            file_b.write('\n')

    file_r.close()
    file_b.close()
    file_g.close()

    file_r = open("./output/r.txt", "r")
    file_g = open("./output/g.txt", "r")
    file_b = open("./output/b.txt", "r")

    red = np.zeros((r.shape[0], r.shape[1]), dtype=np.uint8)
    blue = np.zeros((b.shape[0], b.shape[1]), dtype=np.uint8)
    green = np.zeros((g.shape[0], g.shape[1]), dtype=np.uint8)

    i = 0
    j = 0

    for line in file_r:
        line = line.rstrip()
        # line=line.split()[1]
        red[i][j] = np.uint8(dic_r_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == r.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    i = 0
    j = 0

    for line in file_g:
        line = line.rstrip()
        # line=line.split()[1]
        green[i][j] = np.uint8(dic_g_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == g.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    i = 0
    j = 0

    for line in file_b:
        line = line.rstrip()
        # line=line.split()[1]
        blue[i][j] = np.uint8(dic_b_rev[line])
        # print(str(i)+" "+str(j)+" "+line+" "+str(dic_r_rev[line]))
        if (j == b.shape[1] - 1):
            i = i + 1
            j = 0
        else:
            j = j + 1

    combined = np.dstack((red, green, blue))

    print("PSNR: " + str(psnr(combined, file)))
    print("MSE: " + str(mse(combined, file)))
    print("RMSE: " + str(rmse(combined, file)))
    ratio = (original_file_size * 8) / total
    print("Compression Ratio: " + str(ratio))

    combined = cv2.cvtColor(combined, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./output/combined.png', combined)

    combined_d = np.dstack((r - red, g - green, b - blue))
    # print(np.min(r-red))
    # print(np.max(r-red))
    # print(np.min(g-green)
    # print(b-blue)

    combined_d = cv2.cvtColor(combined_d, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./output/dif.png', combined_d)
Example #13
def main(sample):
    input_file_name = sys.argv[1]
    original_file_object = cv2.imread(input_file_name)
    original_file_size = original_file_object.shape[
        0] * original_file_object.shape[1] * original_file_object.shape[2]

    converted_image = cv2.cvtColor(original_file_object, cv2.COLOR_BGR2YCR_CB)
    y, cr_original, cb_original = cv2.split(converted_image)

    steps = FetchSteps(sample)

    # cr_new=np.zeros((a,b))

    cr_new = cr_original[::steps[0], ::steps[1]]
    cb_new = cb_original[::steps[0], ::steps[1]]

    # for

    compressed_file_size = y.shape[0] * y.shape[1] + cb_new.shape[
        0] * cb_new.shape[1] + cr_new.shape[0] * cr_new.shape[1]

    cr_de = np.repeat(cr_new, steps[0], axis=0)
    cr_de = np.repeat(cr_de, steps[1], axis=1)

    cb_de = np.repeat(cb_new, steps[0], axis=0)
    cb_de = np.repeat(cb_de, steps[1], axis=1)

    while (y.shape[0] != cr_de.shape[0]):
        cr_de = np.delete(cr_de, cr_de.shape[0] - 1, 0)

    while (y.shape[0] != cb_de.shape[0]):
        cb_de = np.delete(cb_de, cb_de.shape[0] - 1, 0)

    while (cb_original.shape[1] != cb_de.shape[1]):
        cb_de = np.delete(cb_de, cb_de.shape[0] - 1, 1)

    while (cr_original.shape[1] != cr_de.shape[1]):
        cr_de = np.delete(cr_de, cr_de.shape[0] - 1, 1)

    # print(y.shape)
    # print(cb_original.shape)
    # print(cr_original.shape)
    # print(cb_de.shape)
    # print(cr_de.shape)

    combined = np.dstack((y, cr_de, cb_de))
    decoded = cv2.cvtColor(combined, cv2.COLOR_YCR_CB2BGR)
    difference = np.dstack((y - y, cr_original - cr_de, cb_original - cb_de))

    # file1=input_file_name.split[0]
    # print(type(input_file_name))
    file2 = input_file_name.split('.')[1]
    a = sample[0]
    b = sample[1]
    c = sample[2]
    add = str(a) + str(b) + str(c)

    cv2.imwrite('./output/decoded' + add + '.' + file2, decoded)
    cv2.imwrite('./output/decoded_diff_' + add + '.' + file2, difference)
    ratio = (original_file_size) / compressed_file_size
    print("Compression Ratio: " + str(ratio))
    # print("psnr")
    print("PSNR: " + str(psnr(combined, converted_image)))
    print("MSE: " + str(mse(combined, converted_image)))
    print("RMSE: " + str(rmse(combined, converted_image)))
Example #14
end_time = time.time()

vutils.save_image(lr, "lr.png")
vutils.save_image(sr, "sr.png")
vutils.save_image(hr, "hr.png")

# Evaluate performance
src_img = cv2.imread("sr.png")
dst_img = cv2.imread("hr.png")

# Reference sources from `https://github.com/richzhang/PerceptualSimilarity`
lpips_loss = lpips.LPIPS(net="vgg").to(device)

mse_value = mse(src_img, dst_img)
rmse_value = rmse(src_img, dst_img)
psnr_value = psnr(src_img, dst_img)
ssim_value = ssim(src_img, dst_img)
ms_ssim_value = msssim(src_img, dst_img)  # returns a complex value, e.g. (30.00+0j)
niqe_value = cal_niqe("sr.png")
sam_value = sam(src_img, dst_img)
vif_value = vifp(src_img, dst_img)
lpips_value = lpips_loss(sr, hr)

print("\n")
print("====================== Performance summary ======================")
print(
    f"MSE: {mse_value:.2f}\n"
    f"RMSE: {rmse_value:.2f}\n"
    f"PSNR: {psnr_value:.2f}\n"
    f"SSIM: {ssim_value[0]:.4f}\n"
    f"MS-SSIM: {ms_ssim_value.real:.4f}\n"
Example #15
    out_image_y = Image.fromarray(np.uint8(out_image_y[0]), mode="L")

    out_img_cb = cb.resize(out_image_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_image_y.size, Image.BICUBIC)
    out_img = Image.merge("YCbCr",
                          [out_image_y, out_img_cb, out_img_cr]).convert("RGB")
    # before converting the result in RGB
    out_img.save(f"result/{filename}")

    # Evaluate performance
    src_img = cv2.imread(f"result/{filename}")
    dst_img = cv2.imread(f"{target}/{filename}")

    total_mse_value += mse(src_img, dst_img)
    total_rmse_value += rmse(src_img, dst_img)
    total_psnr_value += psnr(src_img, dst_img)
    total_ssim_value += ssim(src_img, dst_img)
    total_ms_ssim_value += msssim(src_img, dst_img)
    total_niqe_value += cal_niqe(f"result/{filename}")
    total_sam_value += sam(src_img, dst_img)
    total_vif_value += vifp(src_img, dst_img)

    total_file += 1

print(f"Avg MSE: {total_mse_value / total_file:.2f}\n"
      f"Avg RMSE: {total_rmse_value / total_file:.2f}\n"
      f"Avg PSNR: {total_psnr_value / total_file:.2f}\n"
      f"Avg SSIM: {total_ssim_value / total_file:.4f}\n"
      f"Avg MS-SSIM: {total_ms_ssim_value / total_file:.4f}\n"
      f"Avg NIQE: {total_niqe_value / total_file:.2f}\n"
      f"Avg SAM: {total_sam_value / total_file:.4f}\n"
Example #16
for img_name in test_image_names:

    img_name = img_name[0:-4]  # for real vs foggy
    # img_name = img_name[0:-19]
    # print(img_name)

    normal_image = scipy.misc.imread(first_images_dir + str(img_name) +
                                     '_fake_B.png',
                                     mode="RGB")
    foggy_image = scipy.misc.imread(foggy_images_dir + str(img_name) +
                                    '_fake_B.png',
                                    mode="RGB")

    diff_mse = mse(normal_image, foggy_image)
    diff_ssim = ssim(normal_image, foggy_image)
    diff_psnr = psnr(normal_image, foggy_image)

    diff_mse_list += [diff_mse]
    diff_psnr_list += [diff_psnr]
    diff_ssim_list += [diff_ssim]

# print(diff_mse_list)
print(diff_ssim_list)
# print(diff_psnr_list)

#Get average, max and min
print("MSE stats: ")
print(np.mean(np.array(diff_mse_list)))
print(max(diff_mse_list))
print(min(diff_mse_list))
print(np.std(np.array(diff_mse_list)))
Example #17
from sewar.full_ref import ergas
from sewar.full_ref import scc
from sewar.full_ref import rase
from sewar.full_ref import sam
from sewar.full_ref import msssim
from sewar.full_ref import vifp

img1 = cv2.imread("ssim/png_source.png")
img2 = cv2.imread("ssim/png_sr.png")

print("Metricas\n")

uqi = uqi(img1, img2)
print("uqi: ", uqi)

psnr = psnr(img1, img2)
print("psnr: ", psnr)

ssim = ssim(img1, img2)
print("ssim: ", ssim)

mse = mse(img1, img2)
print("mse: ", mse)

rmse_sw = rmse_sw(img1, img2)
# print("rmse_sw: ", rmse_sw)

ergas = ergas(img1, img2)
print("ergas: ", ergas)

scc = scc(img1, img2)
Example #18
def main():

    prepare()

    original_file_name = sys.argv[1]
    original_file_object = cv2.imread(original_file_name)
    original_file_object = cv2.cvtColor(original_file_object,
                                        cv2.COLOR_BGR2RGB)
    original_file_size = original_file_object.shape[
        0] * original_file_object.shape[1] * original_file_object.shape[2]
    # original_file_object=cv2.resize(original_file_object,(512, 512))

    r, g, b = cv2.split(original_file_object)
    # original_file_object.clear()

    final_r = []
    final_b = []
    final_g = []
    count = 0

    for i in range(0, r.shape[0]):

        temp_r = []
        temp_b = []
        temp_g = []
        count_r = 0
        count_b = 0
        count_g = 0

        for j in range(0, r.shape[1] - 1):
            r_ = r[i][j]
            r__ = r[i][j + 1]

            # print(str(r_)+" "+str(r__))

            if r_ == r__:
                count_r = count_r + 1
            else:
                if count_r > 0:
                    temp_r.append((r_, count_r + 1))
                    # temp_r.append(_)
                    count_r = 0
                    # count=count+3
                else:
                    count_r = 0
                    temp_r.append(r_)
                    # count=count+1

            b_ = b[i][j]
            b__ = b[i][j + 1]

            if b_ == b__:
                count_b = count_b + 1
            else:
                if count_b > 0:
                    temp_b.append((b_, count_b + 1))
                    # temp_r.append(_)
                    count_b = 0
                    # count=count+3
                else:
                    count_b = 0
                    temp_b.append(b_)
                    # count=count+1

            g_ = g[i][j]
            g__ = g[i][j + 1]

            if g_ == g__:
                count_g = count_g + 1
            else:
                if count_g > 0:
                    temp_g.append((g_, count_g + 1))
                    # temp_r.append(_)
                    count_g = 0
                    # count=count+3
                else:
                    count_g = 0
                    temp_g.append(g_)
                    # count=count+1

        if count_r > 0:
            temp_r.append((r_, count_r + 1))
        else:
            temp_r.append(r__)

        if count_b > 0:
            temp_b.append((b_, count_b + 1))
        else:
            temp_b.append(b__)

        if count_g > 0:
            temp_g.append((g_, count_g + 1))
        else:
            temp_g.append(g__)

        final_r.append(temp_r)
        final_b.append(temp_b)
        final_g.append(temp_g)

        count = count + len(temp_r) + len(temp_b) + len(temp_g)

    # print(r)
    # print(final_r)
    # final_size=len(final_r)+len(final_g)+len(final_b)
    # print(final_size)
    # print(original_file_size)
    ratio = original_file_size / count
    # ratios.append(ratio)

    recon_r = []
    recon_g = []
    recon_b = []

    for x in final_r:
        ttt = []
        for y in x:
            if (isinstance(y, tuple)):
                for i in range(0, y[1]):
                    ttt.append(y[0])
            else:
                ttt.append(y)

        recon_r.append(ttt)

    for x in final_g:
        ttt = []
        for y in x:
            if (isinstance(y, tuple)):
                for i in range(0, y[1]):
                    ttt.append(y[0])
            else:
                ttt.append(y)

        recon_g.append(ttt)

    for x in final_b:
        ttt = []
        for y in x:
            if (isinstance(y, tuple)):
                for i in range(0, y[1]):
                    ttt.append(y[0])
            else:
                ttt.append(y)

        recon_b.append(ttt)

    # print(len(recon_r[1]))

    nrecon_r = np.array(recon_r)
    nrecon_b = np.array(recon_b)
    nrecon_g = np.array(recon_g)

    # print(nrecon_r.shape)
    # print(nrecon_b.shape)
    # print(nrecon_g.shape)

    file2 = original_file_name.split('.')[1]

    combined = np.dstack((nrecon_r, nrecon_g, nrecon_b))
    difference = np.dstack((r - nrecon_r, g - nrecon_g, b - nrecon_b))
    combined = cv2.cvtColor(combined, cv2.COLOR_RGB2BGR)
    difference = cv2.cvtColor(difference, cv2.COLOR_RGB2BGR)
    cv2.imwrite('./output/decoded' + '.' + file2, combined)
    cv2.imwrite('./output/decoded_diff_' + '.' + file2, difference)
    original_file_object = cv2.cvtColor(original_file_object,
                                        cv2.COLOR_RGB2BGR)

    print("Ratio")
    print(ratio)
    print("MSE")
    print(mse(original_file_object, combined))
    print("psnr")
    print(psnr(original_file_object, combined))
Example #19
    #save

    os.makedirs("test_data/fake_visible", exist_ok = True)
    cv2.imwrite("test_data/fake_visible/" + vis_path , fake_A[0][:,:,::-1] * 255)

    totol_metric_dict_matched = {"mse":0.0,"rmse":0.0,"uqi":0.0,"ssim":0.0,"psnr":0.0,"psnrb":0.0,"vifp":0.0}  # accumulated metric totals

true_path = "test_data/visible"
fake_path = "test_data/fake_visiblee"

lenth = len(os.listdir(true_path))

for true_name,fake_name in zip(os.listdir(true_path),os.listdir(fake_path)):
	true = cv2.imread(os.path.join(true_path,true_name))
	fake = cv2.imread(os.path.join(fake_path,fake_name))

	metric_dict_matched = {"mse":mse(fake,true),"rmse":rmse(fake,true),"uqi":uqi(fake,true),"ssim":ssim(fake,true)[0] \
	   				,"psnr":psnr(fake,true),"psnrb":psnrb(fake,true),"vifp":vifp(fake,true)}
	for key,value in metric_dict_matched.items():
		totol_metric_dict_matched[key] = totol_metric_dict_matched[key]+value

for key,value in totol_metric_dict_matched.items():
	totol_metric_dict_matched[key] /= lenth
print(totol_metric_dict_matched)
#path = ["train_data/" + method + "_infrared","train_data/" + method + "_visible"]
path = [true_path,fake_path]
fid_value = fid.calculate_fid_given_paths(path, inception_path = None, low_profile=False)
print("FID: ", fid_value)  
print("done")
Example #20
def main():
	k=10

	if(k<=50):
		k=50/k
	elif k==100:
		k=2-(99/50)
	else:
		k=2-(k/50)

	original_file_name=sys.argv[1]
	original_file_object=cv2.imread(original_file_name)
	original_file_object=cv2.cvtColor(original_file_object, cv2.COLOR_BGR2YCR_CB)
	
	x=original_file_object.shape[0]
	y=original_file_object.shape[1]
	if(x>8):
		x=int(x/8)
		x=x*8
	if(y>8):
		y=int(y/8)
		y=y*8

	original_file_object=cv2.resize(original_file_object,(y,x))

	y,cr,cb=cv2.split(original_file_object)

	y_dct=np.zeros(y.shape,dtype=int)
	cr_dct=np.zeros(cr.shape,dtype=int)
	cb_dct=np.zeros(cb.shape,dtype=int)
	y_idct=np.zeros(y.shape,dtype=np.uint8)
	cr_idct=np.zeros(cr.shape,dtype=np.uint8)
	cb_idct=np.zeros(cb.shape,dtype=np.uint8)
	# print(cr_new.shape)

	for i in range(0,cb.shape[0],8):
		for j in range(0,cb.shape[1],8):
			# print(str(i)+" "+str(j))

			# if(cb.shape[0]-i<0 or cb.shape[1]-j<0)
				# break

			cr_dct[i:i+8,j:j+8]=transform(cr[i:i+8,j:j+8],k,1)
			cb_dct[i:i+8,j:j+8]=transform(cb[i:i+8,j:j+8],k,1)
			y_dct[i:i+8,j:j+8]=transform(y[i:i+8,j:j+8],k,0)

			# print(y[i:i+8,j:j+8])
			# print(y_dct[i:i+8,j:j+8])
			# print("yes")

			cr_idct[i:i+8,j:j+8]=itransform(cr_dct[i:i+8,j:j+8],k,1)
			cb_idct[i:i+8,j:j+8]=itransform(cb_dct[i:i+8,j:j+8],k,1)
			y_idct[i:i+8,j:j+8]=itransform(y_dct[i:i+8,j:j+8],k,0)

	# print(cb_dct)

	# for i in range(0,cb.shape[0],8):
	# 	for j in range(0,cb.shape[1],8):
	# 		# print(str(i)+" "+str(j))
	# 		cr_idct[i:i+8,j:j+8]=itransform(cr_dct[i:i+8,j:j+8],k,1)
	# 		cb_idct[i:i+8,j:j+8]=itransform(cb_dct[i:i+8,j:j+8],k,1)
	# 		y_idct[i:i+8,j:j+8]=itransform(y_dct[i:i+8,j:j+8],k,0)

			# print(cr_idct[i:i+8,j:j+8])
			# print(cr[i:i+8,j:j+8]-cr_idct[i:i+8,j:j+8])
	# print(y)
	# print(y-y_idct)
	# print(cr)
	# print(cr-cr_idct)
	# print(cb)
	# print(cb_idct)
	# co

	# print(y)
	# print(y_idct)
	# print(cr)
	# print(cr_idct)
	# print(cb)
	# print(cb_idct)

	# print(y_dct)
	# print(zigzag(y_dct))
	# print(runs(zigzag(y_dct)))
	# print(y.shape[0]*y.shape[1])
	# print(len(runs(zigzag(y_dct))))

	# print(cr_dct)
	# print(zigzag(cr_dct))
	# print(runs(zigzag(cr_dct)))
	# print(len(runs(zigzag(cr_dct))))

	# print(cb_dct)
	# print(zigzag(cb_dct))
	# print(runs(zigzag(cb_dct)))
	# print(len(runs(zigzag(cb_dct))))

	original_size=y.shape[0]*y.shape[1]*3*8
	run_y=runs(zigzag(y_dct))
	run_cb=runs(zigzag(cb_dct))
	run_cr=runs(zigzag(cr_dct))

	total_y=HuffmanKernel(run_y)
	total_cb=HuffmanKernel(run_cb)
	total_cr=HuffmanKernel(run_cr)

	new_file=total_y+total_cb+total_cr
	# print("After Run Length: ",end="")
	# print((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1]))
	# print("After Huffman: ",end="")
	# print(original_size/new_file)
	# print(original_size)
	# print(new_file)


	combined = np.dstack((y_idct, cr_idct,cb_idct))
	decoded = cv2.cvtColor(combined,cv2.COLOR_YCR_CB2BGR)
	# decoded=combined
	# print(original_file_object.shape)
	# print(decoded.shape)

	print("Compression Ratio after runlength: "+str((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1])))
	print("Compression Ratio after huffman: "+str(original_size/new_file))
	print("PSNR: "+str(psnr(combined,original_file_object)))
	print("MSE: "+str(mse(combined,original_file_object)))
	print("RMSE: "+str(rmse(combined,original_file_object)))

	# f=open("./output/res.txt",'w')
	# f.write(str((original_size/8)/(run_y[1]+run_cb[1]+run_cr[1])))
	# f.write('\n')
	# f.write(str(original_size/new_file))
	# f.write('\n')
	# f.write(str(psnr(combined,original_file_object)))
	# f.write('\n')
	# f.write(str(mse(combined,original_file_object)))
	# f.write('\n')
	# f.write(str(rmse(combined,original_file_object)))
	# f.close()

	file2=original_file_name.split('.')[1]
	cv2.imwrite('./output/decoded'+'.'+file2,decoded)
	cv2.imwrite('./output/diff'+'.'+file2,original_file_object-combined)