Example #1
def psnr(file1, file2, width, height, frames, start=0, channel='Y'):
    """
    Calculate PSNR for each frame
    :param file1: YUV file1 path
    :param file2: YUV file2 path
    :param width: width of the YUV
    :param height: height of the YUV
    :param frames: end frame index (exclusive); with the default start=0 this equals the number of frames compared
    :param start: start frame index (default=0)
    :param channel: 'Y', 'U', 'V', or 'YUV'
    :return: list of PSNR values, one per frame
    """
    psnr_list = []

    for frame in range(start, frames):
        yuv1, y1, u1, v1 = yuv.read_yuv420_frame(file1, width, height, frame)
        yuv2, y2, u2, v2 = yuv.read_yuv420_frame(file2, width, height, frame)

        if channel == 'Y':
            psnr = cv2.PSNR(y1, y2)
        elif channel == 'U':
            psnr = cv2.PSNR(u1, u2)
        elif channel == 'V':
            psnr = cv2.PSNR(v1, v2)
        else:
            psnr = cv2.PSNR(yuv1, yuv2)

        psnr_list.append(psnr)

    return psnr_list
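A minimal usage sketch for the helper above, with hypothetical file names and dimensions (the yuv reader module and cv2 are assumed to be imported as in the example):

per_frame = psnr("reference.yuv", "decoded.yuv", width=1920, height=1080,
                 frames=30, channel='Y')
print(sum(per_frame) / len(per_frame))  # average luma PSNR over the first 30 frames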
Example #2
def predict_image(_path):
    srcnn_model = predict_model()
    srcnn_model.load_weights("SRCNN_weights.h5")

    names = os.listdir(_path)
    names = sorted(names)
    nums = len(names)
    pre_input = 0
    pre_output = 0
    for i in range(nums):
        name = _path + names[i]
        if name.endswith(".jpg"):
            hr_img = misc.imread(name, mode='RGB')
            IMG_NAME = name
            INPUT_NAME = os.path.splitext(name)[0] + "_input.jpg"
            BICUBIC_NAME = os.path.splitext(name)[0] + "_bicubic.jpg"
            OUTPUT_NAME = os.path.splitext(name)[0] + "_predicted.jpg"

            img = misc.imread(IMG_NAME, mode='RGB')
            shape = img.shape
            Y_img = misc.imresize(img[:, :, 0],
                                  (shape[0] // SCALE, shape[1] // SCALE))

            Y_img = misc.imresize(Y_img, (shape[0], shape[1]), "nearest")
            img[:, :, 0] = Y_img
            misc.imsave(INPUT_NAME, img)
            Y_img = misc.imresize(Y_img, (shape[0], shape[1]), "bicubic")

            img[:, :, 0] = Y_img
            misc.imsave(BICUBIC_NAME, img)

            Y = numpy.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
            Y[0, :, :, 0] = Y_img.astype(float) / 255.
            pre = srcnn_model.predict(Y, batch_size=1) * 255.

            pre = pre.astype(numpy.uint8)
            img[6:-6, 6:-6, 0] = pre[0, :, :, 0]
            misc.imsave(OUTPUT_NAME, img)

            # psnr calculation:
            im1 = misc.imread(INPUT_NAME)
            im2 = misc.imread(OUTPUT_NAME)
            imHR = hr_img  # compare against the original high-resolution image read above

            input_PSNR = cv2.PSNR(imHR, im1)
            output_PSNR = cv2.PSNR(imHR, im2)

            if pre_output < output_PSNR:
                pre_input = input_PSNR
                pre_output = output_PSNR
                demo = IMG_NAME

    print "PSNR HR - INPUT"
    print pre_input
    print "PSNR HR - OUTPUT"
    print pre_output
    print demo
Example #3
def predict(model, img_path, result_path):
    srcnn_model = predict_model()
    srcnn_model.load_weights(model)
#    srcnn_model.load_weights("SRCNN_check_building.h5")
#    IMG_NAME = "/home/mark/Engineer/SR/data/Set14/flowers.bmp"
    img_names = os.listdir(img_path)
    names = []
    bicubic = []
    SRCNN = []

    for img_name in img_names:
        name, form = os.path.splitext(img_name)
#        hr_img_name = name + '_hr' + form
        lr_img_name = name + '_lr' + form
        sr_img_name = name + '_sr' + form

        img = cv2.imread(os.path.join(img_path, img_name), cv2.IMREAD_COLOR)
        img_hr = np.copy(img)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        shape = img.shape
        # pass interpolation by keyword: the third positional argument of cv2.resize is dst
        Y_img = cv2.resize(img[:, :, 0], (shape[1] // 2, shape[0] // 2),
                           interpolation=cv2.INTER_CUBIC)
        Y_img = cv2.resize(Y_img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
        img[:, :, 0] = Y_img
        img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
        cv2.imwrite(os.path.join(result_path,lr_img_name), img)
        img_lr = np.copy(img)

        Y = np.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
        Y[0, :, :, 0] = Y_img.astype(float) / 255.
        pre = srcnn_model.predict(Y, batch_size=1) * 255.
        pre[pre[:] > 255] = 255
        pre[pre[:] < 0] = 0
        pre = pre.astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        img[6: -6, 6: -6, 0] = pre[0, :, :, 0]
        img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
        cv2.imwrite(os.path.join(result_path,sr_img_name), img)
        img_sr = np.copy(img)

        # psnr calculation:
        im1 = cv2.imread(os.path.join(img_path, img_name), cv2.IMREAD_COLOR)
        im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]
        im2 = cv2.imread(os.path.join(result_path,lr_img_name), cv2.IMREAD_COLOR)
        im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]
        im3 = cv2.imread(os.path.join(result_path,sr_img_name), cv2.IMREAD_COLOR)
        im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]

        imgs = [img_hr, img_lr, img_sr]
        result_img_compare_save(imgs, os.path.join(result_path, name+'.png'))

        names.append(name)
        bicubic.append(cv2.PSNR(im1, im2))
        SRCNN.append(cv2.PSNR(im1, im3))
        stats = [names, bicubic, SRCNN]
    return stats
Example #4
def predict():
    srcnn_model = Model(input_shape=(None, None, 1))
    srcnn_model.SRCNN.load_weights("./checkpoint/saved-model-200.h5")

    IMG_NAME = "./Test/Temp/butterfly_GT.bmp"
    INPUT_NAME = "./result/bicubic.png"
    OUTPUT_NAME = "./result/SRCNN.png"

    img = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    shape = img.shape
    Y_img = cv2.resize(img[:, :, 0], (shape[1] // 2, shape[0] // 2),
                       interpolation=cv2.INTER_CUBIC)
    Y_img = cv2.resize(Y_img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
    img[:, :, 0] = Y_img
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(INPUT_NAME, img)

    Y = numpy.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
    Y[0, :, :, 0] = Y_img.astype(float) / 255.
    pre = srcnn_model.SRCNN.predict(Y, batch_size=1) * 255.
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(numpy.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    img[6: -6, 6: -6, 0] = pre[0, :, :, 0]
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(OUTPUT_NAME, img)

    # psnr calculation:
    im1 = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]
    im2 = cv2.imread(INPUT_NAME, cv2.IMREAD_COLOR)
    im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]
    im3 = cv2.imread(OUTPUT_NAME, cv2.IMREAD_COLOR)
    im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2YCrCb)[6: -6, 6: -6, 0]

    original = mpimg.imread(IMG_NAME)
    bicubic = mpimg.imread(INPUT_NAME)
    bicubic_snr = cv2.PSNR(im1, im2)
    srcnn = mpimg.imread(OUTPUT_NAME)
    srcnn_snr = cv2.PSNR(im1, im3)

    fig, axs = plt.subplots(2, 2)
    axs[0, 0].imshow(original)
    axs[0, 0].set_title('Original / PSNR')
    axs[0, 0].axis('off')
    axs[0, 1].imshow(bicubic)
    axs[0, 1].set_title('Bicubic / %.2f dB' % (bicubic_snr))
    axs[0, 1].axis('off')

    axs[1, 1].imshow(srcnn)
    axs[1, 1].set_title('SRCNN / %.2f dB' % (srcnn_snr))
    axs[1, 1].axis('off')
    plt.show()
Example #5
def predict():
    srcnn_model = model()
    srcnn_model.load_weights(imp.NMD)
    IMG_NAME = "Test/Set5/baby_GT.bmp"
    INPUT_NAME = "input2.jpg"
    OUTPUT_NAME = "pre2.jpg"

    import cv2
    img = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    im1 = img
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    shape = img.shape
    Y_img = cv2.resize(img[:, :, 0],
                       (shape[1] // imp.scale, shape[0] // imp.scale),
                       interpolation=cv2.INTER_CUBIC)
    Y_img = cv2.resize(Y_img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
    img[:, :, 0] = Y_img
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    im2 = img
    cv2.imwrite(INPUT_NAME, img)

    Y = npy.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
    Y[0, :, :, 0] = Y_img.astype(float) / 255.
    pre = srcnn_model.predict(Y, batch_size=1) * 255.
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(npy.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    img[imp.margin:-imp.margin, imp.margin:-imp.margin, 0] = pre[0, :, :, 0]
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    im3 = img
    cv2.imwrite(OUTPUT_NAME, img)

    plt.figure(figsize=(20, 20))
    plt.subplot(1, 3, 1)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(im1)
    plt.xlabel("qwq")
    plt.subplot(1, 3, 2)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(im2)
    plt.xlabel("bicubic:{}".format(cv2.PSNR(im1, im2)))
    plt.subplot(1, 3, 3)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(im3)
    plt.xlabel("SRCNN:{}".format(cv2.PSNR(im1, im3)))
    plt.show()
Example #6
def prepare_data(data_folder_path):
    #------STEP-1--------
    #get the directories (one directory for each subject) in data folder
    dirs = os.listdir(data_folder_path)

    #let's go through each directory and read images within it
    for dir_name in dirs:

        #our subject directories start with letter 's' so
        #ignore any non-relevant directories if any
        if not dir_name.startswith("s"):
            continue

        #build path of directory containing images for the current subject
        #sample subject_dir_path = "training-data/s1"
        subject_dir_path = data_folder_path + "/" + dir_name

        #get the images names that are inside the given subject directory
        subject_images_names = os.listdir(subject_dir_path)

        #------STEP-3--------
        #go through each image name, read image,
        #detect face and add face to list of faces
        count = 0
        for image_name in subject_images_names:
            print(count)
            count += 1
            for t in [.05, .10, .15, .20, .25, .30, .35, .40]:
                #ignore system files like .DS_Store
                if not image_name.endswith("png"):
                    continue

                image_path = subject_dir_path + "/" + image_name

                image = cv2.cvtColor(cv2.imread(image_path),
                                     cv2.COLOR_BGR2GRAY)

                nimage = noise(image, thresh=t)
                psnr_noise = cv2.PSNR(image, nimage)

                #cv2.imshow(str(psnr_noise), np.hstack((image,nimage)))
                #cv2.waitKey(0)
                remove_artifacts(nimage)
                psnr_cleaned = cv2.PSNR(image, nimage)
                #cv2.imshow(str(psnr_cleaned), np.hstack((image,nimage)))
                #cv2.waitKey(0)
                #print((psnr_noise, psnr_cleaned))
                psnrs[t].append((psnr_noise, psnr_cleaned))
Example #7
    def test_globalTM(self):
        radiance = cv.imread('../TestImg/memorial.hdr', -1)
        golden = cv.imread('../ref/p2_gtm.png')
        ldr = globalTM(radiance, scale=1.0)
        psnr = cv.PSNR(golden, ldr)
        self.assertGreaterEqual(psnr, 45)
        return psnr
Example #8
def calImgDif(image1, image2):
    # if the images are already grayscale, the conversion would raise an error
    if len(image1.shape) > 2:
        image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)

    if len(image2.shape) > 2:
        image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)

    print('image1 = ' + str(image1.shape))
    print('image2 = ' + str(image2.shape))
    print('..')

    # images of different sizes must be resized to the same size, otherwise an error is raised as well
    if image1.size > image2.size:
        image1 = cv2.resize(image1, (image2.shape[1], image2.shape[0]))
        print('image1 = ' + str(image1.shape))
    elif image1.size < image2.size:
        image2 = cv2.resize(image2, (image1.shape[1], image1.shape[0]))
        print('image2 = ' + str(image2.shape))

    (score, diff) = compare_ssim(image1, image2, full=True)
    diff = (diff * 255).astype("uint8")

    print('Mean squared error: ' + str(np.square(diff).mean()) +
          ', PSNR: ' + str(cv2.PSNR(image1, image2)))
    print('<Press ENTER to continue>')
    viewImage(diff, 'Difference image')
    input()
Example #9
def estimate_perspective_ii(level_map, img2, img3, operator_position):
    """用可部署地块选出从地图坐标到通常视角的透视矩阵。

    同名参数含义与estimate_perspective函数一致,返回值也一致,但是透视矩阵是三维的。
    """
    height, width = img2.shape[:2]
    mask = cv2.compare(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY),
                       cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY), cv2.CMP_NE)
    perspectives = [
        generate_perspective((width, height), level_map, view, True)
        for view in range(4)
    ]
    # Find the view that matches best.
    view = np.argmax([
        # PSNR is an objective measure of image quality...
        cv2.PSNR(
            mask,
            cv2.inRange(
                generate_bullet_time_buildable_mask(
                    (width, height), level_map, perspective,
                    operator_position), 16, 255))
        for perspective in perspectives
    ])
    return generate_perspective((width, height), level_map, view,
                                False), perspectives[view]
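The same select-by-PSNR pattern can be reused on its own: score each candidate against a reference with cv2.PSNR and keep the argmax. A small self-contained sketch with placeholder masks (the arrays here are arbitrary and only illustrate the call pattern):

import cv2
import numpy as np

reference = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
candidates = [np.random.randint(0, 256, (64, 64), dtype=np.uint8) for _ in range(4)]

# the higher the PSNR, the less the candidate deviates from the reference
best = int(np.argmax([cv2.PSNR(reference, c) for c in candidates]))
print(best)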
Example #10
    def test_visualize_bev(self):
        # Test the bev visualization
        context = self.dataset[0]
        lidar = context[0][-1]

        ontology = self.dataset.dataset_metadata.ontology_table.get('bounding_box_3d', None)
        class_colormap = ontology._contiguous_id_colormap
        id_to_name = ontology.contiguous_id_to_name

        w = 100
        h = int(3 * w / 4)
        bev_pixels_per_meter = 10

        img = visualize_bev([lidar],
                            class_colormap,
                            show_instance_id_on_bev=False,
                            id_to_name=id_to_name,
                            bev_font_scale=.5,
                            bev_line_thickness=2,
                            bev_metric_width=w,
                            bev_metric_height=h,
                            bev_pixels_per_meter=bev_pixels_per_meter,
                            bev_center_offset_w=25)

        assert img.shape == (h * bev_pixels_per_meter, w * bev_pixels_per_meter, 3)

        # Compare this image with a previously generated image
        gt_image_file = os.path.join(self.DGP_TEST_DATASET_DIR, 'visualization', 'bev_test_image_01.jpeg')
        gt_img = cv2.cvtColor(cv2.imread(gt_image_file), cv2.COLOR_BGR2RGB)

        assert cv2.PSNR(img, gt_img) >= 40.0
Example #11
    def test_psnr(self):
        """
        Compare against opencv and check that the psnr is above
        the minimum possible value.
        """
        import cv2

        im1 = torch.rand(100, 3, 256, 256).cuda()
        im1_uint8 = (im1 * 255).to(torch.uint8)
        im1_rounded = im1_uint8.float() / 255
        for max_diff in 10 ** torch.linspace(-5, 0, 6):
            im2 = im1 + (torch.rand_like(im1) - 0.5) * 2 * max_diff
            im2 = im2.clamp(0.0, 1.0)
            im2_uint8 = (im2 * 255).to(torch.uint8)
            im2_rounded = im2_uint8.float() / 255
            # check that our psnr matches the output of opencv
            psnr = calc_psnr(im1_rounded, im2_rounded)
            # some versions of cv2 can only take uint8 input
            psnr_cv2 = cv2.PSNR(
                im1_uint8.cpu().numpy(),
                im2_uint8.cpu().numpy(),
            )
            self.assertAlmostEqual(float(psnr), float(psnr_cv2), delta=1e-4)
            # check that all PSNRs are bigger than the minimum possible PSNR
            max_mse = max_diff ** 2
            min_psnr = 10 * math.log10(1.0 / max_mse)
            for _im1, _im2 in zip(im1, im2):
                _psnr = calc_psnr(_im1, _im2)
                self.assertGreaterEqual(float(_psnr) + 1e-6, min_psnr)
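For reference, cv2.PSNR computes 10 * log10(R**2 / MSE) with R defaulting to 255, which is what the uint8 comparison above exercises. A short sanity check against a hand-rolled version (the random arrays are only placeholders):

import cv2
import math
import numpy as np

a = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
b = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
manual = 10 * math.log10(255.0 ** 2 / mse)
print(manual, cv2.PSNR(a, b))  # should agree to within floating-point error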
Example #12
def psnr_dir(file_1, file_2, _formate, result, scale):
    psnr_mean = []
    files = glob.glob(file_1 + "/" + "*" + _formate)
    print(len(files))
    print(file_1 + "/" + "*" + _formate)

    result_txt = open(result, "w+")
    result_txt.truncate()  # clear the file
    for file in files:
        pic_name = file.split("/")[-1]
        src1 = cv2.imread(file_1 + "/" + pic_name)
        src2 = cv2.imread(file_2 + "/" + pic_name)
        h, w = src1.shape[0], src1.shape[1]
        src1 = src1[0:h - h % scale, 0:w - w % scale, :]
        print(src1.shape)
        print(src2.shape)
        if src1.shape == src2.shape:
            src1 = cv2.cvtColor(src1, cv2.COLOR_BGR2YCrCb)[:, :, 0]
            src2 = cv2.cvtColor(src2, cv2.COLOR_BGR2YCrCb)[:, :, 0]
            psnr = cv2.PSNR(src1, src2)
            psnr_mean.append(psnr)
            result_txt.write("{}".format(psnr) + "  " + file_1 + "/" + pic_name + "  " + file_2 + "/" + pic_name + "\n")
    psnr_mean = np.mean(psnr_mean)
    result_txt.write("{}".format(psnr_mean) + "\n")
    result_txt.write("-----------------------------------------")
    result_txt.close()
Example #13
    def test_gaussian(self):
        impulse = np.load('../ref/p3_impulse.npy')
        golden = np.load('../ref/p3_gaussian.npy').astype(float)
        test = gaussianFilter(impulse, 5, 15).astype(float)
        psnr = cv.PSNR(golden, test)
        self.assertGreaterEqual(psnr, 60)
        return psnr
Example #14
    def test_bilateral(self):
        step = np.load('../ref/p4_step.npy')
        golden = np.load('../ref/p4_bilateral.npy').astype(float)
        test = bilateralFilter(step, 9, 50, 10).astype(float)
        psnr = cv.PSNR(golden, test)
        self.assertGreaterEqual(psnr, 60)
        return psnr
Example #15
def video_function(base , box = "images/box1.jpg", ad = "images/ad2.jpg"):
    mag_img = base.copy()  # slicing a NumPy array only gives a view, so take an explicit copy

    box_img = cv2.imread(box)

    ad_img = cv2.imread(ad)

    p = 20
    orgHeight, orgWidth = box_img.shape[:2]
    size = (orgWidth//p,orgHeight//p)
    box_img = cv2.resize(box_img, size)

    ad_img = cv2.resize(ad_img, (box_img.shape[1], box_img.shape[0]))

    src_pts, dst_pts = get_match_features(box_img ,mag_img)
    # print(src_pts.shape)
    
    h = get_homography(src_pts, dst_pts)
    if True:
        h_inv = get_homography(dst_pts, src_pts)

        transformed_src = transform_homography(mag_img, h, box_img)
        # print("psnr:",cv2.PSNR(transformed_src, box_img))
        psnr = cv2.PSNR(transformed_src, box_img)
    # h = get_homography(dst_pts, src_pts)
    # pprint.pprint(h)

    transformed_ad = transform_homography(ad_img, h, mag_img)
    # cv2.imwrite("tmp.jpg", transformed_ad)

    output = fusion(mag_img, transformed_ad)
    return output,psnr
Example #16
def plot_generated_images(model, dim=(1, 3), figsize=(15, 5)):

    # for i in range(0,5):
    rand_nums = np.array([0, 1, 2, 3, 4])
    image_batch_hr = denormalize(x_test_hr[rand_nums])
    image_batch_lr = x_test_lr[rand_nums]
    [gen_img, gan_output] = model.predict(image_batch_lr)
    generated_image = denormalize(gen_img)
    image_batch_lr = denormalize(image_batch_lr)
    for i in range(0, 5):
        print(cv2.PSNR(image_batch_hr[i], generated_image[i]))
        #generated_image = deprocess_HR(generator.predict(image_batch_lr))

        plt.figure(figsize=figsize)

        plt.subplot(dim[0], dim[1], 1)
        plt.imshow(image_batch_lr[i], interpolation='nearest')
        plt.axis('off')

        plt.subplot(dim[0], dim[1], 2)
        plt.imshow(generated_image[i], interpolation='nearest')
        plt.axis('off')

        plt.subplot(dim[0], dim[1], 3)
        plt.imshow(image_batch_hr[i], interpolation='nearest')
        plt.axis('off')

        plt.tight_layout()
        plt.savefig('output5/gan_generated_image_epoch_%d.png' % i)
Example #17
    def test_globalTMwb(self):
        radiance = cv.imread('../TestImg/memorial.hdr', -1)
        golden = cv.imread('../ref/p5_wb_gtm.png')
        wb_hdr = whiteBalance(radiance, (457, 481), (400, 412))
        test = globalTM(wb_hdr)
        psnr = cv.PSNR(golden, test)
        self.assertGreaterEqual(psnr, 45)
        return psnr
Example #18
def main():
    args = parse_args()

    clear = np.array(Image.open(args.clear_image).convert('RGB'))
    noise = np.array(Image.open(args.noise_image).convert('RGB'))

    psnr = cv2.PSNR(clear, noise)
    print('PSNR: {}'.format(psnr))
Example #19
    def test_localTMgaussian(self):
        radiance = cv.imread('../TestImg/vinesunset.hdr', -1)
        golden = cv.imread('../ref/p3_ltm.png')
        gauhw1 = partial(gaussianFilter, N=35, sigma_s=100)
        test = localTM(radiance, gauhw1, scale=3)
        psnr = cv.PSNR(golden, test)
        self.assertGreaterEqual(psnr, 45)
        return psnr
Example #20
    def test_psnr_random_input(self):
        image_1 = np.random.random((5, 5))
        image_2 = np.random.random((5, 5))

        cv2_psnr = cv2.PSNR(image_1, image_2, R=1)
        psnr = self.psnr_func(torch.Tensor(image_1), torch.Tensor(image_2))

        assert float(cv2_psnr) == pytest.approx(float(psnr))
Example #21
    def test_localTMbilateral(self):
        radiance = np.load('../ref/p4_imgpatch.npy')
        golden = cv.imread('../ref/p4_ltm_patch.png')
        bilhw1 = partial(bilateralFilter, N=35, sigma_s=100, sigma_r=0.8)
        test = localTM(radiance, bilhw1, scale=3)
        psnr = cv.PSNR(golden, test)
        self.assertGreaterEqual(psnr, 45)
        return psnr
Example #22
def cpsnr(image1, image2, i, j):
    # compute the PSNR of the original image against the reconstructed one
    psnr = cv2.PSNR(image1, image2)
    #print(psnr)
    key = str(i)
    key2 = str(j)
    psnrdati[(key, key2)] = psnr  # store the PSNR values so they can be saved later
Example #23
def PSNR():
    image_name_1 = input(
        "Enter the name of the first image (with its extension): ")
    image_name_2 = input(
        "Enter the name of the second image (with its extension): ")
    im1 = cv2.imread(image_name_1)
    im2 = cv2.imread(image_name_2)
    psnr = cv2.PSNR(im1, im2)  # PSNR computed with the OpenCV function
    print("PSNR value: ", psnr, "dB")
Example #24
def BGR_PSNR(img_origin, img_result):

    Bo, Go, Ro = cv2.split(img_origin)
    Br, Gr, Rr = cv2.split(img_result)
    conc_origin = np.concatenate((Bo, Go, Ro))
    conc_result = np.concatenate((Br, Gr, Rr))
    psnr = cv2.PSNR(conc_origin, conc_result)

    return psnr
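Because PSNR depends only on the element-wise MSE, concatenating the split channels as above should give the same value as calling cv2.PSNR on the two 3-channel images directly, since the same set of samples is compared either way. A quick check with placeholder arrays of equal shape:

import cv2
import numpy as np

img_a = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
img_b = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)

print(cv2.PSNR(img_a, img_b))   # direct 3-channel PSNR
print(BGR_PSNR(img_a, img_b))   # per-channel concatenation from the example above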
Example #25
def predict():
    srcnn_model = predict_model()
    srcnn_model.load_weights("SRCNN_scale3.h5")  #("3051crop_weight_200.h5")
    IMG_NAME = "/home/huixin/DLProject_SRCNN/SRCNN-keras/Test/Set5/butterfly_GT.bmp"  #IMAGE PATH
    INPUT_NAME = "butterfly_bicubic_scale3.jpg"
    OUTPUT_NAME = "butterfly_SRCNN_scale3.jpg"

    #Bicubic computation
    import cv2
    scale = 3
    img = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    shape = img.shape
    Y_img = cv2.resize(img[:, :, 0], (shape[1] // scale, shape[0] // scale),
                       interpolation=cv2.INTER_CUBIC)  # integer division for the target size
    Y_img = cv2.resize(Y_img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
    img[:, :, 0] = Y_img
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(INPUT_NAME, img)

    #SRCNN computation
    Y = np.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
    Y[0, :, :, 0] = Y_img.astype(float) / 255.
    pre = srcnn_model.predict(Y, batch_size=1) * 255.
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    img[6:-6, 6:-6, 0] = pre[0, :, :, 0]
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(OUTPUT_NAME, img)

    # psnr calculation:
    im1 = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im2 = cv2.imread(INPUT_NAME, cv2.IMREAD_COLOR)
    im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im3 = cv2.imread(OUTPUT_NAME, cv2.IMREAD_COLOR)
    im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]

    print("bicubic:")
    print(cv2.PSNR(im1, im2))
    print("SRCNN:")
    print(cv2.PSNR(im1, im3))
Example #26
def predict():
    srcnn_model = predict_model()
    srcnn_model.load_weights("3051crop_weight_200.h5")
    IMG_NAME = "./wxx.png"
    INPUT_NAME = "input2.jpg"
    OUTPUT_NAME = "pre2.png"

    import cv2
    img = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    shape = img.shape
    print(shape)
    # bug fix: use // instead of / so the resize target is an integer size
    Y_img = cv2.resize(img[:, :, 0], (shape[1] // 2, shape[0] // 2),
                       interpolation=cv2.INTER_CUBIC)
    Y_img = cv2.resize(Y_img, (shape[1], shape[0]), interpolation=cv2.INTER_CUBIC)
    img[:, :, 0] = Y_img
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(INPUT_NAME, img)

    Y = numpy.zeros((1, img.shape[0], img.shape[1], 1), dtype=float)
    Y[0, :, :, 0] = Y_img.astype(float) / 255.
    pre = srcnn_model.predict(Y, batch_size=1) * 255.
    pre[pre[:] > 255] = 255
    pre[pre[:] < 0] = 0
    pre = pre.astype(numpy.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    img[6:-6, 6:-6, 0] = pre[0, :, :, 0]
    img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)
    cv2.imwrite(OUTPUT_NAME, img)

    # psnr calculation:
    im1 = cv2.imread(IMG_NAME, cv2.IMREAD_COLOR)
    im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im2 = cv2.imread(INPUT_NAME, cv2.IMREAD_COLOR)
    im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]
    im3 = cv2.imread(OUTPUT_NAME, cv2.IMREAD_COLOR)
    im3 = cv2.cvtColor(im3, cv2.COLOR_BGR2YCrCb)[6:-6, 6:-6, 0]

    print("bicubic:")
    print(cv2.PSNR(im1, im2))
    print("SRCNN:")
    print(cv2.PSNR(im1, im3))
Example #27
    def callback(slice_id):
        kspace = data[slice_id]
        img = kspace_to_image(kspace)

        kspace[var_sampling_mask] = 0
        masked = kspace_to_image(kspace)

        rec = pred[slice_id]

        # Add a header
        border_size = 20
        render = cv.hconcat((img, masked, rec))
        render = cv.copyMakeBorder(render, border_size, 0, 0, 0, cv.BORDER_CONSTANT, value=255)
        cv.putText(render, 'Original', (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
        cv.putText(render, 'Sampled (PSNR %.1f)' % cv.PSNR(img, masked), (width, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)
        cv.putText(render, 'Reconstructed (PSNR %.1f)' % cv.PSNR(img, rec), (width*2, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, color=0)

        cv.imshow(WIN_NAME, render)
        cv.waitKey(1)
Example #28
    def getPSNR(self):
        _x1 = min(int(self.x1.get()), int(self.x2.get()))
        _y1 = min(int(self.y1.get()), int(self.y2.get()))
        _x2 = max(int(self.x1.get()), int(self.x2.get()))
        _y2 = max(int(self.y1.get()), int(self.y2.get()))
        if abs(_x2 - _x1) != 0 and abs(_y2 - _y1) != 0:
            tempImg = self.img[_y1:_y2, _x1:_x2]
            resized = cv2.resize(tempImg, (self.reference.shape[1], self.reference.shape[0]))
            self.psnrVal.set(cv2.PSNR(self.reference, resized))
            return self.psnrVal.get()
Example #29
def get_psnr_mse(image_1, image_2):

    img1 = cv2.imread(image_1)
    img2 = cv2.imread(image_2)
    img2_resize = cv2.resize(img2, (360, 288))

    psnr = cv2.PSNR(img1, img2_resize)
    mse = np.mean((img1.astype(np.float64) - img2_resize.astype(np.float64)) ** 2)  # cast to float to avoid uint8 wrap-around

    return psnr, mse
Example #30
def calculate_similarity(img_1, img_2):
    '''
    Take image 1 and image 2 and calculate the mean squared error and structural similarity.
    PSNR and SSIM are not used as they are more semantic metrics. Only MSE is used from this function.
    '''
    mse_1 = mean_squared_error(img_1, img_2)
    ssim_1 = ssim(img_1, img_2)  # data_range=img_2.max() - img_2.min())
    psnr = cv2.PSNR(img_1, img_2)
    #print("PSNR:", round(psnr,4))
    print("MSE:", round(mse_1, 5))