Example #1
def main():
    parser = argparse.ArgumentParser(prog="pyssim",
                                     description="Compares two images using the SSIM metric")
    parser.add_argument('base_image', metavar='image1.png', type=argparse.FileType('rb'))
    parser.add_argument('comparison_image', metavar='image2.png', type=argparse.FileType('rb'))
    args = parser.parse_args()
    
    im1 = Image.open(args.base_image)
    im2 = Image.open(args.comparison_image)
    
    print(ssim.compute_ssim(im1, im2))
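A quick way to sanity-check the snippet above is to compare an image with a blurred copy of itself: identical inputs should score roughly 1.0 and the blurred copy noticeably less. This is only a hedged sketch; it assumes Pillow and the pyssim package are installed, that compute_ssim accepts two PIL.Image objects (as it does throughout these examples), and the file name is hypothetical.

from PIL import Image, ImageFilter
import ssim

img = Image.open('photo.png')                      # hypothetical input file
blurred = img.filter(ImageFilter.GaussianBlur(2))  # visibly degraded copy

print(ssim.compute_ssim(img, img))       # identical images -> about 1.0
print(ssim.compute_ssim(img, blurred))   # degraded copy -> noticeably lower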
Example #2
    def run_and_check_ssim_and_size(
        self,
        url,
        mediawiki_reference_thumbnail,
        perfect_reference_thumbnail,
        expected_width,
        expected_height,
        expected_ssim,
        size_tolerance,
    ):
        """Request URL and check ssim and size.

        Arguments:
        url -- thumbnail URL
        mediawiki_reference_thumbnail -- reference thumbnail file
        expected_width -- expected thumbnail width
        expected_height -- expected thumbnail height
        expected_ssim -- minimum SSIM score
        size_tolerance -- maximum file size ratio between reference and result
        perfect_reference_thumbnail -- perfect lossless version of the target thumbnail, for visual comparison
        """
        result = self.fetch(url)

        result.seek(0)

        generated = Image.open(result)

        expected_path = os.path.join(
            os.path.dirname(__file__),
            'thumbnails',
            mediawiki_reference_thumbnail
        )

        visual_expected_path = os.path.join(
            os.path.dirname(__file__),
            'thumbnails',
            perfect_reference_thumbnail
        )
        visual_expected = Image.open(visual_expected_path).convert(generated.mode)

        assert generated.size[0] == expected_width, \
            'Width differs: %d (should be == %d)\n' % (generated.size[0], expected_width)

        assert generated.size[1] == expected_height, \
            'Height differs: %d (should be == %d)\n' % (generated.size[1], expected_height)

        ssim = compute_ssim(generated, visual_expected)

        assert ssim >= expected_ssim, 'Images too dissimilar: %f (should be >= %f)\n' % (ssim, expected_ssim)

        expected_filesize = float(os.path.getsize(expected_path))
        generated_filesize = float(len(result.getvalue()))

        ratio = generated_filesize / expected_filesize
        assert ratio <= size_tolerance, \
            'Generated file bigger than size tolerance: %f (should be <= %f)' % (ratio, size_tolerance)

        return result
Example #3
    def qualify(self, image1_path, image2_path):
        print("Received %s  %s" % (image1_path, image2_path))

        image1 = Image.open(image1_path)
        image2 = Image.open(image2_path)
        score = compute_ssim(image1, image2, gaussian_kernel_sigma=1.5, gaussian_kernel_width=11)

        print "qualified", score
        return score
Example #4
File: base.py  Project: GDxU/thumbor
def get_ssim(actual, expected):
    if actual.size[0] != expected.size[0] or actual.size[1] != expected.size[1]:
        raise RuntimeError(
            "Can't calculate SSIM for images of different sizes (one is %dx%d, the other %dx%d)." % (
                actual.size[0], actual.size[1],
                expected.size[0], expected.size[1],
            )
        )

    return compute_ssim(actual, expected)
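A minimal usage sketch for the helper above, assuming two same-size local files (the names are hypothetical); if the sizes differ, the RuntimeError above is raised instead of silently comparing mismatched images.

from PIL import Image

actual = Image.open('generated_thumb.jpg')    # hypothetical file names
expected = Image.open('reference_thumb.jpg')
print(get_ssim(actual, expected))             # raises RuntimeError on a size mismatch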
Example #5
 def is_noisy(self):
     '''Compute SSIM between the image and a median-filtered (denoised) copy;
     if the similarity is greater than the threshold, return False. 1.0 == no difference.'''
     #TODO: Get rid of PIL dependency
     original = Image.open(self.filename)
     denoised = original.filter(ImageFilter.MedianFilter(3))
     similarity = compute_ssim(original, denoised)
     return similarity <= 0.75
Example #6
def threshold_check(new_pic, old_pic=None, settings=settings):
    if old_pic:
        logging.debug("Checking threshold of {} vs {}".format(
                      new_pic, old_pic))
    else:
        logging.debug("Checking threshold of {}".format(new_pic))

    if old_pic is None:
        old_pic = settings.baseline_image
        if(compute_ssim(settings.stills_folder + new_pic,
                        old_pic) > settings.threshold_percentage):
            return True
        else:
            return False

    if(compute_ssim(settings.stills_folder + new_pic,
                    settings.stills_folder + old_pic) >
       settings.threshold_percentage):
        return True

    # if we get here, nothing has gotten past the threshold percentage.
    return False
Example #7
File: base.py  Project: abaldwin1/thumbor
def get_ssim(actual, expected):
    im = Image.fromarray(actual)
    im2 = Image.fromarray(expected)

    if im.size[0] != im2.size[0] or im.size[1] != im2.size[1]:
        raise RuntimeError(
            "Can't calculate SSIM for images of different sizes (one is %dx%d, the other %dx%d)." % (
                im.size[0], im.size[1],
                im2.size[0], im2.size[1],
            )
        )

    return compute_ssim(im, im2)
Example #8
def main(name):
    img = Image.open(name)
    result_img = Image.new("RGB", img.size)
    draw = ImageDraw.Draw(result_img)
    draw.rectangle([(0, 0), result_img.size], fill=(255, 255, 255))
    max_width, max_height = result_img.size
    for i in range(6000):
        best_img = result_img.copy()
        best_score = ssim.compute_ssim(img, result_img)
        for l in range(40):
            temp_img = result_img.copy()
            draw = ImageDraw.Draw(temp_img)
            line = generate_line(max_width, max_height)
            draw.line(line, width=1, fill=(0, 0, 0))
            score = ssim.compute_ssim(img, temp_img)
            if score > best_score:
                print(i, score)
                best_score = score
                best_img = temp_img.copy()
        result_img = best_img
        if (i % 100) == 0:
            output_name = str(i).zfill(6) + '.png'
            result_img.save(output_name)
Example #9
def calculate_perceptual_speed_index(progress, directory):
    from ssim import compute_ssim
    x = len(progress)
    dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), directory)
    first_paint_frame = os.path.join(dir, "ms_{0:06d}.png".format(progress[1]["time"]))
    target_frame = os.path.join(dir, "ms_{0:06d}.png".format(progress[x - 1]["time"]))
    ssim_1 = compute_ssim(first_paint_frame, target_frame)
    per_si = float(progress[1]['time'])
    last_ms = progress[1]['time']
    # Full Path of the Target Frame
    logging.debug("Target image for perSI is %s" % target_frame)
    ssim = ssim_1
    for p in progress[1:]:
        elapsed = p['time'] - last_ms
        #print '*******elapsed %f'%elapsed
        # Full Path of the Current Frame
        current_frame = os.path.join(dir, "ms_{0:06d}.png".format(p["time"]))
        logging.debug("Current Image is %s" % current_frame)
        # Takes full path of PNG frames to compute SSIM value
        per_si += elapsed * (1.0 - ssim)
        ssim = compute_ssim(current_frame, target_frame)
        last_ms = p['time']
    return int(per_si)
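The calculate_perceptual_speed_index variants in this collection (here and in Examples #10, #13 and #15) all accumulate the same quantity: the time elapsed between consecutive frames, weighted by how far the current frame still is from the final frame (1 - SSIM). The sketch below strips that accumulation down to its core, assuming each progress entry already carries a precomputed 'ssim' score against the final frame; it is a simplification of the file-path handling above, not a drop-in replacement.

def perceptual_speed_index(progress):
    # progress: list of dicts with 'time' (ms) and 'ssim' (similarity to the final frame)
    per_si = 0.0
    last_ms = progress[0]['time']
    for p in progress:
        per_si += (p['time'] - last_ms) * (1.0 - p['ssim'])
        last_ms = p['time']
    return int(per_si)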
Example #10
 def calculate_perceptual_speed_index(self, progress):
     from ssim import compute_ssim
     x = len(progress)
     first_paint_frame = progress[1]['image_fp']
     target_frame = progress[x - 1]['image_fp']
     ssim_1 = compute_ssim(first_paint_frame, target_frame)
     per_si = float(progress[1]['time'])
     last_ms = progress[1]['time']
     # Full Path of the Target Frame
     logger.info("Target image for perSI is %s" % target_frame)
     ssim = ssim_1
     for p in progress[1:]:
         elapsed = p['time'] - last_ms
         # print '*******elapsed %f'%elapsed
         # Full Path of the Current Frame
         current_frame = p['image_fp']
         logger.info("Current Image is %s" % current_frame)
         # Takes full path of PNG frames to compute SSIM value
         per_si += elapsed * (1.0 - ssim)
         ssim = compute_ssim(current_frame, target_frame)
         gc.collect()
         last_ms = p['time']
     return int(per_si)
Example #11
def capture_baseline(settings=settings):
    '''Used to create the first image, the one that determines when to begin
    actually creating the timelapse.'''

    baseline_images = ['baseline1.jpg', 'baseline2.jpg', 'baseline3.jpg']
    # we set this here so we can remove the baseline on keyboard interrupt
    # in main()
    settings.baseline_images = baseline_images

    for picture in baseline_images:
        sleep(1)
        logging.debug("Capturing baseline image {}".format(picture))
        take_picture(picture)

    x = settings.stills_folder + baseline_images[0]
    y = settings.stills_folder + baseline_images[1]
    z = settings.stills_folder + baseline_images[2]

    if(compute_ssim(x, y) > settings.threshold_percentage):
        logging.debug("First baseline check succeeded!")
        if compute_ssim(y, z) > settings.threshold_percentage:
            logging.debug("Second baseline check succeeded!")
            if compute_ssim(z, x) > settings.threshold_percentage:
                logging.info("Successfully created baseline!")
                settings.baseline_image = settings.stills_folder +\
                    baseline_images[0]
                return True
            else:
                logging.debug("Third baseline check failed!")
                return None
        else:
            logging.debug("Second baseline check failed!")
            return None
    else:
        logging.debug("First baseline check failed!")
        return None
Example #12
def genPatchesSSIM():
	size=512
	ssimMap=[]
	genAllPatches("./patches/",3)
	for i in range(size):
		imA=Image.open("./patches/"+str(i)+".png")
		row=[]
		for j in range(size):
			imB=Image.open("./patches/"+str(j)+".png")
			simVal=ssim.compute_ssim(imA,imB)
			print("(%s %s): %s" %(i,j,simVal))
			row.append(simVal)
		ssimMap.append(row)

	f=open("./PatchSSIM.dat","wb")
	pickle.dump(ssimMap,f)
	f.close()
	pass
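Reading the pickled similarity matrix back is the natural follow-up; a short sketch, assuming PatchSSIM.dat was written by the function above:

import pickle

with open("./PatchSSIM.dat", "rb") as f:
    ssim_map = pickle.load(f)

print(len(ssim_map), len(ssim_map[0]))  # expected: 512 512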
Example #13
def calculate_perceptual_speed_index(progress):
    from ssim import compute_ssim
    x = len(progress)
    target_frame = progress[x - 1]['image_fp']
    per_si = 0.0
    last_ms = progress[0]['time']
    # Full Path of the Target Frame
    logger.info("Target image for perSI is %s" % target_frame)
    for p in progress:
        elapsed = p['time'] - last_ms
        # print '*******elapsed %f'%elapsed
        # Full Path of the Current Frame
        current_frame = p['image_fp']
        # Takes full path of PNG frames to compute SSIM value
        ssim = compute_ssim(current_frame, target_frame)
        per_si += elapsed * (1.0 - ssim)
        gc.collect()
        last_ms = p['time']
    return int(per_si)
Example #14
def relative_ssim(progress, directory):
    from ssim import compute_ssim
    ssims = []
    last_ms = progress[0]['time']
    x = len(progress)
    # Full Path of the Target Frame
    dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), directory)
    for p in progress:
        elapsed = p['time'] - last_ms
        # Full Path of the Current Frame
        current_frame = os.path.join(dir, "ms_{0:06d}.png".format(p["time"]))
        target_frame = os.path.join(dir, "ms_{0:06d}.png".format(progress[x - 1]["time"]))
        logging.debug("Target image for perSI is %s" % target_frame)
        logging.debug("Current Image is %s" % current_frame)
        # Takes full path of PNG frames to compute SSIM value
        ssim = compute_ssim(current_frame, target_frame)
        ssims.append(ssim)
        last_ms = p['time']
    return ssims
Example #15
def calculate_perceptual_speed_index(progress, directory):
    from ssim import compute_ssim
    per_si = 0
    last_ms = progress[0]['time']
    x = len(progress)
    # Full Path of the Target Frame
    dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), directory)
    target_frame = os.path.join(dir, "ms_{0:06d}.png".format(progress[x - 1]["time"]))
    logging.debug("Target image for perSI is %s" % target_frame)
    ssim = 0
    for p in progress:
        elapsed = p['time'] - last_ms
        # Full Path of the Current Frame
        current_frame = os.path.join(dir, "ms_{0:06d}.png".format(p["time"]))
        logging.debug("Current Image is %s" % current_frame)
        # Takes full path of PNG frames to compute SSIM value
        per_si += elapsed * (1.0 - ssim)
        last_ms = p['time']
        ssim = compute_ssim(current_frame, target_frame)
    return int(per_si)
Example #16
            for j in range(0, height):
                new_image[j][y + i] = random.randint(0, 255)

    return new_image


if __name__ == '__main__':
    # Download the dataset
    fashion_mnist = keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

    # Output the results
    start = time.time()
    images = aiTest(test_images, (100, 28, 28, 1))
    print("Attack time", time.time() - start)  # time spent modifying all of the input images

    count = 0
    ssim_sum = 0.0
    start = time.time()
    for i in range(len(images)):
        new_image = attack(images[i])
        if not get_true_label(new_image) == get_true_label(images[i]):
            count += 1
            ssim_sum += ssim.compute_ssim(new_image, images[i])
            print(i, ssim.compute_ssim(new_image, images[i]))
    end = time.time()
    print("输出时间", end - start)
    print("成功个数:", count, ",  成功率:", count/100, ", 平均相似度:", ssim_sum/count)

    # util.plot_images(images, test_labels, 100)
Example #17
def main():
    parser = argparse.ArgumentParser(prog="pyuvssim",
                                     description="Compares two YUV I420/IYUV raw video files using the SSIM metric")
    parser.add_argument('base_video', metavar='video1.yuv')
    parser.add_argument('comparison_video', metavar='video2.yuv')
    parser.add_argument('-W', '--width',
                        type=int,
                        action='store',
                        default=1920,
                        nargs='?',
                        help='video width in pixels')
    parser.add_argument('-H', '--height',
                        type=int,
                        action='store',
                        default=1080,
                        nargs='?',
                        help='video height in pixels')
    args = parser.parse_args()

    vid1 = args.base_video
    vid2 = args.comparison_video

    width = args.width
    height = args.height
    frame_size = width * height
    frame_weight = (frame_size * 3) // 2
    video_size = min(os.stat(vid1)[6], os.stat(vid2)[6])
    nb_frames = video_size // frame_weight

    print("Videos information:")
    print("width: {} px".format(width))
    print("height: {} px".format(height))
    print("frame size: {} px^2".format(frame_size))
    print("frame weight: {} ({})".format(utils.bytes2human(frame_weight), frame_weight))
    print("video size: {} ({})".format(utils.bytes2human(video_size), video_size))
    print("number of frames: {}\n".format(nb_frames))

    f1 = open(vid1, 'rb')
    f2 = open(vid2, 'rb')

    print("Pic #, SSIM value")
    for n in range(nb_frames):
        frame_offset = (n * frame_weight)
        im1 = Image.new("RGB", (width, height))
        im2 = Image.new("RGB", (width, height))
        pix1 = im1.load()
        pix2 = im2.load()
        # I420/IYUV: NxN Y plane, then (N/2)x(N/2) U and V planes
        for y in range(height):
            for x in range(width):
                pos_y = frame_offset + (y * width + x)
                pos_u = frame_offset + (y//2 * width//2 + x//2 + frame_size)
                pos_v = frame_offset + (y//2 * width//2 + x//2 + frame_size + frame_size//4)

                f1.seek(pos_y, 0)
                y1 = ord(f1.read(1))
                f1.seek(pos_u, 0)
                u1 = ord(f1.read(1))
                f1.seek(pos_v, 0)
                v1 = ord(f1.read(1))

                f2.seek(pos_y, 0)
                y2 = ord(f2.read(1))
                f2.seek(pos_u, 0)
                u2 = ord(f2.read(1))
                f2.seek(pos_v, 0)
                v2 = ord(f2.read(1))

                pix1[x, y] = utils.yuv2rgb(y1, u1, v1)
                pix2[x, y] = utils.yuv2rgb(y2, u2, v2)
        print("{}, {}".format(n, ssim.compute_ssim(im1, im2)))

    f1.close()
    f2.close()
Example #18
    return imgMat


'''
Main program
'''
if __name__ == '__main__':

    #First image
    imgRefMat = build_mat_from_grayscale_image("einstein.gif")
    (w, h) = (imgRefMat.shape[0], imgRefMat.shape[1])

    #First subplot
    figure()
    subplot(121)
    plt.imshow(imgRefMat, cmap=cm.gray, hold=True)

    #Second image
    imgOutMat = build_mat_from_grayscale_image("einstein-noise-it1.png")

    #Second subplot
    subplot(122)
    plt.imshow(imgOutMat, cmap=cm.gray, hold=True)
    plt.show()

    #Compute SSIM
    cSSIM = ssim.compute_ssim(imgRefMat, imgOutMat)

    print "SSIM=", cSSIM
Example #19
    gen_samples = np.concatenate((y_test, x2_test, samples, diff_imgs), axis=0)
    save_images(gen_samples, [4, batch_size],
                saves_dir + 'test_%s.jpg' % (iters))

    prev_mse.append(np.mean((x2_test - y_test)**2))
    mse.append(np.mean((samples - y_test)**2))

    target = ((y_test[0]) * 255).astype('uint8')
    source = ((x2_test[0]) * 255).astype('uint8')
    pred = ((samples[0]) * 255).astype('uint8')
    prev_psnr.append(measure.compare_psnr(source, target))
    cpsnr.append(measure.compare_psnr(pred, target))

    prev_ssim.append(
        ssim.compute_ssim(
            Image.fromarray(cv2.cvtColor(target, cv2.COLOR_RGB2BGR)),
            Image.fromarray(cv2.cvtColor(source, cv2.COLOR_RGB2BGR))))
    cssim.append(
        ssim.compute_ssim(
            Image.fromarray(cv2.cvtColor(target, cv2.COLOR_RGB2BGR)),
            Image.fromarray(cv2.cvtColor(pred, cv2.COLOR_RGB2BGR))))

prev_psnr = np.mean(np.array(prev_psnr))
prev_ssim = np.mean(np.array(prev_ssim))
prev_mse = np.mean(np.array(prev_mse))

cpsnr = np.mean(np.array(cpsnr))
cssim = np.mean(np.array(cssim))
mse = np.mean(np.array(mse))

str1 = ('prev MSE: %f\n' % prev_mse)
Example #20
def _ssim(image1, image2):
    i1 = Image.open(image1)
    i2 = Image.open(image2)

    return ssim.compute_ssim(i1, i2)
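The examples in this collection call compute_ssim in two ways: with opened PIL.Image objects, as in the wrapper above, and with plain file paths (Examples #9, #13 and #29). A hedged sketch of both forms, with hypothetical file names, assuming the installed pyssim version accepts path strings as those examples imply:

import ssim
from PIL import Image

score_from_objects = ssim.compute_ssim(Image.open('a.png'), Image.open('b.png'))
score_from_paths = ssim.compute_ssim('a.png', 'b.png')  # path strings, as in Examples #9 and #29
print(score_from_objects, score_from_paths)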
Example #21
    def ssim_compare_images(self, found_image_file, guarded_image_file):
        # Opens an image in RGB mode
        found_image = Image.open(found_image_file)
        guarded_image = Image.open(guarded_image_file)

        # Size of the image in pixels
        width_found_image, height_found_image = found_image.size
        width_guarded_image, height_guarded_image = guarded_image.size

        cropped = False
        cropped_width = False
        cropped_height = False

        if width_found_image < width_guarded_image and "crop" in str(
                guarded_image_file):
            width_cropped = width_found_image
            cropped = True
            cropped_width = True
        else:
            width_cropped = width_guarded_image

        if height_found_image < height_guarded_image and "crop" in str(
                guarded_image_file):
            height_cropped = height_found_image
            cropped = True
            cropped_height = True
        else:
            height_cropped = height_guarded_image

        if cropped:
            # Setting the points for cropped image
            if cropped_width:
                left = int((width_guarded_image - width_cropped) / 2)
                right = int(width_guarded_image -
                            ((width_guarded_image - width_cropped) / 2))
            else:
                left = 0
                right = width_guarded_image
            if cropped_height:
                top = int(height_guarded_image -
                          ((height_guarded_image - height_cropped) / 2))
                bottom = int((height_guarded_image - height_cropped) / 2)
            else:
                bottom = 0
                top = height_guarded_image
            # Cropped image of above dimension
            newsize = (width_cropped, height_cropped)
            top = top - 1
            right = right - 1
            guarded_image_non_cropped = guarded_image.resize(newsize)
            guarded_image = guarded_image.crop((left, bottom, right, top))
            guarded_image = guarded_image.resize(newsize)
        try:
            ssim_value = ssim.compute_ssim(found_image, guarded_image)
            if ssim_value > IMAGE_COMPARISON_SSIM_MATCH_THRESHOLD:
                return True
            else:
                if cropped:
                    ssim_value = ssim.compute_ssim(found_image,
                                                   guarded_image_non_cropped)
                    if ssim_value > IMAGE_COMPARISON_SSIM_MATCH_THRESHOLD:
                        return True
                return False
        except Exception as ex:
            print("Error comparing images: " + str(ex))
            return False
Example #22
            for i in range(3):
                predicted_rgb_img[:, :, i] = predicted_img[i, :, :]
                true_rgb_img[:, :, i] = true_img[i, :, :]
                diff_rgb_img[:, :, i] = diff_img[i, :, :]

            predicted_pil_img = Image.fromarray(predicted_rgb_img)
            if save_images:
                predicted_pil_img.save(opt.outf + opt.name + '-' + str(bdx) +
                                       '-' + str(idx) + '.png')
            true_pil_img = Image.fromarray(true_rgb_img)
            if save_images:
                true_pil_img.save(opt.outf + opt.name + '-' + str(bdx) + '-' +
                                  str(idx) + '-gt.png')

            # ssim
            ssim_val = compute_ssim(true_pil_img, predicted_pil_img)
            net_ssim.append(ssim_val)

            # emd
            emd_val = compute_emd(predicted_rgb_img, true_rgb_img,
                                  emd_cost_mat)
            net_emd.append(emd_val)

            if save_images:
                Image.fromarray(diff_rgb_img).save(opt.outf + opt.name + '-' +
                                                   str(bdx) + '-' + str(idx) +
                                                   '-diff.png')

    #
    all_rmse_errs.append(net_rmse)
    all_rel_errs.append(net_rel_error)
Example #23
'''
Get a 2D matrix from a PIL image, suitable for display with matplotlib
@param img: PIL image object
@return A 2D matrix
'''
def build_mat_from_grayscale_image(img):
    img=ImageOps.grayscale(img)
    imgData=img.getdata()
    imgTab=numpy.array(imgData)
    w,h=img.size
    imgMat=numpy.reshape(imgTab,(h,w))

    return imgMat

with open("config.yml", 'r') as stream:
    try:
        config = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)
print(config)
response = requests.get(config['image_1'])
img1 = Image.open(StringIO(response.content))
response = requests.get(config['image_2'])
img2 = Image.open(StringIO(response.content))

imgMat1 = build_mat_from_grayscale_image(img1)
imgMat2 = build_mat_from_grayscale_image(img2)

SSIMIndex = ssim.compute_ssim(imgMat1, imgMat2)
print "SSIM = ", SSIMIndex
Example #24
def main():
    parser = argparse.ArgumentParser(
        prog="pyuvssim",
        description=
        "Compares two YUV I420/IYUV raw video files using the SSIM metric")
    parser.add_argument('base_video', metavar='video1.yuv')
    parser.add_argument('comparison_video', metavar='video2.yuv')
    parser.add_argument('-W',
                        '--width',
                        type=int,
                        action='store',
                        default=1920,
                        nargs='?',
                        help='video width in pixels')
    parser.add_argument('-H',
                        '--height',
                        type=int,
                        action='store',
                        default=1080,
                        nargs='?',
                        help='video height in pixels')
    args = parser.parse_args()

    vid1 = args.base_video
    vid2 = args.comparison_video

    width = args.width
    height = args.height
    frame_size = width * height
    frame_weight = (frame_size * 3) // 2
    video_size = min(os.stat(vid1)[6], os.stat(vid2)[6])
    nb_frames = video_size // frame_weight

    print("Videos information:")
    print("width: {} px".format(width))
    print("height: {} px".format(height))
    print("frame size: {} px^2".format(frame_size))
    print("frame weight: {} ({})".format(utils.bytes2human(frame_weight),
                                         frame_weight))
    print("video size: {} ({})".format(utils.bytes2human(video_size),
                                       video_size))
    print("number of frames: {}\n".format(nb_frames))

    f1 = open(vid1, 'rb')
    f2 = open(vid2, 'rb')

    print("Pic #, SSIM value")
    for n in range(nb_frames):
        frame_offset = (n * frame_weight)
        im1 = Image.new("RGB", (width, height))
        im2 = Image.new("RGB", (width, height))
        pix1 = im1.load()
        pix2 = im2.load()
        # I420/IYUV: NxN Y plane, then (N/2)x(N/2) U and V planes
        for y in range(height):
            for x in range(width):
                pos_y = frame_offset + (y * width + x)
                pos_u = frame_offset + (y // 2 * width // 2 + x // 2 + frame_size)
                pos_v = frame_offset + (y // 2 * width // 2 + x // 2 +
                                        frame_size + frame_size // 4)

                f1.seek(pos_y, 0)
                y1 = ord(f1.read(1))
                f1.seek(pos_u, 0)
                u1 = ord(f1.read(1))
                f1.seek(pos_v, 0)
                v1 = ord(f1.read(1))

                f2.seek(pos_y, 0)
                y2 = ord(f2.read(1))
                f2.seek(pos_u, 0)
                u2 = ord(f2.read(1))
                f2.seek(pos_v, 0)
                v2 = ord(f2.read(1))

                pix1[x, y] = utils.yuv2rgb(y1, u1, v1)
                pix2[x, y] = utils.yuv2rgb(y2, u2, v2)
        print("{}, {}".format(n, ssim.compute_ssim(im1, im2)))

    f1.close()
    f2.close()
Example #25
 def similar_image(self, other):
     self.similarity = ssim.compute_ssim(Image.open(self.image), Image.open(other.image))
     click.echo('{:>15}: {}'.format('similarity', self.similarity))
     return self.similarity > 0.55
Example #26
unsorted_timestamps = [
    long(timestamp) for timestamp in timestamp_to_bytearray
]
sorted_timestamps = sorted(unsorted_timestamps)

last_unique_frame_index = 0

for i in xrange(len(sorted_timestamps)):
    timestamp = sorted_timestamps[i]
    prevTimestamp = sorted_timestamps[i - 1]
    frame = timestamp_to_bytearray[str(timestamp)]
    prevFrame = timestamp_to_bytearray[str(prevTimestamp)]
    frame_duration[i] = max(timestamp - prevTimestamp, 0)

    cSSIM = ssim.compute_ssim(frame, prevFrame)
    if cSSIM == 1.0:
        # seen_frames[frame] = seen_frames[frame] + 1
        frame_duration[last_unique_frame_index] = sorted_timestamps[
            i] - sorted_timestamps[last_unique_frame_index]
    else:
        # seen_frames[frame] = 1
        last_unique_frame_index = i
        # frame_duration[i] = max(sorted_timestamps[i] - sorted_timestamps[i-1], 0)

all_durations_temp = [value for poop, value in frame_duration.iteritems()]
all_durations = all_durations_temp[2:]  # ignore first two
if len(all_durations) > 1:
    average_duration = sum(all_durations) / len(all_durations)
    max_duration = max(all_durations)
    min_duration = min(all_durations)
Example #27
def main(prefix, image_size, K, T, gpu, p):
    data_path = "../data/KTH/"
    f = open(data_path + "test_data_list.txt", "r")
    testfiles = f.readlines()

    c_dim = 1

    checkpoint_dir = "../models/" + prefix + "/"
    test_model = "MCNET.model-" + str(p)

    with tf.device("/gpu:%d" % gpu[0]):
        model = MCNET(image_size=[image_size, image_size],
                      batch_size=1,
                      K=K,
                      T=T,
                      c_dim=c_dim,
                      checkpoint_dir=checkpoint_dir,
                      is_train=False)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False,
                            gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:

        tf.global_variables_initializer().run()

        loaded, model_name = model.load(sess, checkpoint_dir, test_model)

        if loaded:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed... exitting")
            return

        quant_dir = "../results/quantitative/KTH/" + prefix + "/"
        save_path = quant_dir + "results_model=" + model_name + ".npz"
        if not exists(quant_dir):
            makedirs(quant_dir)

        vid_names = []
        psnr_err = np.zeros((0, T))
        ssim_err = np.zeros((0, T))
        for i in range(len(testfiles)):
            tokens = testfiles[i].split()
            vid_path = data_path + tokens[0] + "_uncomp.avi"
            while True:
                try:
                    vid = imageio.get_reader(vid_path, "ffmpeg")
                    break
                except Exception:
                    print("imageio failed loading frames, retrying")

            action = vid_path.split("_")[1]
            if action in ["running", "jogging"]:
                n_skip = 3
            else:
                n_skip = T

            for j in range(int(tokens[1]), int(tokens[2]) - K - T - 1, n_skip):
                print("Video " + str(i) + "/" + str(len(testfiles)) +
                      ". Index " + str(j) + "/" +
                      str(vid.get_length() - T - 1))

                folder_pref = vid_path.split("/")[-1].split(".")[0]
                folder_name = folder_pref + "." + str(j) + "-" + str(j + T)
                vid_names.append(folder_name)
                savedir = "../results/images/KTH/" + prefix + "/" + folder_name

                seq_batch = np.zeros((1, image_size, image_size, K + T, c_dim),
                                     dtype="float32")
                diff_batch = np.zeros((1, image_size, image_size, K - 1, 1),
                                      dtype="float32")
                for t in range(K + T):
                    # imageio fails randomly sometimes
                    while True:
                        try:
                            img = cv2.resize(vid.get_data(j + t),
                                             (image_size, image_size))
                            break
                        except Exception:
                            print("imageio failed loading frames, retrying")

                    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                    seq_batch[0, :, :, t] = transform(img[:, :, None])

                for t in range(1, K):
                    prev = inverse_transform(seq_batch[0, :, :, t - 1])
                    next = inverse_transform(seq_batch[0, :, :, t])
                    diff = next.astype("float32") - prev.astype("float32")
                    diff_batch[0, :, :, t - 1] = diff

                true_data = seq_batch[:, :, :, K:, :].copy()
                pred_data = np.zeros(true_data.shape, dtype="float32")
                xt = seq_batch[:, :, :, K - 1]
                pred_data[0] = sess.run(model.G,
                                        feed_dict={
                                            model.diff_in: diff_batch,
                                            model.xt: xt
                                        })

                if not os.path.exists(savedir):
                    os.makedirs(savedir)

                cpsnr = np.zeros((K + T, ))
                cssim = np.zeros((K + T, ))
                pred_data = np.concatenate((seq_batch[:, :, :, :K], pred_data),
                                           axis=3)
                true_data = np.concatenate((seq_batch[:, :, :, :K], true_data),
                                           axis=3)
                for t in range(K + T):
                    pred = (inverse_transform(pred_data[0, :, :, t]) *
                            255).astype("uint8")
                    target = (inverse_transform(true_data[0, :, :, t]) *
                              255).astype("uint8")

                    cpsnr[t] = measure.compare_psnr(pred, target)
                    cssim[t] = ssim.compute_ssim(
                        Image.fromarray(
                            cv2.cvtColor(target, cv2.COLOR_GRAY2BGR)),
                        Image.fromarray(cv2.cvtColor(pred,
                                                     cv2.COLOR_GRAY2BGR)))

                    # ================================== Produce Samples ======================================
                    pred = draw_frame(pred, t < K)
                    target = draw_frame(target, t < K)

                    cv2.imwrite(
                        savedir + "/pred_" + "{0:04d}".format(t) + ".png",
                        pred)
                    cv2.imwrite(
                        savedir + "/gt_" + "{0:04d}".format(t) + ".png",
                        target)

                cmd1 = "rm " + savedir + "/pred.gif"
                cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
                        "/pred_%04d.png " + savedir + "/pred.gif")
                cmd3 = "rm " + savedir + "/pred*.png"

                system(cmd1)
                system(cmd2)
                system(cmd3)

                cmd1 = "rm " + savedir + "/gt.gif"
                cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
                        "/gt_%04d.png " + savedir + "/gt.gif")
                cmd3 = "rm " + savedir + "/gt*.png"

                system(cmd1)
                system(cmd2)
                system(cmd3)

                psnr_err = np.concatenate((psnr_err, cpsnr[None, K:]), axis=0)
                ssim_err = np.concatenate((ssim_err, cssim[None, K:]), axis=0)

        np.savez(save_path, psnr=psnr_err, ssim=ssim_err)
        print("Results saved to " + save_path)
    print("Done.")
Example #28
    def run_and_check_ssim_and_size(
        self,
        url,
        mediawiki_reference_thumbnail,
        perfect_reference_thumbnail,
        expected_width,
        expected_height,
        expected_ssim,
        size_tolerance,
    ):
        """Request URL and check ssim and size.

        Arguments:
        url -- thumbnail URL
        mediawiki_reference_thumbnail -- reference thumbnail file
        expected_width -- expected thumbnail width
        expected_height -- expected thumbnail height
        expected_ssim -- minimum SSIM score
        size_tolerance -- maximum file size ratio between reference and result
        perfect_reference_thumbnail -- perfect lossless version of the target thumbnail, for visual comparison
        """
        try:
            result = self.fetch(url)
        except Exception as e:
            assert False, 'Exception occurred: %r' % e

        assert result is not None, 'No result'
        assert result.code == 200, 'Response code: %s' % result.code

        result.buffer.seek(0)

        generated = Image.open(result.buffer)

        expected_path = os.path.join(
            os.path.dirname(__file__),
            'thumbnails',
            mediawiki_reference_thumbnail
        )

        visual_expected_path = os.path.join(
            os.path.dirname(__file__),
            'thumbnails',
            perfect_reference_thumbnail
        )
        visual_expected = Image.open(visual_expected_path).convert(generated.mode)

        assert generated.size[0] == expected_width, \
            'Width differs: %d (should be == %d)\n' % (generated.size[0], expected_width)

        assert generated.size[1] == expected_height, \
            'Height differs: %d (should be == %d)\n' % (generated.size[1], expected_height)

        ssim = compute_ssim(generated, visual_expected)

        try:
            assert ssim >= expected_ssim, 'Images too dissimilar: %f (should be >= %f)\n' % (ssim, expected_ssim)
        except AssertionError as e:
            output_file = NamedTemporaryFile(delete=False)
            output_file.write(result.buffer.getvalue())
            output_file.close()
            logger.error('Dumped generated test image for debugging purposes: %s' % output_file.name)
            raise e

        expected_filesize = float(os.path.getsize(expected_path))
        generated_filesize = float(len(result.buffer.getvalue()))

        ratio = generated_filesize / expected_filesize
        assert ratio <= size_tolerance, \
            'Generated file bigger than size tolerance: %f (should be <= %f)' % (ratio, size_tolerance)

        return result.buffer
Example #29
def get_SSIM(img_ori_path, img_de_path, count_multi):
    count_multi[0] += 1
    print("Processing image " + str(count_multi[0]) + "...")
    ob = ssim.compute_ssim(img_ori_path, img_de_path)
    return float(ob)
Example #30

dir_list = os.listdir(data_test_path)

flames_num = 10


for dir in dir_list:
    this_dic = data_test_path+'/'+dir
    file_res = open(this_dic+'/'+'error.txt','w')

    for i in range(1,flames_num):
        input = Image.open(this_dic+'/'+'input_t_%d.png'%(i))
        pre = Image.open(this_dic + '/' + 'pred_t_%d.png' %(i))

        file_res.write('input%d and pred%d  ssim  is %.4f\n'%(i,i,ssim.compute_ssim(input,pre)))

        file_res.write('input%d and pred%d   mse  is %.4f\n' % (i, i, mse(input, pre)))

    final_flame = Image.open(this_dic + '/' + 'true_next_flame.png')
    final_flame_pred = Image.open(this_dic + '/' + 'pred_t_%d.png' % (flames_num))
    file_res.write('\n')
    file_res.write('final true and pred  ssim  is %.4f\n' % ( ssim.compute_ssim(final_flame, final_flame_pred)))

    file_res.write('final true and pred   mse  is %.4f\n' % ( mse(final_flame, final_flame_pred)))

    file_res.write('\n')
    file_res.write('final true and last_flame  ssim  is %.4f\n' % (ssim.compute_ssim(final_flame, input)))

    file_res.write('final true and last_flame   mse  is %.4f\n' % (mse(final_flame, input)))
Example #31
    enc.Outcomes()
    
    print('\n****************************')
    print('* JPEG Decoder Initialized *')
    print('****************************')
    
    huffile = enc.dirOut + enc.filepath.split('/')[-1:][0].split('.')[0] + '.huff'
    dec = jpg.Decoder(huffile)
    imrec = dec._run_()
    
    luma1 = cv2.cvtColor(img, cv2.COLOR_YCR_CB2BGR)
    luma2 = cv2.cvtColor(imrec, cv2.COLOR_YCR_CB2BGR)
    imdif = (luma1[:,:,0]-luma2[:,:,0]) + 128

    print('\n- Computing SSIM...')
    SSIMout = ssim.compute_ssim(luma1[:,:,0], luma2[:,:,0])
    print('    :: SSIM = ', SSIMout[0])
    
    cv2.imshow('Original Image', img)
    cv2.imshow('Recovered Image', imrec)
    cv2.imshow('Luma Difference', imdif)
    cv2.imshow('SSIM_Map', SSIMout[1])

#    cv2.imshow('Diferenca', img-imrec)
    
    print('\n- Press <ESC> to close...')
    k = cv2.waitKey(0)
    if k == 27:                # wait for ESC key to exit
        cv2.destroyAllWindows()
    
#plt.imshow(np.uint8(luma1[:,:,0]-luma2[:,:,0]), 'gray')
Example #32
    return imgMat

'''
Main program
'''
if __name__ == '__main__':

    # First image
    imgRefMat = build_mat_from_grayscale_image("1.jpg")
    (w, h) = (imgRefMat.shape[0], imgRefMat.shape[1])
    print(avhash("1.jpg"))
    #First subplot
    #figure()
    #subplot(123)
    #plt.imshow(imgRefMat, cmap=cm.gray, hold=True)
    
    # Second image
    imgOutMat = build_mat_from_grayscale_image("2.jpg")
    print(avhash("3.jpg"))
    #Second subplot
   # subplot(123)
   # plt.imshow(imgOutMat, cmap=cm.gray, hold=True)
    
    # Compute SSIM
    cSSIM = ssim.compute_ssim(imgRefMat, imgOutMat)
    #subplot(133)
   
    #plt.imshow(cSSIM,cmap = cm.Greys_r)
    plt.show()
    print(cSSIM)
    
Example #33
def main(lr, prefix, K, T, cpu):
    data_path = "../data/UCF101/UCF-101/"
    f = open(data_path.rsplit("/", 2)[0] + "/testlist01.txt", "r")
    testfiles = f.readlines()
    image_size = [240, 320]
    c_dim = 3
    iters = 0

    if prefix == "paper_models":
        checkpoint_dir = "../models/" + prefix + "/S1M/"
        best_model = "MCNET.model-102502"
    else:
        checkpoint_dir = "../models/" + prefix + "/"
        best_model = None  # will pick last model

    with tf.device('/cpu:0'):
        model = MCNET(image_size=image_size,
                      batch_size=1,
                      K=K,
                      T=T,
                      c_dim=c_dim,
                      checkpoint_dir=checkpoint_dir,
                      is_train=False)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:

        tf.global_variables_initializer().run()

        loaded, model_name = model.load(sess, checkpoint_dir, best_model)

        if loaded:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed... exitting")
            return

        quant_dir = "../results/quantitative/UCF101/" + prefix + "/"
        save_path = quant_dir + "results_model=" + model_name + ".npz"
        if not exists(quant_dir):
            makedirs(quant_dir)

        vid_names = []
        psnr_err = np.zeros((0, T))
        ssim_err = np.zeros((0, T))
        for i in xrange(0, len(testfiles), 10):
            print(" Video " + str(i) + "/" + str(len(testfiles)))

            tokens = testfiles[i].split("/")[1].split()

            testfiles[i] = testfiles[i].replace("/HandStandPushups/",
                                                "/HandstandPushups/")

            vid_path = data_path + testfiles[i].split()[0]
            vid = imageio.get_reader(vid_path, "ffmpeg")
            folder_name = vid_path.split("/")[-1].split(".")[0]
            vid_names.append(folder_name)
            vid = imageio.get_reader(vid_path, "ffmpeg")
            savedir = "../results/images/UCF101/" + prefix + "/" + str(i + 1)

            seq_batch = np.zeros(
                (1, image_size[0], image_size[1], K + T, c_dim),
                dtype="float32")
            diff_batch = np.zeros((1, image_size[0], image_size[1], K - 1, 1),
                                  dtype="float32")
            for t in xrange(K + T):
                img = vid.get_data(t)[:, :, ::-1]
                seq_batch[0, :, :, t] = transform(img)

            for t in xrange(1, K):
                prev = inverse_transform(seq_batch[0, :, :, t - 1]) * 255
                prev = cv2.cvtColor(prev.astype("uint8"), cv2.COLOR_BGR2GRAY)
                next = inverse_transform(seq_batch[0, :, :, t]) * 255
                next = cv2.cvtColor(next.astype("uint8"), cv2.COLOR_BGR2GRAY)
                diff = next.astype("float32") - prev.astype("float32")
                diff_batch[0, :, :, t - 1] = diff[:, :, None] / 255.

            true_data = seq_batch[:, :, :, K:, :].copy()
            pred_data = np.zeros(true_data.shape, dtype="float32")
            xt = seq_batch[:, :, :, K - 1]
            pred_data[0] = sess.run(model.G,
                                    feed_dict={
                                        model.diff_in: diff_batch,
                                        model.xt: xt
                                    })

            if not os.path.exists(savedir):
                os.makedirs(savedir)

            cpsnr = np.zeros((K + T, ))
            cssim = np.zeros((K + T, ))
            pred_data = np.concatenate((seq_batch[:, :, :, :K], pred_data),
                                       axis=3)
            true_data = np.concatenate((seq_batch[:, :, :, :K], true_data),
                                       axis=3)
            for t in xrange(K + T):
                pred = (inverse_transform(pred_data[0, :, :, t]) *
                        255).astype("uint8")
                target = (inverse_transform(true_data[0, :, :, t]) *
                          255).astype("uint8")

                cpsnr[t] = measure.compare_psnr(pred, target)
                cssim[t] = ssim.compute_ssim(Image.fromarray(target),
                                             Image.fromarray(pred))

                pred = draw_frame(pred, t < K)
                target = draw_frame(target, t < K)

                cv2.imwrite(savedir + "/pred_" + "{0:04d}".format(t) + ".png",
                            pred)
                cv2.imwrite(savedir + "/gt_" + "{0:04d}".format(t) + ".png",
                            target)

            cmd1 = "rm " + savedir + "/pred.gif"
            cmd2 = ("ffmpeg -f image2 -framerate 3 -i " + savedir +
                    "/pred_%04d.png " + savedir + "/pred.gif")
            cmd3 = "rm " + savedir + "/pred*.png"

            # Comment out "system(cmd3)" if you want to keep the output images
            # Otherwise only the gifs will be kept
            system(cmd1)
            system(cmd2)
            system(cmd3)

            cmd1 = "rm " + savedir + "/gt.gif"
            cmd2 = ("ffmpeg -f image2 -framerate 3 -i " + savedir +
                    "/gt_%04d.png " + savedir + "/gt.gif")
            cmd3 = "rm " + savedir + "/gt*.png"

            # Comment out "system(cmd3)" if you want to keep the output images
            # Otherwise only the gifs will be kept
            system(cmd1)
            system(cmd2)
            system(cmd3)

            psnr_err = np.concatenate((psnr_err, cpsnr[None, K:]), axis=0)
            ssim_err = np.concatenate((ssim_err, cssim[None, K:]), axis=0)

        np.savez(save_path, psnr=psnr_err, ssim=ssim_err)
        print("Results saved to " + save_path)
    print("Done.")
Example #34
def main(lr, batch_size, alpha, beta, image_size_h, image_size_w, K, T, B, convlstm_layer_num, num_iter, gpu, cpu,
         load_pretrain, tf_record_train_dir, tf_record_test_dir, color_channel_num, dec,no_store,pixel_loss, pretrain_model,
         dyn_enc_model, reference_mode, debug, print_train_instead, dis_length, model, Unet, no_d, fade_in, use_gt, res_mode, gif_per_vid):
  data_path = "../../../data/KTH/"
  f = open(data_path + "test_data_list.txt", "r")
  testfiles = f.readlines()
  c_dim = 1
  iters = 0

  best_model = None # will pick last model
  prefix = ("KTH_convlstm"
            + "_image_size_h=" + str(image_size_h)
            + "_image_size_w=" + str(image_size_w)
            + "_K=" + str(K)
            + "_T=" + str(T)
            + "_B=" + str(B)
            + "_convlstm_layer_num=" + str(convlstm_layer_num)
            + "_dec=" + str(dec)
            + "_dis_length=" + str(dis_length)
            + "_batch_size=" + str(batch_size)
            + "_alpha=" + str(alpha)
            + "_beta=" + str(beta)
            + "_lr=" + str(lr)
            + "_model=" + str(model)
            + "_fade_in=" + str(fade_in)
            + "_no_d=" + str(no_d)
            + "_use_gt=" + str(use_gt)
            + "_res_mode=" + str(res_mode)
            + "_pixel_loss=" + str(pixel_loss)
            + "_Unet=" + str(Unet))
  checkpoint_dir = "../../models/KTH/" + prefix + "/"
  device_string = ""
  if cpu:
    device_string = "/cpu:0"
  elif gpu:
    device_string = "/gpu:%d" % gpu[0]
  with tf.device(device_string):
      # test batch size has to be 1
      model = bi_convlstm_net(dec=dec, image_size=[image_size_h, image_size_w], c_dim=color_channel_num,
                              dis_length=dis_length,
                              K=K, T=T, B=B, convlstm_layer_num=convlstm_layer_num, batch_size=1,
                              checkpoint_dir=checkpoint_dir,
                              debug=debug, reference_mode=reference_mode, model=model, Unet=Unet, use_gt=False,
                              res_mode=res_mode,
                              pixel_loss=pixel_loss)
  # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
  config = tf.ConfigProto(allow_soft_placement=True,
                          log_device_placement=False)
  config.gpu_options.allow_growth = True
  with tf.Session(config=config) as sess:

    tf.global_variables_initializer().run()
    print(checkpoint_dir)
    loaded, model_name = model.load(sess, checkpoint_dir, best_model)

    if loaded:
      print(" [*] Load SUCCESS")
    else:
      print(" [!] Load failed... exitting")
      return

    quant_dir = "../../results/quantitative/KTH/"+prefix+"/"
    save_path = quant_dir+"results_model="+model_name+".npz"
    if not exists(quant_dir):
      makedirs(quant_dir)


    vid_names = []
    psnr_err = np.zeros((0, T))
    ssim_err = np.zeros((0, T))
    for i in xrange(len(testfiles)):
      tokens = testfiles[i].split()
      vid_path = data_path+tokens[0]+"_uncomp.avi"
      while True:
        try:
          vid = imageio.get_reader(vid_path,"ffmpeg")
          break
        except Exception:
          print("imageio failed loading frames, retrying")

      action = vid_path.split("_")[1]
      if action in ["running", "jogging"]:
        n_skip = 3
      else:
        n_skip = T
      start = int(tokens[1])
      length = B * (K + T) + K
      end = int(tokens[2])-length-1
      if gif_per_vid != '-1':
          end = min(end, max(start + 1, start + (gif_per_vid - 1) * n_skip + 1))
      for j in xrange(start,end,n_skip):
        print("Video "+str(i)+"/"+str(len(testfiles))+". Index "+str(j)+
              "/"+str(vid.get_length()-length-1))

        folder_pref = vid_path.split("/")[-1].split(".")[0]
        folder_name = folder_pref+"."+str(j)+"-"+str(j+T)

        vid_names.append(folder_name)
        savedir = "../../results/images/KTH/"+prefix+"/"+folder_name

        seq_batch = np.zeros((1, image_size_h, image_size_w,
                              length, c_dim), dtype="float32")
        for t in xrange(length):

          # imageio fails randomly sometimes
          while True:
            try:
              img = cv2.resize(vid.get_data(j+t), (image_size_h, image_size_w))
              break
            except Exception:
              print("imageio failed loading frames, retrying")

          img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
          seq_batch[0,:,:,t] = transform(img[:,:,None])

        true_data = seq_batch.copy()
        pred_data = np.zeros(true_data.shape, dtype="float32")
        seq_batch_tran = create_missing_frames(seq_batch.transpose([0, 3, 1, 2, 4]),K,T)
        forward_seq = seq_batch_tran

        conv_layer_weight = np.zeros((convlstm_layer_num), dtype=np.float)
        conv_layer_weight[-1] = 1
        pred_data[0] = sess.run(model.G,
                                feed_dict={model.forward_seq: forward_seq,
                                           model.target: seq_batch,
                                           model.conv_layer_weight: conv_layer_weight,
                                           model.conv_layer_index: convlstm_layer_num - 1,
                                           model.loss_reduce_weight: conv_layer_weight[convlstm_layer_num - 1],
                                           model.is_dis: False,
                                           model.is_gen: False})

        if not os.path.exists(savedir):
          os.makedirs(savedir)

        cpsnr = np.zeros((length,))
        cssim = np.zeros((length,))
        for t in xrange(length):
          pred = (inverse_transform(pred_data[0,:,:,t])*255).astype("uint8")
          target = (inverse_transform(true_data[0,:,:,t])*255).astype("uint8")

          cpsnr[t] = measure.compare_psnr(pred,target)
          cssim[t] = ssim.compute_ssim(Image.fromarray(cv2.cvtColor(target,
                                                       cv2.COLOR_GRAY2BGR)),
                                       Image.fromarray(cv2.cvtColor(pred,
                                                       cv2.COLOR_GRAY2BGR)))
          pred = draw_frame(pred, t % (T+K) < K)
          blank = (inverse_transform(seq_batch_tran[0, t, :, :]) * 255).astype("uint8")

          cv2.imwrite(savedir+"/pred_"+"{0:04d}".format(t)+".png", pred)
          cv2.imwrite(savedir+"/gt_"+"{0:04d}".format(t)+".png", target)
          cv2.imwrite(savedir+"/blk_gt_"+"{0:04d}".format(t)+".png", blank)

        cmd1 = "rm "+savedir+"/pred.gif"
        cmd2 = ("ffmpeg -f image2 -framerate 7 -i "+savedir+
                "/pred_%04d.png "+savedir+"/pred.gif")
        cmd3 = "rm "+savedir+"/pred*.png"

        # Comment out "system(cmd3)" if you want to keep the output images
        # Otherwise only the gifs will be kept
        system(cmd1); system(cmd2); # system(cmd3);

        cmd1 = "rm "+savedir+"/gt.gif"
        cmd2 = ("ffmpeg -f image2 -framerate 7 -i "+savedir+
                "/gt_%04d.png "+savedir+"/gt.gif")
        cmd3 = "rm "+savedir+"/gt*.png"
        # Comment out "system(cmd3)" if you want to keep the output images
        # Otherwise only the gifs will be kept
        system(cmd1); system(cmd2); # system(cmd3);

        cmd1 = "rm " + savedir + "/blk_gt.gif"
        cmd2 = ("ffmpeg -f image2 -framerate 7 -i " + savedir +
              "/blk_gt_%04d.png " + savedir + "/blk_gt.gif")
        cmd3 = "rm " + savedir + "/blk_gt*.png"

        system(cmd1);system(cmd2);  # system(cmd3);
    #     print psnr_err.shape
    #     print ssim_err.shape
    #     psnr_err = np.concatenate((psnr_err, cpsnr[None,:]), axis=0)
    #     ssim_err = np.concatenate((ssim_err, cssim[None,:]), axis=0)
    #
    # np.savez(save_path, psnr=psnr_err, ssim=ssim_err)
    print("Results saved to "+save_path)
  print("Done.")
Example #35
----- version 1.0 ("ssim" library written by jterrace)
----- https://github.com/jterrace/pyssim
"""

from PIL import Image
import ssim
import glob
import csv

# ---- obtain the original image and quantized images with different numbers of colors
temp_img = Image.open('../../../../img/sky.jpg')
quantized_img_path_list = []
quantized_img_path_list = glob.glob(
    r'../../../img/sky/rgb_cs/quantized_img/*.png')
quantized_img_path_list.sort()
print(quantized_img_path_list)

# ---- ssim evaluation
score_list = []
for i in quantized_img_path_list:
    img = Image.open(i)
    score = ssim.compute_ssim(img, temp_img)
    score_list.append(score)

# ---- save ssim score to csv file
csvfile = "sky_ssim.csv"
with open(csvfile, "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in score_list:
        writer.writerow([val])
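To consume the scores later, the CSV written above can be read back with the same csv module; a small sketch, assuming sky_ssim.csv exists as produced by the loop:

import csv

with open("sky_ssim.csv", newline="") as f:
    scores = [float(row[0]) for row in csv.reader(f) if row]

print(min(scores), max(scores))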
Example #36
        starget = x_test[0]
        gen_samples = np.concatenate((starget, samples), axis=0)
        save_images(gen_samples, [2, nt], saves_dir + 'gen_%s.jpg' % (iters))

        cmse = np.zeros((nt - 1, ))
        cprev = np.zeros((nt - 1, ))
        cpsnr = np.zeros((nt - 1, ))
        cssim = np.zeros((nt - 1, ))
        for t in range(1, nt):
            cmse[t - 1] = np.mean((x_test[0, t] - x_hat[0, t])**2)
            cprev[t - 1] = np.mean((x_test[0, t] - x_test[0, t - 1])**2)
            pred = (x_hat[0, t] * 255).astype('uint8')
            target = (x_test[0, t] * 255).astype('uint8')
            cpsnr[t - 1] = measure.compare_psnr(pred, target)
            cssim[t - 1] = ssim.compute_ssim(
                Image.fromarray(cv2.cvtColor(target, cv2.COLOR_RGB2BGR)),
                Image.fromarray(cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)))

        total_mse = np.concatenate((total_mse, cmse[None, :]), axis=0)
        total_prev = np.concatenate((total_prev, cprev[None, :]), axis=0)
        total_ssim = np.concatenate((total_ssim, cssim[None, :]), axis=0)
        total_psnr = np.concatenate((total_psnr, cpsnr[None, :]), axis=0)

total_mse = np.mean(total_mse, axis=0)
total_prev = np.mean(total_prev, axis=0)
total_ssim = np.mean(total_ssim, axis=0)
total_psnr = np.mean(total_psnr, axis=0)
mean_mse = np.mean(total_mse)
mean_prev = np.mean(total_prev)
mean_ssim = np.mean(total_ssim)
mean_psnr = np.mean(total_psnr)
Example #37
    print('\n****************************')
    print('* JPEG Decoder Initialized *')
    print('****************************')

    huffile = enc.dirOut + enc.filepath.split('/')[-1:][0].split(
        '.')[0] + '.huff'
    dec = jpg.Decoder(huffile)
    imrec = dec._run_()

    luma1 = cv2.cvtColor(img, cv2.COLOR_YCR_CB2BGR)
    luma2 = cv2.cvtColor(imrec, cv2.COLOR_YCR_CB2BGR)
    imdif = (luma1[:, :, 0] - luma2[:, :, 0]) + 128

    print('\n- Computing SSIM...')
    SSIMout = ssim.compute_ssim(luma1[:, :, 0], luma2[:, :, 0])
    print('    :: SSIM = ', SSIMout[0])

    cv2.imshow('Original Image', img)
    cv2.imshow('Recovered Image', imrec)
    cv2.imshow('Luma Difference', imdif)
    cv2.imshow('SSIM_Map', SSIMout[1])

    #    cv2.imshow('Diferenca', img-imrec)

    print('\n- Press <ESC> to close...')
    k = cv2.waitKey(0)
    if k == 27:  # wait for ESC key to exit
        cv2.destroyAllWindows()

#plt.imshow(np.uint8(luma1[:,:,0]-luma2[:,:,0]), 'gray')