def plot_fingerprints(
    fps_w_keyframes, 
    input_offset, 
    no_of_fps_on_display=5):
    """Plot a window of fingerprints: keyframes on the top row (with ORB
    keypoints overlaid in green when present) and their color correlations
    on the bottom row.

    fps_w_keyframes: mapping from integer segment id to a
        (keyframe, fingerprint) pair. Assumed to have contiguous integer
        keys 0..N-1 -- TODO confirm against the producers of this map.
    input_offset: first segment id of the display window; clamped so the
        window stays inside the available range.
    no_of_fps_on_display: number of columns (fingerprints) shown at once.
    """
    from matplotlib import pyplot as plt
    from matplotlib.image import imread

    total_no_of_fps = len(fps_w_keyframes)

    # 1) Keyframe
    # 2) Color Correlation
    fig, axs = plt.subplots(2, no_of_fps_on_display)

    # Clamp the offset so the window [offset, offset + n) stays in range.
    # The max(0, ...) guards the case where fewer fingerprints exist than
    # fit on the display -- the original could produce a negative offset
    # and fail the map lookups below.
    offset = input_offset
    if offset + no_of_fps_on_display >= total_no_of_fps:
        offset = max(0, total_no_of_fps - no_of_fps_on_display)

    for i in range(no_of_fps_on_display):
        keyframe, fingerprint = fps_w_keyframes[offset + i]

        orb = fingerprint.orb

        if orb:
            # Overlay the ORB keypoints in green on the keyframe image.
            img = cv2.drawKeypoints(keyframe.image, orb.keypoints, None,
                                    color=(0, 255, 0), flags=0)
            axs[0][i].imshow(rgb(img))
        else:
            plot_keyframe(axs[0][i], keyframe)

        plot_color_correlation(axs[1][i], fingerprint.color_correlation)

        # Hide the ticks on the keyframe subplot we just drew. The original
        # called plt.axis('off'), which only affects matplotlib's *current*
        # axes -- not necessarily the subplot drawn in this iteration.
        axs[0][i].axis('off')

    plt.show()
def plot_keyframes(id_to_keyframe_fingerprint_collection_map):
    """Plot every keyframe in the given map side by side, with axes hidden.

    id_to_keyframe_fingerprint_collection_map: mapping from the integer
        range 0..N-1 to (keyframe, fingerprint) pairs; only the keyframe
        half of each pair is used here.
    """
    from matplotlib import pyplot as plt
    from matplotlib.image import imread

    no_of_frames = len(id_to_keyframe_fingerprint_collection_map)

    # squeeze=False keeps axs two-dimensional even for a single keyframe;
    # plt.subplots(1, 1) would otherwise return a bare Axes object and the
    # indexing below would raise a TypeError.
    fig, axs = plt.subplots(1, no_of_frames, squeeze=False)

    for i in range(no_of_frames):
        keyframe, _ = id_to_keyframe_fingerprint_collection_map[i]
        axs[0][i].imshow(rgb(keyframe.image))
        axs[0][i].axis('off')

    plt.show()
# NOTE(review): py:percent notebook fragment. `fingerprint_comparison`,
# `plt`, `query_segment_id`, `query_id_to_keyframe_fps_map` and
# `reference_id_to_keyframe_fps_map` are presumably defined in earlier
# notebook cells -- confirm against the full notebook.
reference_segment_id = fingerprint_comparison.reference_segment_id

# %% [markdown]
# First, we look at the respective keyframes,

# %%
from notebook_util import rgb

fig = plt.figure()

# Look up the query segment's (keyframe, fingerprint) pair and sanity-check
# that the fingerprint belongs to the requested segment.
query_keyframe, query_fingerprint = query_id_to_keyframe_fps_map[
    query_segment_id]
assert (query_fingerprint.segment_id == query_segment_id)

# Left subplot of a 1x2 grid: the query keyframe.
ax = fig.add_subplot(121)
ax.imshow(rgb(query_keyframe.image))

# Same lookup and sanity check for the reference segment.
reference_keyframe, reference_fingerprint = reference_id_to_keyframe_fps_map[
    reference_segment_id]
assert (reference_fingerprint.segment_id == reference_segment_id)

# Right subplot: the reference keyframe.
ax = fig.add_subplot(122)
ax.imshow(rgb(reference_keyframe.image))

plt.show()

# %% [markdown]
# Rendering their color correlation by stacking the histogram bins on top of one another should yield no stacked bars, refer to 2.0-fingerprint-comparison.py for an example of what that looks like,

# %%
from video_reuse_detector.color_correlation import ColorCorrelation, CORRELATION_CASES
# --- Ejemplo n.º 4 (scraped example separator; score: 0) ---
# 2. center our keyframe on what is heuristically the most pertinent information in the video material.
#
# To elaborate on the second item, generally the subject matter of a film tends to be toward the center of the frame whereas the content at the edges is not as significant.

# %%
from video_reuse_detector.keyframe import Keyframe
from notebook_util import rgb

# NOTE(review): `no_of_chunks`, `paths_to_extracted_frames` and `plt` come
# from earlier notebook cells -- confirm against the full notebook.
# One row per chunk: keyframes stacked vertically.
fig, axs = plt.subplots(no_of_chunks, 1)

keyframes = []

# Build a keyframe from each chunk's extracted frames, keep it for later
# cells, and display it with the axes hidden.
for i in range(no_of_chunks):
    keyframe = Keyframe.from_frame_paths(paths_to_extracted_frames[i])
    keyframes.append(keyframe)
    axs[i].imshow(rgb(keyframe.image))
    axs[i].axis('off')

plt.show()

# %% [markdown]
# ## 4. Producing a thumbnail
#
# The first artefact that constitutes the fingerprint of a video segment is its thumbnail representation. The thumbnail representation for a video segment is produced by mirroring the keyframe image around its horizontal center, and discarding its color information. If two thumbnails are similar, it means that the visual component of the video segments are similar enough to compare other parts of the fingerprint, such as its color make-up and the objects shown in the frame.
#
# When comparing the fingerprints of two video segments, the thumbnail is the first characteristic that is compared, to quickly determine if two keyframes are remotely similar.
#
# Looking at a thumbnail, we find that it is not readily discernible to us humans what operations went into producing it, so before we construct an _actual_ thumbnail, let us apply the same transformations to our keyframe image _first_ without changing the size of the image so as to adequately convey what is happening.
#
# Thumbnails are created through three operations,
#
# --- Ejemplo n.º 5 (scraped example separator; score: 0) ---
def plot_fingerprints(query_fps_w_keyframes,
                      reference_fps_w_keyframes,
                      comparisons_sorted_by_segment_id,
                      query_input_offset,
                      reference_input_offset,
                      no_of_fps_on_display=5):
    """Plot side-by-side windows of query and reference fingerprints.

    Row 0: query keyframes (ORB keypoints overlaid in green when present)
    Row 1: reference keyframes (same treatment)
    Row 2: stacked color-correlation overlay of each query/reference pair
    Row 3: textual comparison summary (ids, match level, per-artefact
           verdicts and similarity score)

    query_fps_w_keyframes / reference_fps_w_keyframes: mappings from
        integer segment id to (keyframe, fingerprint) pairs -- assumed to
        have contiguous integer keys; TODO confirm.
    comparisons_sorted_by_segment_id: two-level lookup indexed first by
        query segment id, then by reference segment id -- presumably a
        dict-of-dicts; verify against the caller.
    query_input_offset / reference_input_offset: first segment ids of the
        respective display windows; clamped to stay in range.
    no_of_fps_on_display: number of columns shown at once.
    """
    from matplotlib import pyplot as plt
    from matplotlib.image import imread

    query_total_no_of_fps = len(query_fps_w_keyframes)
    reference_total_no_of_fps = len(reference_fps_w_keyframes)

    # 1. Query keyframes with ORB
    # 2. Reference keyframes with ORB
    # 3. TODO: Query thumbnail
    # 4. TODO: Reference thumbnail
    # 5 (3). CC overlay between both videos
    # 6 (4). Text data
    fig, axs = plt.subplots(4, no_of_fps_on_display)

    # Clamp both offsets so each window stays in range. The max(0, ...)
    # guards the case where fewer fingerprints exist than columns -- the
    # original could produce a negative offset and fail the lookups below.
    query_offset = query_input_offset
    if query_input_offset + no_of_fps_on_display >= query_total_no_of_fps:
        query_offset = max(0, query_total_no_of_fps - no_of_fps_on_display)

    reference_offset = reference_input_offset
    if reference_input_offset + no_of_fps_on_display >= reference_total_no_of_fps:
        reference_offset = max(
            0, reference_total_no_of_fps - no_of_fps_on_display)

    comparisons = comparisons_sorted_by_segment_id

    for i in range(no_of_fps_on_display):
        query_keyframe, query_fingerprint = query_fps_w_keyframes[
            query_offset + i]
        reference_keyframe, reference_fingerprint = reference_fps_w_keyframes[
            reference_offset + i]

        query_orb = query_fingerprint.orb
        reference_orb = reference_fingerprint.orb

        # Row 0: query keyframe, with ORB keypoints drawn when available.
        if query_orb:
            query_img = cv2.drawKeypoints(query_keyframe.image,
                                          query_orb.keypoints,
                                          None,
                                          color=(0, 255, 0),
                                          flags=0)
            axs[0][i].imshow(rgb(query_img))
        else:
            plot_keyframe(axs[0][i], query_keyframe)

        # Row 1: reference keyframe, same treatment.
        if reference_orb:
            reference_img = cv2.drawKeypoints(reference_keyframe.image,
                                              reference_orb.keypoints,
                                              None,
                                              color=(0, 255, 0),
                                              flags=0)
            axs[1][i].imshow(rgb(reference_img))
        else:
            plot_keyframe(axs[1][i], reference_keyframe)

        # Row 2: both color correlations stacked in one plot.
        plot_stacked_color_correlation(axs[2][i], query_fingerprint,
                                       reference_fingerprint)

        # Row 3: textual summary of the pairwise comparison.
        comparison = comparisons[query_offset + i][reference_offset + i]
        query_segment_id = comparison.query_segment_id
        reference_segment_id = comparison.reference_segment_id

        text_data = '\n'.join([
            f'Q_id: {query_segment_id}', f'R_id: {reference_segment_id}',
            f'{comparison.match_level.name}',
            f' th: {comparison.similar_enough_th}',
            f' cc: {comparison.could_compare_cc and comparison.similar_enough_cc}',
            f' orb: {comparison.could_compare_orb and comparison.similar_enough_orb}',
            f'{10*comparison.similarity_score:.3f}'
        ])

        axs[3][i].text(0, -1.5, text_data)
        # Hide the (empty) text row's axes; sca makes it current first.
        plt.sca(axs[3][i])
        plt.axis('off')

    plt.show()