Example #1
def stitch(imgs, keypoints, descriptors, matches):
    ### YOUR CODE HERE
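    # Estimate a pairwise transform between each adjacent image pair with RANSAC.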
    transforms = []
    for i in range(len(imgs) - 1):
        H, _ = ransac(keypoints[i], keypoints[i + 1], matches[i], threshold=1)
        transforms.append(H)
    mid = len(keypoints) // 2
    transforms.insert(mid, np.eye(3))
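    # Compose the pairwise transforms so that Hs[i] carries imgs[i] into the
    # frame of the middle (reference) image.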
    Hs = [np.eye(3)] * (len(imgs))

    accu = np.eye(3)
    for i in range(mid + 1, len(imgs)):
        accu = accu @ transforms[i]
        Hs[i] = accu

    accu = np.eye(3)
    for i in range(mid, -1, -1):
        accu = accu @ np.linalg.inv(transforms[i])
        Hs[i] = accu

    output_shape, offset = get_output_space(imgs[mid], imgs, Hs)
    merged = np.zeros(output_shape, dtype=np.float32)
    overlap = np.zeros(output_shape, dtype=np.float32)
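    # Warp every image into the common output frame, sum the warps, and divide
    # by the per-pixel overlap count to average overlapping regions.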
    for i in range(len(imgs)):
        warped = warp_image(imgs[i], Hs[i], output_shape, offset)
        m = (warped != -1)  # Mask == 1 inside the image
        warped[~m] = 0  # Return background values to 0
        merged += warped
        overlap += m * 1.0
    panorama = merged / np.maximum(overlap, 1)
    ### END YOUR CODE

    return panorama
Example #2
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    transforms = [np.eye(3)]
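    # transforms[i] accumulates the pairwise RANSAC transforms so that it maps
    # imgs[i] into the frame of imgs[0], the reference image.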
    for i in range(len(matches)):
        transform_to_prev, robust_matches = ransac(keypoints[i],
                                                   keypoints[i + 1],
                                                   matches[i],
                                                   threshold=1)
        transforms.append(transform_to_prev @ transforms[-1])

    output_shape, offset = get_output_space(imgs[0], imgs[1:], transforms[1:])

    imgs_warped = []
    for i in range(len(transforms)):
        img_warped = warp_image(imgs[i], transforms[i], output_shape, offset)
        img_warped[img_warped == -1] = 0
        imgs_warped.append(img_warped)

    panorama = imgs_warped[0]
    for img in imgs_warped[1:]:
        panorama = linear_blend(panorama, img)

    return panorama
Example #3
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
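    # Estimate pairwise transforms with RANSAC, then compose them relative to
    # the middle image, which serves as the reference frame.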
    print("imgs num:", len(imgs))
    Hs = []
    for i in range(len(imgs) - 1):
        H, robust_matches = ransac(keypoints[i],
                                   keypoints[i + 1],
                                   matches[i],
                                   threshold=1)
        Hs.append(H)
    print("H num:", len(Hs))
    print("H shape:", Hs[0].shape)
    Res = []
    images = []  # all images except the middle (reference) image
    mid = len(imgs) // 2
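    # Res[i] composes the pairwise transforms toward the middle image: images to
    # the left chain inverse transforms, images to the right chain forward ones.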
    for i in range(len(imgs) - 1):
        tmp = np.eye(Hs[0].shape[0], Hs[0].shape[1])
        if i < mid:
            for j in range(i, mid):
                tmp = np.matmul(tmp, np.linalg.inv(Hs[j]))
        elif i >= mid:
            for j in range(mid, i + 1):
                tmp = np.matmul(tmp, Hs[j])
        Res.append(tmp)
        # print(tmp)
        if i != mid:
            images.append(imgs[i])
    images.append(imgs[-1])
    print("Res shape", Res[0].shape)

    output_shape, offset = get_output_space(imgs[mid], images, Res)

    img_warpeds = []
    img_masks = []
    for i in range(len(imgs)):
        # print(i)
        if i == mid:
            img_warp = warp_image(imgs[i], np.eye(3), output_shape, offset)
            img_mask = (img_warp != -1)
            img_warp[~img_mask] = 0

            img_warpeds.append(img_warp)
            img_masks.append(img_mask)
        else:
            if i > mid:
                img_warp = warp_image(imgs[i], Res[i - 1], output_shape,
                                      offset)
            else:
                img_warp = warp_image(imgs[i], Res[i], output_shape, offset)
            img_mask = (img_warp != -1)
            img_warp[~img_mask] = 0

            img_warpeds.append(img_warp)
            img_masks.append(img_mask)

    merged = img_warpeds[0]
    overlap = img_masks[0] * 1.0
    for i in range(1, len(img_warpeds)):
        merged += img_warpeds[i]
        overlap += img_masks[i]
    panorama = merged / np.maximum(overlap, 1)
    ### END YOUR CODE

    return panorama
Example #4
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE

    # np.eye: Return a 2-D array with ones on the diagonal and zeros elsewhere
    imgs_warped = []
    arr = [np.eye(3)]
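    # arr[i] will hold the transform that carries imgs[i] into the frame of
    # imgs[0]: first the pairwise RANSAC estimates, then their running composition.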

    for i in range(len(imgs) - 1):
        ransacResult = ransac(keypoints[i],
                              keypoints[i + 1],
                              matches[i],
                              threshold=1)
        arr.append(ransacResult[0])

    for i in range(1, len(imgs)):
        arr[i] = arr[i].dot(arr[i - 1])

    output_shape, offset = get_output_space(imgs[0], imgs[1:], arr[1:])

    for i in range(len(imgs)):
        warpedImage = warp_image(imgs[i], arr[i], output_shape, offset)
        imgs_warped.append(warpedImage)
        # ~ : NOT - inverts all bits
        img_mask = ~(imgs_warped[-1] != -1)
        imgs_warped[-1][img_mask] = 0

    panorama = imgs_warped[0]

    # linearly blending the images
    for i in range(1, len(imgs)):
        panorama = linear_blend(panorama, imgs_warped[i])

    ### END YOUR CODE

    return panorama
Example #5
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
    Hs = [np.eye(3)]
    for i in range(len(imgs) - 1):
        Hs.append(
            ransac(keypoints[i], keypoints[i + 1], matches[i], threshold=1)[0])
    for i in range(1, len(imgs)):
        # Combine the transformation matrices by composing with the accumulated
        # previous transform
        Hs[i] = Hs[i].dot(Hs[i - 1])
    output_shape, offset = get_output_space(
        imgs[0], imgs[1:],
        Hs[1:])  # get panorama image output shape and offset
    for i in range(len(imgs)):
        img_warped = warp_image(imgs[i], Hs[i], output_shape, offset)
        img_mask = (img_warped != -1)  # Mask == 1 inside the image
        img_warped[~img_mask] = 0  # Return background values to 0
        if i == 0:
            panorama = img_warped
        else:
            panorama = linear_blend(panorama, img_warped)

    ### END YOUR CODE

    return panorama
Example #6
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)

        matches.append(mtchs)

    ### YOUR CODE HERE
    H = []
    for i in range(len(imgs) - 1):
        H.append(ransac(keypoints[i], keypoints[i + 1], matches[i])[0])

    # Use the image just left of center as the reference
    ref_ind = (len(imgs) - 1) // 2
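    # Note: the rest of this solution is hard-coded for a chain of exactly four
    # images, with imgs[1] as the reference frame.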
    output_shape, offset = get_output_space(
        imgs[ref_ind], [imgs[0], imgs[2], imgs[3]],
        [np.linalg.inv(H[0]), H[1], H[1].dot(H[2])])

    img1_warped = warp_image(imgs[0], np.linalg.inv(H[0]), output_shape,
                             offset)
    img1_mask = (img1_warped != -1)
    img1_warped[~img1_mask] = 0

    img2_warped = warp_image(imgs[1], np.eye(3), output_shape, offset)
    img2_mask = (img2_warped != -1)
    img2_warped[~img2_mask] = 0

    img3_warped = warp_image(imgs[2], H[1], output_shape, offset)
    img3_mask = (img3_warped != -1)
    img3_warped[~img3_mask] = 0

    img4_warped = warp_image(imgs[3], H[1].dot(H[2]), output_shape, offset)
    img4_mask = (img4_warped != -1)
    img4_warped[~img4_mask] = 0

    plt.subplot(2, 2, 1)
    plt.imshow(img1_warped)
    plt.title('Image 1 warped')
    plt.axis('off')

    plt.subplot(2, 2, 2)
    plt.imshow(img2_warped)
    plt.title('Image 2 warped')
    plt.axis('off')

    plt.subplot(2, 2, 3)
    plt.imshow(img3_warped)
    plt.title('Image 3 warped')
    plt.axis('off')

    plt.subplot(2, 2, 4)
    plt.imshow(img4_warped)
    plt.title('Image 4 warped')
    plt.axis('off')

    plt.show()
    merged2 = img1_warped + img2_warped + img3_warped + img4_warped

    # Track the overlap by adding the masks together
    #overlap = (img2_mask * 1.0 +  # Multiply by 1.0 for bool -> float conversion
    #       img1_mask + img3_mask + img4_mask)
    #normalized = merged / np.maximum(overlap, 1)
    ### END YOUR CODE

    return merged2
Example #7
File: panorama.py Project: Gorogorov/cs131
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i], kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
                  # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs)-1):
        mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
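    # Chain the pairwise RANSAC transforms so each Hs[i] is intended to carry
    # imgs[i + 1] into the reference frame of imgs[0].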
    Hs = []
    Hprev = np.eye(3)
    for i in range(len(imgs)-1):
        H, robust_matches = ransac(keypoints[i], keypoints[i+1], matches[i], threshold=40)
        H = Hprev @ np.linalg.inv(H)
        Hprev = H
        Hs.append(H)

    output_shape, offset = get_output_space(imgs[0], imgs[1:], Hs)
    
    img_warped = warp_image(imgs[0], np.eye(3), output_shape, offset)
    img_mask = (img_warped != -1)
    img_warped[~img_mask] = 0

    panorama = img_warped
    overlap = img_mask * 1.0  # bool -> float so overlapping pixels can be counted

    for i in range(len(imgs)-1):
        img_warped = warp_image(imgs[i+1], Hs[i], output_shape, offset)
        img_mask = (img_warped != -1)
        img_warped[~img_mask] = 0

        panorama += img_warped
        overlap += img_mask
    
    panorama = panorama / np.maximum(overlap, 1)
    ### END YOUR CODE

    return panorama
Example #8
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
    #transformations
    H_trans = [np.eye(3)]
    for i in range(len(imgs) - 1):
        #get affine matrix and add
        H_trans.append(
            ransac(keypoints[i], keypoints[i + 1], matches[i], threshold=1)[0])

    # Compose the transforms so H_trans[i] maps imgs[i] into the frame of imgs[0]
    for i in range(1, len(imgs)):
        H_trans[i] = H_trans[i].dot(H_trans[i - 1])

    output_shape, offset = get_output_space(imgs[0], imgs[1:], H_trans[1:])
    warpedImgs = []
    for i in range(len(imgs)):
        warpedImgs.append(warp_image(imgs[i], H_trans[i], output_shape,
                                     offset))
        img_mask = (warpedImgs[-1] != -1)
        warpedImgs[-1][~img_mask] = 0

        # Without linear blending, the last image gets messed up
        #pano_mask = (panorama != -1)
        #panorama[~pano_mask] = 0
        #panorama += warpedImgs[-1]
        # Track the overlap by adding the masks together
        #overlap = (pano_mask * 1.0 + img_mask)
        # Normalize through division by `overlap` - but ensure the minimum is 1
        #panorama /= np.maximum(overlap, 1)

    panorama = warpedImgs[0]
    for i in range(1, len(imgs)):
        panorama = linear_blend(panorama, warpedImgs[i])

    return panorama
Example #9
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    H = []
    robust_matches = []
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)
        # Compute the affine matrix between adjacent images
        h, robust_m = ransac(keypoints[i], keypoints[i + 1], matches[i])
        H.append(h)
        robust_matches.append(robust_m)

    # Warp everything onto one image, using the second image as the reference
    output_shape, offset = get_output_space(
        imgs[1], [imgs[0], imgs[2], imgs[3]],
        [np.linalg.inv(H[0]), H[1],
         np.dot(H[1], H[2])])

    img1_warped = warp_image(imgs[0], np.linalg.inv(H[0]), output_shape,
                             offset)
    img1_mask = (img1_warped != -1)  # Mask == 1 inside the image
    img1_warped[~img1_mask] = 0  # Return background values to 0

    img2_warped = warp_image(imgs[1], np.eye(3), output_shape, offset)
    img2_mask = (img2_warped != -1)  # Mask == 1 inside the image
    img2_warped[~img2_mask] = 0  # Return background values to 0

    img3_warped = warp_image(imgs[2], H[1], output_shape, offset)
    img3_mask = (img3_warped != -1)  # Mask == 1 inside the image
    img3_warped[~img3_mask] = 0  # Return background values to 0

    img4_warped = warp_image(imgs[3], np.dot(H[1], H[2]), output_shape, offset)
    img4_mask = (img4_warped != -1)  # Mask == 1 inside the image
    img4_warped[~img4_mask] = 0  # Return background values to 0

    merged = linear_blend(img1_warped, img2_warped)
    merged = linear_blend(merged, img3_warped)
    merged = linear_blend(merged, img4_warped)

    return merged
Example #10
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
    Hs = [np.identity(3)]

    for i in range(len(imgs) - 1):
        # estimate the transformation matrix between each pair of adjacent images
        Hs.append(
            ransac(keypoints[i], keypoints[i + 1], matches[i], threshold=1)[0])

    for i in range(1, len(imgs)):
        # combine transformation matrices to go from 1st image to ith image
        Hs[i] = Hs[i].dot(Hs[i - 1])

    output_shape, offset = get_output_space(imgs[0], imgs[1:], Hs[1:])
    imgs_warped = []

    for i in range(len(imgs)):
        imgs_warped.append(warp_image(imgs[i], Hs[i], output_shape, offset))
        img_mask = (imgs_warped[-1] != -1)
        imgs_warped[-1][~img_mask] = 0
    panorama = imgs_warped[0]

    for i in range(1, len(imgs)):
        # build panorama by blending images
        panorama = linear_blend(panorama, imgs_warped[i])
    ### END YOUR CODE

    return panorama
Example #11
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
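    # Use imgs[0] as the reference frame and compose the pairwise transforms
    # from left to right before warping.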
    transforms = []
    for i in range(len(imgs) - 1):
        H = ransac(keypoints[i], keypoints[i + 1], matches[i])[0]
        transforms.append(H)
    '''    
    mid_index = len(imgs)//2

    new_transforms_left = []
    for i in range(mid_index-1, -1, -1):
        new_transforms_left.append(np.linalg.inv(transforms[i]))
    for i in range(1, len(new_transforms_left)):
        new_transforms_left[i] = new_transforms_left[i-1]@new_transforms_left[i]
    '''

    for i in range(1, len(transforms)):
        transforms[i] = transforms[i - 1] @ transforms[i]
    output_shape, offset = get_output_space(imgs[0], imgs[1:], transforms)
    panorama = imgs[0]
    panorama = warp_image(panorama, np.eye(3), output_shape, offset)
    panorama_mask = (panorama != -1)
    panorama[~panorama_mask] = 0
    for i in range(1, len(imgs)):

        img2_warped = warp_image(imgs[i], transforms[i - 1], output_shape,
                                 offset)
        img2_mask = (img2_warped != -1)
        img2_warped[~img2_mask] = 0

        panorama = linear_blend(panorama, img2_warped)

    panorama[panorama > 1] = 1
    panorama[panorama < 0] = 0
    ### END YOUR CODE

    return panorama
Example #12
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
    Hs = []
    robust_matches = []
    for i in range(len(imgs) - 1):
        H, rm = ransac(keypoints[i], keypoints[i + 1], matches[i], threshold=1)
        Hs.append(H)
        robust_matches.append(rm)

    idx_ref = len(imgs) // 2
    img_ref = imgs[idx_ref]
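    # Images left of the reference chain inverse pairwise transforms, images to
    # the right chain forward ones, so each entry of Hs maps its image into the
    # reference frame.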

    for i in reversed(range(idx_ref)):
        Hs[i] = np.linalg.inv(Hs[i])

        for j in range(i + 1, idx_ref):
            Hs[i] = Hs[i].dot(np.linalg.inv(Hs[j]))

    for i in range(idx_ref + 1, len(imgs) - 1):
        Hs[i] = Hs[i].dot(Hs[i - 1])

    others_imgs = imgs[:idx_ref] + imgs[idx_ref + 1:]
    output_shape, offset = get_output_space(img_ref, others_imgs, Hs)

    iref_warped = warp_image(img_ref, np.eye(3), output_shape, offset)
    iref_mask = (iref_warped != -1)  # Mask == 1 inside the image
    iref_warped[~iref_mask] = 0  # Return background values to 0

    merged = iref_warped
    overlap = iref_mask * 1.0  # Multiply by 1.0 for bool -> float conversion

    for img, H in zip(others_imgs, Hs):
        img_warped = warp_image(img, H, output_shape, offset)
        img_mask = (img_warped != -1)  # Mask == 1 inside the image
        img_warped[~img_mask] = 0  # Return background values to 0
        merged += img_warped
        overlap += img_mask

    panorama = merged / np.maximum(overlap, 1)
    ### END YOUR CODE

    return panorama
Example #13
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
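    # Estimate pairwise transforms, compose them toward a reference image just
    # left of the middle, and grow the output space incrementally image by image.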
    HList = []
    for i in range(len(imgs) - 1):
        H, robust_matches = ransac(keypoints[i],
                                   keypoints[i + 1],
                                   matches[i],
                                   threshold=1)
        HList.append(H)
        '''
        p1 = keypoints[i][matches[i][:,0]]
        p2 = keypoints[i+1][matches[i][:,1]]
        H = fit_affine_matrix(p1, p2)
        print(H)
        HList.append(H)
        '''
    imgWarpedList = []
    idxRef = len(imgs) // 2 - 1
    imgRef = imgs[idxRef]
    HMerged = []

    for i in range(len(imgs)):

        if i < idxRef:

            HRev = np.eye(3)
            for j in range(idxRef - i):
                HRev = np.dot(HRev, np.linalg.inv(HList[i + j]))

            HMerged.append(HRev)

            output_shape, offset = get_output_space(imgRef, [imgs[i]], [HRev])

            imgRef = warp_image(imgRef, np.eye(3), output_shape, offset)

            #imgWarpedRev = warp_image(img[i], HRev, output_shape, offset)
            #imgWarpedList.append(imgWarpedRev)

        elif i > idxRef:

            H = np.eye(3)
            for j in range(i - idxRef):
                H = np.dot(H, HList[i - j - 1])
            HMerged.append(H)

            output_shape, offset = get_output_space(imgRef, [imgs[i]], [H])

            imgRef = warp_image(imgRef, np.eye(3), output_shape, offset)

            #imgWarpedRev = warp_image(img[i], HRev, output_shape, offset)
            #imgWarpedList.append(imgWarpedRev)
    imgMaskRef = (imgRef != -1)  # Mask == 1 inside the image
    imgRef[~imgMaskRef] = 0  # Return background values to 0

    imgs.pop(idxRef)
    for i in range(len(imgs)):
        imgWarped = warp_image(imgs[i], HMerged[i], output_shape, offset)
        imgMask = (imgWarped != -1)  # Mask == 1 inside the image
        imgWarped[~imgMask] = 0  # Return background values to 0
        imgWarpedList.append(imgWarped)

    #panorama = imgWarpedList[0]+imgWarpedList[1]+imgWarpedList[2]+imgRef

    imgWarpedList.insert(idxRef, imgRef)
    merged = imgWarpedList[0]
    for i in range(len(imgWarpedList) - 1):
        merged = linear_blend(merged, imgWarpedList[i + 1])

    panorama = merged

    ### END YOUR CODE

    return panorama
Example #14
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i], kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
                  # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs)-1):
        mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
    Hs = []
    imgs_init = imgs.copy()
    for i, mtchs in enumerate(matches):
        H, _ = ransac(keypoints[i], keypoints[i+1], mtchs, threshold=1)
        Hs.append(H)

    ref_num = (len(imgs) // 2)
    ref_img = imgs[ref_num]
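    # Images to the right of the reference compose forward transforms, images to
    # the left compose inverses, so Hs[i] maps its image into the reference frame.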

    for i in range(ref_num + 1, len(imgs) - 1):
        Hs[i] = np.dot(Hs[i], Hs[i - 1])

    Hs[ref_num - 1] = np.linalg.inv(Hs[ref_num - 1])
    for i in range(ref_num - 2, -1, -1):
        Hs[i] = np.dot(np.linalg.inv(Hs[i]), Hs[i + 1])

    del imgs[ref_num]

    output_shape, offset = get_output_space(ref_img, imgs, Hs)

    Hs.insert(ref_num, np.eye(3))
    imgs_warped = []
    for i, img in enumerate(imgs_init):
        warped = warp_image(img, Hs[i], output_shape, offset)
        imgs_warped.append(warped)

    panorama = imgs_warped[0]
    for war in imgs_warped[1:]:
        panorama = linear_blend(panorama, war)
    
    ### END YOUR CODE

    return panorama
Example #15
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # Describe keypoints
    descriptors = []  # descriptors[i] corresponds to keypoints[i]
    for i, kypnts in enumerate(keypoints):
        desc = describe_keypoints(imgs[i],
                                  kypnts,
                                  desc_func=desc_func,
                                  patch_size=patch_size)
        descriptors.append(desc)
    # Match keypoints in neighboring images
    matches = []  # matches[i] corresponds to matches between
    # descriptors[i] and descriptors[i+1]
    for i in range(len(imgs) - 1):
        mtchs = match_descriptors(descriptors[i], descriptors[i + 1], 0.7)
        matches.append(mtchs)

    ### YOUR CODE HERE
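    # Hs[i] accumulates the pairwise transforms so that it carries imgs[i + 1]
    # into the frame of imgs[0], the reference image.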
    Hs = []
    for i in range(len(imgs) - 1):
        H, _ = ransac(keypoints[i], keypoints[i + 1], matches[i])
        if (i == 0):
            Hs.append(H)
        else:
            Hs.append(Hs[i - 1] @ H)

    # Take the first image as the reference image
    output_shape, offset = get_output_space(imgs[0], imgs[1:], Hs)

    imgs_warped = []
    imgs_mask = []

    for i in range(len(imgs)):
        img_warped = []
        if (i == 0):
            img_warped = warp_image(imgs[i], np.eye(3), output_shape, offset)
        else:
            img_warped = warp_image(imgs[i], Hs[i - 1], output_shape, offset)
        img_mask = (img_warped != -1)
        img_warped[~img_mask] = 0

        imgs_warped.append(img_warped)
        imgs_mask.append(img_mask)

    imgs_warped = np.array(imgs_warped)
    imgs_mask = np.array(imgs_mask)

    merged = np.sum(imgs_warped, axis=0)
    overlap = np.sum(imgs_mask * 1.0, axis=0)
    panorama = merged / np.maximum(overlap, 1)
    print(panorama.shape)
    ### END YOUR CODE

    return panorama
Example #16
#%% [markdown]
# After checking that your `fit_affine_matrix` function is running correctly, run the following code to apply it to images.
# Images will be warped and image 2 will be mapped to image 1.

#%%
from utils import get_output_space, warp_image

# Extract matched keypoints
p1 = keypoints1[matches[:, 0]]
p2 = keypoints2[matches[:, 1]]

# Find affine transformation matrix H that maps p2 to p1
H = fit_affine_matrix(p1, p2)

# Haven't fully understood this implementation yet!!!
output_shape, offset = get_output_space(img1, [img2], [H])
# print("Output shape:", output_shape)
# print("Offset:", offset)

# Warp images into output space
img1_warped = warp_image(img1, np.eye(3), output_shape, offset)
img1_mask = (img1_warped != -1)  # Mask == 1 inside the image
img1_warped[~img1_mask] = 0  # Return background values to 0

img2_warped = warp_image(img2, H, output_shape, offset)
img2_mask = (img2_warped != -1)  # Mask == 1 inside the image
img2_warped[~img2_mask] = 0  # Return background values to 0

# Plot warped images
plt.subplot(1, 2, 1)
plt.imshow(img1_warped)
Example #17
File: panorama.py Project: r03922123/cs131
def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):
    """
    Stitch an ordered chain of images together.

    Args:
        imgs: List of length m containing the ordered chain of m images
        desc_func: Function that takes in an image patch and outputs
            a 1D feature vector describing the patch
        patch_size: Size of square patch at each keypoint

    Returns:
        panorama: Final panorama image in coordinate frame of reference image
    """
    # Detect keypoints in each image
    keypoints = []  # keypoints[i] corresponds to imgs[i]
    for img in imgs:
        kypnts = corner_peaks(harris_corners(img, window_size=3),
                              threshold_rel=0.05,
                              exclude_border=8)
        keypoints.append(kypnts)
    # # Describe keypoints
    # descriptors = []  # descriptors[i] corresponds to keypoints[i]
    # for i, kypnts in enumerate(keypoints):
    #     desc = describe_keypoints(imgs[i], kypnts,
    #                               desc_func=desc_func,
    #                               patch_size=patch_size)
    #     descriptors.append(desc)
    # # Match keypoints in neighboring images
    # matches = []  # matches[i] corresponds to matches between
    #               # descriptors[i] and descriptors[i+1]
    # for i in range(len(imgs)-1):
    #     mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)
    #     matches.append(mtchs)

    ### YOUR CODE HERE
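    # Stitch incrementally: at each step, re-detect and match features between the
    # current panorama and the next image, warp both into a shared frame, and
    # blend; the result overwrites imgs[i + 1] and becomes the next panorama.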
    for i in range(len(imgs)-1):
        keypoints = []  # keypoints[i] corresponds to imgs[i]
        descriptors = []  # descriptors[i] corresponds to keypoints[i]
        for j in range(2):
            kypnts = corner_peaks(harris_corners(imgs[i+j], window_size=3),
                                threshold_rel=0.05,
                                exclude_border=8)
            keypoints.append(kypnts)
            desc = describe_keypoints(imgs[i+j], kypnts,
                                    desc_func=desc_func,
                                    patch_size=patch_size)
            descriptors.append(desc)
        mtchs = match_descriptors(descriptors[0], descriptors[1], 0.7)

        H, robust_matches = ransac(keypoints[0], keypoints[1], mtchs, threshold=1)
        output_shape, offset = get_output_space(imgs[i], [imgs[i+1]], [H])
        img1_warped = warp_image(imgs[i], np.eye(3), output_shape, offset)
        img1_mask = (img1_warped != -1)  # Mask == 1 inside the image
        img1_warped[~img1_mask] = 0      # Return background values to 0

        img2_warped = warp_image(imgs[i+1], H, output_shape, offset)
        img2_mask = (img2_warped != -1)  # Mask == 1 inside the image
        img2_warped[~img2_mask] = 0      # Return background values to 0

        # Merge the warped images using linear blending scheme
        imgs[i+1] = linear_blend(img1_warped, img2_warped)
    panorama = imgs[-1]
    ### END YOUR CODE

    return panorama