Example #1
import cv2
import numpy as np


def get_transform_image(img0,
                        img1,
                        kp0,
                        kp1,
                        transform_matrix,
                        mask,
                        matches,
                        affine=False):
    if isinstance(mask, np.ndarray):
        matches_mask = mask.ravel().tolist()
    else:
        matches_mask = None

    h, w = img0.shape  # assumes single-channel (grayscale) images
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                      [w - 1, 0]]).reshape(-1, 1, 2)
    if affine:
        # cv2.transform applies a 2x3 affine matrix to a 2D point array
        dst = cv2.transform(pts, transform_matrix)
    else:
        dst = cv2.perspectiveTransform(pts, transform_matrix)

    img2 = cv2.polylines(img1, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    draw_params = dict(
        singlePointColor=None,
        matchesMask=matches_mask,  # draw only inliers
        flags=2)  # 2 == cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS

    return cv2.drawMatches(img0, kp0, img2, kp1, matches, None, **draw_params)
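As a reference for the two branches above, a minimal sketch (matrix values are illustrative) of how a 2x3 affine goes through cv2.transform while a 3x3 homography goes through cv2.perspectiveTransform, both on point arrays shaped (-1, 1, 2):

import cv2
import numpy as np

pts = np.float32([[0, 0], [0, 9], [9, 9], [9, 0]]).reshape(-1, 1, 2)

affine_2x3 = cv2.getRotationMatrix2D((5, 5), 45, 1.0)  # 2x3 affine
print(cv2.transform(pts, affine_2x3))

homography_3x3 = np.vstack([affine_2x3, [0, 0, 1]])  # lifted to 3x3
print(cv2.perspectiveTransform(pts, homography_3x3))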
Example #2
def box_enhanced_binary_erosion(img,
                                erosion_kernel_size=(3, 3),
                                box_kernel_size=(5, 5),
                                box_kernel_depth=-1,
                                iterations=1):
    ret_img = np.array(img)  # work on a copy so the input is untouched
    erosion_kernel = np.ones(erosion_kernel_size,
                             np.uint8)  #binary erosion kernel
    box_kwargs = {"ksize": box_kernel_size, "ddepth": box_kernel_depth}
    for _ in range(iterations):
        ret_img = cv2.erode(cv2.boxFilter(ret_img, **box_kwargs),
                            erosion_kernel)
    return ret_img
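A quick usage sketch for the helper above on a synthetic mask (the input array is an illustrative assumption): the box filter grays out the boundary before each erosion pass, so the fully bright core shrinks faster than with erosion alone:

import cv2
import numpy as np

img = np.zeros((40, 40), np.uint8)
img[10:30, 10:30] = 255  # white square on black background

out = box_enhanced_binary_erosion(img, iterations=2)
print((img == 255).sum(), (out == 255).sum())  # fully bright core shrinks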
Example #3
def get_affine_transform(matches, kp0, kp1, full_affine=False):
    src_pts = np.float32([kp0[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    # NOTE: cv2.estimateRigidTransform was removed in OpenCV 4; see the
    # sketch below for a modern equivalent
    return cv2.estimateRigidTransform(src_pts, dst_pts, fullAffine=full_affine)
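A hedged replacement sketch for OpenCV 4 (assuming OpenCV >= 3.2): cv2.estimateAffinePartial2D covers the partial (similarity) case and cv2.estimateAffine2D the full affine case; both default to RANSAC and also return an inlier mask:

import cv2
import numpy as np

def get_affine_transform_cv4(matches, kp0, kp1, full_affine=False):
    src_pts = np.float32([kp0[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    estimate = cv2.estimateAffine2D if full_affine else cv2.estimateAffinePartial2D
    matrix, inlier_mask = estimate(src_pts, dst_pts)  # RANSAC by default
    return matrix  # 2x3 array, or None if estimation failed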
Example #4
def calc_sid(img_a, img_b):
    """
    Sum of absolute image differences; on the closely related SSD metric, see
    https://siddhantahuja.wordpress.com/tag/sum-of-squared-differences/
    """
    # cv2.absdiff cannot overflow on uint8 inputs, and a uint64 accumulator
    # avoids overflow when summing over large images
    return int(np.sum(cv2.absdiff(img_a, img_b), dtype=np.uint64))
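A tiny worked check of calc_sid (values are illustrative): the per-pixel absolute differences are 1, 0, 2, 0, so the score is 3:

import numpy as np

a = np.array([[1, 2], [3, 4]], np.uint8)
b = np.array([[2, 2], [1, 4]], np.uint8)
print(calc_sid(a, b))  # |1-2| + |2-2| + |3-1| + |4-4| == 3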
Example #5
def calc_thresholded_eroded_sid(img_a,
                                img_b,
                                n_erosions=3,
                                replace_kernel=None,
                                thresh_args=(255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                             cv2.THRESH_BINARY_INV, 55, 3)):
    # copy to a list so the (immutable) default is never mutated across calls
    thresh_args = list(thresh_args)
    if replace_kernel is not None:
        thresh_args[-2] = replace_kernel
    thresh_a = cv2.adaptiveThreshold(img_a, *thresh_args)
    thresh_b = cv2.adaptiveThreshold(img_b, *thresh_args)
    abs_diff = cv2.absdiff(thresh_a, thresh_b)
    erosion_kernel = np.ones((3, 3), np.uint8)
    eroded_abs_diff = cv2.erode(abs_diff,
                                erosion_kernel,
                                iterations=n_erosions)

    # count disagreeing pixels (the thresholded images are 0/255)
    return int(np.sum(eroded_abs_diff, dtype=np.uint64) // 255)
Example #6
def get_projective_transform(matches, kp0, kp1, **kwargs):
    homography_kwargs = {"method": cv2.RANSAC, "ransacReprojThreshold": 5.0}
    homography_kwargs.update(kwargs)

    src_pts = np.float32([kp0[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    # returns the (3x3 homography, inlier mask) pair from cv2.findHomography
    return cv2.findHomography(src_pts, dst_pts, **homography_kwargs)
Example #7
def apply_perspective_transform(target_image,
                                reference_image,
                                transform_matrix,
                                borderValue=None):
    if borderValue is None:  # "if not borderValue" would discard an explicit 0
        borderValue = int(np.amin(reference_image))

    # cv2.warpPerspective expects dsize as (width, height), i.e. shape reversed
    h, w = reference_image.shape[:2]
    return cv2.warpPerspective(target_image,
                               transform_matrix,
                               (w, h),
                               borderValue=borderValue)
Example #8
def match_key_points(img0,
                     img1,
                     match_thresh=0.7,
                     detector="SURF",
                     matcher="flann"):
    FLANN_INDEX_KDTREE = 1  # in FLANN's enum, 0 is linear search and 1 is kd-tree

    # initialize SIFT or SURF detector (both come from opencv-contrib xfeatures2d)
    if detector == "SIFT":
        det = cv2.xfeatures2d.SIFT_create()
    else:
        det = cv2.xfeatures2d.SURF_create()

    # initialize FLANN matcher or brute-force matcher
    if matcher.lower() == "brute":
        matcher = cv2.BFMatcher(cv2.NORM_L1, crossCheck=False)
    else:
        idx_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        srch_params = dict(checks=50)
        matcher = cv2.FlannBasedMatcher(idx_params, srch_params)

    print(detector)

    if detector == "SIFT":
        root = RootSIFT()  # RootSIFT wrapper defined elsewhere; see Example #12
        kp0 = det.detect(img0)
        kp1 = det.detect(img1)
        kp0, des0 = root.compute(img0, kp0)
        kp1, des1 = root.compute(img1, kp1)
    else:
        kp0, des0 = det.detectAndCompute(img0, None)
        kp1, des1 = det.detectAndCompute(img1, None)

    matches = matcher.knnMatch(des0, des1, k=2)

    # Lowe's ratio test: keep a match only when it is clearly better than the
    # second-best candidate; knnMatch may return fewer than 2 neighbors
    good_matches = [
        pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < match_thresh * pair[1].distance
    ]

    return good_matches, kp0, kp1
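Note that xfeatures2d ships only with opencv-contrib-python, and SURF additionally needs a non-free build. As a hedged alternative for stock OpenCV >= 4.4, where SIFT moved into the main module:

import cv2
import numpy as np

img = np.random.randint(0, 256, (240, 320), np.uint8)  # stand-in grayscale image
det = cv2.SIFT_create()  # in the main cv2 module since OpenCV 4.4.0
kp, des = det.detectAndCompute(img, None)
print(len(kp), None if des is None else des.shape)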
Example #9
def apply_affine_transform(target_image,
                           reference_image,
                           transform_matrix,
                           borderValue=None):
    if borderValue is None:  # "if not borderValue" would discard an explicit 0
        borderValue = int(np.amin(reference_image))
        if borderValue > 100:
            # convert_array_to_norm is a normalization helper defined elsewhere
            borderValue = int(np.amin(convert_array_to_norm(reference_image)))

    # cv2.warpAffine expects dsize as (width, height), i.e. shape reversed
    h, w = reference_image.shape[:2]
    return cv2.warpAffine(target_image,
                          transform_matrix,
                          (w, h),
                          borderValue=borderValue)
Example #10
def threshold_erode_img(img,
                        thresh_args=(255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv2.THRESH_BINARY_INV, 31, 3)):
    thresh = cv2.adaptiveThreshold(img, *thresh_args)
    erosion_kernel = np.ones((2, 2), np.uint8)  #binary erosion kernel
    return cv2.erode(thresh, erosion_kernel, iterations=1)
Example #11
def adaptive_histogram_equalization(img, clipLimit=5.0, tileGridSize=(20, 20)):
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    return clahe.apply(img)
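CLAHE expects a single-channel image; a minimal usage sketch (the input array is an illustrative stand-in):

import cv2
import numpy as np

gray = np.random.randint(40, 100, (100, 100), np.uint8)  # low-contrast stand-in
equalized = adaptive_histogram_equalization(gray)
print(gray.std(), equalized.std())  # local contrast is stretched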
Example #12
    def __init__(self):
        # initialize the SIFT feature extractor (OpenCV 2.x API); presumably
        # the constructor of the RootSIFT wrapper used in Example #8
        self.extractor = cv2.DescriptorExtractor_create("SIFT")
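For context, a hedged sketch of the companion compute method that Example #8 calls on this class, following the standard RootSIFT recipe (Arandjelovic & Zisserman, 2012): compute plain SIFT descriptors, L1-normalize each one, then take the element-wise square root. The body below is modeled on the commonly published implementation, not taken from this source:

import numpy as np

def compute(self, image, kps, eps=1e-7):
    # plain SIFT descriptors for the supplied keypoints (OpenCV 2.x API)
    kps, descs = self.extractor.compute(image, kps)
    if len(kps) == 0:
        return [], None
    # Hellinger kernel trick: L1-normalize, then element-wise square root
    descs /= (descs.sum(axis=1, keepdims=True) + eps)
    descs = np.sqrt(descs)
    return kps, descs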
Example #13
# This snippet begins mid-function; the signature and the a/b assignments are
# reconstructed by symmetry with c/d, and the function name is a placeholder.
def decompose_affine_transform(transform_matrix):
    a = transform_matrix[0][0]
    b = transform_matrix[0][1]
    c = transform_matrix[1][0]
    d = transform_matrix[1][1]
    scale_x = np.sign(a) * np.sqrt((a**2) + (b**2))
    scale_y = np.sign(d) * np.sqrt((c**2) + (d**2))
    rot_theta_0 = np.arctan2(-b, a)
    rot_theta_1 = np.arctan2(c, d)
    # exact float equality is too strict here; compare with a tolerance
    if not np.isclose(rot_theta_0, rot_theta_1):
        raise ValueError("Both rot_thetas need to be equal...")
    return scale_x, scale_y, rot_theta_0
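A quick check of the decomposition (using the placeholder name introduced above) against a 2x3 matrix with known scale and rotation:

import cv2
import numpy as np

M = cv2.getRotationMatrix2D((0, 0), 30, 1.5)  # 30 degrees, uniform scale 1.5
sx, sy, theta = decompose_affine_transform(M)
print(sx, sy, np.degrees(theta))  # ~1.5, ~1.5, ~ -30 (y-down image convention)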


if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from copy import deepcopy

    img0 = cv2.imread(
        r"\\AIBSDATA2\nc-ophys\1022\These are images\match_img10.png", 0)
    img1 = cv2.imread(
        r"\\AIBSDATA2\nc-ophys\1022\These are images\ref_img10.png", 0)

    img0_adapt = adaptive_histogram_equalization(img0)
    img1_adapt = adaptive_histogram_equalization(img1)

    # NOTE: these reloads immediately overwrite the CLAHE results from above
    img0_adapt = cv2.imread(
        r"C:\Users\chrism\Desktop\Images\adaptive_match_ext.png", 0)
    img1_adapt = cv2.imread(
        r"C:\Users\chrism\Desktop\Images\adaptive_ref_ext.png", 0)

    good_matches, kp0, kp1 = match_key_points(img0_adapt, img1_adapt)

    projective_matrix, mask = get_projective_transform(good_matches, kp0, kp1)