import math
import os

import cv2
import numpy as np
from silx.opencl import sift


def transform(diff_image, map_patch):
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
    h, w = diff_image.shape[:2]

    # Zero-pad diff_image up to map_patch's shape so both inputs match.
    pad = np.zeros(map_patch.shape)
    pad[:h, :w] = diff_image
    diff_image = pad

    sift_ocl1 = sift.SiftPlan(template=diff_image, devicetype="GPU")
    sift_ocl2 = sift.SiftPlan(template=map_patch, devicetype="GPU")

    kp1 = sift_ocl1.keypoints(diff_image)
    kp2 = sift_ocl2.keypoints(map_patch)

    sift_m = sift.MatchPlan(devicetype="GPU")
    matches = sift_m.match(kp1, kp2)
    sa = sift.LinearAlign(diff_image, devicetype="GPU")

    # Warp map_patch onto diff_image and crop back to diff_image's original size.
    transformed_img = sa.align(map_patch)
    transformed_img = transformed_img[:h, :w]

    # Angle estimate from the orientations of the first matched keypoint pair.
    theta_rad = abs(matches.angle[0][0]) + abs(matches.angle[0][1])
    theta = math.degrees(theta_rad)

    return transformed_img, theta
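

# Hedged usage sketch (added, not part of the original example): the file paths
# are hypothetical placeholders; cv2 is only used here to load the inputs as
# single-channel arrays.
def _demo_transform(diff_path="diff_image.png", patch_path="map_patch.png"):
    diff = cv2.imread(diff_path, cv2.IMREAD_GRAYSCALE)
    patch = cv2.imread(patch_path, cv2.IMREAD_GRAYSCALE)
    aligned, angle_deg = transform(diff, patch)
    print("Estimated rotation between patches (degrees):", angle_deg)
    return aligned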


def img_ex(image1, image2):
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'

    h1, w1 = image1.shape[:2]
    h2, w2 = image2.shape[:2]

    # Downscale image1 by a factor of two.
    image1_resized = cv2.resize(image1,
                                dsize=(int(w1 / 2), int(h1 / 2)),
                                interpolation=cv2.INTER_CUBIC)

    # Zero-pad image2 up to the shape of the resized image1.
    pad = np.zeros(image1_resized.shape)
    pad[:h2, :w2] = image2
    image2_resized = pad

    sift_ocl1 = sift.SiftPlan(template=image1_resized, devicetype="GPU")
    sift_ocl2 = sift.SiftPlan(template=image2_resized, devicetype="GPU")

    kp1 = sift_ocl1.keypoints(image1_resized)
    kp2 = sift_ocl2.keypoints(image2_resized)

    sift_m = sift.MatchPlan(devicetype="GPU")
    matches = sift_m.match(kp1, kp2)

    # Scan pairs of matches until two matches whose point-to-point distances in
    # the two images agree to within 10 pixels (a cheap consistency check).
    u = 0
    v = 1
    while v < len(matches):
        if v == len(matches) - 1:
            u = u + 1
            v = u + 1
            if v >= len(matches):
                break
        dx1 = (matches.x[u][0] - matches.x[v][0])**2
        dy1 = (matches.y[u][0] - matches.y[v][0])**2
        dx2 = (matches.x[u][1] - matches.x[v][1])**2
        dy2 = (matches.y[u][1] - matches.y[v][1])**2
        d1 = math.sqrt(dx1 + dy1)
        d2 = math.sqrt(dx2 + dy2)
        v = v + 1
        if d2 - 10 < d1 < d2 + 10:
            break

    # Map the y coordinate of the first match back to image1's full resolution
    # and take a 2160-pixel-tall strip of image1 centred on it.
    pt1 = 2 * (matches.y[0][0])

    y1 = int(pt1 - 1080)
    y2 = int(pt1 + 1080)

    # Clamp the strip to the image borders while keeping its height.
    if y1 < 0:
        y2 = y2 + abs(y1)
        y1 = 0
    if y2 > h1:
        y = y2 - h1
        y2 = h1
        y1 = y1 - y

    image1 = image1[y1:y2, 0:w1]

    return image1, y1
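

# Hedged usage sketch (added, not part of the original example): the file paths
# are hypothetical placeholders. image2 is expected to fit within half the size
# of image1; the call returns a 2160-pixel-tall strip of image1 around the
# first match, together with the strip's top row index.
def _demo_img_ex(path1="large_image.png", path2="small_image.png"):
    image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    image2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    strip, y_top = img_ex(image1, image2)
    print("Strip starts at row:", y_top)
    return strip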
Example #3
    def __createSiftData(self, image, second_image):
        """Generate key points and aligned images from 2 images.

        If no keypoints match, the unaligned data are returned anyway.

        :rtype: Tuple(numpy.ndarray,numpy.ndarray)
        """
        devicetype = "GPU"

        # Compute base image
        sift_ocl = sift.SiftPlan(template=image, devicetype=devicetype)
        keypoints = sift_ocl(image)

        # Check image compatibility
        second_keypoints = sift_ocl(second_image)
        mp = sift.MatchPlan()
        match = mp(keypoints, second_keypoints)
        _logger.info("Number of Keypoints within image 1: %i" % keypoints.size)
        _logger.info("                    within image 2: %i" %
                     second_keypoints.size)

        self.__matching_keypoints = (match[:].x[:, 0], match[:].y[:, 0],
                                     match[:].scale[:, 0])
        matching_keypoints = match.shape[0]
        _logger.info("Matching keypoints: %i" % matching_keypoints)
        if matching_keypoints == 0:
            return image, second_image

        # TODO: The problem here is that we have to run SIFT twice:
        # the first time to extract the matching keypoints, the second time
        # to extract the aligned image.

        # Normalize the second image
        sa = sift.LinearAlign(image, devicetype=devicetype)
        data1 = image
        # TODO: Create a sift issue: if data1 is RGB and data2 is intensity,
        # align() returns None, while extracting the keypoints manually (above) works
        result = sa.align(second_image, return_all=True)
        data2 = result["result"]
        self.__transformation = self.__toAffineTransformation(result)
        return data1, data2
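
    # Hedged sketch (added, not part of the original class): one possible way to
    # turn the LinearAlign result into a 3x3 homogeneous matrix. It assumes the
    # dict returned by align(..., return_all=True) also carries "matrix" and
    # "offset" entries describing the fitted transform, and that numpy is
    # imported at module level; the method name and those key names are
    # assumptions, not documented API.
    def _toAffineMatrixSketch(self, sift_result):
        matrix = numpy.asarray(sift_result["matrix"], dtype=float)
        offset = numpy.asarray(sift_result["offset"], dtype=float).reshape(-1)
        affine = numpy.eye(3)
        affine[:2, :2] = matrix  # linear (rotation/scale/shear) part
        affine[:2, 2] = offset[:2]  # translation part
        return affine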
Example #4
import cv2
from matplotlib import pyplot as plt
from silx.opencl import sift

# `data` is assumed to come from the surrounding context; img1 is taken to be
# the unrotated source image implied by img2 below.
img1 = data.images[0]
img2 = cv2.rotate(data.images[0], cv2.ROTATE_90_CLOCKWISE)

sift_ocl = sift.SiftPlan(template=img1, devicetype="GPU")
keypoints_1 = sift_ocl.keypoints(img1)
keypoints_2 = sift_ocl.keypoints(img2)

figure, ax = plt.subplots(1, 2)
ax[0].imshow(img1, cmap='gray')
ax[0].plot(keypoints_1[:].x, keypoints_1[:].y, ".r")
ax[1].imshow(img2, cmap='gray')
ax[1].plot(keypoints_2[:].x, keypoints_2[:].y, ".r")

mp = sift.MatchPlan()
matching_keypoints = mp(keypoints_1, keypoints_2)
print("Number of Keypoints - image 1 :", keypoints_1.size, " - image 2 : ",
      keypoints_2.size, " - Matching keypoints : ",
      matching_keypoints.shape[0])

# Show both images side by side and draw the matched keypoints on each half.
img3 = cv2.hconcat([img1, img2])
fig, ax = plt.subplots()
ax.imshow(img3, cmap='gray')
ax.axis('off')
ax.plot(matching_keypoints[:, 0].x, matching_keypoints[:, 0].y, '.r')
ax.plot(matching_keypoints[:, 1].x + img1.shape[1], matching_keypoints[:, 1].y,
        '.r')
for m in matching_keypoints:
    ax.arrow(m[0].x, m[0].y, m[1].x - m[0].x + img1.shape[1], m[1].y - m[0].y)
fig.show()
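
# Hedged follow-up sketch (added, not part of the original example): a rough
# estimate of the rotation between the two images from the orientation field of
# the matched keypoint records (the same records expose x and y above). The
# median damps outlier matches; angle wrap-around at +/- pi is ignored for
# simplicity.
import numpy

angle_diff = matching_keypoints[:, 1].angle - matching_keypoints[:, 0].angle
print("Estimated rotation (degrees):", numpy.degrees(numpy.median(angle_diff)))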