import os
import math

import numpy as np
# OpenCL SIFT implementation from silx (exposed as silx.image.sift in recent
# versions, historically as the standalone sift_pyocl module)
from silx.image import sift


def transform(diff_image, map_patch):
    # Print OpenCL compiler output to ease debugging of kernel builds
    os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
    # Original size of diff_image, used to crop the aligned result at the end
    h, w = diff_image.shape[:2]

    # Zero-pad diff_image to the shape of map_patch so both images have the
    # same size for the SIFT plans and the alignment step
    pad = np.zeros(map_patch.shape, dtype=diff_image.dtype)
    pad[:h, :w] = diff_image
    diff_image = pad

    sift_ocl1 = sift.SiftPlan(template=diff_image, devicetype="GPU")
    sift_ocl2 = sift.SiftPlan(template=map_patch, devicetype="GPU")

    kp1 = sift_ocl1.keypoints(diff_image)
    kp2 = sift_ocl2.keypoints(map_patch)

    sift_m = sift.MatchPlan(devicetype="GPU")
    matches = sift_m.match(kp1, kp2)
    # Align map_patch onto the (padded) diff_image and crop the result back
    # to the original diff_image size
    sa = sift.LinearAlign(diff_image, devicetype="GPU")

    transformed_img = sa.align(map_patch)
    transformed_img = transformed_img[:h, :w]

    # Rough rotation estimate from the orientations of the first matched
    # keypoint pair, converted from radians to degrees
    theta_rad = abs(matches.angle[0][0]) + abs(matches.angle[0][1])
    theta = math.degrees(theta_rad)

    return transformed_img, theta
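
# Usage sketch (not part of the original snippet): how transform() might be
# called. The .npy file names are hypothetical placeholders; any pair of 2D
# single-channel arrays with enough texture for SIFT will do. Requires a GPU
# OpenCL device visible to PyOpenCL.
if __name__ == "__main__":
    diff_image = np.load("diff_image.npy").astype(np.float32)  # smaller image
    map_patch = np.load("map_patch.npy").astype(np.float32)    # larger patch
    aligned, angle_deg = transform(diff_image, map_patch)
    print("aligned shape:", aligned.shape, "estimated rotation (deg):", angle_deg)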
Example 2
    def __createSiftData(self, image, second_image):
        """Generate key points and aligned images from 2 images.

        If no keypoints matches, unaligned data are anyway returns.

        :rtype: Tuple(numpy.ndarray,numpy.ndarray)
        """
        devicetype = "GPU"

        # Compute keypoints of the base image
        sift_ocl = sift.SiftPlan(template=image, devicetype=devicetype)
        keypoints = sift_ocl(image)

        # Check image compatibility: the SIFT plan built for the first image
        # is reused for the second one
        second_keypoints = sift_ocl(second_image)
        mp = sift.MatchPlan()
        match = mp(keypoints, second_keypoints)
        _logger.info("Number of Keypoints within image 1: %i" % keypoints.size)
        _logger.info("                    within image 2: %i" %
                     second_keypoints.size)

        self.__matching_keypoints = (match[:].x[:, 0], match[:].y[:, 0],
                                     match[:].scale[:, 0])
        matching_keypoints = match.shape[0]
        _logger.info("Matching keypoints: %i" % matching_keypoints)
        if matching_keypoints == 0:
            return image, second_image

        # TODO: Problem here is that we have to compute SIFT twice:
        # the first time to extract matching keypoints, the second time
        # to extract the aligned image.

        # Align the second image onto the first one
        sa = sift.LinearAlign(image, devicetype=devicetype)
        data1 = image
        # TODO: Create a sift issue: if data1 is RGB and data2 is intensity,
        # align() returns None, while extracting keypoints manually (above) works
        result = sa.align(second_image, return_all=True)
        data2 = result["result"]
        self.__transformation = self.__toAffineTransformation(result)
        return data1, data2
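
# The private helper __toAffineTransformation used above is not part of this
# snippet. The sketch below is a hypothetical, standalone equivalent, assuming
# (as in silx's LinearAlign with return_all=True) that the result dict exposes
# a 2x2 "matrix" and a 2-element "offset"; the (tx, ty, sx, sy, rotation)
# tuple is an illustrative stand-in for the original AffineTransformation type.
import math

def to_affine_parameters(sift_result):
    """Derive translation, scale and rotation from a LinearAlign result dict."""
    matrix = sift_result["matrix"]   # assumed 2x2 linear part
    offset = sift_result["offset"]   # assumed 2-element translation
    tx, ty = float(offset[0]), float(offset[1])
    a, b = float(matrix[0, 0]), float(matrix[0, 1])
    c, d = float(matrix[1, 0]), float(matrix[1, 1])
    rotation = math.atan2(b, a)      # rotation angle in radians
    sx = math.hypot(a, b)            # scale along the first axis
    sy = math.hypot(c, d)            # scale along the second axis
    return tx, ty, sx, sy, rotation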