# Imports inferred for this snippet: `sift` is silx's OpenCL SIFT, `log` a
# module-level logger, and mean_squared_error comes from scikit-learn.
import logging
import time

import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from silx.image import sift
from sklearn.metrics import mean_squared_error

log = logging.getLogger(__name__)


def kp_rmse(img_path_lr, img_path_hr, device='CPU', plot=False):
    #path_lr = utilstest.getfile(img_path_lr)
    #path_hr = utilstest.getfile(img_path_hr)

    lr = np.asarray(Image.open(img_path_lr))
    hr = np.asarray(Image.open(img_path_hr))

    log.warning(f"LR shape: {np.shape(lr)}")
    log.warning(f"HR shape: {np.shape(hr)}")

    sift_ocl_lr = sift.SiftPlan(template=lr, devicetype=device)
    sift_ocl_hr = sift.SiftPlan(template=hr, devicetype=device)

    t = time.time()
    kps_lr = sift_ocl_lr(lr)
    t = time.time() - t
    log.warning(f"Got {len(kps_lr)} keypoints for lr in {t:.2f} secs")

    t = time.time()
    kps_hr = sift_ocl_hr(hr)
    t = time.time() - t
    log.warning(f"Got {len(kps_hr)} keypoints for hr in {t:.2f} secs")

    mp = sift.MatchPlan()
    match = mp(kps_hr, kps_lr)
    log.warning(f"Got {match.shape[0]} matches")

    if match.shape[0] == 0:
        # No matches found: return a sentinel RMSE of -1
        return (-1, 0)

    lr_shape = np.shape(lr)
    hr_shape = np.shape(hr)

    mp_hr = np.column_stack((match[:, 0].x, match[:, 0].y))
    mp_lr = np.column_stack((match[:, 1].x, match[:, 1].y))

    mp_hr_norm = np.column_stack(
        (mp_hr[:, 0] / hr_shape[1], mp_hr[:, 1] / hr_shape[0]))

    mp_lr_norm = np.column_stack(
        (mp_lr[:, 0] / lr_shape[1], mp_lr[:, 1] / lr_shape[0]))

    # RMSE between normalized match coordinates (on scikit-learn >= 1.6,
    # use root_mean_squared_error instead of squared=False)
    rmse = mean_squared_error(mp_hr_norm, mp_lr_norm, squared=False)
    log.warning(f"RMSE: {rmse}")

    if plot:
        fig, (ax_lr, ax_hr) = plt.subplots(1, 2)
        ax_hr.imshow(hr)
        ax_hr.plot(match[:, 0].x, match[:, 0].y, ".g", markersize=2)

        ax_lr.imshow(lr)
        ax_lr.plot(match[:, 1].x, match[:, 1].y, ".g", markersize=2)
        plt.show()

    return (rmse, match.shape[0])
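
A hedged usage sketch for kp_rmse; the image paths are placeholders:

# Hypothetical call comparing a low- and a high-resolution rendering of
# the same scene; plot=True shows the matched keypoints side by side.
rmse, n_matches = kp_rmse("scene_lr.png", "scene_hr.png",
                          device="CPU", plot=True)
print(f"Normalized keypoint RMSE: {rmse:.4f} over {n_matches} matches")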
Example #2
 def init_feature_extractor(self, image):
     """
     Initialize the feature extractor with a different template image.
     :param image: The new template image
     :return: Nothing
     """
     self.gpu_sift = sift.SiftPlan(template=image,
                                   init_sigma=self.sigma,
                                   PIX_PER_KP=self.pix_per_kp,
                                   devicetype=self.device)
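
The same construction also works outside the class; a minimal standalone
sketch with assumed parameter values (init_sigma and PIX_PER_KP mirror the
attributes above):

import numpy as np
from silx.image import sift

image = np.random.randint(0, 255, (512, 512)).astype(np.float32)
plan = sift.SiftPlan(template=image, init_sigma=1.6,
                     PIX_PER_KP=10, devicetype="CPU")
keypoints = plan(image)  # reuses the compiled OpenCL plan on each call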
Example #3
    def __createSiftData(self, image, second_image):
        """Generate key points and aligned images from 2 images.

        If no keypoints match, the unaligned data are returned anyway.

        :rtype: Tuple(numpy.ndarray,numpy.ndarray)
        """
        devicetype = "GPU"

        # Compute base image
        sift_ocl = sift.SiftPlan(template=image, devicetype=devicetype)
        keypoints = sift_ocl(image)

        # Check image compatibility
        second_keypoints = sift_ocl(second_image)
        mp = sift.MatchPlan()
        match = mp(keypoints, second_keypoints)
        _logger.info("Number of Keypoints within image 1: %i" % keypoints.size)
        _logger.info("                    within image 2: %i" %
                     second_keypoints.size)

        self.__matching_keypoints = (match[:].x[:, 0], match[:].y[:, 0],
                                     match[:].scale[:, 0])
        matching_keypoints = match.shape[0]
        _logger.info("Matching keypoints: %i" % matching_keypoints)
        if matching_keypoints == 0:
            return image, second_image

        # TODO: Problem here is that we have to run SIFT twice:
        # first to extract the matching keypoints, then again to
        # extract the aligned image.

        # Normalize the second image
        sa = sift.LinearAlign(image, devicetype=devicetype)
        data1 = image
        # TODO: Create a sift issue: if data1 is RGB and data2 is an
        # intensity image, align() returns None, while extracting the
        # keypoints manually (above) works
        result = sa.align(second_image, return_all=True)
        if result is None:
            # align() can return None (see TODO above); fall back to
            # the unaligned images
            return image, second_image
        data2 = result["result"]
        self.__transformation = self.__toAffineTransformation(result)
        return data1, data2
Example #4
# Imports inferred for this snippet.
import numpy as np
from silx.image import sift


def _sift_on_pair(fixed, moving, devicetype, verbose=False):
    # Initialize the SIFT
    sift_ocl = sift.SiftPlan(template=fixed, devicetype=devicetype)
    # print("Device used for calculation: ", sift_ocl.ctx.devices[0].name)

    # Compute keypoints
    keypoints_ref = sift_ocl(fixed)
    keypoints_mov = sift_ocl(moving)

    # Match keypoints
    mp = sift.MatchPlan()
    match = mp(keypoints_ref, keypoints_mov)

    # Determine offset
    if len(match) == 0:
        print('Warning: No matching keypoints found!')
        offset = (0., 0.)
    else:
        offset = (np.median(match[:, 1].x - match[:, 0].x),
                  np.median(match[:, 1].y - match[:, 0].y))

    if verbose:
        print('offset = {}'.format(offset))

    return offset
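
The returned offset can then be used to register the pair; a hedged usage
sketch (the file names are placeholders, and scipy's shift expects
(row, col) order):

import numpy as np
from scipy.ndimage import shift
from tifffile import imread

fixed = imread("slice_0000.tif").astype(np.float32)
moving = imread("slice_0001.tif").astype(np.float32)
dx, dy = _sift_on_pair(fixed, moving, devicetype="GPU", verbose=True)
# Negate the (x, y) offset and swap to (y, x) to pull `moving` onto `fixed`.
aligned = shift(moving, (-dy, -dx))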
Example #5
from glob import glob
from tifffile import imread, imsave
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Pool
from vigra import gaussianSmoothing
from silx.image import sift
from scipy.ndimage import shift  # scipy.ndimage.interpolation was removed in SciPy 1.10
from skimage.transform import downscale_local_mean, rescale
import pyelastix
from skimage.feature import register_translation  # moved to skimage.registration.phase_cross_correlation in scikit-image >= 0.19
from skimage import filters


# To check which device the SIFT runs on
print('Running SIFT on ' + sift.SiftPlan(shape=(10, 10)).ctx.devices[0].name)
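
Plan creation with devicetype="GPU" can fail when no OpenCL GPU is present;
a hedged fallback sketch (the exact failure behavior is an assumption and
may differ between silx versions):

import numpy as np

try:
    plan = sift.SiftPlan(shape=(10, 10), dtype=np.float32, devicetype="GPU")
except Exception:
    # Assumed fallback: retry on the CPU if no GPU context could be built.
    plan = sift.SiftPlan(shape=(10, 10), dtype=np.float32, devicetype="CPU")
print('Running SIFT on ' + plan.ctx.devices[0].name)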


def default_elastix_params():

    return dict(
        # These are copied from the default affine parameter file:
        # http://elastix.bigr.nl/wiki/images/c/c5/Parameters_Affine.txt

        # The internal pixel type, used for internal computations
        # Leave to float in general.
        # NB: this is not the type of the input images! The pixel
        # type of the input images is automatically read from the
        # images themselves.
        # This setting can be changed to "short" to save some memory
        # in case of very large 3D images.
Example #6
# Imports inferred for this snippet; find_3d_points comes from the same
# source file (a hypothetical sketch of it follows the function).
import cv2
import numpy as np
from matplotlib import pyplot as plt
from silx.image import sift


def get_3d_points(im1, im2, K, plotMatches=False):

    # Initiate the SIFT detector and get keypoints in both images
    siftp = sift.SiftPlan(im1.shape, im1.dtype, devicetype="GPU")
    kp1 = siftp.keypoints(im1)
    siftp = sift.SiftPlan(im2.shape, im2.dtype, devicetype="GPU")
    kp2 = siftp.keypoints(im2)

    # Extract descriptors from the keypoint records (field 4 holds the
    # 128-byte descriptor); note that MatchPlan below matches records directly
    des1 = np.array([k[4] for k in kp1])
    des2 = np.array([k[4] for k in kp2])

    # Find matching keypoints in images
    matchp = sift.MatchPlan()
    matches = matchp.match(kp1,kp2)
    src_pts, dst_pts = [], []

    # Collect matched point coordinates to estimate the fundamental
    # and essential matrices
    for m in matches:  
        src_pts.append([m[0][0], m[0][1]])
        dst_pts.append([m[1][0], m[1][1]])

    src_pts = np.array(src_pts)
    dst_pts = np.array(dst_pts)

    F, mask = cv2.findFundamentalMat(src_pts,dst_pts,cv2.FM_LMEDS)
    matchesMask = mask.ravel().tolist()
    # retval, H1, H2 = cv2.stereoRectifyUncalibrated(src_pts, dst_pts, F, (1000,1500))
    # im1 = cv2.warpPerspective(im1, H1, (1500,1000))
    # im2 = cv2.warpPerspective(im2, H2, (1500,1000))
    # stereo = cv2.StereoBM_create(numDisparities=32, blockSize=11)
    # disparity = stereo.compute(im1,im2)
    # plt.imshow(disparity, 'gray')
    # plt.figure()

    # plt.imshow(cv2.warpPerspective(im1, H1, (1500,1000)), 'gray')
    # plt.figure()
    # plt.imshow(cv2.warpPerspective(im2, H2, (1500,1000)), 'gray')
    # plt.show()
    E = np.matmul(K.T, np.matmul(F, K))

    # Plot matches if flag is set
    if plotMatches:
        plt.subplot(1,2,1)
        plt.imshow(im1,'gray')
        plt.scatter(src_pts[:,0], src_pts[:,1])
        plt.subplot(1,2,2)
        plt.imshow(im2,'gray')
        plt.scatter(dst_pts[:,0], dst_pts[:,1])
        plt.show()

    # Find all possible rotations and translations of camera 2
    U, S, Vh = np.linalg.svd(E)
    R90 = np.array([[np.cos(np.pi/2), -np.sin(np.pi/2), 0],[np.sin(np.pi/2), np.cos(np.pi/2), 0], [0, 0, 1]])
    t = np.array([U[:,2], -U[:,2]])
    R = np.array([np.matmul(Vh.T, np.linalg.inv(np.matmul(U,R90.T))).T,
                -np.matmul(Vh.T, np.linalg.inv(np.matmul(U,R90.T))).T,
                np.matmul(Vh.T, np.linalg.inv(np.matmul(U,R90))).T,
                -np.matmul(Vh.T, np.linalg.inv(np.matmul(U,R90))).T])

    # Construct P1 for camera 1
    Rt1 = np.concatenate((np.eye(3),np.zeros((3,1))), axis=1)
    P1 = np.matmul(K,Rt1)

    # The number of points in front of the image planes for all combinations
    num_points = np.zeros((t.shape[0],R.shape[0]))
    # The reconstruction error for all combinations (for debugging)
    # errs = np.full((t.shape[0],R.shape[0]), np.inf)

    # Find R2 and t2 from R,t such that largest number of points lie in front
    # of the image planes of the two cameras
    for i,ti in enumerate(t):
        for j,ri in enumerate(R):
            P2 = np.matmul(K, np.concatenate((ri,ti.reshape((3,1))), axis=1))      
            points_3d, err = find_3d_points(P1,P2,matches)
            
            Z1 = points_3d[:,2]
            Z2 = ri[2,:].dot(points_3d.T)+ti[2]
            Z2 = Z2.T
            num_points[i,j] = np.sum(np.logical_and(Z1>0, Z2>0))
            # errs[i,j] = err

    ti,ri = np.unravel_index(np.argmax(num_points), num_points.shape)
    P2 = np.matmul(K, np.concatenate((R[ri],t[ti].reshape((3,1))), axis=1))

    # Compute the 3D points with the final P2
    points, err = find_3d_points(P1,P2,matches)
    return points, err, R[ri], t[ti]
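
find_3d_points is not shown in this example; below is a hypothetical
linear-triangulation (DLT) sketch that matches how it is called above, not
the original author's implementation:

def find_3d_points(P1, P2, matches):
    # Hypothetical DLT triangulation; `matches` are silx match records,
    # indexed as in the loop above (m[0] -> image 1, m[1] -> image 2).
    points, errs = [], []
    for m in matches:
        x1, y1 = m[0][0], m[0][1]
        x2, y2 = m[1][0], m[1][1]
        A = np.array([x1 * P1[2] - P1[0],
                      y1 * P1[2] - P1[1],
                      x2 * P2[2] - P2[0],
                      y2 * P2[2] - P2[1]])
        _, _, Vt = np.linalg.svd(A)
        X = Vt[-1] / Vt[-1][3]  # homogeneous least-squares solution
        p1, p2 = P1 @ X, P2 @ X
        e1 = np.hypot(p1[0] / p1[2] - x1, p1[1] / p1[2] - y1)
        e2 = np.hypot(p2[0] / p2[2] - x2, p2[1] / p2[2] - y2)
        points.append(X[:3])
        errs.append((e1 + e2) / 2)
    return np.array(points), float(np.mean(errs))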
Example #7
 def initSift(self):
     print(self.shape, self.dtype, self.devicetype)
     self.siftPlan = sift.SiftPlan(self.shape,
                                   self.dtype,
                                   devicetype=self.devicetype)
     self.matchPlan = sift.MatchPlan()
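
A hedged sketch of how the two plans built above are typically used together
(the helper name and frames are placeholders, not part of the original class):

 def findMatches(self, frame_a, frame_b):
     # Hypothetical helper: extract keypoints with the shared plan and
     # match them; both frames must have the shape the plan was built for.
     kp_a = self.siftPlan.keypoints(frame_a)
     kp_b = self.siftPlan.keypoints(frame_b)
     return self.matchPlan(kp_a, kp_b)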
Example #8
 def match(self, _, I2):
     while True:
         I1 = self.webcam.get_current_frame()
         # gray1 = cv2.cvtColor(I1, cv2.COLOR_RGB2GRAY)
         # gray2 = cv2.cvtColor(I2, cv2.COLOR_RGB2GRAY)
         os.environ["PYOPENCL_COMPILER_OUTPUT"] = "0"
         devicetype = "GPU"
         sift_ocl = sift.SiftPlan(template=I1, devicetype=devicetype)
         keypoints_1 = sift_ocl(I1)
         print("Device used for calculation: ",
               sift_ocl.ctx.devices[0].name)
         # SIFT and SURF are not included in opencv-contrib-python
         # >= 3.4.3.18, so descriptors are taken from the silx keypoints
         # instead of cv2.xfeatures2d.SIFT_create().
         sift_ocl_2 = sift.SiftPlan(template=I2, devicetype=devicetype)
         keypoints_2 = sift_ocl_2(I2)
         # BFMatcher needs float32 descriptors; the .desc field holds the
         # 128-byte SIFT descriptor of each silx keypoint.
         des1 = np.ascontiguousarray(keypoints_1.desc, dtype=np.float32)
         des2 = np.ascontiguousarray(keypoints_2.desc, dtype=np.float32)
         # Matching: brute force, k nearest neighbours (k=2) for the
         # ratio test below
         bf = cv2.BFMatcher_create()
         matches = bf.knnMatch(des2, des1, 2)
         # Choose good matches
         good = []
         new_good = []
         for m, n in matches:
             if m.distance < 0.4 * n.distance:
                 good.append([m])
                 new_good.append(m)
         if len(good) > 3:
             srcPoints = np.float32([(keypoints_2[m.queryIdx].x,
                                      keypoints_2[m.queryIdx].y)
                                     for m in new_good]).reshape(-1, 1, 2)
             dstPoints = np.float32([(keypoints_1[m.trainIdx].x,
                                      keypoints_1[m.trainIdx].y)
                                     for m in new_good]).reshape(-1, 1, 2)
             # print(srcPoints)
             # print(dstPoints)
             M, mask = cv2.findHomography(srcPoints, dstPoints)
             w = I2.shape[1] - 1
             h = I2.shape[0] - 1
             n_corners = np.float32([[0, 0], [w, 0], [w, h],
                                     [0, h]]).reshape(-1, 1, 2)
             # moving_line = np.float32([[0, h / 2], [w, h / 2]]).reshape(-1, 1, 2)
             # print(n_corners)
             # n_corners = np.float32([[0, h], [w/2, h], [w, h], [w, h/2], [w, 0], [w/2, 0], [0, 0], [0, h/2]]).reshape(-1, 1, 2)
             if M is not None:
                 self.npts = cv2.perspectiveTransform(n_corners,
                                                      M).reshape(4, 2)
                 self.move = np.float32([(self.npts[3] + self.npts[0]) / 2,
                                         (self.npts[2] + self.npts[1]) / 2
                                         ]).reshape(-1)
                 if self.initial == 1:
                     self.corners_old = self.npts
                     self.initial = 0
                 ret, self.npts = stabilize_corners(self.corners_old,
                                                    self.npts)
                 if ret is True:
                     self.corners_old = self.npts
                 # print(self.move)
                 # print(self.move[0])
                 # self.M, _ = cv2.findHomography(self.npts.reshape(-1, 1, 2), cv2.perspectiveTransform(n_corners, M))
                 # print(self.npts)
                 # self.npts = np.int32(self.npts)
             self.glyph_found = True
         else:
             self.glyph_found = False