Example #1
def get_virt_x1x2_np(
        im_shape, F_gt, K, pts1_virt_b,
        pts2_virt_b):  ##  [RUI] TODO!!!!! Convert into seq loader!
    ## s.t. SHOULD BE ALL ZEROS: losses = utils_F.compute_epi_residual(pts1_virt_ori, pts2_virt_ori, F_gts, loss_params['clamp_at'])
    ## Reproject by minimizing distance to groundtruth epipolar lines
    pts1_virt, pts2_virt = cv2.correctMatches(F_gt,
                                              np.expand_dims(pts2_virt_b, 0),
                                              np.expand_dims(pts1_virt_b, 0))
    pts1_virt[np.isnan(pts1_virt)] = 0.
    pts2_virt[np.isnan(pts2_virt)] = 0.

    # nan1 = np.logical_and(
    #         np.logical_not(np.isnan(pts1_virt[:,:,0])),
    #         np.logical_not(np.isnan(pts1_virt[:,:,1])))
    # nan2 = np.logical_and(
    #         np.logical_not(np.isnan(pts2_virt[:,:,0])),
    #         np.logical_not(np.isnan(pts2_virt[:,:,1])))
    # _, midx = np.where(np.logical_and(nan1, nan2))
    # good_pts = len(midx)
    # while good_pts < num_pts_full:
    #     midx = np.hstack((midx, midx[:(num_pts_full-good_pts)]))
    #     good_pts = len(midx)
    # midx = midx[:num_pts_full]
    # pts1_virt = pts1_virt[:,midx]
    # pts2_virt = pts2_virt[:,midx]

    pts1_virt = homo_np(pts1_virt[0])
    pts2_virt = homo_np(pts2_virt[0])
    pts1_virt_normalized = (np.linalg.inv(K) @ pts1_virt.T).T
    pts2_virt_normalized = (np.linalg.inv(K) @ pts2_virt.T).T
    return pts1_virt_normalized, pts2_virt_normalized, pts1_virt, pts2_virt
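A minimal self-contained sketch (not from the original repository) of the property this loader relies on: after cv2.correctMatches, the corrected pairs satisfy the epipolar constraint x2^T F x1 = 0 up to numerical precision, so the residual the comment above mentions really is ~zero. The rotation, translation, and noise values below are arbitrary illustration choices.

import cv2
import numpy as np

def skew(v):
    # cross-product matrix [v]_x
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

theta = 0.1
R = np.array([[np.cos(theta), 0, np.sin(theta)],   # small rotation about y
              [0, 1, 0],
              [-np.sin(theta), 0, np.cos(theta)]])
t = np.array([1.0, 0.0, 0.2])
F = skew(t) @ R                                    # for K = I, E doubles as F

X = np.random.uniform([-1, -1, 4], [1, 1, 8], (20, 3))  # points in front of both cameras
x1 = X[:, :2] / X[:, 2:]                                # camera 1 at the origin
X2 = (R @ X.T).T + t
x2 = X2[:, :2] / X2[:, 2:]
x1 += np.random.normal(0, 1e-3, x1.shape)               # perturb off the epipolar lines
x2 += np.random.normal(0, 1e-3, x2.shape)

n1, n2 = cv2.correctMatches(F, x1[None], x2[None])
h1 = np.hstack([n1[0], np.ones((20, 1))])               # back to homogeneous
h2 = np.hstack([n2[0], np.ones((20, 1))])
print(np.abs(np.einsum('ij,jk,ik->i', h2, F, h1)).max())  # ~1e-12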
Example #2
def correctMatches_with_E(E, K, kps1, kps2, matches):
    """
    I am not so sure how to use this function, since it changes the coordinates of the points :<
    [ref](http://answers.opencv.org/question/341/python-correctmatches/?answer=402#post-id-402)
    :param E:
    :param K:
    :param kps1:
    :param kps2:
    :param matches:
    :return:
    """
    F_backward = pe_utils.find_F_from_E_and_K(E, K)
    """correctMatches(F, points1, points2[, newPoints1[, newPoints2]]) -> newPoints1, newPoints2"""

    pts1, pts2, _ = pe_utils.key_points_to_matched_pixel_points(
        kps1, kps2, matches)

    for i in range(10):
        print(pts1[i])

    pts1_tmp = np.reshape(pts1, (1, -1, 2))
    pts2_tmp = np.reshape(pts2, (1, -1, 2))

    newPts1, newPts2 = cv2.correctMatches(F_backward, pts1_tmp, pts2_tmp)

    newPts1 = newPts1.reshape(-1, 2)
    newPts2 = newPts2.reshape(-1, 2)

    for i in range(10):
        print(newPts1[i])

    print(
        "In correctMatches: pts1.shape:{}->newPts1.shape:{}, newPts2.shape:{}".
        format(pts1.shape, newPts1.shape, newPts2.shape))
    return newPts1, newPts2
Example #3
def computeRelativeOrientation(tie_pts, cam_m, distor):
    # computes the relative orientation using OpenCV's five-point algorithm

    tp1, tp2, tp1_u, tp2_u = UndistorTiePoints(tie_pts, cam_m, distor)

    # eight point algorithm
    #F, mask = cv2.findFundamentalMat(tp1[0, :], tp2[0, :], param1=0.1, param2=0.95, method = cv2.FM_RANSAC)
    #em = cam_m.T.dot(F).dot(cam_m)

    em, mask = cv2.findEssentialMat(tp1[0, :],
                                    tp2[0, :],
                                    threshold=0.05,
                                    prob=0.95,
                                    focal=cam_m[0, 0],
                                    pp=(cam_m[0, 2], cam_m[1, 2]))

    F = np.linalg.inv(cam_m.T).dot(em).dot(np.linalg.inv(cam_m))

    # optimal solution for triangulation of  object points
    p1, p2 = cv2.correctMatches(em, tp1_u, tp2_u)

    pts, R, t, mask = cv2.recoverPose(em, p1, p2)
    P = np.hstack((R, t))
    pm1 = np.eye(3, 4)

    pts_sps = compute3Dpoints(pm1, P, p1, p2)

    return pts_sps, P, p1, p2
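The pipeline above (findEssentialMat, then correctMatches on undistorted points, then recoverPose) can be exercised end to end on synthetic data. A hedged sketch: the intrinsics and pose are invented, and the cameraMatrix overload of findEssentialMat is used instead of the focal/pp one.

import cv2
import numpy as np

rng = np.random.default_rng(0)
K = np.array([[700.0, 0, 320], [0, 700.0, 240], [0, 0, 1]])
theta = 0.1
R_gt = np.array([[np.cos(theta), 0, np.sin(theta)],
                 [0, 1, 0],
                 [-np.sin(theta), 0, np.cos(theta)]])
t_gt = np.array([1.0, 0.1, 0.2])

X = rng.uniform([-2, -2, 4], [2, 2, 10], (100, 3))   # 3D points in front of both cameras
x1 = (K @ X.T).T
x1 = x1[:, :2] / x1[:, 2:]
X2 = (R_gt @ X.T).T + t_gt
x2 = (K @ X2.T).T
x2 = x2[:, :2] / x2[:, 2:]

E, mask = cv2.findEssentialMat(x1, x2, K, method=cv2.RANSAC, prob=0.999, threshold=1.0)
# refine the matches in normalized coordinates, as the example does
x1n = cv2.undistortPoints(x1.reshape(1, -1, 2), K, None)
x2n = cv2.undistortPoints(x2.reshape(1, -1, 2), K, None)
p1, p2 = cv2.correctMatches(E, x1n, x2n)
retval, R, t, mask = cv2.recoverPose(E, p1.reshape(-1, 2), p2.reshape(-1, 2))
print(np.allclose(R, R_gt, atol=1e-4), t.ravel())    # t matches t_gt up to scale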
Example #5
    def refine_points(self, F):
        n = len(self.px_planar_ref)
        p1, p2 = cv2.correctMatches(F,
                                    np.reshape(self.px_planar_ref, (1, n, 2)),
                                    np.reshape(self.px_planar_cur, (1, n, 2)))

        self.px_planar_ref = np.reshape(p1, (n, 2))
        self.px_planar_cur = np.reshape(p2, (n, 2))
Example #6
def refine_points(norm_pts1, norm_pts2, E):
    '''Refine the coordinates of the corresponding points using the Optimal Triangulation Method.'''
    # convert to 1xNx2 arrays for cv2.correctMatches
    refined_pts1 = np.array([ [pt[0] for pt in norm_pts1 ] ])
    refined_pts2 = np.array([ [pt[0] for pt in norm_pts2 ] ])
    refined_pts1, refined_pts2 = cv2.correctMatches(E, refined_pts1, refined_pts2)

    # refined_pts are 1xNx2 arrays
    return refined_pts1, refined_pts2
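Possible usage, assuming refine_points above is in scope (the matrix and points below are invented for illustration): the list comprehension unpacks pt[0] because it expects each point as a 1x2 row, i.e. the N x 1 x 2 layout that cv2.undistortPoints produces.

import numpy as np

norm_pts1 = np.random.rand(15, 1, 2)   # N x 1 x 2, as from cv2.undistortPoints
norm_pts2 = np.random.rand(15, 1, 2)
E = np.array([[0, 0, 0], [0, 0, -1.0], [0, 1.0, 0.2]])  # an arbitrary rank-2 matrix
r1, r2 = refine_points(norm_pts1, norm_pts2, E)
print(r1.shape)   # (1, 15, 2)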
Example #7
def generate_virtual_positive_corr(F):
    step = 0.1
    xx, yy = np.meshgrid(np.arange(-1, 1, step), np.arange(-1, 1, step))
    # Points in first image before projection
    pts1 = np.float32(np.vstack((xx.flatten(), yy.flatten())).T)
    # Points in second image before projection
    pts2 = np.float32(pts1)
    pts1, pts2 = pts1.reshape(1, -1, 2), pts2.reshape(1, -1, 2)
    pts1, pts2 = cv2.correctMatches(F.reshape(3, 3), pts1, pts2)
    return np.concatenate([pts1[0], pts2[0]], 1).astype(np.float32)
Example #9
def betterMatches(F, points1, points2):
    """ Minimize the geometric error between corresponding image coordinates.
    For more information look into OpenCV's docs for the cv2.correctMatches function."""

    # Reshaping for cv2.correctMatches
    points1 = np.reshape(points1, (1, points1.shape[0], 2))
    points2 = np.reshape(points2, (1, points2.shape[0], 2))

    newPoints1, newPoints2 = cv2.correctMatches(F, points1, points2)

    return newPoints1[0], newPoints2[0]
Example #10
def get_virtual_matches(ess_vec, intrinsic, vgrid_step=0.02):
    """Calculate inlier matches of an essential matrice
    First calcuate Fundamental matrix  from essential matrix and the camera intrinsics
    Fit matches using cv.correctMatches()
    
    Args:
        ess_vec: the essential matrice
        intrinsices: a tuple of 4 (K1, K2, K1_inv, K2_inv), the camera calibriation matrices and their inverse 
        vgrid_step: the resolution to calculate the virtual points grids that are used into for fitting
    
    Return:
        matches: the array consists of (1 / vgrid_step) ** 2 ) point correspondences
        F: the Fundamental matrix
    """

    xx, yy = np.meshgrid(np.arange(0, 1, vgrid_step),
                         np.arange(0, 1, vgrid_step))
    num_vpts = int((1 / vgrid_step)**2)
    E = ess_vec.reshape((3, 3))
    K1, K2, K1_inv, K2_inv = intrinsic
    F = K2_inv.T @ E @ K1_inv
    F = F / np.linalg.norm(F)  # Normalize F

    w1, h1 = 2 * K1[0, 2], 2 * K1[1, 2]
    w2, h2 = 2 * K2[0, 2], 2 * K2[1, 2]

    pts1_vgrid = np.float32(
        np.vstack((w1 * xx.flatten(), h1 * yy.flatten())).T)
    pts2_vgrid = np.float32(
        np.vstack((w2 * xx.flatten(), h2 * yy.flatten())).T)
    pts1_virt, pts2_virt = cv2.correctMatches(
        F, np.expand_dims(pts1_vgrid, axis=0),
        np.expand_dims(pts2_vgrid, axis=0))

    nan1 = np.logical_and(np.logical_not(np.isnan(pts1_virt[:, :, 0])),
                          np.logical_not(np.isnan(pts1_virt[:, :, 1])))
    nan2 = np.logical_and(np.logical_not(np.isnan(pts2_virt[:, :, 0])),
                          np.logical_not(np.isnan(pts2_virt[:, :, 1])))

    _, pids = np.where(np.logical_and(nan1, nan2))
    num_good_pts = len(pids)
    while num_good_pts < num_vpts:
        pids = np.hstack((pids, pids[:(num_vpts - num_good_pts)]))
        num_good_pts = len(pids)

    pts1_virt = pts1_virt[:, pids]
    pts2_virt = pts2_virt[:, pids]

    # Homogeneous
    ones = np.ones((pts1_virt.shape[1], 1))
    pts1_virt = np.hstack((pts1_virt[0], ones))
    pts2_virt = np.hstack((pts2_virt[0], ones))
    matches = (pts1_virt.astype(np.float32), pts2_virt.astype(np.float32))
    return matches, F
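A possible call, assuming get_virtual_matches and its imports are in scope. The intrinsics are invented, and E is built as [t]_x R for a small rotation so the geometry is non-degenerate; every returned homogeneous pair should then satisfy pts2 . F . pts1 ~ 0.

import numpy as np

theta = 0.2
R = np.array([[np.cos(theta), 0, np.sin(theta)],
              [0, 1, 0],
              [-np.sin(theta), 0, np.cos(theta)]])
tx = np.array([[0, -0.1, 0], [0.1, 0, -1.0], [0, 1.0, 0]])  # [t]_x for t = (1, 0, 0.1)
E = tx @ R
K = np.array([[800.0, 0, 320], [0, 800.0, 240], [0, 0, 1]])
K_inv = np.linalg.inv(K)

(pts1, pts2), F = get_virtual_matches(E.ravel(), (K, K, K_inv, K_inv))
print(pts1.shape, np.abs(np.einsum('ij,jk,ik->i', pts2, F, pts1)).max())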
Example #11
 def getFundamentalMat(self):
     '''Returns the f-mat of camera 2 to 1 and camera 3 to 1.'''
     ei,eii = self.getEpipoles()
     F21 = array([dot(dot(skew(ei),t),eii) for t in self.T]).T
     F31 = array([dot(dot(skew(eii),t.T),ei) for t in self.T]).T
     if self.x0 is not None and self.x1 is not None:
         a = r_[[self.x0[:2].T]]
         b = r_[[self.x1[:2].T]]
         pts0, pts1 = cv2.correctMatches(F21, a, b)
         self.x0[:2] = pts0[0].T
         self.x1[:2] = pts1[0].T
     return F21, F31
Example #13
def correct(pts1, pts2):
    '''
    :param pts1: shape: 1xNx2
    :param pts2: shape: 1xNx2
    :return: pts1_new: shape Nx2
    '''
    F = get_fundamental_Matrix()
    pts1_new, pts2_new = cv2.correctMatches(F, pts1, pts2)

    pts1_new = pts1_new[0] # Nx2

    # take the right camera pts prediction
    return pts1_new
Example #14
 def filter(self, pt0, pt1):
     try:
         # filter by epipolar constraint
         F, msk = cvu.F(pt0, pt1)
         if F is not None:
             # 1. apply mask
             pt0 = pt0[msk]
             pt1 = pt1[msk]
             # 2. correct matches
             pt0, pt1 = cv2.correctMatches(F, pt0[None, ...], pt1[None, ...])
             pt0, pt1 = pt0[0], pt1[0]
     except Exception as e:
         print('exception', e)
     return pt0, pt1
Example #15
    def correctMatches(self, e_gt):
        step = 0.1
        xx, yy = np.meshgrid(np.arange(-1, 1, step), np.arange(-1, 1, step))
        # Points in first image before projection
        pts1_virt_b = np.float32(np.vstack((xx.flatten(), yy.flatten())).T)
        # Points in second image before projection
        pts2_virt_b = np.float32(pts1_virt_b)
        pts1_virt_b = pts1_virt_b.reshape(1, -1, 2)
        pts2_virt_b = pts2_virt_b.reshape(1, -1, 2)

        pts1_virt_b, pts2_virt_b = cv2.correctMatches(e_gt.reshape(3, 3),
                                                      pts1_virt_b, pts2_virt_b)

        return pts1_virt_b.squeeze(), pts2_virt_b.squeeze()
Example #16
def get_range(correlations, F, P1, P2):
    """
    Using the set of correlations produced by match_points, identify the distance of detected people in the scene.

    :param correlations: List of correlations, each element a list of one or more pairs of points, to use to
                         calculate distances
    :returns: A list with the distances for each correlated point.
    """

    all_dist = []

    for left_group, right_group in correlations:
        # multiple intersection points are provided, average out the distances
        left_corrected, right_corrected = cv2.correctMatches(
            F,
            np.array([[left_group]]).astype(float),
            np.array([[right_group]]).astype(float))
        collection = cv2.triangulatePoints(P1, P2, left_corrected,
                                           right_corrected)
        triangulated = np.mean(collection, axis=1).reshape(
            4, 1)  # Average the results of the triangulation

        # print(left_group)
        # print(left_corrected)
        # print("before mean")
        # print(collection)
        # print(triangulated)

        # homogeneous to normal coords by dividing by the last element. squares are 25mm.
        # for some reason, was getting distances *10 of actual so divided the size of the square here by 10
        in_m = (triangulated[0:3] / triangulated[3]) * 0.0025
        x, y, z = (float(n) for n in in_m)

        # z <= 0 means behind the camera -- not actually possible.
        if z > 0:
            # euclidean distance in 3d space with known coords. camera would be (0,0,0)
            distance = (
                x**2 + y**2 +
                z**2)**0.5  # length of vector to get distance to object

            # reference the distance against the average of the intersection points for each image.
            #             left_range[tuple(np.mean(left_group, axis=0))] = (distance, x, y, z)
            #             right_range[tuple(np.mean(right_group, axis=0))] = (distance, x, y, z)
            all_dist.append(distance)
            print(distance)
            print("---------------")

    return all_dist
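The core chain above (correctMatches, triangulatePoints, dehomogenize, Euclidean norm) in isolation, with synthetic normalized cameras rather than the author's rig; the pose and the test point are arbitrary illustration values.

import cv2
import numpy as np

theta = 0.2
R = np.array([[np.cos(theta), 0, np.sin(theta)],
              [0, 1, 0],
              [-np.sin(theta), 0, np.cos(theta)]])
t = np.array([[-1.0], [0.0], [0.1]])
P1 = np.hstack([np.eye(3), np.zeros((3, 1))])   # camera 1 at the origin
P2 = np.hstack([R, t])
tx = np.array([[0, -t[2, 0], t[1, 0]],
               [t[2, 0], 0, -t[0, 0]],
               [-t[1, 0], t[0, 0], 0]])
F = tx @ R                                      # E, acting as F in normalized coords

X = np.array([[0.3], [-0.2], [6.0], [1.0]])     # ground-truth point, 6 units deep
x1 = (P1 @ X)[:2] / (P1 @ X)[2]
x2 = (P2 @ X)[:2] / (P2 @ X)[2]

lc, rc = cv2.correctMatches(F, x1.T.reshape(1, 1, 2), x2.T.reshape(1, 1, 2))
Xh = cv2.triangulatePoints(P1, P2, lc.reshape(2, 1), rc.reshape(2, 1))
Xe = (Xh[:3] / Xh[3]).ravel()
print(np.linalg.norm(Xe))                       # distance from camera 1, ~6.01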
Example #17
    def optimal_triangulation(self, kpts1, kpts2):
        # For each given point correspondence points1[i] <-> points2[i], and a
        # fundamental matrix F, computes the corrected correspondences
        # new_points1[i] <-> new_points2[i] that minimize the geometric error
        # d(points1[i], new_points1[i])^2 + d(points2[i], new_points2[i])^2,
        # subject to the epipolar constraint new_points2^t * F * new_points1 = 0
        # Here we are using the OpenCV's function CorrectMatches.

        # @param kpts1 : keypoints in one image
        # @param kpts2 : keypoints in the other image
        # @return new_points1 : the optimized points1
        # @return new_points2 : the optimized points2

        # First, we have to reshape the keypoints. They must be a 1 x n x 2
        # array.

        kpts1 = np.float32(kpts1)  # Points in the first camera
        kpts2 = np.float32(kpts2)  # Points in the second camera

        # 3D Matrix : [kpts1[0] kpts[1]... kpts[n]]

        pt1 = np.reshape(kpts1, (1, len(kpts1), 2))

        pt2 = np.reshape(kpts2, (1, len(kpts2), 2))

        new_points1, new_points2 = cv2.correctMatches(self.F, pt2, pt1)

        self.correctedkpts1 = new_points1
        self.correctedkpts2 = new_points2

        # Transform to a 2D Matrix: 2xn

        kpts1 = (np.reshape(new_points1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(new_points2, (len(kpts2), 2))).T

        print(np.shape(kpts1))

        points3D = cv2.triangulatePoints(self.cam1.P, self.cam2.P, kpts2, kpts1)

        self.structure = points3D / points3D[3]  # Normalize points [x, y, z, 1]

        array = np.zeros((4, len(self.structure[0])))

        for i in range(len(self.structure[0])):

            array[:, i] = self.structure[:, i]

        self.structure = array
Example #18
    def correctMatches(self, p1, p2):
        if p1.shape[0] >= 8 and p1.shape[0] == p2.shape[0]:
            start = time.time()
            F, mask = cv.findFundamentalMat(p1, p2, method=cv.RANSAC, ransacReprojThreshold=0.05, confidence=0.95)
            
            if F is not None:
                #logging.debug("{0} correctMatches: {1}".format(self.loggingName, (F, mask)))
                mask = mask.ravel()
                p1 = np.expand_dims(p1[mask==1], axis=0)
                p2 = np.expand_dims(p2[mask==1], axis=0)
                p1_n, p2_n = cv.correctMatches(F, p1, p2)
                end = time.time()
                self.correctMatchesTime = end-start

                return F, np.reshape(p1_n, (-1,2)), np.reshape(p2_n, (-1,2))
        return None, p1, p2
Example #19
    def generate_virtual_points(
            self, step: float = 0.01) -> Tuple[np.ndarray, np.ndarray]:
        # set grid points for each image
        grid_x, grid_y = np.meshgrid(np.arange(0, 1, step),
                                     np.arange(0, 1, step))
        num_points_eval = len(grid_x.flatten())

        pts1_grid = np.float32(
            np.vstack((self.image_1.shape[1] * grid_x.flatten(),
                       self.image_1.shape[0] *
                       grid_y.flatten())).T)[np.newaxis, :, :]
        pts2_grid = np.float32(
            np.vstack((self.image_2.shape[1] * grid_x.flatten(),
                       self.image_2.shape[0] *
                       grid_y.flatten())).T)[np.newaxis, :, :]  # height scales y

        pts1_virt, pts2_virt = cv2.correctMatches(self.f_matrix_forward,
                                                  pts1_grid, pts2_grid)

        valid_1 = np.logical_and(
            np.logical_not(np.isnan(pts1_virt[:, :, 0])),
            np.logical_not(np.isnan(pts1_virt[:, :, 1])),
        )
        valid_2 = np.logical_and(
            np.logical_not(np.isnan(pts2_virt[:, :, 0])),
            np.logical_not(np.isnan(pts2_virt[:, :, 1])),
        )

        _, valid_idx = np.where(np.logical_and(valid_1, valid_2))
        good_pts = len(valid_idx)

        while good_pts < num_points_eval:
            valid_idx = np.hstack(
                (valid_idx, valid_idx[:(num_points_eval - good_pts)]))
            good_pts = len(valid_idx)

        valid_idx = valid_idx[:num_points_eval]

        pts1_virt = pts1_virt[:, valid_idx]
        pts2_virt = pts2_virt[:, valid_idx]

        ones = np.ones((pts1_virt.shape[1], 1))

        pts1_virt = np.hstack((pts1_virt[0], ones))
        pts2_virt = np.hstack((pts2_virt[0], ones))
        return pts1_virt, pts2_virt
Example #20
def analytical_update(x_a, x_b, translation, rotation):
    """
    Computes the analytic solution to the point match correction problem
    (see Section 12.5 of Multiple View Geometry book)
    """
    n = x_a.shape[1]
    x_a = x_a[:2, :].T.reshape(1, n, 2)  # transpose so each point pairs up as (x, y)
    x_b = x_b[:2, :].T.reshape(1, n, 2)

    x_a_cor, x_b_cor = cv.correctMatches(
        mt.skew(translation) @ rotation.R, x_a, x_b)

    x_a_cor = x_a_cor[0, :, :].T
    x_b_cor = x_b_cor[0, :, :].T
    x_a_cor = np.block([[x_a_cor], [np.ones(n)]])
    x_b_cor = np.block([[x_b_cor], [np.ones(n)]])

    return x_a_cor, x_b_cor
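Since mt.skew and the rotation wrapper are external to this snippet, here is a hedged usage sketch with minimal stand-ins inlined (the pose and points are arbitrary illustration values); it performs the same cv.correctMatches call the function above makes.

import cv2 as cv
import numpy as np

def skew(v):                         # stand-in for mt.skew
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

theta = 0.3
R = np.array([[np.cos(theta), 0, np.sin(theta)],
              [0, 1, 0],
              [-np.sin(theta), 0, np.cos(theta)]])
translation = np.array([1.0, 0.2, 0.1])

x_a = np.array([[0.10, 0.30], [0.20, -0.10], [1.0, 1.0]])   # 3 x n homogeneous
x_b = np.array([[0.25, 0.45], [0.22, -0.12], [1.0, 1.0]])
x_a_cor, x_b_cor = cv.correctMatches(skew(translation) @ R,
                                     x_a[:2, :].T.reshape(1, -1, 2),
                                     x_b[:2, :].T.reshape(1, -1, 2))
print(x_a_cor[0], x_b_cor[0])        # corrected pairs satisfy the epipolar constraint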
Example #21
    def init_traj(self,error=10,inlier_only=False):
        '''
        Select the first two cams in the sequence, compute fundamental matrix, triangulate points
        '''

        self.select_most_overlap(init=True)

        t1, t2 = self.sequence[0], self.sequence[1]
        K1, K2 = self.cameras[t1].K, self.cameras[t2].K

        # Find correspondences
        if self.cameras[t1].fps > self.cameras[t2].fps:
            d1, d2 = util.match_overlap(self.detections_global[t1], self.detections_global[t2])
        else:
            d2, d1 = util.match_overlap(self.detections_global[t2], self.detections_global[t1])
        
        # Compute fundamental matrix
        F,inlier = ep.computeFundamentalMat(d1[1:],d2[1:],error=error)
        E = np.dot(np.dot(K2.T,F),K1)

        if not inlier_only:
            inlier = np.ones(len(inlier))
        x1, x2 = util.homogeneous(d1[1:,inlier==1]), util.homogeneous(d2[1:,inlier==1])

        # Find corrected corresponding points for optimal triangulation
        N = d1[1:,inlier==1].shape[1]
        pts1=d1[1:,inlier==1].T.reshape(1,-1,2)
        pts2=d2[1:,inlier==1].T.reshape(1,-1,2)
        m1,m2 = cv2.correctMatches(F,pts1,pts2)
        x1,x2 = util.homogeneous(np.reshape(m1,(-1,2)).T), util.homogeneous(np.reshape(m2,(-1,2)).T)

        mask = np.logical_not(np.isnan(x1[0]))
        x1 = x1[:,mask]
        x2 = x2[:,mask]

        # Triangulate points
        X, P = ep.triangulate_from_E(E,K1,K2,x1,x2)
        self.traj = np.vstack((d1[0][inlier==1][mask],X[:-1]))

        # Assign the camera matrix for these two cameras
        self.cameras[t1].P = np.dot(K1,np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]]))
        self.cameras[t2].P = np.dot(K2,P)
        self.cameras[t1].decompose()
        self.cameras[t2].decompose()
Example #22
def transto3d(image1_point,
              image2_point,
              camera1,
              camera2,
              fundamental_matrix=None):
    p1 = np.array(image1_point, dtype=np.float32).reshape((1, 1, 2))
    p2 = np.array(image2_point, dtype=np.float32).reshape((1, 1, 2))

    # print(fundamental_matrix)
    if fundamental_matrix is not None:
        # print("before correctMatches: p1:{} p2:{}".format(p1, p2))
        p1, p2 = cv2.correctMatches(fundamental_matrix, p1, p2)
        # print("after correctMatches: p1:{} p2:{}".format(p1, p2))

    X = cv2.triangulatePoints(camera1.camera_matrix.proj_mat,
                              camera2.camera_matrix.proj_mat, p1, p2)
    X /= X[3]
    X = X[:3].T.reshape((1, 3)).tolist()
    return X
Example #23
def refine_matches(x1, x2, F):
    # use the optimal triangulation method (Algorithm 12.1 from MVG)
    nx1, nx2 = cv2.correctMatches(F, np.reshape(x1, (1, -1, 2)), np.reshape(x2, (1, -1, 2)))

    # get the points back in matrix configuration
    xr1 = np.float32(np.reshape(nx1,(-1, 2)))
    xr2 = np.float32(np.reshape(nx2,(-1, 2)))

    if h.debug >= 0:
        print("  Matches corrected with Optimal Triangulation Method")

    if h.debug > 2: 
        print("xr1: \n", xr1)
        print("xr2: \n", xr2)
        print("after correctMatches: ")
        xrh1 = make_homogeneous(xr1)
        xrh2 = make_homogeneous(xr2)
        mth.print_epipolar_eq(xrh1, xrh2, F)

    return xr1.T, xr2.T 
Example #24
    def compute_virtual_points(self, index):
        """Compute virtual points for a single sample.

        Args:
            index (int): sample index

        Returns:
            tuple: virtual points in first image, virtual points in second image
        """
        pts2_virt, pts1_virt = cv2.correctMatches(self.F[index],
                                                  self.pts2_grid[index],
                                                  self.pts1_grid[index])

        valid_1 = np.logical_and(
            np.logical_not(np.isnan(pts1_virt[:, :, 0])),
            np.logical_not(np.isnan(pts1_virt[:, :, 1])),
        )
        valid_2 = np.logical_and(
            np.logical_not(np.isnan(pts2_virt[:, :, 0])),
            np.logical_not(np.isnan(pts2_virt[:, :, 1])),
        )

        _, valid_idx = np.where(np.logical_and(valid_1, valid_2))
        good_pts = len(valid_idx)

        while good_pts < self.num_points_eval:
            valid_idx = np.hstack(
                (valid_idx, valid_idx[:(self.num_points_eval - good_pts)]))
            good_pts = len(valid_idx)

        valid_idx = valid_idx[:self.num_points_eval]

        pts1_virt = pts1_virt[:, valid_idx]
        pts2_virt = pts2_virt[:, valid_idx]

        ones = np.ones((pts1_virt.shape[1], 1))

        pts1_virt = np.hstack((pts1_virt[0], ones))
        pts2_virt = np.hstack((pts2_virt[0], ones))

        return pts1_virt, pts2_virt
Example #25
    def opt_triangulation(self, x1, x2, P1, P2):
        # For each given point correspondence points1[i] <-> points2[i], and a
        # fundamental matrix F, computes the corrected correspondences
        # new_points1[i] <-> new_points2[i] that minimize the geometric error
        # d(points1[i], new_points1[i])^2 + d(points2[i], new_points2[i])^2,
        # subject to the epipolar constraint new_points2^t * F * new_points1 = 0
        # Here we are using the OpenCV's function CorrectMatches.

        # @param x1: points in the first camera, list of vectors x, y
        # @param x2: points in the second camera
        # @param P1: Projection matrix of the first camera
        # @param P2: Projection matrix of the second camera
        # @return points3d: Structure of the scene, 3 x n matrix

        x1 = np.float32(x1)  # Inhomogeneous
        x2 = np.float32(x2)

        # 3D Matrix : [kpts1[0] kpts[1]... kpts[n]]

        x1 = np.reshape(x1, (1, len(x1), 2))

        x2 = np.reshape(x2, (1, len(x2), 2))

        self.correctedkpts1, self.correctedkpts2 = cv2.correctMatches(self.F,
                                                                      x1, x2)
        # Now, reshape to n x 2 shape
        self.correctedkpts1 = self.correctedkpts1[0]
        self.correctedkpts2 = self.correctedkpts2[0]
        # and make homogeneous
        x1 = self.make_homog(np.transpose(self.correctedkpts1))
        x2 = self.make_homog(np.transpose(self.correctedkpts2))

        # Triangulate
        # This function needs as arguments the coordinates of the keypoints
        # (form 3 x n) and the projection matrices

        points3d = self.triangulate_list(x1, x2, P2, P1)

        self.structure = points3d  # 3 x n matrix

        return points3d
Example #27
def compute_virtual_points(img_h: int, img_w: int, i2_F_i1: np.ndarray, step: float = 0.5):
	"""Compute virtual points for a single sample.
	Args:
		index (int): sample index
	Returns:
		tuple: virtual points in first image, virtual points in second image
	"""
	
	# set grid points for each image
	grid_x, grid_y = np.meshgrid(
		np.arange(0, img_w, int(step * img_w)),
		np.arange(0, img_h, int(step * img_h))
	)
	num_points_eval = len(grid_x.flatten())

	grid_x = grid_x.flatten().reshape(-1,1)
	grid_y = grid_y.flatten().reshape(-1,1)

	pts1_grid = np.hstack([grid_x, grid_y]).reshape(1,-1,2)
	pts2_grid = copy.deepcopy(pts1_grid).reshape(1,-1,2)

	pts1_virt, pts2_virt = cv2.correctMatches(i2_F_i1, pts1_grid, pts2_grid)
	pts1_virt = pts1_virt.squeeze()
	pts2_virt = pts2_virt.squeeze()

	valid = np.logical_and.reduce(
		[
			np.logical_not(np.isnan(pts1_virt[:, 0])),
			np.logical_not(np.isnan(pts1_virt[:, 1])),
			np.logical_not(np.isnan(pts2_virt[:, 0])),
			np.logical_not(np.isnan(pts2_virt[:, 1]))
		])
	valid_idx = np.where(valid)[0]
	good_pts = len(valid_idx)

	pts1_virt = pts1_virt[valid_idx]
	pts2_virt = pts2_virt[valid_idx]

	return pts1_virt, pts2_virt
Example #29
 def getCorrespodingPoints(self, leftCurves, rightCurves):
     if leftCurves.size and rightCurves.size:
         leftCurves = cv2.convertPointsToHomogeneous(leftCurves)
         rightCurves = cv2.convertPointsToHomogeneous(rightCurves)
         # distMat = np.empty([len(leftCurves),len(rightCurves)])
         distMatCV = np.empty([len(leftCurves),len(rightCurves)])
         # minDistMat = np.empty([len(leftCurves), 2])
         for i in range(0, len(leftCurves)):
             # lineRight = np.matmul(self.fundamentalMatrix, leftCurves[i])
             lineRightCV = cv2.computeCorrespondEpilines(leftCurves[i], 1, self.fundamentalMatrix)
             for j in range(0, len(rightCurves)):
                 # distMat[i, j] = np.abs(np.matmul(rightCurves[j], lineRight))
                 distMatCV[i, j] = np.abs(np.matmul(rightCurves[j], lineRightCV[0,0,:]))
         leftIdx = []
         rightIdx = []
         for i in range(len(leftCurves)):
             potentialRightIdx = np.argmin(distMatCV[i, :])
             potentialLeftIdx = np.argmin(distMatCV[:, potentialRightIdx])
             if potentialLeftIdx == i and distMatCV[i, potentialRightIdx] < 0.5:
                 leftIdx.append(potentialLeftIdx)
                 rightIdx.append(potentialRightIdx)
         if leftIdx and rightIdx:
             matchedLeftPoints = cv2.convertPointsFromHomogeneous(
                 np.array([leftCurves[i] for i in leftIdx])).reshape([1, len(leftIdx), 2])
             matchedRightPoints = cv2.convertPointsFromHomogeneous(
                 np.array([rightCurves[i] for i in rightIdx])).reshape([1, len(rightIdx), 2])
             matchedLeftPointsCV, matchedRightPointsCV = cv2.correctMatches(
                 self.fundamentalMatrix, matchedLeftPoints, matchedRightPoints)
             matchedLeftPointsCV = matchedLeftPointsCV.reshape([len(leftIdx), 2])
             matchedRightPointsCV = matchedRightPointsCV.reshape([len(rightIdx), 2])
         else:
             matchedLeftPointsCV = []
             matchedRightPointsCV = []
     else:
         matchedLeftPointsCV = []
         matchedRightPointsCV = []
     return matchedLeftPointsCV, matchedRightPointsCV
Example #30
def triangulatePoints(pts1, pts2, F, P1, P2):
    pts1 = np.array([pts1[:]])
    pts2 = np.array([pts2[:]])
    npts1, npts2 = cv2.correctMatches(F, pts1, pts2)
    #npts1 = unNormalizePoints(K,npts1)
    #npts2 = unNormalizePoints(K,npts2)
    npts1 = npts1[0]
    npts2 = npts2[0]
    #P1 = np.copy(P1)
    #P2 = np.copy(P2)
    #P1[0:3,0:3] = np.linalg.inv(Tprime).dot(P1[0:3,0:3]).dot(T)
    #P2[0:3,0:3] = np.linalg.inv(Tprime).dot(P2[0:3,0:3]).dot(T)
    #P1 = createP(np.identity(3),c=np.array([0,0,0]))
    #P2 = createP(np.identity(3),R=rot, c=translation)
    p11 = P1[0, :]
    p12 = P1[1, :]
    p13 = P1[2, :]
    p21 = P2[0, :]
    p22 = P2[1, :]
    p23 = P2[2, :]
    X = np.zeros((0, 4))
    #invK = np.linalg.inv(K)
    #print(invK)
    for npt1, npt2 in zip(npts1, npts2):
        A = np.zeros((0, 4))
        A = np.vstack([A, npt1[0] * p13 - p11])
        A = np.vstack([A, npt1[1] * p13 - p12])
        A = np.vstack([A, npt2[0] * p23 - p21])
        A = np.vstack([A, npt2[1] * p23 - p22])
        #A = A/np.linalg.norm(A)
        d, u, v = cv2.SVDecomp(A, flags=cv2.SVD_FULL_UV)
        pos = v[3, :]
        pos /= pos[3]
        #print(invK.dot(pos[0:3]))
        X = np.vstack([X, pos])
    #print(X)
    return X
Example #31
    def getPoseAndPoints(self, pt1, pt2, midx, method):
        # == 0 :  unroll parameters ==
        Kmat = self.K_
        distCoeffs = self.dC_

        # rectify input
        pt1 = np.float32(pt1)
        pt2 = np.float32(pt2)

        # find fundamental matrix
        Fmat, msk = cv2.findFundamentalMat(pt1, pt2,
                method=method,
                param1=0.1, param2=0.999) # TODO : expose these thresholds
        if msk is None:
            return None
        msk = np.asarray(msk[:,0]).astype(bool)

        # filter points + bookkeeping mask
        pt1 = pt1[msk]
        pt2 = pt2[msk]
        midx = midx[msk]

        # correct matches
        pt1 = pt1[None, ...] # add axis 0
        pt2 = pt2[None, ...]
        pt2, pt1 = cv2.correctMatches(Fmat, pt1, pt2) # TODO : not sure if this is necessary
        pt1 = pt1[0, ...] # remove axis 0
        pt2 = pt2[0, ...]

        # filter NaN
        msk = np.logical_and(np.isfinite(pt1), np.isfinite(pt2))
        msk = np.all(msk, axis=-1)

        # filter points + bookkeeping mask
        pt1 = pt1[msk]
        pt2 = pt2[msk]
        midx = midx[msk]

        if len(pt1) <= 8:
            # Insufficient # of points
            # TODO : expose this threshold
            return None

        # TODO : expose these thresholds
        Emat, msk = cv2.findEssentialMat(pt1, pt2, Kmat,
                method=method, prob=0.999, threshold=0.1)
        msk = np.asarray(msk[:,0]).astype(bool)

        # filter points + bookkeeping mask
        pt1 = pt1[msk]
        pt2 = pt2[msk]
        midx = midx[msk]

        n_in, R, t, msk, _ = cv2.recoverPose(Emat,
                pt1,
                pt2,
                cameraMatrix=Kmat,
                distanceThresh=1000.0)#np.inf) # TODO : or something like 10.0/s ??
        msk = np.asarray(msk[:,0]).astype(bool)

        if msk.sum() <= 0:
            return None

        # filter points + bookkeeping mask
        pt1 = pt1[msk]
        pt2 = pt2[msk]
        midx = midx[msk]

        # validate triangulation
        try:
            pts_h = cv2.triangulatePoints(
                    Kmat.dot(np.eye(3,4)),
                    Kmat.dot(np.concatenate([R, t], axis=1)),
                    pt1[None,...],
                    pt2[None,...]).astype(np.float32)
        except Exception as e:
            print(R)
            print(t)
            print(pt1.shape, pt1.dtype)
            print(pt2.shape, pt2.dtype)

        # PnP Validation
        #pts3 = (pts_h[:3] / pts_h[3:]).T
        #_, rvec, tvec, inliers = res = cv2.solvePnPRansac(
        #        pts3, pt2, self.K2_, self.dC_,
        #        useExtrinsicGuess=False,
        #        iterationsCount=1000,
        #        reprojectionError=2.0
        #        )
        #print 'PnP Validation'
        #print tx.euler_from_matrix(R), t
        #print rvec, tvec

        # Apply NaN/Inf Check
        msk = np.all(np.isfinite(pts_h),axis=0)
        pt1 = pt1[msk]
        pt2 = pt2[msk]
        midx = midx[msk]
        pts_h = pts_h[:, msk]

        return [R, t, pt1, pt2, midx, pts_h]
Example #32
def run():
    global pts3
    global pts4

    if simulation and view:
        plot.plot3D(data3D, 'Original 3D Data')

    if view:
        plot.plot2D(pts1_raw, name='First Statics (Noise not shown)')
        plot.plot2D(pts2_raw, name='Second Statics (Noise not shown)')

    # FUNDAMENTAL MATRIX
    F = getFundamentalMatrix(pts1, pts2)

    # ESSENTIAL MATRIX (HZ 9.12)
    E, w, u, vt = getEssentialMatrix(F, K1, K2)

    # PROJECTION/CAMERA MATRICES from E (HZ 9.6.2)
    P1, P2 = getNormalisedPMatrices(u, vt)
    P1_mat = np.mat(P1)
    P2_mat = np.mat(P2)

    # FULL PROJECTION MATRICES (with K) P = K[Rt]
    KP1 = K1 * P1_mat
    KP2 = K2 * P2_mat

    print "\n> KP1:\n", KP1
    print "\n> KP2:\n", KP2

    # SYNCHRONISATION + CORRECTION
    if rec_data and simulation is False:
        print "---Synchronisation---"
        pts3, pts4 = synchroniseGeometric(pts3, pts4, F)

        pts3 = pts3.reshape((1, -1, 2))
        pts4 = pts4.reshape((1, -1, 2))
        newPoints3, newPoints4 = cv2.correctMatches(F, pts3, pts4)
        pts3 = newPoints3.reshape((-1, 2))
        pts4 = newPoints4.reshape((-1, 2))

    elif simulation:
        print "> Simulation: Use whole point set for reconstruction"
        pts3 = pts1
        pts4 = pts2

    # Triangulate the trajectory
    p3d = triangulateCV(KP1, KP2, pts3, pts4)

    # Triangulate goalposts
    if simulation is False:
        goalPosts = triangulateCV(KP1, KP2, postPts1, postPts2)

    # SCALING AND PLOTTING
    if simulation:
        if view:
            plot.plot3D(p3d, 'Simulation Reconstruction')
        reprojectionError(K1, P1_mat, K2, P2_mat, pts3, pts4, p3d)
        p3d = simScale(p3d)
        if view:
            plot.plot3D(p3d, 'Scaled Simulation Reconstruction')

    else:
        # add the post point data into the reconstruction for context
        if len(postPts1) == 4:
            print "> Concatenate goal posts to trajectory"
            pts3_gp = np.concatenate((postPts1, pts3), axis=0)
            pts4_gp = np.concatenate((postPts2, pts4), axis=0)
            p3d_gp = np.concatenate((goalPosts, p3d), axis=0)

        scale = getScale(goalPosts)

        scaled_gp_only = [[a * scale for a in inner] for inner in goalPosts]
        scaled_gp = [[a * scale for a in inner] for inner in p3d_gp]
        scaled = [[a * scale for a in inner] for inner in p3d]

        if view:
            plot.plot3D(scaled_gp, 'Scaled 3D Reconstruction')
        reprojectionError(K1, P1_mat, K2, P2_mat, pts3_gp, pts4_gp, p3d_gp)

        getMetrics(scaled, scaled_gp_only)
        scaled_gp = transform(scaled_gp)
        if view:
            plot.plot3D(scaled_gp, 'Final (Reorientated) 3D Reconstruction')
        if ground_truth_provided:
            reconstructionError(data3D, scaled_gp)

        # write X Y Z to file
        outfile = open('sessions/' + clip + '/3d_out.txt', 'w')
        for p in scaled_gp:
            p0 = round(p[0], 2)
            p1 = round(p[1], 2)
            p2 = round(p[2], 2)
            string = str(p0) + ' ' + str(p1) + ' ' + str(p2)
            outfile.write(string + '\n')
        outfile.close()
Example #33
    def optimal_triangulation(self, kpts1, kpts2, P1=None, P2=None, F=None):
        """This method computes the structure of the scene given the image
        coordinates of a 3D point :math:`\\mathbf{X}` in two views and the
        camera matrices of those views.

        As Hartley and Zisserman said in their book (HZ_), *naive triangulation
        by back-projecting rays from measured image points will fail, because
        the rays will not intersect in general, due to errors in the measured
        image coordinates*. In order to triangulate properly the image points
        it is necessary to estimate a best solution for the point in
        :math:`\\mathbb{R}^3`.

        The method proposed in HZ_, which is **projective-invariant**, consists
        of estimating a 3D point :math:`\\hat{\\mathbf{X}}` which exactly
        satisfies the supplied camera geometry (i.e., the given camera matrices),
        so it projects as

        .. math::

            \\hat{\\mathbf{x}} = P\\hat{\\mathbf{X}}

        .. math::

            \\hat{\\mathbf{x}}' = P'\\hat{\\mathbf{X}}

        and the aim is to estimate :math:`\\hat{\\mathbf{X}}` from the image
        measurements :math:`\\mathbf{x}` and :math:`\\mathbf{x}'`. The MLE,
        under the assumption of Gaussian noise is given by the point
        :math:`\\hat{\\mathbf{X}}` that minimizes the **reprojection error**

        .. math::

            \\epsilon(\\mathbf{x}, \\mathbf{x}') = d(\\mathbf{x},
                                          \\hat{\\mathbf{x}})^2 + d(\\mathbf{x}'
                                           ,\\hat{\\mathbf{x}}')^2

        subject to

        .. math::

            \\hat{\\mathbf{x}}'^TF\\hat{\\mathbf{x}} = 0

        where :math:`d(*,*)` is the Euclidean distance between the points.

        .. image:: ../Images/triangulation.png

        So, the proposed algorithm by Hartley and Zisserman in their book is
        first to find the corrected image points :math:`\\hat{\\mathbf{x}}` and
        :math:`\\hat{\\mathbf{x}}'` minimizing :math:`\\epsilon(\\mathbf{x},
        \\mathbf{x}')` and then compute :math:`\\hat{\\mathbf{X}}'` using the
        DLT triangulation method (see HZ_ chapter 12).

        :param kpts1: Measured image points in the first image,
                      :math:`\\mathbf{x}`.
        :param kpts2: Measured image points in the second image,
                      :math:`\\mathbf{x}'`.
        :param P1: First camera, :math:`P`.
        :param P2: Second camera, :math:`P'`.
        :param F: Fundamental matrix.
        :type kpts1: Numpy nx2 ndarray
        :type kpts2: Numpy nx2 ndarray
        :type P1: Numpy 3x4 ndarray
        :type P2: Numpy 3x4 ndarray
        :type F: Numpy 3x3 ndarray

        :returns: The two view scene structure :math:`\\hat{\\mathbf{X}}` and
                  the corrected image points :math:`\\hat{\\mathbf{x}}` and
                  :math:`\\hat{\\mathbf{x}}'`.
        :rtype: * :math:`\\hat{\\mathbf{X}}` :math:`\\rightarrow`  Numpy nx3 ndarray
                * :math:`\\hat{\\mathbf{x}}` and :math:`\\hat{\\mathbf{x}}'`
                  :math:`\\rightarrow` Numpy nx2 ndarray.

        """

        kpts1 = np.float32(kpts1)  # Points in the first camera
        kpts2 = np.float32(kpts2)  # Points in the second camera

        # 3D Matrix : [kpts1[0] kpts[1]... kpts[n]]

        pt1 = np.reshape(kpts1, (1, len(kpts1), 2))

        pt2 = np.reshape(kpts2, (1, len(kpts2), 2))

        new_points1, new_points2 = cv2.correctMatches(self.F, pt2, pt1)

        self.correctedkpts1 = new_points1
        self.correctedkpts2 = new_points2

        # Transform to a 2D Matrix: 2xn

        kpts1 = (np.reshape(new_points1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(new_points2, (len(kpts2), 2))).T

        #print(np.shape(kpts1))

        points3D = cv2.triangulatePoints(self.cam1.P, self.cam2.P, kpts2,
                                         kpts1)

        self.structure = points3D / points3D[
            3]  # Normalize points [x, y, z, 1]

        array = np.zeros((4, len(self.structure[0])))

        for i in range(len(self.structure[0])):

            array[:, i] = self.structure[:, i]

        self.structure = array
Example #34
        cv2.circle(im, (int(im1_pts[i, 0]), int(im1_pts[i, 1])), 2,
                   (255, 0, 0), 2)
        cv2.circle(im, (int(im2_pts[i, 0] + im1.shape[1]), int(im2_pts[i, 1])),
                   2, (255, 0, 0), 2)

    im1_pts_augmented = np.zeros((1, im1_pts.shape[0], im1_pts.shape[1]))
    im1_pts_augmented[0, :, :] = im1_pts
    im2_pts_augmented = np.zeros((1, im2_pts.shape[0], im2_pts.shape[1]))
    im2_pts_augmented[0, :, :] = im2_pts

    im1_pts_ud = cv2.undistortPoints(im1_pts_augmented, K, D)
    im2_pts_ud = cv2.undistortPoints(im2_pts_augmented, K, D)

    # the points are undistorted (normalized), so this really estimates E
    E, mask = cv2.findFundamentalMat(im1_pts_ud, im2_pts_ud, cv2.FM_RANSAC)

    im1_pts_ud_fixed, im2_pts_ud_fixed = cv2.correctMatches(
        E, im1_pts_ud, im2_pts_ud)
    use_corrected_matches = True
    if not (use_corrected_matches):
        im1_pts_ud_fixed = im1_pts_ud
        im2_pts_ud_fixed = im2_pts_ud

    epipolar_error = np.zeros((im1_pts_ud_fixed.shape[1], ))
    for i in range(im1_pts_ud_fixed.shape[1]):
        epipolar_error[i] = test_epipolar(E, im1_pts_ud_fixed[0, i, :],
                                          im2_pts_ud_fixed[0, i, :])

    # calculate F since we know K
    F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
    U, Sigma, V = np.linalg.svd(E)

    # these are the two possible rotation matrices
Example #35
def correct_matches(F, pta, ptb):
    """ cv2.correctMatches() wrapper """
    pta_f, ptb_f = cv2.correctMatches(F, pta[None,...], ptb[None,...])
    pta_f, ptb_f = [np.squeeze(p, axis=0) for p in (pta_f, ptb_f)]
    return pta_f, ptb_f
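Possible usage of the wrapper, assuming correct_matches above is in scope (F and the points are invented for illustration); both outputs come back with the input's N x 2 shape.

import numpy as np

F = np.array([[0, 0, 0], [0, 0, -1.0], [0, 1.0, 0.5]])   # an arbitrary rank-2 F
pta = np.random.rand(10, 2)
ptb = np.random.rand(10, 2)
pta_f, ptb_f = correct_matches(F, pta, ptb)
print(pta_f.shape, ptb_f.shape)   # (10, 2) (10, 2)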
Example #36
		im2_pts[i,1] = correspondences[1][i][1]

		cv2.circle(im,(int(im1_pts[i,0]),int(im1_pts[i,1])),2,(255,0,0),2)
		cv2.circle(im,(int(im2_pts[i,0]+im1.shape[1]),int(im2_pts[i,1])),2,(255,0,0),2)

	im1_pts_augmented = np.zeros((1,im1_pts.shape[0],im1_pts.shape[1]))
	im1_pts_augmented[0,:,:] = im1_pts
	im2_pts_augmented = np.zeros((1,im2_pts.shape[0],im2_pts.shape[1]))
	im2_pts_augmented[0,:,:] = im2_pts

	im1_pts_ud = cv2.undistortPoints(im1_pts_augmented,K,D)
	im2_pts_ud = cv2.undistortPoints(im2_pts_augmented,K,D)

	E, mask = cv2.findFundamentalMat(im1_pts_ud,im2_pts_ud,cv2.FM_RANSAC,10**-3)

	im1_pts_ud_fixed, im2_pts_ud_fixed = cv2.correctMatches(E, im1_pts_ud, im2_pts_ud)
	use_corrected_matches = True
	if not(use_corrected_matches):
		im1_pts_ud_fixed = im1_pts_ud
		im2_pts_ud_fixed = im2_pts_ud

	epipolar_error = np.zeros((im1_pts_ud_fixed.shape[1],))
	for i in range(im1_pts_ud_fixed.shape[1]):
		epipolar_error[i] = test_epipolar(E,im1_pts_ud_fixed[0,i,:],im2_pts_ud_fixed[0,i,:])

	F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
	U, Sigma, V = np.linalg.svd(E)

	R1 = U.dot(W).dot(V)
	R2 = U.dot(W.T).dot(V)
Example #37
		im2_pts[i,1] = correspondences[1][i][1]

		cv2.circle(im,(int(im1_pts[i,0]),int(im1_pts[i,1])),2,(255,0,0),2)
		cv2.circle(im,(int(im2_pts[i,0]+im1.shape[1]),int(im2_pts[i,1])),2,(255,0,0),2)

	im1_pts_augmented = np.zeros((1,im1_pts.shape[0],im1_pts.shape[1]))
	im1_pts_augmented[0,:,:] = im1_pts
	im2_pts_augmented = np.zeros((1,im2_pts.shape[0],im2_pts.shape[1]))
	im2_pts_augmented[0,:,:] = im2_pts

	im1_pts_ud = cv2.undistortPoints(im1_pts_augmented,K,D)
	im2_pts_ud = cv2.undistortPoints(im2_pts_augmented,K,D)

	E, mask = cv2.findFundamentalMat(im1_pts_ud,im2_pts_ud,cv2.FM_RANSAC)

	im1_pts_ud_fixed, im2_pts_ud_fixed = cv2.correctMatches(E, im1_pts_ud, im2_pts_ud)
	use_corrected_matches = True
	if not(use_corrected_matches):
		im1_pts_ud_fixed = im1_pts_ud
		im2_pts_ud_fixed = im2_pts_ud

	epipolar_error = np.zeros((im1_pts_ud_fixed.shape[1],))
	for i in range(im1_pts_ud_fixed.shape[1]):
		epipolar_error[i] = test_epipolar(E,im1_pts_ud_fixed[0,i,:],im2_pts_ud_fixed[0,i,:])

	F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
	U, Sigma, V = np.linalg.svd(E)

	R1 = U.dot(W).dot(V)
	R2 = U.dot(W.T).dot(V)
Example #38
def compute_depths():
	global F
	global im
	kp1 = detector.detect(im1_bw)
	kp2 = detector.detect(im2_bw)

	dc, des1 = extractor.compute(im1_bw,kp1)
	dc, des2 = extractor.compute(im2_bw,kp2)

	# do matches both ways so we can better screen out spurious matches
	matches = matcher.knnMatch(des1,des2,k=2)
	matches_reversed = matcher.knnMatch(des2,des1,k=2)

	# apply the ratio test in one direction
	good_matches_prelim = []
	for m,n in matches:
		if m.distance < ratio_threshold*n.distance and kp1[m.queryIdx].response > corner_threshold and kp2[m.trainIdx].response > corner_threshold:
			good_matches_prelim.append((m.queryIdx, m.trainIdx))

	# apply the ratio test in the other direction
	good_matches = []
	for m,n in matches_reversed:
		if m.distance < ratio_threshold*n.distance and (m.trainIdx,m.queryIdx) in good_matches_prelim:
			good_matches.append((m.trainIdx, m.queryIdx))

	auto_pts1 = np.zeros((1,len(good_matches),2))
	auto_pts2 = np.zeros((1,len(good_matches),2))

	for idx in range(len(good_matches)):
		match = good_matches[idx]
		auto_pts1[0,idx,:] = kp1[match[0]].pt
		auto_pts2[0,idx,:] = kp2[match[1]].pt

	auto_pts1_orig = auto_pts1
	auto_pts2_orig = auto_pts2

	# remove the effect of the intrinsic parameters as well as radial distortion
	auto_pts1 = cv2.undistortPoints(auto_pts1, K, D)
	auto_pts2 = cv2.undistortPoints(auto_pts2, K, D)

	correspondences = [[],[]]
	for i in range(auto_pts1_orig.shape[1]):
		correspondences[0].append((auto_pts1_orig[0,i,0],auto_pts1_orig[0,i,1]))
		correspondences[1].append((auto_pts2_orig[0,i,0],auto_pts2_orig[0,i,1]))

	im1_pts = np.zeros((len(correspondences[0]),2))
	im2_pts = np.zeros((len(correspondences[1]),2))

	im = np.array(np.hstack((im1,im2)))

	# plot the points
	for i in range(len(im1_pts)):
		im1_pts[i,0] = correspondences[0][i][0]
		im1_pts[i,1] = correspondences[0][i][1]
		im2_pts[i,0] = correspondences[1][i][0]
		im2_pts[i,1] = correspondences[1][i][1]

		cv2.circle(im,(int(im1_pts[i,0]),int(im1_pts[i,1])),2,(255,0,0),2)
		cv2.circle(im,(int(im2_pts[i,0]+im1.shape[1]),int(im2_pts[i,1])),2,(255,0,0),2)

	# the np.array bit makes the points into a 1xn_pointsx2 numpy array since that is what undistortPoints requires
	im1_pts_ud = cv2.undistortPoints(np.array([im1_pts]),K,D)
	im2_pts_ud = cv2.undistortPoints(np.array([im2_pts]),K,D)

	# since we are using undistorted points we are really computing the essential matrix
	E, mask = cv2.findFundamentalMat(im1_pts_ud,im2_pts_ud,cv2.FM_RANSAC,epipolar_threshold)

	# correct matches using the optimal triangulation method of Hartley and Zisserman
	im1_pts_ud_fixed, im2_pts_ud_fixed = cv2.correctMatches(E, im1_pts_ud, im2_pts_ud)

	epipolar_error = np.zeros((im1_pts_ud_fixed.shape[1],))
	for i in range(im1_pts_ud_fixed.shape[1]):
		epipolar_error[i] = test_epipolar(E,im1_pts_ud_fixed[0,i,:],im2_pts_ud_fixed[0,i,:])

	# since we used undistorted points to compute F we really computed E, now we use E to get F
	F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
	U, Sigma, V = np.linalg.svd(E)

	# these are the two possible rotations
	R1 = U.dot(W).dot(V)
	R2 = U.dot(W.T).dot(V)

	# flip sign of E if necessary
	if np.linalg.det(R1)+1.0 < 10**-8:
		# flip sign of E and recompute everything
		E = -E
		F = np.linalg.inv(K.T).dot(E).dot(np.linalg.inv(K))
		U, Sigma, V = np.linalg.svd(E)

		R1 = U.dot(W).dot(V)
		R2 = U.dot(W.T).dot(V)

	# these are the two possible translations between the two cameras (up to a scale)
	t1 = U[:,2]
	t2 = -U[:,2]

	# the first camera has a camera matrix with no translation or rotation
	P = np.array([[1.0,	0.0, 0.0, 0.0],
				  [0.0,	1.0, 0.0, 0.0],
				  [0.0,	0.0, 1.0, 0.0]]);
	P1_possibilities = [np.column_stack((R1, t1)),
						np.column_stack((R1, t2)),
						np.column_stack((R2, t1)),
						np.column_stack((R2, t2))]

	pclouds = []
	for P1 in P1_possibilities:
		pclouds.append(triangulate_points(im1_pts_ud_fixed, im2_pts_ud_fixed, P, P1))

	# compute the proportion of points in front of the cameras
	infront_of_camera = []
	for i in range(len(P1_possibilities)):
		infront_of_camera.append(test_triangulation(P,pclouds[i])+test_triangulation(P1_possibilities[i],pclouds[i]))

	# the highest proportion of points in front of the cameras is the one we select
	best_pcloud_idx = np.argmax(infront_of_camera)
	best_pcloud = pclouds[best_pcloud_idx]

	# scale the depths between 0 and 1 so it is easier to visualize
	depths = best_pcloud[:,2] - min(best_pcloud[:,2])
	depths = depths / max(depths)

	return best_pcloud, depths, im1_pts, im2_pts
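The global W used above is not shown in the snippet. In the standard Hartley and Zisserman decomposition of E (section 9.6.2) it is the matrix below; note that np.linalg.svd returns V already transposed, which is why the code multiplies by V directly.

import numpy as np

W = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])
# with U, Sigma, Vt = np.linalg.svd(E):
#   R1 = U @ W @ Vt and R2 = U @ W.T @ Vt are the two rotation candidates
#   t  = +/- U[:, 2] is the translation, up to scale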
Example #39
    def compute_depths(self, im1, im2, im1_bw, im2_bw):
        global im

        kp1 = self.detector.detect(im1_bw)
        kp2 = self.detector.detect(im2_bw)

        dc, des1 = self.extractor.compute(im1_bw, kp1)
        dc, des2 = self.extractor.compute(im2_bw, kp2)

        # do matches both ways so we can better screen out spurious matches
        matches = self.matcher.knnMatch(des1, des2, k = 2)
        matches_reversed = self.matcher.knnMatch(des2, des1, k = 2)

        # apply the ratio test in one direction
        good_matches_prelim = []
        for m,n in matches:
            if m.distance < self.ratio_threshold * n.distance and \
               kp1[m.queryIdx].response > self.corner_threshold and \
               kp2[m.trainIdx].response > self.corner_threshold:
                good_matches_prelim.append((m.queryIdx, m.trainIdx))

        # apply the ratio test in the other direction
        good_matches = []
        for m,n in matches_reversed:
            if m.distance < self.ratio_threshold * n.distance and \
               (m.trainIdx, m.queryIdx) in good_matches_prelim:
                good_matches.append((m.trainIdx, m.queryIdx))

        if len(good_matches) == 0:
            print "No good matches, yo!"
            return None

        auto_pts1 = np.zeros((1, len(good_matches), 2))
        auto_pts2 = np.zeros((1, len(good_matches), 2))

        for idx in range(len(good_matches)):
            match = good_matches[idx]
            auto_pts1[0, idx, :] = kp1[match[0]].pt
            auto_pts2[0, idx, :] = kp2[match[1]].pt

        auto_pts1_orig = auto_pts1
        auto_pts2_orig = auto_pts2

        # remove the effect of the intrinsic parameters as well as radial distortion
        auto_pts1 = cv2.undistortPoints(auto_pts1, self.K, self.D)
        auto_pts2 = cv2.undistortPoints(auto_pts2, self.K, self.D)

        correspondences = [[],[]]
        for i in range(auto_pts1_orig.shape[1]):
            correspondences[0].append((auto_pts1_orig[0,i,0],auto_pts1_orig[0,i,1]))
            correspondences[1].append((auto_pts2_orig[0,i,0],auto_pts2_orig[0,i,1]))

        im1_pts = np.zeros((len(correspondences[0]),2))
        im2_pts = np.zeros((len(correspondences[1]),2))

        # usage of global im
        im = np.array(np.hstack((im1, im2)))

        # plot the points
        for i in range(len(im1_pts)):
            im1_pts[i,0] = correspondences[0][i][0]
            im1_pts[i,1] = correspondences[0][i][1]
            im2_pts[i,0] = correspondences[1][i][0]
            im2_pts[i,1] = correspondences[1][i][1]

            cv2.circle(im, (int(im1_pts[i, 0]), int(im1_pts[i, 1])), 2,
                       (255, 0, 0), 5)
            cv2.circle(im, (int(im2_pts[i, 0] + im1.shape[1]),
                       int(im2_pts[i, 1])), 2, (255, 0, 0), 5)

        # the np.array bit makes the points into a 1xn_pointsx2 numpy array since that is what undistortPoints requires
        # TODO; DK : we haven't confirmed the undistorted points... plot them somehow?
        im1_pts_ud = cv2.undistortPoints(np.array([im1_pts]),self.K,self.D)
        im2_pts_ud = cv2.undistortPoints(np.array([im2_pts]),self.K,self.D)

        # since we are using undistorted points we are really computing the essential matrix
        # TODO; DK : check E?
        self.E, mask = cv2.findFundamentalMat(im1_pts_ud, im2_pts_ud,
                                              cv2.FM_RANSAC,
                                              self.epipolar_threshold)

        # correct matches using the optimal triangulation method of Hartley and Zisserman
        # TODO; DK : check corrected pts,,, also plot these somehow? plot im1_pts_ud with lines, and im_pts_fixed with differently colored lines
        im1_pts_ud_fixed, im2_pts_ud_fixed = cv2.correctMatches(self.E,
                                                                im1_pts_ud,
                                                                im2_pts_ud)

        M, mask = cv2.findHomography(auto_pts1_orig, auto_pts2_orig, cv2.RANSAC)

        if self.show_homography:
            self.find_and_display_homography(im1, M)

        epipolar_error = np.zeros((im1_pts_ud_fixed.shape[1],))
        for i in range(im1_pts_ud_fixed.shape[1]):
            epipolar_error[i] = self.test_epipolar(im1_pts_ud_fixed[0, i, :],
                                                   im2_pts_ud_fixed[0, i, :])

        # since we used undistorted points to compute F we really computed E, now we use E to get F
        # F is just for display funsies...
        self.F = np.linalg.inv(self.K.T).dot(self.E).dot(np.linalg.inv(self.K))
        U, Sigma, V = np.linalg.svd(self.E)

        # these are the two possible rotations
        # only need R for the sake of finding P1
        R1 = U.dot(self.W).dot(V)
        R2 = U.dot(self.W.T).dot(V)

        # flip sign of E if necessary
        if np.linalg.det(R1)+1.0 < 10**-8:
            # flip sign of E and recompute everything
            self.E = -self.E
            self.F = np.linalg.inv(self.K.T).dot(self.E).dot(np.linalg.inv(self.K))
            U, Sigma, V = np.linalg.svd(self.E)

            R1 = U.dot(self.W).dot(V)
            R2 = U.dot(self.W.T).dot(V)

        # these are the two possible translations between the two cameras (up to a scale)
        t1 = U[:,2]
        t2 = -U[:,2]

        P1_possibilities = [np.column_stack((R1, t1)),
                            np.column_stack((R1, t2)),
                            np.column_stack((R2, t1)),
                            np.column_stack((R2, t2))]

        pclouds = []
        for P1 in P1_possibilities:
            pclouds.append(self.triangulate_points(im1_pts_ud_fixed,
                                                   im2_pts_ud_fixed, P1))

        # compute the proportion of points in front of the cameras
        infront_of_camera = []
        for i in range(len(P1_possibilities)):
            infront_of_camera.append(self.test_triangulation(self.P, pclouds[i]) + \
                                     self.test_triangulation(P1_possibilities[i], pclouds[i]))

        # the highest proportion of points in front of the cameras is the one we select
        best_pcloud_idx = np.argmax(infront_of_camera)
        # TODO; DK : check P1?? some test calculations (for when we know what translation/rotation we used in real life)
        #       We've tried to check P1 by plotting the homography translation of one camera plane to the next

        best_pcloud = pclouds[best_pcloud_idx]

        # filtering before publishing things into rviz

        negatives = [] # list of indices of points with negative z values (to be passed into np.delete)        
        for i in range(len(best_pcloud[:,2])):
            if best_pcloud[:,2][i] <= 0:
                negatives.append(i)
        best_pcloud = np.delete(best_pcloud, negatives, 0)

        outliers = [] # waytoofar
        mean = np.mean(best_pcloud[:,2]) # filtering against the mean assumes that the pts of interest are all close to each other
                                         # this mean is still affected by the faraway outliers 
        outlier_threshold = 5*mean

        for i in range(len(best_pcloud[:,2])):
            if best_pcloud[:,2][i] >= outlier_threshold:
                outliers.append(i)
        best_pcloud = np.delete(best_pcloud, outliers, 0)

        # scale the depths between 0 and 1 so it is easier to visualize
        depths = best_pcloud[:, 2] - min(best_pcloud[:, 2])
        depths = depths / max(depths)

        return best_pcloud, depths, im1_pts, im2_pts
Example #40
def polynomial_triangulation(u1, P1, u2, P2):
    """
    Polynomial (Optimal) triangulation.
    Uses Linear-Eigen for final triangulation.
    Relative speed: 0.1
    
    (u1, P1) is the reference pair containing normalized image coordinates (x, y) and the corresponding camera matrix.
    (u2, P2) is the second pair.
    
    u1 and u2 are matrices: amount of points equals #rows and should be equal for u1 and u2.
    
    The status-vector is based on the assumption that all 3D points have finite coordinates.
    """
    P1_full = np.eye(4); P1_full[0:3, :] = P1[0:3, :]    # convert to 4x4
    P2_full = np.eye(4); P2_full[0:3, :] = P2[0:3, :]    # convert to 4x4
    P_canon = P2_full.dot(cv2.invert(P1_full)[1])    # find canonical P which satisfies P2 = P_canon * P1
    
    # "F = [t]_cross * R" [HZ 9.2.4]; transpose is needed for numpy
    F = np.cross(P_canon[0:3, 3], P_canon[0:3, 0:3], axisb=0).T
    
    # Other way of calculating "F" [HZ (9.2)]
    #op1 = (P2[0:3, 3:4] - P2[0:3, 0:3] .dot (cv2.invert(P1[0:3, 0:3])[1]) .dot (P1[0:3, 3:4]))
    #op2 = P2[0:3, 0:4] .dot (cv2.invert(P1_full)[1][0:4, 0:3])
    #F = np.cross(op1.reshape(-1), op2, axisb=0).T
    
    # Project 2D matches to closest pair of epipolar lines
    u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # For a purely sideways trajectory of 2nd cam, correctMatches() returns NaN for all possible points!
    if np.isnan(u1_new).all() or np.isnan(u2_new).all():
        F = cv2.findFundamentalMat(u1, u2, cv2.FM_8POINT)[0]    # so use a noisy version of the fund mat
        u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # Triangulate using the refined image points
    return linear_eigen_triangulation(u1_new[0], P1, u2_new[0], P2)    # TODO: replace with linear_LS: better results for points not at Inf
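linear_eigen_triangulation is not included in this snippet. Below is a minimal stand-in consistent with the docstring's contract (3D points plus a finite-coordinates status vector), using cv2.triangulatePoints as the linear solver so the example can run; this is an assumption for illustration, not the author's implementation.

import cv2
import numpy as np

def linear_eigen_triangulation(u1, P1, u2, P2):
    # stand-in: u1, u2 are n x 2 image points; P1, P2 are 3x4 camera matrices
    X = cv2.triangulatePoints(P1[0:3, :], P2[0:3, :], u1.T, u2.T)  # 4 x n homogeneous
    x3d = (X[:3] / X[3]).T                                         # n x 3 Euclidean
    status = np.isfinite(x3d).all(axis=1)                          # finite-coordinate check
    return x3d, status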