Example #1
    def find_essential(self,
                       fname1,
                       fname2,
                       save=False,
                       load_intrinsics=True,
                       intrinsics_load_path=' ',
                       stereo_save_path=' ',
                       mtx1=None,
                       dist1=None,
                       mtx2=None,
                       dist2=None):
        if load_intrinsics:
            # Try to load intrinsic values if specified, otherwise raise error
            try:
                mtx1, dist1 = self._load_intrinsics(intrinsics_load_path +
                                                    '/camera1')
                mtx2, dist2 = self._load_intrinsics(intrinsics_load_path +
                                                    '/camera2')
            except Exception:
                raise AttributeError('No intrinsics found')
        else:
            # Check that the matrices are of proper form
            if not isinstance(mtx1, np.ndarray):
                raise ValueError("mtx1 is not of type np.ndarray")
            if not isinstance(dist1, np.ndarray):
                raise ValueError("dist1 is not of type np.ndarray")
            if not isinstance(mtx2, np.ndarray):
                raise ValueError("mtx2 is not of type np.ndarray")
            if not isinstance(dist2, np.ndarray):
                raise ValueError("dist2 is not of type np.ndarray")

        img_name = self._image_mouse_click_directory(fname1, fname2)
        #Return clicked points
        pnts1, pnts2 = self.get_points()
        img = cv2.imread(img_name)
        imshape = (img.shape[0], img.shape[1])

        #print(pnts1)
        E = cv2.findEssentialMat(
            pnts1, pnts2)  #, focal = mtx1[0,0], pp = (mtx1[0,2], mtx1[1,2]))
        R1, R2, t = cv2.decomposeEssentialMat(E[0])
        print(R1, R2)

        if save:
            np.savetxt(stereo_save_path + '/rotation_matrix.csv',
                       R2,
                       fmt='%1.3f',
                       delimiter=",")
            np.savetxt(stereo_save_path + '/translation_matrix.csv',
                       t,
                       fmt='%1.3f',
                       delimiter=",")
            np.savetxt(stereo_save_path + '/essential_matrix.csv',
                       E[0],
                       fmt='%1.3f',
                       delimiter=",")
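Example #1 prints both rotation candidates without resolving the fourfold (R1, R2, +t/-t) ambiguity. Below is a minimal sketch of the usual disambiguation via cv2.recoverPose, which runs the cheirality check internally; the helper name select_pose is hypothetical, and pnts1, pnts2, mtx1 are assumed to be matched pixel points and the first camera's intrinsic matrix as in the example above:

import cv2
import numpy as np

def select_pose(pnts1, pnts2, mtx1):
    # findEssentialMat with RANSAC, then recoverPose picks the single
    # (R, t) pair that places the triangulated points in front of both
    # cameras, instead of returning all four decomposition candidates.
    E, _ = cv2.findEssentialMat(pnts1, pnts2, mtx1,
                                method=cv2.RANSAC, prob=0.999, threshold=1.0)
    n_inliers, R, t, mask = cv2.recoverPose(E, pnts1, pnts2, mtx1)
    return R, t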
Example #2
def pose_estimate(K0, K1, hp0, hp1, strict_mask, rot, th=0.0001):
    # epipolar geometry
    from models.submodule import F_ngransac
    tmphp0 = hp0[:, strict_mask]
    tmphp1 = hp1[:, strict_mask]
    #num_samp = min(300000,tmphp0.shape[1])
    #num_samp = min(30000,tmphp0.shape[1])
    num_samp = min(3000, tmphp0.shape[1])
    submask = np.random.choice(range(tmphp0.shape[1]), num_samp)
    tmphp0 = tmphp0[:, submask]
    tmphp1 = tmphp1[:, submask]

    rotx, transx, Ex = F_ngransac(torch.Tensor(tmphp0.T[np.newaxis]).cuda(),
                                  torch.Tensor(tmphp1.T[np.newaxis]).cuda(),
                                  torch.Tensor(K0[np.newaxis]).cuda(),
                                  False,
                                  0,
                                  Kn=torch.Tensor(K1[np.newaxis]).cuda())
    R01 = cv2.Rodrigues(np.asarray(rotx[0]))[0]
    T01 = np.asarray(transx[0])
    E = np.asarray(Ex[0])
    #    _,R01,T01,_ = cv2.recoverPose(E.astype(float), tmphp0[:2].T, tmphp1[:2].T, K0)  # RT are 0->1 points transform
    #    T01 = T01[:,0]
    #    R01=R01.T
    #    T01=-R01.dot(T01)  # now are 1->0 points transform

    R1, R2, T = cv2.decomposeEssentialMat(E)
    for rott in [(R1, T), (R2, T), (R1, -T), (R2, -T)]:
        if testEss(K0, K1, rott[0], rott[1], tmphp0, tmphp1):
            R01 = rott[0].T
            T01 = -R01.dot(rott[1][:, 0])
    if 'T01' not in locals():
        T01 = np.asarray([0, 0, 1])
        R01 = np.eye(3)


#    E, maskk = cv2.findEssentialMat(np.linalg.inv(K0).dot(hp0[:,strict_mask])[:2].T,
#                                    np.linalg.inv(K1).dot(hp1[:,strict_mask])[:2].T, np.eye(3),
#                                   cv2.LMEDS,threshold=th)
#
#    valid_points = np.ones((strict_mask.sum())).astype(bool)
#    valid_points[~maskk[:,0].astype(bool)]=False
#    fmask = strict_mask.copy()
#    fmask[strict_mask]=valid_points
#
#    R1, R2, T = cv2.decomposeEssentialMat(E)
#    for rott in [(R1,T),(R2,T),(R1,-T),(R2,-T)]:
#        if testEss(K0,K1,rott[0],rott[1],hp0[:,fmask], hp1[:,fmask]):
#            R01=rott[0].T
#            T01=-R01.dot(rott[1][:,0])
#    if not 'T01' in locals():
#        T01 = np.asarray([0,0,1])
#        R01 = np.eye(3)
#    T01t = T01.copy()

    # compensate R
    H01 = K0.dot(R01).dot(np.linalg.inv(K1))  # plane at infinity
    comp_hp1 = H01.dot(hp1)
    comp_hp1 = comp_hp1 / comp_hp1[-1:]
    return R01, T01, H01, comp_hp1, E
Example #3
    def processFrame(self, total_vel, positions):
        self.px_ref_old, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref_old)
        E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref_old, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=.3)
        R1, R2, _ = cv2.decomposeEssentialMat(E)
        if np.trace(R1) > 2.5:
            R_des = R1
        elif np.trace(R2) > 2.5:
            R_des = R2
        else:
            print("both are bad")
            R_des = np.eye(3)
        R = R_des
        # print (R1)
        _, _, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref_old, focal=self.focal, pp=self.pp)
        # eulers = transforms3d.euler.mat2euler(R,'rxyz')
        # print (eulers[0])
        absolute_scale = self.getAbsoluteScale(total_vel,positions)
        if absolute_scale > 0:
            self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(t)
            self.cur_R = R.dot(self.cur_R)
        if self.px_ref.shape[0] < kMinNumFeature:
            mask = np.zeros_like(self.new_frame)
            mask[:] = 255
            ##goodFeaturesToTrack
            # self.px_cur = cv2.goodFeaturesToTrack(self.new_frame, mask = mask, **feature_params)
            # self.px_cur = np.squeeze(self.px_cur)

            ##FAST
            self.px_cur = self.detector.detect(self.new_frame)
            self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
        self.px_ref_old = self.px_ref
        self.px_ref = self.px_cur
Example #4
def test_frames(intrinsic_mat, fr1, fr2):
    corr = build_correspondences(fr1, fr2)
    if corr.points_1.shape[0] < 5:
        return None, 0

    mat, mask = cv2.findEssentialMat(corr.points_1,
                                     corr.points_2,
                                     intrinsic_mat,
                                     method=cv2.RANSAC,
                                     prob=0.999,
                                     threshold=1.0)
    if mat is None or mat.shape != (3, 3) or not validate_mat(corr, mask):
        return None, 0

    # https://kite.com/python/docs/cv2.decomposeEssentialMat
    R1, R2, t = cv2.decomposeEssentialMat(mat)
    poses = [
        Pose(R1.T, R1.T @ t),
        Pose(R2.T, R2.T @ t),
        Pose(R1.T, R1.T @ (-t)),
        Pose(R2.T, R2.T @ (-t))
    ]
    best_size, best_idx = -1, 0

    for i, pose in enumerate(poses):
        points, _, _ = triangulate_correspondences(
            remove_correspondences_with_ids(corr, np.argwhere(mask == 0)),
            eye3x4(), pose_to_view_mat3x4(pose), intrinsic_mat, params)
        if points.shape[0] > best_size:
            best_size, best_idx = points.shape[0], i

    return poses[best_idx], best_size
Example #5
    def recover_pose(self, pts1, pts2, new_img_dim=None):
        """ Find rotation matrices using epipolar geometry
        
        Args:
            pts1 (np.ndarray): Initial points
            pts2 (np.ndarray): Resulting points
            new_img_dim (tuple, optional): New image dimension. Defaults to None.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray]: Two candidate rotation matrices and the translation vector from the decomposed essential matrix.
        """
        # https://answers.opencv.org/question/31421/opencv-3-essentialmatrix-and-recoverpose/
        img_dim = new_img_dim if new_img_dim else self.calib_dimension
        scaled_K = self.K * img_dim[0] / self.calib_dimension[0]
        scaled_K[2][2] = 1.0

        E, mask = cv2.findEssentialMat(pts1, pts2, scaled_K, cv2.RANSAC, 0.999,
                                       0.1)  # cv2.LMEDS or cv2.RANSAC
        #retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, scaled_K)
        try:
            R1, R2, t = cv2.decomposeEssentialMat(E)
        except (cv2.error, TypeError):
            # Can't figure it out, assume no rotation
            return np.eye(3), np.eye(3), np.array([0, 0, 0])
        return R1, R2, t
Example #6
    def extractScaledMotion(self, currentPoints, currentTriangulated, previousPoints, previousTriangulated, dictionary=False):
        newPts = np.zeros((len(currentPoints), 2), dtype=np.float64)
        oldPts = np.zeros((len(currentPoints), 2), dtype=np.float64)
        for j in range(0, len(currentPoints)):
            newPts[j, 0] = currentPoints[j][0]
            newPts[j, 1] = currentPoints[j][1]
            oldPts[j, 0] = previousPoints[j][0]
            oldPts[j, 1] = previousPoints[j][1]
        nisterResults = self.extractMotion(currentPoints, previousPoints, True)
        R1, R2, t = cv2.decomposeEssentialMat(nisterResults["E"])
        nInliers, R, T, matchMask = cv2.recoverPose(nisterResults["E"], oldPts, newPts, self.extract["k"])
        nisterResults["inlierMask"] = matchMask
        nisterResults["nInliers"] = nInliers
        #print(nInliers)
        nisterResults["T"] = T
        nisterResults["R"] = R
        nisterResults["H"] = createHomog(R, T)
        if nisterResults["nInliers"] > 0:
            s, t, inl = estimateScale(previousTriangulated, currentTriangulated, R, T, matchMask)
            nisterResults["T"] = t
            nisterResults["nInliers"] = inl
        nisterResults["H"] = createHomog(nisterResults["R"], nisterResults["T"])
        if not dictionary:
            return nisterResults["H"]
        else:
            return nisterResults
Example #7
File: calib.py Project: shiba6v/pycalib
def recoverPose2(E, n1, n2, K1, K2, mask):
    n1 = n1.reshape((-1, 2))
    n2 = n2.reshape((-1, 2))
    R2a, R2b, t2 = cv2.decomposeEssentialMat(E)
    R1 = np.eye(3)
    t1 = np.zeros((3,1))

    def z_count(R1, t1, R2, t2, K1, K2, n1, n2):
        """
        Count number of points appeared in front of the cameras
        """
        P1 = K1 @ np.hstack((R1, t1))
        P2 = K2 @ np.hstack((R2, t2))
        Xh1 = cv2.triangulatePoints(P1, P2, n1, n2)
        Xh1 /= Xh1[3,:]
        z1 = np.sum(Xh1[2,:]>0)  # num of positive z points in Cam1 coordinate system
        Xh2 = R2 @ Xh1[:3,:] + t2
        z2 = np.sum(Xh2[2,:]>0)  # num of positive z points in Cam2 coordinate system
        return (z1 + z2), Xh1[:3,:]

    zmax = -1
    for R2x, t2x in [[R2a, t2], [R2a, -t2], [R2b, t2], [R2b, -t2]]:
        z, Xx = z_count(R1, t1, R2x, t2x, K1, K2, n1.T, n2.T)
        if zmax < z:
            zmax = z
            R2_est = R2x
            t2_est = t2x
            X_est = Xx
            
    return R2_est, t2_est
Example #8
    def get_rotation_translation(self, vid_set):
        '''
        Calculate the rotation and translation vectors given the unique id
        for a set of videos
        '''

        r_t_pairs = []
        attempts = 20
        for _ in range(attempts):
            # Build dataframe with some random set of data in the video set
            vid_set_dfs = []
            for m in self.metadata:
                if m['video_set'] == vid_set and np.random.rand() < 0.5:
                    vid_set_dfs.append(self.get_skeleton_data(m['video_index']))
            full_df = pd.concat(vid_set_dfs, ignore_index=True)

            rgb = np.array([full_df['color']]).astype('float32')
            d   = np.array([full_df['depth']]).astype('float32')

            F, mask = cv2.findFundamentalMat(d, rgb)    # Fundamental matrix
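            # NOTE: rgb_mat and d_mat (the RGB and depth camera intrinsic
            # matrices) are assumed to be defined elsewhere in this class.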
            E = rgb_mat.T @ F @ d_mat                   # Essential matrix
            R1, R2, T = cv2.decomposeEssentialMat(E)    # Decompose essential matrix

            # Get rotation matrix that looks most similar to the identity matrix
            R = R1 if abs(np.sum(R1 - np.identity(3))) < abs(np.sum(R2 - np.identity(3))) else R2

            r_t_pairs.append((R, T))
        return r_t_pairs
Example #9
def pose_by_frames(frame1: FrameCorners, frame2: FrameCorners,
                   intrinsic_mat: np.ndarray) -> Tuple[Pose, int]:
    correspondences = build_correspondences(frame1, frame2)
    mat, mask = cv2.findEssentialMat(correspondences.points_1,
                                     correspondences.points_2, intrinsic_mat,
                                     cv2.RANSAC, 0.99, 1)

    if mat is None or mat.shape != (3, 3):
        return None, 0

    if mask is not None:
        correspondences = _remove_correspondences_with_ids(
            correspondences, np.argwhere(mask.flatten() == 0))

    R1, R2, t = cv2.decomposeEssentialMat(mat)
    max_pose = None
    max_npoints = 0
    for mat in [R1.T, R2.T]:
        for vec in [t, -t]:
            pose = Pose(mat, mat @ vec)
            points, _, _ = triangulate_correspondences(
                correspondences, eye3x4(), pose_to_view_mat3x4(pose),
                intrinsic_mat, INITIAL_TRIANGULATION_PARAMETERS)
            if len(points) > max_npoints:
                max_pose = pose
                max_npoints = len(points)
    return max_pose, max_npoints
Example #10
def reconstruction(img_pairs):
    for pathes in img_pairs:
        filename_w_ext1 = os.path.basename(pathes[0])
        filename_w_ext2 = os.path.basename(pathes[1])
        filename1 = os.path.splitext(filename_w_ext1)[0]
        filename2 = os.path.splitext(filename_w_ext2)[0]
        F = np.loadtxt(filename1 + "FMatrix.txt", dtype=float, delimiter=',')
        pts1 = np.loadtxt(filename1 + "Points1.txt", dtype=int, delimiter=',')
        pts2 = np.loadtxt(filename1 + "Points2.txt", dtype=int, delimiter=',')
        fx = fy = 721.5
        cx = 690.5
        cy = 172.8
        K = np.array([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]])
        Rt = np.hstack((np.eye(3), np.zeros((3, 1))))
        P0 = K.dot(Rt)
        E = K.T @ F @ K
        R1, R2, t = cv2.decomposeEssentialMat(E)
        P1 = verify_camerapose(P0, K, R1, R2, t, pts1.reshape(-1, 1, 2),
                               pts2.reshape(-1, 1, 2))
        pointcloud = cv2.triangulatePoints(
            P0, P1, np.array(pts1.reshape(-1, 1, 2), dtype=np.float64),
            np.array(pts2.reshape(-1, 1, 2), dtype=np.float64))
        # convert the 4D homogeneous points to 3D
        pointcloud = cv2.convertPointsFromHomogeneous(pointcloud.T)
        write_ply(filename1 + 'punktwolke.ply', pointcloud)
Example #11
def estimate_motion(image1, image2, depth_map, K):

    sift = cv2.xfeatures2d.SIFT_create()

    kp1, des1 = sift.detectAndCompute(cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY),
                                      None)
    kp2, des2 = sift.detectAndCompute(cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY),
                                      None)
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(des1, des2)

    img1_pt = []
    img2_pt = []
    for m in matches:
        img1_pt.append(kp1[m.queryIdx].pt)
        img2_pt.append(kp2[m.trainIdx].pt)

    img1_pt = np.asarray(img1_pt)
    img2_pt = np.asarray(img2_pt)
    F, mask = cv2.findFundamentalMat(img1_pt, img2_pt, cv2.FM_RANSAC)

    img1_pt = img1_pt[mask.ravel() == 1]
    img2_pt = img2_pt[mask.ravel() == 1]

    E = ((K.T).dot(F)).dot(K)
    R1, R2, t = cv2.decomposeEssentialMat(E)

    R, T = disambiguate_transform(R1, R2, t, img1_pt, img2_pt)

    scale = find_scale(depth_map, img1_pt, img2_pt, R, T)

    return R, -T * scale
Example #12
def calc_known_views(corner_storage, intrinsic_mat, indent=5):
    num_frames = len(corner_storage)
    known_view_1 = (None, None)
    known_view_2 = (None, None)
    num_points = -1

    for frame_1 in range(num_frames):
        for frame_2 in range(frame_1 + indent, num_frames):
            corrs = build_correspondences(corner_storage[frame_1],
                                          corner_storage[frame_2])

            if len(corrs.ids) < 6:
                continue

            points_1 = corrs.points_1
            points_2 = corrs.points_2

            H, mask_h = cv2.findHomography(points_1,
                                           points_2,
                                           method=cv2.RANSAC)
            if mask_h is None:
                continue

            mask_h = mask_h.reshape(-1)

            E, mask_e = cv2.findEssentialMat(points_1,
                                             points_2,
                                             method=cv2.RANSAC,
                                             cameraMatrix=intrinsic_mat)

            if mask_e is None:
                continue

            mask_e = mask_e.reshape(-1)

            if mask_h.sum() / mask_e.sum() > 0.5:
                continue

            corrs = Correspondences(corrs.ids[(mask_e == 1)],
                                    points_1[(mask_e == 1)],
                                    points_2[(mask_e == 1)])

            R1, R2, t = cv2.decomposeEssentialMat(E)

            for poss_pose in [
                    Pose(R1.T, R1.T @ t),
                    Pose(R1.T, R1.T @ (-t)),
                    Pose(R2.T, R2.T @ t),
                    Pose(R2.T, R2.T @ (-t))
            ]:
                points3d, _, _ = triangulate_correspondences(
                    corrs, eye3x4(), pose_to_view_mat3x4(poss_pose),
                    intrinsic_mat, TriangulationParameters(1, 2, .1))

                if len(points3d) > num_points:
                    num_points = len(points3d)
                    known_view_1 = (frame_1, view_mat3x4_to_pose(eye3x4()))
                    known_view_2 = (frame_2, poss_pose)
    return known_view_1, known_view_2
Example #13
def main():
    img_path = '../Data/stereo/centre/'
    imgs = sorted(os.listdir(img_path))
    fx, fy, cx, cy, G_camera_image, LUT = ReadCameraModel('../Data/model')

    i = 100
    img1 = cv2.imread(img_path + imgs[i], -1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BAYER_GR2BGR)
    img1 = UndistortImage(img1, LUT)

    img2 = cv2.imread(img_path + imgs[i + 1], -1)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BAYER_GR2BGR)
    img2 = UndistortImage(img2, LUT)

    x1, x2 = utils.getMatchingFeaturePoints(img1, img2)

    x1 = np.hstack([x1, np.ones((x1.shape[0], 1))])
    x2 = np.hstack([x2, np.ones((x2.shape[0], 1))])

    # print(x1.shape)
    # print(x2.shape)
    features = np.hstack([x1, x2])
    # getInliersRANSAC(features,threshold=(0.07),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
    x1_in, x2_in = RANSAC.getInliersRANSAC(features,
                                           threshold=(0.005),
                                           size=8,
                                           num_inliers=0.6 * features.shape[0],
                                           num_iters=200)
    fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in, x2_in)
    K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx, K)
    print("Essential Matrix")
    print(ess_mtx)

    C, R = ExtractCameraPose(ess_mtx)
    print("Pose Orientation :")
    print(R)

    E_act = cv2.findEssentialMat(x1_in[:, :2], x2_in[:, :2], K)
    # _,R,T,_ = cv2.recoverPose(E_act[0],x1_in[:,:2],x2_in[:,:2])

    # print("Pose Position :")
    # print(T.T)
    # print("Pose Orientation :")
    # print(R)

    R1, R2, T = cv2.decomposeEssentialMat(E_act[0])
    print("OpenCV R1")
    print(R1)
    print("OpenCV R2")
    print(R2)

    print("Calculated Pose Position :")
    print(C)
    print("Opencv T")
    print(T.T)
Example #14
def feat_match(img_train,
               img_query,
               num_fr,
               camMat,
               crop=1,
               foc_len=1200,
               match_pnts=20,
               thresh=1):
    # in practice, for the original video, foc_len can be applied;
    # in testing with a resized video, camMat is applied

    kp1, des1 = orb.detectAndCompute(img_train, None)
    kp2, des2 = orb.detectAndCompute(img_query, None)
    # frm = np.copy(img_query)

    if len(des1) < 8 or len(des2) < 8:
        print("not enough feature pnts to be matched")
        return np.zeros((3, ))

    matches = bf.match(des2, des1)  # pos1 for query
    if len(matches) < 8:
        print("not enough matches in %d frame, pitch unchanged" % num_fr)
        return np.zeros((3, ))

    # sort matches according to score
    matches = sorted(matches, key=lambda x: x.distance)[:match_pnts]
    # extract matched-feature coordinates
    kpts1 = np.array([kp1[m.trainIdx].pt for m in matches], dtype=np.int32)
    kpts2 = np.array([kp2[m.queryIdx].pt for m in matches], dtype=np.int32)

    # essMat, mask = cv.findEssentialMat(kpts1, kpts2, focal=foc_len, prob=0.9999, threshold=0.1)  # focal length to be revised
    essMat, mask = cv.findEssentialMat(kpts1,
                                       kpts2,
                                       camMat,
                                       prob=0.9999,
                                       threshold=0.1)
    # matches = [matches[i] for i in range(len(mask)) if mask[i] == 1]
    # img_out = cv.drawMatches(img_query, kp2, img_train, kp1, matches, None,
    #                          flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    R1, R2, t = cv.decomposeEssentialMat(essMat)
    rotAngs1 = rotMat2EulAng(R1)
    rotAngs2 = rotMat2EulAng(R2)
    rotAngs, RMat = sel_ang(rotAngs1, rotAngs2, R1, R2, thresh)
    # for i in range(len(rotAngs)):
    #     if abs(rotAngs[i]) > 3:
    #         print("the %sth param: %.3f, frame %s" %(i, rotAngs[i], num_fr))
    #         print(rotAngs1)
    #         print(rotAngs2)
    # t_ang = "pitch:%.3f; yaw:%.3f; row:%.3f" % (rotAngs[0], rotAngs[1], rotAngs[2])
    # t_fr = "%s" % num_fr
    # img_out = cv.putText(img_out, t_ang, (100, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    # img_out = cv.putText(img_out, t_fr, (20, 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
    # return pitch & yaw angle
    # return rotAngs[0], rotAngs[2]
    # return rotAngs, img_out
    return rotAngs
Example #15
def _initialize_cloud_by_two_frames(
        prev_corners, cur_corners,
        intrinsic_mat, base_view,
        tracking_mode: TrackingMode
):
    correspondences = build_correspondences(prev_corners, cur_corners)

    failed = correspondences.points_1.shape[0] <= 5

    e, mask = [None] * 2
    if not failed:
        e, mask = cv2.findEssentialMat(
            correspondences.points_1, correspondences.points_2, intrinsic_mat,
            **tracking_mode.essential_mat_params
        )

        _, mask_h = cv2.findHomography(
            correspondences.points_1,
            correspondences.points_2,
            method=tracking_mode.essential_mat_params['method'],
            ransacReprojThreshold=tracking_mode.triangulation_params.max_reprojection_error,
            maxIters=tracking_mode.solve_pnp_ransac_params['iterationsCount'],
            confidence=tracking_mode.essential_mat_params['prob'],
            mask=np.copy(mask)
        )

        essential_inliers = np.sum(mask)
        homography_inliers = np.sum(mask_h)

        failed |= homography_inliers == 0 \
            or essential_inliers < tracking_mode.essential_homography_ratio_threshold * homography_inliers

    if not failed:
        mask = np.ma.make_mask(mask).flatten()
        r1, r2, t12 = cv2.decomposeEssentialMat(e)

        points, ids, view = np.array([]), np.array([]), None
        for R in [r1, r2]:
            for t in [t12, -t12]:
                view_ = np.hstack((R, t))

                points_, ids_ = triangulate_correspondences(
                    correspondences,
                    base_view,
                    view_,
                    intrinsic_mat,
                    tracking_mode.triangulation_params,
                    mask=mask
                )

                if points_.size > points.size:
                    points, ids, view = points_, ids_, view_

        return points.shape[0], view, points, ids

    return -1, None, None, None
Example #16
def calculateCamera(p_old, p_new):
    '''
	def cleanPoints(pts):
		mean = np.sum(pts, axis = 0)
		pts = map(lambda pt: pt - mean, pts)
		scale = np.sum(np.linalg.norm(pts, axis=2)) / len(pts)
		pts = pts * np.sqrt(2) /scale
		return mean, scale, pts
		'''
    #mean, scale, p_old = cleanPoints(p_old)
    p_old = p_old.copy()
    p_new = p_new.copy()

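    # NOTE: w and h (image width and height) are assumed to be defined
    # at module level elsewhere.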
    def normPts(pts):
        pts[:, :, 0] = pts[:, :, 0] / w - 0.5
        pts[:, :, 1] = pts[:, :, 1] / h - 0.5
        return pts

    #print p_old.shape
    p_old = normPts(p_old)
    p_new = normPts(p_new)

    F, mask = cv2.findFundamentalMat(p_old, p_new,
                                     cv2.FM_RANSAC)  #F p_old = line in p_new
    p_old = p_old[mask == 1]
    p_new = p_new[mask == 1]
    #U, s, V = np.linalg.svd(F) #, full_matrices=True) #Note that V here is often called V.H eslewhere in literature
    #F = np.dot(np.dot(U, np.diag([1,1,0])),V)
    #retval, R, t, mask = cv2.recoverPose(F, p_old, p_new)

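    # NOTE: F comes from findFundamentalMat on the normalized coordinates
    # above, so it is treated here as an (approximate) essential matrix.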
    R1, R2, t = cv2.decomposeEssentialMat(F)

    def findAngle(R):
        return np.arccos((np.trace(R) - 1) / 2)

    #angle = np.arccos((np.trace(R)-1)/2)

    angle1 = findAngle(R1)
    angle2 = findAngle(R2)

    #print angle
    if angle1 < angle2:
        R = R1
    else:
        R = R2

    #print t
    if np.linalg.det(R) <= 0:
        print("YOU IDIOT. Det<0")

    #if retval < 10:
    #	R=np.identity(3)
    #print mask
    return R, t[0]
Example #17
File: utils.py Project: Parskatt/caps
def evaluate_pose(E, P):
    R_gt = P[:3, :3]
    t_gt = P[:3, 3]
    R1, R2, t = cv2.decomposeEssentialMat(E)
    t = t.squeeze()
    theta_1 = np.linalg.norm(scipy.linalg.logm(R1.T.dot(R_gt)), 'fro') / np.sqrt(2)
    theta_2 = np.linalg.norm(scipy.linalg.logm(R2.T.dot(R_gt)), 'fro') / np.sqrt(2)
    theta = min(theta_1, theta_2) * 180 / np.pi
    tran_cos = np.inner(t, t_gt) / (np.linalg.norm(t_gt) * np.linalg.norm(t))
    tran = np.arccos(tran_cos) * 180 / np.pi
    return theta, tran
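As a quick self-check of evaluate_pose, one can build an essential matrix from a known pose via E = [t]x R and verify that the rotation error comes out near zero. This is a sketch under that assumption; skew is a hypothetical helper, and since decomposeEssentialMat recovers t only up to sign, tran may come out near 0 or near 180 degrees:

import cv2
import numpy as np

def skew(v):
    # cross-product matrix [v]x (hypothetical helper for this check)
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])

R_gt, _ = cv2.Rodrigues(np.array([0.1, -0.2, 0.05]))
t_gt = np.array([1.0, 0.2, -0.3])
t_gt /= np.linalg.norm(t_gt)
E = skew(t_gt) @ R_gt                      # essential matrix of [R_gt | t_gt]
P = np.hstack([R_gt, t_gt.reshape(3, 1)])
theta, tran = evaluate_pose(E, P)
print(theta, tran)                         # theta ~ 0; tran ~ 0 or ~ 180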
Example #18
    def recover_pose(self, pts1, pts2, new_img_dim = None):
        # https://answers.opencv.org/question/31421/opencv-3-essentialmatrix-and-recoverpose/
        # Find essential matrix from fundamental matrix
        img_dim = new_img_dim if new_img_dim else self.calib_dimension
        scaled_K = self.K * img_dim[0] / self.calib_dimension[0]
        scaled_K[2][2] = 1.0

        E, mask = cv2.findEssentialMat(pts1, pts2, scaled_K, cv2.RANSAC, 0.999, 0.1) # cv2.LMEDS or cv2.RANSAC
        #retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, scaled_K)
        R1, R2, t = cv2.decomposeEssentialMat(E) 

        return R1, R2, t
Example #19
def init_first_camera_positions(intrinsic_mat, corner_storage):
    frame_count = len(corner_storage)

    best_pair = (-1, -1)
    zero_view_mat = eye3x4()
    best_view_mat = None
    best_triangulated_points = -1
    confidence = 0.9
    params = TriangulationParameters(max_reprojection_error=2,
                                     min_triangulation_angle_deg=1,
                                     min_depth=0.5)

    for i in range(0, frame_count, 10):
        print("Init first camera. Frame %d/%d" % (i + 1, frame_count))
        for j in range(i + 3, min(i + 30, frame_count), 3):
            correspondences = build_correspondences(corner_storage[i],
                                                    corner_storage[j])
            if len(correspondences.ids) < 5:
                continue
            points_1, points_2 = correspondences.points_1, correspondences.points_2
            e_matrix, e_mask = findEssentialMat(points_1,
                                                points_2,
                                                intrinsic_mat,
                                                method=RANSAC,
                                                threshold=2,
                                                prob=confidence)
            h_matrix, h_mask = findHomography(points_1,
                                              points_2,
                                              method=RANSAC,
                                              ransacReprojThreshold=2,
                                              confidence=confidence)
            e_inliers = e_mask.reshape(-1).sum()
            h_inliers = h_mask.reshape(-1).sum()
            if e_inliers / h_inliers < 0.1:
                continue
            outliers = np.delete(correspondences.ids,
                                 correspondences.ids[e_mask])
            correspondences = build_correspondences(corner_storage[i],
                                                    corner_storage[j],
                                                    outliers)
            R1, R2, t = decomposeEssentialMat(e_matrix)
            for rv in [R1, R2]:
                for tv in [-t, t]:
                    candidate_view_mat = np.hstack((rv, tv))
                    points, ids, _ = triangulate_correspondences(
                        correspondences, zero_view_mat, candidate_view_mat,
                        intrinsic_mat, params)
                    if len(points) > best_triangulated_points:
                        best_triangulated_points = len(points)
                        best_pair = (i, j)
                        best_view_mat = candidate_view_mat

    return (best_pair[0], zero_view_mat), (best_pair[1], best_view_mat)
Example #20
def process(img2):
    img1 = cv2.imread("sampleImages/img-center.png")
    src_points = extract_points(img1)
    dst_points = extract_points(img2)

    # retval = cv2.estimateRigidTransform(src_points, dst_points, fullAffine=False)
    # matrix = None
    # matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    retval, mask = cv2.findHomography(src_points, dst_points)
    # print(retval)
    # print(MATRIX)
    time.sleep(1)

    number, rotations, translations, normals = cv2.decomposeHomographyMat(
        retval, MATRIX)
    print(number)
    for possible_rotation in rotations:
        rot_vector = cv2.Rodrigues(possible_rotation)[0]
        print(list(map(math.degrees, rot_vector)))

    # [print(math.degrees(rotation_val)) for sublist in [cv2.Rodrigues(rotation)[0] for rotation in rotations] for rotation_val in sublist]
    # print(translations)
    # print(normals)
    # print(output)
    # print(output[0])
    # print(output[1])
    # print(output[2])

    new_src = np.int32(src_points)
    new_dst = np.int32(dst_points)
    # print(src_points)
    # print(dst_points)
    essentialMatrix, mask = cv2.findFundamentalMat(new_src, new_dst,
                                                   cv2.FM_LMEDS)
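    # NOTE: findFundamentalMat returns a fundamental matrix, so the name
    # essentialMatrix is misleading unless the points are pre-normalized.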
    # print(essentialMatrix)
    # print("\n"*20)
    # pose = cv2.recoverPose(essentialMatrix, src_points, dst_points, MATRIX)
    R1, R2, t = cv2.decomposeEssentialMat(
        essentialMatrix)  # for thing in pose:

    vector1 = cv2.Rodrigues(R1)[0]
    vector2 = cv2.Rodrigues(R2)[0]
    pos1 = [
        math.degrees(item) for sublist in vector1.tolist() for item in sublist
    ]
    pos2 = [
        math.degrees(item) for sublist in vector2.tolist() for item in sublist
    ]
    # print(vector2.tolist())

    print(pos1)
    print(pos2)
Example #21
File: twoview.py Project: yycho0108/scan3d
    def select_model(_):
        """ Select model and return possible transform permutations """
        if _['model'] == 'H':
            # decompose_H()
            res_h, Hr, Ht, Hn = cv2.decomposeHomographyMat(_['H'], _['K'])
            Ht = np.float32(Ht)
            Ht /= np.linalg.norm(Ht, axis=(1, 2), keepdims=True)
            T_perm = zip(Hr, Ht)
        else:
            # decompose_E()
            R1, R2, t = cv2.decomposeEssentialMat(_['E'])
            T_perm = [(R1, t), (R2, t), (R1, -t), (R2, -t)]
        return T_perm
Example #22
def recover_pose(E, K1, K2, pts1_undist, pts2_undist):

    Rx1, Rx2, tx = cv2.decomposeEssentialMat(E)
    candidate_poses = [(Rx1, tx), (Rx1, -tx), (Rx2, tx), (Rx2, -tx)]

    # the positive depth constraint is used to disambiguate the solutions
    n_in_front = []
    for R, t in candidate_poses:
        n_pos_depth = positive_depth_count(R, t, K1, K2, pts1_undist, pts2_undist)
        n_in_front.append(n_pos_depth)

    return candidate_poses[np.argmax(n_in_front)]
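positive_depth_count is not shown in this snippet. A minimal sketch of what such a cheirality helper could look like, mirroring the z_count logic of Example #7 rather than the original author's code, and assuming pts1_undist and pts2_undist are Nx2 float arrays:

def positive_depth_count(R, t, K1, K2, pts1_undist, pts2_undist):
    # Camera 1 at the origin, camera 2 at [R | t]; triangulate and count
    # the points that end up with positive depth in both camera frames.
    P1 = K1 @ np.hstack([np.eye(3), np.zeros((3, 1))])
    P2 = K2 @ np.hstack([R, t.reshape(3, 1)])
    Xh = cv2.triangulatePoints(P1, P2, pts1_undist.T, pts2_undist.T)
    X = Xh[:3] / Xh[3]
    z1 = X[2] > 0                                 # depth in camera 1
    z2 = (R @ X + t.reshape(3, 1))[2] > 0         # depth in camera 2
    return int(np.sum(z1 & z2))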
Example #23
def decomp_essential_mat(E, q1, q2, K, P):
    def sum_z(R, t):
        T = form_transf(R, t)
        P2 = np.matmul(np.concatenate((K, np.zeros((3, 1))), axis=1), T)
        hom_Q1 = cv2.triangulatePoints(P, P2, q1.T, q2.T)
        hom_Q2 = np.matmul(T, hom_Q1)
        return sum(hom_Q1[2, :] / hom_Q1[3, :] > 0) + sum(
            hom_Q2[2, :] / hom_Q2[3, :] > 0)

    R1, R2, t = cv2.decomposeEssentialMat(E)
    t = np.squeeze(t)
    pairs = [[R1, t], [R1, -t], [R2, t], [R2, -t]]
    sums = [sum_z(R, t) for R, t in pairs]
    return pairs[np.argmax(sums)]
Example #24
    def _two_frame_initialization(self, frame1: FrameCorners,
                                  frame2: FrameCorners):
        correspondences = build_correspondences(frame1, frame2)

        if correspondences.points_1.shape[0] < 5:
            return None, 0

        essential_mat, mask_essential = cv2.findEssentialMat(
            correspondences.points_1,
            correspondences.points_2,
            self._intrinsic_mat,
            method=cv2.RANSAC,
            prob=0.9999,
            threshold=1.)

        _, mask_homography = cv2.findHomography(
            correspondences.points_1,
            correspondences.points_2,
            method=cv2.RANSAC,
            confidence=0.9999,
            ransacReprojThreshold=self._triangulation_parameters.max_reprojection_error)

        # zeros in mask correspond to outliers
        essential_inliers = np.count_nonzero(mask_essential)
        homography_inliers = np.count_nonzero(mask_homography)

        if essential_inliers < 1. * homography_inliers:
            return None, 0

        correspondences_filtered = remove_correspondences_with_ids(
            correspondences, np.argwhere(mask_essential == 0))

        R1, R2, t = cv2.decomposeEssentialMat(essential_mat)
        possible_poses = [
            Pose(R1.T, R1.T @ t),
            Pose(R2.T, R2.T @ t),
            Pose(R1.T, R1.T @ (-t)),
            Pose(R2.T, R2.T @ (-t))
        ]
        poses2points = [0] * 4

        for i, pose in enumerate(possible_poses):
            points, ids = triangulate_correspondences(
                correspondences_filtered, eye3x4(), pose_to_view_mat3x4(pose),
                self._intrinsic_mat, self._triangulation_parameters)
            poses2points[i] = points.shape[0]

        best_pose = np.argmax(poses2points)
        return possible_poses[best_pose], poses2points[best_pose]
Example #25
def Pose_Est(im1, im2, k1, k2):
    orb = cv2.ORB_create()

    kp1, descs1 = orb.detectAndCompute(im1, None)
    kp2, descs2 = orb.detectAndCompute(im2, None)

    #pts_1 = np.array([x.pt for x in kp1], dtype=np.float32)
    #pts_2 = np.array([x.pt for x in kp2], dtype=np.float32)

    matcher = cv2.BFMatcher()

    matches = matcher.knnMatch(descs1, descs2, k=2)

    good = []
    pt1 = []
    pt2 = []

    for i, (m, n) in enumerate(matches):
        if m.distance < 0.8 * n.distance:
            good.append(m)
            pt2.append(kp2[m.trainIdx].pt)
            pt1.append(kp1[m.queryIdx].pt)

    pts1 = np.float32(pt1)
    pts2 = np.float32(pt2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)

    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]
    pts11 = pts1.reshape(-1, 1, 2)
    pts22 = pts2.reshape(-1, 1, 2)

    pts1_norm = cv2.undistortPoints(pts11, cameraMatrix=k1, distCoeffs=None)
    pts2_norm = cv2.undistortPoints(pts22, cameraMatrix=k2, distCoeffs=None)

    E, mask = cv2.findEssentialMat(pts1_norm,
                                   pts2_norm,
                                   focal=k1[0, 0],
                                   pp=(k1[0, 2], k1[1, 2]),
                                   method=cv2.RANSAC,
                                   prob=0.99,
                                   threshold=1.0)
    r1, r2, t = cv2.decomposeEssentialMat(E)
    _, R, T, mask = cv2.recoverPose(E,
                                    pts1_norm,
                                    pts2_norm,
                                    focal=k1[0, 0],
                                    pp=(k1[0, 2], k1[1, 2]))
    return R, T
Example #26
def get_pose_with_score(frame_1, frame_2, corner_storage, intrinsic_mat):
    corners1, corners2 = corner_storage[frame_1], corner_storage[frame_2]

    correspondences = build_correspondences(corners1, corners2)

    essential_mat, mask_essential = cv2.findEssentialMat(correspondences[1],
                                                         correspondences[2],
                                                         intrinsic_mat,
                                                         method=cv2.RANSAC,
                                                         threshold=1.0)

    if essential_mat is None or mask_essential is None:
        return None, 0

    _, mask_homography = cv2.findHomography(correspondences[1],
                                            correspondences[2],
                                            method=cv2.RANSAC)

    essential_inliers = mask_essential.flatten().sum()
    homography_inliers = mask_homography.flatten().sum()

    if homography_inliers > essential_inliers * 0.5:
        return None, 0

    correspondences = _remove_correspondences_with_ids(
        correspondences, np.argwhere(mask_essential == 0))

    R1, R2, t = cv2.decomposeEssentialMat(essential_mat)

    candidates = [
        Pose(R1.T, R1.T @ t),
        Pose(R1.T, R1.T @ (-t)),
        Pose(R2.T, R2.T @ t),
        Pose(R2.T, R2.T @ (-t))
    ]

    best_pose_score, best_pose = 0, None

    triangulation_parameters = TriangulationParameters(1, 2, .1)
    for pose in candidates:
        points, _, _ = triangulate_correspondences(correspondences, eye3x4(),
                                                   pose_to_view_mat3x4(pose),
                                                   intrinsic_mat,
                                                   triangulation_parameters)
        if len(points) > best_pose_score:
            best_pose_score = len(points)
            best_pose = pose

    return best_pose, best_pose_score
Example #27
def compute_camera_pose(F, K):
    E = K.T @ F @ K
    R_1, R_2, t = cv2.decomposeEssentialMat(E)
    # 4 cases
    R1, t1 = R_1, t
    R2, t2 = R_1, -t
    R3, t3 = R_2, t
    R4, t4 = R_2, -t

    Rs = [R1, R2, R3, R4]
    ts = [t1, t2, t3, t4]
    Cs = []
    for i in range(4):
        Cs.append(-Rs[i].T @ ts[i])
    return Rs, Cs
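For reference, the two rotations and the translation direction used above come from the SVD of E. A sketch of the textbook decomposition (Hartley & Zisserman), which mirrors what cv2.decomposeEssentialMat computes, assuming E is a valid essential matrix:

import numpy as np

def decompose_essential_svd(E):
    U, _, Vt = np.linalg.svd(E)
    # enforce proper rotations (det = +1)
    if np.linalg.det(U) < 0:
        U = -U
    if np.linalg.det(Vt) < 0:
        Vt = -Vt
    W = np.array([[0., -1., 0.],
                  [1., 0., 0.],
                  [0., 0., 1.]])
    R_1 = U @ W @ Vt           # first rotation candidate
    R_2 = U @ W.T @ Vt         # second ("twisted pair") candidate
    t = U[:, 2].reshape(3, 1)  # translation direction, known only up to sign
    return R_1, R_2, t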
Example #28
def calculate_known_views(
    intrinsic_mat,
    corner_storage: CornerStorage,
    min_correspondencies_count=100,
    max_homography=0.7,
) -> Tuple[Tuple[int, Pose], Tuple[int, Pose]]:
    best_points_num, best_known_views = -1, ((None, None), (None, None))
    for i in range(len(corner_storage)):
        for j in range(i + 1, min(i + 40, len(corner_storage))):
            corresp = build_correspondences(corner_storage[i],
                                            corner_storage[j])
            if len(corresp[0]) < min_correspondencies_count:
                break
            E, mask = cv2.findEssentialMat(corresp.points_1,
                                           corresp.points_2,
                                           cameraMatrix=intrinsic_mat,
                                           method=cv2.RANSAC)
            if E is None:
                continue
            mask = (mask.squeeze() == 1)
            corresp = Correspondences(corresp.ids[mask],
                                      corresp.points_1[mask],
                                      corresp.points_2[mask])

            # Validate E using homography
            H, mask = cv2.findHomography(corresp.points_1,
                                         corresp.points_2,
                                         method=cv2.RANSAC)
            if np.count_nonzero(mask) / len(corresp.ids) > max_homography:
                continue

            R1, R2, T = cv2.decomposeEssentialMat(E)
            for view_mat2 in [
                    np.hstack((R, t)) for R in [R1, R2] for t in [T, -T]
            ]:
                view_mat1 = np.eye(3, 4)
                points, _, _ = triangulate_correspondences(
                    corresp, view_mat1, view_mat2, intrinsic_mat,
                    triang_params)
                print('Try frames {}, {}: {} correspondent points'.format(
                    i, j, len(points)))
                if len(points) > best_points_num:
                    best_known_views = ((i, view_mat3x4_to_pose(view_mat1)),
                                        (j, view_mat3x4_to_pose(view_mat2)))
                    best_points_num = len(points)
                    if best_points_num > 1500:
                        return best_known_views
    return best_known_views
Example #29
    def errorRotation(self, src, dst):
        ps = src.points(dst)
        ps = np.array([i for i in ps])
        pd = dst.points(src)
        E = cv2.findEssentialMat(ps, pd)
        if E is None:
            return
        try:
            decomposition = cv2.decomposeEssentialMat(E[0])
        except cv2.error:
            print(E)
            return
        r1 = (self.decomposeRotation(inv(self.rotation(src.r)).dot(decomposition[0].dot(self.rotation(dst.r)))) + np.pi / 2) % np.pi - np.pi / 2
        r2 = (self.decomposeRotation(inv(self.rotation(src.r)).dot(decomposition[1].dot(self.rotation(dst.r)))) + np.pi / 2) % np.pi - np.pi / 2
        # return the smaller-magnitude residual first
        if r1.dot(np.transpose(r1)) < r2.dot(np.transpose(r2)):
            return r1, r2, E[1]
        else:
            return r2, r1, E[1]
Example #30
    def vectorizePose(self):
        E, mask = cv.findEssentialMat(self.pts_ref,
                                      self.pts_frame,
                                      focal=1.0,
                                      pp=(0., 0.),
                                      method=cv.RANSAC,
                                      prob=0.99,
                                      threshold=1.0)
        r1, r2, t = cv.decomposeEssentialMat(E)
        _, R, T, mask = cv.recoverPose(E,
                                       self.pts_ref,
                                       self.pts_frame,
                                       focal=1.0,
                                       pp=(0., 0.))

        self.Rmat = R
        self.Tvec = T
Example #31
def find_view_by_two_frames(ids_1: np.ndarray, ids_2: np.ndarray,
                            corners_1: np.ndarray, corners_2: np.ndarray,
                            intrinsic_mat: np.ndarray) \
    -> Tuple[Optional[np.ndarray], Optional[np.ndarray], int]:
    correspondences = build_correspondences(ids_1, ids_2, corners_1, corners_2)

    if len(correspondences.ids) < 7:
        return None, None, 0

    mat, mat_mask = cv2.findEssentialMat(
        correspondences.points_1,
        correspondences.points_2,
        intrinsic_mat,
        method=cv2.RANSAC,
        prob=RANSAC_P,
        threshold=MAX_EPIPOL_LINE_DIST
    )

    mat_mask = mat_mask.flatten()
    correspondences = remove_correspondences_with_ids(
        correspondences, np.argwhere(mat_mask == 0).astype(np.int32))

    view_1 = eye3x4()

    best_view = None
    best_count = -1

    rotation_1, rotation_2, translation = cv2.decomposeEssentialMat(mat)
    rotation_1, rotation_2 = rotation_1.T, rotation_2.T
    for r in (rotation_1, rotation_2):
        for t in (translation, -translation):
            view_2 = pose_to_view_mat3x4(Pose(r, r @ t))

            _, point_ids, _ = triangulate_correspondences(
                correspondences,
                view_1, view_2,
                intrinsic_mat,
                TRIANGULATION_PARAMETERS
            )

            if best_count < len(point_ids):
                best_view = view_2
                best_count = len(point_ids)

    return view_1, best_view, best_count
Example #32
myC1 = Camera.myCamera(k)
myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


# returns corresponding points
Xp_1, Xp_2 = clsReconstruction.getMathingPoints('b4.jpg','b5.jpg','k_cam_hp.dat')


# evaluate the essential matrix using the camera parameters (using the original points)
E, mask0 = cv2.findEssentialMat(Xp_1, Xp_2, k, cv2.FM_RANSAC)

# evaluate the fundamental matrix (using the normalized points)
#F, mask = cv2.findFundamentalMat(Xp_1,Xp_2,cv2.FM_RANSAC)
#ki = np.linalg.inv(k)

R1, R2, t = cv2.decomposeEssentialMat(E)



retval, R, t, mask2 = cv2.recoverPose(E, Xp_1, Xp_2)

myC2 = Camera.myCamera(k)
myC2.projectiveMatrix(np.mat(t),R)


Xp_4Dt = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2])

#Xp_4Dt = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xh_1.transpose()[:2],Xh_2.transpose()[:2])

Xp_4D = Xp_4Dt.T
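
The snippet stops at the transposed homogeneous coordinates. A likely final step, an assumption not shown in the original, is to dehomogenize before exporting or plotting:

# divide out the homogeneous coordinate to obtain Nx3 Euclidean points
Xp_3D = Xp_4D[:, :3] / Xp_4D[:, 3:]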