Code example #1
File: relative_or.py  Project: ostepok/grass-gis-bba
def computeRelativeOrientation(tie_pts, cam_m, distor):
    # computes relative orientation using the OpenCV five-point algorithm

    tp1, tp2, tp1_u, tp2_u  = UndistorTiePoints(tie_pts, cam_m, distor)

    # eight point algorithm 
    #F, mask = cv2.findFundamentalMat(tp1[0, :], tp2[0, :], param1=0.1, param2=0.95, method = cv2.FM_RANSAC)
    #em = cam_m.T.dot(F).dot(cam_m)
    
    em, mask = cv2.findEssentialMat(tp1[0, :], tp2[0, :], 
                                    threshold=0.05, 
                                    prob=0.95, 
                                    focal=cam_m[0, 0], 
                                    pp=(cam_m[0, 2], cam_m[1, 2]))


    F = np.linalg.inv(cam_m.T).dot(em).dot(np.linalg.inv(cam_m))

    # optimal solution for triangulation of  object points
    p1, p2 = cv2.correctMatches(em, tp1_u, tp2_u)

    pts, R, t, mask = cv2.recoverPose(em, p1, p2)
    P = np.hstack((R, t))
    pm1 = np.eye(3, 4)

    pts_sps = compute3Dpoints(pm1, P, p1, p2)

    return pts_sps, P, p1, p2
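
Note: the commented-out eight-point branch and the line that recovers F from em both rely on the standard relation between the fundamental and essential matrices for an intrinsic matrix K (cam_m in this example): E = K^T * F * K, and equivalently F = K^(-T) * E * K^(-1).
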
Code example #2
File: features.py  Project: alexkreimer/monos
def prune_matches(matches, kp1, kp2, des1, des2, intr):
    matches = sorted(matches, key = lambda x:x.distance)
    
    m1 = np.array([m.queryIdx for m in matches])
    m2 = np.array([m.trainIdx for m in matches])
    
    # slice inliers
    kp1_matched = kp1[m1,:]
    kp2_matched = kp2[m2,:]

    des1_matched = des1[m1,:]
    des2_matched = des2[m2,:]

    #explore_match('win', img1, img2, zip(kp1, kp2))
    E, inliers = cv2.findEssentialMat(kp1_matched.astype('float32'),
                                      kp2_matched.astype('float32'),
                                      intr.f, tuple(intr.pp))
    # retval, R, t, mask = cv2.recoverPose(E, kp1, kp2)
    inliers = inliers.ravel().view('bool')
    kp1_matched = kp1_matched[inliers,:]
    kp2_matched = kp2_matched[inliers,:]
    
    des1_matched = des1_matched[inliers,:]
    des2_matched = des2_matched[inliers,:]
    
    return (kp1_matched, des1_matched, kp2_matched, des2_matched)
Code example #3
File: visual_odometry.py  Project: uoip/monoVO-python
	def processFrame(self, frame_id):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		absolute_scale = self.getAbsoluteScale(frame_id)
		if(absolute_scale > 0.1):
			self.cur_t = self.cur_t + absolute_scale*self.cur_R.dot(t) 
			self.cur_R = R.dot(self.cur_R)
		if(self.px_ref.shape[0] < kMinNumFeature):
			self.px_cur = self.detector.detect(self.new_frame)
			self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
		self.px_ref = self.px_cur
Code example #4
File: main.py  Project: nvvaulin/3dReconstruction
 def errorRotation(self, src, dst):
     ps = src.points(dst)
     ps = np.array([ i for i in ps])
     pd = dst.points(src)
     E = cv2.findEssentialMat(ps, pd)
     if E is None:
         return
     try:
         decomposition = cv2.decomposeEssentialMat(E[0])
     except:
         print E
         return
     r1 = (self.decomposeRotation(inv(self.rotation(src.r)).dot(decomposition[0].dot(self.rotation(dst.r))))+np.pi/2) % np.pi - np.pi/2
     r2 = (self.decomposeRotation(inv(self.rotation(src.r)).dot(decomposition[1].dot(self.rotation(dst.r))))+np.pi/2) % np.pi - np.pi/2
     if r1.dot(np.transpose(r1)) < r2.dot(np.transpose(r2)):
         return r1,r2,E[1]
     else:
         return r1,r2,E[1]
Code example #5
    def FindEssentialRansac(self, kpts1, kpts2):
        # Compute Essential matrix from a set of corresponding points
        # @param kpts1: list of keypoints of the previous frame
        # @param kpts2: list of keypoints of the current frame

        kpts1 = np.float32(kpts1)
        kpts2 = np.float32(kpts2)

        # findEssentialMat takes as arguments, apart from the keypoints of both
        # images, the focal length and the principal point. Looking at the
        # source code of this function
        # (https://github.com/Itseez/opencv/blob/master/modules/calib3d/src/five-point.cpp)
        # I realized that these parameters are fed to the function because it
        # internally creates the camera matrix, so they must be in pixel
        # coordinates. Hence, we take them from the already known camera matrix
        # (see the cameraMatrix-based sketch after this example):

        focal = 3.37
        pp = (2.85738, 0.8681)

        # pp = (self.K[0][2], self.K[1][2])

        self.E, self.maskE = cv2.findEssentialMat(kpts2, kpts1, focal, pp,
                                                  cv2.RANSAC, 0.999, 1.0,
                                                  self.maskE)
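
A note on the comment in the example above: besides the focal/pp form, OpenCV also exposes an overload of cv2.findEssentialMat that takes the full intrinsic matrix directly, so the values stay consistent with the calibration. A minimal sketch of that overload, assuming a recent OpenCV build; the point arrays and K below are made-up placeholders, not values from this project:

import cv2
import numpy as np

# Hypothetical correspondences in pixel coordinates (the five-point solver
# needs at least 5 pairs); in practice these come from a feature matcher.
pts_prev = (np.random.rand(20, 2) * 640).astype(np.float32)
pts_cur = pts_prev + np.float32([3.0, 1.0])

# Assumed intrinsics; in the class above this would be built from self.K.
K = np.array([[700.0, 0.0, 320.0],
              [0.0, 700.0, 240.0],
              [0.0, 0.0, 1.0]])

# cameraMatrix overload: focal length and principal point are read from K.
E, mask = cv2.findEssentialMat(pts_cur, pts_prev, K,
                               method=cv2.RANSAC, prob=0.999, threshold=1.0)
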
Code example #6
    def triangulate_pair(self,
                         idx0,
                         idx1,
                         plot_matches,
                         use_RANSAC,
                         epi_tol,
                         debug=False):
        # load two images
        (timestamp0, T0_sc, R0_sc, file0) = self.get_pose_and_filename(idx0)
        (timestamp1, T1_sc, R1_sc, file1) = self.get_pose_and_filename(idx1)
        img0 = cv2.imread(file0)
        img1 = cv2.imread(file1)
        self.timestamps[idx0] = timestamp0
        self.timestamps[idx1] = timestamp1

        # Get 3 x 4 projection matrices for two instants in time
        Proj0 = self.compute_projection_matrix(R0_sc, T0_sc)
        Proj1 = self.compute_projection_matrix(R1_sc, T1_sc)

        # detect features and set up dictionary from descriptors to keypoints
        kp0, des0 = self.detector.detectAndCompute(img0, self.mask)
        kp1, des1 = self.detector.detectAndCompute(img1, self.mask)

        # Match features (des0 = query, des1 = database), we are trying to find
        # descriptors in img0 that correspond to descriptors in img1.
        try:
            matches = self.matcher.match(des0, des1)
        except:
            print("Matcher Failed on frames {}/{}".format(idx0, idx1))
            return
        n_matches = len(matches)
        if n_matches == 0:
            print("No matches on frames {}/{}".format(idx0, idx1))
            return
        points0 = np.zeros((2, n_matches))
        points1 = np.zeros((2, n_matches))
        for i, match in enumerate(matches):
            points0[:, i] = kp0[match.queryIdx].pt
            points1[:, i] = kp1[match.trainIdx].pt

        # plot matches before outlier rejection
        if plot_matches:
            img2 = cv2.drawMatches(
                img0,
                kp0,
                img1,
                kp1,
                matches,
                None,
                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            plt.imshow(img2)
            plt.show()

        # Outlier rejection
        if use_RANSAC:
            (_, good_match_list) = cv2.findEssentialMat(np.transpose(points0),
                                                        np.transpose(points1),
                                                        self.K,
                                                        method=cv2.RANSAC,
                                                        prob=0.999,
                                                        threshold=1.0)
            good_match_list = np.resize(good_match_list, n_matches)
            matches = prune_matches(matches, good_match_list)
        else:
            good_match_list = self.outlier_rejection(R0_sc, R1_sc, T0_sc,
                                                     T1_sc, matches, points0,
                                                     points1, epi_tol)
            matches = prune_matches(matches, good_match_list)

        # plot matches after outlier rejection
        if plot_matches:
            img3 = cv2.drawMatches(
                img0,
                kp0,
                img1,
                kp1,
                matches,
                None,
                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            plt.imshow(img3)
            plt.show()

        n_good_matches = np.count_nonzero(good_match_list)
        if n_good_matches == 0:
            print("No good matches on frames {}/{}".format(idx0, idx1))
            return
        points0 = points0[:, good_match_list == 1]
        points1 = points1[:, good_match_list == 1]

        if debug:
            pdb.set_trace()

        # Triangulate matches
        try:
            points4d = cv2.triangulatePoints(Proj0, Proj1, points0, points1)
        except:
            print("Triangulation Failed on frames {}/{}".format(idx0, idx1))
            return

        # Add matches to lists of points for both images
        R0_cs = R0_sc.inv()
        R1_cs = R1_sc.inv()
        for i in range(n_good_matches):
            # Get homogeneous coordinate in world frame
            X_s = make_not_homogeneous(points4d[:, i])

            # Transform to camera frame
            X_c0 = R0_cs.apply(X_s - T0_sc)
            X_c1 = R1_cs.apply(X_s - T1_sc)
            X_c0 = np.reshape(X_c0, (3, ))
            X_c1 = np.reshape(X_c1, (3, ))
            self.add_4d_point(timestamp0, points0[:, i],
                              make_homogeneous(X_c0))
            self.add_4d_point(timestamp1, points1[:, i],
                              make_homogeneous(X_c1))

            #self.add_4d_point(timestamp0, points0[:,i], points4d[:,i])
            #self.add_4d_point(timestamp1, points1[:,i], points4d[:,i])

        # Print results
        print(
            "Frame {}/{}: {}/{} features, {} matches, {} good matches".format(
                idx0, idx1, len(kp0), len(kp1), n_matches, n_good_matches))
Code example #7
File: vo_modules.py  Project: KuangHaofei/DF-VO
    def compute_pose_2d2d(self, kp_ref, kp_cur):
        """Compute the pose from view2 to view1
        Args:
            kp_ref (Nx2 array): keypoints for reference view
            kp_cur (Nx2 array): keypoints for current view
        Returns:
            pose (SE3): relative pose from current to reference view
            best_inliers (N boolean array): inlier mask
        """
        principal_points = (self.cam_intrinsics.cx, self.cam_intrinsics.cy)

        # validity check
        valid_cfg = self.cfg.compute_2d2d_pose.validity
        valid_case = True

        # initialize ransac setup
        best_Rt = []
        best_inlier_cnt = 0
        max_ransac_iter = self.cfg.compute_2d2d_pose.ransac.repeat
        best_inliers = np.ones((kp_ref.shape[0])) == 1

        if valid_cfg.method == "flow+chei":
            # check flow magnitude
            avg_flow = np.mean(np.linalg.norm(kp_ref-kp_cur, axis=1))
            min_flow = valid_cfg.min_flow
            valid_case = avg_flow > min_flow
        if valid_case:
            for i in range(max_ransac_iter): # repeat RANSAC several times for a stable result
                # shuffle kp_cur and kp_ref (only useful when the random seed is fixed)
                new_list = np.arange(0, kp_cur.shape[0], 1)	
                np.random.shuffle(new_list)
                new_kp_cur = kp_cur.copy()[new_list]
                new_kp_ref = kp_ref.copy()[new_list]

                start_time = time()
                E, inliers = cv2.findEssentialMat(
                            new_kp_cur,
                            new_kp_ref,
                            focal=self.cam_intrinsics.fx,
                            pp=principal_points,
                            method=cv2.RANSAC,
                            prob=0.99,
                            threshold=self.cfg.compute_2d2d_pose.ransac.reproj_thre,
                            )
                
                # check homography inlier ratio
                if valid_cfg.method == "homo_ratio":
                    # Find homography
                    H, H_inliers = cv2.findHomography(
                                new_kp_cur,
                                new_kp_ref,
                                method=cv2.RANSAC,
                                confidence=0.99,
                                ransacReprojThreshold=0.2,
                                )
                    H_inliers_ratio = H_inliers.sum()/(H_inliers.sum()+inliers.sum())
                    valid_case = H_inliers_ratio < 0.25
                
                if valid_case:
                    cheirality_cnt, R, t, _ = cv2.recoverPose(E, new_kp_cur, new_kp_ref,
                                            focal=self.cam_intrinsics.fx,
                                            pp=principal_points,)
                    self.timers.timers["Ess. Mat."].append(time()-start_time)
                    
                    # check best inlier cnt
                    if valid_cfg.method == "flow+chei":
                        inlier_check = inliers.sum() > best_inlier_cnt and cheirality_cnt > 50
                    elif valid_cfg.method == "homo_ratio":
                        inlier_check = inliers.sum() > best_inlier_cnt
                    else:
                        assert False, "wrong cfg for compute_2d2d_pose.validity.method"
                    
                    if inlier_check:
                        best_Rt = [R, t]
                        best_inlier_cnt = inliers.sum()
                        best_inliers = inliers
            if len(best_Rt) == 0:
                R = np.eye(3)
                t = np.zeros((3, 1))
                best_Rt = [R, t]
        else:
            R = np.eye(3)
            t = np.zeros((3,1))
            best_Rt = [R, t]
        R, t = best_Rt
        pose = SE3()
        pose.R = R
        pose.t = t
        return pose, best_inliers
Code example #8
    def find2d3dPointsBase(self):
        # img_base= cv2.imread(self.images_path[0])
        # img_base= self.preProcessing(img_base)
        min_inliers_ratio=0
        index_winner=0
        p1_winner = None
        p2_winner = None
        longitudImagen=len(self.arregloImagen)
        if longitudImagen >10:
            longitudImagen=9
        else:
            longitudImagen-=1
        print "LONGITUD IMAGEN ES:   "+ str(longitudImagen)
        # for index in range(len(self.images_path)-1): # Range corresponds to how many images are compared after the base image.
        for index in range(longitudImagen): # COMPARES AGAINST THE FIRST 6 IMAGES TO FIND THE FIRST PAIR.
            print "-------------------------INICIA-----------------------------------"
            # img_actual=cv2.imread(self.images_path[index+1])
            # img_actual=self.preProcessing(img_actual)
            p1_filtrado, p2_filtrado,matches_subset,matches_count=self.featureMatching(self.arregloImagen[0].features,self.arregloImagen[0].descriptors,self.arregloImagen[index+1].features,self.arregloImagen[index+1].descriptors)
            M, mask = cv2.findHomography(p1_filtrado, p2_filtrado, cv2.RANSAC,10.0)
            mask_inliers= float(cv2.countNonZero(mask))
            mask_ratio= mask_inliers/matches_count
            print "index: " + str(index+1)
            print "Inliers ratio: " + str(mask_ratio)+ " Imagen: "+str(index+2)
            # CONDITIONS FOR THE LOWEST INLIER RATIO; THE INLIER RATIO AND THE MINIMUM NUMBER OF MATCHES ARE TAKEN INTO ACCOUNT.
            if ((min_inliers_ratio > mask_ratio and matches_count > 120 ) or index == 0): # SFM toy lib uses 100.
                print  "mathces_count: " + str(matches_count)
                min_inliers_ratio=mask_ratio
                index_winner= index+1
                p1_winner=p1_filtrado
                p2_winner=p2_filtrado
            print "-------------------------ACABA-----------------------------------"
        print "La imagen con menos ratio inlier es imagen: "+ str(index_winner+1) + " e index: " + str(index_winner)
        self.index_winner_base= index_winner
        mtx=self.createMtx(self.camera_params[index_winner,0],self.camera_params[index_winner,1],self.camera_params[index_winner,2])
        E,mask =cv2.findEssentialMat(p1_winner,p2_winner,mtx,cv2.RANSAC,0.999,1.0)
        points, R, t,mask= cv2.recoverPose(E, p1_winner,p2_winner,mtx)
        P2=np.array([[R[0,0],R[0,1], R[0,2], t[0]],[R[1,0],R[1,1], R[1,2], t[1]],[R[2,0],R[2,1], R[2,2], t[2]]],np.float32)
        tempEye=np.eye(3)
        P1=np.zeros((3,4))
        P1[:,:-1]=tempEye
        self.CheckCoherentRotation(R)

        puntos3d_filtrado,p1_winner_filtrado,p2_winner_filtrado = self.triangulateAndFind3dPoints(P1,P2,p1_winner,p2_winner,0,index_winner)
# LOURAKIS------------------------------------------------------
        totalFrames=np.zeros((len(puntos3d_filtrado),1))
        totalFrames.fill(2)
        frame0=np.zeros((len(puntos3d_filtrado),1))
        frame0.fill(0)
        frame_winner=np.zeros((len(puntos3d_filtrado),1))
        frame_winner.fill(index_winner)

        self.pointParams=np.concatenate((puntos3d_filtrado,totalFrames,frame0,p1_winner_filtrado,frame_winner,p2_winner_filtrado),axis=1)
        print "POIINTPARAMS SHAPE:"+ str(self.pointParams.shape)
        print self.pointParams[0]
        # WITH DIST
        # self.camera_params[index_winner,14:]=P2[:,3]
        # self.camera_params[0,14:]=P1[:,3]
        # WITHOUT DIST
        self.camera_params[index_winner,(14-5):]=P2[:,3] # THIS IS THE TRANSLATION
        self.camera_params[0,(14-5):]=P1[:,3]

        # P2Quaternion=Quaternion(matrix=P2[:,:3])
        # P2QuatVec=np.array((P2Quaternion[0],P2Quaternion[1],P2Quaternion[2],P2Quaternion[3]),np.float32)
        P2Quaternion=sba.quaternions.quatFromRotationMatrix(P2[:,:3])
        P2QuatVec=P2Quaternion.asVector()

        # self.camera_params[index_winner,10:14]=P2QuatVec

        self.camera_params[index_winner,(10-5):(14-5)]=P2QuatVec

        # P1Quaternion=Quaternion(matrix=P1[:,:3])
        # P1QuatVec=np.array((P1Quaternion[0],P1Quaternion[1],P1Quaternion[2],P1Quaternion[3]),np.float32)
        P1Quaternion=sba.quaternions.quatFromRotationMatrix(P1[:,:3])
        P1QuatVec=P1Quaternion.asVector()
        # self.camera_params[0,10:14]=P1QuatVec
        self.camera_params[0,(10-5):(14-5)]=P1QuatVec #NO DIST
        print "quaterion: " + str(P2QuatVec)
#----------------------------------------------------------------------------------

        # self.bundleAdjustment()  # RUNS BUNDLE ADJUSTMENT FOR THE BASE PAIR...
#----------------------------------------------------------------------------------
#
#         # self.camera_params=self.newcams.toDylan()
#         rawC = np.genfromtxt('nuevascamaraspro')
#         for idx in range(rawC.shape[0]):
#             self.camera_params[idx,:]=rawC[idx,:]
#
#         # P1[:,3]=self.camera_params[0,(14-5):]
#         # P1QuatVec=self.camera_params[0,(10-5):(14-5)]
#         # P1QuatVec[1]=P1QuatVec[1]*-1
#         # P1Quaternion=Quaternion(array=P1QuatVec)
#         # P1[:,:3]=P1Quaternion.rotation_matrix
#
#         # P1QuatVec=self.camera_params[0,(10-5):(14-5)]
#         # P1Quaternion=sba.quaternions.quatFromArray(P1QuatVec)
#         # P1[:,:3]=P1Quaternion.asRotationMatrix()
#
#         P2[:,3]=self.camera_params[index_winner,(14-5):]
#         P2QuatVec=self.camera_params[index_winner,(10-5):(14-5)]
#         # P2QuatVec[1]=P2QuatVec[1]*-1
#         print "P2QUATVEC"
#         print P2QuatVec[1]
# #
#         P2Quaternion=Quaternion(array=P2QuatVec)
#         P2[:,:3]=P2Quaternion.rotation_matrix
#         # self.pointParams[:,:3]=self.puntos3dTotal
#         # puntos3d_filtrado=self.puntos3dTotal
#----------------------------------------------------------------------------------
        # P2QuatVec=self.camera_params[index_winner,(10-5):(14-5)]
        # P2QuatVec[1]=P2QuatVec[1]*-1
        # P2Quaternion=sba.quaternions.quatFromArray(P2QuatVec)
        # P2[:,:3]=P2Quaternion.asRotationMatrix()

        # puntos3d_filtrados=self.puntos3dTotal[-len(puntos3d_filtrados):,:]

#------------------------------- UPDATE DATA ADJUSTED BY BUNDLE ADJUSTMENT -----------------------
        self.arregloImagen[index_winner].Pcam = P2
        self.arregloImagen[index_winner].p2dAsociados = p2_winner_filtrado
        self.arregloImagen[index_winner].p3dAsociados = puntos3d_filtrado
        self.arregloImagen[0].Pcam = P1
        self.arregloImagen[0].p2dAsociados=p1_winner_filtrado
        self.arregloImagen[0].p3dAsociados=puntos3d_filtrado
#--------------------------------------------------------------------------------------------------

        self.puntos3dTotal=puntos3d_filtrado
Code example #9
File: tests.py  Project: zgojcic/acne
def eval_decompose(p1s,
                   p2s,
                   dR,
                   dt,
                   threshold=0.001,
                   mask=None,
                   method=cv2.LMEDS,
                   probs=None,
                   weighted=False,
                   use_prob=True):

    if mask is None:
        mask = np.ones((len(p1s), ), dtype=bool)
    # Change mask type
    mask = mask.flatten().astype(bool)

    # Mask the ones that will not be used
    p1s_good = p1s[mask]
    p2s_good = p2s[mask]
    probs_good = None
    if probs is not None:
        probs_good = probs[mask]

    num_inlier = 0
    mask_new2 = None
    if p1s_good.shape[0] >= 5:
        if method in ["RANSAC", cv2.RANSAC]:
            E, mask_new = cv2.findEssentialMat(
                p1s_good, p2s_good, method=method,
                threshold=threshold)  # threshold=0.001--roughly 1 / f
        elif method == "USAC":
            E, mask_new = usacFindEssentialMat(p1s_good,
                                               p2s_good,
                                               method=method + "_E",
                                               threshold=threshold,
                                               probs=probs_good,
                                               weighted=weighted,
                                               use_prob=use_prob)
        elif method == "MAGSAC":
            # using magsac wrapper
            E, mask_new = magsacFindEssentialMat(p1s_good,
                                                 p2s_good,
                                                 method=method + "_E",
                                                 threshold=threshold,
                                                 probs=probs_good,
                                                 weighted=weighted,
                                                 use_prob=use_prob)
        else:
            raise ValueError("Wrong method")

        if E is not None:
            new_RT = False
            # Get the best E just in case we get multiple E from
            # findEssentialMat
            for _E in np.split(E, len(E) // 3):
                _num_inlier, _R, _t, _mask_new2 = cv2.recoverPose(
                    _E, p1s_good, p2s_good, mask=mask_new)
                if _num_inlier > num_inlier:
                    num_inlier = _num_inlier
                    R = _R
                    t = _t
                    mask_new2 = _mask_new2
                    new_RT = True
            if new_RT:
                err_q, err_t = evaluate_R_t(dR, dt, R, t)
            else:
                err_q = np.pi
                err_t = np.pi / 2

        else:
            err_q = np.pi
            err_t = np.pi / 2
    else:
        err_q = np.pi
        err_t = np.pi / 2

    loss_q = np.sqrt(0.5 * (1 - np.cos(err_q)))
    loss_t = np.sqrt(1.0 - np.cos(err_t)**2)

    mask_updated = mask.copy()
    if mask_new2 is not None:
        # Change mask type
        mask_new2 = mask_new2.flatten().astype(bool)
        mask_updated[mask] = mask_new2

    return err_q, err_t, loss_q, loss_t, np.sum(num_inlier), mask_updated
Code example #10
class camClass(Image):
    def __init__(self,pose_guess=None,Kmat=None):

        self.c = np.array([sensor_x,sensor_y])  # Sensor
        self.images = []
        self.pose_guess = pose_guess
        self.Kmat = Kmat #camera matrix
        self.pointCloud = []
    def add_images(self,image):
        image.pose = self.pose_guess  #initialize image with guess
        self.images.append(image) 

    def rotational_transform(self,pts,pose):
            """  
            This function performs the translation and rotation from world coordinates into generalized camera coordinates.
            This function takes the Easting, Northing, and Elevation of the features in an image.
            The pose vector is unknown and what we are looking to optimize.
            """
            cam_x = pose[0]
            cam_y = pose[1]
            cam_z = pose[2]
            roll = pose[3]
            pitch = pose[4]
            yaw = pose[5]

            r_axis = np.array([[1, 0, 0], 
                               [0, 0,-1], 
                               [0, 1, 0]])
            r_roll = np.array([[np.cos(roll), 0, -1*np.sin(roll)], 
                               [0, 1, 0], 
                               [np.sin(roll), 0, np.cos(roll)]])            
            r_pitch = np.array([[1, 0, 0], 
                                [0, np.cos(pitch), np.sin(pitch)], 
                                [0, -1*np.sin(pitch), np.cos(pitch)]])           
            r_yaw = np.array([[np.cos(yaw), -1*np.sin(yaw), 0, 0], 
                              [np.sin(yaw), np.cos(yaw), 0, 0], 
                              [0, 0, 1, 0]])
            T = np.array([[1, 0, 0, -cam_x], 
                          [0, 1, 0, -cam_y], 
                          [0, 0, 1, -cam_z], 
                          [0, 0, 0, 1]])
            C = r_axis @ r_roll @ r_pitch @ r_yaw @ T   
            
            
            if pts.ndim <= 1:
                pts = pts[np.newaxis,:]
            pts = (np.c_[pts, np.ones(pts.shape[0])]).T
            
            return C @ pts
               
    def projective_transform(self,rot_pt):
        """  
        This function performs the projective transform on generalized coordinates in the camera reference frame.
        This function needs the outputs of the rotational transform function (the rotated points).
        """
        focal = self.f 
        sensor = self.c  
        rot_pt = rot_pt.T
        #General Coordinates
        gcx = rot_pt[:,0]/rot_pt[:,2]
        gcy = rot_pt[:,1]/rot_pt[:,2]
        #Pixel Locations
        pu = gcx*focal + sensor[0]/2.
        pv = gcy*focal + sensor[1]/2.
        return np.array([pu,pv]).T
          
 
    def estimate_pose(self):
        
         def residual_pose(pose, realgcp, imagegcp,self):
            pt = self.projective_transform(self.rotational_transform(realgcp, pose))
            res = pt.flatten() - imagegcp.flatten()
            return res 

         for i in range(len(self.images)):
            realgcp = self.images[i].realgcp
            imagegcp = self.images[i].imagegcp
            self.images[i].pose = so.least_squares(residual_pose, self.images[i].pose, method='lm',args=[realgcp, imagegcp,self]).x
     
    def estimate_RWC(self):

        if(len(self.images) < 2):
            print("There are not 2 images in this camera class")
        #===========================
        def residual_RWC( RWC, pose1, pose2,imcor1,imcor2,self):
        
            pt_1 = self.projective_transform(self.rotational_transform(RWC, pose1)) # u,v based on first image
            pt_2 = self.projective_transform(self.rotational_transform(RWC, pose2)) 
            res_1 = pt_1.flatten() - imcor1.flatten()
            res_2 = pt_2.flatten() - imcor2.flatten()
            return np.hstack((res_1, res_2))  
        #==========================
    
        for i in range(len(self.images)):
            for j in range(len(self.images)):
                if( i != j):
                    self.images[i].realgcp = so.least_squares(residual_RWC, self.images[i].realgcp , method='lm',args=(self.images[i].pose,                         self.images[j].pose, self.images[i].imagegcp, self.images[j].imagegcp,self)).x
                    
    def pointCloud(self):

        def triangulate(P0, P1, x1, x2):
            # Linear triangulation (DLT): stack the four constraint rows and take
            # the right singular vector with the smallest singular value.
            A = np.array([[P0[2,0]*x1[0] - P0[0,0], P0[2,1]*x1[0] - P0[0,1], P0[2,2]*x1[0] - P0[0,2], P0[2,3]*x1[0] - P0[0,3]],
                          [P0[2,0]*x1[1] - P0[1,0], P0[2,1]*x1[1] - P0[1,1], P0[2,2]*x1[1] - P0[1,2], P0[2,3]*x1[1] - P0[1,3]],
                          [P1[2,0]*x2[0] - P1[0,0], P1[2,1]*x2[0] - P1[0,1], P1[2,2]*x2[0] - P1[0,2], P1[2,3]*x2[0] - P1[0,3]],
                          [P1[2,0]*x2[1] - P1[1,0], P1[2,1]*x2[1] - P1[1,1], P1[2,2]*x2[1] - P1[1,2], P1[2,3]*x2[1] - P1[1,3]]])
            u, s, vt = np.linalg.svd(A)
            return vt[-1]

        I1 = self.images[0]
        I2 = self.images[1]
        h, w, d = I1.shape

        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(I1, None)
        kp2, des2 = sift.detectAndCompute(I2, None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)

        u1 = []
        u2 = []
        for m in good:
            u1.append(kp1[m.queryIdx].pt)
            u2.append(kp2[m.trainIdx].pt)

        u1 = np.array(u1)  # pixel coordinates
        u2 = np.array(u2)

        # Make homogeneous
        u1 = np.c_[u1, np.ones(u1.shape[0])]
        u2 = np.c_[u2, np.ones(u2.shape[0])]

        # Intrinsics: principal point at the image centre, focal length from self.f
        cu = w/2.
        cv_ = h/2.
        K_cam = np.array([[self.f, 0, cu], [0, self.f, cv_], [0, 0, 1]])
        K_inv = np.linalg.inv(K_cam)
        x1 = u1 @ K_inv.T  # normalized camera coordinates
        x2 = u2 @ K_inv.T

        E, inliers = cv2.findEssentialMat(x1[:, :2], x2[:, :2], np.eye(3), method=cv2.RANSAC, threshold=1e-3)
        inliers = inliers.ravel().astype(bool)
        n_in, R, t, _ = cv2.recoverPose(E, x1[inliers, :2], x2[inliers, :2])

        P0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        P1 = np.hstack((R, t))

        points = []
        for i in range(len(u2)):
            # triangulated homogeneous point (x, y, z, w) in the first camera frame
            points.append(triangulate(P0, P1, x1[i], x2[i]))

        self.pointCloud = np.array(points)
        self.pointCloud /= self.pointCloud[:, 3:]  # normalize homogeneous coordinates

    def plotPointCloud(self):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot(*self.pointCloud[:, :3].T, 'k.')
        plt.show()

if __name__ == "__main__":
  Image1 = Image(sys.argv[1])
  Image2 = Image(sys.argv[2])
  pointCloud = camClass()
  pointCloud.add_images(Image1)
  pointCloud.add_images(Image2)
  pointCloud.pointCloud()
  pointCloud.plotPointCloud()
Code example #11
z = [0]
for i in range(19, 30):
    img2 = cv2.imread(imgfiles + '%i.png' % i, cv2.IMREAD_GRAYSCALE)

    set1, set2 = feature_match(img1, img2)
    #    x1 = [set1[i][0] for i in range(len(set1))]
    #    y1 =[set1[i][1] for i in range(len(set1))]
    #    x2 = [set2[i][0] for i in range(len(set2))]
    #    y2 = [set2[i][1] for i in range(len(set2))]
    #    points1 = np.column_stack((x1,y1))
    #    points2 = np.column_stack((x2,y2))
    F, inliers1, inliers2 = get_RANSAC(set1, set2, 800)
    #    print(inliers1)
    points1 = inliers1
    points2 = inliers2
    E, mask = cv2.findEssentialMat(points1, points2)
    points, R, t, mask = cv2.recoverPose(E, points1, points2)

    t_t = t_t + np.dot(R_t, t)
    R_t = np.dot(R_t, R)
    x.append(float(t_t[0]))
    z.append(float(t_t[2]))
    print(i)
    img1 = img2
    #plotting the data

plt.pause(0.01)
plt.title('Camera Trajectory')
plt.plot(x, z, 'r-')
plt.xlabel('Motion in x-direction')
plt.ylabel('Motion in z-direction')
Code example #12
def main():
	img_path = '../Data/stereo/centre/'
	imgs = sorted(os.listdir(img_path))
	fx,fy,cx,cy,G_camera_image,LUT = ReadCameraModel('../Data/model')

	i = 100
	img1 = cv2.imread(img_path+imgs[i],-1)
	img1 = cv2.cvtColor(img1,cv2.COLOR_BAYER_GR2BGR)
	img1 = UndistortImage(img1,LUT)

	img2 = cv2.imread(img_path+imgs[i+1],-1)
	img2 = cv2.cvtColor(img2,cv2.COLOR_BAYER_GR2BGR)
	img2 = UndistortImage(img2,LUT)


	x1,x2 = utils.getMatchingFeaturePoints(img1,img2)

	x1 = np.hstack([x1,np.ones((x1.shape[0],1))])
	x2 = np.hstack([x2,np.ones((x2.shape[0],1))])

	features = np.hstack([x1,x2])
	x1_in,x2_in = RANSAC.getInliersRANSAC(features,threshold=(0.005),size=8,num_inliers=0.6*features.shape[0],num_iters=1000)
	print(x1_in.shape)
	fund_mtx = fundamental.EstimateFundamentalMatrix(x1_in,x2_in)
	K = np.array([[fx,0,cx],[0,fy,cy],[0,0,1]])

	ess_mtx = essential.EssentialMatrixFromFundamentalMatrix(fund_mtx,K)

	
	C,R = cameraPose.ExtractCameraPose(ess_mtx)
	# print("Available C :")
	# print(C)

	print("Available R")
	print(R)

	X_tri = []
	for j in range(4):
		X_tri.append(triangulation.linearTriangulation(K,np.zeros(3),C[j],np.eye(3),R[j],x1_in,x2_in))
	X_tri = np.array(X_tri)
	# print(X_tri.shape)
	C_c,R_c = disambiguateCameraPose(X_tri,C,R)

	print("Pose Position :")
	print(C_c)
	print("Pose Orientation :")
	print(R_c)	

	# angle = utils.rotationMatrixToEulerAngles(R_c)
	# print("Angle :")
	# print(angle)

	E_act = cv2.findEssentialMat(x1_in[:,:2],x2_in[:,:2],K)
	_,R,T,_ = cv2.recoverPose(E_act[0],x1_in[:,:2],x2_in[:,:2])

	print("---")
	print("CV2 Pose Position :")
	print(T.T)
	print("CV2 Pose Orientation :")
	print(R)	
	# angle = utils.rotationMatrixToEulerAngles(R)
	# print("CV2 Ang;e:")
	# print(angle)
	print("<----------------->")
Code example #13
File: process.py  Project: alexkreimer/monos
def main(seq):
    KITTI_HOME = '/home/kreimer/KITTI'
    seq_dir = os.path.join(KITTI_HOME, 'dataset', 'sequences', seq)
    image_0 = os.path.join(seq_dir, 'image_0')

    calib_file = os.path.join(KITTI_HOME, 'dataset', 'sequences', seq, 'calib.txt')
    
    scales = read_scales('data/%s.txt' % seq)
    scales = np.array(scales)

    grid   = (5,3)
    imsize = (1226, 370)
    # edges contain boundaries, i.e., the number of cells will be #-2
    xedges = np.linspace(0, imsize[0], num = grid[0])
    yedges = np.linspace(0, imsize[1], num = grid[1])

    with open(calib_file, 'r') as f:
        for line in f:
            line = [float(v) for v in line.split()[1:] ]
            K = np.array(line).reshape(3,4)
            break

    Intrinsic = namedtuple('intrinsic', ['f', 'pp'])
    intr = Intrinsic(f = K[0,0], pp=K[range(2),2])

    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
    HH = []
    num_frames = scales.shape[0]

    data = {}
    for key in data.keys():
        data[key].append(np.array([]))
    
    for frame, (kp2, des2, img2) in enumerate(harris_gen(image_0, '%06d.png', 0, num_frames)):
        print('seq %s: frame %d of %d' % (seq, frame, num_frames))
        try:
            matches = bf.match(des1, des2)
            matches = sorted(matches, key = lambda x:x.distance)

            m1 = np.array([m.queryIdx for m in matches])
            m2 = np.array([m.trainIdx for m in matches])

            # slice inliers
            kp1_matched = kp1[m1,:]
            kp2_matched = kp2[m2,:]

            des1_matched = des1[m1,:]
            des2_matched = des2[m2,:]

            #explore_match('win', img1, img2, zip(kp1, kp2))
            E, inliers = cv2.findEssentialMat(kp1_matched.astype('float32'),
                                              kp2_matched.astype('float32'),
                                              intr.f, tuple(intr.pp))
            # retval, R, t, mask = cv2.recoverPose(E, kp1, kp2)

            inliers = inliers.ravel().view('bool')
            kp1_matched = kp1_matched[inliers,:]
            kp2_matched = kp2_matched[inliers,:]

            des1_matched = des1_matched[inliers,:]
            des2_matched = des2_matched[inliers,:]

            #MatchDisplay.showFeatures(img2, kp2_matched)
            xv, yv = np.meshgrid(xedges, yedges, indexing='ij')
            H = None

            xbins = len(xedges)-1
            ybins = len(yedges)-1

            for k in data.keys():
                data[k].append(np.array([]))
                
            for i in range(xbins):
                for j in range(ybins):
                    xmin, xmax = xv[i,j], xv[i+1,j]
                    ymin, ymax = yv[i,j], yv[i,j+1]

                    xbin = np.logical_and(kp1_matched[:,0] > xmin, kp1_matched[:,0] <= xmax)
                    ybin = np.logical_and(kp1_matched[:,1] > ymin, kp1_matched[:,1] <= ymax)

                    val = np.logical_and(xbin, ybin)

                    # binned feature points
                    kp1_bin = kp1_matched[val,:]
                    kp2_bin = kp2_matched[val,:]

                    f = FeatureWarehouse((kp1_bin, kp2_bin)).generate()

                    for k, val in enumerate(f):
                        name = val[1]
                        value = val[0]

                        cur_val = data.setdefault(name, [np.array([])])
                        data[name][-1] = np.hstack([cur_val[-1], value])

            kp1, des1, img1 = kp2, des2, img2
        except NameError:
            kp1, des1, img1 = kp2, des2, img2

    scales = scales[:frame].reshape((frame,1))

    for k in data.keys():
        with open('data/%s_%s.pickle' % (seq, k), 'wb') as fd:
            pickle.dump({'X': np.array(data[k]), 'y': scales.ravel()}, fd)
Code example #14
def main():
    # sift = cv2.xfeatures2d.SIFT_create()
    BasePath = './Oxford_dataset/stereo/centre/'
    K, fx, fy, cx, cy, G_camera_image, LUT = getCameraMatrix(
        './Oxford_dataset/model')
    images = []
    P0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])

    cam_pos = np.array([0, 0, 0])
    cam_pos = np.reshape(cam_pos, (1, 3))
    test = os.listdir(BasePath)

    for image in sorted(test):
        # print(image)
        images.append(image)

    img1 = cv2.imread("%s/%s" % (BasePath, images[729]), 0)
    # img1 = img1[0:820,:]
    # print img1.shape
    # cam_pos = np.zeros([1,2])
    # for file in range(len(images)-1):
    H1 = homogeneousMat(P0)
    # fig = plt.figure()
    # ax = Axes3D(fig)
    for file in range(730, 1000):
        # print(H1,H1.shape)
        img1 = undistortImageToGray(img1, LUT)
        imgs2 = cv2.imread("%s/%s" % (BasePath, images[file + 1]), 0)
        img2 = undistortImageToGray(imgs2, LUT)
        img1 = img1[0:820, :]
        img2 = img2[0:820, :]
        pts1, pts2, flag = features(img1, img2, K)

        if ((flag == False) | (len(pts1) <= 8) | (len(pts2) <= 8)):
            img1 = imgs2.copy()
            print("Frame skipped")
            continue
        # E,points1,points2 = getEssentialMat(K,pts1,pts2)
        E, _ = cv2.findEssentialMat(pts1,
                                    pts2,
                                    focal=fx,
                                    pp=(cx, cy),
                                    method=cv2.RANSAC,
                                    prob=0.999,
                                    threshold=0.5)
        # print("pts1",len(pts1))
        poses = ExtractCameraPoses(E)
        # print(len(poses))
        allPts = dict()
        for j in range(4):
            X = []
            xData = []
            yData = []
            zData = []
            for i in range(len(pts1)):
                pt = LinearTriangulation(K, P0, poses[j], pts1[i], pts2[i])
                #    pt = NonlinearTriangulation(K,P0,poses[j],pts1[i],pts2[i],pt)
                # print(pt.x/pt.x[-1])
                X.append(pt)
                xData.append(pt[0])
                yData.append(pt[1])
                zData.append(pt[2])
            # plt.plot(xData,zData,'o')
            # ax.scatter3D(xData,zData,yData)
            # plt.pause(0.001)
            # plt.show()
            # while True:
            #     if cv2.waitKey(1) & 0xFF == ord('q'):
            #         break
            # print('x',X[0].shape)
            # plt.plot(X[])
            # print("Pose" + str(j))
            allPts.update({j: X})

        # _,R,t,_ = cv2.recoverPose(E,pts1,pts2,K)
        # print(R.shape)
        # t = np.reshape(t,(3,1))
        correctPose, no_inlier, poseid = DisambiguateCameraPose(
            P0, poses, allPts)
        #nonlinear PNP
        if correctPose is None:
            img1 = imgs2.copy()
            print("Frame Skipped")
            continue
        # print("correctPose",correctPose)
        Cnew = correctPose[:, 3]
        Rnew = correctPose[:, :3]
        refinedPose = nonlinearPnP(allPts[poseid], pts2, K, Cnew, Rnew)
        correctPose = refinedPose
        # correctPose = np.concatenate((R,t),axis=1)
        if (no_inlier):
            img1 = imgs2.copy()
            print("no inliers")
            continue
        # print("Builtin",np.hstack((R,t)))
        # print("Calculated",correctPose)
        P0 = correctPose
        H2 = homogeneousMat(correctPose)
        # print(H1)s
        x_old = H1[0, 3]
        z_old = H1[2, 3]
        H1 = H1 @ H2
        # H1 = H2
        img1 = imgs2.copy()
        # pos = -H1[:3,:3].T @ np.reshape(H1[:3,3],(3,1))
        pos = H1[:3, 3]
        print(pos.shape)
        pos = np.reshape(pos, (1, 3))
        x_test = pos[0, 0]
        z_test = pos[0, 2]
        # print("pose shape",pos.shape)
        # print("cam pose",cam_pos.shape)
        cam_pos = np.concatenate((cam_pos, pos), axis=0)
        cv2.imshow('test', img1)
        plt.plot([x_test], [-z_test], 'o')
        plt.pause(0.001)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    plt.plot(cam_pos[:, 0], cam_pos[:, 2], ".r")
    # plt.plot(h1[0,3],h1[2,3],'.b')
    plt.show()
Code example #15
    def compute_camera_extrinsic(self, image1, image2):
        # orb = cv2.ORB_create()

        # # get key points and descriptors
        # kp1, des1 = orb.detectAndCompute(image1, None)
        # kp2, des2 = orb.detectAndCompute(image2, None)

        # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # matches = bf.match(des1,des2)
        # matches = sorted(matches, key = lambda x: x.distance)
        # good = matches[:int(len(matches) * 0.1)]

        # # select apropriate matches
        # pts1 = []
        # pts2 = []
        # for m in matches[:10]:
        #     #if m.distance < self.__DISTANCE:
        #     pts2.append(kp2[m.trainIdx].pt)
        #     pts1.append(kp1[m.queryIdx].pt)
        # pts1 = np.int32(pts1)
        # pts2 = np.int32(pts2)

        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(image1, None)
        kp2, des2 = sift.detectAndCompute(image2, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        good = good[:100]

        pts1 = np.float64([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        pts2 = np.float64([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # calculate fundamental matrix
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        E, mask = cv2.findEssentialMat(pts1,
                                       pts2,
                                       focal=1.0,
                                       pp=(486., 265.),
                                       method=cv2.RANSAC,
                                       prob=0.999,
                                       threshold=1.0)
        points, R, t, mask = cv2.recoverPose(E, pts1, pts2, pp=(486., 265.))
        #points, R, t, mask = cv2.recoverPose(F, pts1, pts2, pp=(486., 265.), mask=mask);
        # E = np.dot(np.dot(self.K.T, F), self.K)
        # points, R, t, mask = cv2.recoverPose(E, pts1, pts2)

        # # SVD
        # U, s, V = np.linalg.svd(E, full_matrices=True)
        # W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
        # rotation = np.dot(np.dot(U, W.T), V.T)
        # translation = U[:, 2]

        self.E = E
        self.F = F
        self.R = R
        self.t = t

        # NOTE: for debug
        # img3 = cv2.drawMatches(image1,kp1,image2,kp2, good, None, flags=2)
        # cv2.imshow("asdasd", img3)

        # homograpy
        M_r = np.hstack((self.R, self.t))
        M_l = np.hstack((np.eye(3, 3), np.zeros((3, 1))))

        P_l = np.dot(self.K, M_l)
        P_r = np.dot(self.K, M_r)
        point_4d_hom = cv2.triangulatePoints(P_l, P_r, pts1, pts2)
        point_4d = point_4d_hom / np.tile(point_4d_hom[-1, :], (4, 1))
        point_3d = point_4d[:3, :].T

        self.triangulation = point_3d

        return E, F, R, t
Code example #16
    def initial_estimation(self):
        for i in range(len(self.img_pose) - 1):
            for h in range(i + 1, len(self.img_pose)):
                src = np.array([], dtype=np.float64).reshape(0, 2)
                dst = np.array([], dtype=np.float64).reshape(0, 2)
                kp_src_idx = []
                kp_dst_idx = []

                for k in self.img_pose[i].kp_matches:
                    if self.img_pose[i].kp_match_exist(k, h):
                        k = int(k)

                        match_idx = self.img_pose[i].kp_match_idx(k, h)
                        match_idx = int(match_idx)

                        src = np.vstack((src, self.img_pose[i].kp[k]))
                        dst = np.vstack((dst, self.img_pose[h].kp[match_idx]))
                        kp_dst_idx.append(match_idx)
                        kp_src_idx.append(k)

                if src.shape[0] < 6:
                    print(
                        "Not enough points to generate essential matrix for image_",
                        i, " and image_", h)
                    continue

                src = np.expand_dims(src, axis=1)
                dst = np.expand_dims(dst, axis=1)
                E, mask = cv2.findEssentialMat(dst,
                                               src,
                                               cameraMatrix=self.calibration,
                                               method=cv2.LMEDS,
                                               prob=0.999)
                # E, mask = cv2.findEssentialMat(dst,src,cameraMatrix = self.calibration,method =cv2.RANSAC,prob=0.999, threshold=1)

                _, local_R, local_t, mask = cv2.recoverPose(
                    E, dst, src, cameraMatrix=self.calibration, mask=mask)

                T = Pose3(Rot3(local_R),
                          Point3(local_t[0], local_t[1], local_t[2]))

                self.img_pose[h].T = transform_from(T, self.img_pose[i].T)
                cur_R = self.img_pose[h].T.rotation().matrix()
                cur_t = self.img_pose[h].T.translation().vector()
                cur_t = np.expand_dims(cur_t, axis=1)
                self.img_pose[h].P = np.dot(
                    self.calibration,
                    np.hstack((cur_R.T, -np.dot(cur_R.T, cur_t))))

                points4D = cv2.triangulatePoints(projMatr1=self.img_pose[i].P,
                                                 projMatr2=self.img_pose[h].P,
                                                 projPoints1=src,
                                                 projPoints2=dst)
                points4D = points4D / np.tile(points4D[-1, :], (4, 1))
                pt3d = points4D[:3, :].T

                # Find good triangulated points
                for j, k in enumerate(kp_src_idx):
                    if (mask[j]):
                        match_idx = self.img_pose[i].kp_match_idx(k, h)

                        if (self.img_pose[i].kp_3d_exist(k)):
                            self.img_pose[h].kp_landmark[
                                match_idx] = self.img_pose[i].kp_3d(k)
                            self.landmark[self.img_pose[i].kp_3d(
                                k)].point += pt3d[j]
                            self.landmark[self.img_pose[h].kp_3d(
                                match_idx)].seen += 1

                        else:
                            new_landmark = LandMark(pt3d[j], 2)
                            self.landmark.append(new_landmark)

                            self.img_pose[i].kp_landmark[k] = len(
                                self.landmark) - 1
                            self.img_pose[h].kp_landmark[match_idx] = len(
                                self.landmark) - 1

            for j in range(len(self.landmark)):
                if (self.landmark[j].seen >= 3):
                    self.landmark[j].point /= (self.landmark[j].seen - 1)
Code example #17
 frame_gray = result_img2
 # calculate optical flow
 p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                        **lk_params)
 print(st.shape)
 K = np.array([[3258.52325987820, 0, 626.110732333212],
               [0, 3221.05397172884, 439.400007275847], [0, 0, 1]])
 # Select good points
 print(p1.shape)
 # good_new = p1[np.nonzero(st)[0]]
 # good_old = p0[np.nonzero(st)[0]]
 good_new = p1
 good_old = p0
 E, mask1 = cv2.findEssentialMat(good_new,
                                 good_old,
                                 K,
                                 method=cv2.RANSAC,
                                 prob=0.995,
                                 threshold=0.5)
 # good_new = p1[np.nonzero(mask1)[0]]
 # good_old = p0[np.nonzero(mask1)[0]]
 # rgb.save('./image_test/frame01.png')
 rgb = np.float32(rgb)
 print(p1.shape)
 print(good_new.shape)
 for i, (new, old) in enumerate(zip(good_new, good_old)):
     a, b = new.ravel()
     c, d = old.ravel()
     # color[i].tolist()
     mask = cv2.line(rgb, (a, b), (c, d), color[i].tolist(), 3)
 # mask = cv2.line(img_CLAHE1, (a,b),(c,d), (155,155,155), 1)
 # frame = cv2.circle(frame_gray,(a,b),5,(i,255,255),-1)
Code example #18
 def processSecondFrame(self):
     self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
     E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
     _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
     self.frame_stage = STAGE_DEFAULT_FRAME 
     self.px_ref = self.px_cur
Code example #19
File: visual_odometry.py  Project: uoip/monoVO-python
	def processSecondFrame(self):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		self.frame_stage = STAGE_DEFAULT_FRAME 
		self.px_ref = self.px_cur
Code example #20
lines2 = cv.computeCorrespondEpilines(points1.reshape(-1,1,2), 1, F)
lines2 = lines2.reshape(-1,3)
#print lines1
plt.figure().canvas.set_window_title("epilines from RANSAC algorithm")
plt.subplot(121),plt.imshow(drawlines(img1.copy(),lines1,points1,points2))
plt.subplot(122),plt.imshow(drawlines(img2.copy(),lines2,points2,points1))
print "close the plot window to proceed..."
plt.show()
####################################################
# PART 4: Two-view triangulation
####################################################
print "=====two-view triangulation======"
cam_intrinsic = np.matrix([[2759.48, 0, 1520.69],[0, 2764.16, 1006.81],[0, 0, 1]])

# compute essential matrix based on fundamental matrix
E1 = cv.findEssentialMat(points1, points2, cam_intrinsic, method=cv.RANSAC)[0]
print "essential matrix = "
print E1

# compute essential matrix in perspective of cam 2
E2 = cv.findEssentialMat(points2, points1, cam_intrinsic, method=cv.RANSAC)[0]
#print "essential matrix cam 2 = "
#print E2

# find rotation and transformation matrix based upon essential matrix
R1, R2, T = cv.decomposeEssentialMat(E1)
R_T_1 = np.hstack([R1,T])
print "[R|T] matrix = "
print R_T_1

R1, R2, T = cv.decomposeEssentialMat(E1)
Code example #21
def findEssentialMat(points1, points2, K=KMatrix()):
    '''
    Takes points1, points2, K; returns the essential matrix and inlier mask (OpenCV implementation)
    '''
    return cv2.findEssentialMat(np.array(points1), np.array(points2), focal=K.focalLength, pp=K.principalPoint)
Code example #22
pts3d_12 = np.matmul(Cam12.Rt, utils.euc_to_hom(pts3d_11).T).T
pts2d_12 = utils.project(Cam12.P, pts3d_11)
pts2d_12 = utils.hom_to_euc(pts2d_12)

# This is third camera.
Cam13 = Camera(K, Rc=utils.rotate(thetay=thetaY), center=np.asarray([15, 25, 10]).reshape(3, -1))
Cam13_R = Cam13.R
Cam13_c = Cam13.center
Cam13_t = Cam13.t
pts3d_13 = np.matmul(Cam13.Rt, utils.euc_to_hom(pts3d_11).T).T
pts2d_13 = utils.project(Cam13.P, pts3d_11)
pts2d_13 = utils.hom_to_euc(pts2d_13)

# From here on, we shall test the findEssentialMat method.

Ess12, _ = cv2.findEssentialMat(pts2d_11, pts2d_12, Cam12.K, cv2.FM_RANSAC)
R12_est1, R12_est2, t12_est = cv2.decomposeEssentialMat(Ess12) # It gives orientation of Cam1 wrt Cam2.
# basically the above R and t will bring a point (represented in cam1) to (its representation in) Cam2.

Ess13, _ = cv2.findEssentialMat(pts2d_11, pts2d_13, K, cv2.FM_RANSAC)
R13_est1, R13_est2, t13_est = cv2.decomposeEssentialMat(Ess13) # It gives orientation of Cam1 wrt Cam3.
# basically the above R and t will bring a point (represented in cam1) to (its representation in) Cam3.


Ess23, _ = cv2.findEssentialMat(pts2d_12, pts2d_13, K, cv2.FM_RANSAC)
R23_est1, R23_est2, t23_est = cv2.decomposeEssentialMat(Ess23) # It gives orientation of Cam2 wrt Cam3.
# basically the above R and t will bring a point (represented in cam2) to (its representation in) Cam3.


print("R estimated is equal ???  ", (Cam12_R - R12_est1).sum(), (Cam12_R - R12_est2).sum())
print("Is the t equal ??? ", (utils.norm(Cam12_t) - t12_est).sum())
Code example #23
def relative_camera_pose(img1_, img2_, K, dist):

    print("\n\nRelativeCamPose:\n")

    img1 = undistort(img1_, 'left_undistort.jpg', K, dist)
    img2 = undistort(img2_, 'right_undistort.jpg', K, dist)

    kp1, des1, kp2, des2 = create_feature_points(img1, img2, K, dist)
    pts1, pts2, F = match_feature_points(kp1, des1, kp2, des2)

    draw_image_epipolar_lines(img1, img2, pts1, pts2, F)

    # Compute the essential matrix E
    # Help: https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga13f7e34de8fa516a686a56af1196247f
    E, _ = cv.findEssentialMat(pts1, pts2, K)
    print("\nEssential Matrix, E:\n", E)

    # Decompose the essential matrix into R, r
    R1, R2, t = cv.decomposeEssentialMat(E)

    # Re-projected feature points on the first image
    # http://answers.opencv.org/question/173969/how-to-give-input-parameters-to-triangulatepoints-in-python/
    print("K:\n", K)
    print("dist: \n", dist)

    print("R1:\n", R1)
    print("R2:\n", R2)
    print("t\n", t)

    R2_t = np.hstack((R1, (-t)))
    print("R2_t:\n", R2_t)

    zero_vector = np.array([[0, 0, 0]])
    zero_vector = np.transpose(zero_vector)

    # Create projection matrices P1 and P2
    P1 = np.hstack((K, zero_vector))
    P2 = np.dot(K, R2_t)

    print("P1:\n", P1)
    print("P2:\n", P2)

    pts1 = pts1.astype(np.float64)
    pts2 = pts2.astype(np.float64)

    pts1 = np.transpose(pts1)
    pts2 = np.transpose(pts2)

    points4D = cv.triangulatePoints(P1, P2, pts1, pts2)
    aug_points3D = points4D / points4D[3]
    print("\nAugmented points3D:\n", aug_points3D)

    projectedPoints = np.dot(P1, aug_points3D)

    projectedPoints = projectedPoints / projectedPoints[2]
    print("\nNormalized ProjectedPoints:\n", projectedPoints)
    points2D = projectedPoints[:2]
    print("\n2D Points:\n", points2D)

    #Re-projection of points
    plt.imshow(img1, cmap="gray")

    # Project points
    plt.scatter(points2D[0], points2D[1], c='b', s=40, alpha=0.5)
    plt.scatter(pts1[0], pts1[1], c='g', s=15, alpha=1)
    #plt.show()

    # Begin part 4
    min_depth = min(aug_points3D[2])
    max_depth = max(aug_points3D[2])
    print("\n\nMin depth:\n", min_depth)
    print("Max depth:\n\n", max_depth)

    N = (max_depth - min_depth) / 20
    print("N=20:\n", N)

    equispaced_dist = np.linspace(min_depth, max_depth, num=20)
    print("Equispaced distance:\n", equispaced_dist)

    projectPoints = np.dot(P1, aug_points3D)
    print("projectPoints:\n", projectPoints)

    points2D = projectPoints[:2]
    print("\n2DPoints:\n", points2D)

    # Calculate the depth of the matching points:
    # http://answers.opencv.org/question/117141/triangulate-3d-points-from-a-stereo-camera-and-chessboard/
    # https://stackoverflow.com/questions/22334023/how-to-calculate-3d-object-points-from-2d-image-points-using-stereo-triangulatio/22335825

    homography = []
    output_warp = []

    for i in range(0, 20):
        nd_vector = np.array([0, 0, -1, equispaced_dist[i]])

        P1_aug = np.vstack((P1, nd_vector))
        P2_aug = np.vstack((P2, nd_vector))

        #print("P1_aug:\n",P1_aug)
        #print("P2_aug:\n",P2_aug)

        P2_inv = np.linalg.inv(P2_aug)
        #print("P2_inv:\n", P2_inv)

        P1P2_inv = np.dot(P1_aug, P2_inv)
        #print("P1P2_inv:\n",P1P2_inv)

        R = P1P2_inv[:3, :3]

        homography.append(R)
        #print("\n\nhomography" + str(i))
        #print(homography[i])

        output_warp.append(cv.warpPerspective(img2, homography[i], (img2.shape[1], img2.shape[0])))
        cv.imwrite('Warped_output_' + str(i) + '.jpg', output_warp[i])

    diff = []
    abs_img = []
    blur = []
    ind = []
    depth_img = []

    blur_2D = []

    for i in range(0, 20):
        diff.append(cv.absdiff(img1, output_warp[i]))
        cv.imwrite('Absolute_diff_' + str(i) + '.jpg', diff[i])

        blur.append(cv.blur(diff[i], (15, 15)))
        cv.imwrite('Block_filter_' + str(i) + '.jpg', blur[i])

        #print("Blur:\n", blur[i])

        blur_2D.append(np.ravel(blur[i]))

    big_mat = np.array(blur_2D)

    for pixel in range(len(big_mat[0])):

        index = np.argmin(big_mat[:, pixel])
        depth_img.append(
            round(240 * equispaced_dist[index] / max(equispaced_dist)))

    depth_fin = np.array(depth_img)
    reshape_depth_img = depth_fin.reshape(img1.shape)
    img = Image.fromarray(reshape_depth_img.astype(np.uint8))
    #cv.imwrite("depth_image.jpg", img)
    img.show()

    return R1, R2, t
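
For reference, the depth-sweep loop above derives each warp by augmenting the projection matrices with a plane row and inverting; an equivalent closed form is the plane-induced homography H = K (R - t n^T / d) K^-1 for a fronto-parallel plane at depth d. A minimal sketch (assuming the same K and one of the (R, t) candidates returned above; not the author's code, and the warp direction depends on the pose convention):

def plane_sweep_homography(K, R, t, d, n=np.array([[0.0, 0.0, 1.0]])):
    # homography induced by the plane n.X = d (n expressed in the first camera
    # frame), mapping pixels of one view into the other
    t = t.reshape(3, 1)
    H = K.dot(R - t.dot(n) / d).dot(np.linalg.inv(K))
    return H / H[2, 2]  # normalize so that H[2, 2] == 1

Sweeping d over equispaced_dist and warping with cv.warpPerspective reproduces the same plane-sweep idea as the loop above.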
コード例 #24
0
ファイル: vo_modules.py プロジェクト: ShrutheeshIR/DF-VO
    def compute_pose_2d2d(self, kp_ref, kp_cur):
        """Compute the pose from view2 to view1
        Args:
            kp_ref (Nx2 array): keypoints for reference view
            kp_cur (Nx2 array): keypoints for current view
        Returns:
            pose (SE3): relative pose from current to reference view
            best_inliers (N boolean array): inlier mask
        """
        principal_points = (self.cam_intrinsics.cx, self.cam_intrinsics.cy)

        # initialize ransac setup
        best_Rt = []
        best_inlier_cnt = 0
        max_ransac_iter = self.cfg.compute_2d2d_pose.ransac.repeat
        best_inliers = np.ones((kp_ref.shape[0])) == 1

        # check flow magnitude
        avg_flow = np.mean(np.linalg.norm(kp_ref - kp_cur, axis=1))
        min_flow = self.cfg.compute_2d2d_pose.min_flow
        if avg_flow > min_flow:
            for i in range(
                    max_ransac_iter
            ):  # repeat ransac for several times for stable result
                # shuffle kp_cur and kp_ref (only useful when random seed is fixed)
                new_list = np.random.randint(0, kp_cur.shape[0],
                                             (kp_cur.shape[0]))
                new_kp_cur = kp_cur.copy()[new_list]
                new_kp_ref = kp_ref.copy()[new_list]

                start_time = time()
                E, inliers = cv2.findEssentialMat(
                    new_kp_cur,
                    new_kp_ref,
                    focal=self.cam_intrinsics.fx,
                    pp=principal_points,
                    method=cv2.RANSAC,
                    prob=0.99,
                    threshold=self.cfg.compute_2d2d_pose.ransac.reproj_thre,
                )
                cheirality_cnt, R, t, _ = cv2.recoverPose(
                    E,
                    new_kp_cur,
                    new_kp_ref,
                    focal=self.cam_intrinsics.fx,
                    pp=principal_points,
                )
                self.timers.timers["Ess. Mat."].append(time() - start_time)
                if inliers.sum() > best_inlier_cnt and cheirality_cnt > 50:
                    best_Rt = [R, t]
                    best_inlier_cnt = inliers.sum()
                    best_inliers = inliers
            if len(best_Rt) == 0:
                R = np.eye(3)
                t = np.zeros((3, 1))
                best_Rt = [R, t]
        else:
            R = np.eye(3)
            t = np.zeros((3, 1))
            best_Rt = [R, t]
        R, t = best_Rt
        pose = SE3()
        pose.R = R
        pose.t = t
        return pose, best_inliers
コード例 #25
0
                first_set += [kp1[match.queryIdx].pt]
                second_set += [kp2[match.trainIdx].pt]
            else:
                break
        first_set = np.array(first_set, dtype=np.float64)
        second_set = np.array(second_set, dtype=np.float64)
        """ for p1,p2 in zip(first_set,second_set):
            cv2.circle(frame, (int(p1[0]), int(p1[1])), 5, (0, 0, 0), -1)
            cv2.circle(frame, (int(p2[0]), int(p2[1])), 5, (255, 255, 255), -1)
        cv2.imshow("binary frame", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        input() """
        if first_set.shape[0] > 5:
            essential_matrix, mask = cv2.findEssentialMat(
                first_set, second_set, 1.0, (0.0, 0.0), cv2.RANSAC, 0.999, 1.0)

            points, R, t, mask = cv2.recoverPose(essential_matrix,
                                                 first_set,
                                                 second_set,
                                                 mask=mask)
            """ first_set = np.array([x for i, x in enumerate(first_set) if mask[i] == 1])
            second_set = np.array([x for i, x in enumerate(second_set) if mask[i] == 1])
            for p1,p2 in zip(first_set,second_set):
                cv2.circle(frame, (int(p1[0]), int(p1[1])), 5, (255, 0, 0), -1)
                cv2.circle(frame, (int(p2[0]), int(p2[1])), 5, (0, 255, 0), -1)
            cv2.imshow("binary frame", frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            input() """
コード例 #26
0
def main():
    args = configs()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    log_interval = args.logging_interval

    if args.training_instance:
        args.load_path = os.path.join(args.load_path, args.training_instance)
    else:
        args.load_path = os.path.join(args.load_path, "evflownet_{}".format(datetime.now().strftime("%m%d_%H%M%S")))
    if not os.path.exists(args.load_path):
        os.makedirs(args.load_path)



    for ep in experiment_params:

        rpe_stats = []
        rre_stats = []
        trans_e_stats = []


        rpe_rre_info = []
        trans_e_info = []
        gt_interpolated = []
        predict_camera_frame = []
        predict_ts = []


        print(f"{ep['name']}")
        base_path = f"results/{ep['name']}"
        if not os.path.exists(base_path):
            os.makedirs(base_path)


        if ep['dataset'] == 'outdoor1':
            dataset_param = outdoor1_params
        elif ep['dataset'] == 'outdoor2':
            dataset_param = outdoor2_params
        elif ep['dataset'] == 'poster_6dof':
            dataset_param = poster_6dof_params
        elif ep['dataset'] == 'hdr_boxes':
            dataset_param = hdr_boxes_params
        elif ep['dataset'] == 'poster_translation':
            dataset_param = poster_translation_params
        elif ep['dataset'] == 'indoor1':
            dataset_param = indoor1_params
        elif ep['dataset'] == 'indoor2':
            dataset_param = indoor2_params
        elif ep['dataset'] == 'indoor3':
            dataset_param = indoor3_params
        elif ep['dataset'] == 'indoor4':
            dataset_param = indoor4_params            
        elif ep['dataset'] == 'outdoor_night1':
            dataset_param = outdoor_night1_params
        elif ep['dataset'] == 'outdoor_night2':
            dataset_param = outdoor_night2_params
        elif ep['dataset'] == 'outdoor_night3':
            dataset_param = outdoor_night3_params


        with open(f"{base_path}/config.txt", "w") as f:
            f.write("experiment params:\n")
            f.write(pprint.pformat(ep))
            f.write("\n\n\n")
            f.write("dataset params:\n")
            f.write(pprint.pformat(dataset_param))



        print("Load Data Begin. ")
        start_frame = ep['start_frame']
        end_frame = ep['end_frame']
        model_path = ep['model']
        voxel_method = ep['voxel_method']
        select_events = ep['select_events']
        voxel_threshold = ep['voxel_threshold']
        findE_threshold = ep['findE_threshold']
        findE_prob = ep['findE_prob']
        reproject_err_threshold = ep['reproject_err_threshold']


        # Set parameters
        gt_path = dataset_param['gt_path']
        sensor_size = dataset_param['sensor_size']
        camIntrinsic = dataset_param['camera_intrinsic']
        h5Dataset = DynamicH5Dataset(dataset_param['dataset_path'], voxel_method=voxel_method)
        h5DataLoader = torch.utils.data.DataLoader(dataset=h5Dataset, batch_size=1, num_workers=6, shuffle=False)
        
        # model
        print("Load Model Begin. ")
        EVFlowNet_model = EVFlowNet(args).to(device)
        EVFlowNet_model.load_state_dict(torch.load(model_path))
        EVFlowNet_model.eval()
        # EVFlowNet_model.load_state_dict(torch.load('data/model/evflownet_1001_113912_outdoor2_5k/model0'))

        # optimizer
        optimizer = torch.optim.Adam(EVFlowNet_model.parameters(), lr=args.initial_learning_rate, weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=args.learning_rate_decay)
        loss_fun = TotalLoss(args.smoothness_weight, args.photometric_loss_weight)



        print("Start Evaluation. ")    
        for iteration, item in enumerate(tqdm(h5DataLoader)):

            if iteration < start_frame:
                continue

            if iteration > end_frame:
                break

            voxel = item['voxel'].to(device)
            events = item['events'].to(device)
            frame = item['frame'].to(device)
            frame_ = item['frame_'].to(device)
            num_events = item['num_events'].to(device)
            image_name = "{}/img_{:07d}.png".format(base_path, iteration)

            events_vis = events[0].detach().cpu()
            flow_dict = EVFlowNet_model(voxel)
            flow_vis = flow_dict["flow3"][0].detach().cpu()

            # Compose the event images and warp the events with flow
            if select_events == 'only_pos':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)

            elif select_events == 'only_neg':
                ev_bgn, ev_end, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

            elif select_events == 'mixed':
                ev_bgn_pos, ev_end_pos, ev_img, timestamps = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, 1, sensor_size)
                ev_bgn_neg, ev_end_neg, ev_img_neg, timestamps_neg = get_forward_backward_flow_torch(events_vis, flow_vis, voxel_threshold, -1, sensor_size)

                ev_bgn_x = torch.cat([ev_bgn_pos[0], ev_bgn_neg[0]])
                ev_bgn_y = torch.cat([ev_bgn_pos[1], ev_bgn_neg[1]])
                ev_end_x = torch.cat([ev_end_pos[0], ev_end_neg[0]])
                ev_end_y = torch.cat([ev_end_pos[1], ev_end_neg[1]])
                ev_bgn = (ev_bgn_x, ev_bgn_y)
                ev_end = (ev_end_x, ev_end_y)


            start_t = item['timestamp_begin'].cpu().numpy()[0]
            end_t = item['timestamp_end'].cpu().numpy()[0]

            # Convert to numpy format
            ev_img_raw = torch_to_numpy(ev_img[0])
            ev_img_bgn = torch_to_numpy(ev_img[1])
            ev_img_end = torch_to_numpy(ev_img[2])
            ev_bgn_xs = torch_to_numpy(ev_bgn[0])
            ev_bgn_ys = torch_to_numpy(ev_bgn[1])
            ev_end_xs = torch_to_numpy(ev_end[0])
            ev_end_ys = torch_to_numpy(ev_end[1])

            timestamps_before = torch_to_numpy(timestamps[0])
            timestamps_after = torch_to_numpy(timestamps[1])
            frame_vis = torch_to_numpy(item['frame'][0])
            frame_vis_ = torch_to_numpy(item['frame_'][0])
            flow_vis = torch_to_numpy(flow_dict["flow3"][0])


            METHOD = "opencv"
            # METHOD = "opengv"

            if METHOD == "opencv":

                ######### Opencv (calculate R and t) #########
                p1 = np.dstack([ev_bgn_xs, ev_bgn_ys]).squeeze()
                p2 = np.dstack([ev_end_xs, ev_end_ys]).squeeze()

                E, mask = cv2.findEssentialMat(p1, p2, cameraMatrix=camIntrinsic, method=cv2.RANSAC, prob=findE_prob, threshold=findE_threshold)
                points, R, t, mask1, triPoints = cv2.recoverPose(E, p1, p2, cameraMatrix=camIntrinsic, mask=mask, distanceThresh=5000)

            elif METHOD == "opengv":

                #### Calculate bearing vector manually ####
                ev_bgn_xs_undistorted = (ev_bgn_xs - 170.7684322973841) / 223.9940010790056
                ev_bgn_ys_undistorted = (ev_bgn_ys - 128.18711828338436) / 223.61783486959376
                ev_end_xs_undistorted = (ev_end_xs - 170.7684322973841) / 223.9940010790056
                ev_end_ys_undistorted = (ev_end_ys - 128.18711828338436) / 223.61783486959376

                bearing_p1 = np.dstack([ev_bgn_xs_undistorted, ev_bgn_ys_undistorted, np.ones_like(ev_bgn_xs)]).squeeze()
                bearing_p2 = np.dstack([ev_end_xs_undistorted, ev_end_ys_undistorted, np.ones_like(ev_end_xs)]).squeeze()

                bearing_p1 /= np.linalg.norm(bearing_p1, axis=1)[:, None]
                bearing_p2 /= np.linalg.norm(bearing_p2, axis=1)[:, None]

                bearing_p1 = bearing_p1.astype('float64')
                bearing_p2 = bearing_p2.astype('float64')

                # focal_length = 223.75
                # reproject_err_threshold = 0.1
                ransac_threshold = 1e-6
                ransac_transformation = pyopengv.relative_pose_ransac(bearing_p1, bearing_p2, "NISTER", threshold=ransac_threshold, iterations=1000, probability=0.999)
                R = ransac_transformation[:, 0:3]
                t = ransac_transformation[:, 3]

            # Interpolate Tw1 and Tw2
            Tw1 = get_interpolated_gt_pose(gt_path, start_t)
            Tw2 = get_interpolated_gt_pose(gt_path, end_t)
            Tw2_inv = inverse_se3_matrix(Tw2)

            # r1 = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
            # r2 = np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]])
            # r3 = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
            # r4 = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
            # r5 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
            # r6 = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
            # t = r5 @ t

            normed_t = t.squeeze() / np.linalg.norm(t)
            gt_t = Tw2[0:3, 3] - Tw1[0:3, 3]
            normed_gt = gt_t / np.linalg.norm(gt_t)

            # print()
            # print(np.rad2deg(np.arccos(np.dot(normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r1@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r2@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r3@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r4@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r5@normed_t, normed_gt))))
            # print(np.rad2deg(np.arccos(np.dot(r6@normed_t, normed_gt))))


            rpe = np.rad2deg(np.arccos(np.dot(normed_t, normed_gt)))
            # raise

            # if iteration == 121:
            #     print(Tw1)
            #     print(Tw2)
            #     print(Tw2_inv)
            #     print(Tw2_inv @ Tw1)

            predict_ts.append(start_t)

            # Store gt vector for later visualization
            gt_interpolated.append(Tw1)
            gt_scale = np.linalg.norm(Tw2[0:3, 3] - Tw1[0:3, 3])
            pd_scale = np.linalg.norm(t)

            t *= gt_scale / pd_scale  # scale translation vector with gt_scale

            # Predicted relative pose 
            P = create_se3_matrix_with_R_t(R, t)
            P_inv = inverse_se3_matrix(P)
            # print(P @ P_inv)




            # Calculate the rpe
            E = Tw2_inv @ Tw1 @ P
            trans_e = np.linalg.norm(E[0:3, 3])

            E_inv = Tw2_inv @ Tw1 @ P_inv
            trans_e_inv = np.linalg.norm(E_inv[0:3, 3])


            # print(Tw2_inv @ Tw1)
            # print(P_inv)
            # print(E_inv)
            # print()

            # print() 
            # print(t)
            # print(Tw1[0:3, 3] - Tw2[0:3, 3])
            # print(Tw1[0:3, 3] - Tw2[0:3, 3] - t.T)

            # print()


            # print(t)
            # print(Tw1[0:3, 3] - Tw2[0:3, 3])
            # print(np.dot(np.linalg.norm(t), np.linalg.norm(Tw1[0:3, 3] - Tw2[0:3, 3])))
            # print(np.arccos(np.dot(np.linalg.norm(t), np.linalg.norm(Tw1[0:3, 3] - Tw2[0:3, 3]))))
            # raise

            rre = np.linalg.norm(logm(E[:3, :3]))
            rre_inv = np.linalg.norm(logm(E_inv[:3, :3]))

            rpe_stats.append(rpe)
            rre_stats.append(rre_inv)
            rpe_rre_info.append([rpe, rre, rre_inv])


            if trans_e_inv/gt_scale > 1.85:
                predict_camera_frame.append(P)

                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e_stats.append(trans_e/gt_scale)
            else:                
                trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale])
                print(trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale, " || ", rpe, rre, rre_inv)

                trans_e = trans_e_inv
                predict_camera_frame.append(P_inv)

                trans_e_stats.append(trans_e_inv/gt_scale)

            # raise

            # if trans_e/gt_scale > 1.85:

            #     trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale])
            #     print(trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e_inv/gt_scale)


            #     trans_e = trans_e_inv
            #     predict_camera_frame.append(P_inv)
            # else:
            #     predict_camera_frame.append(P)


            #     trans_e_info.append([trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale])
            #     print(trans_e, trans_e_inv, gt_scale, trans_e/gt_scale, trans_e_inv/gt_scale, trans_e/gt_scale)


            cvshow_all_eval(ev_img_raw, ev_img_bgn, ev_img_end, (ev_bgn_xs, ev_bgn_ys), \
                (ev_end_xs, ev_end_ys), timestamps_before, timestamps_after, frame_vis, \
                frame_vis_, flow_vis, image_name, sensor_size, trans_e, gt_scale)


            predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
            visualize_trajectory(predict_world_frame, "{}/path_{:07d}.png".format(base_path, iteration))
            visualize_trajectory(np.array(gt_interpolated), "{}/gt_path_{:07d}.png".format(base_path, iteration))


        rpe_stats = np.array(rpe_stats)
        rre_stats = np.array(rre_stats)
        trans_e_stats = np.array(trans_e_stats)

        with open(f"{base_path}/final_stats.txt", "w") as f:
            f.write("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15\n")
            f.write(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n\n")
            
            print("rpe_median, arpe_deg, arpe_outliner_10, arpe_outliner_15")
            print(f"{np.median(rpe_stats)}, {np.mean(rpe_stats)}, {100*len(rpe_stats[rpe_stats>10])/len(rpe_stats)}, {100*len(rpe_stats[rpe_stats>15])/len(rpe_stats)}\n")

            f.write("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1\n")
            f.write(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            print("rre_median, arre_rad, arre_outliner_0.05, arpe_outliner_0.1")
            print(f"{np.median(rre_stats)}, {np.mean(rre_stats)}, {100*len(rre_stats[rre_stats>0.05])/len(rre_stats)}, {100*len(rre_stats[rre_stats>0.1])/len(rre_stats)}\n\n")

            f.write("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            f.write(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")

            print("trans_e_median, trans_e_avg, trans_e_outliner_0.5, trans_e_outliner_1.0\n")
            print(f"{np.median(trans_e_stats)}, {np.mean(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>0.5])/len(trans_e_stats)}, {100*len(trans_e_stats[trans_e_stats>1.0])/len(trans_e_stats)}\n")




        with open(f"{base_path}/rpe_rre.txt", "w") as f:
            for row in rpe_rre_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")

        with open(f"{base_path}/trans_e.txt", "w") as f:
            for row in trans_e_info:
                for item in row:
                    f.write(f"{item}, ")
                f.write("\n")
        
        with open(f"{base_path}/predict_pose.txt", "w") as f:
            for p in predict_world_frame:
                f.write(f"{p}\n")
        
        with open(f"{base_path}/gt_pose.txt", "w") as f:
            for p in np.array(gt_interpolated):
                f.write(f"{p}\n")

        with open(f"{base_path}/predict_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, predict_world_frame):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")

        with open(f"{base_path}/gt_tum.txt", "w") as f:
            for ts, p in zip(predict_ts, np.array(gt_interpolated)):
                qx, qy, qz, qw = rotation_matrix_to_quaternion(p[:3, :3])
                tx, ty, tz = p[:3, 3]
                f.write(f"{ts} {tx} {ty} {tz} {qx} {qy} {qz} {qw}\n")


        predict_world_frame = relative_to_absolute_pose(np.array(predict_camera_frame))
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path00.png", show=True)
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path01.png", rotate='x')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path02.png", rotate='y')
        visualize_trajectory(predict_world_frame, f"{base_path}/final_path03.png", rotate='z')
コード例 #27
0
        p1 = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        p2 = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        flags=2)

    img_siftmatch = cv2.drawMatches(img1, kp1, img2, kp2, good, None,
                                    **draw_params)
    cv2.imwrite('sift_match_' + str(counter) + '.png', img_siftmatch)

    #########################
    #2----essential matrix--#
    #########################
    E, mask = cv2.findEssentialMat(p1, p2, K, cv2.RANSAC, 0.999, 1.0)

    matchesMask = mask.ravel().tolist()

    draw_params = dict(
        matchColor=(0, 255, 0),  # draw matches in green color
        singlePointColor=None,
        matchesMask=matchesMask,  # draw only inliers
        flags=2)

    img_inliermatch = cv2.drawMatches(img1, kp1, img2, kp2, good, None,
                                      **draw_params)
    cv2.imwrite('inlier_match_' + str(counter) + '.png', img_inliermatch)
    print("Essential matrix:")
    print(E)
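
A natural continuation of this fragment (not shown above) is to recover the relative pose from E, reusing the RANSAC inlier mask; a minimal sketch with the same p1, p2 and K:

    # hedged sketch: recoverPose runs the cheirality check over the four
    # candidate decompositions of E and returns the valid (R, t)
    retval, R, t, pose_mask = cv2.recoverPose(E, p1, p2, K, mask=mask.copy())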
コード例 #28
0
    def genPointCloud(self):
        def triangulate(P0, P1, x1, x2):
            '''Return the homogeneous coordinates X, Y, Z, W of a point seen in two images.
               P0 and P1 are 3x4 projection matrices; x1 and x2 are the matched key-point
               coordinates (here given in normalized camera coordinates).'''
            A = np.array(
                [[
                    P0[2, 0] * x1[0] - P0[0, 0], P0[2, 1] * x1[0] - P0[0, 1],
                    P0[2, 2] * x1[0] - P0[0, 2], P0[2, 3] * x1[0] - P0[0, 3]
                ],
                 [
                     P0[2, 0] * x1[1] - P0[1, 0], P0[2, 1] * x1[1] - P0[1, 1],
                     P0[2, 2] * x1[1] - P0[1, 2], P0[2, 3] * x1[1] - P0[1, 3]
                 ],
                 [
                     P1[2, 0] * x2[0] - P1[0, 0], P1[2, 1] * x2[0] - P1[0, 1],
                     P1[2, 2] * x2[0] - P1[0, 2], P1[2, 3] * x2[0] - P1[0, 3]
                 ],
                 [
                     P1[2, 0] * x2[1] - P1[1, 0], P1[2, 1] * x2[1] - P1[1, 1],
                     P1[2, 2] * x2[1] - P1[1, 2], P1[2, 3] * x2[1] - P1[1, 3]
                 ]])
            u, s, vt = np.linalg.svd(A)
            return vt[-1]
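        # Note on the helper above: this is the standard linear (DLT) triangulation.
        # Each measurement (x, y) contributes two rows of the form x*P[2] - P[0] and
        # y*P[2] - P[1], where P[0..2] are the rows of the projection matrix, and the
        # homogeneous point is the right singular vector of A associated with the
        # smallest singular value (vt[-1]).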

        I1 = self.images[0]
        I2 = self.images[1]
        h, w, d, f = I1.h, I1.w, I1.d, I1.f

        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(I1.img, None)
        kp2, des2 = sift.detectAndCompute(I2.img, None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                good.append(m)
        u1 = []
        u2 = []
        for m in good:
            u1.append(kp1[m.queryIdx].pt)
            u2.append(kp2[m.trainIdx].pt)

        # General Coordinates
        u1g = np.array(u1)
        u2g = np.array(u2)

        # Make Homogeneous
        u1h = np.c_[u1g, np.ones(u1g.shape[0])]
        u2h = np.c_[u2g, np.ones(u2g.shape[0])]

        # Image Center
        cv = h / 2
        cu = w / 2

        # Get Camera Coordinates
        K_cam = np.array([[f, 0, cu], [0, f, cv], [0, 0, 1]])
        K_inv = np.linalg.inv(K_cam)
        x1 = u1h @ K_inv.T
        x2 = u2h @ K_inv.T

        # Generate Essential Matrix
        E, inliers = cv2.findEssentialMat(x1[:, :2],
                                          x2[:, :2],
                                          np.eye(3),
                                          method=cv2.RANSAC,
                                          threshold=1e-3)
        inliers = inliers.ravel().astype(bool)
        n_in, R, t, _ = cv2.recoverPose(E, x1[inliers, :2], x2[inliers, :2])

        print(x1.shape)

        x1 = x1[inliers == True]
        x2 = x2[inliers == True]

        print(x1.shape)

        # Relative pose between two camperas
        P0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        P1 = np.hstack((R, t))

        # Find X,Y,Z for all SIFT Keypoints
        for i in range(len(x1)):
            self.pointCloud.append(triangulate(
                P0, P1, x1[i],
                x2[i]))  #appends to list of points in xyz coordinates

        self.pointCloud = np.array(self.pointCloud)
        self.pointCloud = self.pointCloud.T / self.pointCloud[:, 3]
        self.pointCloud = self.pointCloud[:-1, :]
        print(self.pointCloud.shape)
コード例 #29
0
ファイル: PnPmatch.py プロジェクト: MenelausJ/CFS_EQ5North
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

good_ini = []
for m, n in matches:
    if m.distance < 0.6 * n.distance:
        good_ini.append(m)

dst_pts_ini = np.float32([kp1[m.queryIdx].pt
                          for m in good_ini]).reshape(-1, 1, 2)
src_pts_ini = np.float32([kp2[m.trainIdx].pt
                          for m in good_ini]).reshape(-1, 1, 2)
des_ini = np.float32([des1[m.queryIdx] for m in good_ini])
# des_ini2 = np.float32([des2[m.trainIdx] for m in good_ini])

E, mask0 = cv2.findEssentialMat(src_pts_ini, dst_pts_ini,
                                camera_intrinsic_matrix)
retval, rvec, tvec, mask1 = cv2.recoverPose(E, src_pts_ini, dst_pts_ini,
                                            camera_intrinsic_matrix, 50)
# Another problem is the scale of tvec
P2 = np.dot(camera_intrinsic_matrix, np.hstack((rvec, tvec)))

print "Initial pairs", len(good_ini)

X3d = cv2.triangulatePoints(P1, P2, src_pts_ini,
                            dst_pts_ini)[:, (mask0 > 0).squeeze()]

with open(
        "/Users/yujieli/Documents/CFS_Video_Analysis-master/test/CamPoseTrial.txt",
        "w") as f:
    frame_i = 892
    while (frame_i <= 900):
コード例 #30
0
    def stitch_and_plot(self, im1, im2):
        I1 = self.images[0]
        I2 = self.images[1]
        h, w, d, f = I1.h, I1.w, I1.d, I1.f

        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(I1.img, None)
        kp2, des2 = sift.detectAndCompute(I2.img, None)
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)

        # Apply ratio test
        good = []
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                good.append(m)
        u1 = []
        u2 = []
        for m in good:
            u1.append(kp1[m.queryIdx].pt)
            u2.append(kp2[m.trainIdx].pt)

        # General Coordinates
        u1g = np.array(u1)
        u2g = np.array(u2)

        # Make Homogeneous
        u1h = np.c_[u1g, np.ones(u1g.shape[0])]
        u2h = np.c_[u2g, np.ones(u2g.shape[0])]

        # Image Center
        cv = h / 2
        cu = w / 2

        # Get Camera Coordinates
        K_cam = np.array([[f, 0, cu], [0, f, cv], [0, 0, 1]])
        K_inv = np.linalg.inv(K_cam)
        x1 = u1h @ K_inv.T
        x2 = u2h @ K_inv.T

        # Generate Essential Matrix
        E, inliers = cv2.findEssentialMat(x1[:, :2],
                                          x2[:, :2],
                                          np.eye(3),
                                          method=cv2.RANSAC,
                                          threshold=1e-3)
        inliers = inliers.ravel().astype(bool)
        n_in, R, t, _ = cv2.recoverPose(E, x1[inliers, :2], x2[inliers, :2])

        x1 = x1[inliers == True]
        x2 = x2[inliers == True]

        # stack the two images into one canvas (im1 bottom-right, im2 top-left)
        pic = np.zeros((im1.shape[0] * 2, im1.shape[1] * 2))
        pic[im1.shape[0]:, im1.shape[1]:] = im1
        pic[:im1.shape[0], :im1.shape[1]] = im2

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.imshow(pic, cmap='gray')
        for i in range(len(x1)):
            # draw a line between each matched pair (x1/x2 are in normalized
            # camera coordinates, so this is only a rough visualization)
            ax.plot([x1[i, 0], x2[i, 0]], [x1[i, 1], x2[i, 1]])
        plt.show()
コード例 #31
0
att = 0
for d in data:

    d0 = d[0]
    att += d0.shape[0]
    d0[:, 0] = d0[:, 0] * w2 + w2
    d0[:, 1] = d0[:, 1] * h2 + h2
    d0[:, 2] = d0[:, 2] * w2 + w2
    d0[:, 3] = d0[:, 3] * h2 + h2
    px_new = d0[:, :2]
    px_last = d0[:, 2:4]
    #if d0.shape[1]>4:
    #    d1 = d0[:,6]
    #    d2 = d0[:,7]
    E, mask = cv2.findEssentialMat(px_new,
                                   px_last,
                                   cameraMatrix=cam.mx,
                                   method=cv2.RANSAC)
    mh, R, t, mask0 = cv2.recoverPose(E, px_new, px_last, cameraMatrix=cam.mx)

    b = Utils.rotationMatrixToEulerAngles(R) * 180 / np.pi
    e = []
    for c in b:
        if c < -90:
            c = 180 + c
        if c > 90:
            c = 180 - c
        e.append(c)
    b = e
    a = d[1][:3]
    dr = a - b
    r0 = np.linalg.norm(dr)
コード例 #32
0
def pose_estimation(source_rgbd_image, target_rgbd_image,
                    pinhole_camera_intrinsic, debug_draw_correspondences):
    success = False
    trans = np.identity(4)

    # transform double array to uint8 array
    color_cv_s = np.uint8(np.asarray(source_rgbd_image.color) * 255.0)
    color_cv_t = np.uint8(np.asarray(target_rgbd_image.color) * 255.0)

    orb = cv2.ORB_create(scaleFactor=1.2,
                         nlevels=8,
                         edgeThreshold=31,
                         firstLevel=0,
                         WTA_K=2,
                         scoreType=cv2.ORB_HARRIS_SCORE,
                         nfeatures=100,
                         patchSize=31)  # to save time
    [kp_s, des_s] = orb.detectAndCompute(color_cv_s, None)
    [kp_t, des_t] = orb.detectAndCompute(color_cv_t, None)
    if len(kp_s) == 0 or len(kp_t) == 0:
        return success, trans

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des_s, des_t)

    pts_s = []
    pts_t = []
    for match in matches:
        pts_t.append(kp_t[match.trainIdx].pt)
        pts_s.append(kp_s[match.queryIdx].pt)
    pts_s = np.asarray(pts_s)
    pts_t = np.asarray(pts_t)
    # inlier points after initial BF matching
    if debug_draw_correspondences:
        draw_correspondences(np.asarray(source_rgbd_image.color),
                             np.asarray(target_rgbd_image.color), pts_s, pts_t,
                             np.ones(pts_s.shape[0]), "Initial BF matching")

    focal_input = (pinhole_camera_intrinsic.intrinsic_matrix[0, 0] +
                   pinhole_camera_intrinsic.intrinsic_matrix[1, 1]) / 2.0
    pp_x = pinhole_camera_intrinsic.intrinsic_matrix[0, 2]
    pp_y = pinhole_camera_intrinsic.intrinsic_matrix[1, 2]

    # Essential matrix is made for masking inliers
    pts_s_int = np.int32(pts_s + 0.5)
    pts_t_int = np.int32(pts_t + 0.5)
    [E, mask] = cv2.findEssentialMat(pts_s_int,
                                     pts_t_int,
                                     focal=focal_input,
                                     pp=(pp_x, pp_y),
                                     method=cv2.RANSAC,
                                     prob=0.995,
                                     threshold=1.0)
    if mask is None:
        return success, trans

    # inlier points after 5pt algorithm
    if debug_draw_correspondences:
        draw_correspondences(np.asarray(source_rgbd_image.color),
                             np.asarray(target_rgbd_image.color), pts_s, pts_t,
                             mask, "5-pt RANSAC")

    # make 3D correspondences
    depth_s = np.asarray(source_rgbd_image.depth)
    depth_t = np.asarray(target_rgbd_image.depth)
    pts_xyz_s = np.zeros([3, pts_s.shape[0]])
    pts_xyz_t = np.zeros([3, pts_s.shape[0]])
    cnt = 0
    for i in range(pts_s.shape[0]):
        if mask[i]:
            xyz_s = get_xyz_from_pts(pts_s[i, :], depth_s, pp_x, pp_y,
                                     focal_input)
            pts_xyz_s[:, cnt] = xyz_s
            xyz_t = get_xyz_from_pts(pts_t[i, :], depth_t, pp_x, pp_y,
                                     focal_input)
            pts_xyz_t[:, cnt] = xyz_t
            cnt = cnt + 1
    pts_xyz_s = pts_xyz_s[:, :cnt]
    pts_xyz_t = pts_xyz_t[:, :cnt]

    success, trans, inlier_id_vec = estimate_3D_transform_RANSAC(
        pts_xyz_s, pts_xyz_t)

    if debug_draw_correspondences:
        pts_s_new = np.zeros(shape=(len(inlier_id_vec), 2))
        pts_t_new = np.zeros(shape=(len(inlier_id_vec), 2))
        mask = np.ones(len(inlier_id_vec))
        cnt = 0
        for i in inlier_id_vec:
            u_s, v_s = get_uv_from_xyz(pts_xyz_s[0, i], pts_xyz_s[1, i],
                                       pts_xyz_s[2,
                                                 i], pp_x, pp_y, focal_input)
            u_t, v_t = get_uv_from_xyz(pts_xyz_t[0, i], pts_xyz_t[1, i],
                                       pts_xyz_t[2,
                                                 i], pp_x, pp_y, focal_input)
            pts_s_new[cnt, :] = [u_s, v_s]
            pts_t_new[cnt, :] = [u_t, v_t]
            cnt = cnt + 1
        draw_correspondences(np.asarray(source_rgbd_image.color),
                             np.asarray(target_rgbd_image.color), pts_s_new,
                             pts_t_new, mask, "5-pt RANSAC + 3D Rigid RANSAC")
    return success, trans
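
get_xyz_from_pts and get_uv_from_xyz are project helpers that are not shown here. A hypothetical pinhole back-projection consistent with how get_xyz_from_pts is used above might look as follows (an assumption, not the project's actual implementation):

def get_xyz_from_pts_sketch(pt_uv, depth_img, pp_x, pp_y, focal):
    # hypothetical helper: look up the depth at the (rounded) pixel and
    # unproject it with the pinhole model
    u, v = pt_uv
    z = depth_img[int(round(v)), int(round(u))]
    x = (u - pp_x) * z / focal
    y = (v - pp_y) * z / focal
    return np.asarray([x, y, z])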
コード例 #33
0
def main():


    feature_detector = cv2.FastFeatureDetector_create(threshold=25,
                                                      nonmaxSuppression=True)

    lk_params = dict(winSize=(21, 21),
                     criteria=(cv2.TERM_CRITERIA_EPS |
                               cv2.TERM_CRITERIA_COUNT, 30, 0.03))

    current_pos = np.zeros((3, 1))
    current_rot = np.eye(3)

    traj = np.zeros((800, 600, 3), dtype=np.uint8)

    prev_image = None


    camera_matrix = np.array([[1.87156301e+03, 0.0, 9.55327397e+02],
                              [0.0, 1.87098313e+03, 5.33056795e+02],
                              [0.0, 0.0, 1.0]])
 
    for index in range(0,4980):
    
        print(index)
        # load image
        image = cv2.imread(image_path_left(index))



        detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)

        kps = detector.detect(image)

        kp = np.array([x.pt for x in kps], dtype=np.float32)

        if prev_image is None:
            prev_image = image
            prev_keypoint = kp
            continue

        p1, st, err = cv2.calcOpticalFlowPyrLK(prev_image,
                                               image, prev_keypoint,
                                               None, **lk_params)

        E, mask = cv2.findEssentialMat(p1, prev_keypoint, camera_matrix,
                                       cv2.RANSAC, 0.999, 1.0, None)

        points, R, t, mask = cv2.recoverPose(E, p1, prev_keypoint, camera_matrix)

        scale = 1.0

        current_pos += current_rot.dot(t) * scale
        current_rot = R.dot(current_rot)

        x, y, z = current_pos[0], current_pos[1], current_pos[2]

        sy = math.sqrt(current_rot[0, 0] * current_rot[0, 0] + current_rot[1, 0] * current_rot[1, 0])
        roll = math.atan2(current_rot[2, 1], current_rot[2, 2])

        pitch = math.atan2(-current_rot[2, 0], sy)

        yaw = math.atan2(current_rot[1, 0], current_rot[0, 0])

        roll = math.degrees(roll)
        pitch = math.degrees(pitch)
        yaw = math.degrees(yaw)

      
        draw_x, draw_y = int(x) + 290, ((-1) * (int(z))) + 590
        cv2.circle(traj, (draw_x, draw_y), 1, (0, 0, 255), 2)
        cv2.rectangle(traj, (10, 20), (600, 60), (0, 0, 0), -1)

        text = "Coordinates: x=%2fu y=%2fu z=%2fu" % (x, y, z)

        textRot = "rotation: roll=%2fDeg pitch=%2fDeg yaw=%2fDeg" % (roll, pitch, yaw)
        
        #print(textRot)
        cv2.putText(traj, text, (20, 40), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1, 8)
        cv2.putText(traj, textRot, (20, 60), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1, 8)

        cv2.imshow('Trajectory', traj)

        # position_axes.scatter(current_pos[0][0], current_pos[2][0])
        plt.pause(.01)

        img = cv2.drawKeypoints(image, kps, None)
        cv2.imshow('feature', img)
        cv2.waitKey(1)
        filename = 'savedImage.jpg'
        cv2.imwrite(filename, traj)

        prev_image = image
        prev_keypoint = kp
コード例 #34
0
def inf_pose_flow(img1, img2, flow_pr_inf, insmap, depthmap, mdDepth, intrinsic, pid, gradComputer=None, valid_flow=None):
    insmap_np = insmap[0, 0].cpu().numpy()
    intrinsicnp = intrinsic[0].cpu().numpy()
    dummyh = 370
    samplenum = 50000
    gradbar = 0.9
    staticbar = 0.8
    _, _, h, w = img1.shape
    border_sel = np.zeros([h, w])
    border_sel[int(0.25810811 * dummyh) : int(0.99189189 * dummyh)] = 1
    xx, yy = np.meshgrid(range(w), range(h), indexing='xy')

    flow_pr_inf_x = flow_pr_inf[0, 0].cpu().numpy()
    flow_pr_inf_y = flow_pr_inf[0, 1].cpu().numpy()

    xx_nf = xx + flow_pr_inf_x
    yy_nf = yy + flow_pr_inf_y

    depthmap_np = depthmap.squeeze().cpu().numpy()
    mdDepth_np = mdDepth.squeeze().cpu().numpy()
    if gradComputer is None:
        depth_grad = np.zeros_like(depthmap_np)
    else:
        depth_grad = gradComputer.depth2grad(torch.from_numpy(depthmap_np).unsqueeze(0).unsqueeze(0)).squeeze().numpy()
        depth_grad = depth_grad / depthmap_np
        # tensor2disp(torch.from_numpy(depth_grad).unsqueeze(0).unsqueeze(0), percentile=95, viewind=0).show()
        # tensor2disp(torch.from_numpy(depth_grad).unsqueeze(0).unsqueeze(0) > 0.9, percentile=95, viewind=0).show()

    selector = (xx_nf > 0) * (xx_nf < w) * (yy_nf > 0) * (yy_nf < h) * (insmap_np == 0) * border_sel * (depthmap_np > 0) * (depth_grad < gradbar)
    selector = selector == 1

    if samplenum > np.sum(selector):
        samplenum = np.sum(selector)

    np.random.seed(pid)
    rndidx = np.random.randint(0, np.sum(selector), samplenum)

    xx_idx_sel = xx[selector][rndidx]
    yy_idx_sel = yy[selector][rndidx]

    if valid_flow is not None:
        valid_flow_np = valid_flow.squeeze().cpu().numpy()
        mag_sel = (xx_nf > 0) * (xx_nf < w) * (yy_nf > 0) * (yy_nf < h) * (insmap_np == 0) * border_sel * (depth_grad < gradbar) * valid_flow_np
    else:
        mag_sel = (xx_nf > 0) * (xx_nf < w) * (yy_nf > 0) * (yy_nf < h) * (insmap_np == 0) * border_sel * (depth_grad < gradbar)
    mag_sel = mag_sel == 1
    flow_sel_mag = np.mean(np.sqrt(flow_pr_inf_x[mag_sel] ** 2 + flow_pr_inf_y[mag_sel] ** 2))

    if flow_sel_mag < staticbar:
        istatic = True
    else:
        istatic = False

    # selvls = np.zeros([h, w])
    # selvls[yy_idx_sel, xx_idx_sel] = 1

    pts1 = np.stack([xx_idx_sel, yy_idx_sel], axis=1).astype(np.float64)
    pts2 = np.stack([xx_nf[yy_idx_sel, xx_idx_sel], yy_nf[yy_idx_sel, xx_idx_sel]], axis=1).astype(np.float64)

    E, inliers = cv2.findEssentialMat(pts1, pts2, focal=intrinsicnp[0,0], pp=(intrinsicnp[0, 2], intrinsicnp[1, 2]), method=cv2.RANSAC, prob=0.99, threshold=0.1)
    cheirality_cnt, R, t, _ = cv2.recoverPose(E, pts1, pts2, focal=intrinsicnp[0, 0], pp=(intrinsicnp[0, 2], intrinsicnp[1, 2]))

    inliers_mask = inliers == 1
    inliers_mask = np.squeeze(inliers_mask, axis=1)
    pts1_inliers = pts1[inliers_mask, :].T
    pts2_inliers = pts2[inliers_mask, :].T

    pts1_inliers = np.concatenate([pts1_inliers, np.ones([1, pts1_inliers.shape[1]])], axis=0)
    pts2_inliers = np.concatenate([pts2_inliers, np.ones([1, pts2_inliers.shape[1]])], axis=0)
    coorespondedDepth = depthmap_np[selector][rndidx][inliers_mask]
    scale = depth2scale(pts1_inliers, pts2_inliers, intrinsicnp, R, t, coorespondedDepth)
    scale_md = depth2scale(pts1_inliers, pts2_inliers, intrinsicnp, R, t, mdDepth_np[selector][rndidx][inliers_mask])

    # Image.fromarray(flow_to_image(flow_pr_inf[0].cpu().permute([1, 2, 0]).numpy())).show()
    # tensor2disp(torch.from_numpy(selector).unsqueeze(0).unsqueeze(0), vmax=1, viewind=0).show()
    # tensor2disp(depthmap > 0, vmax=1, viewind=0).show()
    # tensor2disp(torch.from_numpy(selvls).unsqueeze(0).unsqueeze(0), vmax=1, viewind=0).show()

    return R, t, scale, scale_md, flow_sel_mag, istatic
コード例 #35
0
    for m,n in matches:
        if abs(m.distance) < 0.7*abs(n.distance):
            good.append(m)
    #Decide if matches are good enough
    if len(good) < REJECT_THRESH:
        print("Not enough good matches")
        continue

    train_match_points = np.float32([ train_keypoints[m.queryIdx].pt for m in good ])#.reshape(-1,1,2)
    query_match_points = np.float32([ query_keypoints[m.trainIdx].pt for m in good ])#.reshape(-1,1,2)
    # estimate fundamental matrix
    ##F, mask = cv2.findFundamentalMat(train_match_points, query_match_points, cv2.FM_RANSAC, 0.1, 0.99)
	 
    # decompose into the essential matrix
    ##E = K.T.dot(F).dot(K)
    E, mask = cv2.findEssentialMat(train_match_points, query_match_points, 1.0, (0,0), cv2.FM_RANSAC, 0.99,0.1)
    # decompose essential matrix into R, t (See Hartley and Zisserman 9.13)
    U, S, Vt = np.linalg.svd(E)
    W = np.array([0.0, -1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]).reshape(3, 3)
   # iterate over all point correspondences used in the estimation of the fundamental matrix
    first_inliers = []
    second_inliers = []
    for i in range(len(mask)):
        if mask[i]:
            # normalize and homogenize the image coordinates
            first_inliers.append(K_inv.dot([train_match_points[i][0], train_match_points[i][1], 1.0]))
            second_inliers.append(K_inv.dot([query_match_points[i][0], query_match_points[i][1], 1.0]))
	 
    # Determine the correct choice of second camera matrix
    # only in one of the four configurations will all the points be in front of both cameras
    # First choice: R = U * Wt * Vt, T = +u_3 (See Hartley Zisserman 9.19)
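
The snippet stops before that configuration test. As a rough, self-contained sketch of the idea (not the original author's code), the four candidates R = U W V^T or U W^T V^T with t = +/- u_3 can be scored by how many triangulated points land in front of both cameras, assuming the first_inliers / second_inliers lists built above (normalized homogeneous coordinates):

def pick_pose_from_essential(E, first_inliers, second_inliers):
    # cheirality test over the four (R, t) candidates from E (Hartley & Zisserman 9.19)
    x1 = np.ascontiguousarray(np.asarray(first_inliers, dtype=np.float64)[:, :2].T)  # 2xN
    x2 = np.ascontiguousarray(np.asarray(second_inliers, dtype=np.float64)[:, :2].T)
    U, _, Vt = np.linalg.svd(E)
    if np.linalg.det(U) < 0:
        U = -U
    if np.linalg.det(Vt) < 0:
        Vt = -Vt
    W = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    best_pose, best_count = None, -1
    for R in (U.dot(W).dot(Vt), U.dot(W.T).dot(Vt)):
        for t in (U[:, 2:], -U[:, 2:]):
            P1 = np.hstack((R, t))
            X = cv2.triangulatePoints(P0, P1, x1, x2)
            X = X[:3] / X[3]                  # points in the first camera frame
            X2 = R.dot(X) + t                 # same points in the second camera frame
            in_front = np.sum((X[2] > 0) & (X2[2] > 0))
            if in_front > best_count:
                best_pose, best_count = (R, t), in_front
    return best_pose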
コード例 #36
0
ファイル: py3DRec.py プロジェクト: CarlosHVMoraes/py3DRec
#Xp_1 = np.hstack((Xh_1[:,0], Xh_1[:,1]))
#Xp_2 = np.hstack((Xh_2[:,0], Xh_2[:,1]))



myC1 = Camera.myCamera(k)
myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


# return the matching points
Xp_1, Xp_2 = clsReconstruction.getMathingPoints('b4.jpg','b5.jpg','k_cam_hp.dat')


#evaluate the essential Matrix using the camera parameter(using the original points)
E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.FM_RANSAC)

#evaluate the fundamental matrix (using the normalized points)
#F, mask = cv2.findFundamentalMat(Xp_1,Xp_2,cv2.FM_RANSAC)	
#ki = np.linalg.inv(k)

R1, R2, t = cv2.decomposeEssentialMat(E)



retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)

myC2 = Camera.myCamera(k)
myC2.projectiveMatrix(np.mat(t),R)

コード例 #37
0
	def sparceRecostructionTrueCase(file1,file2,kdef):

		k = np.mat(clsReconstruction.loadData(kdef))
		ki = np.linalg.inv(k)

		im_1 = cv2.imread(file1)
		im_2 = cv2.imread(file2)

		im_b1 = cv2.cvtColor(im_1,cv2.COLOR_RGB2GRAY)
		im_b2 = cv2.cvtColor(im_2,cv2.COLOR_RGB2GRAY)

		myC1 = Camera.myCamera(k)
		myC2 = Camera.myCamera(k)


		#place camera 1 at origin
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


		#return matching points
		Xp_1, Xp_2 = clsReconstruction.getMatchingPoints(file1,file2,kdef,30)


		#evaluate the essential Matrix using the camera parameter(using the original points)
		E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.FM_RANSAC)


		#evaluate Fundamental to get the epipolar lines
		#since we already know the camera intrinsics, it is better to evaluate F from the correspondences rather than from the 8-point routine
		F = ki.T*np.mat(E)*ki

		 
		#retrieve R and t from E
		retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)
		

		#place camera 2
		myC2.projectiveMatrix(np.mat(t),R)


		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,F,im_1,im_2)


		#triangulate points
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T


		#make them euclidian
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		#evaluate reprojection
		Xh_Reprojection_1 = myC1.project(Str_4D)
		Xh_Reprojection_2 = myC2.project(Str_4D)


		#three ways to carry out the bundle adjustment; here I am using R, t and K as parameters. Using the points as well
		# is too time-consuming, although the results are much better;
		#nR,nt, R0, R1 = clsReconstruction.bundleAdjustment(Str_4D,Xp_1,Xp_2,k,R,t)
		#Str_4D, nR,nt, R0, R1 = clsReconstruction.bundleAdjustmentwithX(Str_4D,Xp_1,Xp_2,k,R,t)	#### not working right now... 

		nk, nR, nt, R0, R1 = clsReconstruction.bundleAdjustmentwithK(Str_4D,Xp_1,Xp_2,k,R,t)
		print('old value {0:.3f}, optimized pose: {1:.3f} \n'.format(R0,R1))
		nki = np.linalg.inv(nk)


		#reevaluate essential and fundamental matrixes
		nE = clsReconstruction.skew(nt)*np.mat(nR)
		nF = nki.T*np.mat(nE)*nki


		#if we use the third option, we should reinitialize the cameras and the essential matrix, since the projective matrix will change
		myC1 = Camera.myCamera(nk)
		myC2 = Camera.myCamera(nk)
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])



		#reevaluate all variables based on new values of nR and nt
		myC2.projectiveMatrix(np.mat(nt),nR)
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		
		#Camera.myCamera.show3Dplot(Str_3D)
		Xh_Opt_1 = myC1.project(Str_4D)#.reshape(-1,2)
		Xh_Opt_2 = myC2.project(Str_4D)#.reshape(-1,2)


		#POSSIBLE IMPLEMENTATION: find residuals bigger than a threshold value and optimize their locations in R3

		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,nF,im_b1,im_b2)

		im = clsReconstruction.drawPoints(im_1,Xp_1,(50,50,250))
		im = clsReconstruction.drawPoints(im,Xh_Reprojection_1,(50,150,100))
		im = clsReconstruction.drawPoints(im,Xh_Opt_1,(250,250,50))

		im2 = clsReconstruction.drawPoints(im_2,Xp_2,(50,50,250))
		im2 = clsReconstruction.drawPoints(im2,Xh_Reprojection_2,(50,150,100))
		im2 = clsReconstruction.drawPoints(im2,Xh_Opt_2,(250,250,50))


		cv2.imshow("im",im)
		cv2.imshow("im2",im2)
		cv2.waitKey(0)
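
clsReconstruction.skew is not shown in this snippet; since the bundle-adjusted essential matrix is re-assembled above as nE = [nt]x * nR, skew is presumably the usual cross-product (skew-symmetric) matrix, e.g.:

def skew(v):
    # assumed implementation: matrix [v]x such that skew(v).dot(x) == np.cross(v, x)
    v = np.asarray(v).ravel()
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])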
コード例 #38
0
ファイル: VideoOdometry.py プロジェクト: dtbinh/soccer2
	def grab(self):

		if not self.cam:
			print('Error: camera not setup, run init() first')
			return Msg.Odom()

		try:
			# get values from last run
			pp = self.pp
			focal = self.focal
			p0 = self.p0
			R = self.R
			t = self.t
			R_f = self.R_f
			t_f = self.t_f
			# try this???
			# R = R_f.copy()
			# t = np.array([0, 0, 0], dtype=np.float)
			R = self.R
			t = self.t
			old_im = self.old_im

			ret, raw = self.cam.read()

			# end of video
			if not ret:
				print('video end')
				draw(save_pts)
				# break
				exit()

			################################################
			# only for development ... delete!
			# roi = (0,479,210,639)
			# im = raw[roi[0]:roi[1],roi[2]:roi[3]]
			im = raw
			################################################

			if ret:
				cv2.imshow('debug', im)
				cv2.waitKey(1)

			# Not enough old points, p0 ... find new ones
			if p0.shape[0] < 50:
				print('------- reset --------')
				p0 = self.featureDetection(old_im)  # old_im instead?
				if p0.shape[0] == 0:
					print('bad image')
					# continue
					return

			# p0 - old pts
			# p1 - new pts
			p0, p1 = self.featureTrack(im, old_im, p0)

			# not enough new points p1 ... bad image?
			if p1.shape[0] < 50:
				print('------- reset p1 --------')
				print('p1 size:', p1.shape)
				self.old_im = im
				self.p0 = p1
				# continue
				return

			# drawKeyPoints(im, p1)

			# since these are rectified images, fundamental (F) = essential (E)
			# E, mask = cv2.findEssentialMat(p0,p1,focal,pp,cv2.FM_RANSAC)
			# retval, R, t, mask = cv2.recoverPose(E,p0,p1,R_f,t_f,focal,pp,mask)

			E, mask = cv2.findEssentialMat(p0, p1, focal, pp, cv2.FM_RANSAC, 0.999, 1.0)
			retval, R, t, mask = cv2.recoverPose(E, p0, p1, R, t, focal, pp, mask)
			# print retval,R

			# Now update the previous frame and previous points
			# self.old_im = im.copy()
			# p0 = p1.reshape(-1,1,2)
			# p0 = p1

			# print 'p0 size',p0.shape
			# print 'p1 size',p1.shape
			# print 't',t
			# dt = t - t_prev
			# scale = np.linalg.norm(dt)
			# print scale
			scale = 1.0

			R_f = R.dot(R_f)
			# t_f = t
			t_f = t_f + scale * R_f.dot(t)

			# t_prev = t
			# t_f = t_f/t_f[2]
			# dist += np.linalg.norm(t_f[:2])

			# num = np.array([t_f[0]/t_f[2],t_f[1]/t_f[2]])
			# num = t_f
			print('position:', t_f)
			# print 'distance:', dist
			# R_f = R*R_f
			# print 'R:',R_f,'t:',t_f
			# print t_f

			save_pts.append(t_f)

			# save all
			self.p0 = p1
			self.R_f = R_f
			self.t_f = t_f
			self.old_im = im.copy()
			self.t = t
			self.R = R

			# create message
			odom = Msg.Odom()
			odom['position']['position']['x'] = t_f[0]
			odom['position']['position']['y'] = t_f[1]
			odom['position']['position']['z'] = t_f[2]

			odom['position']['orientation']['x'] = 0.0  # FIXME: 20160529 do rotation to quaternion conversion
			odom['position']['orientation']['y'] = 0.0
			odom['position']['orientation']['z'] = 0.0
			odom['position']['orientation']['w'] = 1.0

			odom['velocity']['linear']['x'] = 0.0
			odom['velocity']['linear']['y'] = 0.0
			odom['velocity']['linear']['z'] = 0.0

			odom['velocity']['angular']['x'] = 0.0
			odom['velocity']['angular']['y'] = 0.0
			odom['velocity']['angular']['z'] = 0.0

			return odom

		except KeyboardInterrupt:
			print('captured interrupt')
			exit()
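
The FIXME in grab() above leaves the published orientation as the identity quaternion. A minimal rotation-matrix-to-quaternion conversion that could fill it in (a common trace-based variant, assuming R_f is a proper rotation matrix; not part of the original module):

def rotation_to_quaternion(R):
    # returns the unit quaternion (x, y, z, w) corresponding to the 3x3 rotation R
    tr = np.trace(R)
    if tr > 0.0:
        s = np.sqrt(tr + 1.0) * 2.0
        w = 0.25 * s
        x = (R[2, 1] - R[1, 2]) / s
        y = (R[0, 2] - R[2, 0]) / s
        z = (R[1, 0] - R[0, 1]) / s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2]) * 2.0
        w = (R[2, 1] - R[1, 2]) / s
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
    elif R[1, 1] > R[2, 2]:
        s = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2]) * 2.0
        w = (R[0, 2] - R[2, 0]) / s
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
    else:
        s = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1]) * 2.0
        w = (R[1, 0] - R[0, 1]) / s
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s
    return x, y, z, w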
コード例 #39
0
def pose_estimation(source_rgbd_image, target_rgbd_image,
        pinhole_camera_intrinsic, debug_draw_correspondences):
    success = False
    trans = np.identity(4)

    # transform double array to uint8 array
    color_cv_s = np.uint8(np.asarray(source_rgbd_image.color)*255.0)
    color_cv_t = np.uint8(np.asarray(target_rgbd_image.color)*255.0)

    orb = cv2.ORB_create(scaleFactor=1.2, nlevels = 8, edgeThreshold = 31,
            firstLevel = 0, WTA_K = 2, scoreType=cv2.ORB_HARRIS_SCORE,
            nfeatures = 100, patchSize = 31) # to save time
    [kp_s, des_s] = orb.detectAndCompute(color_cv_s, None)
    [kp_t, des_t] = orb.detectAndCompute(color_cv_t, None)
    if len(kp_s) == 0 or len(kp_t) == 0:
        return success, trans

    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des_s,des_t)

    pts_s = []
    pts_t = []
    for match in matches:
        pts_t.append(kp_t[match.trainIdx].pt)
        pts_s.append(kp_s[match.queryIdx].pt)
    pts_s = np.asarray(pts_s)
    pts_t = np.asarray(pts_t)
    # inlier points after initial BF matching
    if debug_draw_correspondences:
        draw_correspondences(np.asarray(source_rgbd_image.color),
                np.asarray(target_rgbd_image.color), pts_s, pts_t,
                np.ones(pts_s.shape[0]), "Initial BF matching")

    focal_input = (pinhole_camera_intrinsic.intrinsic_matrix[0,0] +
            pinhole_camera_intrinsic.intrinsic_matrix[1,1]) / 2.0
    pp_x = pinhole_camera_intrinsic.intrinsic_matrix[0,2]
    pp_y = pinhole_camera_intrinsic.intrinsic_matrix[1,2]

    # Essential matrix is made for masking inliers
    pts_s_int = np.int32(pts_s + 0.5)
    pts_t_int = np.int32(pts_t + 0.5)
    [E, mask] = cv2.findEssentialMat(pts_s_int, pts_t_int, focal=focal_input,
            pp=(pp_x, pp_y), method=cv2.RANSAC, prob=0.995, threshold=1.0)
    if mask is None:
        return success, trans

    # inlier points after 5pt algorithm
    if debug_draw_correspondences:
        draw_correspondences(np.asarray(source_rgbd_image.color),
                np.asarray(target_rgbd_image.color),
                pts_s, pts_t, mask, "5-pt RANSAC")

    # make 3D correspondences
    depth_s = np.asarray(source_rgbd_image.depth)
    depth_t = np.asarray(target_rgbd_image.depth)
    pts_xyz_s = np.zeros([3, pts_s.shape[0]])
    pts_xyz_t = np.zeros([3, pts_s.shape[0]])
    cnt = 0
    for i in range(pts_s.shape[0]):
        if mask[i]:
            xyz_s = get_xyz_from_pts(pts_s[i,:],
                    depth_s, pp_x, pp_y, focal_input)
            pts_xyz_s[:,cnt] = xyz_s
            xyz_t = get_xyz_from_pts(pts_t[i,:],
                    depth_t, pp_x, pp_y, focal_input)
            pts_xyz_t[:,cnt] = xyz_t
            cnt = cnt + 1
    pts_xyz_s = pts_xyz_s[:,:cnt]
    pts_xyz_t = pts_xyz_t[:,:cnt]

    success, trans, inlier_id_vec = estimate_3D_transform_RANSAC(
            pts_xyz_s, pts_xyz_t)

    if debug_draw_correspondences:
        pts_s_new = np.zeros(shape=(len(inlier_id_vec),2))
        pts_t_new = np.zeros(shape=(len(inlier_id_vec),2))
        mask = np.ones(len(inlier_id_vec))
        cnt = 0
        for i in inlier_id_vec:
            u_s,v_s = get_uv_from_xyz(pts_xyz_s[0,i], pts_xyz_s[1,i],
                    pts_xyz_s[2,i], pp_x, pp_y, focal_input)
            u_t,v_t = get_uv_from_xyz(pts_xyz_t[0,i], pts_xyz_t[1,i],
                    pts_xyz_t[2,i], pp_x, pp_y, focal_input)
            pts_s_new[cnt,:] = [u_s,v_s]
            pts_t_new[cnt,:] = [u_t,v_t]
            cnt = cnt + 1
        draw_correspondences(np.asarray(source_rgbd_image.color),
                np.asarray(target_rgbd_image.color),
                pts_s_new, pts_t_new, mask, "5-pt RANSAC + 3D Rigid RANSAC")
    return success, trans
コード例 #40
0
ファイル: Epipolar.py プロジェクト: jimchenhub/Smart-Car
    matches = bf.knnMatch(des1, des2, k=2)
    good = []
    pts1 = []
    pts2 = []
    # build the lists of matched points
    for i,(m,n) in enumerate(matches):
        if m.distance < 0.8*n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)  # DMatch.trainIdx: index of the descriptor in the train image
            pts1.append(kp1[m.queryIdx].pt)  # DMatch.queryIdx: index of the descriptor in the query image

    # compute the fundamental matrix from the matched point lists
    pts1 = np.int32(pts1)
    pts2 = np.int32(pts2)
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
    E, mask = cv2.findEssentialMat(pts1, pts2, cv2.FM_RANSAC)
    # select inliers (mask: 0 = outlier, 1 = inlier)
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    # find the epipolar lines in the right image
    # pts2.reshape(-1,1,2): reshape the flat point array into an N x 1 x 2 matrix
    lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2, F)
    # reshape into an N x 3 array of lines (a, b, c): ax + by + c = 0
    lines1 = lines1.reshape(-1, 3)
    img5,img6 = drawlines(img1, img2, lines1, pts1, pts2)

    # find the epipolar lines in the left image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3,img4 = drawlines(img2, img1, lines2, pts2, pts1)
コード例 #41
0
gt_trajectory = Read_gt_trajectory(dataset_pose_path)
first_frame = cv2.imread(seq00_list[0], 0)
second_frame = cv2.imread(seq00_list[1], 0)
kp1, cur_R, cur_t = process_first_frames(first_frame, second_frame, k)
last_frame = second_frame

## main loop ##
for i in range(len(seq00_list)):
    ## read the new frame from the image paths list ##
    new_frame = cv2.imread(seq00_list[i], 0)
    ## track the feature movement from prev frame to current frame ##
    kp1, kp2 = featureTracking(last_frame, new_frame, kp1)
    ## find the rotation and translation matrix ##
    E, mask = cv2.findEssentialMat(kp2,
                                   kp1,
                                   k,
                                   method=cv2.RANSAC,
                                   prob=0.999,
                                   threshold=1.0)
    _, R, t, mask = cv2.recoverPose(E, kp2, kp1, k)
    ## find the change of the feature location ##
    change = np.mean(np.abs(kp2 - kp1))
    ## find the scale of the movement from the ground-truth trajectory ##
    absolute_scale = getAbsoluteScale(gt_trajectory, i)
    if absolute_scale > 2:
        absolute_scale = 1
    ## check whether the vehicle is actually moving by looking at the mean feature displacement ##
    if change > 5:
        ## accumulate the translation and rotation to find the X, Y, Z locations ##
        cur_t = cur_t + absolute_scale * cur_R.dot(t)
        cur_R = R.dot(cur_R)
    ## if the number of tracked features drops below a threshold, re-detect features ##
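
getAbsoluteScale above is assumed to return the ground-truth distance travelled between two consecutive poses, since monocular visual odometry recovers translation only up to scale. A sketch under the assumption that gt_trajectory is an N x 3 array of camera positions:

import numpy as np

def getAbsoluteScale(gt_trajectory, frame_id):
    # distance between the previous and current ground-truth camera positions
    prev_pos = np.asarray(gt_trajectory[frame_id - 1])
    curr_pos = np.asarray(gt_trajectory[frame_id])
    return float(np.linalg.norm(curr_pos - prev_pos))
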
コード例 #42
0
    matches.append(cv2.DMatch(i, i, 0, 0))

img3 = cv2.drawMatches(images[0].img, kps1, images[1].img, kps2, matches, images[1].img, flags=2)


plt.imshow(img3)
plt.savefig("newimg.png")  # save before plt.show(); many backends empty the figure once the window is closed
plt.show()

new_pts1 = numpy.int32(points1)
new_pts2 = numpy.int32(points2)

focalLength = images[0].k[0][0]
img1 = images[0]
img2 = images[1]

E, mask = cv2.findEssentialMat(new_pts1, new_pts2, focal=focalLength)

print "E"
print E
#
points, r, t, newMask = cv2.recoverPose(E, new_pts1, new_pts2, mask=mask)
# print points
print "E-R"
print r
print "E-T"
print t

F1, mask = cv2.findFundamentalMat(new_pts1, new_pts2, method=cv2.FM_8POINT)
F2, mask = cv2.findFundamentalMat(new_pts1, new_pts2)
F = F2
print "F1"
コード例 #43
0
ファイル: mono.py プロジェクト: bobbyluig/Eclipse
    return kp1, kp2

fast = cv2.FastFeatureDetector_create(20)

lk_params = dict(winSize  = (21, 21),
                 maxLevel = 3,
                 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

img1 = cv2.imread('KITTI_VO/00/image_2/%06d.png' % 0)
img2 = cv2.imread('KITTI_VO/00/image_2/%06d.png' % 1)

img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

kp1 = fast.detect(img1)

kp1, kp2 = feature_tracking(img1, img2, kp1)

focal = 718.8560
pp = (607.1928, 185.2157)

E, mask = cv2.findEssentialMat(kp2, kp1, focal=focal, pp=pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, mask = cv2.recoverPose(E, kp2, kp1, focal=focal, pp=pp)
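
Only the return statement of feature_tracking survives at the top of this snippet. A typical KLT-based version, reusing the lk_params dict defined above, might look like the following (a sketch, not the project's verbatim code):

import cv2
import numpy as np

def feature_tracking(img1, img2, kp1):
    # the KeyPoints from the FAST detector become an Nx1x2 float32 array for the tracker
    p1 = np.float32([k.pt for k in kp1]).reshape(-1, 1, 2)
    # track the points from img1 into img2 with pyramidal Lucas-Kanade optical flow
    p2, status, _err = cv2.calcOpticalFlowPyrLK(img1, img2, p1, None, **lk_params)
    ok = status.ravel() == 1
    # keep only the successfully tracked points, returned as Nx2 arrays
    return p1.reshape(-1, 2)[ok], p2.reshape(-1, 2)[ok]
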






コード例 #44
0
        for l in range(len(matches)):
            pts1.append(pts_1[0:2, int(matches[l].queryIdx)])
            pts2.append(pts_2[0:2, int(matches[l].trainIdx)])

        pts1 = np.array([pts1])
        pts2 = np.array([pts2])

        K1 = cal_db[img1_idx][0]
        K2 = cal_db[img2_idx][0]
        pts1 = cv.undistortPoints(pts1, K1, None)
        pts2 = cv.undistortPoints(pts2, K2, None)
        K = np.eye(3, 3)

        E, mask = cv.findEssentialMat(pts1,
                                      pts2,
                                      K,
                                      method=cv.RANSAC,
                                      threshold=inlierThreshold)
        inliers, R, t, mask = cv.recoverPose(
            E, pts1, pts2, K, mask=mask
        )  #Getting the rotation matrix and translation vector from image pair

        print("Found %d good matches." % len(matches))

        GT_R1 = cal_db[img1_idx][1]
        GT_R2 = cal_db[img2_idx][1]
        gt_R = np.matmul(GT_R2,
                         np.transpose(GT_R1))  #ground truth rotation matrix

        GT_t1 = cal_db[img1_idx][2]
        GT_t2 = cal_db[img2_idx][2]
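
The snippet is cut off before the estimated pose is compared with the ground truth; a common way to finish the evaluation is the angular error between R and gt_R, plus the angle between the translation directions (monocular translation has no absolute scale). A hedged sketch of such metrics:

import numpy as np

def rotation_error_deg(R_est, R_gt):
    # angular distance between two rotations, from trace(R_est^T R_gt) = 1 + 2 cos(theta)
    cos_theta = (np.trace(R_est.T.dot(R_gt)) - 1.0) / 2.0
    return np.degrees(np.arccos(np.clip(cos_theta, -1.0, 1.0)))

def translation_error_deg(t_est, t_gt):
    # compare translation directions only, since the scale is unobservable
    a = t_est.ravel() / np.linalg.norm(t_est)
    b = t_gt.ravel() / np.linalg.norm(t_gt)
    return np.degrees(np.arccos(np.clip(abs(a.dot(b)), -1.0, 1.0)))
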
コード例 #45
0
def main():
    # sift = cv2.xfeatures2d.SIFT_create()
    BasePath = './stereo/centre/'
    K, LUT = getCameraMatrix('./model')
    images = []
    H1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    H1_calc = H1
    P0 = H1[:3]
    cam_pos = np.array([0, 0, 0])
    cam_pos = np.reshape(cam_pos, (1, 3))
    test = os.listdir(BasePath)
    builtin = []
    linear = []
    for image in sorted(test):
        # print(image)
        images.append(image)

    print(len(images[:-2]))
    # cam_pos = np.zeros([1,2])
    # for file in range(len(images)-1):
    for img, _ in enumerate(images[:-2]):
        # print(img)
        img1 = cv2.imread("%s/%s" % (BasePath, images[img]), 0)
        img2 = cv2.imread("%s/%s" % (BasePath, images[img + 1]), 0)
        und1 = undistortImageToGray(img1, LUT)
        und2 = undistortImageToGray(img2, LUT)

        pts1, pts2 = features(und1, und2)
        # print(pts1.shape)
        if pts1.shape[0] < 5:
            continue

        F, _ = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        E, _ = cv2.findEssentialMat(pts1,
                                    pts2,
                                    focal=K[0][0],
                                    pp=(K[0, 2], K[1, 2]),
                                    method=cv2.RANSAC,
                                    prob=0.999,
                                    threshold=0.5)

        _, R_new, C_new, _ = cv2.recoverPose(E,
                                             pts1,
                                             pts2,
                                             focal=K[0, 0],
                                             pp=(K[0, 2], K[1, 2]))

        if np.linalg.det(R_new) < 0:
            R_new = -R_new
            # C_new = -C_new
        # if np.linalg.det(R_calc)<0:
        #     R_calc = -R_calc
        H2 = np.hstack((R_new, np.matmul(-R_new, C_new)))
        H2 = np.vstack((H2, [0, 0, 0, 1]))

        H1 = np.matmul(H1, H2)

        xpt = H1[0, 3]
        zpt = H1[2, 3]
        # builtin.append((xpt,zpt))
        print(img)

        plt.plot(-xpt, zpt, '.g')
        # plt.plot(xpt_calc,-zpt_calc,'.r')

        plt.pause(0.01)
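
The features helper called above only needs to return two matched Nx2 point arrays for the undistorted frames. A minimal ORB + ratio-test version is sketched below (an assumption; the commented-out SIFT line suggests the project may use a different detector):

import cv2
import numpy as np

def features(img1, img2, ratio=0.75):
    # detect and describe keypoints in both grayscale frames
    orb = cv2.ORB_create(nfeatures=2000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    if des1 is None or des2 is None:
        return np.empty((0, 2)), np.empty((0, 2))
    # brute-force Hamming matching with Lowe's ratio test
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(des1, des2, k=2)
    pts1, pts2 = [], []
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < ratio * n.distance:
            pts1.append(kp1[m.queryIdx].pt)
            pts2.append(kp2[m.trainIdx].pt)
    return np.float32(pts1), np.float32(pts2)
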
コード例 #46
0
	def matchingTests(): 
		im_1 = cv2.imread('c1.bmp')
		im_2 = cv2.imread('c2.bmp')

		#k = np.mat(([[ 683.39404297,    0.        ,  267.21336591], [   0.        ,  684.3449707 ,  218.56421036],  [   0.        ,    0.        ,    1.        ]]))

		k = clsReconstruction.loadData('k_cam_hp.dat')
		#resize, if necessary
		#im_1 = cv2.resize(im_1,None,fx=0.3, fy=0.3, interpolation = cv2.INTER_CUBIC)
		#im_2 = cv2.resize(im_2,None,fx=0.3, fy=0.3, interpolation = cv2.INTER_CUBIC)

		#convert to gray
		im_1 = cv2.cvtColor(im_1, cv2.COLOR_BGR2GRAY)
		im_2 = cv2.cvtColor(im_2, cv2.COLOR_BGR2GRAY)


		#proceed with sparse feature matching
		orb = cv2.ORB_create()
		
		kp_1, des_1 = orb.detectAndCompute(im_1,None)
		kp_2, des_2 = orb.detectAndCompute(im_2,None)

		bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

		matches = bf.match(des_1,des_2)
		
		matches = sorted(matches, key = lambda x:x.distance)

		draw_params = dict(matchColor = (20,20,20), singlePointColor = (200,200,200),
							matchesMask = None,
							flags = 0)

		
		im_3 = cv2.drawMatches(im_1,kp_1,im_2,kp_2,matches[0:20], None, **draw_params)
		
		
		#select points to evaluate the fundamental matrix
		pts1 = []
		pts2 = []
		idx =  matches[1:20]

		for i in idx:
			pts1.append(kp_1[i.queryIdx].pt)
			pts2.append(kp_2[i.trainIdx].pt)
	

		
		pts1 = np.array(pts1)
		pts2 = np.array(pts2)

		#create homogeneous coordinates
		pones = np.ones((1,len(pts1))).T

		pth_1 = np.hstack((pts1,pones))
		pth_2 = np.hstack((pts2,pones))

		k = np.array(k)
		ki = np.linalg.inv(k)
		#normalize the points
		pthn_1 = []
		pthn_2 = []

		for i in range(0,len(pts1)):
			pthn_1.append((np.mat(ki) * np.mat(pth_1[i]).T).transpose())
			pthn_2.append((np.mat(ki) * np.mat(pth_2[i]).T).transpose())

		ptn1 = []
		ptn2 = []
		for i in range(0,len(pts1)):
			ptn1.append([pthn_1[i][0,0],pthn_1[i][0,1]])
			ptn2.append([pthn_2[i][0,0],pthn_2[i][0,1]])

		ptn1 = np.array(ptn1)
		ptn2 = np.array(ptn2)
		#evaluate the essential matrix (using the original pixel points, not the normalized ones)
		E, mask0 = cv2.findEssentialMat(pts1,pts2,k,cv2.RANSAC)
		#evaluate the fundamental matrix (also from the original pixel points)
		F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)

		E = np.mat(E)
		F = np.mat(F)


		_, R, t, mask1 = cv2.recoverPose(E,ptn1,ptn2)
		
		#selecting only inlier points
		ptn1 = ptn1[mask.ravel() == 1]
		ptn2 = ptn2[mask.ravel() == 1]

		# Find epilines corresponding to points in right image (second image) and
		# drawing its lines on left image
		lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
		lines1 = lines1.reshape(-1,3)
		img5,img6 = clsReconstruction.drawlines(im_1,im_2,lines1,pts1,pts2)
		# Find epilines corresponding to points in left image (first image) and
		# drawing its lines on right image
		lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
		lines2 = lines2.reshape(-1,3)
		img3,img4 = clsReconstruction.drawlines(im_2,im_1,lines2,pts2,pts1)
		plt.subplot(131),plt.imshow(img5)
		plt.subplot(132),plt.imshow(img3)
		plt.subplot(133),plt.imshow(im_3)

		plt.show()
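
With R and t recovered from the essential matrix, the normalized correspondences ptn1/ptn2 from the snippet above can be triangulated into a sparse point cloud. A short sketch using cv2.triangulatePoints (an illustration, not part of the original class):

import cv2
import numpy as np

def triangulate_normalized(R, t, ptn1, ptn2):
    # camera 1 sits at the origin, camera 2 at [R | t]; the points are already
    # normalized, so both projection matrices use identity intrinsics
    P1 = np.hstack((np.eye(3), np.zeros((3, 1))))
    P2 = np.hstack((R, t.reshape(3, 1)))
    pts_4d = cv2.triangulatePoints(P1, P2,
                                   ptn1.T.astype(np.float64),
                                   ptn2.T.astype(np.float64))
    # convert homogeneous coordinates to Euclidean 3D points
    return (pts_4d[:3] / pts_4d[3]).T
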