Example #1
def computeRelativeOrientation(tie_pts, cam_m, distor):
    # computes relative orientation using OpenCV's five-point algorithm

    tp1, tp2, tp1_u, tp2_u  = UndistorTiePoints(tie_pts, cam_m, distor)

    # eight-point algorithm (alternative)
    #F, mask = cv2.findFundamentalMat(tp1[0, :], tp2[0, :], method=cv2.FM_RANSAC, ransacReprojThreshold=0.1, confidence=0.95)
    #em = cam_m.T.dot(F).dot(cam_m)
    
    em, mask = cv2.findEssentialMat(tp1[0, :], tp2[0, :], 
                                    threshold=0.05, 
                                    prob=0.95, 
                                    focal=cam_m[0, 0], 
                                    pp=(cam_m[0, 2], cam_m[1, 2]))


    F = np.linalg.inv(cam_m.T).dot(em).dot(np.linalg.inv(cam_m))

    # optimal solution for triangulation of object points
    p1, p2 = cv2.correctMatches(em, tp1_u, tp2_u)

    pts, R, t, mask = cv2.recoverPose(em, p1, p2)
    P = np.hstack((R, t))
    pm1 = np.eye(3, 4)

    pts_sps = compute3Dpoints(pm1, P, p1, p2)

    return pts_sps, P, p1, p2
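The compute3Dpoints helper called above isn't shown in the snippet; a minimal sketch of what it presumably does with cv2.triangulatePoints, assuming p1/p2 are the (1, N, 2) arrays returned by cv2.correctMatches:

import cv2
import numpy as np

def compute3Dpoints(pm1, pm2, p1, p2):
    # triangulate with the two 3x4 projection matrices; triangulatePoints wants 2xN point arrays
    pts4d = cv2.triangulatePoints(pm1, pm2, p1.reshape(-1, 2).T, p2.reshape(-1, 2).T)
    pts4d /= pts4d[3]   # dehomogenize
    return pts4d[:3].T  # N x 3 object points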
Example #2
def find_transform(K, p1, p2):
    # estimate the essential matrix from the matched points, using RANSAC to reject mismatches
    # (five-point algorithm); mask marks the RANSAC inliers, and only inliers are used in later steps
    E, mask = cv2.findEssentialMat(p1, p2, K, cv2.RANSAC, 0.999, 1.0)

    # decompose the essential matrix E to obtain the pose R, T
    pass_count, R, T, mask = cv2.recoverPose(E, p1, p2, K, mask=mask)

    return R, T, mask
Example #3
 def Rt(self):
     '''This function finds the rotation matrix R and the translation vector t using corresponding points and a given camera matrix.'''
     pts_1 = np.array(self.ptsL, dtype=np.float64)  #Convert points to float
     pts_2 = np.array(self.ptsR, dtype=np.float64)
     if len(self.essential_matrix) > 0:  #Works for a list or an ndarray (comparing an array to [] does not).
         poseval, self.R, self.t, mask, _ = cv.recoverPose(
             self.essential_matrix, pts_1, pts_2, self.camera_matrix, 0.4
         )  #Find the pose from corresponding points, the essential matrix, the camera matrix and a distance threshold; this overload also returns triangulated points.
     else:
         poseval, self.R, self.t, mask, _ = cv.recoverPose(
             self.fundamental_matrix, pts_1, pts_2, self.camera_matrix, 0.4)
     #Keep only the points (and their colours) that passed the cheirality check in recoverPose.
     self.ptsL = self.ptsL[mask.ravel() == 255]
     self.ptsR = self.ptsR[mask.ravel() == 255]
     self.colours = self.colours[mask.ravel() == 255]
Example #4
 def get_pose(self, image1, idx1, image2, idx2, K):
     Rt = np.eye(4)
     E, mask = cv2.findEssentialMat(image1.kps[idx1], image2.kps[idx2], K,
                                    cv2.RANSAC)
     _, rot, trans, mask = cv2.recoverPose(E, image1.kps[idx1],
                                           image2.kps[idx2], K)
     Rt[:3, :3] = rot
     Rt[:3, 3] = trans.squeeze()
     return Rt
Example #5
def recoverPose(E, points1, points2, K=KMatrix()):
    '''
    takes E, points1, points2, K; returns points, r, t, newMask
    r, t transform from camera 2 coordinates to camera 1 coordinates
    '''
    points, r, t, newMask = cv2.recoverPose(E, np.array(points1), np.array(points2), pp=K.principalPoint)
    r = np.linalg.inv(r)  # invert the pose; for a rotation matrix, r.T is equivalent
    t = t * -1
    return points, r, t, newMask
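Since r returned by cv2.recoverPose is a proper rotation matrix, the np.linalg.inv call above could equally be a transpose; a quick self-contained check:

import cv2
import numpy as np

R, _ = cv2.Rodrigues(np.array([0.1, -0.2, 0.3]))  # an arbitrary rotation
assert np.allclose(np.linalg.inv(R), R.T)         # inverse == transpose for rotations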
Example #6
def find_transform(K, p1, p2):
    focal_length = 0.5 * (K[0,0] + K[1, 1])
    principle_point = (K[0, 2], K[1, 2])

    E,mask = cv2.findEssentialMat(p1, p2, focal_length, principle_point, cv2.RANSAC, 0.999, 1.0)
    cameraMatrix = np.array([[focal_length, 0, principle_point[0]], [0, focal_length, principle_point[1]], [0, 0, 1]])
    pass_count, R, T, mask = cv2.recoverPose(E, p1, p2, cameraMatrix, mask=mask)

    return R, T, mask
Example #7
 def calculate_transformation(self):
     # self.kp1 / self.kp2 are assumed to hold the undistorted matched keypoints
     E, mask_e = cv2.findEssentialMat(self.kp1, self.kp2, focal=1.0, pp=(0., 0.),
                                      method=cv2.RANSAC, prob=0.999, threshold=0.001)

     print("Essential matrix: used", np.sum(mask_e), "of total", len(self.kp1), "matches")

     points, R, t, mask_RP = cv2.recoverPose(E, self.kp1, self.kp2, mask=mask_e)
     print("points:", points, "\trecover pose mask:", np.sum(mask_RP != 0))
     print("R:", R, "t:", t.T)
Example #8
def recover_pose(config, kp1, desc1, kp2, desc2):
    flann_params = dict(
        algorithm=FLANN_INDEX_LSH,  # LSH index (== 6), assumed defined at module level
        table_number=6,  # 12
        key_size=12,  # 20
        multi_probe_level=1)  #2
    matcher = cv2.FlannBasedMatcher(
        flann_params, {})  # bug : need to pass empty dict (#1329)

    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)  #2
    logging.info('raw match count: %s', len(raw_matches))

    mi, p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    # p1 = cv2.KeyPoint_convert(kp_pairs[0])
    # p1 = cv2.KeyPoint_convert(kp_pairs[1])

    if len(p1) < 4:
        logging.info('only %s matches left after filtering (fewer than 4), cannot localize', len(p1))
        return

    logging.info('match count after filtering: %s', len(kp_pairs))

    # H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    # F, status = cv2.findFundamentalMat(p1, p2)
    # H, status = cv2.findHomography(p1, p2, 0)
    H, status = None, None
    if config.homography:
        logging.info("filtering matches with findHomography")
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 3.0)
    elif config.fundamental:
        logging.info("filtering matches with findFundamentalMat")
        H, status = cv2.findFundamentalMat(p1, p2)

    if config.show:
        explore_match(config, kp_pairs, status, H)

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)

    pts1 = np.float64(
        [kpp[0].pt for kpp, flag in zip(kp_pairs, status) if flag])
    pts2 = np.float64(
        [kpp[1].pt for kpp, flag in zip(kp_pairs, status) if flag])

    n = len(np.unique(pts2, axis=0))
    if n < 9:
        logging.info('final match count (%s) is fewer than 9, cannot localize', n)
        return

    K = camera_matrix(config)
    E, mask = cv2.findEssentialMat(pts1, pts2, K)
    retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, K)

    nv = np.matrix(R) * np.float64([0, 0, 1]).reshape(3, 1)
    yaw = np.arctan2(nv[0], nv[2])
    return yaw, t
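The yaw readout at the end rotates the camera's forward axis [0, 0, 1] by R and measures its heading with arctan2; a self-contained sanity check using a known rotation about the y axis:

import cv2
import numpy as np

yaw_true = 0.3                                      # radians, about the y axis
R, _ = cv2.Rodrigues(np.array([0., yaw_true, 0.]))
nv = R @ np.float64([0, 0, 1])
yaw = np.arctan2(nv[0], nv[2])
assert np.isclose(yaw, yaw_true)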
Example #9
    def test_triangulation(self):
        np.set_printoptions(suppress=True,
                            formatter={'float_kind': '{:0.2f}'.format})
        src, dst, expected_mask = create_point_pair(3)
        src0 = cv2.undistortPoints(np.expand_dims(src, axis=1),
                                   cameraMatrix=self.calibration,
                                   distCoeffs=None)
        dst0 = cv2.undistortPoints(np.expand_dims(dst, axis=1),
                                   cameraMatrix=self.calibration,
                                   distCoeffs=None)
        src = np.expand_dims(src, axis=1)
        dst = np.expand_dims(dst, axis=1)
        # F_, mask= cv2.findFundamentalMat(src,dst,cv2.FM_RANSAC,0.004,0.99)

        E, mask = cv2.findEssentialMat(dst,
                                       src,
                                       cameraMatrix=self.calibration,
                                       method=cv2.RANSAC,
                                       prob=0.999,
                                       threshold=0.004)
        actual_E_norm = E / np.linalg.norm(E)
        print('Actual E:', actual_E_norm)
        print(mask)
        _, local_R, local_t, mask = cv2.recoverPose(
            E, dst, src, cameraMatrix=self.calibration)
        print("R:", local_R)
        print("t:", local_t)
        s = 1.38 / local_t[0]
        local_t[0] *= s
        local_t[2] *= s
        print("t:", local_t)
        # self.P_0 = np.dot(self.calibration,np.hstack((np.identity(3),np.zeros((3,1)))))
        # P = np.dot(self.calibration,np.hstack((local_R.T,-np.dot(local_R.T,local_t))))
        P = np.hstack((local_R.T, -np.dot(local_R.T, local_t)))
        # points4D = cv2.triangulatePoints(self.P_0,P,src, dst)
        points4D = cv2.triangulatePoints(self.P_0, P, src0, dst0)
        points4D = points4D / np.tile(points4D[-1, :], (4, 1))
        pt3d = points4D

        # points4D = points4D / np.tile(points4D[-1, :], (4, 1))
        # pt3d = points4D[:3, :].T
        # for i,good in enumerate(mask):
        #     if good ==255:
        #         print("Recover Points:\n",pt3d[:3].T[i])

        s = np.dot(self.P_0, points4D)
        s /= s[2]
        d = np.dot(P, points4D)
        d /= d[2]
        for i, good in enumerate(mask):
            if good == 255:
                print("pt3d", pt3d.T[i])
                print("src", src0[i, 0, :])
                print("src", s.T[i])
                print("dst", dst0[i, 0, :])
                print("dst", d.T[i])
Example #10
    def mainLoop(self, _event):
        if self.ok == False:
            return
        else:
            if self.status == "init":
                self.map.pushFrame(self.cur_frame)
                self.status = "default"
                return
            else:
                print(len(self.cur_frame.keypoints))
                ref_frame = self.map.popFrame()
                try:
                    cur_frame = self.cur_frame
                    #ref_frame = self.map.popFrame()
                    matches = matchBF(cur_frame.descriptors,
                                      ref_frame.descriptors)
                    cur_kp, ref_kp = convertKeypoint(cur_frame.keypoints,
                                                     ref_frame.keypoints,
                                                     matches)
                    K = np.array(cur_frame.getK()).reshape([3, 3])
                    print(len(cur_kp), len(ref_kp))
                    focal, pp = cur_frame.getFocalPp()
                    #print focal, pp
                    #E, _ = cv2.findEssentialMat(cur_kp, ref_kp, K, cv2.RANSAC, threshold=1.0)
                    E, _ = cv2.findEssentialMat(cur_kp,
                                                ref_kp,
                                                focal[0],
                                                pp,
                                                cv2.RANSAC,
                                                threshold=1.0)
                    _, R, t, mask = cv2.recoverPose(E, cur_kp, ref_kp, K, 1.0)

                    dR = R - np.eye(3, dtype=float)
                    d = np.sum(dR * dR)
                    print(d)
                    if d > 1:
                        #self.map.pushFrame(cur_frame)
                        raise Exception()
                    dt = t - np.array([0, 0, 0])
                    d = np.sum(dt * dt)
                    print(d)
                    if d > 5:
                        raise Exception()

                    pre_R, pre_t = ref_frame.getRt()
                    cur_R = np.dot(pre_R, R)
                    cur_t = pre_t + t
                    print(cur_R)
                    print(cur_t)
                    cur_frame.setRt(cur_R, cur_t)
                    self.lossframe = 0
                    self.map.pushFrame(cur_frame)
                except:
                    self.map.pushFrame(ref_frame)
                    self.lossframe = self.lossframe + 1
                    print("loss frame ", self.lossframe)
Example #11
    def run(self):
        self.tmap.start()
        time.sleep(3)
        # Load first image
        str_ptr = self.dataSets_PATH + 'I1_%06d.png' % (0)
        img_ptr = cv2.imread(str_ptr, 0)
        kp = self.fast.detect(img_ptr, None)
        kp_ptr = np.array([kp[idx].pt for idx in range(len(kp))], np.float32)
        # main loop
        for frame_id in range(1, 371):
            str_cur = self.dataSets_PATH + 'I1_%06d.png' % frame_id
            img_cur = cv2.imread(str_cur, 0)
            img_with_kp = cv2.drawKeypoints(
                img_ptr, kp, None, None,
                cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            cv2.imshow('image', img_with_kp)
            kp_cur, st, err = cv2.calcOpticalFlowPyrLK(img_ptr, img_cur,
                                                       kp_ptr, None,
                                                       **self.lk_params)

            kp_ptr = np.delete(
                kp_ptr,
                np.where((st == 0)
                         | ((kp_cur[:, [1]] < 0) | (kp_cur[:, [0]] < 0)))[0],
                0)
            kp_cur = np.delete(
                kp_cur,
                np.where((st == 0)
                         | ((kp_cur[:, [1]] < 0) | (kp_cur[:, [0]] < 0)))[0],
                0)
            E, mask = cv2.findEssentialMat(kp_cur, kp_ptr, self.focal,
                                           (self.offsetx, self.offsety),
                                           cv2.RANSAC, 0.999, 1.0)
            res = cv2.recoverPose(E, kp_cur, kp_ptr, focal=self.focal,
                                  pp=(self.offsetx, self.offsety))
            dR = np.matrix(res[1])
            dt = np.matrix(res[2])
            si = dt.item(0) + dt.item(1) + dt.item(2)
            if (si < 0):
                dt = -dt
            self.t = self.t + self.R * dt
            self.R = dR * self.R
            if (kp_ptr.shape[0] < self.minFeatureNum) & (frame_id != 1):
                kp = self.fast.detect(img_ptr, None)
                kp_ptr = np.array([kp[idx].pt for idx in range(len(kp))],
                                  np.float32)
                kp_cur, st, err = cv2.calcOpticalFlowPyrLK(
                    img_ptr, img_cur, kp_ptr, None, **self.lk_params)
                kp_cur = np.delete(
                    kp_cur,
                    np.where((st == 0) | (
                        (kp_cur[:, [1]] < 0) | (kp_cur[:, [0]] < 0)))[0], 0)
            img_ptr = img_cur
            kp_ptr = kp_cur
            self.tmap.setPoint(self.t[0], self.t[2])
            cv2.waitKey(30)
Example #12
def handle_stereo_images(config):
    focal = [float(s) for s in config.focal.split(',')]
    rotate = config.yaw * math.pi / 180
    offset = [float(s) for s in config.offset.split(',')]
    filename1, filename2 = config.images
    feature, count = config.feature, config.nFeatures
    asift = config.asift
    homography = config.homography
    imgLeft, imgRight = cv2.imread(filename1, 0), cv2.imread(filename2, 0)
    h, w = imgLeft.shape[:2]
    # R, _jacob = cv2.Rodrigues(rotation_matrix(rotate, axis='y').A)
    R = rotation_matrix(rotate, axis='y').A
    T = np.float64(offset)
    distCoeffs = np.zeros(4)
    fx, fy = focal
    cx, cy = (w - 1) / 2, (h - 1) / 2
    K = np.float64([[w * fx, 0, cx], [0, h * fy, cy], [0, 0, 1]])

    pts1, pts2, keypoints, descriptors = match_images(imgLeft,
                                                      imgRight,
                                                      feature + '-flann',
                                                      asift=asift,
                                                      n=count,
                                                      homography=homography)
    # if len(keypoints) < kp_stereo_match_threshold:
    #     raise RuntimeError('stereo pair has %d matched keypoints, fewer than %d' % (len(keypoints), kp_stereo_match_threshold))

    if config.myself:
        points3d = calculate_points3d(K, R, T, pts1, pts2)
    else:
        if config.correct:
            E, mask = cv2.findEssentialMat(pts1, pts2, K)
            retval, R, t, mask = cv2.recoverPose(E, pts1, pts2, K)

            nv = np.matrix(R) * np.float64([0, 0, 1]).reshape(3, 1)
            yaw = np.arctan2(nv[0], nv[2])
            print("自动校正得到的相机水平偏转角度为 %8.2f" % (yaw[0] / math.pi * 180))

            nv = np.matrix(R) * np.float64([0, 1, 0]).reshape(3, 1)
            pitch = np.arctan2(nv[2], nv[1])
            print("自动校正得到的相机仰角为 %8.2f" % (pitch[0] / math.pi * 180))

            nv = np.matrix(R) * np.float64([1, 0, 0]).reshape(3, 1)
            roll = np.arctan2(nv[1], nv[0])
            print("自动校正得到的相机旋转角度为 %8.2f" % (roll[0] / math.pi * 180))

        distCoeffs = np.zeros(4)
        P1, P2, Q = stereo_rectify(K, distCoeffs, K, distCoeffs, (h, w), R, T)
        # print (h, w)
        # print ('R:', R)
        # print ('T:', T)
        # print ('K:', K)
        # print ('P1:', P1)
        # print ('P2:', P2)
        points3d = triangulate_points(P1, P2, pts1.T, pts2.T)
    return keypoints, points3d
Example #13
def Motion_detect(Image1, Image2, Height, K):
    '''
    Image1: image 1 
    Image2: image 2
    Height: Height of ego car in metres
    K: 3x3 camera intrinsic matrix

    '''

    orb = cv2.ORB_create()
    Keypoints1, Descriptors1 = orb.detectAndCompute(Image1, None)
    Keypoints2, Descriptors2 = orb.detectAndCompute(Image2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    
    # Match descriptors.
    matches = bf.match(Descriptors1,Descriptors2)
    
    # Sort them in the order of their distance.
    # matches = sorted(matches, key = lambda x:x.distance)
    kp1_list = np.array([])
    kp2_list = np.array([])
    k = 0

    number_of_matches = 3000

    for m in matches:
        img1Idx = m.queryIdx
        img2Idx = m.trainIdx

        (img1x, img1y) = Keypoints1[img1Idx].pt
        (img2x, img2y) = Keypoints2[img2Idx].pt

        if k == 0:
            kp1_list = [[img1x,img1y,1]]
            kp2_list = [[img2x,img2y,1]]
            k = 1
        else:
            kp1_list = np.append(kp1_list,[[img1x,img1y,1]],axis = 0)
            kp2_list = np.append(kp2_list,[[img2x,img2y,1]],axis = 0)
            k+=1
        if k == number_of_matches:
            break

    F = cv2.findFundamentalMat(kp1_list[:,0:2], kp2_list[:,0:2], method = cv2.FM_RANSAC)
    E = K.T @ F[0] @ K

    # drawMatches needs the original KeyPoint lists, not the coordinate arrays
    Image3 = cv2.drawMatches(Image1, Keypoints1, Image2, Keypoints2, matches[:10], None)
    plt.imshow(Image3)
    plt.show()
    Transformation_info = cv2.recoverPose(E, kp1_list[:,0:2], kp2_list[:,0:2], K)
    Rotation = Transformation_info[1]
    Translation = Transformation_info[2]  # unit-norm direction; scale is not observable from E alone

    return Rotation, Translation
Example #14
    def getRT(self, pts1, pts2):

        E = self.K.T @ self.F @ self.K
        _, R, T, mask = cv2.recoverPose(E, pts1, pts2, self.K)

        ### TODO : test

        pts1 = pts1[mask[:, 0] != 0]  # recoverPose marks cheirality inliers as 255, not 1
        pts2 = pts2[mask[:, 0] != 0]
        return R, T
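Mask conventions differ between the two calls used throughout these examples: findEssentialMat marks RANSAC inliers with 1, while recoverPose marks points passing the cheirality check with 255, which is why the != 0 test above is the robust choice. A tiny illustration:

import numpy as np

mask = np.array([[0], [255], [1]], dtype=np.uint8)  # mixed example values
inlier = mask.ravel() != 0                          # True for both conventions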
Example #15
    def findDecomposedEssentialMatrix(self, p1, p2):
        # fundamental matrix and inliers
        # F, mask = cv.findFundamentalMat(p1, p2, cv.FM_LMEDS, 1, 0.999)
        F, mask = cv.findFundamentalMat(p1, p2, cv.FM_RANSAC, REPROJ_ERROR, RANSAC_CONFIDENCE, RANSAC_MAXITER)
        mask = mask.astype(bool).flatten()
        E = np.dot(self.K.T, np.dot(F, self.K))

        _, R, t, _ = cv.recoverPose(E, p1[mask], p2[mask], self.K)

        return R, t, p1[mask], p2[mask], mask
Example #16
	def processFrame(self, frame_id):
		self.px_ref, self.px_cur = featureTracking_(self.last_frame, self.new_frame)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		self.cur_t = self.cur_t + self.cur_R.dot(t) 
		self.cur_R = R.dot(self.cur_R)
		# self.cur_t =t
		# self.cur_R = R

		self.px_ref = self.px_cur
Example #17
def FindRTOpt(e_mat, last_frame, cur_frame, camera):
    last_feature = last_frame.feature_2d
    cur_feature = cur_frame.feature_2d

    _, R, t, mask = cv2.recoverPose(e_mat,
                                    cur_feature,
                                    last_feature,
                                    focal=camera.fx,
                                    pp=(camera.u, camera.v))
    return R, t
Example #18
def update_motion(points1, points2, Rp, Tp, scale=1.0):
    p1 = np.reshape(np.array(points1, dtype=np.float32), (-1, 1, 2))
    p2 = np.reshape(np.array(points2, dtype=np.float32), (-1, 1, 2))
    E, mask = cv2.findEssentialMat(p1, p2,
                                   cameraMatrix=cam_mat, method=cv2.RANSAC, prob=0.999, threshold=1.0, mask=None)
    points, R, T, newmask = cv2.recoverPose(E, p1, p2, cameraMatrix=cam_mat, mask=mask)

    Tp = Tp + np.dot(R, T)*scale
    Rp = np.dot(R, Rp)
    return Rp, Tp
Example #19
def by_ransac(p0, p1, K):
    focal = K[0, 0]
    pp = K[0, 2], K[1, 2]
    E, mask = cv2.findEssentialMat(p0, p1, focal, pp, cv2.RANSAC, 0.999,
                                   1.0)
    retval, R, t, mask = cv2.recoverPose(E, p0, p1, focal=focal, pp=pp)
    Rt = np.eye(4)
    Rt[:3, :3] = R
    Rt[:3, 3] = t.T
    return Rt
Example #20
    def process_frame(self, img, img_index):
        # keypoints extraction and descriptors computation
        # key_points, descriptors = self.kpe.extract(img)
        key_points = self.kpe.extract_fast(img)
        print("keypoints: {}".format(len(key_points)))

        good_matches = None
        if self.prev_frame_extracts is not None and self.prev_frame is not None:
            # matching
            # good_matches = self.match(key_points, self.prev_frame_extracts['key_points'],
            #            descriptors, self.prev_frame_extracts['descriptors'])
            good_matches = self.track(self.prev_frame, img,
                                      self.prev_frame_extracts['key_points'])

            # filtering
            good_matches = np.array(good_matches)
            model, inliers = ransac((good_matches[:, 0], good_matches[:, 1]),
                                    FundamentalMatrixTransform,
                                    min_samples=8,
                                    residual_threshold=1,
                                    max_trials=100)
            good_matches = good_matches[inliers]
            print("Number of keypoints after filtering: {}".format(
                len(good_matches)))

            self.F = model.params
            self.E = self._E_from_F(self.F)
            # self.E, mask = cv2.findEssentialMat(good_matches[:, :, 0], good_matches[:, :, 1],
            #                                     focal=self.fx, pp=(self.cx, self.cy), method=cv2.RANSAC,
            #                                     prob=0.999, threshold=1.0)

            # pose estimation
            # TODO wtf is going on if write [:, :, 0] instead of [:, 0]??????
            _, R, t, mask = cv2.recoverPose(self.E,
                                            good_matches[:, 0],
                                            good_matches[:, 1],
                                            focal=self.fx,
                                            pp=(self.cx, self.cy))

            absolute_scale = self.getAbsoluteScale(img_index)
            if absolute_scale > 0.1:
                self.t = self.t + absolute_scale * self.R.dot(t.reshape(3))
                self.R = R.dot(self.R)
                # self.t = self.t + self.R.dot(t.reshape(3))
                # self.R = R.dot(self.R)

        # self.prev_frame_extracts = {'key_points': key_points, 'descriptors': descriptors}
        self.prev_frame_extracts = {'key_points': key_points}
        self.prev_frame = img

        if img_index == 0:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if good_matches is not None:
            img = self.draw_matches(img, good_matches)
        return img, self.t
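The _E_from_F helper isn't shown; under the standard relation E = Kᵀ F K it reduces to a few lines. A sketch assuming the class exposes fx, cx and cy (square pixels, so fy = fx):

import numpy as np

def _E_from_F(self, F):
    # E = K^T F K, with K assembled from the class intrinsics
    K = np.array([[self.fx, 0., self.cx],
                  [0., self.fx, self.cy],
                  [0., 0., 1.]])
    return K.T @ F @ K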
Example #21
    def solveR(self):
        # Read current frame
        cur_frame = self.cur_frame
        ref_frame = self.ref_zero
        #ref_frame = self.map.popFrame()

        # Calculate matched frame
        matches = matchBF(cur_frame.descriptors, ref_frame.descriptors)
        cur_kp, ref_kp = convertKeypoint(cur_frame.keypoints,
                                         ref_frame.keypoints, matches)
        K = np.array(cur_frame.getK()).reshape([3, 3])
        #print len(cur_kp), len(ref_kp)
        rospy.loginfo("[%s]: current matched number: [%s]" %
                      (self.node_name, len(ref_kp)))
        focal, pp = cur_frame.getFocalPp()
        #print focal, pp

        # Find Essential Matrix
        #E, _ = cv2.findEssentialMat(cur_kp, ref_kp, K, cv2.RANSAC, threshould=1.0)
        #E, _ = cv2.findEssentialMat(cur_kp, ref_kp, focal[0], pp, cv2.RANSAC, threshold=2.0)
        E, _ = cv2.findEssentialMat(ref_kp,
                                    cur_kp,
                                    focal[0],
                                    pp,
                                    cv2.RANSAC,
                                    threshold=0.4,
                                    prob=0.999)

        # Recover [R, t] from Essential matrix
        #_, R, t, mask = cv2.recoverPose(E, cur_kp, ref_kp, K, 2.0)
        _, R, t, mask = cv2.recoverPose(E, ref_kp, cur_kp, K, 0.4)

        dR = R - np.eye(3, dtype=float)
        d = np.sum(dR * dR)
        #print "dR: ", d
        if d > 5:
            #self.map.pushFrame(cur_frame)
            #raise Exception()
            return
        dt = t - np.array([0, 0, 0])
        d = np.sum(dt * dt)
        #print "dt: ", d
        if d > 5:
            #raise Exception()
            return

        pre_R, pre_t = ref_frame.getRt()
        cur_R = np.dot(pre_R, R)
        cur_t = pre_t + t
        rospy.loginfo("current rotation [%s]" % (cur_R))
        x = cur_R[0, 0]
        y = cur_R[0, 1]
        z = complex(x, y)
        angle = np.angle(z, deg=True)
        rospy.loginfo("current angle [%s]" % (angle))
Example #22
def process_img_pair(img1, kp1, des1, img2, kp2, des2, f):
    h, w, _ = img1.shape
    good = compute_matches(des1, des2)
    u1 = []
    u2 = []
    matched_kps = []
    for m in good:
        k1 = kp1[m.queryIdx]
        d1 = des1[m.queryIdx]
        k2 = kp2[m.trainIdx]
        d2 = des2[m.trainIdx]
        matched_kps.append((KeyPoint(k1, d1), KeyPoint(k2, d2)))
        u1.append(k1.pt)
        u2.append(k2.pt)
    # u,v coords of keypoints in images
    u1 = np.array(u1)
    u2 = np.array(u2)
    # Make homogeneous
    u1 = np.c_[u1, np.ones(u1.shape[0])]
    u2 = np.c_[u2, np.ones(u2.shape[0])]

    cu = w // 2
    cv = h // 2
    # Camera matrix
    K_cam = np.array([[f, 0, cu], [0, f, cv], [0, 0, 1]])
    K_inv = np.linalg.inv(K_cam)
    # Generalized image coords
    x1 = u1 @ K_inv.T
    x2 = u2 @ K_inv.T
    # Compute essential matrix with RANSAC
    E, inliers = cv2.findEssentialMat(x1[:, :2],
                                      x2[:, :2],
                                      np.eye(3),
                                      method=cv2.RANSAC,
                                      threshold=1e-3)
    inliers = inliers.ravel().astype(bool)

    n_in, R, t, _ = cv2.recoverPose(E, x1[inliers, :2], x2[inliers, :2])
    # P_i = [R|t] with first image considered canonical
    P_1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
    P_2 = np.hstack((R, t))
    matched_kps = [k for i, k in enumerate(matched_kps) if inliers[i]]
    return ImgPair(
        img1,
        img2,
        matched_kps,
        u1[inliers],
        u2[inliers],
        x1[inliers],
        x2[inliers],
        E,
        K_cam,
        P_1,
        P_2,
    )
Example #23
    def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
        f_avg = (K1[0, 0] + K2[0, 0]) / 2
        pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)

        pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
        pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)

        E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                       method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
        points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
        return mask[:, 0].astype(bool), R_est, t_est
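A synthetic smoke test for the routine above (assuming it is callable as a plain function): project random 3D points through two known cameras and check that the recovered rotation matches the ground truth.

import cv2
import numpy as np

rng = np.random.default_rng(0)
K = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
rvec_true = np.array([0., 0.1, 0.])
R_true, _ = cv2.Rodrigues(rvec_true)
t_true = np.array([[1.], [0.], [0.]])

X = rng.uniform([-2., -2., 4.], [2., 2., 8.], (100, 3))  # points in front of both cameras
p1, _ = cv2.projectPoints(X, np.zeros(3), np.zeros(3), K, None)
p2, _ = cv2.projectPoints(X, rvec_true, t_true, K, None)

mask, R_est, t_est = estimate_relative_pose_from_correspondence(
    p1.reshape(-1, 2), p2.reshape(-1, 2), K, K)
assert np.allclose(R_est, R_true, atol=1e-4)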
Example #24
def decomposeEssentialMat(Essential_matrix):
	"""
	:param Essential_matrix: 3x3 essential matrix
	:return: the two candidate rotations R1, R2 and the translation direction t
	"""
	U, Sigma, Vt = linalg.svd(Essential_matrix)
	W = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
	OutputArray_R1 = U @ W @ Vt
	OutputArray_R2 = U @ W.T @ Vt
	if np.linalg.det(OutputArray_R1) < 0:  # make both candidates proper rotations
		OutputArray_R1, OutputArray_R2 = -OutputArray_R1, -OutputArray_R2
	t = U[:, 2] * 1.0
	return OutputArray_R1, OutputArray_R2, t
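A self-contained check of the decomposition above (it relies on the numpy/linalg imports used there) against a synthetic essential matrix E = [t]× R built from a known pose; one of R1/R2 must match the true rotation, and t matches up to sign:

import cv2
import numpy as np

R_true, _ = cv2.Rodrigues(np.array([0.05, -0.1, 0.2]))
t_true = np.array([0.5, -0.2, 1.0])
t_true /= np.linalg.norm(t_true)                 # E fixes t only up to scale
tx = np.array([[0., -t_true[2], t_true[1]],
               [t_true[2], 0., -t_true[0]],
               [-t_true[1], t_true[0], 0.]])
E = tx @ R_true

R1, R2, t = decomposeEssentialMat(E)
assert any(np.allclose(Rx, R_true, atol=1e-6) for Rx in (R1, R2))
assert np.allclose(np.abs(t), np.abs(t_true), atol=1e-6)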
Example #25
 def calc_essential_matrix(self, save=False):
     """
     Calculate Essential matrix between first two views.
     :param save: Save essential matrix as binary file using Numpy.
     """
     self.essential_mat, _ = cv.findEssentialMat(self.X1, self.X2, self.K,
                                                 cv.RANSAC, 0.999, 1.0)
     _, self.view2.rotation, self.view2.translation, _ = cv.recoverPose(
         self.essential_mat, self.X1, self.X2, self.K)
     if save:
         np.savez('essential_matrix', E=self.essential_mat)
Example #26
def get_camera_poses(E, p1F, p2F, K):
    # calculate rotation and translation from one pose to the other
    point, R, t, mask = cv2.recoverPose(E, p1F, p2F, K)
    # Create Null mat for camera pose used as starting point
    mat1 = np.hstack((np.eye(3, 3), np.zeros((3, 1))))
    # Create mat with rotation and translation from starting point (Null mat)
    mat2 = np.hstack((R, t))
    # Create the poses by multiplying with camera matrix, ie. the intrinsic parameters for camera
    pose1 = K @ mat1
    pose2 = K @ mat2
    return (pose1, pose2)
Example #27
	def processFrame(self, frame_id):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		absolute_scale = self.getAbsoluteScale(frame_id)
		if(absolute_scale > 0.1):
			self.cur_t = self.cur_t + absolute_scale*self.cur_R.dot(t) 
			self.cur_R = R.dot(self.cur_R)
		if(self.px_ref.shape[0] < kMinNumFeature):
			self.px_cur = self.detector.detect(self.new_frame)
			self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
		self.px_ref = self.px_cur
Example #28
    def findDecomposedEssentialMatrix(self, p1, p2):
        # fundamental matrix and inliers
        F, mask = cv2.findFundamentalMat(p1, p2, cv2.FM_LMEDS, 1, 0.999)
        #F, mask = cv.findFundamentalMat(p1, p2, cv.FM_RANSAC, 1.0, 0.999)
        mask = mask.astype(bool).flatten()
        E = np.dot(self.camera_matrix_left.T,
                   np.dot(F, self.camera_matrix_right))

        _, R, t, _ = cv2.recoverPose(E, p1[mask], p2[mask],
                                     self.camera_matrix_right)

        return R, t
Example #29
def image_process(H_init, p_0, all_images):
    data_points = []

    for index in range(20, len(all_images) - 1):
        print("image_number = ", index)

        img1 = cv2.imread("stereo/centre/" + str(all_images[index]),
                          0)  #loading first image in greyscale
        img2 = cv2.imread("stereo/centre/" + str(all_images[index + 1]),
                          0)  #loading second image in greyscale

        colorimage1 = cv2.cvtColor(
            img1,
            cv2.COLOR_BayerGR2BGR)  #converting first image from BayerGR to BGR
        colorimage2 = cv2.cvtColor(
            img2, cv2.COLOR_BayerGR2BGR
        )  #converting second image from BayerGR to BGR

        undistortedimage1 = UndistortImage(
            colorimage1, LUT)  #undistorting first image using LUT
        undistortedimage2 = UndistortImage(
            colorimage2, LUT)  #undistorting second image using LUT

        gray1 = cv2.cvtColor(undistortedimage1,
                             cv2.COLOR_BGR2GRAY)  #converting back to grayscale
        gray2 = cv2.cvtColor(undistortedimage2,
                             cv2.COLOR_BGR2GRAY)  #converting back to grayscale

        #  Finding proper matches between two frames
        features1, features2 = SIFT_matches(gray1, gray2)

        #using inbuilt function to directly compute F_matrix after RANSAC
        FinalFundamentalMatrix, m = cv2.findFundamentalMat(
            features1, features2, cv2.FM_RANSAC)
        features1 = features1[m.ravel() == 1]
        features2 = features2[m.ravel() == 1]

        # Calculating the Essential matrix
        E_matrix = K.T @ FinalFundamentalMatrix @ K

        # Obtaining final Rotational and Translation pose for cameras
        retval, rot_mat, T, mask = cv2.recoverPose(E_matrix, features1,
                                                   features2, K)

        H_init = H_init @ H_matrix(
            rot_mat, T
        )  #accumulate the new [R|t] (as a 4x4 H_matrix) into H_init, which starts as the 4x4 identity
        p_projection = H_init @ p_0  #new projection point calculated

        data_points.append([p_projection[0][0], -p_projection[2][0]])

        plt.scatter(p_projection[0][0], -p_projection[2][0], color='g')
    return data_points
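The H_matrix helper isn't shown in the snippet; a minimal version consistent with how it is used (building a 4x4 homogeneous transform from recoverPose's R and t) might look like this:

import numpy as np

def H_matrix(rot_mat, T):
    # 4x4 homogeneous transform from a 3x3 rotation and a translation vector
    H = np.eye(4)
    H[:3, :3] = rot_mat
    H[:3, 3] = np.ravel(T)
    return H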
Example #31
    def compute_pose(self, p1, p2, E):
        """
        p1, p2: only the 2d points in the respective images
        pass ratio test. These points should correspond to each other.
        E: Essential matrix
        Outputs:
        R, trans: Rotation, Translation vectors

        Hint: recoverPose
        """
        retval, R, t, mask = cv2.recoverPose(E, p1, p2, self.intrinsic)
        return R, t
Example #32
    def two_frame_sfm(self, name1, name2, id1, id2):
        with open(self.data_path + name1, "rb") as fp:  # Unpickling
            point_list1 = pickle.load(fp)
        with open(self.data_path + name2, "rb") as fp:  # Unpickling
            point_list2 = pickle.load(fp)

        points1_0 = np.asarray(point_list1)
        points2_0 = np.asarray(point_list2)

        bad_indices1 = np.where(~points1_0.any(axis=1))[0]
        bad_indices2 = np.where(~points2_0.any(axis=1))[0]

        # Save for every led all its locations (pixels)
        if self.first_frame:
            self.first_frame = False
            for i in np.where(points1_0.any(axis=1))[0]:
                self.led_graph[self.led_code_list[i]].add(
                    (points1_0[i, 0], points1_0[i, 1]), id1)
        for i in np.where(points2_0.any(axis=1))[0]:
            self.led_graph[self.led_code_list[i]].add(
                (points2_0[i, 0], points2_0[i, 1]), id2)

        bad_indices = list(bad_indices1)
        bad_indices.extend(x for x in bad_indices2 if x not in bad_indices)
        good_indices = [
            i for i in range(points1_0.shape[0]) if i not in bad_indices
        ]

        good_code_names = [self.led_code_list[i] for i in good_indices]
        print("good_code_names: ", good_code_names)
        points1 = points1_0[good_indices, :]
        points2 = points2_0[good_indices, :]
        E, mask = cv.findEssentialMat(points1,
                                      points2,
                                      self.mtx,
                                      method=cv.RANSAC,
                                      prob=0.999,
                                      threshold=1.0)
        # E = E[:3,:3]
        print(E)
        retval, R, t, mask, triangulatedPoints = cv.recoverPose(
            E, points1, points2, self.mtx, distanceThresh=50)
        camera_two = -R @ t

        triangulatedPoints = triangulatedPoints[0:3, :] / triangulatedPoints[
            3, :]
        frame_pc = PC(triangulatedPoints, good_code_names)
        frame_pose = RelativePose(R, t, id1, id2)

        if abs(np.linalg.det(R) - 1) > 1e-7:
            print(f'error in R matrix:{R}')

        return frame_pc, frame_pose
Example #33
def visualOdometry(image):

    global current_rot
    global current_pos
    global im_count
    global points0
    global keypoint
    global image1
    global position_figure
    global prev_image
    global lk_params
    global feature_detector
    global prev_keypoint
    global focal
    global principalPoint
    global position_axes

    # main process
    keypoint = feature_detector.detect(image, None)

    if prev_image is None:
        prev_image = image
        prev_keypoint = keypoint
        return

    points = np.array([[kp.pt] for kp in prev_keypoint], dtype=np.float32)

    p1, st, err = cv2.calcOpticalFlowPyrLK(prev_image, image, points, None,
                                           **lk_params)

    E, mask = cv2.findEssentialMat(p1, points, camera_matrix, cv2.RANSAC,
                                   0.999, 1.0, None)

    points, R, t, mask = cv2.recoverPose(E, p1, points, camera_matrix)

    scale = 1.0

    current_pos += current_rot.dot(t) * scale
    current_rot = R.dot(current_rot)

    print(current_pos[0][0], current_pos[2][0])

    position_axes.scatter(current_pos[0][0], current_pos[2][0])
    plt.pause(.01)

    img = cv2.drawKeypoints(image, keypoint, None)

    # cv2.imshow('image', image)
    cv2.imshow('feature', img)
    cv2.waitKey(1)

    prev_image = image
    prev_keypoint = keypoint
Example #34
    def get_pose(self, curr_kpts, prev_kpts, focal, pp):
        # Recover the rotation matrix and the translation vector from the
        # essential matrix E.
        # @param curr_kpts: Keypoints of the current frame (np.array float)
        # @param prev_kpts: Keypoints of the previous frame
        # @param focal: focal length of the camera
        # @param pp: principal point of the camera
        # @param mask: mask , it will store the mask with the points used to
        # recover the pose. Use this mask to filter the 3D points to be used in
        # following operations.
        # @return R: Rotation matrix, to be calculated
        # @return t: translation vector, to be calculated

        points, R, t, mask = cv2.recoverPose(self.E, curr_kpts, prev_kpts,
                                             focal=focal, pp=pp)
        return R, t
Example #35
	def matchingTests(): 
		im_1 = cv2.imread('c1.bmp')
		im_2 = cv2.imread('c2.bmp')

		#k = np.mat(([[ 683.39404297,    0.        ,  267.21336591], [   0.        ,  684.3449707 ,  218.56421036],  [   0.        ,    0.        ,    1.        ]]))

		k = clsReconstruction.loadData('k_cam_hp.dat')
		#resize, if necessary
		#im_1 = cv2.resize(im_1,None,fx=0.3, fy=0.3, interpolation = cv2.INTER_CUBIC)
		#im_2 = cv2.resize(im_2,None,fx=0.3, fy=0.3, interpolation = cv2.INTER_CUBIC)

		#convert to gray
		im_1 = cv2.cvtColor(im_1, cv2.COLOR_BGR2GRAY)
		im_2 = cv2.cvtColor(im_2, cv2.COLOR_BGR2GRAY)


		#proceed with sparse feature matching
		orb = cv2.ORB_create()
		
		kp_1, des_1 = orb.detectAndCompute(im_1,None)
		kp_2, des_2 = orb.detectAndCompute(im_2,None)

		bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

		matches = bf.match(des_1,des_2)
		
		matches = sorted(matches, key = lambda x:x.distance)

		draw_params = dict(matchColor = (20,20,20), singlePointColor = (200,200,200),
							matchesMask = None,
							flags = 0)

		
		im_3 = cv2.drawMatches(im_1,kp_1,im_2,kp_2,matches[0:20], None, **draw_params)
		
		
		#select points to evaluate the fundamental matrix
		pts1 = []
		pts2 = []
		idx =  matches[1:20]

		for i in idx:
			pts1.append(kp_1[i.queryIdx].pt)
			pts2.append(kp_2[i.trainIdx].pt)
	

		
		pts1 = np.array(pts1)
		pts2 = np.array(pts2)

		#creating homogeneous coordinates
		pones = np.ones((1,len(pts1))).T

		pth_1 = np.hstack((pts1,pones))
		pth_2 = np.hstack((pts2,pones))

		k = np.array(k)
		ki = np.linalg.inv(k)
		#normalize the points
		pthn_1 = []
		pthn_2 = []

		for i in range(0,len(pts1)):
			pthn_1.append((np.mat(ki) * np.mat(pth_1[i]).T).transpose())
			pthn_2.append((np.mat(ki) * np.mat(pth_2[i]).T).transpose())

		ptn1 = []
		ptn2 = []
		for i in range(0,len(pts1)):
			ptn1.append([pthn_1[i][0,0],pthn_1[i][0,1]])
			ptn2.append([pthn_2[i][0,0],pthn_2[i][0,1]])

		ptn1 = np.array(ptn1)
		ptn2 = np.array(ptn2)
		#evaluate the essential matrix using the camera matrix (from the original pixel points)
		E, mask0 = cv2.findEssentialMat(pts1,pts2,k,cv2.RANSAC)
		#evaluate the fundamental matrix (also from the pixel points)
		F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)

		E = np.mat(E)
		F = np.mat(F)


		retval, R, t, mask1 = cv2.recoverPose(E,ptn1,ptn2)
		
		#selecting only inlier points
		ptn1 = ptn1[mask.ravel() == 1]
		ptn2 = ptn2[mask.ravel() == 1]

		# Find epilines corresponding to points in right image (second image) and
		# drawing its lines on left image
		lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F)
		lines1 = lines1.reshape(-1,3)
		img5,img6 = clsReconstruction.drawlines(im_1,im_2,lines1,pts1,pts2)
		# Find epilines corresponding to points in left image (first image) and
		# drawing its lines on right image
		lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F)
		lines2 = lines2.reshape(-1,3)
		img3,img4 = clsReconstruction.drawlines(im_2,im_1,lines2,pts2,pts1)
		plt.subplot(131),plt.imshow(img5)
		plt.subplot(132),plt.imshow(img3)
		plt.subplot(133),plt.imshow(im_3)

		plt.show()
Example #36
	def sparceRecostructionTrueCase(file1,file2,kdef):

		k = np.mat(clsReconstruction.loadData(kdef))
		ki = np.linalg.inv(k)

		im_1 = cv2.imread(file1)
		im_2 = cv2.imread(file2)

		im_b1 = cv2.cvtColor(im_1,cv2.COLOR_RGB2GRAY)
		im_b2 = cv2.cvtColor(im_2,cv2.COLOR_RGB2GRAY)

		myC1 = Camera.myCamera(k)
		myC2 = Camera.myCamera(k)


		#place camera 1 at origin
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])


		#return matching points
		Xp_1, Xp_2 = clsReconstruction.getMatchingPoints(file1,file2,kdef,30)


		#evaluate the essential matrix using the camera parameters (from the original points)
		E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.RANSAC)


		#evaluate the fundamental matrix to get the epipolar lines
		#since we already know the camera intrinsics, it is better to derive F from E than from the eight-point routine
		F = ki.T*np.mat(E)*ki

		 
		#retrieve R and t from E
		retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)
		

		#place camera 2
		myC2.projectiveMatrix(np.mat(t),R)


		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,F,im_1,im_2)


		#triangulate points
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T


		#make them euclidian
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		#evaluate reprojection
		Xh_Reprojection_1 = myC1.project(Str_4D)
		Xh_Reprojection_2 = myC2.project(Str_4D)


		#three ways to carry out the bundle adjustment; I am using R, t and K as parameters. Using the points
		#as well is too time consuming, although the results are much better.
		#nR,nt, R0, R1 = clsReconstruction.bundleAdjustment(Str_4D,Xp_1,Xp_2,k,R,t)
		#Str_4D, nR,nt, R0, R1 = clsReconstruction.bundleAdjustmentwithX(Str_4D,Xp_1,Xp_2,k,R,t)	#### not working right now... 

		nk, nR, nt, R0, R1 = clsReconstruction.bundleAdjustmentwithK(Str_4D,Xp_1,Xp_2,k,R,t)
		print('old value {0:.3f}, optimized pose: {1:.3f} \n'.format(R0,R1))
		nki = np.linalg.inv(nk)


		#reevaluate essential and fundamental matrixes
		nE = clsReconstruction.skew(nt)*np.mat(nR)
		nF = nki.T*np.mat(nE)*nki


		#if we use the 3rd option, we must reinitialize the cameras and the essential matrix, since the projective matrix will change
		myC1 = Camera.myCamera(nk)
		myC2 = Camera.myCamera(nk)
		myC1.projectiveMatrix(np.mat([0,0,0]).transpose(),[0, 0, 0])



		#reevaluate all variables based on new values of nR and nt
		myC2.projectiveMatrix(np.mat(nt),nR)
		Str_4D = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2]).T
		Str_3D = cv2.convertPointsFromHomogeneous(Str_4D).reshape(-1,3)


		
		#Camera.myCamera.show3Dplot(Str_3D)
		Xh_Opt_1 = myC1.project(Str_4D)#.reshape(-1,2)
		Xh_Opt_2 = myC2.project(Str_4D)#.reshape(-1,2)


		#POSSIBLE IMPLEMENTATION: find residuals bigger than a threshold value and optimize their location in R3

		#clsReconstruction.drawEpipolarLines(Xp_1,Xp_2,nF,im_b1,im_b2)

		im = clsReconstruction.drawPoints(im_1,Xp_1,(50,50,250))
		im = clsReconstruction.drawPoints(im,Xh_Reprojection_1,(50,150,100))
		im = clsReconstruction.drawPoints(im,Xh_Opt_1,(250,250,50))

		im2 = clsReconstruction.drawPoints(im_2,Xp_2,(50,50,250))
		im2 = clsReconstruction.drawPoints(im2,Xh_Reprojection_2,(50,150,100))
		im2 = clsReconstruction.drawPoints(im2,Xh_Opt_2,(250,250,50))


		cv2.imshow("im",im)
		cv2.imshow("im2",im2)
		cv2.waitKey(0)
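clsReconstruction.skew, used above to rebuild nE = [nt]× nR, isn't shown; the standard cross-product (skew-symmetric) matrix it presumably implements:

import numpy as np

def skew(v):
    # cross-product matrix: skew(v) @ u == np.cross(v, u)
    v = np.asarray(v).ravel()
    return np.array([[0., -v[2], v[1]],
                     [v[2], 0., -v[0]],
                     [-v[1], v[0], 0.]])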
Example #37
plt.imshow(img3), plt.show()
plt.savefig("newimg.png")

new_pts1 = numpy.int32(points1)
new_pts2 = numpy.int32(points2)

focalLength = images[0].k[0][0]
img1 = images[0]
img2 = images[1]

E, mask = cv2.findEssentialMat(new_pts1, new_pts2, focal=focalLength)

print "E"
print E
#
points, r, t, newMask = cv2.recoverPose(E, new_pts1, new_pts2, mask=mask)
# print points
print "E-R"
print r
print "E-T"
print t

F1, mask = cv2.findFundamentalMat(new_pts1, new_pts2, method=cv2.FM_8POINT)
F2, mask = cv2.findFundamentalMat(new_pts1, new_pts2)
F = F2
print "F1"
print F1

points, r, t, newMask = cv2.recoverPose(img1.k.transpose().dot(F1).dot(img2.k), new_pts1, new_pts2, mask=mask)
# print points
print "F1-R"
Example #38
	def grab(self):

		if not self.cam:
			print('Error: camera not setup, run init() first')
			return Msg.Odom()

		try:
			# get values from last run
			pp = self.pp
			focal = self.focal
			p0 = self.p0
			R = self.R
			t = self.t
			R_f = self.R_f
			t_f = self.t_f
			# try this???
			# R = R_f.copy()
			# t = np.array([0, 0, 0], dtype=np.float64)
			R = self.R
			t = self.t
			old_im = self.old_im

			ret, raw = self.cam.read()

			# end of video
			if not ret:
				print('video end')
				draw(save_pts)
				# break
				exit()

			################################################
			# only for development ... delete!
			# roi = (0,479,210,639)
			# im = raw[roi[0]:roi[1],roi[2]:roi[3]]
			im = raw
			################################################

			if ret:
				cv2.imshow('debug', im)
				cv2.waitKey(1)

			# Not enough old points, p0 ... find new ones
			if p0.shape[0] < 50:
				print('------- reset --------')
				p0 = self.featureDetection(old_im)  # old_im instead?
				if p0.shape[0] == 0:
					print('bad image')
					# continue
					return

			# p0 - old pts
			# p1 - new pts
			p0, p1 = self.featureTrack(im, old_im, p0)

			# not enough new points p1 ... bad image?
			if p1.shape[0] < 50:
				print('------- reset p1 --------')
				print('p1 size:', p1.shape)
				self.old_im = im
				self.p0 = p1
				# continue
				return

			# drawKeyPoints(im, p1)

			# since these are rectified images, fundamental (F) = essential (E)
			# E, mask = cv2.findEssentialMat(p0,p1,focal,pp,cv2.FM_RANSAC)
			# retval, R, t, mask = cv2.recoverPose(E,p0,p1,R_f,t_f,focal,pp,mask)

			E, mask = cv2.findEssentialMat(p0, p1, focal, pp, cv2.RANSAC, 0.999, 1.0)
			retval, R, t, mask = cv2.recoverPose(E, p0, p1, focal=focal, pp=pp, mask=mask)
			# print retval,R

			# Now update the previous frame and previous points
			# self.old_im = im.copy()
			# p0 = p1.reshape(-1,1,2)
			# p0 = p1

			# print 'p0 size',p0.shape
			# print 'p1 size',p1.shape
			# print 't',t
			# dt = t - t_prev
			# scale = np.linalg.norm(dt)
			# print scale
			scale = 1.0

			R_f = R.dot(R_f)
			# t_f = t
			t_f = t_f + scale * R_f.dot(t)

			# t_prev = t
			# t_f = t_f/t_f[2]
			# dist += np.linalg.norm(t_f[:2])

			# num = np.array([t_f[0]/t_f[2],t_f[1]/t_f[2]])
			# num = t_f
			print('position:', t_f)
			# print 'distance:', dist
			# R_f = R*R_f
			# print 'R:',R_f,'t:',t_f
			# print t_f

			save_pts.append(t_f)

			# save all
			self.p0 = p1
			self.R_f = R_f
			self.t_f = t_f
			self.old_im = im.copy()
			self.t = t
			self.R = R

			# create message
			odom = Msg.Odom()
			odom['position']['position']['x'] = t_f[0]
			odom['position']['position']['y'] = t_f[1]
			odom['position']['position']['z'] = t_f[2]

			odom['position']['orientation']['x'] = 0.0  # FIXME: 20160529 do rotation to quaternion conversion
			odom['position']['orientation']['y'] = 0.0
			odom['position']['orientation']['z'] = 0.0
			odom['position']['orientation']['w'] = 1.0

			odom['velocity']['linear']['x'] = 0.0
			odom['velocity']['linear']['y'] = 0.0
			odom['velocity']['linear']['z'] = 0.0

			odom['velocity']['angular']['x'] = 0.0
			odom['velocity']['angular']['y'] = 0.0
			odom['velocity']['angular']['z'] = 0.0

			return odom

		except KeyboardInterrupt:
			print('captured interrupt')
			exit()
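The FIXME in the odom block above asks for a rotation-to-quaternion conversion; a minimal sketch (x, y, z, w ordering to match the message fields, valid while the rotation angle stays away from π so that w > 0):

import numpy as np

def rotation_to_quaternion(R):
    # convert a proper 3x3 rotation matrix to a quaternion (x, y, z, w)
    w = np.sqrt(max(0.0, 1.0 + R[0, 0] + R[1, 1] + R[2, 2])) / 2.0
    x = (R[2, 1] - R[1, 2]) / (4.0 * w)
    y = (R[0, 2] - R[2, 0]) / (4.0 * w)
    z = (R[1, 0] - R[0, 1]) / (4.0 * w)
    return x, y, z, w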
Example #39
    return kp1, kp2

fast = cv2.FastFeatureDetector_create(20)

lk_params = dict(winSize  = (21, 21),
                 maxLevel = 3,
                 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

img1 = cv2.imread('KITTI_VO/00/image_2/%06d.png' % 0)
img2 = cv2.imread('KITTI_VO/00/image_2/%06d.png' % 1)

img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

kp1 = fast.detect(img1)

kp1, kp2 = feature_tracking(img1, img2, kp1)

focal = 718.8560
pp = (607.1928, 185.2157)

E, _ = cv2.findEssentialMat(kp2, kp1, focal=focal, pp=pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
_, R, t, mask = cv2.recoverPose(E, kp2, kp1, focal=focal, pp=pp)
Example #40
	def processSecondFrame(self):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		self.frame_stage = STAGE_DEFAULT_FRAME 
		self.px_ref = self.px_cur
Example #41
#returns the corresponding points
Xp_1, Xp_2 = clsReconstruction.getMathingPoints('b4.jpg','b5.jpg','k_cam_hp.dat')


#evaluate the essential matrix using the camera parameters (from the original points)
E, mask0 = cv2.findEssentialMat(Xp_1,Xp_2,k,cv2.RANSAC)

#evaluate the fundamental matrix (using the normalized points)
#F, mask = cv2.findFundamentalMat(Xp_1,Xp_2,cv2.FM_RANSAC)	
#ki = np.linalg.inv(k)

R1, R2, t = cv2.decomposeEssentialMat(E)



retval, R, t, mask2 = cv2.recoverPose(E,Xp_1,Xp_2)

myC2 = Camera.myCamera(k)
myC2.projectiveMatrix(np.mat(t),R)


Xp_4Dt = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xp_1.transpose()[:2],Xp_2.transpose()[:2])

#Xp_4Dt = cv2.triangulatePoints(myC1.P[:3],myC2.P[:3],Xh_1.transpose()[:2],Xh_2.transpose()[:2])

Xp_4D = Xp_4Dt.T

for i in range(0,len(Xp_4D)):
	Xp_4D[i] /= Xp_4D[i,3]

Xp_3D = Xp_4D[:,0:3]
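The per-point dehomogenizing loop above can be replaced by a single OpenCV call; a self-contained equivalent on toy data:

import cv2
import numpy as np

Xp_4D = np.array([[2., 4., 6., 2.],
                  [1., 0., 3., 1.]])  # toy homogeneous points
Xp_3D = cv2.convertPointsFromHomogeneous(Xp_4D).reshape(-1, 3)
# Xp_3D -> [[1., 2., 3.], [1., 0., 3.]]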