Example #1
def triangulate_points(P1, P2, refined_pts1, refined_pts2):
    '''Reconstructs 3D points by triangulation using Direct Linear Transformation.'''
    # convert to 2xN arrays
    refined_pts1 = refined_pts1[0].T
    refined_pts2 = refined_pts2[0].T

    # pick the P2 matrix with the most scene points in front of the cameras after triangulation
    ind = 0
    maxres = 0

    for i in range(4):
        # triangulate inliers and compute depth for each camera
        homog_3D = cv2.triangulatePoints(P1, P2[i], refined_pts1, refined_pts2)
        # the sign of the depth is the 3rd value of the image point after projecting back to the image
        d1 = np.dot(P1, homog_3D)[2]
        d2 = np.dot(P2[i], homog_3D)[2]
        
        if sum(d1 > 0) + sum(d2 > 0) > maxres:
            maxres = sum(d1 > 0) + sum(d2 > 0)
            ind = i
            infront = (d1 > 0) & (d2 > 0)

    # triangulate inliers and keep only points that are in front of both cameras
    # homog_3D is a 4xN array of reconstructed points in homogeneous coordinates, pts_3D is a Nx3 array
    homog_3D = cv2.triangulatePoints(P1, P2[ind], refined_pts1, refined_pts2)
    homog_3D = homog_3D[:, infront]
    homog_3D = homog_3D / homog_3D[3]
    pts_3D = np.array(homog_3D[:3]).T

    return homog_3D, pts_3D, infront
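The four P2 candidates consumed by the function above typically come from decomposing an essential matrix. As a hedged illustration (not part of the original snippet), the sketch below builds the four [R|t] candidates with the standard SVD construction; the names E and candidate_P2s are assumptions introduced here.

import numpy as np

def candidate_P2s(E):
    # Sketch: the four [R|t] candidates obtained from an essential matrix E (assumption).
    U, _, Vt = np.linalg.svd(E)
    # enforce proper rotations
    if np.linalg.det(U) < 0:
        U = -U
    if np.linalg.det(Vt) < 0:
        Vt = -Vt
    W = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    R_a, R_b = U @ W @ Vt, U @ W.T @ Vt
    t = U[:, 2].reshape(3, 1)
    # the four combinations; triangulate_points() above picks the one with the
    # most points in front of both cameras
    return [np.hstack((R, s * t)) for R in (R_a, R_b) for s in (1., -1.)]
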
def cvTriangulate(pts1, pts2, k, r, t):
    '''
    Takes pts1, pts2, k, r, t
    Returns triangulated points using openCV implementation
    '''
    proj1 = np.array([[1, 0, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, 0]])
    proj2 = np.append(r, t, 1)
    pts1 = np.array(pts1).transpose()
    pts2 = np.array(pts2).transpose()

    homogeneous_4d_coords = cv2.triangulatePoints(proj1, proj2, pts1, pts2)
    # return triangulatePoints(proj1, proj2, pts1, pts2)

    threeD_coords = cv2.convertPointsFromHomogeneous(homogeneous_4d_coords.transpose())

    output_points = []

    # print threeD_coords
    for point in threeD_coords:
        output_points.append((point[0][0], point[0][1], point[0][2]))
        # output_points.append(point[0])
    # for coord in homogeneous_4d_coords:

    return output_points
Example #3
def triangulateCV(KP1, KP2, pts_1, pts_2):

    points4d = cv2.triangulatePoints(KP1, KP2, pts_1.T, pts_2.T)
    points4d = points4d.T
    points3d = convertFromHomogeneous(points4d)
    points3d = points3d.tolist()
    return points3d
def triangulate_points(pt_set1, pt_set2, P, P1):
	my_points = cv2.triangulatePoints(P,P1,pt_set1.T,pt_set2.T)
	projected_points_1 = P.dot(my_points)
	
	# convert to inhomogeneous coordinates
	for i in range(projected_points_1.shape[1]):
		projected_points_1[0,i] /= projected_points_1[2,i]
		projected_points_1[1,i] /= projected_points_1[2,i]
		projected_points_1[2,i] /= projected_points_1[2,i]

	projected_points_2 = P1.dot(my_points)
	# convert to inhomogeneous coordinates
	for i in range(projected_points_2.shape[1]):
		projected_points_2[0,i] /= projected_points_2[2,i]
		projected_points_2[1,i] /= projected_points_2[2,i]
		projected_points_2[2,i] /= projected_points_2[2,i]

	# convert to inhomogeneous coordinates
	for i in range(projected_points_2.shape[1]):
		my_points[0,i] /= my_points[3,i]
		my_points[1,i] /= my_points[3,i]
		my_points[2,i] /= my_points[3,i]
		my_points[3,i] /= my_points[3,i]

	return my_points.T
Example #5
def calculate3DPosition(Point2D1, Point2D2):
    """
    Triangulate a 3D position from 2 2D position from camera
    and each camera projection matrix
    :param camera1:
    :param camera2:
    :param Point2D1:
    :param Point2D2:
    :return:
    """

    xy1 = [Point2D1.get()['x'], Point2D1.get()['y']]
    xy2 = [Point2D2.get()['x'], Point2D2.get()['y']]
#    print str(xy1) + " - " + str(Point2D1)
#    print str(xy2) + " - " + str(Point2D2)
    triangulationOutput = cv2.triangulatePoints(Point2D1.camera.projection_matrix,Point2D2.camera.projection_matrix, np.float32(xy1), np.float32(xy2))

    mypoint1 = np.array([triangulationOutput[0], triangulationOutput[1], triangulationOutput[2]])
    mypoint1 = mypoint1.reshape(-1, 3)
    mypoint1 = np.array([mypoint1])
    P_24x4 = np.resize(Point2D1.camera.projection_matrix[0], (4,4))
    P_24x4[3,0] = 0
    P_24x4[3,1] = 0
    P_24x4[3,2] = 0
    P_24x4[3,3] = 1

    projected = cv2.perspectiveTransform(mypoint1, P_24x4)
    output = triangulationOutput[:-1]/triangulationOutput[-1]
#    print output
    #TODO calculate point again with second proj mat, and calculate middle
    return output
Example #6
def triangulate_features(K, d, train_pts, test_pts, R, T):
    # train_frame, az and new_origin come from the enclosing scope of the original snippet
    R1, R2, P_1, P_2, Q, roi1, roi2 = cv2.stereoRectify(K, d, K, d, train_frame.shape[:2], R, T, alpha=1)
    points1 = np.vstack(train_pts)[:, :-1].T
    #points1 = cv2.convertPointsToHomogeneous(points1)
    points2 = np.vstack(test_pts)[:, :-1].T
    #points2 = cv2.convertPointsToHomogeneous(points2)
    #threeD_points = cv2.triangulatePoints(P1, P2, points1[:2], points2[:2])
    normRshp1 = points1
    normRshp2 = points2

    Tpoints1 = cv2.triangulatePoints(P_1, P_2, normRshp1, normRshp2)
    fin_pts = []
    #print(Tpoints1[0])
    #print(Tpoints1[1])
    #print(Tpoints1[2])
    #print(Tpoints1[3])
    a = (Tpoints1 / Tpoints1[3])[0:3]
    #print(a)
    #print(a.shape)
    a = a.T
    a = a / 100
    print(a)
    max_ax = 0
    max_ay = 0
    max_az = 0
    for zz in range(a.shape[0]):
        if (abs(a[zz][0] - abs(max_ax)) < max_ax + 10) and (abs(a[zz][1] - abs(max_ay)) < max_ay + 10) and (abs(a[zz][2] - abs(max_az)) < max_az + 10):
            if a[zz][0] > abs(max_ax):
                max_ax = a[zz][0]
            if a[zz][1] > abs(max_ay):
                max_ay = a[zz][1]
            if a[zz][2] > abs(max_az):
                max_az = a[zz][2]
            az.scatter(a[zz][0] + new_origin[0], a[zz][1] + new_origin[1], a[zz][2] + new_origin[2], c='g', marker='o')
Example #7
def triangulate(cam_dict):
    IK = np.linalg.inv( proj.cam.get_K() )

    # parallel to match_dict to accumulate point coordinate sums
    sum_dict = {}

    for i, i1 in enumerate(proj.image_list):
        #rvec1, tvec1 = i1.get_proj()
        rvec1 = cam_dict[i1.name]['rvec']
        tvec1 = cam_dict[i1.name]['tvec']
        R1, jac = cv2.Rodrigues(rvec1)
        PROJ1 = np.concatenate((R1, tvec1), axis=1)
        for j, i2 in enumerate(proj.image_list):
            matches = i1.match_list[j]
            if (j <= i) or (len(matches) == 0):
                continue
            #rvec2, tvec2 = i2.get_proj()
            rvec2 = cam_dict[i2.name]['rvec']
            tvec2 = cam_dict[i2.name]['tvec']
            R2, jac = cv2.Rodrigues(rvec2)
            PROJ2 = np.concatenate((R2, tvec2), axis=1)

            uv1 = []; uv2 = []
            for k, pair in enumerate(matches):
                p1 = i1.kp_list[ pair[0] ].pt
                p2 = i2.kp_list[ pair[1] ].pt
                uv1.append( [p1[0], p1[1], 1.0] )
                uv2.append( [p2[0], p2[1], 1.0] )
            pts1 = IK.dot(np.array(uv1).T)
            pts2 = IK.dot(np.array(uv2).T)
            points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])

            points /= points[3]
            #print "points:\n", points[0:3].T

            print "%s vs %s" % (i1.name, i2.name)
            for k, p in enumerate(points[0:3].T):
                match = matches[k]
                key = "%d-%d" % (i, match[0])
                print(key)
                if key in sum_dict:
                    sum_dict[key] += p
                else:
                    sum_dict[key] = p
                pnew = p.tolist()
                print "new=", pnew
                print "1st guess=", matches_dict[key]['ned']
                #surface1.append( [pnew[1], pnew[0], -pnew[2]] )

    # divide each element of sum_dict by the count of pairs to get the
    # average point location (so sum_dict becomes a location_dict)
    for key in matches_dict:
        count = len(matches_dict[key]['pts']) - 1
        sum_dict[key] /= count

    # return the new dictionary.
    return sum_dict
Example #8
def triangulate_observation_pair(matrix_target_loc0, matrix_target_loc1, conf):
    T0, target0_loc = matrix_target_loc0
    T1, target1_loc = matrix_target_loc1
    K1 = conf.intrinsic_camera_matrix()[1]
    projM0 = np.dot(K1, T0[:3])
    projM1 = np.dot(K1, T1[:3])
    point4d = cv2.triangulatePoints(projM0, projM1,
                                    np.array(target0_loc).T,
                                    np.array(target1_loc).T)
    return (point4d[:3] / point4d[3]).T
Example #9
def compute3Dpoints(P1, P2, npts1, npts2):
    # computes object point coordinates from photo points correspondence using DLT method 
    ptsh3d = cv2.triangulatePoints(P1, P2, npts1.T, npts2.T).T

    pts_sps = deepcopy(ptsh3d[:,:3])
    for i, pt in enumerate(ptsh3d):
        pt = (pt / pt[3])[:3]
        pts_sps[i, :] = pt[:3]

    return pts_sps
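A minimal, self-contained check of compute3Dpoints() above (the original excerpt also assumes from copy import deepcopy); the two normalized cameras and the world points below are made up for illustration.

import numpy as np
import cv2
from copy import deepcopy  # assumed by the snippet above

P1 = np.hstack((np.eye(3), np.zeros((3, 1))))
P2 = np.hstack((np.eye(3), np.array([[-1.0], [0.0], [0.0]])))
X = np.array([[0.0, 0.0, 5.0], [1.0, -0.5, 7.0]])           # world points
npts1 = P1 @ np.c_[X, np.ones(2)].T
npts1 = (npts1[:2] / npts1[2]).T                             # Nx2 image points
npts2 = P2 @ np.c_[X, np.ones(2)].T
npts2 = (npts2[:2] / npts2[2]).T
print(compute3Dpoints(P1, P2, npts1, npts2))                 # should be close to X
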
Example #10
    def _triangulate(self, projMat1, projMat2, imgPts1, imgPts2):
        p4d = cv2.triangulatePoints(projMat1, projMat2, imgPts1, imgPts2)

        for i in range(0, p4d.shape[1]):
            p4d[0][i] /= p4d[3][i]
            p4d[1][i] /= p4d[3][i]
            p4d[2][i] /= p4d[3][i]
            p4d[3][i] /= p4d[3][i]

        return p4d
Example #11
    def test_triangulation_gdpts(self, P, x0, x1, cam=None ):
        # TRIANGULATE POINTS
        if cam is not None:
            X = cv2.triangulatePoints(self.PI(cam), self.Pdot(P,cam), x0[:2], x1[:2])
            X /= X[3]

            # TEST REPROJECTION TO IMAGE PLANE
            x0 = dot(self.PI(cam), X)[2] > 0.
            x1 = dot(self.Pdot(P,cam), X)[2] > 0.
        else:
            X = cv2.triangulatePoints(eye(4)[:3], I(P)[:3], x0[:2], x1[:2])
            X /= X[3]

            # TEST REPROJECTION TO IMAGE PLANE
            x0 = dot(eye(4)[:3], X)[2] > 0.
            x1 = dot(I(P)[:3], X)[2] > 0.

        infront = x0 & x1
        return (100*sum(infront))/len(infront), sum(infront), len(infront)
Example #12
def triangulate_points(p1, p2, points1, points2):
    points1_matrix = np.transpose(np.array(points1))
    points2_matrix = np.transpose(np.array(points2))
    
    res = cv2.triangulatePoints(p1, p2, points1_matrix, points2_matrix)
    res = np.transpose(res)
    res_real = np.array([[row[i]/row[3] for i in range(3)] for row in res])
    
    return res_real
    
    
    def triangulate(self, kpts1, kpts2):

        kpts1 = (np.reshape(kpts1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(kpts2, (len(kpts2), 2))).T

        points3D = None

        points3D = cv2.triangulatePoints(self.cam1.P, self.cam2.P, kpts1, kpts2)

        points3D = points3D / points3D[3]

        return points3D
Example #14
    def MLE(self):
        x0 = self.x0
        x1 = self.x1
        x2 = self.x2
        P = eye(3, 4)
        Pi, Pii = self.getProjectionMat()
        X01 = cv2.triangulatePoints(P[:3], Pi[:3], x0[:2], x1[:2])
        X01 /= X01[3]
        X12 = cv2.triangulatePoints(Pi[:3], Pii[:3], x1[:2], x2[:2])
        X12 /= X12[3]
        X02 = cv2.triangulatePoints(P[:3], Pii[:3], x0[:2], x2[:2])
        X02 /= X02[3]
        X = (X01 + X12) / 2.0
        x0h = dot(P[:3], X)
        x0h /= x0h[2]
        x1h = dot(Pi[:3], X)
        x1h /= x1h[2]
        x2h = dot(Pii[:3], X)
        x2h /= x2h[2]

        cost = sum(sqrt(sum((x0 - x0h) ** 2, 0)) + sqrt(sum((x1 - x1h) ** 2, 0)) + sqrt(sum((x2 - x2h) ** 2, 0)))
Example #15
def test_triangulation_gdpts(P, x0, x1):
    H = linalg.inv(decode5(P))

    # TRIANGULATE POINTS
    X = cv2.triangulatePoints( eye(4)[:3], H[:3], x0[:2], x1[:2] )
    X /= X[3]

    # TEST REPROJECTION TO IMAGE PLANE
    x0 = dot(eye(4)[:3], X)[2] > 0.
    x1 = dot(H[:3], X)[2] > 0.
    infront = x0 & x1

    return (100*sum(infront))/len(infront), sum(infront), len(infront)
    def triangulatePoints(self, firstProjMtx, secondProjMtx):

        result = cv2.triangulatePoints(firstProjMtx, secondProjMtx, self.triangulationData[0][0], self.triangulationData[0][1])

        result = result/result[3]

        fig = plt.figure()
        ax = fig.add_subplot(111, projection = '3d')
        ax.plot_wireframe(result[0],result[1],result[2])

        plt.show()
        for x in result:
            #print(x)
            print('--------------------------')
    def optimal_triangulation(self, kpts1, kpts2):
        # For each given point corresondence points1[i] <-> points2[i], and a
        # fundamental matrix F, computes the corrected correspondences
        # new_points1[i] <-> new_points2[i] that minimize the geometric error
        # d(points1[i], new_points1[i])^2 + d(points2[i], new_points2[i])^2,
        # subject to the epipolar constraint new_points2^t * F * new_points1 = 0
        # Here we are using the OpenCV's function CorrectMatches.

        # @param kpts1 : keypoints in one image
        # @param kpts2 : keypoints in the other image
        # @return new_points1 : the optimized points1
        # @return new_points2 : the optimized points2

        # First, we have to reshape the keypoints. They must be a 1 x n x 2
        # array.

        kpts1 = np.float32(kpts1)  # Points in the first camera
        kpts2 = np.float32(kpts2)  # Points in the second camera

        # 3D Matrix : [kpts1[0] kpts[1]... kpts[n]]

        pt1 = np.reshape(kpts1, (1, len(kpts1), 2))

        pt2 = np.reshape(kpts2, (1, len(kpts2), 2))

        new_points1, new_points2 = cv2.correctMatches(self.F, pt2, pt1)

        self.correctedkpts1 = new_points1
        self.correctedkpts2 = new_points2

        # Transform to a 2D Matrix: 2xn

        kpts1 = (np.reshape(new_points1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(new_points2, (len(kpts2), 2))).T

        print(np.shape(kpts1))

        points3D = cv2.triangulatePoints(self.cam1.P, self.cam2.P, kpts2, kpts1)

        self.structure = points3D / points3D[3]  # Normalize points [x, y, z, 1]

        array = np.zeros((4, len(self.structure[0])))

        for i in range(len(self.structure[0])):

            array[:, i] = self.structure[:, i]

        self.structure = array
Example #18
def testpoint(Ps, graph):

	I = np.mat([[ 1, 0, 0, 0],[ 0, 1, 0, 0], [ 0, 0, 1, 0]])
	points3D=[]
	for i in range(len(Ps)):
		points3D.append({})
		for key in Ps[i].keys():
			pt1 = np.array([corr[0] for corr in graph[i][key]]).T
			pt2 = np.array([corr[1] for corr in graph[i][key]]).T
			
			pnts = cv2.triangulatePoints(np.dot(K, I), Ps[i][key], pt1, pt2)
			pnts = pnts.T
			for k, pnt in enumerate(pnts):
				pnts[k] = np.divide(pnt, pnt[3])
			points3D[i][key] = pnts

	return points3D
Example #19
def triangulate(pt, pt1, p, p1):
    """
    Given two sets of homogeneous coordinates and two camera matrices,
    triangulate the 3D coordinates.  The image correspondences are
    assumed to be implicitly ordered.

    References
    ----------
    .. [Hartley2003]

    Parameters
    ----------
    pt : ndarray
         (n, 3) array of homogeneous correspondences

    pt1 : ndarray
          (n, 3) array of homogeneous correspondences

    p : ndarray
        (3, 4) camera matrix

    p1 : ndarray
         (3, 4) camera matrix

    Returns
    -------
    coords : ndarray
             (4, n) array of triangulated points in homogeneous coordinates

    """
    pt = np.asarray(pt)
    pt1 = np.asarray(pt1)

    # Transpose for the openCV call if needed
    if pt.shape[0] != 3:
        pt = pt.T
    if pt1.shape[0] != 3:
        pt1 = pt1.T

    X = cv2.triangulatePoints(p, p1, pt[:2], pt1[:2])

    # Homogenize
    X /= X[3]

    return X
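A short usage sketch for the triangulate() helper above, with two synthetic cameras and homogeneous correspondences normalized so the third coordinate is 1; the data here is illustrative and not from the original code.

import numpy as np

p  = np.hstack((np.eye(3), np.zeros((3, 1))))                # reference camera
p1 = np.hstack((np.eye(3), np.array([[-1.0], [0.0], [0.0]])))
pts_3d = np.array([[0.0, 0.0, 5.0, 1.0],
                   [1.0, -1.0, 6.0, 1.0]])                   # n x 4 world points
pt  = (p  @ pts_3d.T).T
pt  = pt  / pt[:, 2:3]                                       # n x 3, last entry 1
pt1 = (p1 @ pts_3d.T).T
pt1 = pt1 / pt1[:, 2:3]
X = triangulate(pt, pt1, p, p1)                              # 4 x n homogeneous
print(X[:3].T)                                               # recovers the world points
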
Example #20
def calculate3DPosition(projMat1, projMat2, xy1, xy2):

    triangulationOutput = cv2.triangulatePoints(projMat1,projMat2, np.float32(xy1), np.float32(xy2))

    mypoint1 = np.array([triangulationOutput[0], triangulationOutput[1], triangulationOutput[2]])
    mypoint1 = mypoint1.reshape(-1, 3)
    mypoint1 = np.array([mypoint1])
    P_24x4 = np.resize(projMat1[0], (4,4))
    P_24x4[3,0] = 0
    P_24x4[3,1] = 0
    P_24x4[3,2] = 0
    P_24x4[3,3] = 1

    projected = cv2.perspectiveTransform(mypoint1, P_24x4)
    output2 = triangulationOutput[:-1]/triangulationOutput[-1]

    #TODO calculate point again with second proj mat, and calculate middle
    return output2
Example #21
def calculate3D():
    global projectionMatrix
    global cameras_xy
    global cameras_index
    myout = cv2.triangulatePoints(projectionMatrix[0],projectionMatrix[1], np.float32(cameras_xy[cameras_index[0]]), np.float32(cameras_xy[cameras_index[1]]))

    mypoint1 = np.array([myout[0], myout[1], myout[2]])
    mypoint1 = mypoint1.reshape(-1, 3)
    mypoint1 = np.array([mypoint1])
    P_24x4 = np.resize(projectionMatrix[0], (4,4))
    P_24x4[3,0] = 0
    P_24x4[3,1] = 0
    P_24x4[3,2] = 0
    P_24x4[3,3] = 1

    projected = cv2.perspectiveTransform(mypoint1, P_24x4)
    output2 = myout[:-1]/myout[-1]
    print(output2)
    return output2
def linear_eigen_triangulation(u1, P1, u2, P2, max_coordinate_value=1.e16):
    """
    Linear Eigenvalue based (using SVD) triangulation.
    Wrapper to OpenCV's "triangulatePoints()" function.
    Relative speed: 1.0
    
    (u1, P1) is the reference pair containing normalized image coordinates (x, y) and the corresponding camera matrix.
    (u2, P2) is the second pair.
    "max_coordinate_value" is a threshold to decide whether points are at infinity
    
    u1 and u2 are matrices: amount of points equals #rows and should be equal for u1 and u2.
    
    The status-vector is based on the assumption that all 3D points have finite coordinates.
    """
    x = cv2.triangulatePoints(P1[0:3, 0:4], P2[0:3, 0:4], u1.T, u2.T)    # OpenCV's Linear-Eigen triangl
    
    x[0:3, :] /= x[3:4, :]    # normalize coordinates
    x_status = (np.max(abs(x[0:3, :]), axis=0) <= max_coordinate_value)    # NaN or Inf will receive status False
    
    return x[0:3, :].T.astype(output_dtype), x_status
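A hedged usage sketch for linear_eigen_triangulation() above; output_dtype is a module-level name in the original code, so a stand-in is defined here, and the normalized cameras and points are synthetic.

import numpy as np
import cv2

output_dtype = np.float64                                    # stand-in for the module-level name
P1 = np.hstack((np.eye(3), np.zeros((3, 1))))
P2 = np.hstack((np.eye(3), np.array([[-0.5], [0.0], [0.0]])))
Xw = np.array([[0.0, 0.0, 4.0], [0.3, -0.2, 6.0]])           # world points
u1 = P1 @ np.c_[Xw, np.ones(2)].T
u1 = (u1[:2] / u1[2]).T                                      # Nx2 normalized image coords
u2 = P2 @ np.c_[Xw, np.ones(2)].T
u2 = (u2[:2] / u2[2]).T
x, x_status = linear_eigen_triangulation(u1, P1, u2, P2)
print(x, x_status)                                           # x close to Xw, status all True
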
Example #23
def get3DPoints(kp1, kp2, des1, des2):

	kd = NearestNeighbors(n_neighbors=2, algorithm='kd_tree').fit(des1)
	distances, indexes = kd.kneighbors(des2)
	
	pnts1 = []
	pnts2 = []
	
	for i, dist in enumerate(distances):
		if dist[0] < 0.8*dist[1]:
			pnts1.append( kp1[ indexes[i][0] ].pt )
			pnts2.append( kp2[ i ].pt )

	F, mask = cv2.findFundamentalMat( np.float32(pnts1), np.float32(pnts2) )
	E = K.T * np.mat(F) * K
	
	p1 = []
	p1Ind = []
	p2 = []
	p2Ind = []
	for i, e in enumerate(mask):
		if e == 1:
			p1.append(pnts1[i])
			p1Ind.append(i)
			p2.append(pnts2[i])
			p2Ind.append(i)
	
	pnts1 = np.array(p1).T
	pnts2 = np.array(p2).T
	
	U, s, V = np.linalg.svd(E, full_matrices=True)
	P = chooseP(U, s, V, pnts1[0], pnts2[0])
	
	I = np.mat([[ 1, 0, 0, 0],[ 0, 1, 0, 0], [ 0, 0, 1, 0]])
	
	pnts3d = cv2.triangulatePoints(K*I, P, pnts1, pnts2)
	
	for i, pnt in enumerate(pnts3d):
		pnts3d[i] = np.divide(pnt, pnts3d[3])
	
	return pnts3d, pnts1, pnts2
def triangulate(im1, im2): #im1 and im2 must be black and white
    if show_rt:
        t = time.time()

    #compute Fundamental matrix
    F, pts1, pts2 = compute_F(im1, im2)

    #print '\npts1:\n', pts1
    #print '\npts2:\n', pts2

    #compute Essential matrix
    E = compute_E(F, im1, im2)

    #get translation matrix for translation in the x direction
    R, T = compute_R_T(E)

    #get K
    K = compute_K(im1)

    #get projection from K, R, T
    P = compute_P(K, R, T)

    print('\nP.shape:', P.shape, '\n')
    print('\npts1.T.shape:', pts1.T.shape, '\n')
    print('\npts2.T.shape:', pts2.T.shape, '\n')

    #if pts1.T.shape[1] == 0 or pts.T.shape[0] == 0:
     #   return None, None, None # None, np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

    hom_pts = cv2.triangulatePoints(P, P, pts1.T, pts2.T)

    #print 'hom_pts:\n', hom_pts.T
    euclid_pts = cv2.convertPointsFromHomogeneous(hom_pts.T)

    xfrm = compute_transform(R, T)

    if show_rt:
        print('\ntriangulate() runtime:', time.time() - t, '\nsecs')

    return euclid_pts, hom_pts.T, xfrm
    def triangulate(self):
        reconstructed_points = None
        for i in range(len(self.pair_set)):
            keypoint_1, keypoint_2 = self._get_keypoints(self.pair_set[i])
            point_homogeneous = cv2.triangulatePoints(self.pair_set[i].projection_matrix_1,
                                                        self.pair_set[i].projection_matrix_2,
                                                        keypoint_1.T,
                                                        keypoint_2.T).T
            if i == 0:
                reconstructed_points = point_homogeneous[:, 0:3]/point_homogeneous[:, 3].reshape((point_homogeneous[:,3].shape[0],1))
            else:
                reconstructed_points = np.vstack((reconstructed_points,
                                                 point_homogeneous[:, 0:3]/point_homogeneous[:, 3].reshape((point_homogeneous[:,3].shape[0],1))))
        reconstructed_points = np.delete(reconstructed_points,np.where(reconstructed_points[:,1]> 0), axis=0)
        reconstructed_points = np.delete(reconstructed_points, np.where(reconstructed_points[:, 1]< -15), axis=0)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(np.asarray(reconstructed_points[:, 0]),
                   np.asarray(reconstructed_points[:, 1]),
                   np.asarray(reconstructed_points[:, 2]), c='b', marker='o')

        plt.show()
Example #26
def triangulate(P1,P2,i1,i2,p_real=''):
    '''
    Performs triangulation with cv2-function triangulatePoints.

    _Parameters_:

    P1, P2: camera projection matrices

    i1,i2: point image positions in 2 cameras

    p_real: real position of point (if known), for error calculation

    _Returns_:

    position of corresponding 3D point, 2D and 3D error.
    '''
    p_test = cv2.triangulatePoints(P1, P2, i1, i2)
    p_norm = p_test / p_test[3]
    p_norm = p_norm.T[:, :3]
    err2 = err3 = None  # only computed when the real position is provided
    if type(p_real) != str:
        err3 = np.sqrt(np.sum(np.power(p_real - p_norm, 2), axis=1))
        err2 = np.sqrt(np.sum(np.power(p_real[:, :2] - p_norm[:, :2], 2), axis=1))
    return p_norm, err2, err3
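A small usage sketch for the triangulate() wrapper above, checking the 3D error against a known point; the cameras and the point are synthetic and only illustrate the call signature.

import numpy as np
import cv2

P1 = np.hstack((np.eye(3), np.zeros((3, 1))))
P2 = np.hstack((np.eye(3), np.array([[-1.0], [0.0], [0.0]])))
p_real = np.array([[0.5, 0.2, 4.0]])                         # known 3D point, 1 x 3
Xh = np.append(p_real[0], 1.0)                               # homogeneous world point
i1 = P1 @ Xh
i1 = (i1[:2] / i1[2]).reshape(2, 1)                          # 2 x 1 image position
i2 = P2 @ Xh
i2 = (i2[:2] / i2[2]).reshape(2, 1)
p_norm, err2, err3 = triangulate(P1, P2, i1, i2, p_real)
print(p_norm, err3)                                          # p_norm ~ p_real, err3 ~ 0
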
Example #27
    def plot_point_cloud(self, feat_mode="SURF"):
        """Plots 3D point cloud

            This method generates and plots a 3D point cloud of the recovered
            3D scene.

            :param feat_mode: whether to use rich descriptors for feature
                              matching ("surf") or optic flow ("flow")
        """
        self._extract_keypoints(feat_mode)
        self._find_fundamental_matrix()
        self._find_essential_matrix()
        self._find_camera_matrices_rt()

        # triangulate points
        first_inliers = np.array(self.match_inliers1).reshape(-1, 3)[:, :2]
        second_inliers = np.array(self.match_inliers2).reshape(-1, 3)[:, :2]
        pts4D = cv2.triangulatePoints(self.Rt1, self.Rt2, first_inliers.T,
                                      second_inliers.T).T

        # convert from homogeneous coordinates to 3D
        pts3D = pts4D[:, :3]/np.repeat(pts4D[:, 3], 3).reshape(-1, 3)

        # plot with matplotlib
        Ys = pts3D[:, 0]
        Zs = pts3D[:, 1]
        Xs = pts3D[:, 2]

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(Xs, Ys, Zs, c='r', marker='o')
        ax.set_xlabel('Y')
        ax.set_ylabel('Z')
        ax.set_zlabel('X')
        plt.title('3D point cloud: Use pan axes button below to inspect')
        plt.show()
Example #28
        R = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]], dtype=np.float32)
        T1 = np.reshape(pose[frame_num - 1], (3, 1))
        T2 = np.reshape(pose[frame_num], (3, 1))
        T1 = np.dot(R, T1)
        T2 = np.dot(R, T2)
        R = np.eye(3, 3, dtype=np.float32)

        RT1 = np.hstack((R, T1))
        RT2 = np.hstack((R, T2))

        dp1 = np.reshape(old_points, (-1, 2))
        dp2 = np.reshape(new_points, (-1, 2))
        udp1 = undistort(dp1, cam_mat, dist_coeff)
        udp2 = undistort(dp2, cam_mat, dist_coeff)

        points4D = cv2.triangulatePoints(RT1, RT2, udp1, udp2)
        points3D = np.array([
            points4D[0] / points4D[3], points4D[1] / points4D[3],
            points4D[2] / points4D[3]
        ],
                            dtype=np.float32)
        points3D = np.transpose(points3D)
        for p in points3D:
            x, y, z = p
            if z < -800 or z > -660:
                continue
            csv_file.write('{0:f}, {1:f}, {2:f}\n'.format(x, y, z))

    frame = draw_tracks(frame, old_points, new_points)

    cv2.imshow("Frame", frame)
  def publish_3d_coords(self, left_max_x, left_max_y, right_max_x, right_max_y, imageSize):
          #monocular calibration 1 = left 2 = right
    CMatr1 = np.matrix([[2531.915668, 0.000000, 615.773452],  
    [0.000000, 2594.436434, 344.505755],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    pp = pprint.PrettyPrinter(indent=4)

    # print("CMatr1", CMatr1)
    #left distortion parameters: 1.281681 -15.773048 -0.010428 0.012822 0.000000

    CMatr2 = np.matrix([[1539.714285, 0.000000, 837.703760],
    [0.000000, 1506.265655, 391.687374],
    [0.000000, 0.000000, 1.000000]]).astype(np.float)

    # print("CMatr2", CMatr2)

    projPoints1 = np.array([[left_max_x],[left_max_y]]).astype(np.float)

    projPoints2 = np.array([[right_max_x],[right_max_y]]).astype(np.float)

    distort_left = np.array([1.281681, -15.773048, -0.010428, 0.012822, 0.000000]).astype(np.float)
    distort_right = np.array([0.091411, -0.461269, 0.021006, 0.040117, 0.000000]).astype(np.float)
    # print("distort_left", distort_left)
    # print("distort_right", distort_right)

    RMat1 = self.quatToRot(np.array([-0.029, 0.992, -0.110, 0.053]))
    RMat2 = self.quatToRot(np.array([-0.019, 0.997, -0.038, -0.071]))



    RFinal = np.matmul(np.linalg.inv(RMat1),RMat2)

    T_left = np.array([-0.268, 0.516, 3.283]).astype(np.float) #--------------------CHANGE

    T_right = np.array([0.018, -0.184, 1.255]).astype(np.float) #--------------------CHANGE

    T_final = T_right - T_left

    #print("RFinal", RFinal)
    #print("T_final", T_final)
    #print(imageSize)

    R1,R2,P1,P2,Q, a,b = cv2.stereoRectify(CMatr1, distort_left, CMatr2, distort_right, (1280,720), RFinal, T_final,  alpha=-1)
    

    #print("R1",R1)
    #print("R2",R2)CMatr1
    #print("P1",P1)
    #print("P2",P2)

    #pnt1 = cv2.undistortPoints(projPoints1, CMatr1, distort_left, R=RMat1, P=P1)
    #pnt2 = cv2.undistortPoints(projPoints2, CMatr2, distort_right, R=RMat2, P=P2)

    #print("left:",projPoints1)
    #print("right:", projPoints2)

    P1_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 0.0],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])
    P2_stereo = np.array([[4890.538324810042, 0.0, -1734.3179817199707, 8092.200252104331],[ 0.0, 4890.538324810042, 398.04181480407715, 0.0],[ 0.0, 0.0, 1.0, 0.0]])


    points4D = cv2.triangulatePoints(P1_stereo, P2_stereo, projPoints1, projPoints2)
    #points3D = cv2.convertPointsFromHomogeneous(points4D)
    #print(points4D)

    #Converts 4D to 3D by [x,y,z,w] -> [x/w, y/w, z/w]
    
    points3D = np.array([  points4D[0]/points4D[3], points4D[1]/points4D[3], points4D[2]/points4D[3] ])
Example #30
    def process_keys(self, calcfr=-1):
        '''Calculate the two translations between three contiguous frames.
        
        :Steps:
            #. Finds triple correspondences (3 frames) and normalizes points.
            #. Estimates relative scaling based on sequence number (may have skipped).
            #. Create trifocal tensor and extract the two translations.
            #. 
        
        :type calcfr: int
        :arg  calcfr: Index of 3rd inputed frame to calculate. 
            Must be 2 or greater.
        '''
        assert self.nframes >= 3, 'At least three frames are required for this process'

        if calcfr == -1:
            calcfr += self.nframes

        # FIND THE TWO-MATCH AND TRI-MATCH SETS
        keylist = self.keylist
        tri_match = self.matches[-1]


        # CREATE NORMALIZED TRI-SETS OF CORRESPONDENCE POINTS
        LPA = self.LPA
        for cam, camset in enumerate( tri_match ):
            pointset = []
            for i, m in enumerate( camset ):
                if len(m) > 1:
                    print cam, m
                    pointset.append( LPA.normalize(cam,
                                               self.keylist[calcfr-2+i][cam][:,0][m],
                                               self.keylist[calcfr-2+i][cam][:,1][m]) )
            tri_match[cam] = pointset
        print 'tri_match', type(tri_match), len(tri_match)

        # GET SCALING ESTIMATE FOR THIS TRANSLATION
        scale = ((self.seqid[calcfr]-self.seqid[calcfr-1])
                    /float(self.seqid[calcfr-1]-self.seqid[calcfr-2]))
        print 'scale', scale
        
        # CALCULATE TRANSFORMATIONS
        camcol = [(1,0,0),(0,1,0),(0,0,1),(1,1,0),(1,0,1)]
        for cam, tset in enumerate(tri_match):
            print 'cam tset', cam
            try:
                a, b, c = tset
            except:
                print 'Error in tset split'
                continue
            print 'cam tset', cam
            # TEST FOR REASONABLE FLOW
            mask = test_vector_flow(a,b,c)
            print 'len gd a', sum(mask), sum(mask)/float(len(a[0])), '%'
            if sum(mask) > 24:
                a, b, c = a[:,mask], b[:,mask], c[:,mask]
            print 'Used a', len(a[0])

            print 'H_ab twice', 'a shape', a.shape

            tensor = Trifocal(a, b, c)
            H_ab, H_ac = tensor.getProjectionMat()
            H_ab = LPA.H_C2L(cam, H_ab )

            # CALCULATE INITIAL TRANSFORMATION USING GA TO REFINE THE RESULT
#            H_ab = GAP.GA_refine_single_projection(LPA, cam, a, b, front_cam=self.forward_cam, popsize=40, iters=40)
            print H_ab
            # CALCULATE WORLD 3D POINTS FROM RESULT
            F = GAP.E(GAP.encode5(H_ab))
            epiHole = GAP.mask_pts_near_epipole(a, b, F=F)
            X = cv2.triangulatePoints(LPA.P(cam), LPA.Hdot(H_ab, cam)[:3], a[:2,epiHole], b[:2,epiHole])
            X /= X[3]
            print 'points', X
            LPA.show_ladybug()
            LPA.show_points(X, radius=0.1, color=camcol[cam])
            # CALCULATE 2ND TRANSFORMATION FROM WORLD POINTS
            H_ac = H_from_Xx(LPA, cam, X, c[:,epiHole].copy())

            H_ab_gene = GAP.encode5sb( H_ab )[:6]
            print 'Hab', H_ab_gene
            print 'Hac', GAP.encode5sb( H_ac )[:6]

            # MULTIPLY BY PREVIOUS SCALE
            H_ab_gene[5] *= self.prev_scale

            # CALCULATE THE B-C TRANSLATION
            H_bc_gene = GAP.encode5sb( dot(H_ac,linalg.inv(H_ab)) )[:6]
            print 'Hbc', H_bc_gene

            # TEST IF DETECTED SCALING IS WITHIN REASON AND ADD TO LIST
            print  H_bc_gene[5], scale, abs(H_bc_gene[5] - scale), (abs(H_bc_gene[5] - scale) < 0.25)
#            raw_input('pause 2')
            if (abs(H_bc_gene[5] - scale) < 0.25):
                self.H_lists[calcfr-1].append( H_ab_gene )
                self.H_lists[calcfr].append( H_bc_gene )
                print 'len prev', len(self.H_lists[calcfr-1])
                print 'len this', len(self.H_lists[calcfr])
            else:
                print 'Not adding:', H_ab_gene
                print 'Not adding:', H_bc_gene
        self.prev_scale = scale

        # EVALUATE THE PROJECTION COLLECTIONS, RE-EVALUATE PREVIOUS
        for each in [calcfr-1, calcfr]:
            Hlist = array(self.H_lists[each])
#            print Hlist
            print '  PROCESSING H LIST'
            print 'Number in stack', len(Hlist)
            me = mean(Hlist,0)
            st = std(Hlist,0)
            print repr(me)
            try:
                above = Hlist > (me - st)
                below = Hlist < (me + st)
                res = sum(above & below, 1) == 6
                self.Transformation[each] = mean(Hlist[res],0)
                if isnan(self.Transformation[each][0]):
                    raise ValueError('sodo')
            except:
                self.Transformation[each] = me
            print repr(self.Transformation[each])
            print

        return self.Transformation[calcfr-1], self.Transformation[calcfr]
Example #31
    def fit_reconstruction(self):
        """
        Reconstruct the 3D points (after fit_descriptor)
        :return: list of 3D points of size (3*n)
        """
        if not self.match.is_fitted:
            raise ValueError("must call fit_descriptor before")

        K = self.calibration.K
        P_l = self.P_left

        # get the coordinates of the points from the keypoints lists
        pts_l = np.array(
            [self.match.kp_l[m.queryIdx].pt for m in self.match.matches])
        idx_l = np.array([m.queryIdx for m in self.match.matches])
        pts_r = np.array(
            [self.match.kp_r[m.trainIdx].pt for m in self.match.matches])
        idx_r = np.array([m.trainIdx for m in self.match.matches])

        # estimate E with RANSAC
        E, mask = cv2.findEssentialMat(pts_l,
                                       pts_r,
                                       cameraMatrix=self.calibration.K,
                                       method=cv2.FM_RANSAC)
        inliers = np.where(mask == 1)[0]

        # select the inliers from RANSAC
        pts_l = pts_l[inliers, :]
        pts_r = pts_r[inliers, :]
        idx_l = idx_l[inliers]
        idx_r = idx_r[inliers]
        self.mask = inliers

        # compute the fundamental F
        self.F = np.dot(np.linalg.inv(K.T), np.dot(E, np.linalg.inv(K)))

        # compute the rotation and translation of the transformation
        _, R, t, mask_c = cv2.recoverPose(E, pts_l, pts_r, cameraMatrix=K)
        self.P_transformation = concat(R, t)

        # compute the new position of the camera
        P_r = compose(P_l, self.P_transformation)

        # select the points that are in front of the selected camera
        infront = np.where(mask_c != 0)[0]
        pts_l = pts_l[infront, :]
        pts_r = pts_r[infront, :]
        idx_l = idx_l[infront]
        idx_r = idx_r[infront]
        self.mask = self.mask[infront]

        # find the position of the 3D points with a triangulation
        X = cv2.triangulatePoints(np.dot(K, P_l.P), np.dot(K, P_r.P), pts_l.T,
                                  pts_r.T)
        X = X / X[3]
        threeDpoints = X[:3, :]

        # reproject the 3D points through the cameras
        xlp, _ = cv2.projectPoints(threeDpoints.T,
                                   P_l.rv,
                                   P_l.t,
                                   K,
                                   distCoeffs=self.calibration.dist_coeff)
        xrp, _ = cv2.projectPoints(threeDpoints.T,
                                   P_r.rv,
                                   P_r.t,
                                   K,
                                   distCoeffs=self.calibration.dist_coeff)

        # kept points from the initial keypoints on the two images
        self.x_l = pts_l.T
        self.x_r = pts_r.T
        self.idx_l = idx_l
        self.idx_r = idx_r

        # reprojected points
        self.xp_l = np.vstack((xlp[:, 0, 0], xlp[:, 0, 1]))
        self.xp_r = np.vstack((xrp[:, 0, 0], xrp[:, 0, 1]))

        # save the results
        self.E = E
        self.P_right = P_r
        self.threeDpoints = threeDpoints
        self.is_fitted = True

        return threeDpoints
Example #32
            print("points: " + str(l_x) + " " + str(r_x))

            l_test_points = np.array([[[l_x, l_y]]])
            r_test_points = np.array([[[r_x, r_y]]])

            l_undistorted = cv2.undistortPoints(l_test_points, l_mtx, l_dist,
                                                None, l_rmtx, l_pmtx)
            r_undistorted = cv2.undistortPoints(r_test_points, r_mtx, r_dist,
                                                None, r_rmtx, r_pmtx)

            print("\nleft undistorted: " + str(l_undistorted))
            print("\nright undistorted: " + str(r_undistorted))

            #projected_points = cv2.triangulatePoints(l_pmtx, r_pmtx, l_test_points, r_test_points)
            projected_points2 = cv2.triangulatePoints(l_pmtx, r_pmtx,
                                                      l_undistorted,
                                                      r_undistorted)

            #print("\nElapsed: " + str(end - start))

            #print("\nProjected: " + str((projected_points[:3] / projected_points[3]).T))
            print("\nProjected2: " +
                  str((projected_points2[:3] / projected_points2[3]).T))

#        cv2.imshow("masked", blurred)

    c = cv2.waitKey(1) & 0xFF
    if c == ord('r'):
        LBOUND[0] += 1
    elif c == ord('f'):
        LBOUND[0] -= 1
Example #33
def _run_triangulation(data_for_triangulation):
    marker_points_4d = cv2.triangulatePoints(*data_for_triangulation)
    marker_points_3d = cv2.convertPointsFromHomogeneous(marker_points_4d.T)
    marker_points_3d.shape = 4, 3
    return marker_points_3d
Example #34
dist = cv_file.getNode("dist_coeff").mat()
## generate the test camera projection matrices
P1 = np.eye(3, 4, dtype=np.float32)
P2 = np.array([[0, -1, 0, 1], [1, 0, 0, -1], [0, 0, 1, 0]], dtype=np.float32)
print(P1)
print(P2)
## generate the test points
N = 5
points3d = np.empty((4, N), np.float32)
points3d[:3, :] = np.random.randn(3, N)
points3d[3, :] = 1
print(points3d)
## project the 3D points into the two views and add noise
points1 = P1 @ points3d
print("projection")
print(points1)
points1 = points1[:2, :] / points1[2, :]
print("projection")
print(points1)
points1[:2, :] += np.random.randn(2, N) * 1e-2
points2 = P2 @ points3d
points2 = points2[:2, :] / points2[2, :]
points2[:2, :] += np.random.randn(2, N) * 1e-2
## reconstruct the points from the noisy observations
points3d_reconstr = cv.triangulatePoints(P1, P2, points1, points2)
points3d_reconstr /= points3d_reconstr[3, :]
## print the comparison
print('Original points')
print(points3d[:3].T)
print('Reconstructed points')
print(points3d_reconstr[:3].T)
Example #35
        R1 = mobile_captures[m1]['rotation'].T
        T1 = - np.dot(R1, mobile_captures[m1]['position'])

        K2 = mobile_captures[m2]['K']
        R2 = mobile_captures[m2]['rotation'].T
        T2 = - np.dot(R2, mobile_captures[m2]['position'])

        P1 = np.dot(K1, np.c_[R1, T1])
        P2 = np.dot(K2, np.c_[R2, T2])

        # 2d points from 2 different views
        pts_2d_1 = np.asarray([mobile_captures[m1]['person_pose'][::3], mobile_captures[m1]['person_pose'][1::3]])
        pts_2d_2 = np.asarray([mobile_captures[m2]['person_pose'][::3], mobile_captures[m2]['person_pose'][1::3]])

        # triangulate points from different views
        pts_3d = cv.triangulatePoints(P1, P2, pts_2d_1, pts_2d_2)

        pts_3d = pts_3d / pts_3d[3, :]

        # here we store the 3d points together with their confidence values
        # 3d points
        pts_3d_multiple_views[0:3, :, c] = pts_3d[0:3, :]
        # confidence scores
        pts_3d_multiple_views[3, :, c] = mobile_captures[m1]['person_pose'][2::3]
        pts_3d_multiple_views[4, :, c] = mobile_captures[m2]['person_pose'][2::3]

        c += 1

    ####################
    #################### bundle adjustment
    ####################
Example #36
R_T_1 = np.hstack([R1,T])
print "[R|T] matrix = "
print R_T_1

R1, R2, T = cv.decomposeEssentialMat(E1)
R_T_2 = np.hstack([R1,T])
#print "cam2 [R|T] matrix = "
#print R_T_2

# s*[px_x, px_y,0] = cam_intrinsic*R_T*[X,Y,Z,0]
cam1_proj = np.matmul(E1,R_T_1)
cam2_proj = np.matmul(E2,R_T_2)
#print cam1_proj
#print cam2_proj

points4D = cv.triangulatePoints(cam1_proj,cam2_proj,points1[0],points2[0])
#print points4D

#print np.matmul(cam1_proj,points4D)

####################################################
# TASK 2-1 : Compute fundamental matrix for all image pairs
####################################################
print "=====fundamental matrix for all image pairs======"

#load all images
images = []
for i in range(0,9):
    images.append(cv.imread("images/000"+str(i)+".png"))
images.append(cv.imread("images/0010.png"))
print "10 images loaded"
Example #37
# Example-2.
val, rvec, t12_pnp_est, inliers = cv2.solvePnPRansac(pts3d_11, pts2d_12, K, None, None, None,
                                                False, 50, 2.0, 0.99, None)
R12_pnp_est, _ = cv2.Rodrigues(rvec)

# Example-3.

val, rvec, t13_pnp_est, inliers = cv2.solvePnPRansac(pts3d_11, pts2d_13, K, None, None, None,
                                                False, 50, 2.0, 0.99, None)
R13_pnp_est, _ = cv2.Rodrigues(rvec)

print("Solve-PnP Results ", (Cam12.R - R12_pnp_est).sum(),  (Cam12.t - t12_pnp_est).sum())

# Example-4.
val, rvec, t23_pnp_est, inliers = cv2.solvePnPRansac(pts3d_12, pts2d_13, K, None, None, None,
                                                False, 50, 2.0, 0.99, None)
R23_pnp_est, _ = cv2.Rodrigues(rvec)

print("R23 - - - - ", R23_pnp_est, R23_est1)
print("t23 - - - - ", utils.norm(t23_pnp_est), t23_est)

# From now on, we'll check Point-Triangulation
# Note, the returned 3D points are in world-coordinate.

pts4d = cv2.triangulatePoints(Cam11.P, Cam12.P, pts2d_11.T, pts2d_12.T).T
print("Triangulation 1 ", ((pts4d/pts4d[:, -1].reshape(-1, 1))[:, :-1] - pts3d_11).sum())

pts4d = cv2.triangulatePoints(Cam12.P, Cam13.P, pts2d_12.T, pts2d_13.T).T
print("Triangulation 2 ", ((pts4d/pts4d[:, -1].reshape(-1, 1))[:, :-1] - pts3d_11).sum())

Example #38
    def compute_camera_extrinsic(self, image1, image2):
        # orb = cv2.ORB_create()

        # # get key points and descriptors
        # kp1, des1 = orb.detectAndCompute(image1, None)
        # kp2, des2 = orb.detectAndCompute(image2, None)

        # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # matches = bf.match(des1,des2)
        # matches = sorted(matches, key = lambda x: x.distance)
        # good = matches[:int(len(matches) * 0.1)]

        # # select apropriate matches
        # pts1 = []
        # pts2 = []
        # for m in matches[:10]:
        #     #if m.distance < self.__DISTANCE:
        #     pts2.append(kp2[m.trainIdx].pt)
        #     pts1.append(kp1[m.queryIdx].pt)
        # pts1 = np.int32(pts1)
        # pts2 = np.int32(pts2)

        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(image1, None)
        kp2, des2 = sift.detectAndCompute(image2, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        good = good[:100]

        pts1 = np.float64([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        pts2 = np.float64([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # calculate the fundamental matrix
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        E, mask = cv2.findEssentialMat(pts1,
                                       pts2,
                                       focal=1.0,
                                       pp=(486., 265.),
                                       method=cv2.RANSAC,
                                       prob=0.999,
                                       threshold=1.0)
        points, R, t, mask = cv2.recoverPose(E, pts1, pts2, pp=(486., 265.))
        #points, R, t, mask = cv2.recoverPose(F, pts1, pts2, pp=(486., 265.), mask=mask);
        # E = np.dot(np.dot(self.K.T, F), self.K)
        # points, R, t, mask = cv2.recoverPose(E, pts1, pts2)

        # # SVD
        # U, s, V = np.linalg.svd(E, full_matrices=True)
        # W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
        # rotation = np.dot(np.dot(U, W.T), V.T)
        # translation = U[:, 2]

        self.E = E
        self.F = F
        self.R = R
        self.t = t

        # NOTE: for debug
        # img3 = cv2.drawMatches(image1,kp1,image2,kp2, good, None, flags=2)
        # cv2.imshow("asdasd", img3)

        # build the two projection matrices and triangulate
        M_r = np.hstack((self.R, self.t))
        M_l = np.hstack((np.eye(3, 3), np.zeros((3, 1))))

        P_l = np.dot(self.K, M_l)
        P_r = np.dot(self.K, M_r)
        point_4d_hom = cv2.triangulatePoints(P_l, P_r, pts1, pts2)
        point_4d = point_4d_hom / np.tile(point_4d_hom[-1, :], (4, 1))
        point_3d = point_4d[:3, :].T

        self.triangulation = point_3d

        return E, F, R, t
Example #39
    def camera_cali(self):
        # calibration a single camera
        '''
        for c in xrange(0, 1):
            # 3d points in real world space
            obj_pts = []
            # 2d points in image plane
            img_pts = []
            boards = self.boards[c].chessboards
            corners = self.corners[c].corners_pts
            # check if this image is the reference image
            if c == 0:
                for b in xrange(0, len(boards)):
                    h, w = len(boards[b]), len(boards[b][0])
                    obj_pts_board = np.zeros((h * w, 3), np.float32)
                    img_pts_board = np.zeros((h * w, 2), np.float32)
                    board = boards[b]
                    for i in xrange(0, h):
                        for j in xrange(0, w):
                            obj_pts_board[i * w + j, 0] = j * self.corner_dist
                            obj_pts_board[i * w + j, 1] = i * self.corner_dist
                            img_pts_board[i * w + j, 0] = corners[int(board[i, j])][0]
                            img_pts_board[i * w + j, 1] = corners[int(board[i, j])][1]
                    obj_pts.append(obj_pts_board)
                    img_pts.append(img_pts_board)

            camera_mat = np.zeros((3, 3), np.float32)
            camera_mat[0, 0] = 900.
            camera_mat[0, 2] = len(self.img[c][0]) / 2
            camera_mat[1, 1] = 900.
            camera_mat[1, 2] = len(self.img[c]) / 2
            camera_mat[2, 2] = 1.
            ret, mtx, dist, rv, tv = cv2.calibrateCamera(obj_pts, img_pts,
                                                         (len(self.img[c][0]), len(self.img[c])),
                                                         camera_mat, None, None, None,
                                                         flags=(cv2.CALIB_FIX_ASPECT_RATIO +
                                                                cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_K3))

        print(mtx, dist)

        '''
        # first camera/image is the reference camera/image
        boards_ref = self.boards[0].chessboards
        corners_ref = self.corners[0].corners_pts
        # only support two-camera calibration
        for c in xrange(1, len(self.boards)):
            match = self.matching[2 * (c - 1)]
            rot = self.matching[2 * (c - 1) + 1]
            print match
            print rot
            obj_pts_ref = []
            img_pts_ref = []
            obj_pts_tar = []
            img_pts_tar = []
            boards_tar = self.boards[c].chessboards
            corners_tar = self.corners[c].corners_pts
            for b in xrange(0, len(match)):
                if match[b] != -1:
                    # reference camera
                    obj_pts_board_ref = []
                    img_pts_board_ref = []
                    board_ref = boards_ref[b]

                    for i in xrange(0, len(board_ref)):
                        for j in xrange(0, len(board_ref[0])):
                            obj_pts_board_ref.append([
                                j * self.corner_dist, i * self.corner_dist, 0
                            ])
                            img_pts_board_ref.append(corners_ref[board_ref[i,
                                                                           j]])

                    # target camera
                    obj_pts_board_tar = []
                    img_pts_board_tar = []
                    board_tar = boards_tar[int(match[b])]

                    for k in xrange(0, int(rot[b])):
                        board_tar = np.flipud(board_tar).T

                    for i in xrange(0, len(board_tar)):
                        for j in xrange(0, len(board_tar[0])):
                            obj_pts_board_tar.append([
                                j * self.corner_dist, i * self.corner_dist, 0
                            ])
                            img_pts_board_tar.append(corners_tar[board_tar[i,
                                                                           j]])

                    obj_pts_ref.append(
                        np.array(obj_pts_board_ref).astype(np.float32))
                    img_pts_ref.append(
                        np.array(img_pts_board_ref).astype(np.float32))
                    obj_pts_tar.append(
                        np.array(obj_pts_board_tar).astype(np.float32))
                    img_pts_tar.append(
                        np.array(img_pts_board_tar).astype(np.float32))
            '''
            # visualize the corners found on image
            for no_board in xrange(0, len(img_pts_ref)):
                I_1 = self.img[0]
                I_2 = self.img[1]
                for i in xrange(0, len(img_pts_ref[no_board])):
                    x = int(math.ceil(img_pts_ref[no_board][i, 0]))
                    y = int(math.ceil(img_pts_ref[no_board][i, 1]))
                    I_1[y-2:y+2, x-2:x+2] = [0, 0, 255]
                cv2.imshow('left', I_1)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
                for i in xrange(0, len(img_pts_tar[no_board])):
                    x = int(math.ceil(img_pts_tar[no_board][i, 0]))
                    y = int(math.ceil(img_pts_tar[no_board][i, 1]))
                    I_2[y-2:y+2, x-2:x+2] = [0, 0, 255]
                cv2.imshow('right', I_2)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            '''

            mono_ret, mono_mat_1, mono_dist_1, mono_r, mono_t = \
                cv2.calibrateCamera(obj_pts_ref, img_pts_ref, (len(self.img[c][0]), len(self.img[c])), flags=(
            cv2.CALIB_FIX_K3 + cv2.CALIB_FIX_K4 + cv2.CALIB_FIX_K5))

            print 'mono', mono_mat_1, mono_dist_1

            # stereo camera calibration
            ret, cam_mat_1, cam_dist_1, cam_mat_2, cam_dist_2, R, T, E, F = \
                cv2.stereoCalibrate(obj_pts_ref, img_pts_ref, img_pts_tar, (len(self.img[c][0]), len(self.img[c])),
                                    flags=(cv2.CALIB_FIX_K4+cv2.CALIB_FIX_K5))
            rect_rot_1, rect_rot_2, cam_proj_1, cam_proj_2, Q, ROI1, ROI2 = \
                cv2.stereoRectify(cam_mat_1, cam_dist_1, cam_mat_2, cam_dist_2,
                                  (len(self.img[c][0]), len(self.img[c])), R, T,
                                  None, None, None, None, None, flags=(cv2.CALIB_ZERO_DISPARITY),
                                  alpha=0, newImageSize=(1091, 547))
            # DEBUG
            cam_mat_1 = np.array([[6.47816e2, 0.0, 7.71052e2],
                                  [0.0, 6.45650e2, 4.33588e2], [0.0, 0.0,
                                                                1.0]])
            cam_mat_2 = np.array([[6.47793e2, 0.0, 7.49001e2],
                                  [0.0, 6.47026e2, 4.44525e2], [0.0, 0.0,
                                                                1.0]])
            R = np.array([[9.99915e-01, -2.07121e-03, -1.28613e-02],
                          [2.03651e-03, 9.99994e-01, -2.70997e-03],
                          [1.28669e-02, 2.68355e-03, 9.9991e-01]])
            T = np.array([[-6.311307521502e-01], [-3.756969942287e-03],
                          [8.773418730107e-03]])

            self.intrinsic.append(cam_mat_1)
            self.intrinsic.append(cam_mat_2)
            self.distortion.append(cam_dist_1)
            self.distortion.append(cam_dist_2)
            self.rotation.append(R)
            self.translation.append(T)
            self.cam_proj.append(cam_proj_1)
            self.cam_proj.append(cam_proj_2)
            self.rect_rot.append(rect_rot_1)
            self.rect_rot.append(rect_rot_2)

            for b in xrange(0, len(img_pts_ref)):
                # TODO: update corner pixel coordinates to new rectified coordinate and
                # use rectified projection matrix
                cam_proj_1 = np.concatenate((self.intrinsic[0], np.zeros(
                    (3, 1))),
                                            axis=1)
                cam_proj_2 = self.intrinsic[1].dot(
                    np.concatenate((self.rotation[0], self.translation[0]),
                                   axis=1))
                obj_pts_board = cv2.triangulatePoints(
                    cam_proj_1, cam_proj_2,
                    img_pts_ref[b].astype(np.float32).T,
                    img_pts_tar[b].astype(np.float32).T)
                obj_pts_board = obj_pts_board.T
                obj_pts_board = obj_pts_board[:, 0:3] / obj_pts_board[:, 3:4]
                self.boards_pts_set.append(obj_pts_board)

                # visualize the corners found on image
                '''
Example #40
def reconstruct3DPts(imgPtsA_2xn,
                     imgPtsB_2xn,
                     cameraMatrixA,
                     cameraMatrixB,
                     distCoeffsA,
                     distCoeffsB,
                     Tx2CamA,
                     Tx2CamB,
                     calcReprojErr=False):
    """
    Reconstructs points by triangulation.

    Parameters
    ----------
    imgPtsA_2xn : array_like
        Points in image, 2xn.
    imgPtsB_2xn : array_like
        Points in image, 2xn.
    cameraMatrixA : array_like
        Camera matrix / Intrinsic matrix of camera-A, 3x3.
    cameraMatrixB : array_like
        Camera matrix / Intrinsic matrix of camera-B, 3x3.
    distCoeffsA : array_like
        Input vector of distortion coefficients :math:`(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])` \
        of 4, 5, or 8 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
        Distortion coefficients of camera-A.
    distCoeffsB : array_like
        Distortion coefficients of camera-B.
    Tx2CamA : array_like
        Transform matrix from coordinate {x} to camera-A coordinate, 4x4.
    Tx2CamB : array_like
        Transform matrix from coordinate {x} to camera-B coordinate, 4x4.
    calcReprojErr : bool, optional
        Calculate reprojected error or not.(default is False)

    Returns
    -------
    PtsInCoordinateX_3xn : ndarray
        Reconstructed points in {X} coordinate, 3xn.
    ReprojedErrA : float
        If calcReprojErr is True, return reprojected error of camera-A.
        Else, return None.
    ReprojedErrB : float
        Same as ReprojedErrA
    """
    Tx2CamA_4x4 = __toArray(Tx2CamA, copy=False).astype(np.float)
    Tx2CamB_4x4 = __toArray(Tx2CamB, copy=False).astype(np.float)
    CameraMatrixA = __toArray(cameraMatrixA, copy=False)
    CameraMatrixB = __toArray(cameraMatrixB, copy=False)
    ImgPtsA_2xn = __toArray(imgPtsA_2xn, copy=False)
    ImgPtsB_2xn = __toArray(imgPtsB_2xn, copy=False)
    DistCoeffsA = __toArray(distCoeffsA, copy=False)
    DistCoeffsB = __toArray(distCoeffsB, copy=False)
    UnDistortPtsA_2xn, UnDistortRaysA_2xn\
        = unDistortPts(imgPts_2xn=ImgPtsA_2xn, cameraMatrix=CameraMatrixA, distCoeffs=DistCoeffsA)
    UnDistortPtsB_2xn, UnDistortRaysB_2xn\
        = unDistortPts(imgPts_2xn=ImgPtsB_2xn, cameraMatrix=CameraMatrixB, distCoeffs=DistCoeffsB)
    PtsInCoordinateX_4xn = \
        cv2.triangulatePoints(projMatr1=Tx2CamA_4x4[0:3],
                              projMatr2=Tx2CamB_4x4[0:3],
                              projPoints1=UnDistortRaysA_2xn.astype(np.float),
                              projPoints2=UnDistortRaysB_2xn.astype(np.float))
    PtsInCoordinateX_3xn = unHomo(PtsInCoordinateX_4xn)

    if calcReprojErr:
        PtsInCoordinateX_3xn_Norm = PtsInCoordinateX_3xn
        ProjImgPtsA_2xn = \
            projectPtsToImg(pts_3xn=PtsInCoordinateX_3xn_Norm,
                                Tx2Cam=Tx2CamA_4x4, cameraMatrix=CameraMatrixA, distCoeffs=DistCoeffsA)
        RpErrA = np.linalg.norm(UnDistortPtsA_2xn - ProjImgPtsA_2xn,
                                axis=0).mean()

        ProjImgPtsB_2xn = \
            projectPtsToImg(pts_3xn=PtsInCoordinateX_3xn_Norm,
                                Tx2Cam=Tx2CamB_4x4, cameraMatrix=CameraMatrixB, distCoeffs=DistCoeffsB)
        RpErrB = np.linalg.norm(UnDistortPtsB_2xn - ProjImgPtsB_2xn,
                                axis=0).mean()
        return PtsInCoordinateX_3xn, RpErrA, RpErrB
    return PtsInCoordinateX_3xn, None, None
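# A minimal usage sketch for reconstruct3DPts above, assuming the helpers it calls
# (__toArray, unDistortPts, projectPtsToImg, unHomo) live in the same module; every
# numeric value below is an illustrative placeholder, not real calibration data.
import numpy as np

K_A = np.array([[800., 0., 320.], [0., 800., 240.], [0., 0., 1.]])
K_B = K_A.copy()
dist_A = np.zeros(5)
dist_B = np.zeros(5)
T_X_to_camA = np.eye(4)                      # camera-A coincides with the {X} frame
T_X_to_camB = np.eye(4)
T_X_to_camB[0, 3] = -0.1                     # camera-B shifted 10 cm along x
pts_A_2xn = np.array([[310., 355.], [242., 260.]])   # 2xn pixel points in image A
pts_B_2xn = np.array([[290., 335.], [242., 260.]])   # matching 2xn pixel points in image B

pts_X_3xn, errA, errB = reconstruct3DPts(pts_A_2xn, pts_B_2xn, K_A, K_B,
                                         dist_A, dist_B, T_X_to_camA, T_X_to_camB,
                                         calcReprojErr=True)
print(pts_X_3xn.shape)                       # (3, n)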
E, mask = cv2.findEssentialMat(pts_l_norm,
                               pts_r_norm,
                               focal=1.0,
                               pp=(0., 0.),
                               method=cv2.RANSAC,
                               prob=0.999,
                               threshold=3.0)
points, R, t, mask = cv2.recoverPose(E, pts_l_norm, pts_r_norm)

M_r = np.hstack((R, t))
M_l = np.hstack((np.eye(3, 3), np.zeros((3, 1))))

P_l = np.dot(K, M_l)
P_r = np.dot(K, M_r)
point_4d_hom = cv2.triangulatePoints(P_l, P_r, np.expand_dims(pts_l, axis=1),
                                     np.expand_dims(pts_r, axis=1))
point_4d = point_4d_hom / np.tile(point_4d_hom[-1, :], (4, 1))
point_3d = point_4d[:3, :].T

x, y, z = point_3d.T

#print("point_3d ",point_3d)

# E, mask = cv2.findEssentialMat(pts_r_norm, pts_l_norm, K, cv2.FM_RANSAC, 0.999, 1.0, None)
# print("E: ", E)
#
# points, R, t, mask = cv2.recoverPose(E, pts2, pts1, K)
# print("R: ", R)
# print("t: ", t)

from mpl_toolkits.mplot3d import Axes3D
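# A hedged plotting sketch (an assumption, not the original continuation of the snippet
# above): scatter the triangulated x, y, z coordinates using the Axes3D import.
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, s=2)
ax.set_xlabel('X'); ax.set_ylabel('Y'); ax.set_zlabel('Z')
plt.show()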
Esempio n. 42
0
        fig.canvas.flush_events()

        print("#############################################")
        print('Ground Truth')
        for object in objects:
            print(object)

        print("Detections")

        for camera0, camera1 in itertools.combinations(cameras, 2):
            A = camera1.P @ np.append(camera0.position, 1)
            C = np.array([[0, -A[2], A[1]],
                          [A[2], 0, -A[0]],
                          [-A[1], A[0], 0]])
            F = C @ camera1.P @ np.linalg.pinv(camera0.P)

            for detection0 in camera0.detections:
                for detection1 in camera1.detections:

                    something = np.append(detection1, 1) @ F @ np.append(detection0, 1)
                    if np.abs(something) < 0.0001:
                        # print(f"Matched: {something}")

                        point_3D = cv2.triangulatePoints(camera0.P, camera1.P, detection0, detection1)

                        W = point_3D[3][0]
                        X, Y, Z = point_3D[0][0]/W, point_3D[1][0]/W, point_3D[2][0]/W

                        print(f"X:{X:.2f}, Y:{Y:.2f}, Z:{Z:.2f}")
                    # else:
                    #     print(f"No Match: {something}")
def reconstruct_from_binary_patterns():
    scale_factor = 1.0
    ref_white = cv2.resize(cv2.imread("images/pattern000.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_black = cv2.resize(cv2.imread("images/pattern001.jpg", cv2.IMREAD_GRAYSCALE) / 255.0, (0,0), fx=scale_factor,fy=scale_factor)
    ref_avg   = (ref_white + ref_black) / 2.0
    ref_on    = ref_avg + 0.05 # a threshold for ON pixels
    ref_off   = ref_avg - 0.05 # add a small buffer region
   
    h,w = ref_white.shape
    img_bonus= cv2.imread("images/pattern001.jpg")
    # mask of pixels where there is projection
    proj_mask = (ref_white > (ref_black + 0.05))

    scan_bits = np.zeros((h,w), dtype=np.uint16)
    corres_Image=np.zeros((h,w,3),np.float32)
    # analyze the binary patterns from the camera
    for i in range(0,15):
        # read the file
        patt = cv2.resize(cv2.imread("images/pattern%03d.jpg"%(i+2)), (0,0), fx=scale_factor,fy=scale_factor)
        #converting image to gray scale
        patt_gray = cv2.resize(cv2.imread("images/pattern%03d.jpg"%(i+2),cv2.IMREAD_GRAYSCALE)/255.0, (0,0), fx=scale_factor,fy=scale_factor)        
        height,width=patt_gray.shape
        
        # mask where the pixels are ON
        on_mask = (patt_gray > ref_on) & proj_mask
        
        
        # this code corresponds with the binary pattern code
        bit_code = np.uint16(1 << i)
        
        # populate scan_bits by adding bit_code wherever on_mask is set
        scan_bits[on_mask] += bit_code

    print("load codebook")
    # the codebook translates from <binary code> to (x,y) in projector screen space
    with open("binary_codes_ids_codebook.pckl","r") as f:
        binary_codes_ids_codebook = pickle.load(f)
    colorMatrix=[]
    camera_points = []
    projector_points = []
    for x in range(w):
        for y in range(h):
            if not proj_mask[y,x]:
                continue # no projection here
            if scan_bits[y,x] not in binary_codes_ids_codebook:
                continue # bad binary code
            (p_x,p_y)= binary_codes_ids_codebook[scan_bits[y,x]]   
            if p_x >= 1279 or p_y >= 799:
              continue
            else:
              projector_points.append((p_x,p_y))
              camera_points.append((x/2,y/2))
              corres_Image[y,x]=[0,p_y,p_x]
              blue,green,red=img_bonus[y,x,:]
              colorMatrix.append((red,green,blue))

    colorMatrix=np.array(colorMatrix,dtype=np.float32)  
    colorMatrix=colorMatrix.reshape(-1,1,3)
    b,g,r=cv2.split(corres_Image)
    norm_img_b = np.zeros((h,w),np.float32)

    norm_img_g = np.zeros((h,w),np.float32)
    cv2.normalize(g,norm_img_g, 0,255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    norm_img_r = np.zeros((h,w),np.float32)

    cv2.normalize(r,norm_img_r, 0,255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    corres_Image_norm = cv2.merge((norm_img_b,norm_img_g,norm_img_r))
    cv2.imwrite(sys.argv[1] + 'correspondence.jpg',corres_Image_norm)
     

  
    # NOTE: due to differences in calibration and acquisition, the camera
    # points collected above were divided by 2
    
    # now that we have 2D-2D correspondances, we can triangulate 3D points!

    # load the prepared stereo calibration between projector and camera
    with open("stereo_calibration.pckl","r") as f:
        d = pickle.load(f)
        camera_K    = d['camera_K']
        camera_d    = d['camera_d']
        projector_K = d['projector_K']
        projector_d = d['projector_d']
        projector_R = d['projector_R']
        projector_t = d['projector_t']
        
        camera_points = np.array(camera_points,dtype=np.float32)           
        projector_points = np.array(projector_points,dtype=np.float32)
        camera_points=camera_points.reshape(-1,1,2)
       
        projector_points=projector_points.reshape(-1,1,2)
        
        undistorted_camera=cv2.undistortPoints(camera_points,camera_K,camera_d)        
        undistorted_projector=cv2.undistortPoints(projector_points,projector_K,projector_d)
        
        P0 = np.float64([ [1,0,0,0],
                    [0,1,0,0],
                    [0,0,1,0]   ])
             
        P1 = np.hstack((projector_R,projector_t)) 
        res=cv2.triangulatePoints(P0,P1,undistorted_camera,undistorted_projector)
        
        points_3d = cv2.convertPointsFromHomogeneous(res.T)
        
        
        
        points=[]
        colorPoints=[]
        for i in range(len(points_3d)):          
          if (int(points_3d[i][0][2])>200) and (int(points_3d[i][0][2])<1400):            
            points.append([points_3d[i][0]])
            
            colorPoints.append(np.concatenate(([points_3d[i][0]],colorMatrix[i]),axis =1))
        
        points = np.array(points,dtype=np.float32)
        colorPoints= np.array(colorPoints,dtype=np.float32)
        write_3d_points_bonus(colorPoints)
        
        points_3d=points
        
        # the undistortion, triangulation and homogeneous conversion above already
        # produced the final 3D points, named "points_3d"

    return points_3d
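# A small, self-contained illustration (not part of the assignment code above) of how
# scan_bits is assembled: pattern i contributes bit 2**i wherever the pixel was ON, so
# after 15 patterns each pixel holds an integer id that indexes binary_codes_ids_codebook.
import numpy as np

scan_bits_demo = np.zeros((2, 2), dtype=np.uint16)
on_masks = [np.array([[True, False], [False, True]]),   # pattern 0 -> bit 1
            np.array([[True, True],  [False, False]])]  # pattern 1 -> bit 2
for i, m in enumerate(on_masks):
    scan_bits_demo[m] += np.uint16(1 << i)
print(scan_bits_demo)   # [[3 2]
                        #  [0 1]]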
Esempio n. 44
0
def triangulate(matches_direct, cam_dict):
    IK = np.linalg.inv( proj.cam.get_K() )

    match_pairs = proj.generate_match_pairs(matches_direct)

    # zero the match NED coordinate and initialize the corresponding
    # count array
    counters = []
    for match in matches_direct:
        match[0] = np.array( [0.0, 0.0, 0.0] )
        counters.append( 0)
        
    for i, i1 in enumerate(proj.image_list):
        #rvec1, tvec1 = i1.get_proj()
        rvec1 = cam_dict[i1.name]['rvec']
        tvec1 = cam_dict[i1.name]['tvec']
        R1, jac = cv2.Rodrigues(rvec1)
        PROJ1 = np.concatenate((R1, tvec1), axis=1)
        for j, i2 in enumerate(proj.image_list):
            matches = match_pairs[i][j]
            if (j <= i) or (len(matches) == 0):
                continue
            # distance between two cameras
            ned1 = np.array(cam_dict[i1.name]['ned'])
            ned2 = np.array(cam_dict[i2.name]['ned'])
            dist = np.linalg.norm(ned2 - ned1)
            if dist < 40:
                # idea: the closer together two poses are, the greater
                # the triangulation error will be relative to small
                # attitude errors.  If we only compare more distant
                # camera views, the solver will be more stable.
                continue
            #rvec2, tvec2 = i2.get_proj()
            rvec2 = cam_dict[i2.name]['rvec']
            tvec2 = cam_dict[i2.name]['tvec']
            R2, jac = cv2.Rodrigues(rvec2)
            PROJ2 = np.concatenate((R2, tvec2), axis=1)

            uv1 = []; uv2 = []; indices = []
            for pair in matches:
                p1 = i1.kp_list[ pair[0] ].pt
                p2 = i2.kp_list[ pair[1] ].pt
                uv1.append( [p1[0], p1[1], 1.0] )
                uv2.append( [p2[0], p2[1], 1.0] )
                # pair[2] is the index back into the matches_direct structure
                indices.append( pair[2] )
            pts1 = IK.dot(np.array(uv1).T)
            pts2 = IK.dot(np.array(uv2).T)
            points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
            points /= points[3]
            #print "points:\n", points[0:3].T

            # fixme: need to update result, sum_dict is no longer used
            print "%s vs %s" % (i1.name, i2.name)
            for k, p in enumerate(points[0:3].T):
                match = matches_direct[indices[k]]
                match[0] += p
                counters[indices[k]] += 1

    # divide each NED coordinate (sum of triangulated point locations)
    # of matches_direct_dict by the count of references to produce an
    # average NED coordinate for each match.
    for i, match in enumerate(matches_direct):
        if counters[i] > 0:
            match[0] /= counters[i]
        else:
            print 'invalid match from images too close to each other:', match
            for j in range(1, len(match)):
                match[j] = [-1, -1]

    # return the new match structure
    return matches_direct
def triangulatePoints(P1, P2, x1, x2):
    X = cv2.triangulatePoints(P1[:3], P2[:3], x1, x2)
    return X / X[3]
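# A minimal usage sketch for the triangulatePoints wrapper above (assumed, noise-free
# setup): normalized projection matrices [I|0] and [I|t] plus 2xN normalized points.
import numpy as np

P1 = np.hstack((np.eye(3), np.zeros((3, 1))))               # reference camera at the origin
P2 = np.hstack((np.eye(3), np.array([[-1.], [0.], [0.]])))  # second camera, baseline along x
x1 = np.array([[0.125, 0.25], [0.05, 0.0]])                 # 2xN points seen by camera 1
x2 = np.array([[-0.125, 0.0], [0.05, 0.0]])                 # the same points seen by camera 2
X = triangulatePoints(P1, P2, x1, x2)                       # 4xN, last row scaled to 1
print(X[:3].T)                                              # approx [[0.5, 0.2, 4.0], [1.0, 0.0, 4.0]]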
Esempio n. 46
0
                                -4.351550728359129e+02, 2.874442853045401e+03
                            ],
                            [
                                0.612372435695795, 0.353553390593274,
                                -0.707106781186548, 8.337422895729672
                            ]])

        #izquierda=np.array([[0.500000000000000, -0.866025403784439, -3.925231146709437e-17,0.541154273188011],
        #[-0.612372435695794,-0.353553390593274,-0.707106781186548,2.024229847790223],
        #[0.612372435695795,0.353553390593274, -0.707106781186548,8.440950513770680]])

        #derecha=np.array([[0.500000000000000,-0.866025403784439,-3.925231146709437e-17,-0.005255888325765],
        #[-0.612372435695794,-0.353553390593274,-0.707106781186548,2.127757465831231],
        #[0.612372435695795,0.353553390593274,-0.707106781186548,8.337422895729672]])

        fernando = cv.triangulatePoints(izquierda, derecha, x1_fer.T, x2_fer.T)

        # however, a homogeneous point is returned
        fernando = fernando / fernando[3]
        print('Projected point from openCV:', fernando.T)
        print(sistema_real_l)
        print(prueba_l)
        print(prueba_l[0][0][0])
        #print(prueba_l[1][0])
        ## store the left-camera pixels
        u_l.append(sistema_real_l[0, 0])
        v_l.append(sistema_real_l[1, 0])
        U_l = np.array(u_l)
        V_l = np.array(v_l)
        # store the right-camera pixels
        u_r.append(sistema_real_r[0, 0])
Esempio n. 47
0
    def initial_estimation(self):
        for i in range(len(self.img_pose) - 1):
            for h in range(i + 1, len(self.img_pose)):
                src = np.array([], dtype=np.float64).reshape(0, 2)
                dst = np.array([], dtype=np.float64).reshape(0, 2)
                kp_src_idx = []
                kp_dst_idx = []

                for k in self.img_pose[i].kp_matches:
                    if self.img_pose[i].kp_match_exist(k, h):
                        k = int(k)

                        match_idx = self.img_pose[i].kp_match_idx(k, h)
                        match_idx = int(match_idx)

                        src = np.vstack((src, self.img_pose[i].kp[k]))
                        dst = np.vstack((dst, self.img_pose[h].kp[match_idx]))
                        kp_dst_idx.append(match_idx)
                        kp_src_idx.append(k)

                if src.shape[0] < 6:
                    print(
                        "Not enough points to generate essential matrix for image_",
                        i, " and image_", h)
                    continue

                src = np.expand_dims(src, axis=1)
                dst = np.expand_dims(dst, axis=1)
                E, mask = cv2.findEssentialMat(dst,
                                               src,
                                               cameraMatrix=self.calibration,
                                               method=cv2.LMEDS,
                                               prob=0.999)
                # E, mask = cv2.findEssentialMat(dst,src,cameraMatrix = self.calibration,method =cv2.RANSAC,prob=0.999, threshold=1)

                _, local_R, local_t, mask = cv2.recoverPose(
                    E, dst, src, cameraMatrix=self.calibration, mask=mask)

                T = Pose3(Rot3(local_R),
                          Point3(local_t[0], local_t[1], local_t[2]))

                self.img_pose[h].T = transform_from(T, self.img_pose[i].T)
                cur_R = self.img_pose[h].T.rotation().matrix()
                cur_t = self.img_pose[h].T.translation().vector()
                cur_t = np.expand_dims(cur_t, axis=1)
                self.img_pose[h].P = np.dot(
                    self.calibration,
                    np.hstack((cur_R.T, -np.dot(cur_R.T, cur_t))))

                points4D = cv2.triangulatePoints(projMatr1=self.img_pose[i].P,
                                                 projMatr2=self.img_pose[h].P,
                                                 projPoints1=src,
                                                 projPoints2=dst)
                points4D = points4D / np.tile(points4D[-1, :], (4, 1))
                pt3d = points4D[:3, :].T

                # Find good triangulated points
                for j, k in enumerate(kp_src_idx):
                    if (mask[j]):
                        match_idx = self.img_pose[i].kp_match_idx(k, h)

                        if (self.img_pose[i].kp_3d_exist(k)):
                            self.img_pose[h].kp_landmark[
                                match_idx] = self.img_pose[i].kp_3d(k)
                            self.landmark[self.img_pose[i].kp_3d(
                                k)].point += pt3d[j]
                            self.landmark[self.img_pose[h].kp_3d(
                                match_idx)].seen += 1

                        else:
                            new_landmark = LandMark(pt3d[j], 2)
                            self.landmark.append(new_landmark)

                            self.img_pose[i].kp_landmark[k] = len(
                                self.landmark) - 1
                            self.img_pose[h].kp_landmark[match_idx] = len(
                                self.landmark) - 1

            for j in range(len(self.landmark)):
                if (self.landmark[j].seen >= 3):
                    self.landmark[j].point /= (self.landmark[j].seen - 1)
img2_n1 = cv.drawKeypoints(dst2, kp1_n, None, color=(0, 255, 0), flags=0)
img2_n2 = cv.drawKeypoints(dst2, kp2_n, None, color=(0, 255, 0), flags=0)

cv.imshow('Feature Matching with good points 1', img2_n1)  #,plt.show()
cv.imwrite(
    '../../output/task_3/Feature Match with good points 1 - ' + str(i) +
    '.png', img2_n1)
cv.imshow('Feature Matching with good points 2', img2_n2)  #,plt.show()
cv.imwrite(
    '../../output/task_3/Feature Match with good points 2 - ' + str(i) +
    '.png', img2_n2)

#cv.imwrite('../../output/task_3/Feature Matching with reduced points '+ str(i) + '.png', img3)#, plt.show()
cv.waitKey(1000)

triangulate = cv.triangulatePoints(proj1, proj2, l_kp1, l_kp2)

triangulate = np.array(triangulate)
print(triangulate)

cv.destroyAllWindows()

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
ax.scatter(triangulate[0] / triangulate[3], triangulate[1] / triangulate[3],
           triangulate[2] / triangulate[3])
Esempio n. 49
0
    def triangulate(self,data):
        # rospy.logwarn("triangulating: sidelen = "+str(len(self.fishuvside))+" toplen: "+str(len(self.fishuvtop)))
        if((len(self.fishuvside)>0) and (len(self.fishuvtop)>0)):
            if (1):#( (self.currentsidestamp != self.lastsidestamp) and (self.currenttopstamp!=self.lasttopstamp)  ):

                self.lasttopstamp = self.currenttopstamp
                self.lastsidestamp = self.currentsidestamp
                #print "triangulating!"
                P1 = array([self.CItop.P]).reshape(3,4)
                P2 = array([self.CIside.P]).reshape(3,4)
                x1 = vstack(self.fishuvtop)
                x1 = vstack((x1,1))
                x2 = vstack(self.fishuvside)
                x2 = vstack((x2,1))
                #use the camera information and the current guess for the u,v
                #to triangulate the position of the ball.
                #print x1,x2
                #print P1,P2
                rospy.logwarn("broadcasting measured fish!")
                try:
                    fishpos = cv2.triangulatePoints(P1, P2, self.fishuvtop.astype(float), self.fishuvside.astype(float))
                    fishpos/=fishpos[3]
                    #print ballpos

                    #send transforms to show ball's coordinate system
                    br = tf.TransformBroadcaster()
                    fishquat = tf.transformations.quaternion_from_euler(0,0,self.angle)
                    #I think the ball position needs to be inverted... why? Not sure but I think
                    #it may be because there is a confusion in the T matrix between camera->object vs. object->camera
                    br.sendTransform(-fishpos[:,0],fishquat,rospy.Time.now(),'/fishmeasured','world')

                    #create a marker
                    fishmarker = Marker()
                    fishmarker.header.frame_id='/fishmeasured'
                    fishmarker.header.stamp = rospy.Time.now()
                    fishmarker.type = fishmarker.SPHERE
                    fishmarker.action = fishmarker.MODIFY
                    fishmarker.scale.x = .12
                    fishmarker.scale.y = .12
                    fishmarker.scale.z = .12
                    fishmarker.pose.orientation.w=1
                    fishmarker.pose.orientation.x = 0
                    fishmarker.pose.orientation.y=0
                    fishmarker.pose.orientation.z=0
                    fishmarker.pose.position.x = 0
                    fishmarker.pose.position.y=0
                    fishmarker.pose.position.z=0
                    fishmarker.color.r=0.0
                    fishmarker.color.g=0
                    fishmarker.color.b=1.0
                    fishmarker.color.a=0.5

                    self.markerpub.publish(fishmarker)

                    posemsg = PoseStamped()
                    posemsg.header.stamp = rospy.Time.now()
                    posemsg.pose.position.x = 0.-fishpos[0,0]
                    posemsg.pose.position.y = 0.-fishpos[1,0]
                    posemsg.pose.position.z = 0.-fishpos[2,0]
                    self.posepub.publish(posemsg)
                except:
                    rospy.logwarn("triangulation failed")
Esempio n. 50
0
def triangulate_points(projMatr1, projMatr2, projPoints1, projPoints2):
    points4D = cv2.triangulatePoints(projMatr1, projMatr2, projPoints1,
                                     projPoints2)
    return cv2.convertPointsFromHomogeneous(points4D.T)
    # # The method shown in class, using wPts from E:
    # wPts_estimated = cv.convertPointsFromHomogeneous(np.transpose(wPts))
    # repkp1 = cv.projectPoints(wPts_estimated, (0, 0, 0), (0, 0, 0), new_kmat, dist)
    # repkp2 = cv.projectPoints(wPts_estimated, rotVec[0], trans, new_kmat, dist)
    #
    # dx1 = np.squeeze(repkp1[0][:, :, 0] - new_kp1[:, :, 0])
    # dx2 = np.squeeze(repkp2[0][:, :, 0] - new_kp2[:, :, 0])
    # dy1 = np.squeeze(repkp1[0][:, :, 1] - new_kp1[:, :, 1])
    # dy2 = np.squeeze(repkp2[0][:, :, 1] - new_kp2[:, :, 1])
    # error1_x = np.sum(np.abs(dx1) / npts)
    # error2_x = np.sum(np.abs(dx2) / npts)
    # error1_y = np.sum(np.abs(dy1) / npts)
    # error2_y = np.sum(np.abs(dy2) / npts)

    # # Triangulation Method;
    wPts_tri = cv.triangulatePoints(P1, P2, new_kp1, new_kp2)
    wPts_estimated = cv.convertPointsFromHomogeneous(np.transpose(wPts_tri))
    repkp1 = cv.projectPoints(wPts_estimated, (0, 0, 0), (0, 0, 0), new_kmat,
                              dist)
    repkp2 = cv.projectPoints(wPts_estimated, rotVec[0], trans, new_kmat, dist)

    dx1 = np.squeeze(repkp1[0][:, :, 0] - new_kp1[:, :, 0])
    dx2 = np.squeeze(repkp2[0][:, :, 0] - new_kp2[:, :, 0])
    dy1 = np.squeeze(repkp1[0][:, :, 1] - new_kp1[:, :, 1])
    dy2 = np.squeeze(repkp2[0][:, :, 1] - new_kp2[:, :, 1])
    error1_x = np.sum(np.abs(dx1) / npts)
    error2_x = np.sum(np.abs(dx2) / npts)
    error1_y = np.sum(np.abs(dy1) / npts)
    error2_y = np.sum(np.abs(dy2) / npts)

    # # The reprojection errors of two wPt estimation methods are almost the same, I believe picking either one is okay
Esempio n. 52
0
def triangulate(pose1, pose2, pts1, pts2):
    return cv2.triangulatePoints(pose1[:3], pose2[:3], pts1.T, pts2.T).T
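# Minimal usage sketch for the one-line wrapper above (assumed inputs): 3x4 or 4x4 pose
# matrices in normalized camera coordinates and Nx2 point arrays; the result is Nx4
# homogeneous, so divide by the last column to obtain 3D points.
import numpy as np

pose1 = np.eye(4)
pose2 = np.eye(4)
pose2[0, 3] = -1.0                                  # second camera offset along x
pts1 = np.array([[0.125, 0.05], [0.25, 0.0]])       # Nx2 normalized points, view 1
pts2 = np.array([[-0.125, 0.05], [0.0, 0.0]])       # Nx2 normalized points, view 2
Xh = triangulate(pose1, pose2, pts1, pts2)          # Nx4 homogeneous coordinates
X = Xh[:, :3] / Xh[:, 3:4]
print(X)                                            # approx [[0.5, 0.2, 4.0], [1.0, 0.0, 4.0]]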
Esempio n. 53
0
def relative_camera_pose(img1_, img2_, K, dist):

    print("\n\nRelativeCamPose:\n")

    img1 = undistort(img1_, 'left_undistort.jpg', K, dist)
    img2 = undistort(img2_, 'right_undistort.jpg', K, dist)

    kp1, des1, kp2, des2 = create_feature_points(img1, img2, K, dist)
    pts1, pts2, F = match_feature_points(kp1, des1, kp2, des2)

    draw_image_epipolar_lines(img1, img2, pts1, pts2, F)

    # Compute the essential matrix E
    # Help: https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga13f7e34de8fa516a686a56af1196247f
    E, _ = cv.findEssentialMat(pts1, pts2, K)
    print("\nEssential Matrix, E:\n", E)

    # Decompose the essential matrix into R, r
    R1, R2, t = cv.decomposeEssentialMat(E)

    # Re-projected feature points on the first image
    # http://answers.opencv.org/question/173969/how-to-give-input-parameters-to-triangulatepoints-in-python/
    print("K:\n", K)
    print("dist: \n", dist)

    print("R1:\n", R1)
    print("R2:\n", R2)
    print("t\n", t)

    R2_t = np.hstack((R1, (-t)))
    print("R2_t:\n", R2_t)

    zero_vector = np.array([[0, 0, 0]])
    zero_vector = np.transpose(zero_vector)

    # Create projection matrices P1 and P2
    P1 = np.hstack((K, zero_vector))
    P2 = np.dot(K, R2_t)

    print("P1:\n", P1)
    print("P2:\n", P2)

    pts1 = pts1.astype(np.float64)
    pts2 = pts2.astype(np.float64)

    pts1 = np.transpose(pts1)
    pts2 = np.transpose(pts2)

    points4D = cv.triangulatePoints(P1, P2, pts1, pts2)
    aug_points3D = points4D / points4D[3]
    print("\nAugmented points3D:\n", aug_points3D)

    projectedPoints = np.dot(P1, aug_points3D)

    projectedPoints = projectedPoints / projectedPoints[2]
    print("\nNormalized ProjectedPoints:\n", projectedPoints)
    points2D = projectedPoints[:2]
    print("\n2D Points:\n", points2D)

    #Re-projection of points
    plt.imshow(img1, cmap="gray")

    # Project points
    plt.scatter(points2D[0], points2D[1], c='b', s=40, alpha=0.5)
    plt.scatter(pts1[0], pts1[1], c='g', s=15, alpha=1)
    #plt.show()

    # Begin part 4
    min_depth = min(aug_points3D[2])
    max_depth = max(aug_points3D[2])
    print("\n\nMin depth:\n", min_depth)
    print("Max depth:\n\n", max_depth)

    N = (max_depth - min_depth) / 20
    print("N=20:\n", N)

    equispaced_dist = np.linspace(min_depth, max_depth, num=20)
    print("Equispaced distance:\n", equispaced_dist)

    projectPoints = np.dot(P1, aug_points3D)
    print("projectPoints:\n", projectPoints)

    points2D = projectPoints[:2]
    print("\n2DPoints:\n", points2D)

    # Calculate the depth of the matching points:
    # http://answers.opencv.org/question/117141/triangulate-3d-points-from-a-stereo-camera-and-chessboard/
    # https://stackoverflow.com/questions/22334023/how-to-calculate-3d-object-points-from-2d-image-points-using-stereo-triangulatio/22335825

    homography = []
    output_warp = []

    for i in range(0, 20):
        nd_vector = np.array([0, 0, -1, equispaced_dist[i]])

        P1_aug = np.vstack((P1, nd_vector))
        P2_aug = np.vstack((P2, nd_vector))

        #print("P1_aug:\n",P1_aug)
        #print("P2_aug:\n",P2_aug)

        P2_inv = np.linalg.inv(P2_aug)
        #print("P2_inv:\n", P2_inv)

        P1P2_inv = np.dot(P1_aug, P2_inv)
        #print("P1P2_inv:\n",P1P2_inv)

        R = P1P2_inv[:3, :3]

        homography.append(R)
        #print("\n\nhomography" + str(i))
        #print(homography[i])

        # give warpPerspective an explicit output size so absdiff with img1 below compares equal-sized images
        output_warp.append(cv.warpPerspective(img2, homography[i], (img1.shape[1], img1.shape[0])))
        cv.imwrite('Warped_output_' + str(i) + '.jpg', output_warp[i])

    diff = []
    abs_img = []
    blur = []
    ind = []
    depth_img = []

    blur_2D = []

    for i in range(0, 20):
        diff.append(cv.absdiff(img1, output_warp[i]))
        cv.imwrite('Absolute_diff_' + str(i) + '.jpg', diff[i])

        blur.append(cv.blur(diff[i], (15, 15)))
        cv.imwrite('Block_filter_' + str(i) + '.jpg', blur[i])

        #print("Blur:\n", blur[i])

        blur_2D.append(np.ravel(blur[i]))

    big_mat = np.array(blur_2D)

    for pixel in range(len(big_mat[0])):

        index = np.argmin(big_mat[:, pixel])
        depth_img.append(
            round(240 * equispaced_dist[index] / max(equispaced_dist)))

    depth_fin = np.array(depth_img)
    reshape_depth_img = depth_fin.reshape(img1.shape)
    img = Image.fromarray(reshape_depth_img)
    #cv.imwrite("depth_image.jpg", img)
    img.show()

    return R1, R2, t
Esempio n. 54
0
def get_coordinates(frames, detector, cam_properties, colored_frames):
    if len(frames) != 2:
        return None

    #detection left and right
    dL = detector.detect(frames[0])[0]
    dR = detector.detect(frames[1])[0]

    x_left, y_left = dL.center
    x_right, y_right = dR.center
    #print dL.corners[0]
    #print dR.corners
    #print "x_left %f x_right %f b %f f %f" % (x_left, x_right, cam_properties.b, cam_properties.f)
    #print "y_left %f y_right %f b %f f %f" % (y_left, y_right, cam_properties.b, cam_properties.f)

    colored_frames[0] = cv2.circle(colored_frames[0],
                                   (int(x_left), int(y_left)), 15, (0, 0, 255),
                                   -1)
    colored_frames[1] = cv2.circle(colored_frames[1],
                                   (int(x_right), int(y_right)), 15,
                                   (255, 0, 0), -1)
    disparity = abs(x_left - x_right)
    proj_left = np.array([[531.152586, 0.000000, 337.167439, 0.000000],
                          [0.000000, 531.152586, 247.265795, 0.000000],
                          [0.000000, 0.000000, 1.000000, 0.000000]])
    proj_right = np.array([[531.152586, 0.000000, 337.167439, 56.499615],
                           [0.000000, 531.152586, 247.265795, 0.000000],
                           [0.000000, 0.000000, 1.000000, 0.000000]])
    #print np.array([[x_left ], [y_left]]), np.array([[x_right], [y_right]])
    #points4D = cv2.triangulatePoints(proj_left, proj_right, np.array([[x_left], [y_left]]), np.array([[x_right], [y_right]]))
    #print points4D
    pL = np.array([[
        x_left, dL.corners[0][0], dL.corners[1][0], dL.corners[2][0],
        dL.corners[3][0]
    ],
                   [
                       y_left, dL.corners[0][1], dL.corners[1][1],
                       dL.corners[2][1], dL.corners[3][1]
                   ]])
    pR = np.array([[
        x_right, dR.corners[0][0], dR.corners[1][0], dR.corners[2][0],
        dR.corners[3][0]
    ],
                   [
                       y_right, dR.corners[0][1], dR.corners[1][1],
                       dR.corners[2][1], dR.corners[3][1]
                   ]])
    # points is in order of C, BL, BR, TR, TL
    points = cv2.triangulatePoints(proj_left, proj_right, pL, pR)
    #print points
    factor = 1.053900  # empirical scale factor so that measurements near 100 cm come out in the right range
    for i in range(5):
        points[0][i] /= points[3][i] * factor
        points[1][i] /= points[3][i] * factor
        points[2][i] /= points[3][i] * factor
        points[3][i] /= points[3][i]
    #convert it to 3D point array
    p3D = [[points[0][0], points[1][0], points[2][0]],
           [points[0][1], points[1][1], points[2][1]],
           [points[0][2], points[1][2], points[2][2]],
           [points[0][3], points[1][3], points[2][3]],
           [points[0][4], points[1][4], points[2][4]]]
    #x=points4D[0]/points4D[3]
    #y=points4D[1]/points4D[3]
    #z=points4D[2]/points4D[3]
    #print p3D

    #x=p3D[0][0]
    #y=p3D[0][1]
    #z=p3D[0][2]

    #How "straight" is the april tag?
    cvecBL = [
        p3D[0][0] - p3D[1][0], p3D[0][1] - p3D[1][1], p3D[0][2] - p3D[1][2]
    ]
    cvecBR = [
        p3D[0][0] - p3D[2][0], p3D[0][1] - p3D[2][1], p3D[0][2] - p3D[2][2]
    ]
    cvecTR = [
        p3D[0][0] - p3D[3][0], p3D[0][1] - p3D[3][1], p3D[0][2] - p3D[3][2]
    ]
    cvecTL = [
        p3D[0][0] - p3D[4][0], p3D[0][1] - p3D[4][1], p3D[0][2] - p3D[4][2]
    ]

    magBL = math.sqrt(cvecBL[0] * cvecBL[0] + cvecBL[1] * cvecBL[1] +
                      cvecBL[2] * cvecBL[2])
    magBR = math.sqrt(cvecBR[0] * cvecBR[0] + cvecBR[1] * cvecBR[1] +
                      cvecBR[2] * cvecBR[2])
    magTR = math.sqrt(cvecTR[0] * cvecTR[0] + cvecTR[1] * cvecTR[1] +
                      cvecTR[2] * cvecTR[2])
    magTL = math.sqrt(cvecTL[0] * cvecTL[0] + cvecTL[1] * cvecTL[1] +
                      cvecTL[2] * cvecTL[2])

    dot1 = (cvecBL[0] * cvecTR[0] + cvecBL[1] * cvecTR[1] +
            cvecBL[2] * cvecTR[2])
    dot2 = (cvecBR[0] * cvecTL[0] + cvecBR[1] * cvecTL[1] +
            cvecBR[2] * cvecTL[2])

    cos1 = dot1 / magBL / magTR
    cos2 = dot2 / magBR / magTL
    #ideally the cos should be -1
    #its actually pretty good :O ~98% range
    #print "%f %f" % (cos1,cos2)

    magAvg = (magBL + magBR + magTR + magTL) / 4
    #ideally the magnitudes should be identical
    #eh good enough ~0.8-1.2 range
    #print "%f %f %f %f" % (magBL/magAvg,magBR/magAvg,magTR/magAvg,magTL/magAvg)

    #avg of values
    avgCenter = [0.0, 0.0, 0.0]
    for i in range(0, 5):
        avgCenter[0] += p3D[i][0]
        avgCenter[1] += p3D[i][1]
        avgCenter[2] += p3D[i][2]

    avgCenter[0] /= 5
    avgCenter[1] /= 5
    avgCenter[2] /= 5
    inches = 39.3701

    #ideally identical
    #steady-low offset ~0.01%-0.05%
    #moving-high offset ~0.10-0.15%
    #print "Center", p3D[0], "Average",avgCenter
    #print "avg %f %f %f" %(p3D[0][0]/avgCenter[0],p3D[0][1]/avgCenter[1],p3D[0][2]/avgCenter[2])
    print "avg %f %f %f" % (avgCenter[0], avgCenter[1], avgCenter[2])
    print "Jin :avg correct units: %f %f %f" % (
        avgCenter[0] * inches, avgCenter[1] * inches, avgCenter[2] * inches)
    #distance=math.sqrt(x*x+y*y+z*z)
    #print "%f %f %f" % (x, y, z)
    #print "distance?: %f %f" % (z, distance)
    ##global average
    ##average+=avgCenter[2]
    ##print " .-. "
    ##global counter
    ##counter+=1
    ##print "Average: %f" % (average/counter)
    focal = 440  # approximate focal length in pixels
    #focal = depth * disparity / cam_properties.b
    depth = focal * cam_properties.b / disparity
    print "depth: %f, disparity: %f, focal: %f" % (depth, disparity, focal)
    x_left = abs(960 - x_left)
    x_right = abs(960 - x_right)

    z = (cam_properties.b * cam_properties.f) / (abs(x_left - x_right))
    x = (z / cam_properties.f) * ((x_left + x_right) / 2)
    return (x, z)
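# A small worked example (assumed numbers) of the depth-from-disparity relation used at
# the end of get_coordinates, z = b * f / d, treating proj_right's fourth-column entry
# as f*b under the usual rectified-stereo convention.
baseline_m = 56.499615 / 531.152586   # baseline implied by proj_right above, ~0.106 m
focal_px = 531.152586                 # focal length in pixels from the projection matrices
disparity_px = 56.5                   # assumed horizontal offset between the two detections
z = baseline_m * focal_px / disparity_px
print(round(z, 3))                    # ~1.0, i.e. the tag would be about a metre away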
Esempio n. 55
0
 raw_input()
 #find translation vector and rotation matrix from E using svd, V is V'
 U,W,Vt = np.linalg.svd(E)
 print 'len of Vt: ',len(Vt),len(Vt[0])
 raw_input()
 print 'W:diag(110):\n'
 print W[0],W[1]
 raw_input()
 W = np.matrix('0, -1, 0; 1, 0, 0; 0, 0, 1')
 R = np.dot(np.dot(U,W),Vt) # 2 solns here - UW'V'
 t = U[:,2] # last column
 print 'Camera matrix\n'
 print K
 raw_input()
 print R
 raw_input()
 print t
 raw_input()
 #Projection matrices - P1 at origin and P2 from (R,t)
 P1 = np.array([ [ 1,0,0,0],[0,1,0,0],[0,0,1,0] ])
 P2 = np.hstack((R,t.reshape(3,1)))
 #print 'P2:\n',P2
 #raw_input()
 print len(points1),len(points1[0])
 raw_input()
 points1t = np.array(zip(*points1))
 points2t = np.array(zip(*points2))
 #print points1t
 res = cv2.triangulatePoints(P1,P2,points1t[:2],points2t[:2])
 print res
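# A hedged sketch (not part of the snippet above) of resolving the four-fold (R, t)
# ambiguity that the "2 solns here" comment refers to: build all four candidates from
# the SVD of E, triangulate with each, and keep the pair that places the points in
# front of both cameras (essentially what cv2.recoverPose does internally).
import numpy as np
import cv2

def pick_pose(U, Vt, pts1_2xN, pts2_2xN):
    W = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    P1 = np.hstack((np.eye(3), np.zeros((3, 1))))
    best, best_count = None, -1
    for R_cand in (np.dot(np.dot(U, W), Vt), np.dot(np.dot(U, W.T), Vt)):
        if np.linalg.det(R_cand) < 0:           # enforce a proper rotation (det = +1)
            R_cand = -R_cand
        for t_cand in (U[:, 2].reshape(3, 1), -U[:, 2].reshape(3, 1)):
            P2 = np.hstack((R_cand, t_cand))
            Xh = cv2.triangulatePoints(P1, P2, pts1_2xN, pts2_2xN)
            Xh = Xh / Xh[3]
            in_front = (Xh[2] > 0) & (np.dot(P2, Xh)[2] > 0)
            if in_front.sum() > best_count:
                best, best_count = (R_cand, t_cand), int(in_front.sum())
    return best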
 
Esempio n. 56
0
    def run(self, img):
        """Perform one step

        Args:
            img (): ...
        """
        if self.ref_img is None:
            self.ref_img = img
            self.kp_ref = self.feature_detector.detect(img, None)
            self.kp_ref, self.kp_ref_des = self.feature_detector.compute(
                img, self.kp_ref)
            return

        #t = time()
        kp_ref_pt, kp_cur_pt, cur_description = self._find_matches(img)
        #print(kp_ref_pt.shape, kp_cur_pt.shape, cur_description.shape)
        #print('Finding matches on imgs: ', time() - t)

        #t = time()
        if self.state_estimator.points_number > 0:
            camera_direction = np.matmul(self.state_estimator.rotation_matrix,
                                         self.z_versor)
            #print(camera_direction)
            #print(self.state_estimator.points_viewing_versor)
            point_angle = np.array([
                np.dot(camera_direction[:, 0], point_versor)
                for point_versor in self.state_estimator.points_viewing_versor
            ])
            #print(point_angle)
            correct_angle = point_angle > np.cos(np.deg2rad(60))
            #print(self.state_estimator.points[correct_angle].shape)
            #print(self.state_estimator.position.shape)
            camera_point_direction = (self.state_estimator.points[correct_angle]-self.state_estimator.position.T) \
                / np.expand_dims(np.linalg.norm(self.state_estimator.points[correct_angle]-self.state_estimator.position.T, axis=1), axis=1)
            #print(camera_point_direction)
            camera_point_angle = np.array([
                np.dot(camera_direction[:, 0], cp_dir)
                for cp_dir in camera_point_direction
            ])
            in_front_of_camera = camera_point_angle > 0
            correct_angle_indicies = np.argwhere(correct_angle)[:, 0]
            visible_points_indicies = correct_angle_indicies[
                in_front_of_camera]

            #print(self.state_estimator.points_descriptor[visible_points_indicies])
            #print(cur_description)
            matches = self.bf.match(
                self.state_estimator.
                points_descriptor[visible_points_indicies], cur_description)
            matches_bt = [
                m for m in matches if m.distance < self.match_treshold
            ]
            old_points_indicies = [
                visible_points_indicies[m.queryIdx] for m in matches_bt
            ]
            old_points_descriptors = self.state_estimator.points[
                old_points_indicies]
            #print(old_points_indicies)
            cur_points_old_indicies = [m.trainIdx for m in matches_bt]
            cur_points_new_indicies = [
                i for i in range(len(kp_cur_pt))
                if i not in cur_points_old_indicies
            ]
        else:
            old_points = np.zeros((0, 3))
            old_points_descriptors = np.zeros((0, 32))
            old_points_indicies = np.array([], dtype=np.int64)
            cur_points_new_indicies = range(len(kp_cur_pt))
            cur_points_old_indicies = []
        #print('Finding matches in map: ', time() - t)
        # Calculation of homography and fundamental could be parallelized
        #homography_matrix, _ = self._find_homography(kp_ref_pt, kp_cur_pt)
        points_in_map = []

        #t = time()
        # For now use only triangulation
        if True:
            extrinsic_params = self._find_extrinsic_triangulation(
                kp_ref_pt, kp_cur_pt)
        else:
            extrinsic_params = self._find_extrinsic_pnp(
                self.state_estimator.points[cur_points_old_indicies],
                kp_cur_pt[cur_points_old_indicies])
        #print('Finding extrinscic: ', time() - t)
        #print(extrinsic_params)

        new_pos = extrinsic_params[:3, 3]
        new_rot = extrinsic_params[:3, :3]
        #print(new_pos)
        self.position[self.pos_i, :] = new_pos
        self.pos_i += 1

        #print(self.state_estimator.extrinsic_matrix_3x4)
        #print(extrinsic_params[:3,:])
        #t = time()
        points_homo = cv2.triangulatePoints(
            np.matmul(self.calibration_matrix,
                      self.state_estimator.extrinsic_matrix_3x4),
            np.matmul(self.calibration_matrix, extrinsic_params[:3, :]),
            kp_ref_pt.T, kp_cur_pt.T)
        points = cv2.convertPointsFromHomogeneous(points_homo.T)
        points = np.reshape(points, (points.shape[0], points.shape[2]))
        #print('Triangulation: ', time() - t)

        new_points = points[cur_points_new_indicies]
        old_points = points[cur_points_old_indicies]
        new_desciptions = cur_description[cur_points_new_indicies]
        #print('new ', new_points.shape)
        #print('old ', old_points.shape)

        #t = time()
        self.state_estimator.update(new_pos, new_rot, new_points,
                                    new_desciptions, old_points,
                                    old_points_descriptors,
                                    old_points_indicies)
        #print('Update Kalman: ', time() - t)

        self.ref_img = img
Esempio n. 57
0
 def _triangulate_points(self, P_ref, P_cur, kps_ref, kps_cur):
     """Calculate 3d points"""
     pts3d_homo = cv2.triangulatePoints(P_ref, P_cur, kps_ref.T,
                                        kps_cur.T).T
     pts3d = cv2.convertPointsFromHomogeneous(pts3d_homo)
     return pts3d.reshape((-1, 3)).astype(np.float64)
Esempio n. 58
0
import cv2
import numpy as np

# X = cv2.triangulatePoints(P1, P2, x1, x2)
# 	P1: Camera projection from X to x1; x1 = dot(P1,X)
# 	P2: Camera projection from X to x2; x2 = dot(P2,X)
# 	x1: 2xN normalized points
# 	x2: 2xN normalized points

# Camera projection matrices
P1 = np.eye(4)
P1 = P1[:3]
P2 = np.array([[0.878, -0.01, 0.479, -1.995], [0.01, 1., 0.002, -0.226],
               [-0.479, 0.002, 0.878, 0.615]])

# 2xN normalized image points for the two views (only the x and y rows, despite the "3xN" names)
a3xN = np.array([[0.091, 0.167, 0.231, 0.083, 0.154],
                 [0.364, 0.333, 0.308, 0.333, 0.308]])
b3xN = np.array([[0.42, 0.537, 0.645, 0.431, 0.538],
                 [0.389, 0.375, 0.362, 0.357, 0.345]])
X = cv2.triangulatePoints(P1, P2, a3xN, b3xN)
X /= X[3]
# Recover the origin arrays from PX
x1 = np.dot(P1, X)
x2 = np.dot(P2, X)
# Again, put in homogeneous form before using them
x1 /= x1[2]
x2 /= x2[2]

print(X)
print(x1)
print(x2)
img_right = cv2.undistortPoints(img_right, cmatrix_right, dis_coeff_right)

I = np.eye(3, dtype=np.float64)
O = np.zeros((3, 1), dtype=np.float64)

P1 = np.hstack((I, O))
P2 = np.hstack((R, T))

np.savetxt(
    'D:/SPRING 2020/CSE 598 Perception in Robotics/Project/Project 2a/project_2a/parameters/P1.txt',
    P1)
np.savetxt(
    'D:/SPRING 2020/CSE 598 Perception in Robotics/Project/Project 2a/project_2a/parameters/P2.txt',
    P2)

final_points = cv2.triangulatePoints(P1, P2, img_left, img_right)
tp = final_points

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D

from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection

fig = plt.figure()
plt.axis('equal')
#ax = fig.add_subplot(111, projection='3d')
ax = plt.axes(projection='3d')
plt.title('Before Rectification')

# vertices of a pyramid
v = np.array([[-1, -1, 1], [1, -1, 1], [1, 1, 1], [-1, 1, 1], [0, 0, 0]])
Esempio n. 60
0
import cv2
import numpy as np

p1 = np.eye(3, 4, dtype=np.float32)
p2 = np.eye(3, 4, dtype=np.float32)
p2[0, 3] = -1

N = 5
points3d = np.empty((4, N), np.float32)
points3d[:3, :] = np.random.randn(3, N)
points3d[3, :] = 1

points1 = p1 @ points3d
points1 = points1[:2, :] / points1[2, :]
points1[:2, :] += np.random.randn(2, N) * 1e-2

points2 = p2 @ points3d
points2 = points2[:2, :] / points2[2, :]
points2[:2, :] += np.random.randn(2, N) * 1e-2

points3d_reconstr = cv2.triangulatePoints(p1, p2, points1, points2)
points3d_reconstr /= points3d_reconstr[3, :]

print('origin')
print(points3d[:3].T)
print('reco')
print(points3d_reconstr[:3].T)