Example 1
def triangulatation_self(points1n, points2n):
    """Triangulate 3D points from two views given normalized image points.

    Parameters
    ----------
    points1n, points2n : np.ndarray
        3 x n homogeneous image points already normalized by the camera
        intrinsics (i.e. K^-1 has been applied by the caller).

    Returns
    -------
    tuple
        ``(tripoints3d, E, P1, P2)`` — the triangulated homogeneous 3D
        points, the estimated essential matrix, and the two 3x4 camera
        matrices (camera 1 at the origin).

    Raises
    ------
    ValueError
        If none of the four candidate poses places the test point in
        front of both cameras.
    """
    E = structure.compute_essential_normalized(points1n, points2n)
    print('Computed essential matrix:', (-E / E[0][1]))

    # Camera 1 is the reference frame; decomposing the essential matrix
    # yields four candidate poses for camera 2.
    P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
    P2s = structure.compute_P_from_essential(E)

    ind = -1
    for i, P2 in enumerate(P2s):
        # Triangulate a single test point to check the cheirality condition.
        d1 = structure.reconstruct_one_point(points1n[:, 0], points2n[:, 0],
                                             P1, P2)

        # Convert P2 from camera view to world view.
        P2_homogenous = np.linalg.inv(np.vstack([P2, [0, 0, 0, 1]]))
        d2 = np.dot(P2_homogenous[:3, :4], d1)

        # The correct pose puts the point in front of both cameras (z > 0).
        if d1[2] > 0 and d2[2] > 0:
            ind = i

    if ind == -1:
        # Previously this fell through and silently used P2s[-1].
        raise ValueError('No candidate pose places the test point in front '
                         'of both cameras; cannot select P2.')

    P2 = np.linalg.inv(np.vstack([P2s[ind], [0, 0, 0, 1]]))[:3, :4]
    tripoints3d = structure.linear_triangulation(points1n, points2n, P1, P2)
    return tripoints3d, E, P1, P2
Example 2
def _load_pair(path1, path2):
    """Load an image pair, match correspondences, and return homogeneous
    points plus an assumed intrinsic matrix (focal length 2360, as used
    for the 'dino' dataset)."""
    img1 = cv2.imread(path1)
    img2 = cv2.imread(path2)
    pts1, pts2 = features.find_correspondence_points(img1, img2)
    points1 = processor.cart2hom(pts1)
    points2 = processor.cart2hom(pts2)

    # Visualize the matched correspondences side by side.
    fig, ax = plt.subplots(1, 2)
    ax[0].autoscale_view('tight')
    ax[0].imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
    ax[0].plot(points1[0], points1[1], 'r.')
    ax[1].autoscale_view('tight')
    ax[1].imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
    ax[1].plot(points2[0], points2[1], 'r.')
    fig.show()

    height, width, ch = img1.shape
    intrinsic = np.array([  # for dino
        [2360, 0, width / 2], [0, 2360, height / 2], [0, 0, 1]
    ])
    return points1, points2, intrinsic


def read_directory(directory_name, second_directory="C:/yzn/picture/computer1"):
    """Reconstruct and plot 3D points for every image pair in a directory.

    For each filename in ``directory_name`` the same filename is loaded
    from ``second_directory``; feature correspondences are matched, the
    essential matrix is estimated, the correct relative camera pose is
    selected via the cheirality check, and the triangulated points are
    shown in a 3D plot.

    Parameters
    ----------
    directory_name : str
        Directory containing the first image of each pair.
    second_directory : str, optional
        Directory containing the matching second images. Defaults to the
        previously hard-coded path for backward compatibility.

    Raises
    ------
    ValueError
        If no candidate pose passes the cheirality check for a pair.
    """
    for filename in os.listdir(directory_name):
        points1, points2, intrinsic = _load_pair(
            directory_name + "/" + filename,
            second_directory + "/" + filename)

        # Normalize points with K^-1; the essential matrix is estimated
        # up to scale. Invert K once and reuse it for both point sets.
        K_inv = np.linalg.inv(intrinsic)
        points1n = np.dot(K_inv, points1)
        points2n = np.dot(K_inv, points2)
        E = structure.compute_essential_normalized(points1n, points2n)
        print('Computed essential matrix:', (-E / E[0][1]))

        # Camera 1 is the reference; E yields 4 candidate poses for camera 2.
        P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        P2s = structure.compute_P_from_essential(E)

        ind = -1
        for i, P2 in enumerate(P2s):
            # Triangulate one test point to check the cheirality condition.
            d1 = structure.reconstruct_one_point(points1n[:, 0],
                                                 points2n[:, 0], P1, P2)

            # Convert P2 from camera view to world view.
            P2_homogenous = np.linalg.inv(np.vstack([P2, [0, 0, 0, 1]]))
            d2 = np.dot(P2_homogenous[:3, :4], d1)

            # The correct pose puts the point in front of both cameras.
            if d1[2] > 0 and d2[2] > 0:
                ind = i

        if ind == -1:
            # Previously this fell through and silently used P2s[-1].
            raise ValueError('No valid camera pose found for pair %r'
                             % filename)

        P2 = np.linalg.inv(np.vstack([P2s[ind], [0, 0, 0, 1]]))[:3, :4]
        tripoints3d = structure.linear_triangulation(points1n, points2n, P1,
                                                     P2)
        print('3d ax', tripoints3d)

        # fig.gca(projection='3d') was removed in Matplotlib 3.6; use
        # add_subplot with the projection keyword instead.
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.plot(tripoints3d[0], tripoints3d[1], tripoints3d[2], 'b.')
        ax.set_xlabel('x axis')
        ax.set_ylabel('y axis')
        ax.set_zlabel('z axis')
        ax.view_init(elev=135, azim=90)
        plt.show()
Example 3
# Normalize the 2D correspondences with K^-1 so the essential matrix can
# be estimated directly from image points (result is up to scale).
# NOTE(review): `intrinsic`, `points1`, `points2`, `true_E` and `H_c1_c2`
# are defined earlier in the full script — not visible in this chunk.
points1n = np.dot(np.linalg.inv(intrinsic), points1)
points2n = np.dot(np.linalg.inv(intrinsic), points2)
E = structure.compute_essential_normalized(points1n, points2n)
print('Computed essential matrix:', (-E / E[0][1]))

# True fundamental matrix F = K^-t E K^-1
true_F = np.dot(np.dot(np.linalg.inv(intrinsic).T, true_E),
                np.linalg.inv(intrinsic))
F = structure.compute_fundamental_normalized(points1, points2)
print('True fundamental matrix:', true_F)
# Scale the estimate so its [2][2] entry matches the true F before printing.
print('Computed fundamental matrix:', (F * true_F[2][2]))

# Given we are at camera 1, calculate the parameters for camera 2.
# Decomposing the essential matrix returns 4 possible camera parameters.
P1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
P2s = structure.compute_P_from_essential(E)

ind = -1
for i, P2 in enumerate(P2s):
    # Triangulate one test point to check which candidate pose is correct.
    d1 = structure.reconstruct_one_point(points1n[:, 0], points2n[:, 0], P1,
                                         P2)
    # `extrinsic_from_camera_pose` is defined elsewhere in the file —
    # presumably maps the camera-frame pose to the world frame; verify.
    P2_homogenous = extrinsic_from_camera_pose(P2)
    d2 = np.dot(P2_homogenous[:3, :4], d1)

    # Keep the candidate that places the point in front of both cameras
    # (positive depth). NOTE(review): if none qualifies, ind stays -1 and
    # P2s[-1] is silently used below.
    if d1[2] > 0 and d2[2] > 0:
        ind = i

print('True pose of c2 wrt c1: ', H_c1_c2)
P2 = np.linalg.inv(np.vstack([P2s[ind], [0, 0, 0, 1]]))[:3, :4]
P2f = structure.compute_P_from_fundamental(F)