Example No. 1
def main():
    # Define the pose distribution.
    mean_pose = SE3()
    cov_pose = np.diag(np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.4])**2)

    # Draw random poses from this distribution.
    poses = draw_random_poses(mean_pose, cov_pose)

    # Estimate the mean pose from the random poses.
    estimated_mean_pose = compute_mean_pose(poses)
    print(estimated_mean_pose.to_matrix())

    # Plot result.
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot each of the randomly drawn poses.
    for pose in poses:
        vg.plot_pose(ax, pose.to_tuple(), alpha=0.05)

    # Plot the estimated mean pose.
    vg.plot_pose(ax, estimated_mean_pose.to_tuple())

    # Show figure.
    vg.plot.axis_equal(ax)
    plt.show()
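Note: `draw_random_poses` and `compute_mean_pose` are defined elsewhere and are not shown in this example. A minimal sketch of what they might look like, assuming the same pylie API used above (`SE3.Exp`, an `SE3.Log` method, and `@` for composition), could be:

import numpy as np
from pylie import SE3


def draw_random_poses(mean_pose, cov_pose, n=100):
    # Draw n poses by right-perturbing the mean pose with zero-mean Gaussian tangent vectors.
    xis = np.random.multivariate_normal(np.zeros(6), cov_pose, n)
    return [mean_pose @ SE3.Exp(xi.reshape(6, 1)) for xi in xis]


def compute_mean_pose(poses, max_iters=20, tol=1e-10):
    # Iterative mean on the SE(3) manifold: average the tangent vectors of all poses
    # relative to the current mean estimate, update the mean, and repeat until convergence.
    mean = poses[0]
    for _ in range(max_iters):
        xi_mean = np.mean([(mean.inverse() @ pose).Log() for pose in poses], axis=0)
        mean = mean @ SE3.Exp(xi_mean)
        if np.linalg.norm(xi_mean) < tol:
            break
    return mean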
Example No. 2
def visualise_multicam_moba(true_pose_w_c, true_box_w, measurement, x, cost):
    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot box and true state
    for true_pose in true_pose_w_c:
        vg.plot_pose(ax, true_pose.to_tuple(), scale=1, alpha=0.4)
    vg.utils.plot_as_box(ax, true_box_w)

    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = []
    for pose, meas in zip(x[0], measurement):
        # Normalised in 3d.
        xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))

        artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
        artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), pose.to_tuple()))
        artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
        artists.extend(
            vg.utils.plot_as_box(ax, pose * meas.camera.project_to_normalised_3d(pose.inverse() * meas.x_w)))
    vg.plot.axis_equal(ax)
    plt.draw()

    while True:
        if plt.waitforbuttonpress():
            break

    # Plot iterations
    for i in range(1, len(x)):
        for artist in artists:
            artist.remove()

        ax.set_title('Cost: ' + str(cost[i]))
        artists = []
        for pose, meas in zip(x[i], measurement):
            # Normalised in 3d.
            xn_3d = np.vstack((meas.xn, np.ones((1, meas.num))))

            artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
            artists.extend(vg.plot_camera_image_plane(ax, meas.camera.K(), pose.to_tuple()))
            artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
            artists.extend(vg.utils.plot_as_box(ax, pose * meas.camera.project_to_normalised_3d(
                pose.inverse() * meas.x_w)))

        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break

    plt.close()
Example No. 3
def main():
    # World box.
    points_w = vg.utils.generate_box()

    # True observer pose.
    true_pose_wo = SE3((SO3.rot_z(np.pi), np.array([[3, 0, 0]]).T))

    # Observed box.
    points_o = vg.utils.generate_box(pose=true_pose_wo.inverse().to_tuple())

    # Perturb observer pose and use as initial state.
    init_pose_wo = true_pose_wo + np.random.randn(6, 1)

    # Estimate pose in the world frame from point correspondences.
    model = NoiseFreePointAlignmentBasedPoseEstimatorObjective(
        points_w, points_o)
    x, cost, A, b = gauss_newton(init_pose_wo, model)

    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot box and true state
    vg.plot_pose(ax, true_pose_wo.to_tuple(), scale=1, alpha=0.4)
    vg.utils.plot_as_box(ax, points_w, alpha=0.4)

    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = vg.plot_pose(ax, x[0].to_tuple(), scale=1)
    artists.extend(vg.utils.plot_as_box(ax, x[0] * points_o))
    vg.plot.axis_equal(ax)
    plt.draw()

    while True:
        if plt.waitforbuttonpress():
            break

    # Plot iterations
    for i in range(1, len(x)):
        for artist in artists:
            artist.remove()

        ax.set_title('Cost: ' + str(cost[i]))
        artists = vg.plot_pose(ax, x[i].to_tuple(), scale=1)
        artists.extend(vg.utils.plot_as_box(ax, x[i] * points_o))
        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break
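Note: `gauss_newton` and the objective class are defined elsewhere. A minimal sketch of the kind of Gauss-Newton loop assumed here, under the hypothetical assumption that `model.linearise(x)` returns the stacked Jacobian `A`, residual `b` and cost of the linearised problem min ||A*tau - b||^2, could be:

import numpy as np


def gauss_newton(x_init, model, num_iterations=20):
    # Keep the state and cost at every iteration, so the caller can visualise the progress.
    x = [x_init]
    cost = []
    A = b = None
    for _ in range(num_iterations):
        A, b, current_cost = model.linearise(x[-1])
        cost.append(current_cost)

        # Solve the normal equations for the tangent-space update tau.
        tau = np.linalg.solve(A.T @ A, A.T @ b)

        # Apply the update as a perturbation on the manifold (oplus, as pylie's + is used above).
        x.append(x[-1] + tau)

    # Linearise once more at the final state to report A, b and the cost there.
    A, b, current_cost = model.linearise(x[-1])
    cost.append(current_cost)

    return x, cost, A, b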
Example No. 4
def draw_exp(ax, xi_vec, draw_options):
    vg.plot_pose(ax, SE3().to_tuple(), scale=1, text=r'$\mathcal{F}_w$')
    T_l = SE3.Exp(xi_vec)
    vg.plot_pose(ax, T_l.to_tuple())  #, text=r'$\mathrm{Exp}(\mathbf{\xi})$')

    if draw_options['Draw box']:
        box_points = vg.utils.generate_box(pose=T_l.to_tuple(), scale=1)
        vg.utils.plot_as_box(ax, box_points)

    if draw_options['Draw manifold\ntrajectory']:
        draw_interpolated(ax, SE3(), xi_vec, SE3())
Example No. 5
def main():
    # Define the first pose.
    T_1 = SE3((SO3.from_roll_pitch_yaw(np.pi / 4, 0,
                                       np.pi / 2), np.array([[1, 1, 1]]).T))

    # Define the second pose.
    T_2 = SE3((SO3.from_roll_pitch_yaw(-np.pi / 6, np.pi / 4,
                                       np.pi / 2), np.array([[1, 4, 2]]).T))

    # Plot the interpolation.
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot the poses.
    vg.plot_pose(ax, T_1.to_tuple())
    vg.plot_pose(ax, T_2.to_tuple())

    # Plot the interpolated poses.
    for alpha in np.linspace(0, 1, 20):
        T = interpolate_lie_element(alpha, T_1, T_2)
        vg.plot_pose(ax, T.to_tuple(), alpha=0.1)

    # Show figure.
    vg.plot.axis_equal(ax)
    plt.show()
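Note: `interpolate_lie_element` is not shown here. A minimal sketch, assuming pylie provides an `SE3.Log` method, could be:

from pylie import SE3


def interpolate_lie_element(alpha, T_1, T_2):
    # Move a fraction alpha along the geodesic from T_1 towards T_2 on SE(3).
    return T_1 @ SE3.Exp(alpha * (T_1.inverse() @ T_2).Log())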
Example No. 6
def left_right_perturbations():
    # Define the pose of a camera.
    T_w_c = SE3((SO3.from_roll_pitch_yaw(5 * np.pi / 4, 0,
                                         np.pi / 2), np.array([[2, 2, 2]]).T))

    # Perturbation.
    xsi_vec = np.array([[2, 0, 0, 0, 0, 0]]).T

    # Perform the perturbation on the right (locally).
    T_r = T_w_c @ SE3.Exp(xsi_vec)

    # Perform the perturbation on the left (globally).
    T_l = SE3.Exp(xsi_vec) @ T_w_c

    # We can transform the tangent vector between local and global frames using the adjoint matrix.
    xsi_vec_w = T_w_c.adjoint() @ xsi_vec  # When xsi_vec is used on the right side.
    xsi_vec_c = T_w_c.inverse().adjoint() @ xsi_vec  # When xsi_vec is used on the left side.
    print('xsi_vec_w: ', xsi_vec_w.flatten())
    print('xsi_vec_c: ', xsi_vec_c.flatten())

    # Visualise the perturbations:
    # Use Qt 5 backend in visualisation, use latex.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot frames.
    to_right = np.hstack((T_w_c.translation, T_r.translation))
    ax.plot(to_right[0, :], to_right[1, :], to_right[2, :], 'k:')
    to_left = np.hstack((T_w_c.translation, T_l.translation))
    ax.plot(to_left[0, :], to_left[1, :], to_left[2, :], 'k:')
    vg.plot_pose(ax, SE3().to_tuple(), text=r'$\mathcal{F}_w$')
    vg.plot_pose(ax, T_w_c.to_tuple(), text=r'$\mathcal{F}_c$')
    vg.plot_pose(ax,
                 T_r.to_tuple(),
                 text=r'$\mathbf{T}_{wc} \circ \mathrm{Exp}(\mathbf{\xi})$')
    vg.plot_pose(ax,
                 T_l.to_tuple(),
                 text=r'$\mathrm{Exp}(\mathbf{\xi}) \circ \mathbf{T}_{wc}$')

    # Show figure.
    vg.plot.axis_equal(ax)
    plt.show()
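As a sanity check, here is a small sketch (using only calls already shown in these examples) that could be inserted right after the adjoint computations inside `left_right_perturbations`. The adjoint relation says that a right perturbation with xsi_vec equals a left perturbation with xsi_vec_w, and a left perturbation with xsi_vec equals a right perturbation with xsi_vec_c:

    # T_w_c @ Exp(xsi_vec) should equal Exp(Ad_T * xsi_vec) @ T_w_c.
    print(np.allclose(T_r.to_matrix(), (SE3.Exp(xsi_vec_w) @ T_w_c).to_matrix()))  # Expected: True

    # Exp(xsi_vec) @ T_w_c should equal T_w_c @ Exp(Ad_T^-1 * xsi_vec).
    print(np.allclose(T_l.to_matrix(), (T_w_c @ SE3.Exp(xsi_vec_c)).to_matrix()))  # Expected: True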
Example No. 7
def draw_left_perturbation(ax, T_w_a, xi_vec, draw_options):
    vg.plot_pose(ax, SE3().to_tuple(), scale=1, text=r'$\mathcal{F}_w$')
    vg.plot_pose(ax, T_w_a.to_tuple(), scale=1, text=r'$\mathcal{F}_a$')
    T_l = SE3.Exp(xi_vec) @ T_w_a
    vg.plot_pose(ax, T_l.to_tuple())  #, text=r'$\mathrm{Exp}(\mathbf{\xi}) \circ \mathbf{T}_{wa}$')

    if draw_options['Draw box']:
        box_points = vg.utils.generate_box(pose=T_l.to_tuple(), scale=1)
        vg.utils.plot_as_box(ax, box_points)

    if draw_options['Draw manifold\ntrajectory']:
        draw_interpolated(ax, SE3(), xi_vec, T_w_a)
Example No. 8
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import visgeom as vg
from pylie import SO3, SE3

# Use Qt 5 backend in visualisation.
matplotlib.use('qt5agg')

# Create figure and axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Plot the pose of the world North-East-Down (NED) frame (relative to the world frame).
T_w_w = SE3()
vg.plot_pose(ax, T_w_w.to_tuple(), scale=3, text=r'$\mathcal{F}_w$')

# Plot the body frame (a body-fixed Forward-Right-Down (FRD) frame).
roll = np.radians(-10)
pitch = np.radians(0)
yaw = np.radians(135)
t_w_b = np.array([[-10, -10, -2]]).T
T_w_b = SE3((SO3.from_roll_pitch_yaw(roll, pitch, yaw), t_w_b))
vg.plot_pose(ax, T_w_b.to_tuple(), scale=3, text=r'$\mathcal{F}_b$')

# Plot the camera frame.
# The camera is placed 2 m directly above the body origin.
# Its optical axis points to the left (in opposite direction of the y-axis in F_b).
# Its y-axis points downwards along the z-axis of F_b.
R_b_c = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
t_b_c = np.array([[0, 0, -2]]).T
T_b_c = SE3((SO3(R_b_c), t_b_c))
T_w_c = T_w_b @ T_b_c
vg.plot_pose(ax, T_w_c.to_tuple(), scale=3, text=r'$\mathcal{F}_c$')
Example No. 9
import matplotlib.pyplot as plt
import numpy as np
import visgeom as vg
from scipy.spatial.transform import Rotation

# Create axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Camera intrinsics and pose.
K = np.array([[50, 0, 40], [0, 50, 30], [0, 0, 1]])
R_w_c = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
t_w_c = np.zeros((3, 1))
pose_w_c = (R_w_c, t_w_c)

# Plot camera.
vg.plot_pose(ax, pose_w_c, scale=0.4, text='$\\mathcal{F}_c$')
vg.plot_camera_frustum(ax, K, pose_w_c, alpha=0.1)
vg.plot_camera_image_plane(ax, K, pose_w_c, scale=1)

# Plot a box in 3D.
R_w_b = Rotation.from_rotvec([0, 0, np.pi / 6]).as_matrix()
t_w_b = np.array([[3, 0, 0]]).T
points_w = vg.utils.generate_box(pose=(R_w_b, t_w_b), scale=0.6)
vg.utils.plot_as_box(ax, points_w)

# Project the box onto the normalised image plane.
points_c = R_w_c.T @ (points_w - t_w_c)
xn = points_c / points_c[2, :]
xn_w = R_w_c @ xn + t_w_c
vg.utils.plot_as_box(ax, xn_w)
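The intrinsic matrix K above is only used for drawing the camera frustum and image plane. As a small follow-up sketch (same conventions as the code above), the corresponding pixel coordinates of the box corners are obtained by applying K to the normalised image coordinates:

# Apply the intrinsics to the normalised image coordinates to get pixel coordinates.
u_hom = K @ xn      # Homogeneous pixel coordinates (last row is all ones).
u = u_hom[:2, :]    # Pixel coordinates [u, v].T for each box corner.
print(u)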
Example No. 10
def poses_and_cameras():
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot the pose of the world North-East-Down (NED) frame (relative to the world frame).
    T_w_w = SE3()
    vg.plot_pose(ax, T_w_w.to_tuple(), scale=3, text=r'$\mathcal{F}_w$')

    # Plot the body frame (a body-fixed Forward-Right-Down (FRD) frame).
    roll = np.radians(-10)
    pitch = np.radians(0)
    yaw = np.radians(135)
    t_w_b = np.array([[-10, -10, -2]]).T
    T_w_b = SE3((SO3.from_roll_pitch_yaw(roll, pitch, yaw), t_w_b))
    vg.plot_pose(ax, T_w_b.to_tuple(), scale=3, text=r'$\mathcal{F}_b$')

    # Plot the camera frame.
    # The camera is placed 2 m directly above the body origin.
    # Its optical axis points to the left (in opposite direction of the y-axis in F_b).
    # Its y-axis points downwards along the z-axis of F_b.
    R_b_c = np.array([[1, 0, 0],
                      [0, 0, -1],
                      [0, 1, 0]])
    t_b_c = np.array([[0, 0, -2]]).T
    T_b_c = SE3((SO3(R_b_c), t_b_c))
    T_w_c = T_w_b @ T_b_c
    vg.plot_pose(ax, T_w_c.to_tuple(), scale=3, text=r'$\mathcal{F}_c$')

    # Plot obstacle frame.
    # The cube is placed at (North: 10 m, East: 10 m, Down: -1 m).
    # Its top points upwards, and its front points south.
    R_w_o = np.array([[-1, 0, 0],
                      [0, 1, 0],
                      [0, 0, -1]])
    t_w_o = np.array([[10, 10, -1]]).T
    T_w_o = SE3((SO3(R_w_o), t_w_o))
    vg.plot_pose(ax, T_w_o.to_tuple(), scale=3, text=r'$\mathcal{F}_o$')

    # Plot the cube with sides 3 meters.
    points_o = vg.utils.generate_box(scale=3)
    points_w = T_w_o * points_o
    vg.utils.plot_as_box(ax, points_w)

    # Plot the image plane.
    img_plane_scale = 3
    K = np.array([[50, 0, 40],
                  [0, 50, 30],
                  [0, 0, 1]])
    vg.plot_camera_image_plane(ax, K, T_w_c.to_tuple(), scale=img_plane_scale)

    # Project the box onto the normalised image plane (at z=img_plane_scale).
    points_c = T_w_c.inverse() @ T_w_o * points_o
    xn = points_c / points_c[2, :]
    xn_w = T_w_c * (img_plane_scale * xn)
    vg.utils.plot_as_box(ax, xn_w)

    # Show figure.
    vg.plot.axis_equal(ax)
    ax.invert_zaxis()
    ax.invert_yaxis()
    plt.show()
Example No. 11
def main():
    # Define camera parameters.
    f = np.array([[100, 100]]).T  # "Focal lengths"  [fu, fv].T
    c = np.array([[50, 50]]).T  # Principal point [cu, cv].T

    # Define camera pose distribution.
    R_w_c = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    t_w_c = np.array([[1, 0, 0]]).T
    mean_T_w_c = SE3((SO3(R_w_c), t_w_c))
    Sigma_T_w_c = np.diag(np.array([0.1, 0.1, 0.1, 0.01, 0.01, 0.01])**2)

    # Define pixel position distribution.
    mean_u = np.array([[50, 50]]).T
    Sigma_u = np.diag(np.array([1, 1])**2)

    # Define depth distribution.
    mean_z = 10
    var_z = 0.1**2

    # TODO 2: Approximate the distribution of the 3D point in world coordinates:
    # TODO 2: Propagate the expected point.
    x_w = np.ones((3, 1))  # Mock implementation, do something else!

    # TODO 2: Propagate the uncertainty.
    cov_x_w = np.identity(3)  # Mock implementation, do something else!
    print(cov_x_w)

    # Simulate points from the true distribution.
    num_draws = 1000
    random_poses = draw_random_poses(mean_T_w_c, Sigma_T_w_c, num_draws)
    random_u = np.random.multivariate_normal(mean_u.flatten(), Sigma_u,
                                             num_draws).T
    random_z = np.random.normal(mean_z, np.sqrt(var_z), num_draws)
    rand_x_c = backproject(random_u, random_z, f, c)
    rand_x_w = np.zeros((3, num_draws))
    for i in range(num_draws):
        rand_x_w[:, [i]] = random_poses[i] * rand_x_c[:, [i]]

    # Plot result.
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot the mean camera pose.
    vg.plot_pose(ax, mean_T_w_c.to_tuple())

    # Plot simulated points.
    ax.plot(rand_x_w[0, :], rand_x_w[1, :], rand_x_w[2, :], 'k.', alpha=0.1)

    # Plot the propagated point estimate with its covariance ellipsoid.
    vg.plot_covariance_ellipsoid(ax, x_w, cov_x_w)

    # Show figure.
    vg.plot.axis_equal(ax)
    plt.show()
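Note: `backproject` (and `draw_random_poses`, sketched after Example No. 1) is defined elsewhere. A minimal sketch of the assumed pinhole back-projection, mapping pixels u at depths z to 3D points in the camera frame given focal lengths f and principal point c, could be:

import numpy as np


def backproject(u, z, f, c):
    # Normalised image coordinates (2xN) from pixel coordinates (2xN).
    xn = (u - c) / f
    # Homogeneous normalised coordinates (3xN), then scale each column by its depth.
    xn_h = np.vstack((xn, np.ones((1, u.shape[1]))))
    return z * xn_h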
Example No. 12
xsi_vec_w = T_w_c.adjoint() @ xsi_vec  # When xsi_vec is used on the right side.
xsi_vec_c = T_w_c.inverse().adjoint() @ xsi_vec  # When xsi_vec is used on the left side.
print('xsi_vec_w: ', xsi_vec_w.flatten())
print('xsi_vec_c: ', xsi_vec_c.flatten())

# Visualise the perturbations:
# Use Qt 5 backend in visualisation, use latex.
matplotlib.use('qt5agg')

# Create figure and axis.
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')

# Plot frames.
to_right = np.hstack((T_w_c.translation, T_r.translation))
ax.plot(to_right[0, :], to_right[1, :], to_right[2, :], 'k:')
to_left = np.hstack((T_w_c.translation, T_l.translation))
ax.plot(to_left[0, :], to_left[1, :], to_left[2, :], 'k:')
vg.plot_pose(ax, SE3().to_tuple(), text=r'$\mathcal{F}_w$')
vg.plot_pose(ax, T_w_c.to_tuple(), text=r'$\mathcal{F}_c$')
vg.plot_pose(ax, T_r.to_tuple(), text=r'$\mathbf{T}_{wc} Exp(xi)$')
vg.plot_pose(ax, T_l.to_tuple(), text=r'$Exp(xi) \mathbf{T}_{wc}$')

# Show figure.
vg.plot.axis_equal(ax)
plt.show()
Example No. 13
def main():
    # World box.
    points_w = vg.utils.generate_box()

    # True observer pose.
    true_pose_wo = SE3((SO3.rot_z(np.pi), np.array([[3, 0, 0]]).T))

    # Observed box with noise.
    points_o = vg.utils.generate_box(pose=true_pose_wo.inverse().to_tuple())
    num_points = points_o.shape[1]
    point_covariances = [np.diag(np.array([1e-1, 1e-1, 1e-1]) ** 2)] * num_points
    for c in range(num_points):
        points_o[:, [c]] = points_o[:, [c]] + np.random.multivariate_normal(np.zeros(3), point_covariances[c]).reshape(
            -1, 1)

    # Perturb observer pose and use as initial state.
    init_pose_wo = true_pose_wo + 10 * np.random.randn(6, 1)

    # Estimate pose in the world frame from point correspondences.
    model = NoisyPointAlignmentBasedPoseEstimatorObjective(points_w, points_o, point_covariances)
    x, cost, A, b = gauss_newton(init_pose_wo, model)
    cov_x_final = np.linalg.inv(A.T @ A)

    # Print covariance.
    with np.printoptions(precision=3, suppress=True):
        print('Covariance:')
        print(cov_x_final)

    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot box and true state
    vg.plot_pose(ax, true_pose_wo.to_tuple(), scale=1, alpha=0.4)
    vg.utils.plot_as_box(ax, points_w, alpha=0.4)

    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = vg.plot_pose(ax, x[0].to_tuple(), scale=1)
    artists.extend(vg.utils.plot_as_box(ax, x[0] * points_o))
    vg.plot.axis_equal(ax)
    plt.draw()

    while True:
        if plt.waitforbuttonpress():
            break

    # Plot iterations
    for i in range(1, len(x)):
        for artist in artists:
            artist.remove()

        ax.set_title('Cost: ' + str(cost[i]))
        artists = vg.plot_pose(ax, x[i].to_tuple(), scale=1)
        artists.extend(vg.utils.plot_as_box(ax, x[i] * points_o))
        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break
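A brief note on the covariance extracted above: under the usual Gauss-Newton assumptions, with the residuals pre-whitened by the point covariances so that the linearised problem is min_tau ||A tau - b||^2, the information matrix of the estimate is A^T A, and the pose covariance is approximated by its inverse,

    Sigma_x ≈ (A^T A)^{-1},

which is what `np.linalg.inv(A.T @ A)` computes at the final iterate.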
Example No. 14
def draw_interpolated(ax, T_1, xi, T_2):
    for alpha in np.linspace(0, 1, 20):
        T = T_1 @ SE3.Exp(alpha * xi) @ T_2
        vg.plot_pose(ax, T.to_tuple(), alpha=0.1)
Example No. 15
def visualise_full(true_pose_w_c, true_box_w, measurement, x, cost):
    # Visualize (press a key to jump to the next iteration).
    # Use Qt 5 backend in visualisation.
    matplotlib.use('qt5agg')

    # Create figure and axis.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Plot true state
    for true_pose in true_pose_w_c:
        vg.plot_pose(ax, true_pose.to_tuple(), scale=1, alpha=0.4)
    vg.utils.plot_as_box(ax, true_box_w, alpha=0.4)

    num_cameras = x[0].num_poses
    num_points = x[0].num_points

    # Plot initial state (to run axis equal first time).
    ax.set_title('Cost: ' + str(cost[0]))
    artists = []

    # Extract points as matrix.
    x_w = np.zeros((3, num_points))
    for j in range(num_points):
        x_w[:, [j]] = x[0].get_point(j)
    artists.extend(vg.utils.plot_as_box(ax, x_w))

    for i in range(num_cameras):
        pose = x[0].get_pose(i)

        # Normalised in 3d.
        xn_3d = np.vstack((measurement[i].xn, np.ones((1, measurement[i].num))))

        artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
        artists.extend(vg.plot_camera_image_plane(ax, measurement[i].camera.K(), pose.to_tuple()))
        artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
        artists.extend(
            vg.utils.plot_as_box(ax, pose * measurement[i].camera.project_to_normalised_3d(pose.inverse() * x_w)))

    vg.plot.axis_equal(ax)
    plt.draw()

    while True:
        if plt.waitforbuttonpress():
            break

    # Plot iterations
    for it in range(1, len(x)):
        for artist in artists:
            artist.remove()

        ax.set_title('Cost: ' + str(cost[it]))
        artists = []

        # Extract points as matrix.
        x_w = np.zeros((3, num_points))
        for j in range(num_points):
            x_w[:, [j]] = x[it].get_point(j)
        artists.extend(vg.utils.plot_as_box(ax, x_w))

        for i in range(num_cameras):
            pose = x[it].get_pose(i)

            # Normalised in 3d.
            xn_3d = np.vstack((measurement[i].xn, np.ones((1, measurement[i].num))))

            artists.extend(vg.plot_pose(ax, pose.to_tuple(), scale=1))
            artists.extend(vg.plot_camera_image_plane(ax, measurement[i].camera.K(), pose.to_tuple()))
            artists.extend(vg.utils.plot_as_box(ax, pose * xn_3d, alpha=0.4))
            artists.extend(
                vg.utils.plot_as_box(ax, pose * measurement[i].camera.project_to_normalised_3d(pose.inverse() * x_w)))

        plt.draw()
        while True:
            if plt.waitforbuttonpress():
                break

    plt.close()
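Note: the camera model's `project_to_normalised_3d` method used throughout these visualisation examples is not shown. A minimal sketch of what such a method might do for a pinhole camera (the class name and constructor here are hypothetical) could be:

import numpy as np


class PinholeCamera:
    def __init__(self, K):
        self._K = K

    def K(self):
        # Intrinsic calibration matrix.
        return self._K

    def project_to_normalised_3d(self, x_c):
        # Project 3D points in the camera frame (3xN) onto the normalised image plane at z=1.
        return x_c / x_c[2, :]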