def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]:
    """"Returns global rotations and unit translation directions between 8 cameras
    that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata
    and the unit translations directions between some camera pairs are computed from their
    global translations. """
    fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0
    wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(fx, fy, s, u0, v0))
    # Rotations of the cameras in the world frame.
    wRc_values = gtsam.Values()
    # Normalized translation directions from camera i to camera j
    # in the coordinate frame of camera i.
    i_iZj_list = []
    for i in range(0, len(wTc_list) - 2):
        # Add the rotation.
        wRi = wTc_list[i].rotation()
        wRc_values.insert(i, wRi)
        # Create unit translation measurements with next two poses.
        for j in range(i + 1, i + 3):
            # Compute the translation from pose i to pose j, in the world reference frame.
            w_itj = wTc_list[j].translation() - wTc_list[i].translation()
            # Obtain the translation in the camera i's reference frame.
            i_itj = wRi.unrotate(w_itj)
            # Compute the normalized unit translation direction.
            i_iZj = gtsam.Unit3(i_itj)
            i_iZj_list.append(
                gtsam.BinaryMeasurementUnit3(
                    i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01)))
    # Add the last two rotations.
    wRc_values.insert(len(wTc_list) - 1, wTc_list[-1].rotation())
    wRc_values.insert(len(wTc_list) - 2, wTc_list[-2].rotation())
    return wRc_values, i_iZj_list
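

# For context, a hedged sketch of how these outputs are consumed in GTSAM's
# TranslationAveragingExample (the exact TranslationRecovery API differs
# slightly across GTSAM versions; this follows the 4.x example): each
# camera-frame direction is rotated into the world frame using the global
# rotations, then handed to the 1dsfm-based translation averaging solver.
def estimate_translations_sketch():
    wRc_values, i_iZj_list = get_data()
    # Rotate each camera-frame direction into the world frame.
    w_iZj_list = gtsam.BinaryMeasurementsUnit3()
    for m in i_iZj_list:
        w_iZj = gtsam.Unit3(
            wRc_values.atRot3(m.key1()).rotate(m.measured().point3()))
        w_iZj_list.append(
            gtsam.BinaryMeasurementUnit3(m.key1(), m.key2(), w_iZj,
                                         m.noiseModel()))
    # Recover the global camera positions (up to scale).
    return gtsam.TranslationRecovery(w_iZj_list).run()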
Example #2
    def test_computation_graph(self):
        """Test the dask computation graph execution using a valid collection of relative unit-translations."""
        """Test a simple case with 8 camera poses.

        The camera poses are arranged on the circle and point towards the center
        of the circle. The poses of 8 cameras are obtained from SFMdata and the
        unit translations directions between some camera pairs are computed from their global translations.

        This test is copied from GTSAM's TranslationAveragingExample.
        """

        fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0
        expected_wTi_list = SFMdata.createPoses(Cal3_S2(fx, fy, s, u0, v0))

        wRi_list = [x.rotation() for x in expected_wTi_list]

        # create relative translation directions between a pose index and the
        # next two poses
        i2Ui1_dict = {}
        for i1 in range(len(expected_wTi_list) - 1):
            for i2 in range(i1 + 1, min(len(expected_wTi_list), i1 + 3)):
                # create relative translations using global R and T.
                i2Ui1_dict[(i1, i2)] = Unit3(expected_wTi_list[i2].between(
                    expected_wTi_list[i1]).translation())

        # use the `run` API to get expected results
        expected_wti_list = self.obj.run(len(wRi_list), i2Ui1_dict, wRi_list)
        expected_wTi_list = [
            Pose3(wRi, wti) if wti is not None else None
            for (wRi, wti) in zip(wRi_list, expected_wti_list)
        ]

        # form computation graph and execute
        i2Ui1_graph = dask.delayed(i2Ui1_dict)
        wRi_graph = dask.delayed(wRi_list)
        computation_graph = self.obj.create_computation_graph(
            len(wRi_list), i2Ui1_graph, wRi_graph)
        with dask.config.set(scheduler="single-threaded"):
            wti_list = dask.compute(computation_graph)[0]

        wTi_list = [
            Pose3(wRi, wti) if wti is not None else None
            for (wRi, wti) in zip(wRi_list, wti_list)
        ]

        # compare the entries
        self.assertTrue(
            geometry_comparisons.compare_global_poses(wTi_list,
                                                      expected_wTi_list))
Example #3
CUBE_SIZE = 4
CUBE_RESOLUTION = 128
# set the dummy image size as 400x300
IMAGE_W = 400
IMAGE_H = 300

# set dummy camera intrinsics
CAMERA_INTRINSICS = Cal3_S2(
    fx=100.0,
    fy=100.0,
    s=0.0,
    u0=IMAGE_W // 2,
    v0=IMAGE_H // 2,
)
# set dummy camera poses as described in GTSAM example
CAMERA_POSES = SFMdata.createPoses(CAMERA_INTRINSICS)
# set dummy camera instances
CAMERAS = [
    PinholeCameraCal3_S2(CAMERA_POSES[i], CAMERA_INTRINSICS)
    for i in range(len(CAMERA_POSES))
]

# set dummy sphere grid center
SPHERE_CENTER = np.array([-1.5, -1.5, -1.5])
# set dummy sphere grid radius
SPHERE_RADIUS = 0.2

UNIT_CUBE_CENTER = np.array([0.5, 0.5, 0.5])


class TestOverlapFrustums(unittest.TestCase):
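    # The class body is truncated in this snippet. A hedged, purely
    # illustrative test over the constants above (not the original test):
    def test_cameras_face_center(self):
        """Each dummy camera's optical axis should point towards the origin."""
        for cam in CAMERAS:
            to_center = -cam.pose().translation()  # ray from camera to origin
            optical_axis = cam.pose().rotation().matrix()[:, 2]  # camera z-axis
            self.assertGreater(np.dot(to_center, optical_axis), 0.0)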
Example #4
def main():
    """
    Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).

    Each variable in the system (poses and landmarks) must be identified with a unique key.
    We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
    Here we will use Symbols.

    In GTSAM, measurement functions are represented as 'factors'. Several common factors
    have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
    Here we will use Projection factors to model the camera's landmark observations.
    Also, we will initialize the robot at some location using a Prior factor.

    When the factors are created, we will add them to a Factor Graph. As the factors we are using
    are nonlinear factors, we will need a Nonlinear Factor Graph.

    Finally, once all of the factors have been added to our factor graph, we will want to
    solve/optimize the graph to find the best (Maximum A Posteriori) set of variable values.
    GTSAM includes several nonlinear optimizers to perform this step. Here we will use a
    trust-region method known as Powell's Dogleg.

    The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the
    nonlinear functions around an initial linearization point, then solve the linear system
    to update the linearization point. This happens repeatedly until the solver converges
    to a consistent set of variable values. This requires us to specify an initial guess
    for each variable, held in a Values container.
    """

    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()

    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a factor graph
    graph = NonlinearFactorGraph()

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    # 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z
    pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
        np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
    factor = PriorFactorPose3(X(0), poses[0], pose_noise)
    graph.push_back(factor)

    # Simulated measurements from each camera pose, adding them to the factor graph
    for i, pose in enumerate(poses):
        camera = PinholeCameraCal3_S2(pose, K)
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(measurement,
                                                    measurement_noise, X(i),
                                                    L(j), K)
            graph.push_back(factor)

    # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained
    # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance
    # between the first camera and the first landmark. All other landmark positions are interpreted using this scale.
    point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
    factor = PriorFactorPoint3(L(0), points[0], point_noise)
    graph.push_back(factor)
    graph.print_('Factor Graph:\n')

    # Create the data structure to hold the initial estimate to the solution
    # Intentionally initialize the variables off from the ground truth
    initial_estimate = Values()
    for i, pose in enumerate(poses):
        transformed_pose = pose.retract(0.1 * np.random.randn(6, 1))
        initial_estimate.insert(X(i), transformed_pose)
    for j, point in enumerate(points):
        transformed_point = point + 0.1 * np.random.randn(3)
        initial_estimate.insert(L(j), transformed_point)
    initial_estimate.print_('Initial Estimates:\n')

    # Optimize the graph and print results
    params = gtsam.DoglegParams()
    params.setVerbosity('TERMINATION')
    optimizer = DoglegOptimizer(graph, initial_estimate, params)
    print('Optimizing:')
    result = optimizer.optimize()
    result.print_('Final results:\n')
    print('initial error = {}'.format(graph.error(initial_estimate)))
    print('final error = {}'.format(graph.error(result)))

    marginals = Marginals(graph, result)
    plot.plot_3d_points(1, result, marginals=marginals)
    plot.plot_trajectory(1, result, marginals=marginals, scale=8)
    plot.set_axes_equal(1)
    plt.show()
Example #5
# path for data used in this test
DATA_ROOT_PATH = Path(__file__).resolve().parent.parent / "data"
DOOR_TRACKS_PATH = DATA_ROOT_PATH / "tracks2d_door.pickle"
DOOR_DATASET_PATH = DATA_ROOT_PATH / "set1_lund_door"

# focal length set to 50 px, with `px`, `py` set to zero
CALIBRATION = Cal3Bundler(50, 0, 0, 0, 0)
# Generate 8 camera poses arranged in a circle of radius 40 m
CAMERAS = {
    i: PinholeCameraCal3Bundler(pose, CALIBRATION)
    for i, pose in enumerate(
        SFMdata.createPoses(
            Cal3_S2(
                CALIBRATION.fx(),
                CALIBRATION.fx(),
                0,
                CALIBRATION.px(),
                CALIBRATION.py(),
            )))
}
LANDMARK_POINT = Point3(0.0, 0.0, 0.0)
MEASUREMENTS = [
    SfmMeasurement(i, cam.project(LANDMARK_POINT))
    for i, cam in CAMERAS.items()
]


def get_track_with_one_outlier() -> List[SfmMeasurement]:
    """Generates a track with outlier measurement."""
    # perturb one measurement
    idx_to_perturb = 5
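
    # The snippet is truncated here; a plausible completion based on the
    # docstring above (the pixel offset and the plain list copy are
    # assumptions, not the original code):
    perturbed = MEASUREMENTS[idx_to_perturb]
    perturbed = SfmMeasurement(perturbed.i,
                               perturbed.uv + np.array([20.0, -10.0]))
    measurements = list(MEASUREMENTS)
    measurements[idx_to_perturb] = perturbed
    return measurements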
Example #6
# assumed image dimensions; their definitions were truncated from this snippet
DEFAULT_IMAGE_W = 400
DEFAULT_IMAGE_H = 300
DEFAULT_IMAGE_C = 3

# set default track points, the coordinates are in the world frame
DEFAULT_NUM_TRACKS = 100
DEFAULT_TRACK_POINTS = [Point3(5, 5, float(i)) for i in range(DEFAULT_NUM_TRACKS)]

# set default camera intrinsics
DEFAULT_CAMERA_INTRINSICS = Cal3_S2(
    fx=100.0,
    fy=100.0,
    s=1.0,
    u0=DEFAULT_IMAGE_W // 2,
    v0=DEFAULT_IMAGE_H // 2,
)
# set default camera poses as described in GTSAM example
DEFAULT_CAMERA_POSES = SFMdata.createPoses(DEFAULT_CAMERA_INTRINSICS)
# set default camera instances
DEFAULT_CAMERAS = [
    PinholeCameraCal3_S2(DEFAULT_CAMERA_POSES[i], DEFAULT_CAMERA_INTRINSICS) for i in range(len(DEFAULT_CAMERA_POSES))
]
DEFAULT_NUM_CAMERAS = len(DEFAULT_CAMERAS)
# the number of valid images should be equal to the number of cameras (with estimated pose)
DEFAULT_NUM_IMAGES = DEFAULT_NUM_CAMERAS

# build dummy image dictionary with default image shape
DEFAULT_DUMMY_IMAGE_DICT = {
    i: Image(value_array=np.zeros([DEFAULT_IMAGE_H, DEFAULT_IMAGE_W, DEFAULT_IMAGE_C], dtype=int))
    for i in range(DEFAULT_NUM_IMAGES)
}

# set camera[1] to be selected in test_get_item
Example #7
def visual_ISAM2_example():
    plt.ion()

    # Define the camera calibration parameters
    K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    measurement_noise = gtsam.noiseModel.Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()

    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create an iSAM2 object. Unlike iSAM1, which performs periodic batch steps
    # to maintain proper linearization and efficient variable ordering, iSAM2
    # performs partial relinearization/reordering at each step. A parameter
    # structure is available that allows the user to set various properties, such
    # as the relinearization threshold and type of linear solver. For this
    # example, we set the relinearization threshold small so the iSAM2 result
    # will approach the batch result.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.01)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # Create a Factor Graph and Values to hold the new data
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    #  Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):

        # Add factors for each landmark observation
        for j, point in enumerate(points):
            camera = gtsam.PinholeCameraCal3_S2(pose, K)
            measurement = camera.project(point)
            graph.push_back(
                gtsam.GenericProjectionFactorCal3_S2(measurement,
                                                     measurement_noise, X(i),
                                                     L(j), K))

        # Add an initial guess for the current pose
        # Intentionally initialize the variables off from the ground truth
        initial_estimate.insert(
            X(i),
            pose.compose(
                gtsam.Pose3(gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25),
                            gtsam.Point3(0.05, -0.10, 0.20))))

        # If this is the first iteration, add a prior on the first pose to set the
        # coordinate frame and a prior on the first landmark to set the scale.
        # Also, as iSAM solves incrementally, we must wait until each is observed
        # at least twice before adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0
            pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1,
                          0.1]))  # 0.3 rad std on roll,pitch,yaw; 0.1m on x,y,z
            graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise))

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
            graph.push_back(
                gtsam.PriorFactorPoint3(L(0), points[0],
                                        point_noise))  # add directly to graph

            # Add initial guesses to all observed landmarks
            # Intentionally initialize the variables off from the ground truth
            for j, point in enumerate(points):
                initial_estimate.insert(
                    L(j),
                    gtsam.Point3(point[0] - 0.25,
                                 point[1] + 0.20,
                                 point[2] + 0.15))
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            # Each call to iSAM2 update(*) performs one iteration of the iterative nonlinear solver.
            # If accuracy is desired at the expense of time, update(*) can be called additional
            # times to perform multiple optimizer iterations every step.
            isam.update()
            current_estimate = isam.calculateEstimate()
            print("****************************************************")
            print("Frame", i, ":")
            for j in range(i + 1):
                print(X(j), ":", current_estimate.atPose3(X(j)))

            for j in range(len(points)):
                print(L(j), ":", current_estimate.atPoint3(L(j)))

            visual_ISAM2_plot(current_estimate)

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()

    plt.ioff()
    plt.show()
Example #8

def main():
    """
    A structure-from-motion example with landmarks
    - The landmarks form a 10 meter cube
    - The robot rotates around the landmarks, always facing towards the cube
    """

    # Define the camera calibration parameters
    K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)

    # Define the camera observation noise model
    camera_noise = gtsam.noiseModel.Isotropic.Sigma(
        2, 1.0)  # one pixel in u and v

    # Create the set of ground-truth landmarks
    points = SFMdata.createPoints()
    # Create the set of ground-truth poses
    poses = SFMdata.createPoses(K)

    # Create a NonlinearISAM object which will relinearize and reorder the variables
    # every "reorderInterval" updates
    isam = NonlinearISAM(reorderInterval=3)

    # Create a Factor Graph and Values to hold the new data
    graph = NonlinearFactorGraph()
    initial_estimate = Values()

    # Loop over the different poses, adding the observations to iSAM incrementally
    for i, pose in enumerate(poses):
        camera = PinholeCameraCal3_S2(pose, K)
        # Add factors for each landmark observation
        for j, point in enumerate(points):
            measurement = camera.project(point)
            factor = GenericProjectionFactorCal3_S2(measurement, camera_noise,
                                                    X(i), L(j), K)
            graph.push_back(factor)

        # Intentionally initialize the variables off from the ground truth
        noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25),
                      t=Point3(0.05, -0.10, 0.20))
        initial_xi = pose.compose(noise)

        # Add an initial guess for the current pose
        initial_estimate.insert(X(i), initial_xi)

        # If this is the first iteration, add a prior on the first pose to set the coordinate frame
        # and a prior on the first landmark to set the scale
        # Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
        # adding it to iSAM.
        if i == 0:
            # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z
            pose_noise = gtsam.noiseModel.Diagonal.Sigmas(
                np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1]))
            factor = PriorFactorPose3(X(0), poses[0], pose_noise)
            graph.push_back(factor)

            # Add a prior on landmark l0
            point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
            factor = PriorFactorPoint3(L(0), points[0], point_noise)
            graph.push_back(factor)

            # Add initial guesses to all observed landmarks
            noise = np.array([-0.25, 0.20, 0.15])
            for j, point in enumerate(points):
                # Intentionally initialize the variables off from the ground truth
                initial_lj = points[j] + noise
                initial_estimate.insert(L(j), initial_lj)
        else:
            # Update iSAM with the new factors
            isam.update(graph, initial_estimate)
            current_estimate = isam.estimate()
            print('*' * 50)
            print('Frame {}:'.format(i))
            current_estimate.print_('Current estimate: ')

            # Clear the factor graph and values for the next iteration
            graph.resize(0)
            initial_estimate.clear()
Example #9
# (The definition line was missing from this snippet; the signature below is
# reconstructed from the docstring and the later call sites.)
def generate_relative_from_global(
        wTi_list: List[Pose3],
        pair_indices: List[Tuple[int, int]]) -> Dict[Tuple[int, int], Pose3]:
    """Generate relative poses (i2Ti1) from global poses.

    Args:
        wTi_list: global poses.
        pair_indices: pairs (i1, i2) to construct relative poses for.

    Returns:
        Dictionary (i1, i2) -> i2Ti1 for all requested pairs.
    """
    return {(i1, i2): wTi_list[i2].between(wTi_list[i1])
            for i1, i2 in pair_indices}


"""4 poses in the circle of radius 5m, all looking at the center of the circle.

For relative poses, each pose has just 2 edges, connecting to the immediate neighbors.
"""
CIRCLE_TWO_EDGES_GLOBAL_POSES = SFMdata.createPoses(
    Cal3_S2(fx=1, fy=1, s=0, u0=0, v0=0))[::2]

CIRCLE_TWO_EDGES_RELATIVE_POSES = generate_relative_from_global(
    CIRCLE_TWO_EDGES_GLOBAL_POSES, [(1, 0), (2, 1), (3, 2), (0, 3)])
"""4 poses in the circle of radius 5m, all looking at the center of the circle.

For relative poses, each pose is connected to every other (3) pose.
"""
CIRCLE_ALL_EDGES_GLOBAL_POSES = copy.copy(CIRCLE_TWO_EDGES_GLOBAL_POSES)

CIRCLE_ALL_EDGES_RELATIVE_POSES = generate_relative_from_global(
    CIRCLE_TWO_EDGES_GLOBAL_POSES, [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3),
                                    (2, 3)])
"""3 poses on a line, simulating forward motion, with large translations and no relative rotation.

For relative poses, we have a fully connected graph.
"""
Example #10
"""Unit tests for comparison functions for geometry types.

Authors: Ayush Baid
"""
import unittest
from typing import List
from unittest.mock import patch

import numpy as np
from gtsam import Cal3_S2, Point3, Pose3, Rot3, Similarity3, Unit3
from gtsam.examples import SFMdata

import gtsfm.utils.geometry_comparisons as geometry_comparisons
import tests.data.sample_poses as sample_poses

POSE_LIST = SFMdata.createPoses(Cal3_S2())

ROT3_EULER_ANGLE_ERROR_THRESHOLD = 1e-2
POINT3_RELATIVE_ERROR_THRESH = 1e-1
POINT3_ABS_ERROR_THRESH = 1e-2


def rot3_compare(R: Rot3, R_: Rot3, msg=None) -> bool:
    return np.allclose(R.xyz(), R_.xyz(), atol=1e-2)


def point3_compare(t: Point3, t_: Point3, msg=None) -> bool:
    return np.allclose(t,
                       t_,
                       rtol=POINT3_RELATIVE_ERROR_THRESH,
                       atol=POINT3_ABS_ERROR_THRESH)
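

# In the gtsfm tests, comparators like these are registered with unittest's
# type-equality hook so that plain assertEqual works on GTSAM types. A sketch
# of the assumed usage (the test class name here is illustrative):
class TestGeometryComparisons(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.addTypeEqualityFunc(Rot3, rot3_compare)
        self.addTypeEqualityFunc(Point3, point3_compare)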
Example #11

from GTSAM_helper import iSAM2Wrapper
import cv2

from vslam_helper import *

if __name__ == '__main__':
    plt.ion()

    K = np.array([[50.0, 0, 50.0], [0.0, 50.0, 50.0], [0.0, 0.0, 1.0]])
    D = np.array([0.0, 0.0, 0.0, 0.0, 0.0])

    Ksim = gtsam.Cal3_S2(iSAM2Wrapper.CameraMatrix_to_Cal3_S2(K))

    # Create a sample world point; in this case it is (10, 10, 10)
    points = SFMdata.createPoints()
    point_3d = points[0]

    # Create 2 camera poses to measure the 3d point from
    poses = SFMdata.createPoses(Ksim)
    pose1 = poses[0]
    pose2 = poses[1]

    # Create GTSAM camera objects with poses
    camera1 = gtsam.PinholeCameraCal3_S2(pose1, Ksim)
    camera2 = gtsam.PinholeCameraCal3_S2(pose2, Ksim)

    # Project the 3D point into the 2 Camera poses
    kp1 = camera1.project(point_3d)
    kp2 = camera2.project(point_3d)
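
    # A possible sanity check (a sketch assuming the GTSAM 4.x Python API;
    # CameraSetCal3_S2, Point2Vector, and triangulatePoint3 may be named
    # differently in other versions): triangulate the point back from the
    # two projections and compare against the ground truth.
    cameras = gtsam.CameraSetCal3_S2()
    cameras.append(camera1)
    cameras.append(camera2)
    measurements = gtsam.Point2Vector([kp1, kp2])
    recovered = gtsam.triangulatePoint3(cameras, measurements, 1e-9, True)
    print('triangulated:', recovered, 'ground truth:', point_3d)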