Code Example #1
 def test_scalar(self):
     qr = Quaternion(1, 2, 3, 4)
     qt = Quaternion(1, 3, 3, 1)
     dq = DualQuaternion(qr, qt)
     scalar = dq.scalar()
     scalar_expected = np.array([0., 0., 0., 4., 0., 0., 0., 1.]).T
     npt.assert_allclose(scalar.dq, scalar_expected, atol=1e-6)
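
The expected vector also documents the storage layout: dq.dq is the 8-vector [x_r, y_r, z_r, w_r, x_d, y_d, z_d, w_d], so scalar() keeps only the two w entries. A minimal sketch of that behavior (scalar_part is a hypothetical helper, assuming this layout):

import numpy as np

def scalar_part(dq_vec):
    # Keep only the scalar (w) entries of the rotation and dual quaternions.
    out = np.zeros(8)
    out[3] = dq_vec[3]
    out[7] = dq_vec[7]
    return out

# Quaternion(1, 2, 3, 4) and Quaternion(1, 3, 3, 1) stacked as an 8-vector:
assert np.allclose(scalar_part(np.array([1., 2., 3., 4., 1., 3., 3., 1.])),
                   [0., 0., 0., 4., 0., 0., 0., 1.])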
Code Example #2
 def test_consecutive_transformations(self):
     dq_1_2 = DualQuaternion.from_pose(0, 10, 1, 1, 0, 0, 0)
     dq_2_3 = DualQuaternion.from_pose(2, 1, 3, 0, 1, 0, 0)
     dq_1_3 = DualQuaternion.from_pose(2, 9, -2, 0, 0, 1, 0)
     # Move coordinate frame dq_1 to dq_3
     dq_1_3_computed = dq_1_2 * dq_2_3
     npt.assert_allclose(dq_1_3_computed.dq, dq_1_3.dq)
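
The same check with plain homogeneous matrices shows that dual-quaternion multiplication chains frames exactly like matrix multiplication (a sketch; the matrices below are the transforms the from_pose calls above encode, with quaternion arguments ordered (q_x, q_y, q_z, q_w)):

import numpy as np

T_1_2 = np.array([[1, 0, 0, 0], [0, -1, 0, 10], [0, 0, -1, 1], [0, 0, 0, 1.]])  # 180 deg about x
T_2_3 = np.array([[-1, 0, 0, 2], [0, 1, 0, 1], [0, 0, -1, 3], [0, 0, 0, 1.]])   # 180 deg about y
T_1_3 = T_1_2.dot(T_2_3)  # composes like dq_1_2 * dq_2_3
assert np.allclose(T_1_3[:3, 3], [2, 9, -2])             # translation of dq_1_3
assert np.allclose(np.diag(T_1_3[:3, :3]), [-1, -1, 1])  # 180 deg about z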
Code Example #3
 def test_division(self):
     qr_1 = Quaternion(1, 2, 3, 4)
     qt_1 = Quaternion(1, 3, 3, 6)
     dq_1 = DualQuaternion(qr_1, qt_1)
     identity_dq = np.array([0., 0., 0., 1., 0., 0., 0., 0.]).T
     npt.assert_allclose((dq_1 / dq_1).dq, identity_dq, atol=1e-6)
     # TODO(ff): Fix this test.
     qr_2 = Quaternion(1, 4, 5, 1)
     qt_2 = Quaternion(-4, 2, 3, 4)
     dq_2 = DualQuaternion(qr_2, qt_2)
     # dq_2 is constructed but not yet asserted on; see the TODO above.
Code Example #4
 def test_transforming_points(self):
     dq_1_2 = DualQuaternion.from_pose(0, 10, 1, 1, 0, 0, 0)
     dq_2_3 = DualQuaternion.from_pose(2, 1, 3, 0, 1, 0, 0)
     dq_1_3 = DualQuaternion.from_pose(2, 9, -2, 0, 0, 1, 0)
     dq_1_3_computed = dq_1_2 * dq_2_3
     # Express point p (expressed in frame 1) in coordinate frame 3.
     p_1 = np.array([1, 2, 3])
     p_3_direct = dq_1_3.inverse().passive_transform_point(p_1)
     p_3_consecutive = dq_1_3_computed.inverse().passive_transform_point(
         p_1)
     npt.assert_allclose(p_3_direct, p_3_consecutive)
Code Example #5
    def test_addition(self):
        qr_1 = Quaternion(1., 2., 3., 4.)
        qt_1 = Quaternion(1., 3., 3., 0.)
        dq_1 = DualQuaternion(qr_1, qt_1)

        qr_2 = Quaternion(3., 5., 3., 2.)
        qt_2 = Quaternion(-4., 2., 3., 0.)
        dq_2 = DualQuaternion(qr_2, qt_2)

        dq_expected = np.array([4., 7., 6., 6., -3., 5., 6., 0.]).T

        npt.assert_allclose((dq_1 + dq_2).dq, dq_expected, rtol=1e-6)
Code Example #6
    def test_dq_to_matrix(self):
        pose = [1, 2, 3, 4., 5., 6., 7.]
        dq = DualQuaternion.from_pose_vector(pose)
        dq.normalize()

        matrix_out = dq.to_matrix()

        dq_from_matrix = DualQuaternion.from_transformation_matrix(matrix_out)
        matrix_out_2 = dq_from_matrix.to_matrix()

        npt.assert_allclose(dq.dq, dq_from_matrix.dq)
        npt.assert_allclose(matrix_out, matrix_out_2)
Code Example #7
 def test_multiplication(self):
     qr_1 = Quaternion(1., 2., 3., 4.)
     qt_1 = Quaternion(1., 3., 3., 0.)
     dq_1 = DualQuaternion(qr_1, qt_1)
     qr_2 = Quaternion(1., 4., 5., 1.)
     qt_2 = Quaternion(-4., 2., 3., 0.)
     dq_2 = DualQuaternion(qr_2, qt_2)
     # TODO(ff): Check this by hand. (And check if we shouldn't always get 0
     # scalars for the rotational quaternion of the dual quaternion).
     dq_expected = DualQuaternion(
         dq_1.q_rot * dq_2.q_rot,
         dq_1.q_rot * dq_2.q_dual + dq_1.q_dual * dq_2.q_rot)
     npt.assert_allclose((dq_1 * dq_2).dq, dq_expected.dq)
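
The expected value spelled out in the test is just the dual-number product with $\epsilon^2 = 0$:

$$(q_{r,1} + \epsilon q_{d,1})(q_{r,2} + \epsilon q_{d,2}) = q_{r,1} q_{r,2} + \epsilon\,(q_{r,1} q_{d,2} + q_{d,1} q_{r,2}).$$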
Code Example #8
 def test_conjugate_identity(self):
     qr = Quaternion(1, 2, 3, 4)
     qt = Quaternion(1, 3, 3, 0)
     dq = DualQuaternion(qr, qt)
     # TODO(ff): Here we should use dq*dq.conjugate() in one
     # place.
     q_rot_identity = dq.q_rot * dq.q_rot.conjugate()
     q_dual_identity = (dq.q_rot.conjugate() * dq.q_dual +
                        dq.q_dual.conjugate() * dq.q_rot)
     identity_dq_expected = DualQuaternion(q_rot_identity, q_dual_identity)
     npt.assert_allclose((dq * dq.conjugate()).dq,
                         identity_dq_expected.dq,
                         atol=1e-6)
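
Both expressions in the test are purely scalar, and equal, because a quaternion plus its conjugate is twice its scalar part and $(ab)^* = b^* a^*$:

$$dq\,dq^* = q_r q_r^* + \epsilon\,(q_r q_d^* + q_d q_r^*) = \|q_r\|^2 + \epsilon\, 2\langle q_r, q_d\rangle,$$

and likewise $q_r^* q_d + q_d^* q_r = 2\langle q_r, q_d\rangle$. For the values above both sides equal $30 + 32\epsilon$, i.e. the vector [0., 0., 0., 30., 0., 0., 0., 32.].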
Code Example #9
def compute_loop_error(results_dq_H_E, num_poses_sets, visualize=False):
    calibration_transformation_chain = []

    # Add point at origin to represent the first coordinate
    # frame in the chain of transformations.
    calibration_transformation_chain.append(
        DualQuaternion(Quaternion(0, 0, 0, 1), Quaternion(0, 0, 0, 0)))

    # Add first transformation
    calibration_transformation_chain.append(results_dq_H_E[0])

    # Create chain of transformations from the first frame to the last.
    i = 0
    idx = 0
    while i < (num_poses_sets - 2):
        idx += (num_poses_sets - i - 1)
        calibration_transformation_chain.append(results_dq_H_E[idx])
        i += 1

    # Add inverse of first to last frame to close the loop.
    calibration_transformation_chain.append(results_dq_H_E[num_poses_sets -
                                                           2].inverse())

    # Check loop.
    assert len(calibration_transformation_chain) == (num_poses_sets + 1), (
        len(calibration_transformation_chain), (num_poses_sets + 1))

    # Chain the transformations together to get points we can plot.
    poses_to_plot = []
    dq_tmp = DualQuaternion(Quaternion(0, 0, 0, 1), Quaternion(0, 0, 0, 0))
    for i in range(0, len(calibration_transformation_chain)):
        dq_tmp *= calibration_transformation_chain[i]
        poses_to_plot.append(dq_tmp.to_pose())

    (loop_error_position,
     loop_error_orientation) = compute_pose_error(poses_to_plot[0],
                                                  poses_to_plot[-1])

    print("Error when closing the loop of hand eye calibrations - position: {}"
          " m orientation: {} deg".format(loop_error_position,
                                          loop_error_orientation))

    if visualize:
        assert len(poses_to_plot) == len(calibration_transformation_chain)
        plot_poses([np.array(poses_to_plot)],
                   plot_arrows=True,
                   title="Hand-Eye Calibration Results - Closing The Loop")

    # Return the loop error.
    return (loop_error_position, loop_error_orientation)
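
The index arithmetic assumes results_dq_H_E holds one calibration per pose-set pair (i, j) with i < j, stored in the order (0,1), (0,2), ..., (0,n-1), (1,2), ...; the chain then walks the consecutive pairs (0,1), (1,2), ..., (n-2,n-1) and closes with the inverse of (0,n-1). A small sketch (chain_indices is a hypothetical helper) of the indices it selects:

def chain_indices(num_poses_sets):
    # Indices of the consecutive pairs (0,1), (1,2), ... in the pairwise list.
    indices = [0]
    i, idx = 0, 0
    while i < (num_poses_sets - 2):
        idx += (num_poses_sets - i - 1)
        indices.append(idx)
        i += 1
    return indices

print(chain_indices(5))  # [0, 4, 7, 9]; the loop then closes with index 3, i.e. pair (0, 4)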
Code Example #10
 def test_normalize(self):
     qr = Quaternion(1, 2, 3, 4)
     qt = Quaternion(1, 3, 3, 0)
     dq = DualQuaternion(qr, qt)
     dq.normalize()
     dq_normalized = np.array([
         0.18257419, 0.36514837, 0.54772256, 0.73029674, 0.18257419,
         0.54772256, 0.54772256, 0.
     ]).T
     npt.assert_allclose(dq.dq, dq_normalized)
     dq_2 = DualQuaternion.from_pose(1, 2, 3, 1, 1, 1, 1)
     dq_2.normalize()
     dq_2_normalized = np.array([0.5, 0.5, 0.5, 0.5, 0., 1., 0.5, -1.5]).T
     npt.assert_allclose(dq_2.dq, dq_2_normalized)
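
The expected numbers are consistent with normalization dividing the full 8-vector by the norm of the rotation quaternion; a sketch assuming that behavior:

import numpy as np

dq_vec = np.array([1., 2., 3., 4., 1., 3., 3., 0.])
print(dq_vec / np.linalg.norm(dq_vec[:4]))  # reproduces dq_normalized above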
Code Example #11
def evaluate_calibration(time_stamped_poses_B_H, time_stamped_poses_W_E,
                         dq_H_E, time_offset, config):
    assert len(time_stamped_poses_B_H) > 0
    assert len(time_stamped_poses_W_E) > 0
    assert isinstance(config, HandEyeConfig)

    (aligned_poses_B_H,
     aligned_poses_W_E) = compute_aligned_poses(time_stamped_poses_B_H,
                                                time_stamped_poses_W_E,
                                                time_offset)
    assert len(aligned_poses_B_H) == len(aligned_poses_W_E)

    # If we found no matching poses, the evaluation failed.
    if len(aligned_poses_B_H) == 0:
        return ((float('inf'), float('inf')), 0)

    # Convert poses to dual quaternions.
    dual_quat_B_H_vec = [
        DualQuaternion.from_pose_vector(aligned_pose_B_H)
        for aligned_pose_B_H in aligned_poses_B_H[:, 1:]
    ]
    dual_quat_W_E_vec = [
        DualQuaternion.from_pose_vector(aligned_pose_W_E)
        for aligned_pose_W_E in aligned_poses_W_E[:, 1:]
    ]

    assert len(dual_quat_B_H_vec) == len(
        dual_quat_W_E_vec
    ), "len(dual_quat_B_H_vec): {} vs len(dual_quat_W_E_vec): {}".format(
        len(dual_quat_B_H_vec), len(dual_quat_W_E_vec))

    aligned_dq_B_H = align_paths_at_index(dual_quat_B_H_vec, 0)
    aligned_dq_W_E = align_paths_at_index(dual_quat_W_E_vec, 0)

    (poses_B_H, poses_W_H) = get_aligned_poses(aligned_dq_B_H, aligned_dq_W_E,
                                               dq_H_E)

    (rmse_position, rmse_orientation,
     inlier_flags) = evaluate_alignment(poses_B_H, poses_W_H, config,
                                        config.visualize)

    if config.visualize:
        every_nth_element = config.visualize_plot_every_nth_pose
        plot_poses(
            [poses_B_H[::every_nth_element], poses_W_H[::every_nth_element]],
            True,
            title="3D Poses After Fine Alignment")

    return ((rmse_position, rmse_orientation), sum(inlier_flags))
Code Example #12
 def test_multiplication_with_scalar(self):
     qr = Quaternion(0.5, 0.5, -0.5, 0.5)
     qt = Quaternion(1, 3, 3, 0)
     dq = DualQuaternion(qr, qt)
     dq_expected = np.array([1.25, 1.25, -1.25, 1.25, 2.5, 7.5, 7.5, 0]).T
     # Note: the scalar scales both the rotational and the dual part.
     npt.assert_allclose((dq * 2.5).dq, dq_expected)
Code Example #13
def random_transform_as_dual_quaternion(
        enforce_positive_non_dual_scalar_sign=True):
    T_rand = rand_transform()
    dq_rand = DualQuaternion.from_transformation_matrix(T_rand)
    if enforce_positive_non_dual_scalar_sign:
        if dq_rand.q_rot.w < 0:
            dq_rand.dq = -dq_rand.dq.copy()
    return dq_rand.copy()
Code Example #14
def generate_test_path(n_samples,
                       include_outliers=False,
                       outlier_probability=0.1,
                       include_noise=False,
                       noise_sigma_trans=0.01,
                       noise_sigma_rot=0.1):
    # Create a cosine for x, a sine for y and linear motion for z.
    # Rotate around the curve, while keeping the x-axis perpendicular to the
    # curve.
    dual_quaternions = []
    t = np.linspace(0, 1, num=n_samples)
    x = 10.0 + np.cos(4 * np.pi * t)
    y = -5.0 + -np.sin(4 * np.pi * t) * 4
    z = 0.5 + t * 5
    theta = 4 * np.pi * t
    outliers = []
    for i in range(n_samples):
        q_tmp = tf.transformations.quaternion_from_euler(
            0., -theta[i] * 0.1, -theta[i], 'rxyz')
        if include_outliers:
            if np.random.rand() < outlier_probability:
                outliers.append(i)
                q_tmp = np.random.rand(4)
                x[i] = np.random.rand() * max(x) * (
                    -1 if np.random.rand() < 0.5 else 0)
                y[i] = np.random.rand() * max(y) * (
                    -1 if np.random.rand() < 0.5 else 0)
                z[i] = np.random.rand() * max(z) * (
                    -1 if np.random.rand() < 0.5 else 0)
        if include_noise:
            # Add zero mean gaussian noise with sigma noise_sigma.
            x[i] += np.random.normal(0.0, noise_sigma_trans)
            y[i] += np.random.normal(0.0, noise_sigma_trans)
            z[i] += np.random.normal(0.0, noise_sigma_trans)
            # TODO(ff): Fix this, there should only be 3 random numbers drawn.
            axis_noise_x = np.random.normal(0.0, noise_sigma_rot)
            axis_noise_y = np.random.normal(0.0, noise_sigma_rot)
            axis_noise_z = np.random.normal(0.0, noise_sigma_rot)
            angle_noise = np.pi * np.random.normal(0.0, noise_sigma_rot)

            q_noise = Quaternion.from_angle_axis(
                angle_noise, (axis_noise_x, axis_noise_y, axis_noise_z))
            q_noise.normalize()
            q_noise_free = Quaternion(q=q_tmp)
            q = q_noise * q_noise_free * q_noise.inverse()
        else:
            q = Quaternion(q=q_tmp)
        q.normalize()
        if q.w < 0.0:
            q = -q

        dq = DualQuaternion.from_pose(x[i], y[i], z[i], q.x, q.y, q.z, q.w)
        dual_quaternions.append(dq)
    if include_outliers:
        print("Included {} outliers at {} in the test data.".format(
            len(outliers), outliers))

    return dual_quaternions
Code Example #15
 def test_conversions(self):
     pose = [1, 2, 3, 1., 0., 0., 0.]
     dq = DualQuaternion.from_pose_vector(pose)
     pose_out = dq.to_pose()
     matrix_out = dq.to_matrix()
     matrix_expected = np.array([[1, 0, 0, 1], [0, -1, 0, 2], [0, 0, -1, 3],
                                 [0, 0, 0, 1]])
     npt.assert_allclose(pose, pose_out)
     npt.assert_allclose(matrix_out, matrix_expected)
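
matrix_expected is the usual rotation matrix of q = (q_x, q_y, q_z, q_w) = (1, 0, 0, 0), a 180-degree rotation about x, with the translation (1, 2, 3) in the last column. A quick check with the standard quaternion-to-matrix formula:

import numpy as np

x, y, z, w = 1., 0., 0., 0.
R = np.array([
    [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
    [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
    [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
])
print(R)  # diag(1, -1, -1), the rotation block of matrix_expected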
Code Example #16
def spoil_initial_guess(time_offset_initial_guess, dq_H_E_initial_guess, angular_offset_range,
                        translation_offset_range, time_offset_range):
  """ Apply a random perturbation to the calibration."""

  random_time_offset_offset = np.random.uniform(
      time_offset_range[0], time_offset_range[1]) * random.choice([-1., 1.])

  # Get a random unit vector
  random_translation_offset = np.random.uniform(-1.0, 1.0, 3)
  random_translation_offset /= np.linalg.norm(random_translation_offset)
  assert np.isclose(np.linalg.norm(random_translation_offset), 1., atol=1e-8)

  # Scale the unit vector to a random length within translation_offset_range.
  random_translation_length = np.random.uniform(
      translation_offset_range[0], translation_offset_range[1])
  random_translation_offset *= random_translation_length

  # Get an orientation offset within angular_offset_range.
  random_quaternion_offset = Quaternion.get_random(
      angular_offset_range[0], angular_offset_range[1])

  translation_initial_guess = dq_H_E_initial_guess.to_pose()[0: 3]
  orientation_initial_guess = dq_H_E_initial_guess.q_rot

  time_offset_spoiled = time_offset_initial_guess + random_time_offset_offset
  translation_spoiled = random_translation_offset + translation_initial_guess
  orientation_spoiled = random_quaternion_offset * orientation_initial_guess

  # Check if we spoiled correctly.
  random_angle_offset = angle_between_quaternions(
      orientation_initial_guess, orientation_spoiled)

  assert random_angle_offset <= angular_offset_range[1]
  assert random_angle_offset >= angular_offset_range[0]

  assert np.linalg.norm(translation_spoiled -
                        translation_initial_guess) <= translation_offset_range[1]
  assert np.linalg.norm(translation_spoiled -
                        translation_initial_guess) >= translation_offset_range[0]

  # Assemble the spoiled calibration from the perturbed translation and
  # orientation.
  dq_H_E_spoiled = DualQuaternion.from_pose(
      translation_spoiled[0], translation_spoiled[1], translation_spoiled[2],
      orientation_spoiled.q[0], orientation_spoiled.q[1],
      orientation_spoiled.q[2], orientation_spoiled.q[3])

  print("dq_H_E_initial_guess: {}".format(dq_H_E_initial_guess.dq))
  print("dq_H_E_spoiled: {}".format(dq_H_E_spoiled.dq))

  print("time_offset_initial_guess: {}".format(time_offset_initial_guess))
  print("time_offset_spoiled: {}".format(time_offset_spoiled))

  print("random_quaternion_offset: {}".format(random_quaternion_offset.q))

  return (time_offset_spoiled, dq_H_E_spoiled,
          (random_translation_offset, random_angle_offset, random_time_offset_offset))
Code Example #17
  @classmethod
  def fromJson(cls, in_file, switchConvention=False):
    with open(in_file, 'r') as f:
      data = json.load(f)
    
    p = [ float(data['translation'][name]) for name in 'xyz' ]
    
    q = np.array([ float(data['rotation'][name]) for name in 'ijkw' ])
    if switchConvention:
      q[:3] *= -1.0

    dq = DualQuaternion.from_pose_vector(np.hstack((p, q)))
    return ExtrinsicCalibration(float(data['delay']), dq)
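
From the keys the code reads, the input file must look like the following (a sketch; the numeric values are made up):

import json

example = {
    "delay": 0.02,                                         # time offset
    "translation": {"x": 0.1, "y": -0.05, "z": 0.3},
    "rotation": {"i": 0.0, "j": 0.0, "k": 0.0, "w": 1.0},  # unit quaternion
}
with open("calibration.json", "w") as f:
    json.dump(example, f)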
Code Example #18
def apply_hand_eye_calibration(trajectory_G_I, marker_calibration_file):
    """Returns the transformed estimator trajectory.

    Computes the trajectory of the marker frame, given the trajectory of the
    eye and the hand_eye_calibration output.
    """
    assert os.path.isfile(marker_calibration_file)
    # Frames of reference: M = marker (hand), I = IMU (eye).
    dq_I_M = ExtrinsicCalibration.fromJson(
        marker_calibration_file).pose_dq.inverse()
    trajectory_G_M = trajectory_G_I
    for idx in range(trajectory_G_M.shape[0]):
        dq_G_I = DualQuaternion.from_pose_vector(trajectory_G_I[idx, 1:8])
        dq_G_M = dq_G_I * dq_I_M
        trajectory_G_M[idx, 1:8] = dq_G_M.to_pose()
    return trajectory_G_M
Code Example #19
def calibrateTwo(group, a_in, b_in, a, b):
    if not os.path.exists(group):
        os.mkdir(group)
    a_aligned = group + '/' + a + "_aligned.csv"
    b_aligned = group + '/' + b + "_aligned.csv"
    time_offset_file = group + '/' + 'time_offset.csv'
    pose_file = group + '/' + 'pose.csv'

    if requiresUpdate([a_in, b_in], [a_aligned, b_aligned, time_offset_file]):
        run(
            "rosrun hand_eye_calibration compute_aligned_poses.py \
      --poses_B_H_csv_file %s  \
      --poses_W_E_csv_file %s \
      --aligned_poses_B_H_csv_file %s \
      --aligned_poses_W_E_csv_file %s \
      --time_offset_output_csv_file %s" %
            (a_in, b_in, a_aligned, b_aligned, time_offset_file), DRY_RUN)

    if requiresUpdate([a_aligned, b_aligned], [pose_file]):
        run(
            "rosrun hand_eye_calibration compute_hand_eye_calibration.py \
      --aligned_poses_B_H_csv_file %s \
      --aligned_poses_W_E_csv_file %s  \
      --extrinsics_output_csv_file %s" % (a_aligned, b_aligned, pose_file),
            DRY_RUN)

    init_guess_file = group + "_init_guess.json"
    if requiresUpdate([time_offset_file, pose_file], [init_guess_file]):
        time_offset = float(readArrayFromCsv(time_offset_file)[0, 0])
        pose = readArrayFromCsv(pose_file).reshape((7, ))
        calib = ExtrinsicCalibration(time_offset,
                                     DualQuaternion.from_pose_vector(pose))
        calib.writeJson(init_guess_file)

    calib_file = group + ".json"
    if requiresUpdate([a_in, b_in, init_guess_file], [calib_file]):
        run(
            "rosrun hand_eye_calibration_batch_estimation batch_estimator -v 1 \
      --pose1_csv=%s --pose2_csv=%s \
      --init_guess_file=%s \
      --output_file=%s" % (a_in, b_in, init_guess_file, calib_file), DRY_RUN)

    cal = ExtrinsicCalibration.fromJson(calib_file)
    cal_init = ExtrinsicCalibration.fromJson(init_guess_file)
    print(cal_init, "->\n", cal)
    return cal, cal_init
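
A typical call (hypothetical file and frame names) that aligns two pose streams, computes an initial guess, and refines it with the batch estimator, caching every intermediate artifact under group/:

cal, cal_init = calibrateTwo('cam_imu', 'cam_poses.csv', 'imu_poses.csv',
                             'cam', 'imu')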
Code Example #20
 def test_conversions(self):
     poses = [[1, 2, 3, 1.0, 0., 0., 0.],
              [1, -2, -3, -0.5 * math.sqrt(2), 0., 0., -0.5 * math.sqrt(2)]]
     expected_matrices = [
         np.array([[1, 0, 0, 1], [0, -1, 0, 2], [0, 0, -1, 3],
                   [0, 0, 0, 1]]),
         np.array([[1, 0, 0, 1], [0, 0, -1, -2], [0, 1, 0, -3],
                   [0, 0, 0, 1]])
     ]
     for idx in range(len(poses)):
         pose = poses[idx]
         matrix_expected = expected_matrices[idx]
         dq = DualQuaternion.from_pose_vector(pose)
         pose_out = dq.to_pose()
         matrix_out = dq.to_matrix()
         np.set_printoptions(suppress=True)
         npt.assert_allclose(pose[0:3], pose_out[0:3])
         # Check both quaternions which describe the same rotation.
         if not np.allclose(pose[3:7], pose_out[3:7]):
             npt.assert_allclose(pose[3:7], -pose_out[3:7])
         npt.assert_allclose(matrix_out, matrix_expected, atol=1e-6)
Code Example #21
 def test_equality(self):
     qr = Quaternion(1, 2, 3, 4)
     qt = Quaternion(1, 3, 3, 1)
     dq_1 = DualQuaternion(qr, qt)
     dq_2 = DualQuaternion(qr, qt)
     self.assertEqual(dq_1, dq_2)
Code Example #22
def compute_initial_guess_for_all_pairs(set_of_pose_pairs, algorithm_name, hand_eye_config,
                                        filtering_config, optimization_config, visualize=False):
  """
  Iterate over all pairs and compute an initial guess for the calibration (both time and
  transformation). Also store the experiment results from this computation for each pair.
  """

  set_of_dq_H_E_initial_guess = []
  set_of_time_offset_initial_guess = []

  # Initialize the result entry.
  result_entry = ResultEntry()
  result_entry.init_from_configs(algorithm_name, 0, filtering_config,
                                 hand_eye_config, optimization_config)

  for (pose_file_B_H, pose_file_W_E) in set_of_pose_pairs:
    print("\n\nCompute initial guess for calibration between \n\t{} \n and \n\t{} \n\n".format(
        pose_file_B_H, pose_file_W_E))

    (time_stamped_poses_B_H,
     times_B_H,
     quaternions_B_H
     ) = read_time_stamped_poses_from_csv_file(pose_file_B_H)
    print("Found ", time_stamped_poses_B_H.shape[0],
          " poses in file: ", pose_file_B_H)

    (time_stamped_poses_W_E,
     times_W_E,
     quaternions_W_E) = read_time_stamped_poses_from_csv_file(pose_file_W_E)
    print("Found ", time_stamped_poses_W_E.shape[0],
          " poses in file: ", pose_file_W_E)

    # No time offset.
    time_offset_initial_guess = 0.
    # Unit DualQuaternion.
    dq_H_E_initial_guess = DualQuaternion.from_vector(
        [0., 0., 0., 1.0, 0., 0., 0., 0.])

    print("Computing time offset...")
    time_offset_initial_guess = calculate_time_offset(times_B_H, quaternions_B_H, times_W_E,
                                                      quaternions_W_E, filtering_config,
                                                      visualize)

    print("Time offset: {}s".format(time_offset_initial_guess))

    print("Computing aligned poses...")
    (aligned_poses_B_H, aligned_poses_W_E) = compute_aligned_poses(
        time_stamped_poses_B_H, time_stamped_poses_W_E, time_offset_initial_guess, visualize)

    # Convert poses to dual quaternions.
    dual_quat_B_H_vec = [DualQuaternion.from_pose_vector(
        aligned_pose_B_H) for aligned_pose_B_H in aligned_poses_B_H[:, 1:]]
    dual_quat_W_E_vec = [DualQuaternion.from_pose_vector(
        aligned_pose_W_E) for aligned_pose_W_E in aligned_poses_W_E[:, 1:]]

    assert len(dual_quat_B_H_vec) == len(dual_quat_W_E_vec), ("len(dual_quat_B_H_vec): {} "
                                                              "vs len(dual_quat_W_E_vec): {}"
                                                              ).format(len(dual_quat_B_H_vec),
                                                                       len(dual_quat_W_E_vec))

    dual_quat_B_H_vec = align_paths_at_index(dual_quat_B_H_vec)
    dual_quat_W_E_vec = align_paths_at_index(dual_quat_W_E_vec)

    # Draw both paths in their Global / World frame.
    if visualize:
      poses_B_H = np.array([dual_quat_B_H_vec[0].to_pose().T])
      poses_W_E = np.array([dual_quat_W_E_vec[0].to_pose().T])
      for i in range(1, len(dual_quat_B_H_vec)):
        poses_B_H = np.append(poses_B_H, np.array(
            [dual_quat_B_H_vec[i].to_pose().T]), axis=0)
        poses_W_E = np.append(poses_W_E, np.array(
            [dual_quat_W_E_vec[i].to_pose().T]), axis=0)
      every_nth_element = args.plot_every_nth_pose
      plot_poses([poses_B_H[::every_nth_element], poses_W_E[::every_nth_element]],
                 True, title="3D Poses Before Alignment")

    print("Computing hand-eye calibration to obtain an initial guess...")

    if hand_eye_config.use_baseline_approach:
      (success, dq_H_E_initial_guess, rmse,
       num_inliers, num_poses_kept,
       runtime, singular_values, bad_singular_values) = compute_hand_eye_calibration_BASELINE(
          dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)
    else:
      (success, dq_H_E_initial_guess, rmse,
       num_inliers, num_poses_kept,
       runtime, singular_values, bad_singular_values) = compute_hand_eye_calibration_RANSAC(
          dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)

    result_entry.success.append(success)
    result_entry.num_initial_poses.append(len(dual_quat_B_H_vec))
    result_entry.num_poses_kept.append(num_poses_kept)
    result_entry.runtimes.append(runtime)
    result_entry.singular_values.append(singular_values)
    result_entry.bad_singular_value.append(bad_singular_values)
    result_entry.dataset_names.append((pose_file_B_H, pose_file_W_E))

    set_of_dq_H_E_initial_guess.append(dq_H_E_initial_guess)
    set_of_time_offset_initial_guess.append(time_offset_initial_guess)

  return (set_of_dq_H_E_initial_guess,
          set_of_time_offset_initial_guess, result_entry)
Code Example #23
 def test_conjugate(self):
     qr = Quaternion(1., 2., 3., 4.)
     qt = Quaternion(1., 3., 3., 5.)
     dq = DualQuaternion(qr, qt)
     dq_expected = np.array([-1., -2., -3., 4., -1., -3., -3., 5.]).T
     npt.assert_allclose((dq.conjugate()).dq, dq_expected, atol=1e-6)
Code Example #24
 def test_division_with_scalar(self):
     qr = Quaternion(0.5, 0.5, -0.5, 0.5)
     qt = Quaternion(1., 3., 3., 0.)
     dq = DualQuaternion(qr, qt)
     dq_expected = np.array([0.25, 0.25, -0.25, 0.25, 0.5, 1.5, 1.5, 0.]).T
     npt.assert_allclose((dq / 2.).dq, dq_expected)
Code Example #25
    use_poses_B_H = (args.aligned_poses_B_H_csv_file is not None)
    use_poses_H_B = (args.aligned_poses_H_B_csv_file is not None)
    use_poses_W_E = (args.aligned_poses_W_E_csv_file is not None)
    use_poses_E_W = (args.aligned_poses_E_W_csv_file is not None)

    assert use_poses_B_H != use_poses_H_B, \
        "Provide either poses_B_H or poses_H_B!"
    assert use_poses_W_E != use_poses_E_W, \
        "Provide either poses_W_E or poses_E_W!"

    if use_poses_B_H:
        with open(args.aligned_poses_B_H_csv_file, 'r') as csvfile:
            poses1_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses1 = np.array(list(poses1_reader))
            poses1 = poses1.astype(float)
            dual_quat_B_H_vec = [
                DualQuaternion.from_pose_vector(pose[1:]) for pose in poses1
            ]
    else:
        with open(args.aligned_poses_H_B_csv_file, 'r') as csvfile:
            poses1_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses1 = np.array(list(poses1_reader))
            poses1 = poses1.astype(float)
            dual_quat_B_H_vec = [
                DualQuaternion.from_pose_vector(pose[1:]).inverse()
                for pose in poses1
            ]

    if use_poses_W_E:
        with open(args.aligned_poses_W_E_csv_file, 'r') as csvfile:
            poses2_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses2 = np.array(list(poses2_reader))
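
pose[1:] implies each CSV row carries a timestamp followed by the 7-element pose vector, i.e. [t, x, y, z, q_x, q_y, q_z, q_w] (the quaternion order follows from_pose_vector as used in the tests above). A sketch of one such row (hypothetical values):

import numpy as np

row = np.array([1234.5, 1., 2., 3., 0., 0., 0., 1.])  # t, translation, quaternion
pose = row[1:]  # what the comprehensions above pass to DualQuaternion.from_pose_vector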
Code Example #26
        "Provide either poses_W_E or poses_E_W!"

    if args.calibration_output_json_file is not None:
        assert args.time_offset_input_csv_file is not None, (
            "In order to compose a complete calibration result json file, you "
            "need to provide the time alignment csv file as an input using "
            "this flag: --time_offset_input_csv_file")

    if use_poses_B_H:
        with open(args.aligned_poses_B_H_csv_file, 'r') as csvfile:
            poses1_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses1 = np.array(list(poses1_reader))
            poses1 = poses1.astype(float)
            dual_quat_B_H_vec = [
                DualQuaternion.from_pose_vector(pose[1:]) for pose in poses1
            ]
    else:
        with open(args.aligned_poses_H_B_csv_file, 'r') as csvfile:
            poses1_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses1 = np.array(list(poses1_reader))
            poses1 = poses1.astype(float)
            dual_quat_B_H_vec = [
                DualQuaternion.from_pose_vector(pose[1:]).inverse()
                for pose in poses1
            ]

    if use_poses_W_E:
        with open(args.aligned_poses_W_E_csv_file, 'r') as csvfile:
            poses2_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
            poses2 = np.array(list(poses2_reader))
Code Example #27
def compute_hand_eye_calibration(dq_B_H_vec_inliers,
                                 dq_W_E_vec_inliers,
                                 scalar_part_tolerance=1e-2,
                                 enforce_same_non_dual_scalar_sign=True):
    """
  Do the actual hand eye-calibration as described in the referenced paper.
  Assumes the outliers have already been removed and the scalar parts of
  each pair are a match.
  """
    n_quaternions = len(dq_B_H_vec_inliers)

    # Verify that the first pose is at the origin.
    assert np.allclose(dq_B_H_vec_inliers[0].dq,
                       [0., 0., 0., 1.0, 0., 0., 0., 0.],
                       atol=1.e-8), dq_B_H_vec_inliers[0]
    assert np.allclose(dq_W_E_vec_inliers[0].dq,
                       [0., 0., 0., 1.0, 0., 0., 0., 0.],
                       atol=1.e-8), dq_W_E_vec_inliers[0]

    if enforce_same_non_dual_scalar_sign:
        for i in range(n_quaternions):
            dq_W_E = dq_W_E_vec_inliers[i]
            dq_B_H = dq_B_H_vec_inliers[i]
            if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.)
                    or (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):
                dq_W_E_vec_inliers[i].dq = -dq_W_E_vec_inliers[i].dq.copy()

    # 0. Stop alignment if there are still pairs that do not have matching
    # scalar parts.
    for j in range(n_quaternions):
        dq_B_H = dq_B_H_vec_inliers[j]
        dq_W_E = dq_W_E_vec_inliers[j]

        scalar_parts_B_H = dq_B_H.scalar()
        scalar_parts_W_E = dq_W_E.scalar()

        assert np.allclose(
            scalar_parts_B_H.dq,
            scalar_parts_W_E.dq,
            atol=scalar_part_tolerance), (
                "Mismatch of scalar parts of dual quaternion at idx {}:"
                " dq_B_H: {} dq_W_E: {}".format(j, dq_B_H, dq_W_E))

    # 1.
    # Construct 6n x 8 matrix T
    t_matrix = setup_t_matrix(dq_B_H_vec_inliers, dq_W_E_vec_inliers)

    # 2.
    # Compute SVD of T and check if only two singular values are almost equal to
    # zero. Take the corresponding right-singular vectors (v_7 and v_8)
    U, s, V = np.linalg.svd(t_matrix)

    # Check if only the last two singular values are almost zero.
    bad_singular_values = False
    for i, singular_value in enumerate(s):
        if i < 6:
            if singular_value < 5e-1:
                bad_singular_values = True
        else:
            if singular_value > 5e-1:
                bad_singular_values = True
    v_7 = V[6, :].copy()
    v_8 = V[7, :].copy()
    # print("v_7: {}".format(v_7))
    # print("v_8: {}".format(v_8))

    # 3.
    # Compute the coefficients of (35) and solve it, finding two solutions for s.
    u_1 = v_7[0:4].copy()
    u_2 = v_8[0:4].copy()
    v_1 = v_7[4:8].copy()
    v_2 = v_8[4:8].copy()
    # print("u_1: {}, \nu_2: {}, \nv_1: {}, \nv_2: {}".format(u_1, u_2, v_1, v_2))

    a = np.dot(u_1.T, v_1)
    assert a != 0.0, "This would involve division by zero."
    b = np.dot(u_1.T, v_2) + np.dot(u_2.T, v_1)
    c = np.dot(u_2.T, v_2)
    # print("a: {}, b: {}, c: {}".format(a, b, c))
    square_root_term = b * b - 4.0 * a * c

    if square_root_term < -1e-2:
        assert False, "square_root_term is too negative: {}".format(
            square_root_term)
    if square_root_term < 0.0:
        square_root_term = 0.0
    s_1 = (-b + np.sqrt(square_root_term)) / (2.0 * a)
    s_2 = (-b - np.sqrt(square_root_term)) / (2.0 * a)
    # print("s_1: {}, s_2: {}".format(s_1, s_2))

    # 4.
    # For these two s values, compute s^2*u_1^T*u_1 + 2*s*u_1^T*u_2 + u_2^T*u_2
    # From these choose the largest to compute lambda_2 and then lambda_1
    solution_1 = s_1 * s_1 * np.dot(u_1.T, u_1) + 2.0 * \
        s_1 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)
    solution_2 = s_2 * s_2 * np.dot(u_1.T, u_1) + 2.0 * \
        s_2 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)

    if solution_1 > solution_2:
        assert solution_1 > 0.0, solution_1
        lambda_2 = np.sqrt(1.0 / solution_1)
        lambda_1 = s_1 * lambda_2
    else:
        assert solution_2 > 0.0, solution_2
        lambda_2 = np.sqrt(1.0 / solution_2)
        lambda_1 = s_2 * lambda_2
    # print("lambda_1: {}, lambda_2: {}".format(lambda_1, lambda_2))

    # 5.
    # The result is lambda_1*v_7 + lambda_2*v_8
    dq_H_E = DualQuaternion.from_vector(lambda_1 * v_7 + lambda_2 * v_8)
    # Normalize the output, to get rid of numerical errors.
    dq_H_E.normalize()

    if (dq_H_E.q_rot.w < 0.):
        dq_H_E.dq = -dq_H_E.dq.copy()
    return (dq_H_E, s, bad_singular_values)
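
For reference, steps 3 and 4 implement the two constraints on the solution $\hat{q} = \lambda_1 v_7 + \lambda_2 v_8$ in the dual-quaternion formulation: the rotation part must be a unit quaternion and orthogonal to the dual part. Writing $v_7 = (u_1, v_1)$, $v_8 = (u_2, v_2)$ and $s = \lambda_1 / \lambda_2$:

$$\lambda_1^2\, u_1^\top v_1 + \lambda_1 \lambda_2\,(u_1^\top v_2 + u_2^\top v_1) + \lambda_2^2\, u_2^\top v_2 = 0 \;\Rightarrow\; a s^2 + b s + c = 0,$$

$$\lambda_2^2\,(s^2\, u_1^\top u_1 + 2 s\, u_1^\top u_2 + u_2^\top u_2) = 1.$$

The quadratic yields $s_1, s_2$; the root that maximizes the bracketed term gives the valid (positive, better-conditioned) $\lambda_2$, from which $\lambda_1 = s \lambda_2$.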
Code Example #28
 def test_inverse(self):
     qr = Quaternion(1, 2, 3, 4)
     qt = Quaternion(5, 6, 7, 8)
     dq = DualQuaternion(qr, qt)
     identity_dq = np.array([0., 0., 0., 1., 0., 0., 0., 0.]).T
     npt.assert_allclose((dq * dq.inverse()).dq, identity_dq, atol=1e-6)
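
For a dual quaternion whose rotation part is invertible, the inverse satisfies $dq^{-1} = q_r^{-1} - \epsilon\, q_r^{-1} q_d q_r^{-1}$, since

$$dq\, dq^{-1} = q_r q_r^{-1} + \epsilon\,(q_d q_r^{-1} - q_r q_r^{-1} q_d q_r^{-1}) = 1 + \epsilon\, 0,$$

which is exactly the identity vector [0., 0., 0., 1., 0., 0., 0., 0.] asserted above.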