Example #1
0
    def test_hand_eye_calibration(self):
        """Noise-free RANSAC hand-eye calibration must recover the ground-truth dq_H_E."""
        # Generate synthetic, perfectly consistent pose paths from the ground truth.
        dq_B_H_vec, dq_W_E_vec = he_helpers.generate_test_paths(
            20, self.dq_H_E, self.dq_B_W, self.paths_start_at_origin)

        print("dq_H_E ground truth: \n{}".format(self.dq_H_E))

        # Configure the RANSAC solver; visualization is disabled for the test run.
        config = HandEyeConfig()
        config.visualize = False
        config.ransac_max_number_iterations = 50
        config.ransac_sample_size = 3
        (success, dq_H_E_estimated, rmse, num_inliers, num_poses_kept, runtime,
         singular_values,
         bad_singular_values) = compute_hand_eye_calibration_RANSAC(
             dq_B_H_vec, dq_W_E_vec, config)
        assert success, "Hand-eye calibration, failed!"

        pose_H_E_estimated = dq_H_E_estimated.to_pose()
        dq_H_E_estimated.normalize()

        # The convention used here is a positive real scalar part of the rotation
        # quaternion (pose index 6); dq and -dq encode the same transformation.
        assert pose_H_E_estimated[6] > 0.0, (
            "The real part of the pose's quaternion should be positive. "
            "The pose is: \n{}\n where the dual quaternion was: "
            "\n{}".format(pose_H_E_estimated, dq_H_E_estimated))

        print("The static input pose was: \n{}".format(self.pose_H_E))
        print("The hand-eye calibration's output pose is: \n{}".format(
            pose_H_E_estimated))

        print("T_H_E ground truth: \n{}".format(self.dq_H_E.to_matrix()))
        print("T_H_E estimated: \n{}".format(dq_H_E_estimated.to_matrix()))

        # Without noise the estimate should match the ground truth very tightly.
        assert np.allclose(self.dq_H_E.dq, dq_H_E_estimated.dq, atol=1e-3), (
            "input dual quaternion: {}, estimated dual quaternion: {}".format(
                self.dq_H_E, dq_H_E_estimated))
Example #2
0
    def test_hand_eye_calibration_with_noise(self):
        """Calibration on noisy (but outlier-free) paths should stay close to truth."""
        # Generate paths perturbed by small translational/rotational noise on both sides.
        dq_B_H_vec, dq_W_E_vec = he_helpers.generate_test_paths(
            20,
            self.dq_H_E,
            self.dq_B_W,
            self.paths_start_at_origin,
            include_outliers_B_H=False,
            include_noise_B_H=True,
            noise_sigma_trans_B_H=0.0001,
            noise_sigma_rot_B_H=0.001,
            include_outliers_W_E=False,
            include_noise_W_E=True,
            noise_sigma_trans_W_E=0.0001,
            noise_sigma_rot_W_E=0.001)

        # Plot the poses with noise. (.T on a 1-D pose vector is a no-op; each row
        # of the resulting (N, 7) array is one pose.)
        poses_B_H = np.array([dq.to_pose().T for dq in dq_B_H_vec])
        poses_W_E = np.array([dq.to_pose().T for dq in dq_W_E_vec])
        plot_poses([poses_B_H, poses_W_E], blocking=self.make_plots_blocking)

        # Configure the RANSAC solver; visualization is disabled for the test run.
        config = HandEyeConfig()
        config.visualize = False
        config.ransac_max_number_iterations = 50
        config.ransac_sample_size = 3
        (success, dq_H_E_estimated, rmse, num_inliers, num_poses_kept, runtime,
         singular_values,
         bad_singular_values) = compute_hand_eye_calibration_RANSAC(
             dq_B_H_vec, dq_W_E_vec, config)
        assert success, "Hand-eye calibration, failed!"

        pose_H_E_estimated = dq_H_E_estimated.to_pose()
        dq_H_E_estimated.normalize()

        # The convention used here is a positive real scalar part of the rotation
        # quaternion (pose index 6); dq and -dq encode the same transformation.
        assert pose_H_E_estimated[6] > 0.0, (
            "The real part of the pose's quaternion should be positive. "
            "The pose is: \n{}\n where the dual quaternion was: "
            "\n{}".format(pose_H_E_estimated, dq_H_E_estimated))

        print("The static input pose was: \n{}".format(self.pose_H_E))
        print("The hand-eye calibration's output pose is: \n{}".format(
            pose_H_E_estimated))

        print("T_H_E ground truth: \n{}".format(self.dq_H_E.to_matrix()))
        print("T_H_E estimated: \n{}".format(dq_H_E_estimated.to_matrix()))

        # Noise loosens the tolerance compared to the noise-free test.
        assert np.allclose(self.dq_H_E.dq, dq_H_E_estimated.dq, atol=4e-2), (
            "input dual quaternion: {}, estimated dual quaternion: {}".format(
                self.dq_H_E, dq_H_E_estimated))
    # Select which pre-canned hand-eye configuration to run; the alternatives
    # are kept as commented-out one-liners for quick switching.
    # (_, hand_eye_config) = get_RANSAC_classic_config(False)
    (_, hand_eye_config) = get_exhaustive_search_scalar_part_inliers_config()
    # (_, hand_eye_config) = get_baseline_config(True)

    # Forward the command-line visualization options into the config.
    hand_eye_config.visualize = args.visualize
    hand_eye_config.visualize_plot_every_nth_pose = args.plot_every_nth_pose

    # Run either the baseline or the RANSAC solver; both return the same
    # 8-tuple of results.
    if hand_eye_config.use_baseline_approach:
        (success, dq_H_E, rmse, num_inliers, num_poses_kept, runtime,
         singular_values,
         bad_singular_values) = compute_hand_eye_calibration_BASELINE(
             dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)
    else:
        (success, dq_H_E, rmse, num_inliers, num_poses_kept, runtime,
         singular_values,
         bad_singular_values) = compute_hand_eye_calibration_RANSAC(
             dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)

    # Optionally dump the estimated extrinsics (as a pose vector) to CSV.
    if args.extrinsics_output_csv_file is not None:
        print("Writing extrinsics to %s." % args.extrinsics_output_csv_file)
        from hand_eye_calibration.csv_io import write_double_numpy_array_to_csv_file
        write_double_numpy_array_to_csv_file(dq_H_E.to_pose(),
                                             args.extrinsics_output_csv_file)

    # Optionally write a JSON calibration; this requires both the JSON output
    # path and a time-offset CSV (only its first value is read).
    output_json_calibration = args.calibration_output_json_file is not None
    has_time_offset_file = args.time_offset_input_csv_file is not None
    if output_json_calibration and has_time_offset_file:
        time_offset = float(
            readArrayFromCsv(args.time_offset_input_csv_file)[0, 0])
        calib = ExtrinsicCalibration(
            time_offset, DualQuaternion.from_pose_vector(dq_H_E.to_pose()))
        calib.writeJson(args.calibration_output_json_file)
Example #4
0
def compute_initial_guess_for_all_pairs(set_of_pose_pairs, algorithm_name, hand_eye_config,
                                        filtering_config, optimization_config, visualize=False):
  """
  Iterate over all pairs and compute an initial guess for the calibration (both time and
  transformation). Also store the experiment results from this computation for each pair.

  Args:
    set_of_pose_pairs: iterable of (pose_file_B_H, pose_file_W_E) CSV file-path tuples.
    algorithm_name: identifier stored in the ResultEntry.
    hand_eye_config: HandEyeConfig; use_baseline_approach selects BASELINE vs RANSAC.
    filtering_config: configuration forwarded to calculate_time_offset.
    optimization_config: stored in the ResultEntry alongside the other configs.
    visualize: if True, plot the time-offset estimation and the aligned 3D pose paths.

  Returns:
    Tuple (set_of_dq_H_E_initial_guess, set_of_time_offset_initial_guess, result_entry)
    with one initial-guess entry per pose pair.
  """

  set_of_dq_H_E_initial_guess = []
  set_of_time_offset_initial_guess = []

  # Initialize the result entry.
  result_entry = ResultEntry()
  result_entry.init_from_configs(algorithm_name, 0, filtering_config,
                                 hand_eye_config, optimization_config)

  for (pose_file_B_H, pose_file_W_E) in set_of_pose_pairs:
    print("\n\nCompute initial guess for calibration between \n\t{} \n and \n\t{} \n\n".format(
        pose_file_B_H, pose_file_W_E))

    (time_stamped_poses_B_H,
     times_B_H,
     quaternions_B_H
     ) = read_time_stamped_poses_from_csv_file(pose_file_B_H)
    print("Found ", time_stamped_poses_B_H.shape[0],
          " poses in file: ", pose_file_B_H)

    (time_stamped_poses_W_E,
     times_W_E,
     quaternions_W_E) = read_time_stamped_poses_from_csv_file(pose_file_W_E)
    print("Found ", time_stamped_poses_W_E.shape[0],
          " poses in file: ", pose_file_W_E)

    print("Computing time offset...")
    # BUGFIX: pass the function's own `visualize` parameter instead of the
    # global `args.visualize`, which silently ignored the parameter and made
    # the function depend on a module-level `args` object.
    time_offset_initial_guess = calculate_time_offset(times_B_H, quaternions_B_H, times_W_E,
                                                      quaternions_W_E, filtering_config,
                                                      visualize)

    print("Time offset: {}s".format(time_offset_initial_guess))

    print("Computing aligned poses...")
    # Resample/interpolate both trajectories onto common timestamps using the
    # estimated time offset.
    (aligned_poses_B_H, aligned_poses_W_E) = compute_aligned_poses(
        time_stamped_poses_B_H, time_stamped_poses_W_E, time_offset_initial_guess, visualize)

    # Convert poses to dual quaternions (column 0 is the timestamp, so skip it).
    dual_quat_B_H_vec = [DualQuaternion.from_pose_vector(
        aligned_pose_B_H) for aligned_pose_B_H in aligned_poses_B_H[:, 1:]]
    dual_quat_W_E_vec = [DualQuaternion.from_pose_vector(
        aligned_pose_W_E) for aligned_pose_W_E in aligned_poses_W_E[:, 1:]]

    assert len(dual_quat_B_H_vec) == len(dual_quat_W_E_vec), ("len(dual_quat_B_H_vec): {} "
                                                              "vs len(dual_quat_W_E_vec): {}"
                                                              ).format(len(dual_quat_B_H_vec),
                                                                       len(dual_quat_W_E_vec))

    dual_quat_B_H_vec = align_paths_at_index(dual_quat_B_H_vec)
    dual_quat_W_E_vec = align_paths_at_index(dual_quat_W_E_vec)

    # Draw both paths in their Global / World frame.
    if visualize:
      poses_B_H = np.array([dual_quat_B_H_vec[0].to_pose().T])
      poses_W_E = np.array([dual_quat_W_E_vec[0].to_pose().T])
      for i in range(1, len(dual_quat_B_H_vec)):
        poses_B_H = np.append(poses_B_H, np.array(
            [dual_quat_B_H_vec[i].to_pose().T]), axis=0)
        poses_W_E = np.append(poses_W_E, np.array(
            [dual_quat_W_E_vec[i].to_pose().T]), axis=0)
      # NOTE(review): still reads the global `args` for the plot subsampling
      # rate; there is no corresponding parameter — confirm `args` is always
      # defined when visualize=True, or thread this through as a parameter.
      every_nth_element = args.plot_every_nth_pose
      plot_poses([poses_B_H[:: every_nth_element], poses_W_E[:: every_nth_element]],
                 True, title="3D Poses Before Alignment")

    print("Computing hand-eye calibration to obtain an initial guess...")

    # Both solver variants return the same 8-tuple; dq_H_E_initial_guess is
    # assigned on every path, so no placeholder initialization is needed.
    if hand_eye_config.use_baseline_approach:
      (success, dq_H_E_initial_guess, rmse,
       num_inliers, num_poses_kept,
       runtime, singular_values, bad_singular_values) = compute_hand_eye_calibration_BASELINE(
          dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)
    else:
      (success, dq_H_E_initial_guess, rmse,
       num_inliers, num_poses_kept,
       runtime, singular_values, bad_singular_values) = compute_hand_eye_calibration_RANSAC(
          dual_quat_B_H_vec, dual_quat_W_E_vec, hand_eye_config)

    # Record per-pair statistics for the experiment summary.
    result_entry.success.append(success)
    result_entry.num_initial_poses.append(len(dual_quat_B_H_vec))
    result_entry.num_poses_kept.append(num_poses_kept)
    result_entry.runtimes.append(runtime)
    result_entry.singular_values.append(singular_values)
    result_entry.bad_singular_value.append(bad_singular_values)
    result_entry.dataset_names.append((pose_file_B_H, pose_file_W_E))

    set_of_dq_H_E_initial_guess.append(dq_H_E_initial_guess)
    set_of_time_offset_initial_guess.append(time_offset_initial_guess)

  return (set_of_dq_H_E_initial_guess,
          set_of_time_offset_initial_guess, result_entry)