Code Example #1
    def calibrateHandEyeTest(self, HMBase2TCPs, HMTarget2Cams):
        # assert len(HMBase2TCPs) == len(HMTarget2Cams)
        for hmmat in HMBase2TCPs:
            rotation = hmmat[0:3, 0:3]
            self.R_gripper2base.append(rotation)
            translation = hmmat[0:3, 3]
            self.t_gripper2base.append(translation)

        for hmmat in HMTarget2Cams:
            rotation = hmmat[0:3, 0:3]
            self.R_target2cam.append(rotation)
            translation = hmmat[0:3, 3]
            self.t_target2cam.append(translation)

        methodHE = [
            cv2.CALIB_HAND_EYE_TSAI, cv2.CALIB_HAND_EYE_PARK,
            cv2.CALIB_HAND_EYE_HORAUD, cv2.CALIB_HAND_EYE_ANDREFF,
            cv2.CALIB_HAND_EYE_DANIILIDIS
        ]

        for mth in methodHE:
            self.R_cam2gripper, self.t_cam2gripper = cv2.calibrateHandEye(
                self.R_gripper2base, self.t_gripper2base, self.R_target2cam,
                self.t_target2cam, None, None, mth)
            # output results
            print("--------------------------------------")
            print("Method %d" % mth)
            print(self.R_cam2gripper)
            print(self.t_cam2gripper)
            print("--------------------------------------")
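Example #1 passes None, None positionally for the two output arrays before the method flag. Below is a minimal, self-contained sanity check that is not part of any example on this page: it fabricates consistent poses from a known camera-to-gripper transform and confirms that cv2.calibrateHandEye recovers it. All data and names here are illustrative.

import numpy as np
import cv2

rng = np.random.default_rng(0)

def random_pose():
    # Random 4x4 rigid transform: rotation from a Rodrigues vector plus a translation.
    R, _ = cv2.Rodrigues(rng.uniform(-np.pi, np.pi, (3, 1)))
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = rng.uniform(-0.5, 0.5, 3)
    return T

X_true = random_pose()         # ground-truth camera -> gripper (the unknown X)
T_target2base = random_pose()  # the calibration target is fixed w.r.t. the base

R_gripper2base, t_gripper2base = [], []
R_target2cam, t_target2cam = [], []
for _ in range(10):
    T_g2b = random_pose()  # simulated robot pose: gripper -> base
    # Consistency: target2base = gripper2base @ cam2gripper @ target2cam,
    # so target2cam = inv(cam2gripper) @ inv(gripper2base) @ target2base.
    T_t2c = np.linalg.inv(X_true) @ np.linalg.inv(T_g2b) @ T_target2base
    R_gripper2base.append(T_g2b[:3, :3])
    t_gripper2base.append(T_g2b[:3, 3])
    R_target2cam.append(T_t2c[:3, :3])
    t_target2cam.append(T_t2c[:3, 3])

R_est, t_est = cv2.calibrateHandEye(R_gripper2base, t_gripper2base,
                                    R_target2cam, t_target2cam,
                                    method=cv2.CALIB_HAND_EYE_TSAI)
print(np.allclose(R_est, X_true[:3, :3], atol=1e-6))
print(np.allclose(t_est.ravel(), X_true[:3, 3], atol=1e-6))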
Code Example #2
File: hand_eye.py Project: saulzar/multical
def hand_eye(world_wrt_camera, base_wrt_gripper):
  """
    Solve the hand-eye problem AX=XB
    See cv2.calibrateHandEye for details. 
    
    Inputs changed to be consistent with hand_eye_robot_world
    compared to cv2.calibrateHandEye. 
    
    Less accurate than hand_eye_robot_world, used as fallback in OpenCV < 4.5

    Note: Uses the data-centric convention where world_camera describes the 
    transform which sends a *point* in the world frame to the same point in camera frame.
  """

  assert world_wrt_camera.shape[0] == base_wrt_gripper.shape[0]

  world_camera_r, world_camera_t = matrix.split(world_wrt_camera)
  base_gripper_r, base_gripper_t = matrix.split(np.linalg.inv(base_wrt_gripper))

  camera_gripper_r, camera_gripper_t =  cv2.calibrateHandEye(
    base_gripper_r, base_gripper_t,
    world_camera_r, world_camera_t)

  camera_wrt_gripper = matrix.join(camera_gripper_r, camera_gripper_t.reshape(-1))
  gripper_wrt_camera = np.linalg.inv(camera_wrt_gripper)

  base_wrt_world = matrix.mean_robust(matrix.transform(
    base_wrt_gripper, gripper_wrt_camera, np.linalg.inv(world_wrt_camera)))

  err = matrix.transform(base_wrt_world, world_wrt_camera) - matrix.transform(base_wrt_gripper, gripper_wrt_camera)
  return base_wrt_world, gripper_wrt_camera, np.linalg.norm(err, axis=(1, 2))
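The docstring above treats cv2.calibrateHandEye as a fallback for OpenCV < 4.5 and refers to a more accurate robot-world/hand-eye solver. For reference, here is a hedged, self-contained sketch of OpenCV's joint solver cv2.calibrateRobotWorldHandEye (available from OpenCV 4.5) on synthetic data; the pose generator and variable names are illustrative and not taken from multical.

import numpy as np
import cv2

rng = np.random.default_rng(1)

def random_pose():
    R, _ = cv2.Rodrigues(rng.uniform(-np.pi, np.pi, (3, 1)))
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = rng.uniform(-0.5, 0.5, 3)
    return T

T_world2base_true = random_pose()   # ground truth: world (board) -> robot base
T_gripper2cam_true = random_pose()  # ground truth: gripper -> camera

R_world2cam, t_world2cam, R_base2gripper, t_base2gripper = [], [], [], []
for _ in range(10):
    T_b2g = random_pose()  # simulated robot pose: base -> gripper
    # world2cam = gripper2cam @ base2gripper @ world2base
    T_w2c = T_gripper2cam_true @ T_b2g @ T_world2base_true
    R_world2cam.append(T_w2c[:3, :3])
    t_world2cam.append(T_w2c[:3, 3])
    R_base2gripper.append(T_b2g[:3, :3])
    t_base2gripper.append(T_b2g[:3, 3])

R_base2world, t_base2world, R_gripper2cam, t_gripper2cam = \
    cv2.calibrateRobotWorldHandEye(R_world2cam, t_world2cam,
                                   R_base2gripper, t_base2gripper,
                                   method=cv2.CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
print(np.allclose(R_base2world, np.linalg.inv(T_world2base_true)[:3, :3], atol=1e-6))
print(np.allclose(R_gripper2cam, T_gripper2cam_true[:3, :3], atol=1e-6))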
Code Example #3
    def compute_calibration(self, handeye_parameters, samples, algorithm=None):
        """
        Computes the calibration through the OpenCV library and returns it.

        :rtype: easy_handeye.handeye_calibration.HandeyeCalibration
        """
        if algorithm is None:
            algorithm = 'Tsai-Lenz'

        loginfo(
            'OpenCV backend calibrating with algorithm {}'.format(algorithm))

        if len(samples) < HandeyeCalibrationBackendOpenCV.MIN_SAMPLES:
            logwarn(
                "{} more samples needed! Not computing the calibration".format(
                    HandeyeCalibrationBackendOpenCV.MIN_SAMPLES -
                    len(samples)))
            return

        # Update data
        opencv_samples = HandeyeCalibrationBackendOpenCV._get_opencv_samples(
            samples)
        (hand_world_rot, hand_world_tr), (marker_camera_rot,
                                          marker_camera_tr) = opencv_samples

        if len(hand_world_rot) != len(marker_camera_rot):
            logerr(
                "Different numbers of hand-world and camera-marker samples!")
            raise AssertionError

        loginfo("Computing from %g poses..." % len(samples))

        method = HandeyeCalibrationBackendOpenCV.AVAILABLE_ALGORITHMS[
            algorithm]

        hand_camera_rot, hand_camera_tr = cv2.calibrateHandEye(
            hand_world_rot,
            hand_world_tr,
            marker_camera_rot,
            marker_camera_tr,
            method=method)
        result = tfs.affines.compose(np.squeeze(hand_camera_tr),
                                     hand_camera_rot, [1, 1, 1])

        loginfo("Computed calibration: {}".format(str(result)))
        (hcqw, hcqx, hcqy,
         hcqz) = [float(i) for i in tfs.quaternions.mat2quat(hand_camera_rot)]
        (hctx, hcty, hctz) = [float(i) for i in hand_camera_tr]

        result_tuple = ((hctx, hcty, hctz), (hcqx, hcqy, hcqz, hcqw))

        ret = HandeyeCalibration(calibration_parameters=handeye_parameters,
                                 transformation=result_tuple)

        return ret
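The algorithm lookup above goes through HandeyeCalibrationBackendOpenCV.AVAILABLE_ALGORITHMS, which is defined elsewhere in easy_handeye. A mapping along these lines (key spellings assumed, not verified against the project) would make the default 'Tsai-Lenz' resolve to OpenCV's Tsai flag:

import cv2

# Assumed shape of the algorithm-name -> OpenCV flag mapping; the actual keys in
# easy_handeye may differ.
AVAILABLE_ALGORITHMS = {
    'Tsai-Lenz': cv2.CALIB_HAND_EYE_TSAI,
    'Park': cv2.CALIB_HAND_EYE_PARK,
    'Horaud': cv2.CALIB_HAND_EYE_HORAUD,
    'Andreff': cv2.CALIB_HAND_EYE_ANDREFF,
    'Daniilidis': cv2.CALIB_HAND_EYE_DANIILIDIS,
}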
Code Example #4
def load_cali_matrix(file_path):
    data = np.load(file_path)

    cam_T_marker_mats_R = data['arr_0']
    cam_T_marker_mats_t = data['arr_1']

    EE_T_base_mats_R = data['arr_2']
    EE_T_base_mats_t = data['arr_3']

    cam_T_base_R, cam_T_base_t = cv2.calibrateHandEye(EE_T_base_mats_R,
                                                      EE_T_base_mats_t,
                                                      cam_T_marker_mats_R,
                                                      cam_T_marker_mats_t)
    return cam_T_base_R, cam_T_base_t
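The arr_0 ... arr_3 keys read above are NumPy's default names for positional arguments to np.savez, so the capture side presumably saved the stacked per-pose arrays in exactly this order. A minimal, hypothetical counterpart to load_cali_matrix:

import numpy as np

def save_cali_matrix(file_path,
                     cam_T_marker_mats_R, cam_T_marker_mats_t,
                     EE_T_base_mats_R, EE_T_base_mats_t):
    # Positional arguments are stored as arr_0, arr_1, arr_2, arr_3 in the .npz file,
    # matching the order expected by load_cali_matrix above.
    np.savez(file_path,
             cam_T_marker_mats_R, cam_T_marker_mats_t,
             EE_T_base_mats_R, EE_T_base_mats_t)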
Code Example #5
    def getHandEyeResultMatrixUsingOpenCV(self):
        methodHE = [
            cv2.CALIB_HAND_EYE_TSAI, cv2.CALIB_HAND_EYE_PARK,
            cv2.CALIB_HAND_EYE_HORAUD, cv2.CALIB_HAND_EYE_ANDREFF,
            cv2.CALIB_HAND_EYE_DANIILIDIS
        ]

        if (self.AlgorithmTest == True):
            fsHandEyeTest = cv2.FileStorage("HandEyeTestData.xml",
                                            cv2.FILE_STORAGE_WRITE)

        for mth in methodHE:
            self.R_cam2gripper, self.t_cam2gripper = cv2.calibrateHandEye(
                self.R_gripper2base, self.t_gripper2base, self.R_target2cam,
                self.t_target2cam, None, None, mth)
            # output results
            print("--------------------------------------")
            print("Method %d" % mth)
            print(self.R_cam2gripper)
            print(self.t_cam2gripper)
            print("--------------------------------------")
            print("Distance: %f" % math.sqrt(
                math.pow(self.t_cam2gripper[0], 2.0) +
                math.pow(self.t_cam2gripper[1], 2.0) +
                math.pow(self.t_cam2gripper[2], 2.0)))
            print("--------------------------------------")

            if (mth == cv2.CALIB_HAND_EYE_HORAUD):
                # for idx in range(len(self.R_gripper2base)):
                #     print("######")
                # make a homogeneous matrix from Target(Calibration) to Gripper(TCP)
                hmT2G = HMUtil.makeHM(self.R_cam2gripper, self.t_cam2gripper.T)
                # make a homogeneous matrix from Gripper(TCP) to Robot Base
                hmG2B = HMUtil.makeHM(self.R_gripper2base[0],
                                      self.t_gripper2base[0].reshape(1, 3))
                # make a homogeneous matrix from Camera to Target(Target)
                hmC2T = HMUtil.makeHM(self.R_target2cam[0],
                                      self.t_target2cam[0].reshape(1, 3))

                # Final HM(Camera to Robot Base)
                # H(C2B) = H(G2B)H(T2G)H(C2T)
                hmResultTransform = np.dot(hmG2B, hmT2G)
                hmResultTransform = np.dot(hmResultTransform, hmC2T)

        if (self.AlgorithmTest == True):
            fsHandEyeTest.release()

        print("Result Transform: ")
        print(hmResultTransform)
        return hmResultTransform
Code Example #6
    def compute_cam2base(marker2cam_poses: List[np.ndarray], ee2base_poses: List[np.ndarray]) -> np.ndarray:
        """You need to implement this function

        Main calculation function for hand eye calibration.
        The input of this function is exactly the output of capture_calibration_data
        Implement an algorithm to solve the AX=XB equation, e.g. Tsai or any other method.

        The reference below lists a traditional algorithm for this problem; you can also use any other method.

        References:
            Tsai, R. Y., & Lenz, R. K. (1989).
            A new technique for fully autonomous and efficient 3D robotics hand/eye calibration.
            IEEE Transactions on Robotics and Automation, 5(3), 345-358.

        Args:
            marker2cam_poses: a list of SE(3) matrices representing poses of the marker relative to the camera
            ee2base_poses: a list of SE(3) matrices representing poses of the robot hand relative to the robot base (static frame)

        Returns:
            Transformation matrix from camera to base

        """

        R_ee2base = []
        t_ee2base = []
        for i in ee2base_poses:
            R_ee2base.append(i[:3,:3])
            t_ee2base.append(i[:3, 3])
        
        R_cam2marker = []
        t_cam2marker = []
        for i in marker2cam_poses:
            i = np.linalg.inv(i)
            R_cam2marker.append(i[:3,:3])
            t_cam2marker.append(i[:3, 3])

        R_marker2ee, t_marker2ee = cv2.calibrateHandEye(R_ee2base, t_ee2base, R_cam2marker, t_cam2marker, method=cv2.CALIB_HAND_EYE_TSAI)
        
        T_ee2base = ee2base_poses[1]
        T_cam2marker = np.linalg.inv(marker2cam_poses[1])
   
        T_marker2ee = np.identity(4)
        T_marker2ee[:3,:3] = R_marker2ee
        T_marker2ee[:3, 3] = np.asarray(t_marker2ee).reshape(3)
       
        T_cam2base = T_ee2base @ T_marker2ee @ T_cam2marker
       
        return T_cam2base
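Note that compute_cam2base composes the final transform from a single sample (ee2base_poses[1]). Since both the camera and the robot base are static, the composition should agree across all samples; the small helper below is not part of the original function (it only reuses its names) and reports the per-axis spread of the estimated camera position as a sanity check.

import numpy as np

def cam2base_spread(T_marker2ee, ee2base_poses, marker2cam_poses):
    # Compose camera -> base once per sample; ideally all results coincide.
    translations = []
    for T_ee2base, T_marker2cam in zip(ee2base_poses, marker2cam_poses):
        T_cam2base_i = T_ee2base @ T_marker2ee @ np.linalg.inv(T_marker2cam)
        translations.append(T_cam2base_i[:3, 3])
    # Per-axis standard deviation of the estimated camera position in the base frame.
    return np.stack(translations).std(axis=0)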
Code Example #7
def eye_arm_calibrate(images, urs, mtx, dist, eye_in_hand=False, show=True):
    # Create the calibration board (ArUco grid) model
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
    parameters = aruco.DetectorParameters_create()
    board = aruco.GridBoard_create(2, 2, 0.08, 0.01, aruco_dict)
    # Estimate the board-to-camera transform from each image
    cam = []
    for img in images:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, mtx,
                                                     dist, None, None)
        cam.append(to_matrix(np.squeeze(np.r_[tvec, rvec])))
    # urs are tool-to-base transforms; for eye-to-hand calibration the base-to-hand transform is needed, so invert them
    urs = [to_matrix(s) for s in urs]
    if not eye_in_hand:
        urs = [np.linalg.inv(s) for s in urs]
    # calibrateHandEye takes rotation matrices and translation vectors separately
    R_gripper2base = np.array([pos[:3, :3] for pos in urs])
    t_gripper2base = np.array([pos[:3, 3] for pos in urs])
    R_target2cam = np.array([pos[:3, :3] for pos in cam])
    t_target2cam = np.array([pos[:3, 3] for pos in cam])

    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base,
        t_gripper2base,
        R_target2cam,
        t_target2cam,
        method=cv2.CALIB_HAND_EYE_PARK)
    # Reassemble the output into a 4x4 transformation matrix
    cam2base = np.eye(4)
    cam2base[:3, :3] = np.squeeze(R_cam2gripper)
    cam2base[:3, 3] = np.squeeze(t_cam2gripper)
    print(cam2base)
    if show:
        display(cam2base, eye_in_hand)
    return cam2base
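The helper to_matrix is not shown in this snippet. Given the np.r_[tvec, rvec] layout used above (translation first, then a Rodrigues rotation vector, which also matches the usual UR pose format of urs), a plausible implementation might look like this:

import numpy as np
import cv2

def to_matrix(pose6):
    # pose6 = [tx, ty, tz, rx, ry, rz]; the last three form a Rodrigues rotation vector.
    t, rvec = pose6[:3], pose6[3:]
    R, _ = cv2.Rodrigues(np.asarray(rvec, dtype=float).reshape(3, 1))
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T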
Code Example #8
File: 111.py Project: SteveHao74/shahao_gmnet_robot
def eye_arm_calibrate(images, urs, mtx, dist, eye_in_hand=False):
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_50)
    parameters = aruco.DetectorParameters_create()
    board = aruco.GridBoard_create(2, 2, 0.08, 0.01, aruco_dict)
    cam = []
    for img in images:
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, mtx,
                                                     dist, None, None)
        cam.append(to_matrix(np.squeeze(np.r_[tvec, rvec])))
    urs = [to_matrix(s) for s in urs]
    if not eye_in_hand:
        urs = [np.linalg.inv(s) for s in urs]
        # cam = [np.linalg.inv(s) for s in cam]
    R_gripper2base = np.array([pos[:3, :3] for pos in urs])
    t_gripper2base = np.array([pos[:3, 3] for pos in urs])
    R_target2cam = np.array([pos[:3, :3] for pos in cam])
    t_target2cam = np.array([pos[:3, 3] for pos in cam])

    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base,
        t_gripper2base,
        R_target2cam,
        t_target2cam,
        method=cv2.CALIB_HAND_EYE_PARK)
    cam2base = np.eye(4)
    cam2base[:3, :3] = np.squeeze(R_cam2gripper)
    cam2base[:3, 3] = np.squeeze(t_cam2gripper)
    print(cam2base)
    cam2world = base2world.dot(cam2base)
    print(cam2world)
    zz = cam2world.dot([0, 0, 1, 1])[:3]
    print(zz / np.linalg.norm(zz))
    display(cam2world)
    return cam2base
Code Example #9
def main():
    arm_pos = ARM_POS.split()
    arm_pos = [np.array(pos.split(','), dtype=float) for pos in arm_pos]
    arm_pos = np.array(arm_pos)
    pattern_pos = PATTERN_POS.split('\n')
    pattern_pos = [
        np.array([s.strip() for s in pos.split(',')], dtype=float)
        for pos in pattern_pos
    ]
    pattern_pos = np.array(pattern_pos)
    arm_pos = [np.linalg.inv(to_matrix(pos)) for pos in arm_pos]
    # arm_pos = [to_matrix(pos) for pos in arm_pos]
    # pattern_pos = [np.linalg.inv(to_matrix(pos)) for pos in pattern_pos]
    pattern_pos = [to_matrix(pos) for pos in pattern_pos]
    # arm_pos = np.array([to_vector(pos) for pos in arm_pos])
    # pattern_pos = np.array([to_vector(pos) for pos in pattern_pos])
    # print(pattern_pos)
    # fro i in range()
    # R_gripper2base = arm_pos[:, -3:]
    # t_gripper2base = arm_pos[:, :3]
    # R_target2cam = pattern_pos[:, -3:]
    # t_target2cam = pattern_pos[:, :3]
    R_gripper2base = np.array([pos[:3, :3] for pos in arm_pos])
    t_gripper2base = np.array([pos[:3, 3] for pos in arm_pos])
    R_target2cam = np.array([pos[:3, :3] for pos in pattern_pos])
    t_target2cam = np.array([pos[:3, 3] for pos in pattern_pos])
    print(R_target2cam.shape)
    print(t_target2cam.shape)
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base,
        t_gripper2base,
        R_target2cam,
        t_target2cam,
        method=cv2.CALIB_HAND_EYE_PARK)
    print(R_cam2gripper)
    print(t_cam2gripper)
Code Example #10
def check_trans_matrix_from_file(file_path):

    import matplotlib.pyplot as plt
    import pytransform3d.transformations as ptrans
    from pytransform3d.transform_manager import TransformManager
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 unused import

    data = np.load(file_path)

    cam_T_marker_mats_R = data['arr_0']
    cam_T_marker_mats_t = data['arr_1']

    EE_T_base_mats_R = data['arr_2']
    EE_T_base_mats_t = data['arr_3']

    cam_T_base_R, cam_T_base_t = cv2.calibrateHandEye(EE_T_base_mats_R,
                                                      EE_T_base_mats_t,
                                                      cam_T_marker_mats_R,
                                                      cam_T_marker_mats_t)

    # change shape (3, 1) to (3,)
    cam_T_base_t = cam_T_base_t.squeeze(1)

    print("cam_T_base_R:\n", cam_T_base_R.shape, "\n", cam_T_base_R)
    print("cam_T_base_t:\n", cam_T_base_t.shape, "\n", cam_T_base_t)

    base_T_cam_R = cam_T_base_R.transpose()
    base_T_cam_t = -cam_T_base_R.transpose().dot(cam_T_base_t)

    x_errs = []
    y_errs = []
    z_errs = []
    abs_errs = []

    trans_pts = []
    base_T_EE_mats_t = []

    for i, EE_T_base_t in enumerate(EE_T_base_mats_t):
        base_T_EE_t = -EE_T_base_mats_R[i].transpose().dot(EE_T_base_t)
        base_T_EE_mats_t.append(base_T_EE_t)  # For visualization

        print("Point in base: ", i, " ",
              base_T_EE_t.transpose().shape, " ", base_T_EE_t)
        print("Point in camera: ", i, " ", cam_T_marker_mats_t[i].shape, " ",
              cam_T_marker_mats_t[i])
        trans_pt = np.add(cam_T_base_R.dot(cam_T_marker_mats_t[i]),
                          cam_T_base_t)
        trans_pts.append(trans_pt)
        print("Transed point: ", trans_pt.shape, trans_pt)

        x_errs.append(abs(trans_pt[0] - base_T_EE_t[0]))
        y_errs.append(abs(trans_pt[1] - base_T_EE_t[1]))
        z_errs.append(abs(trans_pt[2] - base_T_EE_t[2]))
        abs_errs.append(
            np.sqrt(
                np.square(trans_pt[0] - base_T_EE_t[0]) +
                np.square(trans_pt[1] - base_T_EE_t[1]) +
                np.square(trans_pt[2] - base_T_EE_t[2])))

    print("X-axis error: ", "max: ", max(x_errs), "min: ", min(x_errs),
          "mean: ",
          sum(x_errs) / len(x_errs))
    print("Y-axis error: ", "max: ", max(y_errs), "min: ", min(y_errs),
          "mean: ",
          sum(y_errs) / len(y_errs))
    print("Z-axis error: ", "max: ", max(z_errs), "min: ", min(z_errs),
          "mean: ",
          sum(z_errs) / len(z_errs))
    print("Abs error: ", "max: ", max(abs_errs), "min: ", min(abs_errs),
          "mean: ",
          sum(abs_errs) / len(abs_errs))

    abs_errs = np.sort(abs_errs)
    plt.figure(1)
    plt.plot(range(len(abs_errs)), abs_errs)

    fig_3d = plt.figure(2)
    ax = fig_3d.add_subplot(111, projection='3d')

    for pt in cam_T_marker_mats_t:
        ax.scatter(pt[0], pt[1], pt[2], marker='^')

    for pt in trans_pts:
        ax.scatter(pt[0], pt[1], pt[2], marker='.')

    plt.figure(3)
    base_T_cam = ptrans.transform_from(base_T_cam_R, base_T_cam_t, True)
    tm = TransformManager()
    tm.add_transform("base", "cam", base_T_cam)
    axis = tm.plot_frames_in("base", s=0.3)

    plt.show()
Code Example #11
                          cam_T_marker_t.shape, cam_T_marker_t)

                    # print("EE in base frame\n", "R:\n", base_T_EE_R.shape, base_T_EE_R, "t:\n", base_T_EE_t.shape, base_T_EE_t)
                    print("Base in EE frame\n", "R:\n", EE_T_base_R.shape,
                          EE_T_base_R, "t:\n", EE_T_base_t.shape, EE_T_base_t)

                    # cam_pts.append(cam_pt)
                    # arm_base_pts.append(arm_base_pt)
                else:
                    print("No marker detected in this frame!")

    cv2.destroyAllWindows()

    print("Performing calibration...")
    cam_T_base_R, cam_T_base_t = cv2.calibrateHandEye(EE_T_base_mats_R,
                                                      EE_T_base_mats_t,
                                                      cam_T_marker_mats_R,
                                                      cam_T_marker_mats_t)
    print("Performing calibration... Done")

    print("From camera to base:\n", "R:\n", cam_T_base_R, "\nt:\n",
          cam_T_base_t)

    # Stack arrays to get transformation matrix H
    base_T_cam_H = get_H_from_R_t(cam_T_base_R, cam_T_base_t)
    print("H:\n", base_T_cam_H)
    # cam_T_marker_mats_filename = ROOT + cfg['save_dir'] + "cam_T_marker" + str(datetime.now()).replace(' ', '-') + ".csv"
    filename_csv = ROOT + cfg['save_dir'] + str(datetime.now()).replace(
        ' ', '-') + ".csv"
    filename_npz = ROOT + cfg['save_dir'] + str(datetime.now()).replace(
        ' ', '-') + ".npz"
Code Example #12
def validate(num_of_test, num_of_pose, num_of_start_method, num_of_method,
             motion, test_motion):
    rvec_hand2eye_true = np.empty((3, 1))
    R_hand2eye_est = np.empty((3, 3))
    t_hand2eye_est = np.empty((3, 1))
    rvec_hand2eye_est = np.empty((3, 1))
    rvec_diff = np.empty((1, num_of_method - num_of_start_method))
    t_diff = np.empty((1, num_of_method - num_of_start_method))

    ori_error = np.empty((1, num_of_method - num_of_start_method))
    pos_error = np.empty((1, num_of_method - num_of_start_method))

    for i in range(num_of_test):
        # Generate a number of poses of robot and camera
        R_eye2world, t_eye2world, R_base2hand, t_base2hand, R_hand2eye_true, t_hand2eye_true, T_base2world, robot_motion, noise_data = \
            generateNewPose(num_of_pose, motion)
        rvec_diff_ = np.empty(1)
        t_diff_ = np.empty(1)
        ori_error_ = np.empty(1)
        pos_error_ = np.empty(1)

        for j in range(num_of_start_method, num_of_method):
            # Estimation of transformation from robot hand to camera
            cv2.calibrateHandEye(R_base2hand,
                                 t_base2hand,
                                 R_eye2world,
                                 t_eye2world,
                                 R_hand2eye_est,
                                 t_hand2eye_est,
                                 method=j)
            # --- Validate --- #
            # - Method 1 - #
            # error = X_true - X_estimation
            # Orientation
            cv2.Rodrigues(R_hand2eye_true, rvec_hand2eye_true)
            cv2.Rodrigues(R_hand2eye_est, rvec_hand2eye_est)
            # temp #
            print('t_hand2eye_est\n', t_hand2eye_est)
            print('t_hand2eye_true\n', t_hand2eye_true)
            print('R_hand2eye_est\n', R_hand2eye_est)
            print('R_hand2eye_true\n', R_hand2eye_true)
            _, rvec_diff__ = kin.rotMatrixToRodVector(
                np.dot(np.transpose(R_hand2eye_est), R_hand2eye_true))
            # rvec_diff__ = kin.rotMatrixToRodVector(temp_rvec_diff__)
            # original #
            # rvec_diff__ = np.linalg.norm(np.subtract(rvec_hand2eye_true, rvec_hand2eye_est))
            # Translation
            t_diff__ = np.linalg.norm(
                np.subtract(t_hand2eye_true,
                            t_hand2eye_est)) / np.linalg.norm(t_hand2eye_true)
            # t_diff__ = np.linalg.norm(np.subtract(t_hand2eye_true, t_hand2eye_est))

            # The result vectors for each method
            rvec_diff_ = np.append(rvec_diff_,
                                   np.expand_dims(rvec_diff__, axis=0),
                                   axis=0)
            t_diff_ = np.append(t_diff_,
                                np.expand_dims(t_diff__, axis=0),
                                axis=0)

            # - Method 2 - #
            # $ error = (AX)^-1 XB $ #
            X = kin.homogMatfromRotAndTrans(R_hand2eye_est, t_hand2eye_est)
            _, _, R_base2hand_test, t_base2hand_test, _, _, _, _, _ = generateNewPose(
                num_of_pose, test_motion)
            T_eye2hand = kin.homogeneousInverse(X)
            T_eye2world = np.zeros((1, 4, 4))
            T_base2hand = np.zeros((1, 4, 4))

            for R_base2hand_, t_base2hand_ in zip(R_base2hand_test,
                                                  t_base2hand_test):
                T_base2hand_ = kin.homogMatfromRotAndTrans(
                    R_base2hand_, t_base2hand_)
                T_hand2base_ = kin.homogeneousInverse(T_base2hand_)
                # eye2hand * hand2base * base2world
                T_eye2world_ = T_eye2hand.dot(T_hand2base_).dot(T_base2world)
                # Generation of noise
                eye_noise = noise_data[0]
                rvec_noise_ = np.random.uniform(-1, 1, (3, 1))
                rvec_noise = rvec_noise_ / np.linalg.norm(rvec_noise_)
                # --- World coordinate --- #
                # Add arbitrary noise.
                R_eye2world_noise = T_eye2world_[:3, :3]
                rvec_eye2world_noise = np.empty((3, 1))
                # Orientation
                cv2.Rodrigues(R_eye2world_noise, rvec_eye2world_noise)
                angle_eye_noise = np.random.uniform(np.deg2rad(eye_noise[0]),
                                                    np.deg2rad(eye_noise[1]))
                rvec_eye_noise = rvec_noise * angle_eye_noise
                rvec_eye2world_noise = np.add(rvec_eye2world_noise,
                                              rvec_eye_noise)
                cv2.Rodrigues(rvec_eye2world_noise, R_eye2world_noise)
                R_eye2world_ = R_eye2world_noise
                # Position
                t_eye2world_noise = T_eye2world_[:3, 3]
                t_eye2world_noise = np.add(
                    np.expand_dims(t_eye2world_noise, axis=1),
                    np.random.uniform(eye_noise[2], eye_noise[3], (3, 1)))
                t_eye2world_ = t_eye2world_noise
                T_eye2world_noise = kin.homogMatfromRotAndTrans(
                    R_eye2world_, t_eye2world_)
                T_eye2world = np.concatenate(
                    (T_eye2world, np.expand_dims(T_eye2world_noise, axis=0)),
                    axis=0)
                T_base2hand = np.concatenate(
                    (T_base2hand, np.expand_dims(T_base2hand_, axis=0)),
                    axis=0)
            # print('--------------------------------------')
            # print('Method', j)
            ori_error__, pos_error__ = computeHandeyeError(
                T_base2hand[1:], T_eye2world[1:], X)
            # ori_error = np.concatenate((ori_error, np.expand_dims(ori_error_, axis=0)))
            ori_error_ = np.append(ori_error_,
                                   np.expand_dims(ori_error__, axis=0),
                                   axis=0)
            pos_error_ = np.append(pos_error_,
                                   np.expand_dims(pos_error__, axis=0),
                                   axis=0)

        rvec_diff_ = rvec_diff_.reshape(
            1, num_of_method - num_of_start_method + 1)
        t_diff_ = t_diff_.reshape(1, num_of_method - num_of_start_method + 1)

        ori_error_ = ori_error_.reshape(
            1, num_of_method - num_of_start_method + 1)
        pos_error_ = pos_error_.reshape(
            1, num_of_method - num_of_start_method + 1)
        # The result vectors for each test
        rvec_diff = np.concatenate(
            (rvec_diff, rvec_diff_[:,
                                   1:num_of_method - num_of_start_method + 1]),
            axis=0)
        t_diff = np.concatenate(
            (t_diff, t_diff_[:, 1:num_of_method - num_of_start_method + 1]),
            axis=0)

        ori_error = np.concatenate(
            (ori_error, ori_error_[:,
                                   1:num_of_method - num_of_start_method + 1]),
            axis=0)
        pos_error = np.concatenate(
            (pos_error, pos_error_[:,
                                   1:num_of_method - num_of_start_method + 1]),
            axis=0)

    mean_rvec_diff = np.mean(rvec_diff[1:, :], axis=0)
    mean_t_diff = np.mean(t_diff[1:, :], axis=0)

    std_rvec_diff = np.std(rvec_diff[1:, :], axis=0)
    std_t_diff = np.std(t_diff[1:, :], axis=0)

    max_rvec_diff = np.max(rvec_diff[1:, :], axis=0)
    max_t_diff = np.max(t_diff[1:, :], axis=0)

    mean_ori = np.mean(ori_error[1:, :], axis=0)
    mean_pos = np.mean(pos_error[1:, :], axis=0)

    return R_base2hand, R_base2hand_test, rvec_diff[1:], t_diff[
        1:], mean_rvec_diff, mean_t_diff, std_rvec_diff, std_t_diff, max_rvec_diff, max_t_diff, ori_error[
            1:], pos_error[1:], mean_ori, mean_pos
Code Example #13
b = bAtisToCheckerboardRotMats
bInv = invertTransformationMatrices(b)
r_target2cam = b[:3, :3, :]
t_target2cam = b[:3, 3, :]

methods = [
    cv2.CALIB_HAND_EYE_TSAI, cv2.CALIB_HAND_EYE_PARK,
    cv2.CALIB_HAND_EYE_HORAUD, cv2.CALIB_HAND_EYE_ANDREFF,
    cv2.CALIB_HAND_EYE_DANIILIDIS
]

xx = cv2.calibrateHandEye(np.moveaxis(r_gripper2base, 2, 0),
                          np.moveaxis(t_gripper2base, 1, 0),
                          np.moveaxis(r_target2cam, 2, 0),
                          np.moveaxis(t_target2cam, 1, 0),
                          None,
                          None,
                          method=methods[0])
xR = xx[0]
xT = xx[1]
x = np.zeros((4, 4))
x[:3, :3] = xR
x[:3, 3] = xT[:, 0]
x[3, 3] = 1

# Compute vicon-checkerboard transform using estimated X and one of the (a,b) pairs
y = aViconToStefiRotMats[:, :, 0] @ x @ bAtisToCheckerboardRotMats[:, :, 0]

xFinal = x
yFinal = y
Code Example #14
#! /usr/bin/env python3
# -*- coding:utf-8 -*-

import os, sys
import numpy as np
import cv2

R_gripper2base = np.loadtxt("R_gripper2base.txt")
t_gripper2base = np.loadtxt("t_gripper2base.txt")
R_target2cam = np.loadtxt("R_target2cam.txt")
t_target2cam = np.loadtxt("t_target2cam.txt")
R_gripper2base = np.reshape(R_gripper2base, (8, 3, 3))
#print(R_gripper2base)
R_target2cam = np.reshape(R_target2cam, (8, 3, 3))

R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(R_gripper2base,
                                                    t_gripper2base,
                                                    R_target2cam, t_target2cam)
print(R_cam2gripper)
print(t_cam2gripper)
np.savetxt("R_cam2gripper.txt", R_cam2gripper)
np.savetxt("t_cam2gripper.txt", t_cam2gripper)
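A hedged way to judge this result is the AX = XB residual over consecutive pose pairs. The helper below continues the script above and reuses the arrays it already loaded (rotations of shape (8, 3, 3) and translations of shape (8, 3) are assumed).

def axxb_residuals(R_g2b, t_g2b, R_t2c, t_t2c, R_c2g, t_c2g):
    def to_T(R, t):
        T = np.eye(4)
        T[:3, :3] = R
        T[:3, 3] = np.ravel(t)
        return T
    X = to_T(R_c2g, t_c2g)
    Ts_g2b = [to_T(R, t) for R, t in zip(R_g2b, t_g2b)]
    Ts_t2c = [to_T(R, t) for R, t in zip(R_t2c, t_t2c)]
    residuals = []
    for i in range(len(Ts_g2b) - 1):
        A = np.linalg.inv(Ts_g2b[i + 1]) @ Ts_g2b[i]  # relative gripper motion
        B = Ts_t2c[i + 1] @ np.linalg.inv(Ts_t2c[i])  # relative target motion seen by the camera
        residuals.append(np.linalg.norm(A @ X - X @ B))
    return residuals

print(axxb_residuals(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
                     R_cam2gripper, t_cam2gripper))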
Code Example #15
    R_cam_marker = []
    t_cam_marker = []
    T_cam_marker = []
    for r, t in zip(R_marker_cam, t_marker_cam):
        R_cam_marker.append(r.T)
        t_cam_marker.append(-r.T.dot(t))
        T_cam_marker_i = np.concatenate([r.T, (-r.T.dot(t)).reshape((3, 1))],
                                        axis=1)
        T_cam_marker.append(
            np.concatenate(
                [T_cam_marker_i,
                 np.array([0, 0, 0, 1]).reshape((1, 4))],
                axis=0))

    R_marker_ee, t_marker_ee = cv2.calibrateHandEye(R_ee_base, t_ee_base,
                                                    R_cam_marker, t_cam_marker,
                                                    method=cv2.CALIB_HAND_EYE_TSAI)

    # T_marker_ee
    T_marker_ee = np.concatenate(
        [R_marker_ee, t_marker_ee.reshape((3, 1))], axis=1)
    T_marker_ee = np.concatenate(
        [T_marker_ee, np.array([0, 0, 0, 1]).reshape((1, 4))], axis=0)

    print(T_marker_ee)

    T_cam_marker = []
    for i in range(len(R_cam_marker)):
        T_cam_marker_i = np.concatenate(
            [R_cam_marker[i], t_cam_marker[i].reshape((3, 1))], axis=1)
        T_cam_marker_i = np.concatenate(
Code Example #16
File: simulate.py Project: monroe-dev/hand_eye_test
def validate(numTest, numPose, numMethod):
    noise = True
    # Allocation
    rvec_hand2eye_true = np.empty((3, 1))
    R_hand2eye_est = np.empty((3, 3))
    t_hand2eye_est = np.empty((3, 1))
    rvec_hand2eye_est = np.empty((3, 1))
    # Five methods
    rvec_diff = np.empty((1, numMethod))
    t_diff = np.empty((1, numMethod))

    for i in range(numTest):
        # Generate a number of poses
        R_eye2world, t_eye2world, R_base2hand, t_base2hand, R_hand2eye_true, t_hand2eye_true = generateNewPose(
            numPose, noise)
        rvec_diff_ = np.empty(1)
        t_diff_ = np.empty(1)

        for j in range(numMethod):
            # Estimation of transformation from robot hand to camera
            cv2.calibrateHandEye(R_base2hand,
                                 t_base2hand,
                                 R_eye2world,
                                 t_eye2world,
                                 R_hand2eye_est,
                                 t_hand2eye_est,
                                 method=j)
            # Validate
            # Orientation
            cv2.Rodrigues(R_hand2eye_true, rvec_hand2eye_true)
            cv2.Rodrigues(R_hand2eye_est, rvec_hand2eye_est)
            rvec_diff__ = np.linalg.norm(
                np.subtract(rvec_hand2eye_true, rvec_hand2eye_est))
            # Translation
            t_diff__ = np.linalg.norm(
                np.subtract(t_hand2eye_true, t_hand2eye_est))
            # The result vectors for each method
            rvec_diff_ = np.append(rvec_diff_,
                                   np.expand_dims(rvec_diff__, axis=0),
                                   axis=0)
            t_diff_ = np.append(t_diff_,
                                np.expand_dims(t_diff__, axis=0),
                                axis=0)

        rvec_diff_ = rvec_diff_.reshape(1, numMethod + 1)
        t_diff_ = t_diff_.reshape(1, numMethod + 1)
        # The result vectors for each test
        rvec_diff = np.concatenate((rvec_diff, rvec_diff_[:, 1:numMethod + 1]),
                                   axis=0)
        t_diff = np.concatenate((t_diff, t_diff_[:, 1:numMethod + 1]), axis=0)

    mean_rvec_diff = np.mean(rvec_diff[1:, :], axis=0)
    mean_t_diff = np.mean(t_diff[1:, :], axis=0)

    std_rvec_diff = np.std(rvec_diff[1:, :], axis=0)
    std_t_diff = np.std(t_diff[1:, :], axis=0)

    max_rvec_diff = np.max(rvec_diff[1:, :], axis=0)
    max_t_diff = np.max(t_diff[1:, :], axis=0)

    return rvec_diff[1:], t_diff[
        1:], mean_rvec_diff, mean_t_diff, std_rvec_diff, std_t_diff, max_rvec_diff, max_t_diff