Example 1
def cb_X0(f):
    global cTsAry, bTmAry
    print("cbX0")
    pb_msg.publish("rcalib::clear")
    # Reset both sample arrays and publish the new (zero) sample count.
    cTsAry = TransformArray()
    bTmAry = TransformArray()
    count = Int32()
    count.data = len(cTsAry.transforms)
    pb_count.publish(count)
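The function above relies on globals defined elsewhere in its file. A minimal sketch of the assumed surrounding setup, with node and topic names as illustrative guesses (they are not given in the source):

import rospy
from std_msgs.msg import Int32, String
from visp_hand2eye_calibration.msg import TransformArray

rospy.init_node('rcalib')
# Hypothetical publishers used by cb_X0; the topic names are assumptions.
pb_msg = rospy.Publisher('/message', String, queue_size=1)
pb_count = rospy.Publisher('/count', Int32, queue_size=1)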

Example 2

    def __init__(self):

        # Frame which is rigidly attached to the camera
        # The transform from this frame to the camera optical frame is what
        # we're trying to compute.
        # For the eye-in-hand case, this is the end-effector frame.
        # For the eye-on-base case, this is the world or base frame.
        self.camera_parent_frame_id = rospy.get_param('~camera_parent_frame')

        # Frame which is rigidly attached to the marker
        # The transform from the camera parent frame to the marker parent frame
        # is given by forward kinematics.
        # For the eye-in-hand case, this is the world or base frame.
        # For the eye-on-base case, this is the end-effector frame.
        self.marker_parent_frame_id = rospy.get_param('~marker_parent_frame')

        self.publish_tf = rospy.get_param('~publish_tf')
        self.tf_suffix = rospy.get_param('~tf_suffix')
        self.marker_id = rospy.get_param('~marker_id')
        self.optical_frame_id = 'cc'

        # Compute the camera base to optical transform
        self.xyz_optical_base = rospy.get_param('~xyz_optical_base', [0, 0, 0])
        self.rpy_optical_base = rospy.get_param('~rpy_optical_base', [0, 0, 0])
        self.F_optical_base = PyKDL.Frame(
            PyKDL.Rotation.RPY(*self.rpy_optical_base),
            PyKDL.Vector(*self.xyz_optical_base))
        self.F_base_optical = self.F_optical_base.Inverse()

        # tf structures
        self.listener = tf.TransformListener()
        self.broadcaster = tf.TransformBroadcaster()

        # input data
        self.hand_world_samples = TransformArray()
        self.camera_marker_samples = TransformArray()

        # marker subscriber
        # self.aruco_subscriber = rospy.Subscriber(
        #         'aruco_tracker/transform',
        #         TransformStamped,
        #         self.aruco_cb,
        #         queue_size=1)

        # calibration services
        # NOTE: the attribute below is assigned twice, so the first Service
        # handle is discarded; both services stay registered regardless.

        self.hand_eye_service = rospy.Service('hand_eye_calibration',
                                              hand_eye_calibration,
                                              self.aruco_cb)
        self.hand_eye_service = rospy.Service('compute_calibration',
                                              hand_eye_calibration,
                                              self.compute_calibration)

        rospy.wait_for_service('compute_effector_camera_quick')
        self.calibrate = rospy.ServiceProxy('compute_effector_camera_quick',
                                            compute_effector_camera_quick)

        self.samples_min = 5
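The constructor above reads several required private parameters and fails at startup if they are unset. A minimal sketch of plausible values for an eye-in-hand setup (frame names and values are illustrative assumptions; in practice they would come from a launch file):

import rospy

rospy.init_node('hand_eye_calibrator')
# Illustrative eye-in-hand values; real frames depend on the robot and camera.
rospy.set_param('~camera_parent_frame', 'ee_link')
rospy.set_param('~marker_parent_frame', 'base_link')
rospy.set_param('~publish_tf', True)
rospy.set_param('~tf_suffix', '_calibrated')
rospy.set_param('~marker_id', 0)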

Example 3
    def get_visp_samples(self):
        hand_world_samples = TransformArray()
        hand_world_samples.header.frame_id = self.tracking_base_frame  # bug?
        camera_marker_samples = TransformArray()
        camera_marker_samples.header.frame_id = self.tracking_base_frame
        for s in self.samples:
            camera_marker_samples.transforms.append(s['optical'])
            hand_world_samples.transforms.append(s['robot'])
        return hand_world_samples, camera_marker_samples
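A sketch of how the returned pair typically feeds the ViSP quick-calibration service; the request fields match those used in Example 8 below, and node stands for a hypothetical instance of this class:

from visp_hand2eye_calibration.srv import compute_effector_camera_quickRequest

hand_world, camera_marker = node.get_visp_samples()
req = compute_effector_camera_quickRequest()
req.world_effector = hand_world
req.camera_object = camera_marker
res = node.calibrate(req)  # ServiceProxy to 'compute_effector_camera_quick'
print(res.effector_camera)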

Example 4

    def __init__(self):

        # Frame which is rigidly attached to the camera
        # The transform from this frame to the camera optical frame is what
        # we're trying to compute.
        # For the eye-in-hand case, this is the end-effector frame.
        # For the eye-on-base case, this is the world or base frame.
        self.camera_parent_frame_id = rospy.get_param('~camera_parent_frame')

        # Frame which is rigidly attached to the marker
        # The transform from the camera parent frame to the marker parent frame
        # is given by forward kinematics.
        # For the eye-in-hand case, this is the world or base frame.
        # For the eye-on-base case, this is the end-effector frame.
        self.marker_parent_frame_id = rospy.get_param('~marker_parent_frame')

        self.publish_tf = rospy.get_param('~publish_tf')
        self.tf_suffix = rospy.get_param('~tf_suffix')
        self.sample_rate = rospy.get_param('~sample_rate')
        self.interactive = rospy.get_param('~interactive')

        # Compute the camera base to optical transform
        self.xyz_optical_base = rospy.get_param('~xyz_optical_base', [0, 0, 0])
        self.rpy_optical_base = rospy.get_param('~rpy_optical_base', [0, 0, 0])
        self.F_optical_base = PyKDL.Frame(
            PyKDL.Rotation.RPY(*self.rpy_optical_base),
            PyKDL.Vector(*self.xyz_optical_base))
        self.F_base_optical = self.F_optical_base.Inverse()

        # tf structures
        self.listener = tf.TransformListener()
        self.broadcaster = tf.TransformBroadcaster()

        # rate limiter
        self.rate = rospy.Rate(self.sample_rate)

        # input data
        self.hand_world_samples = TransformArray()
        self.camera_marker_samples = TransformArray()

        # marker subscriber
        self.QR_subscriber = rospy.Subscriber(
            'visp_auto_tracker/object_position',
            PoseStamped,
            self.pose_cb,
            queue_size=1)
        rospy.loginfo('Subscribed to the QR object pose topic!')

        # calibration service
        rospy.wait_for_service('compute_effector_camera_quick')
        self.calibrate = rospy.ServiceProxy('compute_effector_camera_quick',
                                            compute_effector_camera_quick)
        rospy.loginfo('Calibration service available!')
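The pose_cb callback registered above is not part of this excerpt. A plausible sketch, with the field mapping and bookkeeping as assumptions, converting the tracked PoseStamped into a Transform sample:

from geometry_msgs.msg import Transform

def pose_cb(self, msg):
    # Assumed: store the tracked marker pose as a camera->marker sample.
    t = Transform()
    t.translation.x = msg.pose.position.x
    t.translation.y = msg.pose.position.y
    t.translation.z = msg.pose.position.z
    t.rotation = msg.pose.orientation
    self.camera_marker_samples.transforms.append(t)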

Example 5
def cb_X3(f):
    global cTsAry, bTmAry
    print "X3"
    Tcsv = np.loadtxt('input.txt')
    bTmAry = TransformArray()
    cTsAry = TransformArray()
    for vec in Tcsv:
        #    bTmAry.transforms.append(xyz2quat(tflib.fromVec(vec[0:7])))
        bTmAry.transforms.append(tflib.fromVec(vec[0:7]))
        cTsAry.transforms.append(tflib.fromVec(vec[7:14]))
    print(bTmAry.transforms[0])
    print(cTsAry.transforms[0])
    call_visp()
    save_result_mTs('result_mts.txt')
    save_result_bTs('result_bts.txt')
    return
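The slicing in cb_X3 implies a fixed layout for input.txt: fourteen whitespace-separated columns per row, the first seven a base-to-marker pose and the last seven a camera-to-object pose (position plus quaternion; the exact component order inside tflib.fromVec is an assumption). A quick sanity check under that assumption:

import numpy as np

Tcsv = np.loadtxt('input.txt')
# Assumed row layout: [bTm: x y z qx qy qz qw | cTs: x y z qx qy qz qw]
assert Tcsv.ndim == 2 and Tcsv.shape[1] == 14, "expected 14 columns per sample"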

Example 6

    def get_visp_samples(self):
        """
        Returns the sample list as a TransformArray.
        :rtype: visp_hand2eye_calibration.msg.TransformArray
        """
        hand_world_samples = TransformArray()
        # hand_world_samples.header.frame_id = self.robot_base_frame
        hand_world_samples.header.frame_id = self.tracking_base_frame
        # DONTFIXME: yes, I know, it should be like the commented line above.
        # Thing is, otherwise the calibration results are wrong. Don't ask me why.

        camera_marker_samples = TransformArray()
        camera_marker_samples.header.frame_id = self.tracking_base_frame

        for s in self.samples:
            camera_marker_samples.transforms.append(s['optical'].transform)
            hand_world_samples.transforms.append(s['robot'].transform)

        return hand_world_samples, camera_marker_samples
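The attribute access in this variant (s['robot'].transform rather than s['robot']) implies each entry of self.samples is a dict of stamped transforms. A sketch of the assumed structure:

from geometry_msgs.msg import TransformStamped

# Assumed shape of one sample; the frame semantics are inferred, not confirmed:
sample = {
    'robot': TransformStamped(),    # hand pose from forward kinematics
    'optical': TransformStamped(),  # marker pose from the tracker
}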

Example 7
    def __init__(self, name):
        self.name = name

        # the names of the base_link and end_link according to the urdf model
        self.base_link = "iiwa_base_link"
        self.end_link = "iiwa_adapter"

        # two variables to measure the elapsed time between the two callbacks
        self.time_A = dt.datetime.now()
        self.time_B = dt.datetime.now()

        self.joint_names = [
            "joint1", "joint2", "joint3", "joint4", "joint5", "joint6",
            "joint7"
        ]

        # initialize a node
        rospy.init_node("path_walker_node")

        # Machine-specific absolute path to the URDF model of the iiwa.
        self.robot = URDF.from_xml_file(
            "/home/davoud/kuka_ws/src/kuka/kuka/iiwa/iiwa_description/urdf/iiwa.urdf"
        )
        self.tree = kdl_tree_from_urdf_model(self.robot)
        self.kdl_kin = KDLKinematics(self.robot, self.base_link, self.end_link)

        # open the calibration_data file to append the end-effector and marker coordinates
        self.file = open("calibration_data", 'a')
        self.file.write("\n\nNew Run    Time: " + str(dt.datetime.now()) +
                        "\n")

        # current position of the end effector
        # self.end_eff_current_pose = np.empty((1, 7))

        # number of poses for the path
        self.num_of_poses = 13

        # current pose of path
        self.current_pose = 0

        # the two transform arrays to call the service
        self.world_effector_transform_array = TransformArray()
        self.world_effector_transform_array.header.stamp = rospy.Time.now()
        self.world_effector_transform_array.header.frame_id = "iiwa_base_link"
        self.camera_object_transform_array = TransformArray()
        self.camera_object_transform_array.header.stamp = self.world_effector_transform_array.header.stamp
        self.camera_object_transform_array.header.frame_id = "camera_rgb_optical_frame"

        # list of poses
        """
        self.pose1 =  [-0.38, -1.79,  0.00,  0.00, -1.48, -1.90, -1.32]
        self.pose3 =  [-0.65, -1.51,  0.13,  0.00, -1.44, -1.75, -1.06]
        self.pose5 =  [-0.81, -1.33,  0.17, -0.37, -1.25, -1.71, -0.71]
        self.pose7 =  [-0.45, -1.33,  0.17, -0.79, -1.42, -1.76, -0.40]
        self.pose9 =  [-0.06, -0.90,  0.17, -1.28, -1.88, -1.76, -0.40]
        self.pose11 =  [-0.06, -0.20,  0.17, -1.28, -1.88, -1.76, -0.42]
        self.pose13 =  [-0.06,  0.27,  0.16, -1.06, -1.88, -1.76, -0.08]
        self.pose15 =  [-0.05,  1.25,  0.15, -0.43, -1.88, -1.76, -0.08]
        self.pose17 =  [-1.64,  1.21,  0.97, -1.80, -2.95, -1.57, -2.20]
        self.pose19 = [ 0.10,  1.09,  0.97, -0.19, -2.94, -1.57, -1.83]
        self.pose21 = [ 0.78,  1.10, -1.08, -0.19, -0.98, -1.33, -1.51]
        self.pose23 = [ 1.54,  1.34, -1.08, -0.19, -2.15, -0.66, -1.30]
        self.pose25 = [ 1.98,  1.13, -1.27, -0.68, -2.15, -0.66, -1.30]
        self.pose27 = [ 2.15,  1.93, -2.17, -0.48, -2.45, -1.06, -1.10]
        self.pose29 = [ 2.15,  1.63, -2.17, -0.48, -2.15, -0.86, -1.30]
        self.pose31 = [ 0.85, -0.23, -2.77,  1.88, -2.15,  0.46,  0.90]

        self.pose2 =   [-0.38, -1.79,  0.00,  0.00, -1.48, -1.70, -1.42]
        self.pose4 =   [-0.65, -1.51,  0.13,  0.00, -1.44, -1.55, -1.16]
        self.pose6 =   [-0.81, -1.33,  0.17, -0.37, -1.25, -1.51, -0.81]
        self.pose8 =   [-0.45, -1.33,  0.17, -0.79, -1.42, -1.56, -0.50]
        self.pose10 =  [-0.06, -0.90,  0.17, -1.28, -1.88, -1.56, -0.50]
        self.pose12 =  [-0.06, -0.20,  0.17, -1.28, -1.88, -1.56, -0.52]
        self.pose14 =  [-0.06,  0.27,  0.16, -1.06, -1.88, -1.56, -0.18]
        self.pose16 =  [-0.05,  1.25,  0.15, -0.43, -1.88, -1.56, -0.18]
        self.pose18 =  [-1.64,  1.21,  0.97, -1.80, -2.95, -1.37, -2.30]
        self.pose20 =  [ 0.10,  1.09,  0.97, -0.19, -2.94, -1.77, -1.93]
        self.pose22 =  [ 0.78,  1.10, -1.08, -0.19, -0.98, -1.53, -1.61]
        self.pose24 =  [ 1.54,  1.34, -1.08, -0.19, -2.15, -0.56, -1.40]
        self.pose26 =  [ 1.98,  1.13, -1.27, -0.68, -2.15, -0.56, -1.40]
        self.pose28 =  [ 2.15,  1.93, -2.17, -0.48, -2.45, -1.06, -1.20]
        self.pose30 =  [ 2.15,  1.63, -2.17, -0.48, -2.15, -0.86, -1.00]
        self.pose32 =  [ 0.85, -0.23, -2.77,  1.88, -2.15,  0.26,  0.80]
        """
        self.pose1 = [0.39, 1.67, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose2 = [0.44, 1.67, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose3 = [0.49, 1.67, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose4 = [0.34, 1.67, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose5 = [0.29, 1.67, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose6 = [0.39, 1.72, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose7 = [0.39, 1.77, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose8 = [0.39, 1.62, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose9 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.19]
        self.pose10 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.14]
        self.pose11 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.09]
        self.pose12 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.24]
        self.pose13 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.29]
        self.pose14 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.34]
        self.pose15 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.39]
        self.pose16 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.44]
        self.pose17 = [0.39, 1.57, -2.77, 0.07, -2.15, 0.26, 1.49]

        # transform listener
        self.transform_listener = tf.TransformListener()

        # the two current transforms
        self.world_effector_transform = Transform()
        self.camera_object_transform = Transform()

        # actionlib preparation
        self.manipulator_client = actionlib.SimpleActionClient(
            "/iiwa/follow_joint_trajectory", FollowJointTrajectoryAction)
        self.manipulator_client.wait_for_server()
        self.trajectory = JointTrajectory()
        self.trajectory.joint_names = self.joint_names
        self.trajectory.points.append(JointTrajectoryPoint())

        # subscribe to "/joint_states" topic which is published by kuka.
        rospy.Subscriber("/joint_states",
                         JointState,
                         self.kuka_inf_cb,
                         queue_size=1)

        # subscribe to "/aruco_single/marker" which is published by aruco_ros
        # rospy.Subscriber("aruco_single/marker", Marker, self.marker_pose_cb)

        # subscribe to "/avg_marker", which is computed by averaging two markers
        rospy.Subscriber("/avg_marker", Marker, self.marker_pose_cb)

Example 8
        print('Visp reset failed: ' + str(e))
        pb_msg.publish("rcalib::visp reset failed")
        return
    calibrator = None
    try:  # solving as fixed camera
        calibrator = rospy.ServiceProxy('/compute_effector_camera_quick',
                                        compute_effector_camera_quick)
    except rospy.ServiceException as e:
        print('Visp call failed: ' + str(e))
        pb_msg.publish("rcalib::visp call failed")
        return
    creset(resetRequest())
    req = compute_effector_camera_quickRequest()
    req.camera_object = cTsAry
    if Config["mount_frame_id"] == "world":  # fixed camera
        mTbAry = TransformArray()
        for t in bTmAry.transforms:  # 't', not 'tf', to avoid shadowing the tf module
            mTbAry.transforms.append(tflib.inv(t))
        req.world_effector = mTbAry
        print("calib fixed")
    else:
        req.world_effector = bTmAry
        print("calib handeye")
    try:
        res = calibrator(req)
        print(res.effector_camera)
        set_param_tf(
            Config["config_tf"] + "/" + Config["camera_frame_id"] +
            "/transform", res.effector_camera)
        mTc = tflib.toRT(res.effector_camera)
        pb_msg.publish("rcalib::visp solver success")

Example 9

    def __init__(self, config):
        self.config = config
        self.target = None  # Initialize tracked target pose
        self.cam_T_target = None
        self.w_T_hand = None

        # Msgs for visp_hand2eye_calibration
        self.cam_T_target_array = TransformArray()
        self.w_T_hand_array = TransformArray()

        # Publishers for visp_hand2eye_calibration
        self.w_T_hand_pub = rospy.Publisher("/world_effector",
                                            Transform,
                                            queue_size=1000)
        self.cam_T_target_pub = rospy.Publisher("/camera_object",
                                                Transform,
                                                queue_size=1000)

        # Initialize robot
        print "Loading URDF..."
        self.robot = pydrake.rbtree.RigidBodyTree(self.config["ik_urdf"])
        print

        print "Bodies"
        print "------"
        self.bodies = dict()
        for body in range(self.robot.get_num_bodies()):
            body_name = self.robot.getBodyOrFrameName(body)
            print body_name, body
            self.bodies[body_name] = body
        # if robot.get_num_bodies() == 0: print "No bodies"
        print

        self.positions = dict()
        self.joint_names = list()
        print "Positions"
        print "---------"
        for position in range(self.robot.get_num_positions()):
            position_name = self.robot.get_position_name(position)
            print position_name, position
            self.positions[position_name] = position
            self.joint_names.append(position_name)
        # if robot.get_num_positions() == 0: print "No positions"
        print

        # Set up ROS
        print "Waiting for getRobotConf service..."
        rospy.wait_for_service('/robot_server/getRobotConf')
        self.conf_service = rospy.ServiceProxy('/robot_server/getRobotConf',
                                               GetRobotConf)

        print "Waiting for MoveHead service..."
        rospy.wait_for_service('/robot_control/MoveHead')
        self.move_head_service = rospy.ServiceProxy('/robot_control/MoveHead',
                                                    MoveHead)

        print "Initializing publishers and subscribers..."
        self.tf_buffer = tf2_ros.Buffer(
            rospy.Duration(10.0))  #tf buffer length
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.tf_broadcaster = tf2_ros.TransformBroadcaster()

        print "ROS stack ready!"
        print
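A sketch of how the publishers and arrays prepared above might be used for each paired measurement; the pairing logic is an assumption, but it matches the topic-streaming pattern the visp_hand2eye_calibration publishers suggest:

# Inside a method of this class, once self.w_T_hand and self.cam_T_target
# hold a synchronized pair of geometry_msgs/Transform measurements:
self.w_T_hand_pub.publish(self.w_T_hand)
self.cam_T_target_pub.publish(self.cam_T_target)
self.w_T_hand_array.transforms.append(self.w_T_hand)
self.cam_T_target_array.transforms.append(self.cam_T_target)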