Example #1
    def mean_arm_pose(self, arm_poses):
        tmp = kdl.Vector(0.0, 0.0, 0.0)
        elbow_angle = 0.0
        direction = kdl.Vector()

        for m in arm_poses:
            elbow_angle = elbow_angle + m.elbow_angle
            p = tfc.fromMsg(m.direction)
            tmp = p.M * kdl.Vector(1.0, 0.0, 0.0)
            direction = direction + tmp

        n = direction.Normalize()
        # rospy.loginfo('x: {} y: {} z: {}'.format(direction.x(), direction.y(), direction.z()))
        elbow_angle = elbow_angle / len(arm_poses)

        pitch = math.atan2(-direction.z(), math.sqrt(direction.x()*direction.x() + direction.y()*direction.y()))
        yaw = math.atan2(direction.y(), direction.x())

        pose = kdl.Frame(kdl.Rotation.RPY(0.0, pitch, yaw))

        arm_msg = copy.deepcopy(arm_poses[-1])
        arm_msg.direction = tfc.toMsg(pose)
        arm_msg.elbow_angle = elbow_angle

        max_dev = 0.0
        for m in arm_poses:
            p = tfc.fromMsg(m.direction)
            tmp = p.M * kdl.Vector(1.0, 0.0, 0.0)
            dev = (direction - tmp).Norm()
            if dev > max_dev: max_dev = dev

        return arm_msg, max_dev
Example #2
    def process_odom_data(self, msg):
        if self.running:
            if self.last_image is not None:
                self.mutex.acquire()
                if self.zero_odom_offset is None:
                    self.zero_odom_offset = tf_conversions.fromMsg(
                        msg.pose.pose)
                msg = self.subtract_odom(msg, self.zero_odom_offset)
                self.last_odom_pose = msg.pose.pose

                current_pose_odom = tf_conversions.fromMsg(msg.pose.pose)
                current_goal_frame_odom = tf_conversions.fromMsg(self.goal)
                current_goal_plus_lookahead_frame_odom = tf_conversions.fromMsg(
                    self.goal_plus_lookahead)
                old_goal_frame_world = tf_conversions.fromMsg(
                    self.poses[self.goal_index])

                delta_frame = current_pose_odom.Inverse(
                ) * current_goal_plus_lookahead_frame_odom

                if delta_frame_in_bounds(delta_frame):
                    if self.discrete_correction:
                        rotation_correction, path_correction = self.do_discrete_correction(
                            msg.pose.pose, current_goal_frame_odom,
                            old_goal_frame_world)
                        self.make_new_goal(rotation_correction,
                                           path_correction)
                    else:
                        self.make_new_goal()
                self.mutex.release()
        else:
            self.mutex.acquire()
            self.last_odom_pose = msg.pose.pose
            self.mutex.release()
Example #3
    def update(self):
        for name, waypoint in self.waypoints.items():
            F = tf_c.fromMsg(waypoint)
            self.broadcaster_.sendTransform(tuple(F.p),
                                            tuple(F.M.GetQuaternion()),
                                            rospy.Time.now(), name, '/world')

        for name, waypoint_data in self.relative_waypoints.items():
            try:
                F_relative_frame_waypoint = tf_c.fromMsg(waypoint_data[0])
                relative_frame_name = waypoint_data[1]
                F_world_relative_frame = tf_c.fromTf(
                    self.listener_.lookupTransform('/world',
                                                   relative_frame_name,
                                                   rospy.Time(0)))
                F_relative_waypoint = F_world_relative_frame * F_relative_frame_waypoint
                self.broadcaster_.sendTransform(
                    tuple(F_relative_waypoint.p),
                    tuple(F_relative_waypoint.M.GetQuaternion()),
                    rospy.Time.now(), name, '/world')
                self.F_relative_waypoint_last = F_relative_waypoint
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                if self.F_relative_waypoint_last is None:
                    pass
                else:
                    self.broadcaster_.sendTransform(
                        tuple(self.F_relative_waypoint_last.p),
                        tuple(self.F_relative_waypoint_last.M.GetQuaternion()),
                        rospy.Time.now(), name, '/world')

        self.check_landmarks()
Example #4
def get_obj_frames(blue_obj, red_obj, base_tf, base2kinect_tf):
    base_pose = Pose()
    base_pose.position = base_tf.transform.translation
    base_pose.orientation = base_tf.transform.rotation
    base_kdl = tf_conversions.fromMsg(base_pose)
    base_unitX = base_kdl.M.UnitX()
    base_unitY = base_kdl.M.UnitY()
    base_unitZ = base_kdl.M.UnitZ()

    ### Frame for Blue Object

    blue_center_kinect = PointStamped()
    blue_center_kinect.header = base2kinect_tf.header
    blue_center_kinect.point = blue_obj.bb_center
    blue_center = tf2_geometry_msgs.do_transform_point(blue_center_kinect,
                                                       base2kinect_tf)

    blue_pose = Pose()
    blue_pose.position = blue_center.point
    blue_pose.position.z = blue_pose.position.z - blue_obj.bb_dims.z / 2
    blue_pose.orientation = base_tf.transform.rotation
    blue_kdl = tf_conversions.fromMsg(blue_pose)
    blue_pos = blue_kdl.p
    blue_rot = PyKDL.Rotation(-base_unitX, -base_unitY, base_unitZ)
    blue_kdl = PyKDL.Frame(blue_rot, blue_pos)
    blue_pose = tf_conversions.toMsg(blue_kdl)

    blue_frame = TransformStamped()
    blue_frame.header.frame_id = base_frame
    blue_frame.header.stamp = rospy.Time.now()
    blue_frame.child_frame_id = 'blue_frame'
    blue_frame.transform.translation = blue_pose.position
    blue_frame.transform.rotation = blue_pose.orientation

    ### Frame for Red Object

    red_center_kinect = PointStamped()
    red_center_kinect.header = base2kinect_tf.header
    red_center_kinect.point = red_obj.bb_center
    red_center = tf2_geometry_msgs.do_transform_point(red_center_kinect,
                                                      base2kinect_tf)

    red_pose = Pose()
    red_pose.position = red_center.point
    red_pose.position.z = red_pose.position.z - red_obj.bb_dims.z / 2
    red_pose.orientation = base_tf.transform.rotation
    red_kdl = tf_conversions.fromMsg(red_pose)
    red_pos = red_kdl.p
    red_rot = PyKDL.Rotation(-base_unitX, -base_unitY, base_unitZ)
    red_kdl = PyKDL.Frame(red_rot, red_pos)
    red_pose = tf_conversions.toMsg(red_kdl)

    red_frame = TransformStamped()
    red_frame.header.frame_id = base_frame
    red_frame.header.stamp = rospy.Time.now()
    red_frame.child_frame_id = 'red_frame'
    red_frame.transform.translation = red_pose.position
    red_frame.transform.rotation = red_pose.orientation

    return blue_frame, red_frame
Example #5
def transform_to_tip(group, ee_frame, goal):
    """
    Transform the goal from the ee_frame to the tip link, which is
    resolved from the planning group.
    Returns (new_goal, tip_frame).
    """
    mc = MoveGroupCommander(group)
    tip_frame = mc.get_end_effector_link()
    
    ee_pose = mc.get_current_pose(ee_frame)
    tip_pose = mc.get_current_pose(tip_frame)
    kdl_ee = tf_conversions.fromMsg(ee_pose.pose)
    kdl_tip = tf_conversions.fromMsg(tip_pose.pose)
    if isinstance(goal, geometry_msgs.msg.Pose):
        kdl_goal = tf_conversions.fromMsg(goal)
    elif isinstance(goal, geometry_msgs.msg.PoseStamped):
        kdl_goal = tf_conversions.fromMsg(goal.pose)
    else:
        rospy.logerr("Unknown pose type, only Pose and PoseStamped is allowed")
        rospy.logerr(str(type(goal)))
        return (None, tip_frame)
    
    grip = kdl_tip.Inverse() * kdl_ee
    kdl_pose = kdl_goal * grip.Inverse()
    
    return (tf_conversions.toMsg(kdl_pose), tip_frame)
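A hedged usage sketch for transform_to_tip above; the planning-group name 'arm' and end-effector frame 'gripper_link' are illustrative assumptions, not names taken from the source:

# Illustrative only; group/frame names are assumptions, and the imports
# (MoveGroupCommander, geometry_msgs) are the same ones the example relies on.
goal = geometry_msgs.msg.Pose()
goal.position.x = 0.4
goal.orientation.w = 1.0
new_goal, tip_frame = transform_to_tip('arm', 'gripper_link', goal)
if new_goal is not None:
    mc = MoveGroupCommander('arm')
    mc.set_pose_target(new_goal, end_effector_link=tip_frame)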
Example #6
    def dist_from_anchor(self, point):

        anchor_frame = tf_conversions.fromMsg(self._demonstration_anchor.pose)
        point_frame = tf_conversions.fromMsg(point.pose)

        delta = anchor_frame.Inverse() * point_frame

        return delta.p.Norm()
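Because rotation preserves vector norms, delta.p.Norm() above is simply the Euclidean distance between the anchor and the point, independent of the anchor's orientation; an equivalent one-liner (a sketch under the same assumptions as the method above):

# Sketch: the same distance computed directly from the two poses.
dist = (tf_conversions.fromMsg(point.pose).p -
        tf_conversions.fromMsg(self._demonstration_anchor.pose).p).Norm()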
Example #7
def orient_towards_object_double(tfBuffer, ee_vec, object_pose):
    w2w = tfBuffer.lookup_transform(base_frame, base_frame, rospy.Time(0))

    w2wPose = Pose()
    w2wPose.position.x = w2w.transform.translation.x
    w2wPose.position.y = w2w.transform.translation.y
    w2wPose.position.z = w2w.transform.translation.z
    w2wPose.orientation = w2w.transform.rotation

    ee_pose = Pose()
    ee_pose.position.x = ee_vec[0]
    ee_pose.position.y = ee_vec[1]
    ee_pose.position.z = ee_vec[2]
    ee_pose.orientation.x = ee_vec[3]
    ee_pose.orientation.y = ee_vec[4]
    ee_pose.orientation.z = ee_vec[5]
    ee_pose.orientation.w = ee_vec[6]

    w2i_kdl = tf_conversions.fromMsg(ee_pose)
    w2i_x = w2i_kdl.M.UnitX()
    w2i_pos = w2i_kdl.p

    w2e_kdl = tf_conversions.fromMsg(object_pose)
    w2e_x = w2e_kdl.M.UnitX()
    w2e_pos = w2e_kdl.p

    w2w_kdl = tf_conversions.fromMsg(w2wPose)
    w2w_z = w2w_kdl.M.UnitZ()
    w2w_pos = w2w_kdl.p

    z_rot_wp = (w2e_pos - w2i_pos) / ((w2e_pos - w2i_pos).Norm())
    y_rot_wp = -w2w_z * z_rot_wp
    x_rot_wp = -z_rot_wp * y_rot_wp
    wp_pos = w2i_pos

    wp_M = PyKDL.Rotation(x_rot_wp, y_rot_wp, z_rot_wp)

    wp_kdl = PyKDL.Frame(wp_M, wp_pos)

    wp_pose = tf_conversions.toMsg(wp_kdl)

    recipNorm = 1 / math.sqrt(wp_pose.orientation.x**2 +
                              wp_pose.orientation.y**2 +
                              wp_pose.orientation.z**2 +
                              wp_pose.orientation.w**2)
    wp_pose.orientation.x = wp_pose.orientation.x * recipNorm
    wp_pose.orientation.y = wp_pose.orientation.y * recipNorm
    wp_pose.orientation.z = wp_pose.orientation.z * recipNorm
    wp_pose.orientation.w = wp_pose.orientation.w * recipNorm

    wp_orientation = []
    wp_orientation.append(wp_pose.orientation.x)
    wp_orientation.append(wp_pose.orientation.y)
    wp_orientation.append(wp_pose.orientation.z)
    wp_orientation.append(wp_pose.orientation.w)

    return wp_orientation
Example #8
    def callback(self, camera_info_msg, image_msg, camera_pose_msg):
        # camera parameters and image
        camera_matrix = numpy.float32(camera_info_msg.K).reshape(3, 3)
        distortion = numpy.float32(camera_info_msg.D).flatten()
        frame = self.cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')

        # camera pose
        cam2world = tf_conversions.toMatrix(
            tf_conversions.fromMsg(camera_pose_msg.pose))
        world2cam = numpy.linalg.inv(cam2world)

        tvec = world2cam[:3, 3]
        rvec, jacob = cv2.Rodrigues(world2cam[:3, :3])

        # estimates the scale of the environment
        ret, scale = self.scale_estimater.estimate(tvec)
        if not ret:
            cv2.putText(frame, 'estimating scale...', (10, 30),
                        cv2.FONT_HERSHEY_PLAIN, 3.0, (255, 255, 255))
        else:
            # assuming that the camera moves 20cm in the first 100 frames
            scale = scale / 0.2
            for cube in self.cubes:
                cube.draw(frame, camera_matrix, distortion, rvec, tvec, scale)

        self.frame = frame
        self.cam2world = cam2world
Example #9
	def MoveTriad(self, msg):
		if self.axisbody is None:
			return
		f1 = tf_conversions.fromMsg(msg.pose)
		T = tf_conversions.toMatrix(f1)
		#T[0:3,3] = [-.5,0,0]
		self.axisbody.SetTransform(T)
Example #10
def get_pose_as_tran(moveit_grasp_msg):
    """
    :type moveit_grasp_msg: moveit_msgs.msg.Grasp
    """
    assert isinstance(moveit_grasp_msg, moveit_msgs.msg.Grasp)
    pose_msg = moveit_grasp_msg.grasp_pose.pose
    return tf_conversions.toMatrix(tf_conversions.fromMsg(pose_msg))
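Most of these examples lean on the same small tf_conversions round trip; a minimal, self-contained sketch (assuming only a ROS Python environment with tf_conversions and geometry_msgs installed):

import tf_conversions
from geometry_msgs.msg import Pose

pose = Pose()
pose.position.x = 0.5
pose.orientation.w = 1.0  # identity rotation

frame = tf_conversions.fromMsg(pose)       # geometry_msgs/Pose -> PyKDL.Frame
matrix = tf_conversions.toMatrix(frame)    # PyKDL.Frame -> 4x4 numpy array
pose_again = tf_conversions.toMsg(tf_conversions.fromMatrix(matrix))  # back to a Pose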
Example #11
def make_marker(m_id, pose, frame_id="base_link", color=(1, 0, 0, 1), scale=(.1, .03, .01), m_type=visualization_msgs.msg.Marker.ARROW, arrow_direction='x'):

    final_marker_pose = tf_conversions.fromMsg(pose)
    # For a given pose, the marker can point along either the x, y, or z axis. By default, RViz points the arrow along the x axis.
    if arrow_direction == 'x':
        pass
    elif arrow_direction == 'y':
        final_marker_pose.M.DoRotZ(math.pi / 2)
    elif arrow_direction == 'z':
        final_marker_pose.M.DoRotY(-math.pi / 2)
    else:
        print "Invalid arrow direction, using default x "

    m = visualization_msgs.msg.Marker()
    m.id = m_id
    m.type = m_type

    m.header.frame_id = frame_id
    m.header.stamp = rospy.Time.now()
    m.pose = tf_conversions.toMsg(final_marker_pose)

    m.color = std_msgs.msg.ColorRGBA(*color)
    m.scale = Vector3(*scale)

    return m
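An illustrative way to publish the marker returned above; the 'visualization_marker' topic name is a common RViz convention assumed here, not taken from the source:

# Sketch only; assumes rospy is initialised and geometry_msgs' Pose is imported.
marker_pub = rospy.Publisher('visualization_marker', visualization_msgs.msg.Marker, queue_size=1)
p = Pose()
p.orientation.w = 1.0
marker_pub.publish(make_marker(0, p, frame_id='base_link', arrow_direction='z'))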
Example #12
    def __configure_pose(self, t, msg):
        z = tf_conversions.fromMsg(msg).M.UnitZ()[2]
        # TODO: Check that this is correctly maintaining X and Y
        # If Z is pointing down, flip it up
        if z < 0:  # Need to flip Z for pi/2
            q1 = [1, 0, 0, 0]
            ori = [
                msg.orientation.x, msg.orientation.y, msg.orientation.z,
                msg.orientation.w
            ]
            res1 = tf.transformations.quaternion_multiply(ori, q1)
            q2 = [0, 0, 1, 0]
            res2 = tf.transformations.quaternion_multiply(res1, q2)
            msg.orientation.x = res2[0]
            msg.orientation.y = res2[1]
            msg.orientation.z = res2[2]
            msg.orientation.w = res2[3]

        t.transform.translation.x = msg.position.x
        t.transform.translation.y = msg.position.y
        t.transform.translation.z = msg.position.z
        t.transform.rotation.x = msg.orientation.x
        t.transform.rotation.y = msg.orientation.y
        t.transform.rotation.z = msg.orientation.z
        t.transform.rotation.w = msg.orientation.w
Example #13
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander, listener, graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group command from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame, move_group_commander.get_end_effector_link(),
                                     rospy.Time(0), timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(grasp_frame, move_group_commander.get_end_effector_link(),rospy.Time())
    except:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n " +
                    "Failed to find transform from %s to %s"%(grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()



    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    if move_group_commander.get_end_effector_link() == 'l_wrist_roll_link':
        rospy.logerr('This is a PR2\'s left arm so we have to rotate things.')
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], tf.transformations.quaternion_from_euler(0,math.pi/2,0))
    else:
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], [0,0,0])
    actual_ee_pose_matrix = np.dot( graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose_matrix = np.dot(actual_ee_pose_matrix, pr2_is_weird_mat)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
Example #14
    def remove_anchor_pose(cls, anchor, data):

        # Convert anchor pose into a PyKDL.Frame: simplifies
        anchor_frame = tf_conversions.fromMsg(anchor.pose)
        anchor_frame.M = PyKDL.Rotation()  # NO ROTATION

        def subtract_pose(point, verbose=False):
            p = copy.deepcopy(point)

            # Find the difference in poses. NOTE we do not change anything other
            # than the pose (twist & wrench stay the same).
            point_frame = tf_conversions.fromMsg(point.pose)
            delta = anchor_frame.Inverse() * point_frame
            p.pose = tf_conversions.toMsg(delta)

            if verbose:
                print('{} {} {} -> {} {} {}'.format(point.pose.position.x,
                                                    point.pose.position.y,
                                                    point.pose.position.z,
                                                    p.pose.position.x,
                                                    p.pose.position.y,
                                                    p.pose.position.z))
            return p

        parsed = [subtract_pose(x) for x in data]

        # Create an identity translation/rotation for the new anchor pose.
        anchor_new = copy.deepcopy(anchor)
        identity = tf_conversions.Frame()
        anchor_new.pose = tf_conversions.toMsg(identity)

        return (anchor_new, parsed)
Example #15
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander, listener, graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group command from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame, move_group_commander.get_end_effector_link(),
                                     rospy.Time(0), timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(grasp_frame, move_group_commander.get_end_effector_link(),rospy.Time())
    except:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n " +
                    "Failed to find transform from %s to %s"%(grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()



    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    actual_ee_pose_matrix = np.dot( graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
Example #16
	def process_image_data(self, msg):
		if self.ready:
			if self.last_odom is None:
				if self.current_odom is not None:
					self.save_data(msg)
				return

			current_frame = tf_conversions.fromMsg(self.current_odom.pose.pose)
			old_frame = tf_conversions.fromMsg(self.last_odom.pose.pose)
			difference = old_frame.Inverse() * current_frame

			delta_distance = difference.p.Norm()
			delta_theta = abs(difference.M.GetRPY()[2])

			if delta_distance > self.distance_threshold or delta_theta > self.angle_threshold:
				self.save_data(msg)
Example #17
def get_pose_as_tran(moveit_grasp_msg):
    """
    :type moveit_grasp_msg: moveit_msgs.msg.Grasp
    """
    assert isinstance(moveit_grasp_msg, moveit_msgs.msg.Grasp)
    pose_msg = moveit_grasp_msg.grasp_pose.pose
    return tf_conversions.toMatrix(tf_conversions.fromMsg(pose_msg))
Example #18
def create_occupancy_grid_from_obstacles(obstacles,
                                         mins_xyz,
                                         step_size_xyz,
                                         dims_xyz,
                                         use_binvox=False):
    voxel_grid = np.zeros(shape=dims_xyz)

    for obstacle in obstacles:
        if use_binvox:
            vox = binvox_rw.read_as_3d_array(
                open(obstacle.mesh_filepath.replace('.ply', '.binvox'), 'r'))
            vertices = binvox_to_points(vox)
        else:
            vertices = read_vertex_points_from_ply_filepath(
                obstacle.mesh_filepath)
        frame = tf_conversions.fromMsg(obstacle.pose_stamped.pose)
        transform = tf_conversions.toMatrix(frame)
        vertices_transformed = transform_points(vertices, transform)

        if use_binvox:
            voxel_grid += add_obstacles_to_reachability_space_full_binvox(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)
        else:
            voxel_grid += add_obstacles_to_reachability_space_full(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)

    voxel_grid[np.where(voxel_grid > 0)] = 1
    return voxel_grid
Example #19
    def do_discrete_correction(self, pose, old_goal_frame_odom,
                               old_goal_frame_world):
        new_goal_frame_world = tf_conversions.fromMsg(
            self.poses[self.goal_index])
        turning_goal = is_turning_goal(old_goal_frame_world,
                                       new_goal_frame_world)
        inter_goal_offset_world = old_goal_frame_world.Inverse(
        ) * new_goal_frame_world
        inter_goal_distance_world = inter_goal_offset_world.p.Norm()

        offsets, correlations = self.calculate_image_pose_offset(
            self.goal_index, self.search_range)
        if self.goal_index >= self.search_range:
            rotation_offset = offsets[self.search_range]
            rotation_correlation = correlations[self.search_range]
        else:
            rotation_offset = offsets[-self.search_range - 1]
            rotation_correlation = correlations[-self.search_range - 1]

        offset = rotation_offset

        rotation_correction = self.rotation_correction_gain * offset
        if rotation_correlation < self.image_recognition_threshold:
            rotation_correction = 0.0

        if not turning_goal and self.goal_index >= self.search_range and self.goal_index < len(
                self.poses) - self.search_range:
            corr = np.array(correlations)
            corr -= self.image_recognition_threshold
            corr[corr < 0] = 0.0
            s = corr.sum()
            if s > 0:
                corr /= s
            w = corr * np.arange(-self.search_range, self.search_range + 1, 1)
            pos = w.sum()
            path_error = pos

            path_correction_distance = -self.path_correction_gain * path_error * GOAL_DISTANCE_SPACING
            path_correction = (GOAL_DISTANCE_SPACING + path_correction_distance
                               ) / GOAL_DISTANCE_SPACING
            if np.isnan(path_correction):
                print(corr, s, w, pos, GOAL_DISTANCE_SPACING)
            if -path_correction_distance > GOAL_DISTANCE_SPACING:
                print(
                    'PATH CORRECTION ERROR: correction is greater than distance to goal!'
                )
                print(
                    'corr = %s; pos = %f, path_correction = %f, goal_distance = %f'
                    % (str(corr), pos, path_correction_distance,
                       GOAL_DISTANCE_SPACING))
                print('path_correction = %f' % path_correction)
                path_correction_distance = -GOAL_DISTANCE_SPACING
                path_correction = 0.0
        else:
            path_correction = 1.0
            path_correction_distance = 0.0

        self.sum_theta_correction = rotation_correction
        self.sum_path_correction = path_correction_distance
        return rotation_correction, path_correction
Example #20
def read_pose_files(pose_files):
    return [
        tf_conversions.fromMsg(
            message_converter.convert_dictionary_to_ros_message(
                'geometry_msgs/Pose', json.loads(read_file(p))))
        for p in pose_files
    ]
Example #21
def pose2matrix(pose):
    p = pose
    if hasattr(pose, "pose"):
        p = pose.pose
    T = tf_conversions.fromMsg(p)
    T = tf_conversions.toMatrix(T)
    return T
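A quick illustrative call (values made up) showing that both PoseStamped and bare Pose inputs are accepted once the fromMsg(p) fix above is in place:

from geometry_msgs.msg import PoseStamped

ps = PoseStamped()
ps.pose.orientation.w = 1.0
T_from_stamped = pose2matrix(ps)      # has a .pose attribute, so ps.pose is used
T_from_pose = pose2matrix(ps.pose)    # a bare Pose works too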
Example #22
    def set_workspace_shape(self, req):
        ray_tf = self.pointing_model.pointing_ray()

        req_ws_shape = self.ws_shape

        if req.robot_frame:
            self.robot_frame = req.robot_frame

        if ray_tf:
            ray_kdl_frame = transform_to_kdl(ray_tf)
            robot_kdl_frame = self.frame_from_tf(self.human_frame, self.robot_frame) #self.ws_shape.frame_id, self.robot_frame)
            ray_origin_kdl_frame = self.frame_from_tf(self.human_frame, self.ray_origin_frame)

            if robot_kdl_frame and ray_origin_kdl_frame:
                if req.workspace_shape == SetWorkspaceShapeRequest.WORKSPACE_XY_PLANE:
                    req_ws_shape = self.xy_plane
                elif req.workspace_shape == SetWorkspaceShapeRequest.WORKSPACE_VISUAL_PLANE:
                    req_ws_shape = self.vis_plane
                elif req.workspace_shape == SetWorkspaceShapeRequest.WORKSPACE_CYLINDER:
                    req_ws_shape = self.cylinder
                elif req.workspace_shape == SetWorkspaceShapeRequest.WORKSPACE_SPHERE:
                    req_ws_shape = self.sphere
                else:
                    raise rospy.ServiceException('Unsupported shape type')
                    # return None

                cur_pointer_pose = self.get_pointer(self.ws_shape, ray_kdl_frame, self.ws_shape.point)

                if self.switch_at_pointer:
                    switch_point = copy.deepcopy(tfc.fromMsg(cur_pointer_pose).p)
                else:
                    switch_point = copy.deepcopy(robot_kdl_frame.p)


                new_pointer_pose = self.get_pointer(req_ws_shape, ray_kdl_frame, switch_point)

                eyes_robot_vector = kdl.Vector(robot_kdl_frame.p - ray_origin_kdl_frame.p)

                robot_elev_frame = self.pointing_model._frame_from_direction(ray_origin_kdl_frame.p, eyes_robot_vector)
                _, pitch, _ = robot_elev_frame.M.GetRPY()

                if math.fabs(pitch) < self.min_elevation_angle:
                    raise rospy.ServiceException('Drone elevation angle is less than {} deg: {}. Ignoring'
                        .format(math.degrees(self.min_elevation_angle), math.degrees(pitch)))
                else:
                    # Since the safety check succeeded we can update the pass_point of the shape
                    new_pointer_pose = self.get_pointer(req_ws_shape, ray_kdl_frame, switch_point, cache = True)

            else:
                raise rospy.ServiceException('Unable to obtain robot\'s frame. Is robot localized?')
                # return None

        else:
            raise rospy.ServiceException('Internal error: failed to get ray frame')
            # return None

        self.ws_shape = req_ws_shape

        return SetWorkspaceShapeResponse()
Example #23
    def top_cam_tag_callback(self, tag_detections):
        #rospy.loginfo("Received top cam tag!")

        for detection in tag_detections.detections:
            self.top_cam_to_table_stamp = tag_detections.header.stamp
            self.top_cam_tag_found = True
            tag_id = detection.id[0]
            self.top_cam_to_table[tag_id] = tfc.toMatrix(tfc.fromMsg(detection.pose.pose.pose))
Example #24
def transform_pose(grasp_pose_in_world, world_frame_to_object_frame_transform):

    # Convert ROSmsg to 4x4 numpy arrays
    grasp_pose_in_world_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(grasp_pose_in_world))
    object_pose_in_world_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(world_frame_to_object_frame_transform))

    # Transform the grasp pose into the object reference frame
    grasp_pose_in_object_tran_matrix = np.dot(
        np.linalg.inv(object_pose_in_world_tran_matrix),
        grasp_pose_in_world_tran_matrix)

    # Convert pose back into ROS message
    grasp_pose_in_object = tf_conversions.toMsg(
        tf_conversions.fromMatrix(grasp_pose_in_object_tran_matrix))

    return grasp_pose_in_object
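The same computation written with PyKDL frames directly, without going through 4x4 matrices (a sketch; equivalent under the tf_conversions API used above):

grasp_in_world = tf_conversions.fromMsg(grasp_pose_in_world)
object_in_world = tf_conversions.fromMsg(world_frame_to_object_frame_transform)
grasp_pose_in_object = tf_conversions.toMsg(object_in_world.Inverse() * grasp_in_world)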
Example #25
    def calculate_pose(self, ignore_heading=False):
        if not self.human_pose_msg or not self.pointing_ray_msg or not self.robot_pose_msg:
            if not self.human_pose_msg:
                rospy.logerr('Cannot relloc: user\'s MOCAP pose is not known')
            if not self.pointing_ray_msg:
                rospy.logerr('Cannot relloc: pointing ray is not known')
            if not self.robot_pose_msg:
                rospy.logerr('Cannot relloc: robot MOCAP pose is not known')

            return None

        # Project human pose on the ground and adjust pointing ray
        # tmp_h_f = tfc.fromMsg(self.human_pose_msg.pose)
        # tmp_ray_f = tfc.fromMsg(self.pointing_ray_msg.pose)
        # _,_,yaw = tmp_h_f.M.GetRPY()
        # _,_,ray_yaw = tmp_ray_f.M.GetRPY()

        tmp_h_f = tfc.fromMsg(self.human_pose_msg.pose)
        tmp_robot_f = tfc.fromMsg(self.robot_pose_msg.pose)

        dir_v = tmp_robot_f.p - tmp_h_f.p  # from human to robot

        u = kdl.Vector(dir_v.x(), dir_v.y(), dir_v.z())
        u.Normalize()

        yaw = np.math.atan2(u.y(), u.x())

        ######### The bug that cost me a finger being cut by the drone blades #########################################
        # human_f = kdl.Frame(kdl.Rotation.RPY(0.0, 0.0, ray_yaw - yaw), kdl.Vector(tmp_h_f.p.x(), tmp_h_f.p.y(), 0.0))
        ###############################################################################################################

        if ignore_heading:
            human_f = kdl.Frame(kdl.Rotation.RPY(0.0, 0.0, self.cached_yaw),
                                kdl.Vector(tmp_h_f.p.x(), tmp_h_f.p.y(), 0.0))
        else:
            human_f = kdl.Frame(kdl.Rotation.RPY(0.0, 0.0, yaw),
                                kdl.Vector(tmp_h_f.p.x(), tmp_h_f.p.y(), 0.0))
            self.cached_yaw = yaw

        t = self.kdl_to_transform(human_f)
        t.header = self.human_pose_msg.header
        t.child_frame_id = self.human_frame

        return t
Example #26
def get_original_grasp_tran(moveit_grasp_msg):
    listener = tf.listener.TransformListener()
    print '1'
    listener.waitForTransform("approach_tran", 'staubli_rx60l_link7', rospy.Time(), timeout=rospy.Duration(1.0))
    at_to_ee_tran, at_to_ee_rot = listener.lookupTransform("approach_tran", 'staubli_rx60l_link7',rospy.Time())
    print '2'
    moveit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(moveit_grasp_msg.grasp_pose.pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    original_ee_pose_matrix = np.dot( moveit_grasp_msg_final_grasp_tran_matrix, numpy.linalg.inv(approach_tran_to_end_effector_tran_matrix))
    return original_ee_pose_matrix
Example #27
    def make_new_goal(self, rotation_correction=0.0, path_correction=1.0):
        old_goal_index = self.goal_index
        old_goal_frame_world = tf_conversions.fromMsg(
            self.poses[old_goal_index])
        current_goal_frame_odom = tf_conversions.fromMsg(self.goal)

        state = self.update_goal_index()

        if state == GOAL_STATE.finished:
            print('Localiser stopping. Reached final goal.')
            self.running = False
            return

        if state == GOAL_STATE.restart:
            # don't have a big offset from the end of the path, back to the start
            old_goal_frame_world = tf_conversions.Frame()

        new_goal_frame_world = tf_conversions.fromMsg(
            self.poses[self.goal_index])
        turning_goal = is_turning_goal(old_goal_frame_world,
                                       new_goal_frame_world)
        goal_offset = get_corrected_goal_offset(old_goal_frame_world,
                                                new_goal_frame_world,
                                                rotation_correction,
                                                path_correction)
        new_goal = current_goal_frame_odom * goal_offset

        sum_path_correction_ratio = (
            GOAL_DISTANCE_SPACING +
            self.sum_path_correction) / GOAL_DISTANCE_SPACING

        self.update_goal(new_goal, True, turning_goal)
        self.save_data_at_goal(self.last_odom_pose, new_goal,
                               new_goal_frame_world, self.sum_theta_correction,
                               sum_path_correction_ratio)
        self.goal_number += 1
        print('[%d] theta [%f]\tpath [%f]' %
              (old_goal_index, math.degrees(
                  self.sum_theta_correction), sum_path_correction_ratio))
        if turning_goal:
            print('turning goal:')
        self.sum_theta_correction = 0
        self.sum_path_correction = 0.0
Example #28
def read_transform_stamped_files(pose_files):
    transforms = [
        message_converter.convert_dictionary_to_ros_message(
            'geometry_msgs/TransformStamped',
            json.loads(read_file(p))).transform for p in pose_files
    ]
    return [
        tf_conversions.fromMsg(Pose(tf.translation, tf.rotation))
        for tf in transforms
    ]
Example #29
def xyzrpy_to_xform(xyzrpy):

    r = tf_conversions.Rotation.RPY(*xyzrpy[3:])
    pose = Pose(Point(*xyzrpy[:3]), Quaternion(*r.GetQuaternion()))
    frame = tf_conversions.fromMsg(pose)
    transform = tf_conversions.toMatrix(frame)
    rotMatrix = transform[0:3, 0:3].flatten().tolist()
    tran = [pose.position.x, pose.position.y, pose.position.z]

    return rotMatrix, tran
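An illustrative call with made-up values (x, y, z in meters; roll, pitch, yaw in radians):

rotMatrix, tran = xyzrpy_to_xform([0.1, 0.2, 0.3, 0.0, 0.0, 1.5708])
# rotMatrix is the 3x3 rotation flattened row-major into a 9-element list;
# tran is simply [0.1, 0.2, 0.3]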
Example #30
    def update(self):
        for name,waypoint in self.waypoints.items():
            F = tf_c.fromMsg(waypoint)
            self.broadcaster_.sendTransform(tuple(F.p),tuple(F.M.GetQuaternion()),rospy.Time.now(), name, '/world')

        for name,waypoint_data in self.relative_waypoints.items():
            try:
                F_relative_frame_waypoint = tf_c.fromMsg(waypoint_data[0])
                relative_frame_name = waypoint_data[1]
                F_world_relative_frame = tf_c.fromTf(self.listener_.lookupTransform('/world', relative_frame_name, rospy.Time(0)))
                F_relative_waypoint = F_world_relative_frame*F_relative_frame_waypoint
                self.broadcaster_.sendTransform(tuple(F_relative_waypoint.p),tuple(F_relative_waypoint.M.GetQuaternion()),rospy.Time.now(), name, '/world')
                self.F_relative_waypoint_last = F_relative_waypoint
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                if self.F_relative_waypoint_last is None:
                    pass
                else:
                    self.broadcaster_.sendTransform(tuple(self.F_relative_waypoint_last.p),tuple(self.F_relative_waypoint_last.M.GetQuaternion()),rospy.Time.now(), name, '/world')

        self.check_landmarks()
Example #31
def get_normal_from_pose(pose):
    """ from pose, compute the normal on z
    https://answers.ros.org/question/222101/get-vector-representation-of-x-y-z-axis-from-geometry_msgs-pose/?answer=222179#post-id-222179
    """
    # p = Pose()
    # p.orientation = pose.orientation
    # z1 = (quaternion_matrix((p.orientation.x, p.orientation.y, p.orientation.z, p.orientation.w)))[0:3,2:3]
    z = tf_conversions.fromMsg(pose).M.UnitZ()
    normal = np.array([[z[0], z[1], z[2]]]).T

    return normal
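A small sanity check (a sketch; assumes geometry_msgs' Pose is imported and np is numpy, as in the function above): the normal of an identity-orientation pose is the world z-axis.

p = Pose()
p.orientation.w = 1.0
normal = get_normal_from_pose(p)              # 3x1 column vector
assert np.allclose(normal.flatten(), [0.0, 0.0, 1.0])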
Example #32
    def create_correction(self, word, robot_anchor):
        """ Create an AnchoredCorrection message using the given word & anchor.

        Get the correction points from the stored map, using a *single* word.
        """

        if word not in self._corrections:
            rospy.logerr('Word [{}] not in known corrections: {}'.format(
                word, self._corrections.keys()))
            return None

        # Make sure we have a string, not a list of strings.
        assert isinstance(word, basestring)

        correction = AnchoredDemonstration()
        correction.header.stamp = rospy.Time.now()
        correction.words.append(word)
        correction.num_words = 1

        # Set the corrections: Transform each demonstrated point so it begins
        # from the robot anchor.
        correction.num_points = len(self._corrections[word])
        anchor_frame = tf_conversions.fromMsg(
            robot_anchor.pose)  # PyKDL.Frame.
        anchor_frame.M = PyKDL.Rotation()  # No rotation.
        for c in self._corrections[word]:

            # Convert each pose to be in the world frame, using the anchor.
            pose_frame = tf_conversions.fromMsg(c.pose)
            offset = anchor_frame * pose_frame

            # Copy the correction and update the *pose only*.
            new_c = copy.deepcopy(c)
            new_c.pose = tf_conversions.toMsg(offset)
            correction.demonstration.append(new_c)
            pass

        # Keep the anchor pose to indicate where the correction starts from.
        correction.anchor = robot_anchor

        return correction
Example #33
    def process_odom_data(self, msg):
        current_frame = tf_conversions.fromMsg(msg.pose.pose)
        d = current_frame.p.y()
        theta = current_frame.M.GetRPY()[2]

        turn_command = -self.gain_distance * d - self.gain_turn * theta

        motor_command = TwistStamped()
        motor_command.header.stamp = rospy.Time.now()
        motor_command.twist.linear.x = 0.1
        motor_command.twist.angular.z = turn_command

        self.pub_cmd_vel.publish(motor_command)
Example #34
    def run(self):
        loop_rate = rospy.Rate(self.publish_rate)

        while not rospy.is_shutdown():
            try:
                loop_rate.sleep()

                if self.ws_shape:
                    if self.ws_shape != self.last_ws_shape:
                        self.pub_workspace_shape.publish(self.get_shape_name(self.ws_shape))
                        self.last_ws_shape = self.ws_shape

                ray_tf = self.pointing_model.pointing_ray()

                if ray_tf:
                    ray_kdl_frame = transform_to_kdl(ray_tf)
                    self.tf_br.sendTransform(ray_tf)

                    ray_pose = PoseStamped()
                    ray_pose.header = ray_tf.header
                    ray_pose.pose = tfc.toMsg(ray_kdl_frame)
                    self.pub_pointing_ray.publish(ray_pose)

                    ray_origin_kdl_frame = self.frame_from_tf(self.human_frame, self.ray_origin_frame)
                    shape_origin_kdl_frame = self.frame_from_tf(self.human_frame, self.ws_shape.frame_id)

                    if ray_origin_kdl_frame and shape_origin_kdl_frame:
                        pointer_pose = self.get_pointer(self.ws_shape, ray_kdl_frame)

                        if pointer_pose:
                            m_msg = self.create_rviz_marker(self.ws_shape, self.ws_shape.point - copy.deepcopy(shape_origin_kdl_frame.p))

                            if m_msg:
                                self.pub_markers.publish(m_msg)

                            pose_msg = PoseStamped()
                            pose_msg.header = ray_tf.header
                            pose_msg.pose = pointer_pose

                            self.pub_arm_pointer.publish(pose_msg)

                            eyes_pointer = tfc.fromMsg(pointer_pose).p - ray_origin_kdl_frame.p
                            pr_marker_msg = self.create_pointing_ray_marker(self.pointing_model.frame_id, eyes_pointer.Norm())

                            if pr_marker_msg:
                                self.pub_markers.publish(pr_marker_msg)

            except rospy.ROSException, e:
                if e.message == 'ROS time moved backwards':
                    rospy.logwarn('Saw a negative time change, resetting.')
Example #35
    def publish_goal(self, pose, lookahead_distance=0.0, stop_at_goal=False):
        goal = Goal()
        goal.pose.header.stamp = rospy.Time.now()
        goal.pose.header.frame_id = "odom"

        lookahead = tf_conversions.Frame(
            tf_conversions.Vector(lookahead_distance, 0, 0))

        # add the offset for navigating to the goal:
        # (offset * odom).Inverse() * goal = odom.Inverse() * pose
        # goal = (offset * odom) * odom.Inverse() * pose
        # goal = offset * pose
        original_pose_frame = tf_conversions.fromMsg(pose)
        pose_frame = self.zero_odom_offset * original_pose_frame
        original_pose_frame_lookahead = original_pose_frame * lookahead
        pose_frame_lookahead = pose_frame * lookahead

        goal.pose.pose.position.x = pose_frame_lookahead.p.x()
        goal.pose.pose.position.y = pose_frame_lookahead.p.y()
        goal.pose.pose.orientation = tf_conversions.toMsg(
            pose_frame_lookahead).orientation

        goal.stop_at_goal.data = stop_at_goal
        self.goal_pub.publish(goal)
        self.goal_plus_lookahead = tf_conversions.toMsg(
            original_pose_frame_lookahead)

        if self.publish_gt_goals:
            try:
                trans = self.tfBuffer.lookup_transform('map', 'odom',
                                                       rospy.Time())
                trans_frame = tf_conversions.Frame(
                    tf_conversions.Rotation.Quaternion(
                        trans.transform.rotation.x, trans.transform.rotation.y,
                        trans.transform.rotation.z, trans.transform.rotation.w),
                    tf_conversions.Vector(trans.transform.translation.x,
                                          trans.transform.translation.y,
                                          trans.transform.translation.z))

                goal_pose = PoseStamped()
                goal_pose.header.stamp = rospy.Time.now()
                goal_pose.header.frame_id = "map"
                goal_pose.pose = tf_conversions.toMsg(trans_frame *
                                                      pose_frame_lookahead)
                self.goal_pose_pub.publish(goal_pose)
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                    tf2_ros.ExtrapolationException):
                print('Could not lookup transform from /map to /odom')
                pass
Example #36
 def follow_goal_cb(self,msg):
     if self.driver_status == 'FOLLOW':
         # Set follow goal pose as axis-angle
         F_goal = tf_c.fromMsg(msg.pose)
         a,axis = F_goal.M.GetRotAngle()
         with self.pid_lock:
             self.follow_goal_axis_angle = list(F_goal.p) + [a*axis[0],a*axis[1],a*axis[2]]
         # Broadcast goal for debugging purposes
         self.broadcaster_.sendTransform(tuple(F_goal.p),tuple(F_goal.M.GetQuaternion()),rospy.Time.now(), '/ur_goal','/base_link')
         # Set goal pose as PID set point
         # with self.pid_lock:
         #     for pid, g in zip(self._pid, self.follow_goal_axis_angle):
         #         pid.setPoint(g)
     else:
         rospy.logerr("FOLLOW NOT ENABLED!")
Example #37
def get_ground_truth_poses(dir):
    pose_files = np.array(
        [f for f in os.listdir(dir) if f.endswith('_map_to_base_link.txt')])
    nums = [int(s.split('_')[0]) for s in pose_files]
    idx = np.argsort(nums)
    pose_files = [dir + f for f in pose_files[idx]]
    transforms = [
        message_converter.convert_dictionary_to_ros_message(
            'geometry_msgs/TransformStamped',
            json.loads(read_file(p))).transform for p in pose_files
    ]
    return [
        tf_conversions.fromMsg(Pose(tf.translation, tf.rotation))
        for tf in transforms
    ]
Example #38
 def servo_to_pose_call(self,req): 
     if self.driver_status == 'SERVO':
         rospy.logwarn(req)
         self.F_command = tf_c.fromMsg(req.target)
         M_command = tf_c.toMatrix(self.F_command)
         # Calculate IK
         joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
         if joint_positions is not None:
             pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
             self.q = joint_positions
         else:
             rospy.logwarn('no solution found')
         # self.send_command(F_command)
         return 'SUCCESS - moved to pose'
     else:
         rospy.logerr('SIMPLE UR -- Not in servo mode')
         return 'FAILED - not in servo mode'
Example #39
def graspit_grasp_pose_to_moveit_grasp_pose(
        listener,                     # type: tf.TransformListener
        graspit_grasp_msg,            # type: graspit_msgs.msg.Grasp
        end_effector_link,            # type: str
        grasp_frame                   # type: str
):
    # type: (...) -> geometry_msgs.msg.Pose
    """
    :param listener: A transformer for looking up the transformation
    :param graspit_grasp_msg: A graspit grasp message
    """

    try:
        listener.waitForTransform(
            grasp_frame,
            end_effector_link,
            rospy.Time(0),
            timeout=rospy.Duration(1)
        )

        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(
            grasp_frame,
            end_effector_link,
            rospy.Time()
        )

    except Exception as e:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n "
                     "Failed to find transform from {} to {}"
                     .format(grasp_frame, end_effector_link)
                     )
        ipdb.set_trace()

        return geometry_msgs.msg.Pose()

    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose)
    )

    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    actual_ee_pose_matrix = np.dot(graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))

    return actual_ee_pose
Example #40
 def servo_to_pose_call(self,req): 
     if self.driver_status == 'SERVO':
         T = tf_c.fromMsg(req.target)
         a,axis = T.M.GetRotAngle()
         pose = list(T.p) + [a*axis[0],a*axis[1],a*axis[2]]
         # Check acceleration and velocity limits
         if req.accel > self.MAX_ACC:
             acceleration = self.MAX_ACC
         else:
             acceleration = req.accel
         if req.vel > self.MAX_VEL:
             velocity = self.MAX_VEL
         else:
             velocity = req.vel
         # Send command
         self.rob.movel(pose,acc=acceleration,vel=velocity)
         return 'SUCCESS - moved to pose'
     else:
         rospy.logerr('SIMPLE UR -- Not in servo mode')
         return 'FAILED - not in servo mode'
Example #41
    def get_most_recent_transform(self):

        left_pose, left_time = self.left_arm_tf_updater.current_pose_and_time
        right_pose, right_time = self.right_arm_tf_updater.current_pose_and_time

        if(right_time > left_time):
            pose = right_pose
            camera_transform = self.right_arm_tf_updater.camera_transform
        else:
            pose = left_pose
            camera_transform = self.left_arm_tf_updater.camera_transform
       
        transform = None
        if(pose):
            checkerboard_in_camera = tf_conversions.toMatrix(tf_conversions.fromMsg(pose))
            checkerboard_in_body = camera_transform * checkerboard_in_camera
            checkerboard_in_body_tf = tf_conversions.toTf(tf_conversions.fromMatrix(checkerboard_in_body))
            checkerboard_rpy = tf.transformations.euler_from_quaternion(checkerboard_in_body_tf[1])
            transform = checkerboard_in_body_tf[0] + checkerboard_rpy

        return transform
Example #42
	def UpdateRobotJoints(self, msg):
		msg2 = None
		#rospy.loginfo('command received')

		MyTrans = self.manip.GetTransform()   # Get the hand transform
		#ftest = tf_conversions.fromMsg(MyTrans)
		#ftest = tf_conversions.toMatrix(ftest)	
		
		f1 = tf_conversions.fromMsg(msg)
		MyMatrix = tf_conversions.toMatrix(f1)


		# Create message to set robot commands
		move_msg = PoseCheck()
		# Copy Postition/Orientation from RobotPose msg to PoseCheck srv
		move_msg.position = msg.position
		move_msg.orientation = msg.orientation
		move_msg.spread = 0.0

		# Check if A button was pressed, if so initiate robot movement
		if(msg.j_position[0]):
			move_msg.move = True
		else:
			move_msg.move = False	
		# not currently used so set to false by default		
		move_msg.get_norm = False
		
		# PRE OSU_ROS_ADEPT CODE, NOT USED IN ADEPT/TRIGRIP PROGRAM

		# Function to run if using Interactive Markers
		if msg.j_position[3] != 0:
			msg2 = PoseCheck()
			mycog = self.object.GetTransform()
			#Add offset of object and subtract a small amount to fix minor Z offset error.
			MyMatrix[0:3,3] = MyMatrix[0:3,3] + mycog[0:3,3] - MyMatrix[0:3,2]*.0175
			f2 = tf_conversions.fromMatrix(MyMatrix)   # Convert the transform to a ROS frame
			msg2.pose = tf_conversions.toMsg(f2) 
			msg2.move = True
		#rospy.logdebug("before receiving error")
		if msg.j_position[1] == 0 and msg.j_position[2] == 1:
			grasp_msg=GraspObject()
			grasp_msg.grasp_direction="open"
			grasp_msg.spread=False
			grasp_msg.get_joint_vals=False		
			self.Grasp_Object(grasp_msg) # Open the hand
			#self.Grasp_Object(grasp_direction="open") # Open the hand
			
		# END NON OSU_ROS_ADEPT CODE

		if msg2 is not None:  # Move arm to new position
			self.handle_pose_check(msg2)
		else:
			# Sends osu_ros_adept message
			self.handle_pose_check(move_msg)
    
		#TODO: Fix this or remove it for the WAM operation.  Cannot use robot.SetTransform.
		# For Palm grasp option
		if msg.j_position[3] == 2:
			HandDirection = MyMatrix[0:3,2]
			report = CollisionReport()  # assumed: collision report object for the checks below, as used in the other examples
			for i in range(0,1000):  # loop until the object is touched or limit is reached
				MyMatrix[0:3,3] += .0001 * HandDirection
				with self.env:
					self.robot.SetTransform(MyMatrix)  # Move hand forward
				for link in self.robot.GetLinks():    # Check for collisions
					incollision=self.env.CheckCollision(link,report)
					if incollision:
						i = 1000
				if i == 1000:   # If hand is in collision, break from the loop
					break

		# Check to see if any of the manipulator buttons have been pressed.  Otherwise, move the hand.
		if msg.j_position[1] == 1 and msg.j_position[2] == 0:
			grasp_msg=GraspObject()
			grasp_msg.grasp_direction="close"
			grasp_msg.spread=False
			grasp_msg.get_joint_vals=False		
			self.Grasp_Object(grasp_msg) # Close the hand
			#self.Grasp_Object(grasp_direction="close") # Close the hand
		return
Example #43
    def joints_callback(self, pos):
		#rospy.logdebug(pos)
		# perform calculations to find new location        
		newpos = tf_conversions.fromMsg(pos)
		newpos = tf_conversions.toMatrix(newpos)

		#rospy.logdebug(newpos)
	
		# Create two matrices to modify the RPY matrix.  This is due to the fact that
		# the BarrettWAM hand is rotated 90deg with respect to the global frame.
		# The M.GetRPY function and Rotation.RPY use the X-axis as the roll axis
		# whereas the WAM wants to roll about the Z-axis.  Must rotate the RPY values
		# 90deg, then calculate the new RPY coordinates, then rotate back to OpenRAVE
		# coordinates again.
		#modMatrix = PyKDL.Rotation.RPY(0, -3.14159/2, 0)
		#modMatrix2 = PyKDL.Rotation.RPY(0, 3.14159/2, 0)

		oldrot = PyKDL.Rotation(newpos[0,0],newpos[0,1],newpos[0,2],newpos[1,0],newpos[1,1],newpos[1,2],newpos[2,0],newpos[2,1],newpos[2,2])
		#oldrot = oldrot * modMatrix  # rotating matrix
	
		# Get the RPY values from the new rotated matrix
		ftest = PyKDL.Frame(oldrot, PyKDL.Vector(0,0,0))
		testrot = ftest.M.GetRPY()

		# Calculate the new positions and roll, pitch, yaw
		roll = testrot[0] + .025 * (self.buttons[RB_IDX] - self.buttons[LB_IDX]) 
		pitch = testrot[1]
		if testrot[1] < -1.5 and self.axes[RIGHT_VERT] > 0 or testrot[1] > 1.5 and self.axes[RIGHT_VERT] < 0:
			pitch = testrot[1]
		else:
			pitch = testrot[1] - .025 * self.axes[RIGHT_VERT]
		yaw = testrot[2] + .025 * self.axes[RIGHT_HOR]
		z = ((self.axes[LEFT_TRIG]-3) - (self.axes[RIGHT_TRIG]-3))

	#
	#
	#  Two different styles of control that move the hand in different ways
	#
	#
		
		
	# Old move controls	
		#newpos[0,3] = newpos[0,3] + .01 * self.axes[LEFT_VERT]
		#newpos[1,3] = newpos[1,3] + .01 * self.axes[LEFT_HOR]
		#newpos[2,3] = newpos[2,3] + .01 * z
		
		#Weight to influence the speed of simulated robot motion				
		weight = .01 

	# "Cockpit" style control method
		newpos[0,3] = newpos[0,3] - weight * self.axes[LEFT_HOR] * math.sin(yaw) + weight * self.axes[1] * math.cos(yaw) * math.cos(pitch) + weight * z * math.sin(pitch) * math.cos(yaw)
		newpos[1,3] = newpos[1,3] + weight * self.axes[LEFT_VERT] * math.sin(yaw) * math.cos(pitch) + weight * self.axes[LEFT_HOR] * math.cos(yaw) + weight * z * math.sin(pitch) * math.sin(yaw)
		newpos[2,3] = newpos[2,3] + weight * z * math.cos(pitch) - weight * self.axes[LEFT_VERT] * math.sin(pitch)
	#rospy.logdebug('cos, sin, yaw: {0} ,{1} ,{2} ' .format(math.cos(yaw), math.sin(yaw), yaw) )


	# Create a frame for publishing.  Make sure to rotate back before creating the frame.
		v = PyKDL.Vector(newpos[0,3],newpos[1,3],newpos[2,3])
		r1 = PyKDL.Rotation.RPY(roll, pitch, yaw)
		#r1 = r1 * modMatrix2
		f1 = PyKDL.Frame(r1, v)	
	
	
	#the hand is non-existent. left over code from the wam. May be useful if someone attached a hand
	# Get the hand values and calculate the new positions
		#spread = pos.j_position[0]
		movement = self.buttons[A_IDX]		
		"""if self.buttons[2] and spread < 3.1:
			spread = spread + .01
		if self.buttons[0] and spread > 0:
			spread = spread - .01
		"""

		
		# publish the new position
		FrameMsg = tf_conversions.toMsg(f1)
		j_pos = array([movement,self.buttons[B_IDX],self.buttons[Y_IDX],0])
		#rospy.logdebug(self.buttons)
		j_vel = array([0,0,0,0])

		MyMsg = RobotPose()
		MyMsg.position = FrameMsg.position
		MyMsg.orientation = FrameMsg.orientation
		MyMsg.j_position = j_pos
		MyMsg.j_velocity = j_vel

		self.jointpub.publish(MyMsg)
Example #44
	def handle_pose_check(self, msg):
		#TODO: fix the msg input and function calls used in this node
		#self.MoveTriad(msg)
		#self.env.RemoveKinBody(self.object)
	
		#rospy.loginfo("handle_pose function called")
		#rospy.logdebug(msg)

		myrot = PyKDL.Rotation.Quaternion(msg.orientation.w, msg.orientation.x, msg.orientation.y, msg.orientation.z)
		r,p,y = PyKDL.Rotation.GetRPY(myrot)
		myrot = PyKDL.Rotation.RPY(r, -p, -y)
		MyFrame = PyKDL.Frame( myrot , PyKDL.Vector(msg.position.x, msg.position.y, msg.position.z) ) 
		FrameMsg1 = tf_conversions.toMsg(MyFrame)
		MyMatrix = tf_conversions.toMatrix(MyFrame)

		# Current location of RAVE model hand
		tmp = self.manip.GetTransform()

		#rospy.logdebug("\n\t\t====Current Matrix====\n" + str(tmp))

		# Convert the message to a matrix, MyMatrix will be used to find an IK solution
		f1 = tf_conversions.fromMsg(msg)
		MyMatrix = tf_conversions.toMatrix(f1)

#		rospy.logdebug("\n\t\t====Matrix For Soln====\n" + str(MyMatrix))
		
		# Spread values unused while working with trigrip
		#if msg.spread or msg.spread == 0:
		#	self.robot.SetDOFValues([msg.spread],[10])

		with self.env:   # Check to see if hand position is in collision.  If so, reset hand position.
			oldJoints = self.robot.GetDOFValues()
			#rospy.logdebug("\n\t====Old joints====\n" + str(oldJoints))
			try:	
				
				IKtype = IkParameterizationType.Transform6D
				#rospy.logdebug(IkParameterization(MyMatrix, IKtype))
				
				# Finds all solutions for a given point and orientation in space
				solns = self.manip.FindIKSolutions(IkParameterization(MyMatrix, IKtype), filteroptions=IkFilterOptions.CheckEnvCollisions)
				#rospy.loginfo("\n\t===solns===\n" + str(solns))

				# Holder for a better solution
				soln = None
				if solns != []:	    
					# Try to find the solution with the least change in joints 1 and 2, tries to minimize jumping
					soln = self.find_soln(solns, oldJoints)
				
				
				# Could be used in lieu of FindIKSolutions, finds just one solution, not necessarily the best
				#soln = self.manip.FindIKSolution(IkParameterization(MyMatrix, IKtype), filteroptions=IkFilterOptions.CheckEnvCollisions)
				
				# We have a solution to apply to the robot
				if(soln is not None):
					#rospy.loginfo("\n\t===soln===\n" + str(soln))
					self.robot.SetDOFValues(soln, self.manip.GetArmIndices())
					#Send Command to the physical robot
					if(msg.move):
						# Displays solutions used when A button is pressed
						rospy.loginfo("\n\t===Joint Solution===\n" + str(soln))
						self.send_cmd(soln)

				else:
					rospy.loginfo("No IK solution found")
					#self.env.AddKinBody(self.object)
					return osu_ros_adept.srv.PoseCheckResponse(False,[0,0,0])
			except Exception as e:
				rospy.loginfo("IK not available")
				#rospy.logdebug(e)
			report=CollisionReport()

			for link in self.robot.GetLinks():
				linkCollision = self.env.CheckCollision(link,report)
				if linkCollision:
					rospy.logdebug("Link collision " + str(linkCollision) + "Link: " + str(link))

				# Wam code, not used with Adept/Trigrip
				if "wam0" in str(link): # If looking at the base of the WAM, skip and continue with next link
					continue

				incollision=self.env.CheckCollision(link,report)
				if incollision:# and msg.j_position[1] == 0 and msg.j_position[2] == 0 or incollision and msg.j_position[3] != 0:
					rospy.logdebug("Incollision")
					# Detects collision, reset joints
					self.robot.SetDOFValues(oldJoints)
					rospy.loginfo("Collision detected")
					
					#self.env.AddKinBody(self.object)
					return PoseCheckResponse(False,[0,0,0])
		#self.MoveTriad(msg)
		#if not msg.move: # if not moving, reset robot to old Joint values
		#	self.robot.SetDOFValues(oldJoints)
		
		if msg.get_norm:  # Validated: Return norm is useful when using the Interactive markers to control the WAM
			vals = MyMatrix[0:3,2]
			#self.env.AddKinBody(self.object)
			return PoseCheckResponse(True, vals)
		#self.env.AddKinBody(self.object)
		return PoseCheckResponse(True,[0,0,0])