def execute(self, ud):
    start_time = rospy.get_time()
    rate = rospy.Rate(2.5)
    # retry at 2.5 Hz until we detect a table, exceed the timeout or get preempted
    while not self.preempt_requested() and not rospy.is_shutdown():
        segmented_objs = self.segment_srv(only_surface=True).segmented_objects.objects
        if segmented_objs:
            table = segmented_objs[0]
            pose = geo_msgs.PoseStamped(table.point_cloud.header,
                                        geo_msgs.Pose(table.center, table.orientation))
            pose = TF2().transform_pose(pose, pose.header.frame_id, 'map')
            width, length = table.width, table.depth
            table_tf = to_transform(pose, 'table_frame')
            TF2().publish_transform(table_tf)
            if 'table' in ud:
                table.name = ud['table'].name  # keep the name of the expected table, if any
            ud['table'] = table
            ud['table_pose'] = pose
            rospy.loginfo("Detected table of size %.1f x %.1f at %s", width, length, pose2d2str(pose))
            return 'succeeded'
        elif self.timeout and rospy.get_time() - start_time > self.timeout:
            rospy.logwarn("No table detected after %g seconds", rospy.get_time() - start_time)
            return 'aborted'
        try:
            rate.sleep()
        except rospy.ROSInterruptException:
            break
    rospy.logwarn("Detect table has been preempted after %g seconds (timeout %g)",
                  rospy.get_time() - start_time, self.timeout)
    return 'preempted'
def run_sm(sm, name):
    """ Run the given state machine """
    TF2()  # start listener asap to avoid delays when running
    rospy.sleep(rospy.get_param('~start_delay', 0.0))
    wait_for_sim_time()
    # Create and start the introspection server, if required (for FSM visualization or for controlling the camera)
    if rospy.get_param('~run_introspection_server', False):
        sis = smach_ros.IntrospectionServer(name, sm, '/SM_ROOT')
        sis.start()
    # MBF is the last component to start, so wait for it before running the sm
    wait_for_mbf()
    # Execute the state machine
    t0 = rospy.get_time()
    outcome = sm.execute()
    rospy.loginfo("%s completed in %.2fs with outcome '%s'", name, rospy.get_time() - t0, outcome)
    # Wait for ctrl-c to stop the application
    rospy.spin()
    if 'sis' in locals():
        sis.stop()
    rospy.signal_shutdown(name + ' ended')
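# Typical usage of run_sm (a sketch; 'build_app_sm' stands in for whatever function builds the
# application's state machine, and the node name is illustrative):
if __name__ == '__main__':
    rospy.init_node('smach_app')
    run_sm(build_app_sm(), 'SmachApp')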
def __add_free_space(name, x, y, operation=CollisionObject.ADD):
    # 0.5 x 0.5 x 0.1 m box resting on the ground (center at z = 0.05)
    sp = SolidPrimitive()
    sp.type = SolidPrimitive.BOX
    sp.dimensions = [0.5, 0.5, 0.1]
    spp = PoseStamped()
    spp.header.frame_id = 'map'
    spp.pose.position.x = x
    spp.pose.position.y = y
    spp.pose.position.z = 0.05
    spp.pose.orientation.w = 1
    co = CollisionObject()
    co.operation = operation
    co.id = name
    co.type.db = json.dumps({'table': 'NONE', 'type': 'free_space', 'name': co.id})
    co.header.frame_id = 'map'
    co.header.stamp = rospy.get_rostime()
    co.primitives.append(sp)
    co.primitive_poses.append(TF2().transform_pose(spp, spp.header.frame_id, co.header.frame_id).pose)
    psw = PlanningSceneWorld()
    psw.collision_objects.append(co)
    obstacle_pub.publish(psw)
    rospy.loginfo("Added a fake obstacle named '%s'", name)
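# Since 'operation' is exposed as a parameter, the same helper can also clear a previously added
# fake obstacle (a usage sketch; the name and coordinates are illustrative):
__add_free_space('free_space_1', 1.0, 2.0)                          # add a box at (1, 2)
__add_free_space('free_space_1', 1.0, 2.0, CollisionObject.REMOVE)  # remove it again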
def __init__(self):
    smach.State.__init__(self, outcomes=['have_target', 'no_targets'],
                         input_keys=['objects', 'failures'],
                         output_keys=['target', 'tightening'])
    manip_frame = rospy.get_param('~rec_objects_frame', 'arm_base_link')
    self.arm_on_bfp_rf = TF2().transform_pose(None, manip_frame, 'base_footprint')  # arm pose on base_footprint reference frame
def execute(self, ud):
    try:
        ud['robot_pose'] = TF2().transform_pose(None, 'base_footprint', 'map')
        return 'succeeded'
    except rospy.ROSException as err:
        rospy.logerr("Get robot pose failed: %s", str(err))
        return 'aborted'
def execute(self, ud):
    try:
        # set as Pose, not PoseStamped, as required by FindRoomSequenceWithCheckpointsGoal
        ud['robot_pose'] = TF2().transform_pose(None, 'base_footprint', 'map').pose
        return 'succeeded'
    except rospy.ROSException as err:
        rospy.logerr("Get robot pose failed: %s", str(err))
        return 'aborted'
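# TF2 is a thorp_toolkit singleton wrapping tf2_ros; passing None as the pose, as above, yields
# the current pose of the source frame expressed in the target frame. A rough tf2_ros-only
# equivalent of that idiom (a sketch, not the actual thorp_toolkit implementation):
import rospy
import tf2_ros
from geometry_msgs.msg import PoseStamped

def frame_pose(tf_buffer, source_frame, target_frame):
    # the target<-source transform is exactly the pose of source_frame in target_frame
    tf = tf_buffer.lookup_transform(target_frame, source_frame, rospy.Time(0), rospy.Duration(1.0))
    pose = PoseStamped()
    pose.header = tf.header
    pose.pose.position.x = tf.transform.translation.x
    pose.pose.position.y = tf.transform.translation.y
    pose.pose.position.z = tf.transform.translation.z
    pose.pose.orientation = tf.transform.rotation
    return pose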
def make_goal(self, ud, goal):
    """ Create a goal for the action """
    room_number = ud['room_number']
    segmented_map = ud['segmented_map']
    # We need an image containing only the room to explore, so we create an empty image with the same
    # properties as the original map, set to white (\xFF) all pixels that correspond to the given room
    # number in the segmented map, and set the rest to black (\x00)
    goal.input_map = ud['map_image']
    goal.input_map.data = b''
    # segmented_map encoding is 32SC1, so 4 bytes per pixel, though just the first byte is enough for up to 255 rooms
    for i in range(segmented_map.height):
        for j in range(segmented_map.width):
            idx = i * segmented_map.step + j * 4
            val = segmented_map.data[idx:idx + 4]
            pixel = struct.unpack('<BBBB', val)[0]
            if pixel == room_number:
                goal.input_map.data += b'\xFF'
            else:
                goal.input_map.data += b'\x00'
    fov_points = rospy.get_param('/exploration/room_exploration_server/field_of_view_points')
    goal.field_of_view_origin = TF2().transform_pose(None, 'kinect_rgb_frame', 'base_footprint').pose.position
    goal.field_of_view = [geometry_msgs.Point32(*pt) for pt in fov_points]
    goal.planning_mode = 2  # plan a path for coverage with the robot's field of view
    goal.starting_position = to_pose2d(ud['robot_pose'])  # we need to convert to Pose2D msg
    # use room center as starting point; theta is ignored by room exploration
    room_info = ud['room_information_in_meter'][room_number - 1]
    start_point = geometry_msgs.PointStamped()
    start_point.header.frame_id = 'map'
    start_point.point = room_info.room_center
    goal.starting_position.x = room_info.room_center.x
    goal.starting_position.y = room_info.room_center.y
    goal.starting_position.theta = 0.0  # it's ignored
    self.start_pub.publish(start_point)
    # provide the starting pose so we can move there before starting exploring
    ud['start_pose'] = create_2d_pose(room_info.room_center.x, room_info.room_center.y, 0.0, 'map')
    # IDEA: can use room_info.room_min_max to avoid points colliding with the walls
    # visualize explore map and fov on RViz
    fov = geometry_msgs.PolygonStamped()
    fov.header.frame_id = 'kinect_rgb_frame'
    fov.polygon.points = goal.field_of_view
    rospy.Publisher('/exploration/img', sensor_msgs.Image, queue_size=1, latch=True).publish(goal.input_map)
    if not self.fov_pub_timer:
        self.fov_pub_timer = rospy.Timer(rospy.Duration(0.1), lambda e: self.fov_pub.publish(fov))
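# The loop above rebuilds the mask one byte at a time, costing a Python-level operation per pixel.
# A vectorized sketch of the same mask, assuming numpy is available (it compares the full
# little-endian int32 label rather than just its first byte, which is equivalent while labels fit
# in a single byte):
import numpy as np

def room_mask(segmented_map, room_number):
    rows = np.frombuffer(segmented_map.data, dtype='<i4').reshape(segmented_map.height,
                                                                  segmented_map.step // 4)
    pixels = rows[:, :segmented_map.width]  # drop any padding beyond 'width' labels per row
    return np.where(pixels == room_number, 0xFF, 0x00).astype(np.uint8).tobytes()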
def aim_to_target(self):
    if not self._target_obj_pose:
        rospy.logwarn("No target pose to aim to")
        return ThorpError(code=ThorpError.INVALID_TARGET_POSE, text="No target pose to aim to")
    pose_in_cannon_ref = TF2().transform_pose(self._target_obj_pose, None, 'cannon_shaft_link')
    adjacent = pose_in_cannon_ref.pose.position.x
    opposite = pose_in_cannon_ref.pose.position.z
    tilt_angle = atan2(opposite, adjacent)  # atan2 avoids dividing by zero when the target is right above/below
    rospy.logdebug("%g %g %g", adjacent, opposite, degrees(tilt_angle))
    tilt_angle = max(min(degrees(tilt_angle), +18), -18)  # clamp to the cannon's +/-18 degree tilt range
    return self.tilt(tilt_angle)
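# Worked example for aim_to_target: a target 1.0 m ahead (x, the adjacent side) and 0.35 m above
# (z, the opposite side) the cannon shaft gives atan2(0.35, 1.0) ~= 19.3 degrees, which the clamp
# then limits to the +18 degree tilt bound.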
def link_states_cb(msg):
    try:
        # throttle to 'frequency' Hz
        if hasattr(link_states_cb, 'last_pub_time') and rospy.get_time() - link_states_cb.last_pub_time < period:
            return
        link_states_cb.last_pub_time = rospy.get_time()
        # get map -> base_footprint tf from gazebo
        index = msg.name.index('thorp::base_footprint')
        map_to_bfp_tf = Transform.create(PoseStamped(Header(stamp=rospy.get_rostime(), frame_id='map'),
                                                     msg.pose[index]))
        # subtract (multiply by the inverse of) the odom -> base_footprint tf
        bfp_to_odom_tf = Transform.create(TF2().lookup_transform('base_footprint', 'odom'))
        map_to_odom_tf = map_to_bfp_tf * bfp_to_odom_tf
        TF2().publish_transform(map_to_odom_tf.to_geometry_msg_transform_stamped())
    except (ValueError, rospy.ROSException):
        pass  # base_footprint not yet in gazebo link states, or tf unavailable; retry on the next message
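# The composition above works because tf transforms chain like homogeneous matrices:
# map->odom = (map->base_footprint) * (base_footprint->odom), where the second factor is the
# inverse of the odom->base_footprint transform broadcast by odometry. The same identity with
# plain 4x4 matrices via ROS's tf.transformations (the two input matrices are hypothetical; a
# sketch only):
import numpy as np
import tf.transformations as tft

def map_to_odom_mat(map_to_bfp_mat, odom_to_bfp_mat):
    return np.dot(map_to_bfp_mat, tft.inverse_matrix(odom_to_bfp_mat))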
def execute(self, ud):
    from math import pi

    table = ud['table']
    table_pose = ud['table_pose']  # expected on map frame, as robot_pose
    if not table or not table_pose:
        rospy.logerr("Detected table contains None!")
        return 'no_table'
    p_x = self.distance + table.depth / 2.0
    n_x = -p_x
    p_y = self.distance + table.width / 2.0
    n_y = -p_y
    table_tf = to_transform(table_pose, 'table_frame')
    TF2().publish_transform(table_tf)
    table_tf.transform.translation.z = 0.0
    closest_pose = None
    closest_dist = float('inf')
    poses = [('p_x', create_2d_pose(p_x, 0.0, -pi, 'table_frame')),
             ('n_x', create_2d_pose(n_x, 0.0, 0.0, 'table_frame')),
             ('p_y', create_2d_pose(0.0, p_y, -pi / 2.0, 'table_frame')),
             ('n_y', create_2d_pose(0.0, n_y, +pi / 2.0, 'table_frame'))]
    pose_array = geometry_msgs.PoseArray()  # for visualization
    sides_poses = {}
    for name, pose in poses:
        pose = transform_pose(pose, table_tf)
        pose_array.poses.append(deepcopy(pose.pose))
        pose_array.poses[-1].position.z += 0.025  # raise over costmap to make it visible
        sides_poses[name] = pose
        dist = distance_2d(pose, ud['robot_pose'])
        if dist < closest_dist:
            closest_pose = pose
            closest_dist = dist
    ud['poses'] = sides_poses
    ud['closest_pose'] = closest_pose
    pose_array.header = deepcopy(closest_pose.header)
    pose_array.poses.append(deepcopy(closest_pose.pose))
    pose_array.poses[-1].position.z += 0.05  # highlight the closest pose with a double arrow
    self.poses_viz.publish(pose_array)
    return 'succeeded'
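# Worked example of the side-pose geometry above: for a table of depth 1.0 m and width 0.6 m with
# self.distance = 0.3 m, the four candidates in 'table_frame' are (0.8, 0.0), (-0.8, 0.0),
# (0.0, 0.6) and (0.0, -0.6), each with a yaw chosen so the robot faces the table's center.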
#!/usr/bin/env python

import rospy

from smach.user_data import UserData

from thorp_toolkit.geometry import TF2, create_2d_pose
from thorp_smach.states.navigation import LookToPose
from thorp_smach.utils import wait_for_mbf

if __name__ == '__main__':
    rospy.init_node('run_look_to_pose_state')

    # MBF is the last component to start, so wait for it before running the state
    wait_for_mbf()

    ud = UserData()
    ud['robot_pose'] = TF2().transform_pose(None, 'base_footprint', 'map')
    ud['target_pose'] = create_2d_pose(0.0, 0.0, 0.0, 'map')
    state = LookToPose()
    t0 = rospy.get_time()
    outcome = state.execute(ud)
    rospy.loginfo("LookToPose completed in %.2fs with outcome '%s'", rospy.get_time() - t0, outcome)

    # Wait for ctrl-c to stop the application
    rospy.spin()
    rospy.signal_shutdown('All done.')
        self._fire_cannon_pub.publish(msg)
        return ThorpError(code=ThorpError.SUCCESS)

    def handle_cannon_command(self, request):
        if request.action == CannonCmdRequest.AIM:
            return self.aim_to_target()
        if request.action == CannonCmdRequest.TILT:
            return self.tilt(request.angle)
        if request.action == CannonCmdRequest.FIRE:
            return self.fire(request.shots)
        # note: an unrecognized action falls through and returns None, which makes the service call fail

    def spin(self):
        # continuously publish the cannon tilt joint state at 20 Hz
        rate = rospy.Rate(20)
        while not rospy.is_shutdown():
            self._js_msg.position[0] = self._cannon_tilt_angle
            self._js_msg.header.stamp = rospy.Time.now()
            self._joint_states_pub.publish(self._js_msg)
            try:
                rate.sleep()
            except rospy.exceptions.ROSInterruptException:
                pass


if __name__ == '__main__':
    rospy.init_node("cannon_ctrl")

    TF2()  # start listener asap

    node = CannonCtrlNode()
    node.spin()
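# A minimal client sketch for the command handler above. It assumes the CannonCmd service type
# implied by CannonCmdRequest lives in thorp_msgs and is advertised as 'cannon_command'; both
# names are assumptions, as neither appears in this snippet:
import rospy
from thorp_msgs.srv import CannonCmd, CannonCmdRequest

def tilt_cannon(angle):
    rospy.wait_for_service('cannon_command')
    cannon_cmd = rospy.ServiceProxy('cannon_command', CannonCmd)
    return cannon_cmd(CannonCmdRequest(action=CannonCmdRequest.TILT, angle=angle))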
def execute(self, ud):
    bfp_to_arm_tf = Transform.create(TF2().lookup_transform('base_footprint', self.manip_frame))  # base to arm tf
    map_to_fbp_tf = Transform.create(TF2().lookup_transform('map', 'base_footprint'))  # map to base tf
    pick_locs = []
    for name, picking_pose in ud['picking_poses'].items():
        # current distance from the robot (stored but not used by now)
        dist_from_robot = distance_2d(picking_pose, ud['robot_pose'])
        # apply the base to arm tf, so we get the arm pose on map reference for each location
        arm_pose_mrf = (Transform.create(picking_pose) * bfp_to_arm_tf).to_geometry_msg_pose_stamped()
        # detected object poses are in arm reference, so their norm is the distance to the arm
        objs = []
        for i, obj in enumerate(ud['objects']):
            # transform object pose from base to map frame, so we can compare distances to picking locations;
            # we must limit max arm reach with our navigation tolerance when reaching the goal, as that will
            # be our probable picking pose, instead of the ideal one received as input
            obj_pose_mrf = (map_to_fbp_tf * Transform.create(obj.pose)).to_geometry_msg_pose_stamped()
            dist = distance_2d(obj_pose_mrf, arm_pose_mrf)  # both on map rf
            if dist <= cfg.MAX_ARM_REACH - cfg.TIGHT_DIST_TOLERANCE:
                objs.append(ObjectToPick(obj.id, dist, obj.pose))
        if not objs:
            continue  # no objects reachable from here; keep going
        # sort objects by increasing distance from the arm; that should make picking easier,
        # as we won't hit closer objects when going over them (not 100% sure if this is true)
        objs = sorted(objs, key=lambda o: o.distance)
        # set also the approach and detach poses, at APPROACH_DIST_TO_TABLE and DETACH_DIST_FROM_TABLE respectively
        approach_pose = deepcopy(picking_pose)
        translate_pose(approach_pose, -(cfg.APPROACH_DIST_TO_TABLE - cfg.PICKING_DIST_TO_TABLE), 'x')
        detach_pose = deepcopy(picking_pose)
        translate_pose(detach_pose, -(cfg.DETACH_DIST_FROM_TABLE - cfg.PICKING_DIST_TO_TABLE), 'x')
        pick_locs.append(PickLocation(name, dist_from_robot, objs, arm_pose_mrf,
                                      approach_pose, picking_pose, detach_pose))
    if not pick_locs:
        rospy.loginfo("No reachable objects")
        return 'no_reachable_objs'
    # find the most efficient sequence of picking locations: try all permutations, sort and get the first
    possible_plans = []
    for perm in permutations(pick_locs):
        perm = list(perm)  # convert to list, so we can remove elements
        self.filter_pick_locs(perm)
        possible_plans.append(PickingPlan(self.traveled_dist(ud['robot_pose'], perm), perm))
    # sort by 1) fewer locations to visit 2) shorter travelled distance (to break ties)
    sorted_plans = sorted(possible_plans, key=lambda p: (len(p.locations), p.travelled_dist))
    sorted_plocs = sorted_plans[0].locations
    # remove duplicated objects from the location where they are at the longest distance to the arm
    objs_to_remove = []
    for ploc in sorted_plocs:
        dup_objs = 0
        for obj in ploc.objects:
            for other_pl in [pl for pl in sorted_plocs if pl != ploc]:
                for other_obj in other_pl.objects:
                    if obj.name == other_obj.name:
                        # keep the duplicate closest to the arm; mark the other for removal
                        if obj.distance >= other_obj.distance:
                            obj_to_rm = (ploc, obj)
                        else:
                            obj_to_rm = (other_pl, other_obj)
                        if obj_to_rm not in objs_to_remove:
                            objs_to_remove.append(obj_to_rm)
                            dup_objs += 1
    for ploc, obj in objs_to_remove:
        ploc.objects.remove(obj)  # TODO make a test for this and move the remove to within the loop (much simpler)
    # TODO: why I sort again? I may break the optimization done above!!!
    sorted_plocs = sorted(sorted_plocs, key=lambda pl: len(pl.objects))  # sort again after removing duplicates
    self.viz_pick_locs(sorted_plocs)
    ud['picking_locs'] = sorted_plocs
    return 'succeeded'
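# The plan ranking above relies on Python's lexicographic tuple ordering: plans compare first by
# number of locations to visit, and travelled distance only breaks ties. A toy illustration:
plans = [(3, 7.5), (2, 9.0), (2, 4.2)]  # (locations to visit, travelled distance)
assert sorted(plans) == [(2, 4.2), (2, 9.0), (3, 7.5)]
# Note also that trying every permutation is factorial in the number of picking locations, which
# stays tractable only because a table offers at most four of them.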
def _highlight_target(self, pose, fire=False):
    # draw a disc marker under the target: red when firing, blue when just aiming
    Visualization().add_disc_marker(TF2().transform_pose(pose, pose.header.frame_id, 'map'),
                                    [1.0, 1.0],
                                    [0.8, 0.0, 0.0, 0.6] if fire else [0.0, 0.0, 0.8, 0.4])
    Visualization().publish_markers()