class RealPoseBroadcaster(object):
    """Broadcast the robot's ground-truth pose.

    Subscribes to ``base_pose_ground_truth`` (Odometry) and republishes the
    contained pose at 10 Hz as:
      * a tf transform  map -> real_robot_pose,
      * an RViz marker (via MarkerPlacer), and
      * a PoseStamped on ``/real_pose``.

    Note: the publishing loop runs inside ``__init__`` and only returns when
    rospy is shut down.
    """

    def __init__(self):
        rospy.init_node("real_pose_broadcaster")
        # Latest ground-truth odometry and the pose extracted from it.
        self.odom = Odometry()
        self.linear = Point(0.0, 0.0, 0.0)
        # BUG FIX: (0, 0, 0, 0) is not a valid quaternion; the identity
        # rotation is (0, 0, 0, 1).  With the zero quaternion an invalid
        # transform was broadcast until the first odometry message arrived.
        self.quat = Quaternion(0.0, 0.0, 0.0, 1.0)
        rospy.Subscriber("base_pose_ground_truth", Odometry, self.get_odom)
        # Broadcast a transform relating the robot position to the map frame.
        self.br = tf.TransformBroadcaster()
        # Marker showing the real pose in RViz.
        self.mark_placer = MarkerPlacer("rviz_real_robot_pose", "map")
        self.mark_placer.set_scale(1.25, 0.25, 0.25)
        # Also publish the real pose for other nodes to consume.
        self.pose_pub = rospy.Publisher("/real_pose", PoseStamped, queue_size=10)
        # Publish at 10 Hz (rospy.Rate takes a frequency in Hz, not a period;
        # the original comment claimed "every 10 seconds").
        self.rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.br.sendTransform(
                translation=(self.linear.x, self.linear.y, self.linear.z),
                rotation=(self.quat.x, self.quat.y, self.quat.z, self.quat.w),
                time=rospy.Time.now(),
                child="real_robot_pose",
                parent="map")
            self.mark_placer.place_marker([self.linear], [self.quat])
            pose = PoseStamped()
            # NOTE(review): leading-slash frame ids ("/map") are deprecated in
            # tf2; kept as-is since downstream consumers may expect it.
            pose.header.frame_id = "/map"
            pose.header.stamp = rospy.Time.now()
            pose.pose.position = self.linear
            pose.pose.orientation = self.quat
            self.pose_pub.publish(pose)
            self.rate.sleep()

    def get_odom(self, odometry):
        """Cache the latest odometry and extract position and orientation."""
        self.odom = odometry
        self.linear = self.odom.pose.pose.position
        self.quat = self.odom.pose.pose.orientation
class Navigator(object):
    """Drive a unicycle robot along A*-planned paths.

    Fuses particle-filter pose estimates with integrated odometry, plans
    paths on a down-sampled and obstacle-expanded map, and publishes
    velocity commands on ``/cmd_vel`` that steer the robot through the
    planned waypoints: first to ``start_pos``, then to each remaining target.
    """

    def __init__(self, start_pos, target_list=None):
        """Initialise state, subscribers, the map and the initial path.

        Args:
            start_pos: (x, y) world position to reach before visiting targets.
            target_list: optional list of (x, y) targets to visit afterwards.
        """
        # The start position we want to hit before we follow targets.
        self.start_pos = start_pos
        # BUG FIX: target_list previously defaulted to a mutable [] shared
        # across instances and mutated by get_next_path()'s pop().  A caller-
        # supplied list is still aliased directly (previous behaviour).
        self.target_list = target_list if target_list is not None else []
        # nav_queue is used to contain a list of lists of coordinates to follow.
        self.nav_queue = list()
        # How close we must be to the current coordinate to call it reached.
        self.coord_dist_cutoff = 0.6
        # The current intermediate goal.
        self.curr_target = (0.0, 0.0)
        # Gains for calculating linear and angular speed.
        # ref: "A Stable Target-Tracking Control for Unicycle Mobile Robots",
        # Lee, S. et al.  Linear velocity depends only on K1; angular speed
        # depends on K1 and K2.
        self.K1_default = 1.5
        self.K2_default = -5.0
        self.K1_slow = 0.5
        self.K2_slow = -5.0
        self.K1 = self.K1_default
        self.K2 = self.K2_default
        # Marker array showing the planned path; spheres, slightly enlarged,
        # with a random colour so multiple planned paths can be told apart.
        self.planned_markers = MarkerPlacer("rviz_planned_path", "map", 4000)
        self.planned_markers.set_scale(0.12, 0.12, 0.12)
        self.planned_markers.set_type(Marker.SPHERE)
        self.planned_markers.random_color()
        # Marker array showing the path actually taken; same styling idea.
        self.taken_markers = MarkerPlacer("rviz_taken_path", "map", 4000)
        self.taken_markers.set_scale(0.12, 0.12, 0.12)
        self.taken_markers.set_type(Marker.SPHERE)
        self.taken_markers.random_color()
        # Marker array for reached goals: pink cylinders.
        self.goal_markers = MarkerPlacer("rviz_goals", "map", 40)
        self.goal_markers.set_scale(0.10, 0.10, 1.35)
        self.goal_markers.set_type(Marker.CYLINDER)
        self.goal_markers.set_color(1.0, 0.0, 1.0, 1.0)
        # Marker for the pose the navigator is acting on.
        self.pose_marker = MarkerPlacer("/rviz_nav_pose", "/map")
        self.pose_marker.set_color(1.0, 1.0, 0.0, 1.0)
        self.pose_marker.set_scale(1.25, 0.25, 0.25)
        # Current fused pose (particle filter + odometry).  Yaw is cached
        # separately to avoid repeated quat -> yaw -> quat conversions, since
        # navigation uses the yaw directly.
        self.pose = Pose()
        self.yaw = 0.0
        # Real pose, used to drop markers along the followed path.
        self.real_pose = PoseStamped()
        rospy.Subscriber("/real_pose", PoseStamped, self.get_real_pose)
        # Particle filter pose.
        self.particle_pose = PoseStamped()
        rospy.Subscriber("/particle_pose", PoseStamped, self.get_particle_pose)
        # Wait for a particle update before throwing odometry updates at the
        # uninitialised position.
        # BUG FIX: this topic carries PoseStamped messages (see the subscriber
        # above); waiting for an Odometry message here would never match.
        rospy.wait_for_message("/particle_pose", PoseStamped)
        # Wait for an Odometry message to get an initial time stamp for
        # integrating odometry into the robot position.
        rospy.wait_for_message("/odom", Odometry)
        self.last_odom_time = rospy.get_time()
        # Container for odometry plus the subscription itself.
        self.odom = Odometry()
        rospy.Subscriber("/odom", Odometry, self.get_odom)
        # Map used for path planning.
        self.m_s = None
        self.initialize_map()
        # Path from robot position to start position.  For now this is done
        # in the navigator, but could be outsourced to an external module
        # since it can take a very long time.
        self.xy_path = list()
        self.xy_pos_path = list()
        self.get_path_to_start()
        # Append start_pos since the planned path's resolution is not perfect.
        self.xy_pos_path.append(start_pos)
        # Show where we are going (hopefully!).
        self.mark_current_path()
        # Publisher for velocity commands.
        self.vel = Twist()
        self.vel_publisher = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        # First waypoint becomes the current goal.
        self.curr_target = self.xy_pos_path.pop(0)

    def run(self):
        """One navigation step: update the target and publish a velocity.

        Decides (1) when the current coordinate counts as reached (and
        whether it was the ultimate goal), (2) what the next coordinate is,
        (3) the heading towards it, and (4) the linear and angular speeds
        needed to hit it.
        """
        # Distance to the current coordinate.
        curr_dist = dist((self.pose.position.x, self.pose.position.y),
                         self.curr_target)
        if curr_dist < self.coord_dist_cutoff:
            # Drop a marker to show that we have been here.
            self.taken_markers.place_marker(
                [copy.deepcopy(self.real_pose.pose.position)],
                [copy.deepcopy(self.real_pose.pose.orientation)])
            # NOTE(review): the nesting below was reconstructed from a
            # whitespace-mangled source -- confirm against version control.
            if len(self.xy_pos_path) == 0:
                # Arrived near the last point in the list (the ultimate goal).
                if curr_dist < 0.2:
                    # Drop a goal marker.
                    self.goal_markers.place_marker(
                        [self.real_pose.pose.position],
                        [self.real_pose.pose.orientation])
                if len(self.target_list) > 0:
                    # New colours so this path can be told apart from the
                    # previous one.
                    self.taken_markers.random_color()
                    self.planned_markers.random_color()
                    # Plan a path to the nearest remaining target, convert it
                    # to world coordinates and mark it.
                    self.xy_path = self.get_next_path()
                    self.convert_current_path()
                    self.mark_current_path()
                else:
                    # No more targets: idle for a bit.
                    # BUG FIX: rospy.sleep() requires a duration argument;
                    # calling it without one raises TypeError.
                    rospy.sleep(1.0)
            else:
                # More coordinates to visit: pop off the next one!
                self.curr_target = self.xy_pos_path.pop(0)
                curr_dist = dist((self.pose.position.x, self.pose.position.y),
                                 self.curr_target)
        # Slow down when close to the end of the path.
        if len(self.xy_pos_path) < 5:
            self.K1 = self.K1_slow
            self.K2 = self.K2_slow
        else:
            self.K1 = self.K1_default
            self.K2 = self.K2_default
        # Heading towards the current target: atan2 of the target relative to
        # the robot, i.e. in a "virtual" coordinate system centred on the
        # robot.
        theta = math.atan2(self.curr_target[1] - self.pose.position.y,
                           self.curr_target[0] - self.pose.position.x)
        # Heading error; goes to zero when the correct heading is reached.
        d_theta = self.normalize(theta - self.yaw)
        # Publish a new set of linear and angular speeds.
        self.vel.linear.x = self.new_speed(curr_dist, d_theta)
        self.vel.angular.z = self.new_angular_speed(d_theta)
        self.vel_publisher.publish(self.vel)

    def new_speed(self, radius, target_theta):
        """Linear speed for distance *radius* and heading error *target_theta*.

        ref: "A Stable Target-Tracking Control for Unicycle Mobile Robots",
        Lee, S. et al.
        """
        return self.K1 * radius * math.cos(target_theta)

    def new_angular_speed(self, target_theta):
        """Angular speed for heading error *target_theta* (same reference)."""
        factor_1 = (-self.K1) * math.sin(target_theta) * math.cos(target_theta)
        factor_2 = self.K2 * target_theta
        return factor_1 - factor_2

    def get_real_pose(self, pose):
        """Cache the latest real pose of the robot."""
        self.real_pose = pose

    def get_particle_pose(self, particle_pose):
        """Adopt a particle-filter pose update as the assumed pose."""
        prev_particle_pose = self.particle_pose
        self.particle_pose = particle_pose
        # A sanity check should reject out-of-whack poses (which happen on
        # random occasions) and keep using the previous particle estimate
        # plus odometry instead.
        # TODO(geir): Implement the check somehow
        if True:
            self.pose.position = self.particle_pose.pose.position
            self.pose.orientation = self.particle_pose.pose.orientation
            # BUG FIX: euler_from_quaternion returns (roll, pitch, yaw) for
            # the default 'sxyz' axes; the original unpacked them as
            # (pitch, roll, yaw).  Only yaw is used, so behaviour was
            # unaffected, but the names were misleading.
            roll, pitch, yaw = tf.transformations.euler_from_quaternion(
                [self.pose.orientation.x, self.pose.orientation.y,
                 self.pose.orientation.z, self.pose.orientation.w])
            self.yaw = yaw

    def get_odom(self, odom):
        """Dead-reckon the assumed pose forward using an odometry update."""
        self.odom = odom
        # Time since the last update.
        time = rospy.get_time()
        d_time = time - self.last_odom_time
        self.last_odom_time = time
        # Linear and angular velocities from the odometry message.
        lin_vel = self.odom.twist.twist.linear.x
        ang_vel = self.odom.twist.twist.angular.z
        # Integrate yaw, x and y over the elapsed time.
        d_theta = ang_vel * d_time
        d_x = (lin_vel * math.cos(self.yaw)) * d_time
        d_y = (lin_vel * math.sin(self.yaw)) * d_time
        self.yaw += d_theta
        self.pose.position.x += d_x
        self.pose.position.y += d_y
        q = tf.transformations.quaternion_from_euler(0.0, 0.0, self.yaw)
        self.pose.orientation = Quaternion(*q)
        # Publish the marker for this pose.
        self.pose_marker.place_marker([self.pose.position],
                                      [self.pose.orientation])

    def initialize_map(self):
        """Load the map, down-sample it and expand obstacles."""
        self.m_s = MapStorage()
        # Down-sample to reduce the graph size for A*.
        self.m_s.downsample_map_by_half()
        self.m_s.downsample_map_by_half()
        # Expand obstacles to avoid collisions.  Previously expanded twice
        # (inside down-sampling) => 0.1 res -> 0.4 res.  Four more expansions
        # (the original comment said "three" but four calls follow) grow a
        # 0.4 m obstacle to ~1.6 m; with a 1 m robot that leaves 0.6 m of
        # error margin.  This is *MUCH* needed - anything less leads to a
        # boatload of collisions.
        self.m_s.expand_map()
        self.m_s.expand_map()
        self.m_s.expand_map()
        self.m_s.expand_map()

    def get_next_path(self):
        """Pop the target closest to the robot and A*-plan a path to it.

        Returns:
            The path as a list of grid (x, y) cells from astar_search.
        """
        robot_xy = (self.pose.position.x, self.pose.position.y)
        # Index of the target nearest the current robot position.
        closest_index = min(
            range(len(self.target_list)),
            key=lambda i: dist(robot_xy, (self.target_list[i][0],
                                          self.target_list[i][1])))
        pos_t = self.target_list.pop(closest_index)
        # Convert robot position and target to grid cells for the search.
        x_t, y_t = self.m_s.xy_from_xy_pos(pos_t[0], pos_t[1])
        x_r, y_r = self.m_s.xy_from_xy_pos(self.pose.position.x,
                                           self.pose.position.y)
        return astar_search(self.m_s, (x_r, y_r), (x_t, y_t))

    def get_path_to_start(self):
        """A*-plan a path from the current pose to the start position."""
        # Convert pose and start_pos from world coordinates to grid cells.
        x_r, y_r = self.m_s.xy_from_xy_pos(self.pose.position.x,
                                           self.pose.position.y)
        x_s, y_s = self.m_s.xy_from_xy_pos(self.start_pos[0],
                                           self.start_pos[1])
        self.xy_path = astar_search(self.m_s, (x_r, y_r), (x_s, y_s))
        # Convert the grid path into world coordinates for navigation.
        self.convert_current_path()

    def convert_current_path(self):
        """Convert self.xy_path (grid cells) to self.xy_pos_path (world)."""
        self.xy_pos_path = list()
        for p in self.xy_path:
            x_pos, y_pos = self.m_s.xy_pos_from_xy(p[0], p[1])
            self.xy_pos_path.append((x_pos, y_pos))

    def mark_current_path(self):
        """Place planned-path markers for every waypoint in xy_pos_path."""
        o = Quaternion()  # the marker interface requires an orientation
        p_list = [Point(pos[0], pos[1], 0.0) for pos in self.xy_pos_path]
        o_list = [o] * len(p_list)
        self.planned_markers.place_marker(p_list, o_list)

    def normalize(self, theta):
        """Wrap *theta* into the interval (-pi, pi]."""
        # BUG FIX: the first loop's condition was `theta <= math.pi`, which
        # sent every in-range angle on a pointless +2*pi trip that the second
        # loop then undid.  The intended test is against -pi; results are
        # unchanged, the wasted iterations are gone.
        while theta <= -math.pi:
            theta += 2.0 * math.pi
        while theta > math.pi:
            theta -= 2.0 * math.pi
        return theta