def update_heading( self ):
    """Refresh the goal, avoidance and blended heading vectors for this cycle."""
    # pull the raw heading from the go-to-goal behavior
    self.gtg_heading_vector = self.go_to_goal_controller.calculate_gtg_heading_vector()
    # pull the raw heading (plus the sensed obstacle points) from the avoidance behavior
    self.ao_heading_vector, self.obstacle_vectors = self.avoid_obstacles_controller.calculate_ao_heading_vector()

    # reduce both headings to unit length so only direction matters in the blend
    self.gtg_heading_vector = linalg.unit( self.gtg_heading_vector )
    self.ao_heading_vector = linalg.unit( self.ao_heading_vector )

    # weighted sum: alpha favors the goal, (1 - alpha) favors avoidance
    goal_term = linalg.scale( self.gtg_heading_vector, self.alpha )
    avoid_term = linalg.scale( self.ao_heading_vector, 1.0 - self.alpha )
    self.blended_heading_vector = linalg.add( goal_term, avoid_term )
    def update_heading(self):
        """Recompute and cache the goal, avoidance and blended headings."""
        # query each behavior controller for its current desired heading
        self.gtg_heading_vector = self.go_to_goal_controller.calculate_gtg_heading_vector()
        self.ao_heading_vector, self.obstacle_vectors = self.avoid_obstacles_controller.calculate_ao_heading_vector()

        # keep only the direction of each heading
        self.gtg_heading_vector = linalg.unit(self.gtg_heading_vector)
        self.ao_heading_vector = linalg.unit(self.ao_heading_vector)

        # convex combination of the two unit headings, weighted by alpha
        goal_part = linalg.scale(self.gtg_heading_vector, self.alpha)
        avoid_part = linalg.scale(self.ao_heading_vector, 1.0 - self.alpha)
        self.blended_heading_vector = linalg.add(goal_part, avoid_part)
    def draw_gtg_and_ao_controller_to_frame(self):
        """Render the blended go-to-goal / avoid-obstacles controller state."""
        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        def to_world(points):
            # map points from the robot's reference frame into world coordinates
            return linalg.rotate_and_translate_vectors(points, robot_theta, robot_pos)

        def draw_heading(vector, linewidth, color):
            # draw a standard-length heading vector anchored at the robot
            tip = linalg.scale(linalg.unit(vector), VECTOR_LEN)
            self.viewer.current_frame.add_lines([to_world([[0.0, 0.0], tip])],
                                                linewidth=linewidth,
                                                color=color,
                                                alpha=1.0)

        # detected environment boundary (sensor readings), closed into a polygon
        boundary = self.gtg_and_ao_controller.obstacle_vectors[:]
        boundary.append(boundary[0])
        self.viewer.current_frame.add_lines([to_world(boundary)],
                                            linewidth=0.005,
                                            color="black",
                                            alpha=1.0)

        # the avoid-obstacles, go-to-goal, and blended heading vectors
        draw_heading(self.gtg_and_ao_controller.ao_heading_vector, 0.005, "red")
        draw_heading(self.gtg_and_ao_controller.gtg_heading_vector, 0.005, "dark green")
        draw_heading(self.gtg_and_ao_controller.blended_heading_vector, 0.02, "blue")
    def draw_go_to_goal_controller_to_frame(self):
        """Render the go-to-goal controller's heading vector onto the current frame."""
        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # scale the unit heading to the standard display length
        heading_tip = linalg.scale(linalg.unit(self.go_to_goal_controller.gtg_heading_vector), VECTOR_LEN)
        # anchor the vector at the robot and map it into world coordinates
        heading_line = linalg.rotate_and_translate_vectors([[0.0, 0.0], heading_tip], robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([heading_line], linewidth=0.02, color="dark green", alpha=1.0)
  def calculate_fw_heading_vector( self, follow_direction ):
    """Compute the follow-wall heading vector for the given bearing side.

    Returns a 5-tuple, all in the robot's reference frame:
      ( fw_heading_vector, parallel_component, perpendicular_component,
        distance_vector, wall_surface )

    Raises ValueError if follow_direction is neither FWDIR_LEFT nor FWDIR_RIGHT.
    """
    # get the necessary variables for the working set of sensors
    #   the working set is the sensors on the side we are bearing on, indexed from rearmost to foremost on the robot
    #   NOTE: uses preexisting knowledge of how the sensors are stored and indexed
    if follow_direction == FWDIR_LEFT:
      # if we are following to the left, we bear on the righthand sensors
      sensor_placements = self.proximity_sensor_placements[7:3:-1]
      sensor_distances = self.supervisor.proximity_sensor_distances()[7:3:-1]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[7:3:-1]
    elif follow_direction == FWDIR_RIGHT:
      # if we are following to the right, we bear on the lefthand sensors
      sensor_placements = self.proximity_sensor_placements[:4]
      sensor_distances = self.supervisor.proximity_sensor_distances()[:4]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[:4]
    else:
      # narrow exception type so callers can distinguish a bad argument
      raise ValueError( "unknown wall-following direction" )

    if not any( sensor_detections ):
      # if there is no wall to track detected, we default to predefined reference points
      # NOTE: these points are designed to turn the robot towards the bearing side, which aids with cornering behavior
      #       the resulting heading vector is also meant to point close to directly aft of the robot
      #       this helps when determining switching conditions in the supervisor state machine
      p1 = [ -0.2, 0.0 ]
      if follow_direction == FWDIR_LEFT:
        p2 = [ -0.2, -0.0001 ]
      else:
        p2 = [ -0.2, 0.0001 ]
    else:
      # sort the sensor distances along with their corresponding indices
      # (this method ensures two different sensors are always used)
      sensor_distances, indices = zip( *sorted( zip( sensor_distances, range( 4 ) ) ) )
      # get the two smallest sensor distances and their corresponding indices
      d1, d2 = sensor_distances[0:2]
      i1, i2 = indices[0:2]

      # calculate the vectors to the obstacle in the robot's reference frame
      sensor1_pos, sensor1_theta = sensor_placements[i1].vunpack()
      sensor2_pos, sensor2_theta = sensor_placements[i2].vunpack()
      p1, p2 = [ d1, 0.0 ], [ d2, 0.0 ]
      p1 = linalg.rotate_and_translate_vector( p1, sensor1_theta, sensor1_pos )
      p2 = linalg.rotate_and_translate_vector( p2, sensor2_theta, sensor2_pos )

      # ensure p2 is forward of p1
      if i2 < i1: p1, p2 = p2, p1

    # compute the key vectors and auxiliary data
    l_wall_surface = [ p2, p1 ]                   # estimated wall segment, forward point first
    l_parallel_component = linalg.sub( p2, p1 )   # direction along the wall
    # perpendicular from the robot to the wall line
    l_distance_vector = linalg.sub( p1, linalg.proj( p1, l_parallel_component ) )
    unit_perp = linalg.unit( l_distance_vector )
    distance_desired = linalg.scale( unit_perp, self.follow_distance )
    # error between actual and desired standoff distance
    l_perpendicular_component = linalg.sub( l_distance_vector, distance_desired )
    l_fw_heading_vector = linalg.add( l_parallel_component, l_perpendicular_component )

    return l_fw_heading_vector, l_parallel_component, l_perpendicular_component, l_distance_vector, l_wall_surface
  def calculate_fw_heading_vector( self, follow_direction ):
    """Compute the follow-wall heading vector for the given bearing side.

    Returns a 5-tuple, all in the robot's reference frame:
      ( fw_heading_vector, parallel_component, perpendicular_component,
        distance_vector, wall_surface )

    Raises ValueError if follow_direction is neither FWDIR_LEFT nor FWDIR_RIGHT.
    """
    # get the necessary variables for the working set of sensors
    #   the working set is the sensors on the side we are bearing on, indexed from rearmost to foremost on the robot
    #   NOTE: uses preexisting knowledge of how the sensors are stored and indexed
    if follow_direction == FWDIR_LEFT:
      # if we are following to the left, we bear on the righthand sensors
      sensor_placements = self.proximity_sensor_placements[7:3:-1]
      sensor_distances = self.supervisor.proximity_sensor_distances()[7:3:-1]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[7:3:-1]
    elif follow_direction == FWDIR_RIGHT:
      # if we are following to the right, we bear on the lefthand sensors
      sensor_placements = self.proximity_sensor_placements[:4]
      sensor_distances = self.supervisor.proximity_sensor_distances()[:4]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[:4]
    else:
      # narrow exception type so callers can distinguish a bad argument
      raise ValueError( "unknown wall-following direction" )

    if not any( sensor_detections ):
      # if there is no wall to track detected, we default to predefined reference points
      # NOTE: these points are designed to turn the robot towards the bearing side, which aids with cornering behavior
      #       the resulting heading vector is also meant to point close to directly aft of the robot
      #       this helps when determining switching conditions in the supervisor state machine
      p1 = [ -0.2, 0.0 ]
      if follow_direction == FWDIR_LEFT:
        p2 = [ -0.2, -0.0001 ]
      else:
        p2 = [ -0.2, 0.0001 ]
    else:
      # sort the sensor distances along with their corresponding indices
      # (this method ensures two different sensors are always used)
      sensor_distances, indices = zip( *sorted( zip( sensor_distances, range( 4 ) ) ) )
      # get the two smallest sensor distances and their corresponding indices
      d1, d2 = sensor_distances[0:2]
      i1, i2 = indices[0:2]

      # calculate the vectors to the obstacle in the robot's reference frame
      sensor1_pos, sensor1_theta = sensor_placements[i1].vunpack()
      sensor2_pos, sensor2_theta = sensor_placements[i2].vunpack()
      p1, p2 = [ d1, 0.0 ], [ d2, 0.0 ]
      p1 = linalg.rotate_and_translate_vector( p1, sensor1_theta, sensor1_pos )
      p2 = linalg.rotate_and_translate_vector( p2, sensor2_theta, sensor2_pos )

      # ensure p2 is forward of p1
      if i2 < i1: p1, p2 = p2, p1

    # compute the key vectors and auxiliary data
    l_wall_surface = [ p2, p1 ]                   # estimated wall segment, forward point first
    l_parallel_component = linalg.sub( p2, p1 )   # direction along the wall
    # perpendicular from the robot to the wall line
    l_distance_vector = linalg.sub( p1, linalg.proj( p1, l_parallel_component ) )
    unit_perp = linalg.unit( l_distance_vector )
    distance_desired = linalg.scale( unit_perp, self.follow_distance )
    # error between actual and desired standoff distance
    l_perpendicular_component = linalg.sub( l_distance_vector, distance_desired )
    l_fw_heading_vector = linalg.add( l_parallel_component, l_perpendicular_component )

    return l_fw_heading_vector, l_parallel_component, l_perpendicular_component, l_distance_vector, l_wall_surface
# Example #7
# 0
  def draw_go_to_goal_controller_to_frame( self ):
    """Render the go-to-goal heading vector onto the current frame."""
    robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

    # scale the unit heading to the standard display length
    heading_tip = linalg.scale( linalg.unit( self.go_to_goal_controller.gtg_heading_vector ), VECTOR_LEN )
    # anchor the vector at the robot and map it into world coordinates
    heading_line = linalg.rotate_and_translate_vectors( [ [ 0.0, 0.0 ], heading_tip ], robot_theta, robot_pos )
    self.viewer.current_frame.add_lines( [ heading_line ],
                                         linewidth = 0.02,
                                         color = "dark green",
                                         alpha = 1.0 )
  def draw_gtg_and_ao_controller_to_frame( self ):
    """Render the blended go-to-goal / avoid-obstacles controller state."""
    robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

    def to_world( points ):
      # map points from the robot's reference frame into world coordinates
      return linalg.rotate_and_translate_vectors( points, robot_theta, robot_pos )

    def draw_heading( vector, linewidth, color ):
      # draw a standard-length heading vector anchored at the robot
      tip = linalg.scale( linalg.unit( vector ), VECTOR_LEN )
      self.viewer.current_frame.add_lines( [ to_world( [ [ 0.0, 0.0 ], tip ] ) ],
                                           linewidth = linewidth,
                                           color = color,
                                           alpha = 1.0 )

    # detected environment boundary (sensor readings), closed into a polygon
    boundary = self.gtg_and_ao_controller.obstacle_vectors[:]
    boundary.append( boundary[0] )
    self.viewer.current_frame.add_lines( [ to_world( boundary ) ],
                                         linewidth = 0.005,
                                         color = "black",
                                         alpha = 1.0 )

    # the avoid-obstacles, go-to-goal, and blended heading vectors
    draw_heading( self.gtg_and_ao_controller.ao_heading_vector, 0.005, "red" )
    draw_heading( self.gtg_and_ao_controller.gtg_heading_vector, 0.005, "dark green" )
    draw_heading( self.gtg_and_ao_controller.blended_heading_vector, 0.02, "blue" )
    def _draw_follow_wall_controller_to_frame_by_side(self, side):
        """Render the follow-wall controller's state for one bearing side.

        Draws the estimated wall surface, the robot-to-wall range line, and
        the computed follow-wall heading vector for the given side.

        Raises ValueError if side is neither FWDIR_LEFT nor FWDIR_RIGHT.
        """
        # select the controller state corresponding to the requested side
        if side == FWDIR_LEFT:
            surface_line = self.follow_wall_controller.l_wall_surface
            distance_vector = self.follow_wall_controller.l_distance_vector
            fw_heading_vector = self.follow_wall_controller.l_fw_heading_vector
        elif side == FWDIR_RIGHT:
            surface_line = self.follow_wall_controller.r_wall_surface
            distance_vector = self.follow_wall_controller.r_distance_vector
            fw_heading_vector = self.follow_wall_controller.r_fw_heading_vector
        else:
            # narrow exception type so callers can distinguish a bad argument
            raise ValueError("unrecognized argument: follow-wall direction indicator")

        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # draw the estimated wall surface
        surface_line = linalg.rotate_and_translate_vectors(surface_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([surface_line], linewidth=0.01, color="black", alpha=1.0)

        # draw the measuring line from the robot to the wall
        range_line = [[0.0, 0.0], distance_vector]
        range_line = linalg.rotate_and_translate_vectors(range_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([range_line], linewidth=0.005, color="black", alpha=1.0)

        # draw the computed follow-wall vector
        fw_heading_vector = linalg.scale(linalg.unit(fw_heading_vector), VECTOR_LEN)
        vector_line = [[0.0, 0.0], fw_heading_vector]
        vector_line = linalg.rotate_and_translate_vectors(vector_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([vector_line], linewidth=0.02, color="orange", alpha=1.0)
    def _draw_follow_wall_controller_to_frame_by_side(self, side):
        """Render the follow-wall controller's state for one bearing side.

        Draws the estimated wall surface, the robot-to-wall range line, and
        the computed follow-wall heading vector for the given side.

        Raises ValueError if side is neither FWDIR_LEFT nor FWDIR_RIGHT.
        """
        # select the controller state corresponding to the requested side
        if side == FWDIR_LEFT:
            surface_line = self.follow_wall_controller.l_wall_surface
            distance_vector = self.follow_wall_controller.l_distance_vector
            fw_heading_vector = self.follow_wall_controller.l_fw_heading_vector
        elif side == FWDIR_RIGHT:
            surface_line = self.follow_wall_controller.r_wall_surface
            distance_vector = self.follow_wall_controller.r_distance_vector
            fw_heading_vector = self.follow_wall_controller.r_fw_heading_vector
        else:
            # narrow exception type so callers can distinguish a bad argument
            raise ValueError(
                "unrecognized argument: follow-wall direction indicator")

        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # draw the estimated wall surface
        surface_line = linalg.rotate_and_translate_vectors(
            surface_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([surface_line],
                                            linewidth=0.01,
                                            color="black",
                                            alpha=1.0)

        # draw the measuring line from the robot to the wall
        range_line = [[0.0, 0.0], distance_vector]
        range_line = linalg.rotate_and_translate_vectors(
            range_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([range_line],
                                            linewidth=0.005,
                                            color="black",
                                            alpha=1.0)

        # draw the computed follow-wall vector
        fw_heading_vector = linalg.scale(linalg.unit(fw_heading_vector),
                                         VECTOR_LEN)
        vector_line = [[0.0, 0.0], fw_heading_vector]
        vector_line = linalg.rotate_and_translate_vectors(
            vector_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([vector_line],
                                            linewidth=0.02,
                                            color="orange",
                                            alpha=1.0)