def calculate_ao_heading_vector(self):
        """
        Compute an obstacle-avoidance heading for the robot.

        :return: A tuple ``(ao_heading_vector, obstacle_vectors)`` where
                 ``ao_heading_vector`` is the gain-weighted sum of the
                 per-sensor obstacle vectors and ``obstacle_vectors`` holds
                 one obstacle position per proximity sensor, all expressed
                 in the robot's reference frame.
        """
        # one slot per sensor; every slot is overwritten in the loop below
        obstacle_vectors = [[0.0, 0.0]] * len(self.proximity_sensor_placements)
        ao_heading_vector = [0.0, 0.0]

        # get the distances indicated by the robot's sensor readings
        sensor_distances = self.supervisor.proximity_sensor_distances()

        # calculate the position of detected obstacles and find an avoidance vector
        # NOTE(review): the original also unpacked the estimated robot pose here
        # into robot_pos/robot_theta but never used either value — dropped as
        # dead code (estimated_pose() appears to be a pure accessor; confirm).
        for i, distance in enumerate(sensor_distances):
            # obstacle position: the reading projected along the sensor's axis,
            # then transformed from the sensor frame into the robot frame
            sensor_pos, sensor_theta = self.proximity_sensor_placements[
                i].vunpack()
            vector = linalg.rotate_and_translate_vector(
                [distance, 0.0], sensor_theta, sensor_pos)
            obstacle_vectors[
                i] = vector  # store the obstacle vectors in the robot's reference frame

            # accumulate the heading vector within the robot's reference frame
            ao_heading_vector = linalg.add(
                ao_heading_vector, linalg.scale(vector, self.sensor_gains[i]))

        return ao_heading_vector, obstacle_vectors
# ---------- 예제 #2 (Example #2) ----------
  def _bounding_circle( self ):
    # Bounding circle of this segment: centered at the segment midpoint,
    # with radius equal to half the segment's length.
    segment = self._as_vector()
    half_segment = linalg.scale( segment, 0.5 )

    center = linalg.add( self.vertexes[0], half_segment )
    radius = linalg.mag( segment ) * 0.5

    return center, radius
# ---------- 예제 #3 (Example #3) ----------
  def _bounding_circle( self ):
    # Return (center, radius) of the circle bounding this segment:
    # center is the segment midpoint, radius is half the segment length.
    v = self._as_vector()
    vhalf = linalg.scale( v, 0.5 )

    c = linalg.add( self.vertexes[0], vhalf )
    r = linalg.mag( v ) * 0.5

    return c, r
    def _draw_detection_to_frame(self):
        """
        Draw a small black circle on the viewer's current frame at the point
        where the proximity sensor detected an obstacle, if any.
        """
        # target_delta is the fractional position of the hit along the
        # detector line; None means no detection this cycle
        target_delta = self.proximity_sensor.target_delta
        if target_delta is not None:  # identity test — comparing to None with '!=' was unidiomatic
            detector_endpoints = self.proximity_sensor.detector_line.vertexes
            detector_vector = linalg.sub(detector_endpoints[1], detector_endpoints[0])
            # interpolate along the detector line to the hit point
            target_vector = linalg.add(detector_endpoints[0], linalg.scale(detector_vector, target_delta))

            self.viewer.current_frame.add_circle(pos=target_vector, radius=0.02, color="black", alpha=0.7)
  def calculate_fw_heading_vector( self, follow_direction ):
    """
    Compute a wall-following heading vector for the given bearing side.

    :param follow_direction: FWDIR_LEFT or FWDIR_RIGHT — the side of the robot
                             the followed wall is on
    :return: ( l_fw_heading_vector, l_parallel_component,
               l_perpendicular_component, l_distance_vector, l_wall_surface ),
             all expressed in the robot's reference frame
    :raises Exception: if follow_direction is not a known direction
    """

    # get the necessary variables for the working set of sensors
    #   the working set is the sensors on the side we are bearing on, indexed from rearmost to foremost on the robot
    #   NOTE: uses preexisting knowledge of the how the sensors are stored and indexed
    if follow_direction == FWDIR_LEFT:
      # if we are following to the left, we bear on the righthand sensors
      sensor_placements = self.proximity_sensor_placements[7:3:-1]
      sensor_distances = self.supervisor.proximity_sensor_distances()[7:3:-1]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[7:3:-1]
    elif follow_direction == FWDIR_RIGHT:
      # if we are following to the right, we bear on the lefthand sensors
      sensor_placements = self.proximity_sensor_placements[:4]
      sensor_distances = self.supervisor.proximity_sensor_distances()[:4]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[:4]
    else:
      raise Exception( "unknown wall-following direction" )

    if True not in sensor_detections:
      # if there is no wall to track detected, we default to predefined reference points
      # NOTE: these points are designed to turn the robot towards the bearing side, which aids with cornering behavior
      #       the resulting heading vector is also meant to point close to directly aft of the robot
      #       this helps when determining switching conditions in the supervisor state machine
      p1 = [ -0.2, 0.0 ]
      if follow_direction == FWDIR_LEFT: p2 = [ -0.2, -0.0001 ]
      if follow_direction == FWDIR_RIGHT: p2 = [ -0.2, 0.0001 ]
    else:
      # sort the sensor distances along with their corresponding indices
      sensor_distances, indices = zip( *sorted( zip( # this method ensures two different sensors are always used
                                      sensor_distances, # sensor distances
                                      [0, 1, 2, 3]      # corresponding indices
                                    ) ) )
      # get the smallest sensor distances and their corresponding indices
      d1, d2 = sensor_distances[0:2]
      i1, i2 = indices[0:2]

      # calculate the vectors to the obstacle in the robot's reference frame
      sensor1_pos, sensor1_theta = sensor_placements[i1].vunpack()
      sensor2_pos, sensor2_theta = sensor_placements[i2].vunpack()
      p1, p2 = [ d1, 0.0 ], [ d2, 0.0 ]
      p1 = linalg.rotate_and_translate_vector( p1, sensor1_theta, sensor1_pos )
      p2 = linalg.rotate_and_translate_vector( p2, sensor2_theta, sensor2_pos )

      # ensure p2 is forward of p1
      if i2 < i1: p1, p2 = p2, p1

    # compute the key vectors and auxiliary data
    # wall surface: the estimated wall segment from p2 back to p1
    l_wall_surface = [ p2, p1 ]
    # component parallel to the wall (direction of travel along it)
    l_parallel_component = linalg.sub( p2, p1 )
    # perpendicular vector from the wall to the robot
    l_distance_vector = linalg.sub( p1, linalg.proj( p1, l_parallel_component ) )
    unit_perp = linalg.unit( l_distance_vector )
    distance_desired = linalg.scale( unit_perp, self.follow_distance )
    # correction toward the desired stand-off distance from the wall
    l_perpendicular_component = linalg.sub( l_distance_vector, distance_desired )
    l_fw_heading_vector = linalg.add( l_parallel_component, l_perpendicular_component )

    return l_fw_heading_vector, l_parallel_component, l_perpendicular_component, l_distance_vector, l_wall_surface
  def calculate_fw_heading_vector( self, follow_direction ):
    """
    Compute a wall-following heading vector for the given bearing side.

    :param follow_direction: FWDIR_LEFT or FWDIR_RIGHT — which side of the
                             robot the followed wall is on
    :return: ( l_fw_heading_vector, l_parallel_component,
               l_perpendicular_component, l_distance_vector, l_wall_surface ),
             all expressed in the robot's reference frame
    :raises Exception: if follow_direction is not a known direction
    """

    # get the necessary variables for the working set of sensors
    #   the working set is the sensors on the side we are bearing on, indexed from rearmost to foremost on the robot
    #   NOTE: uses preexisting knowledge of the how the sensors are stored and indexed
    if follow_direction == FWDIR_LEFT:
      # if we are following to the left, we bear on the righthand sensors
      sensor_placements = self.proximity_sensor_placements[7:3:-1]
      sensor_distances = self.supervisor.proximity_sensor_distances()[7:3:-1]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[7:3:-1]
    elif follow_direction == FWDIR_RIGHT:
      # if we are following to the right, we bear on the lefthand sensors
      sensor_placements = self.proximity_sensor_placements[:4]
      sensor_distances = self.supervisor.proximity_sensor_distances()[:4]
      sensor_detections = self.supervisor.proximity_sensor_positive_detections()[:4]
    else:
      raise Exception( "unknown wall-following direction" )

    if True not in sensor_detections:
      # if there is no wall to track detected, we default to predefined reference points
      # NOTE: these points are designed to turn the robot towards the bearing side, which aids with cornering behavior
      #       the resulting heading vector is also meant to point close to directly aft of the robot
      #       this helps when determining switching conditions in the supervisor state machine
      p1 = [ -0.2, 0.0 ]
      if follow_direction == FWDIR_LEFT: p2 = [ -0.2, -0.0001 ]
      if follow_direction == FWDIR_RIGHT: p2 = [ -0.2, 0.0001 ]
    else:
      # sort the sensor distances along with their corresponding indices
      sensor_distances, indices = zip( *sorted( zip( # this method ensures two different sensors are always used
                                      sensor_distances, # sensor distances
                                      [0, 1, 2, 3]      # corresponding indices
                                    ) ) )
      # get the smallest sensor distances and their corresponding indices
      d1, d2 = sensor_distances[0:2]
      i1, i2 = indices[0:2]

      # calculate the vectors to the obstacle in the robot's reference frame
      sensor1_pos, sensor1_theta = sensor_placements[i1].vunpack()
      sensor2_pos, sensor2_theta = sensor_placements[i2].vunpack()
      p1, p2 = [ d1, 0.0 ], [ d2, 0.0 ]
      p1 = linalg.rotate_and_translate_vector( p1, sensor1_theta, sensor1_pos )
      p2 = linalg.rotate_and_translate_vector( p2, sensor2_theta, sensor2_pos )

      # ensure p2 is forward of p1
      if i2 < i1: p1, p2 = p2, p1

    # compute the key vectors and auxiliary data
    l_wall_surface = [ p2, p1 ]                                     # estimated wall segment, p2 back to p1
    l_parallel_component = linalg.sub( p2, p1 )                     # travel direction along the wall
    l_distance_vector = linalg.sub( p1, linalg.proj( p1, l_parallel_component ) )  # perpendicular offset to the wall
    unit_perp = linalg.unit( l_distance_vector )
    distance_desired = linalg.scale( unit_perp, self.follow_distance )
    l_perpendicular_component = linalg.sub( l_distance_vector, distance_desired )  # correction toward desired stand-off
    l_fw_heading_vector = linalg.add( l_parallel_component, l_perpendicular_component )

    return l_fw_heading_vector, l_parallel_component, l_perpendicular_component, l_distance_vector, l_wall_surface
# ---------- 예제 #7 (Example #7) ----------
  def transform_to( self, reference_pose ):
    # Express this pose (treated as relative) in the frame of reference_pose:
    # rotate the local translation by the reference heading, offset it by the
    # reference position, and sum the headings.
    local_vect, local_theta = self.vunpack()
    base_vect, base_theta = reference_pose.vunpack()

    rotated_offset = linalg.rotate_vector( local_vect, base_theta )
    world_vect = linalg.add( base_vect, rotated_offset )
    world_theta = base_theta + local_theta

    return Pose( world_vect, world_theta )
  def _draw_detection_to_frame( self ):
    """
    Draw a small black circle on the viewer's current frame at the point
    where the proximity sensor detected an obstacle, if any.
    """
    # target_delta is the fractional position of the hit along the detector
    # line; None means there was no detection this cycle
    target_delta = self.proximity_sensor.target_delta
    if target_delta is not None:  # identity test — comparing to None with '!=' was unidiomatic
      detector_endpoints = self.proximity_sensor.detector_line.vertexes
      detector_vector = linalg.sub( detector_endpoints[1], detector_endpoints[0] )
      # interpolate along the detector line to the hit point
      target_vector = linalg.add( detector_endpoints[0], linalg.scale( detector_vector, target_delta ) )

      self.viewer.current_frame.add_circle( pos = target_vector,
                                            radius = 0.02,
                                            color = "black",
                                            alpha = 0.7 )
# ---------- 예제 #9 (Example #9) ----------
    def transform_to(self, reference_pose):
        """
        Express this pose (treated as relative) in the frame of reference_pose.

        :param reference_pose: The pose whose frame this pose is relative to
        :return: A new Pose whose translation is this pose's translation
                 rotated by the reference heading and offset by the reference
                 position, and whose heading is the sum of both headings
        """
        rel_vect, rel_theta = self.vunpack(
        )  # elements of this pose (the relative pose)
        ref_vect, ref_theta = reference_pose.vunpack(
        )  # elements of the reference pose

        # construct the elements of the transformed pose
        result_vect_d = linalg.rotate_vector(rel_vect, ref_theta)
        result_vect = linalg.add(ref_vect, result_vect_d)
        result_theta = ref_theta + rel_theta

        return Pose(result_vect, result_theta)
  def update_heading( self ):
    """Recompute the go-to-goal, avoid-obstacles and blended heading vectors."""
    # pull fresh headings from both low-level controllers
    gtg = self.go_to_goal_controller.calculate_gtg_heading_vector()
    ao, obstacles = self.avoid_obstacles_controller.calculate_ao_heading_vector()

    # store only the directions — normalize both to unit length
    self.gtg_heading_vector = linalg.unit( gtg )
    self.ao_heading_vector = linalg.unit( ao )
    self.obstacle_vectors = obstacles

    # blend: alpha weights the goal heading, (1 - alpha) weights avoidance
    weighted_gtg = linalg.scale( self.gtg_heading_vector, self.alpha )
    weighted_ao = linalg.scale( self.ao_heading_vector, 1.0 - self.alpha )
    self.blended_heading_vector = linalg.add( weighted_gtg, weighted_ao )
# ---------- 예제 #11 (Example #11) ----------
    def update_heading(self):
        """
        Recompute the stored go-to-goal, avoid-obstacles and blended heading
        vectors from the two low-level controllers.
        """
        # generate and store the vectors generated by the two controller types
        self.gtg_heading_vector = self.go_to_goal_controller.calculate_gtg_heading_vector()
        self.ao_heading_vector, self.obstacle_vectors = self.avoid_obstacles_controller.calculate_ao_heading_vector()

        # normalize the heading vectors
        self.gtg_heading_vector = linalg.unit(self.gtg_heading_vector)
        self.ao_heading_vector = linalg.unit(self.ao_heading_vector)

        # generate the blended vector: alpha weights the goal direction,
        # (1 - alpha) weights obstacle avoidance
        self.blended_heading_vector = linalg.add(
            linalg.scale(self.gtg_heading_vector, self.alpha), linalg.scale(self.ao_heading_vector, 1.0 - self.alpha)
        )
# ---------- 예제 #12 (Example #12) ----------
    def _draw_detection_to_frame(self, frame):
        """
        Visualize the detection of the proximity sensor
        :param frame: The frame to be used
        """
        delta = self.proximity_sensor.target_delta
        if delta is None:
            return  # nothing detected this cycle — draw nothing

        endpoints = self.proximity_sensor.detector_line.vertexes
        along = linalg.sub(endpoints[1], endpoints[0])
        # interpolate along the detector line to the hit point
        hit_point = linalg.add(endpoints[0], linalg.scale(along, delta))

        frame.add_circle(pos=hit_point,
                         radius=0.02,
                         color="black",
                         alpha=0.7)
# ---------- 예제 #13 (Example #13) ----------
def line_segment_intersection(line1, line2):
    """
    Intersect two line segments, each given as a pair of endpoints.

    :param line1: First segment — a sequence of two endpoint vectors
    :param line2: Second segment — a sequence of two endpoint vectors
    :return: ``(True, point, d1)`` where ``point`` is the intersection and
             ``d1`` is its parametric position along ``line1`` (0.0 at
             ``line1[0]``, 1.0 at ``line1[1]``), or ``(False, None, None)``
             when the segments are parallel or do not cross within both.

    Algorithm: see http://stackoverflow.com/questions/563198
    """
    nointersect_symbol = (False, None, None)

    # express each segment as an origin point plus a direction vector
    p1, r1 = line1[0], linalg.sub(line1[1], line1[0])
    p2, r2 = line2[0], linalg.sub(line2[1], line2[0])

    # a zero cross product means the segments are parallel (or degenerate)
    r1xr2 = float(linalg.cross(r1, r2))
    if r1xr2 == 0.0: return nointersect_symbol
    p2subp1 = linalg.sub(p2, p1)

    # parametric positions of the crossing point along each segment
    d1 = linalg.cross(p2subp1, r2) / r1xr2
    d2 = linalg.cross(p2subp1, r1) / r1xr2

    # the crossing is a real intersection only if it lies within both segments
    if 0.0 <= d1 <= 1.0 and 0.0 <= d2 <= 1.0:
        return True, linalg.add(p1, linalg.scale(r1, d1)), d1
    else:
        return nointersect_symbol
# ---------- 예제 #14 (Example #14) ----------
def line_segment_intersection( line1, line2 ):
  """
  Intersect two line segments, each given as a pair of endpoints.

  :return: ( True, point, d1 ) where point is the intersection and d1 is its
           parametric position along line1 (0.0 at line1[0], 1.0 at line1[1]),
           or ( False, None, None ) when the segments are parallel or do not
           cross within both segments
  """
  # see http://stackoverflow.com/questions/563198
  nointersect_symbol = ( False, None, None )

  # each segment as an origin point plus a direction vector
  p1, r1 = line1[0],   linalg.sub( line1[1], line1[0] )
  p2, r2 = line2[0],   linalg.sub( line2[1], line2[0] )

  # a zero cross product means the segments are parallel (or degenerate)
  r1xr2 = float( linalg.cross( r1, r2 ) )
  if r1xr2 == 0.0: return nointersect_symbol
  p2subp1 = linalg.sub( p2, p1 )

  # parametric positions of the crossing point along each segment
  d1 = linalg.cross( p2subp1, r2 ) / r1xr2
  d2 = linalg.cross( p2subp1, r1 ) / r1xr2

  # the crossing is a real intersection only if it lies within both segments
  if d1 >= 0.0 and d1 <= 1.0 and d2 >= 0.0 and d2 <= 1.0:
    return True, linalg.add( p1, linalg.scale( r1, d1 ) ), d1
  else:
    return nointersect_symbol
  def calculate_ao_heading_vector( self ):
    """
    :return: An obstacle avoidance heading vector (the gain-weighted sum of
             the per-sensor obstacle vectors) and the vectors to detected
             obstacles, both in the robot's reference frame
    """
    # initialize vector — one slot per sensor; every slot is overwritten below
    obstacle_vectors = [ [ 0.0, 0.0 ] ] * len( self.proximity_sensor_placements )
    ao_heading_vector = [ 0.0, 0.0 ]

    # get the distances indicated by the robot's sensor readings
    sensor_distances = self.supervisor.proximity_sensor_distances()

    # calculate the position of detected obstacles and find an avoidance vector
    # NOTE(review): robot_pos/robot_theta are unpacked here but never used below
    robot_pos, robot_theta = self.supervisor.estimated_pose().vunpack()
    for i in range( len( sensor_distances ) ):
      # calculate the position of the obstacle: the reading projected along
      # the sensor axis, transformed from the sensor frame to the robot frame
      sensor_pos, sensor_theta = self.proximity_sensor_placements[i].vunpack()
      vector = [ sensor_distances[i], 0.0 ]
      vector = linalg.rotate_and_translate_vector( vector, sensor_theta, sensor_pos )
      obstacle_vectors[i] = vector   # store the obstacle vectors in the robot's reference frame

      # accumulate the heading vector within the robot's reference frame
      ao_heading_vector = linalg.add( ao_heading_vector,
                                   linalg.scale( vector, self.sensor_gains[i] ) )

    return ao_heading_vector, obstacle_vectors