def calculate_fw_heading_vector( self, follow_direction ):
    """
    Compute the wall-following heading vector for the given follow direction.

    :param follow_direction: FWDIR_LEFT or FWDIR_RIGHT; any other value raises
    :return: ( l_fw_heading_vector, l_parallel_component,
               l_perpendicular_component, l_distance_vector, l_wall_surface ),
             all expressed in the robot's reference frame
    :raises Exception: if follow_direction is not a known direction constant
    """
    # get the necessary variables for the working set of sensors
    # the working set is the sensors on the side we are bearing on, indexed from rearmost to foremost on the robot
    # NOTE: uses preexisting knowledge of the how the sensors are stored and indexed
    if follow_direction == FWDIR_LEFT:
        # if we are following to the left, we bear on the righthand sensors
        # (reverse slice re-orders them rearmost-to-foremost)
        sensor_placements = self.proximity_sensor_placements[7:3:-1]
        sensor_distances = self.supervisor.proximity_sensor_distances()[7:3:-1]
        sensor_detections = self.supervisor.proximity_sensor_positive_detections()[7:3:-1]
    elif follow_direction == FWDIR_RIGHT:
        # if we are following to the right, we bear on the lefthand sensors
        sensor_placements = self.proximity_sensor_placements[:4]
        sensor_distances = self.supervisor.proximity_sensor_distances()[:4]
        sensor_detections = self.supervisor.proximity_sensor_positive_detections()[:4]
    else:
        raise Exception( "unknown wall-following direction" )

    if True not in sensor_detections:
        # if there is no wall to track detected, we default to predefined reference points
        # NOTE: these points are designed to turn the robot towards the bearing side, which aids with cornering behavior
        # the resulting heading vector is also meant to point close to directly aft of the robot
        # this helps when determining switching conditions in the supervisor state machine
        p1 = [ -0.2, 0.0 ]
        if follow_direction == FWDIR_LEFT: p2 = [ -0.2, -0.0001 ]
        if follow_direction == FWDIR_RIGHT: p2 = [ -0.2, 0.0001 ]
    else:
        # sort the sensor distances along with their corresponding indices
        sensor_distances, indices = zip( *sorted( zip(    # this method ensures two different sensors are always used
                                        sensor_distances, # sensor distances
                                        [0, 1, 2, 3]      # corresponding indices
                                      ) ) )
        # get the smallest sensor distances and their corresponding indices
        d1, d2 = sensor_distances[0:2]
        i1, i2 = indices[0:2]

        # calculate the vectors to the obstacle in the robot's reference frame
        sensor1_pos, sensor1_theta = sensor_placements[i1].vunpack()
        sensor2_pos, sensor2_theta = sensor_placements[i2].vunpack()
        p1, p2 = [ d1, 0.0 ], [ d2, 0.0 ]
        p1 = linalg.rotate_and_translate_vector( p1, sensor1_theta, sensor1_pos )
        p2 = linalg.rotate_and_translate_vector( p2, sensor2_theta, sensor2_pos )

        # ensure p2 is forward of p1 (sensor index increases toward the front)
        if i2 < i1: p1, p2 = p2, p1

    # compute the key vectors and auxiliary data
    l_wall_surface = [ p2, p1 ]
    l_parallel_component = linalg.sub( p2, p1 )
    # perpendicular offset from the robot to the wall line through p1/p2
    l_distance_vector = linalg.sub( p1, linalg.proj( p1, l_parallel_component ) )
    unit_perp = linalg.unit( l_distance_vector )
    distance_desired = linalg.scale( unit_perp, self.follow_distance )
    # error between actual and desired standoff distance
    l_perpendicular_component = linalg.sub( l_distance_vector, distance_desired )
    l_fw_heading_vector = linalg.add( l_parallel_component, l_perpendicular_component )

    return l_fw_heading_vector, l_parallel_component, l_perpendicular_component, l_distance_vector, l_wall_surface
def calculate_fw_heading_vector( self, follow_direction ):
    """
    Build the wall-following heading vector for the requested side.

    :param follow_direction: FWDIR_LEFT or FWDIR_RIGHT
    :return: ( heading, parallel_component, perpendicular_component,
               distance_vector, wall_surface ), all in the robot's frame
    :raises Exception: for an unrecognized follow_direction
    """
    # choose the working sensor window: the sensors on the bearing side,
    # ordered rearmost-to-foremost (relies on the fixed sensor layout)
    if follow_direction == FWDIR_LEFT:
        window = slice( 7, 3, -1 )   # righthand sensors, reversed
    elif follow_direction == FWDIR_RIGHT:
        window = slice( None, 4 )    # lefthand sensors
    else:
        raise Exception( "unknown wall-following direction" )

    sensor_placements = self.proximity_sensor_placements[window]
    sensor_distances = self.supervisor.proximity_sensor_distances()[window]
    sensor_detections = self.supervisor.proximity_sensor_positive_detections()[window]

    if True not in sensor_detections:
        # no wall in view: fall back to fixed reference points that bias the
        # robot toward the bearing side (helps cornering) and point the
        # resulting heading nearly directly aft (helps the supervisor's
        # state-switching checks)
        p1 = [ -0.2, 0.0 ]
        p2 = [ -0.2, -0.0001 ] if follow_direction == FWDIR_LEFT else [ -0.2, 0.0001 ]
    else:
        # rank readings by distance, keeping each reading paired with its
        # index so two distinct sensors are always selected
        ranked = sorted( zip( sensor_distances, [0, 1, 2, 3] ) )
        d_near, i_near = ranked[0]
        d_far, i_far = ranked[1]

        # project each reading along its sensor axis into the robot's frame
        pos_a, theta_a = sensor_placements[i_near].vunpack()
        pos_b, theta_b = sensor_placements[i_far].vunpack()
        p1 = linalg.rotate_and_translate_vector( [ d_near, 0.0 ], theta_a, pos_a )
        p2 = linalg.rotate_and_translate_vector( [ d_far, 0.0 ], theta_b, pos_b )

        # keep p2 as the more forward point (higher sensor index is forward)
        if i_far < i_near:
            p1, p2 = p2, p1

    # derive the wall model and the heading components from p1/p2
    wall_surface = [ p2, p1 ]
    parallel_component = linalg.sub( p2, p1 )
    # shortest vector from the robot to the wall line
    distance_vector = linalg.sub( p1, linalg.proj( p1, parallel_component ) )
    # standoff error: actual wall offset minus the desired follow distance
    perpendicular_component = linalg.sub(
        distance_vector,
        linalg.scale( linalg.unit( distance_vector ), self.follow_distance ) )
    heading = linalg.add( parallel_component, perpendicular_component )

    return heading, parallel_component, perpendicular_component, distance_vector, wall_surface
def calculate_ao_heading_vector(self):
    """
    Sum gain-weighted obstacle vectors into an avoidance heading.

    :return: An obstacle avoidance vector in the robot's reference frame
             and vectors to detected obstacles in the robot's reference frame
    """
    placements = self.proximity_sensor_placements
    obstacle_vectors = [[0.0, 0.0]] * len(placements)
    ao_heading_vector = [0.0, 0.0]

    # distances reported by the proximity sensors
    sensor_distances = self.supervisor.proximity_sensor_distances()

    # unpack the estimated pose (kept to match the original flow; the values
    # are not used below)
    robot_pos, robot_theta = self.supervisor.estimated_pose().vunpack()

    for idx, reading in enumerate(sensor_distances):
        # the obstacle lies along the sensor's axis at the measured distance;
        # map that point into the robot's reference frame
        s_pos, s_theta = placements[idx].vunpack()
        obstacle = linalg.rotate_and_translate_vector([reading, 0.0], s_theta, s_pos)
        obstacle_vectors[idx] = obstacle

        # accumulate the heading, weighting each obstacle by its sensor gain
        ao_heading_vector = linalg.add(
            ao_heading_vector, linalg.scale(obstacle, self.sensor_gains[idx]))

    return ao_heading_vector, obstacle_vectors
def calculate_gtg_heading_vector( self ):
    """
    Return the goal position expressed in the robot's reference frame.

    Applies the inverse of the robot's estimated pose to the world-frame goal.
    """
    inv_pos, inv_theta = self.supervisor.estimated_pose().inverse().vunpack()
    return linalg.rotate_and_translate_vector( self.supervisor.goal(), inv_theta, inv_pos )
def calculate_gtg_heading_vector( self ):
    """
    Compute the go-to-goal heading vector.

    :return: the supervisor's goal transformed into the robot's reference
             frame via the inverse of the robot's estimated pose
    """
    # get the inverse of the robot's pose
    robot_inv_pos, robot_inv_theta = self.supervisor.estimated_pose().inverse().vunpack()

    # calculate the goal vector in the robot's reference frame
    goal = self.supervisor.goal()
    goal = linalg.rotate_and_translate_vector( goal, robot_inv_theta, robot_inv_pos )

    return goal
def calculate_ao_heading_vector( self ):
    """
    Compute an obstacle-avoidance heading vector.

    Each proximity sensor reading is projected along its sensor's axis and
    transformed into the robot's reference frame; the heading is the
    gain-weighted sum of those obstacle vectors.

    :return: ( ao_heading_vector, obstacle_vectors ), both expressed in the
             robot's reference frame
    """
    # initialize vectors
    # NOTE: a comprehension is used instead of [[0.0, 0.0]] * n so the
    # placeholder entries are distinct lists rather than aliases of one list
    obstacle_vectors = [ [ 0.0, 0.0 ] for _ in self.proximity_sensor_placements ]
    ao_heading_vector = [ 0.0, 0.0 ]

    # get the distances indicated by the robot's sensor readings
    sensor_distances = self.supervisor.proximity_sensor_distances()

    # NOTE: the unused unpacking of self.supervisor.estimated_pose() that the
    # original performed here was dead code and has been removed

    # calculate the position of detected obstacles and find an avoidance vector
    for i in range( len( sensor_distances ) ):
        # the obstacle lies along the sensor's axis at the measured distance;
        # transform that point into the robot's reference frame
        sensor_pos, sensor_theta = self.proximity_sensor_placements[i].vunpack()
        vector = linalg.rotate_and_translate_vector( [ sensor_distances[i], 0.0 ], sensor_theta, sensor_pos )
        obstacle_vectors[i] = vector  # store the obstacle vector in the robot's reference frame

        # accumulate the heading vector, weighting each obstacle by its sensor gain
        ao_heading_vector = linalg.add( ao_heading_vector,
                                        linalg.scale( vector, self.sensor_gains[i] ) )

    return ao_heading_vector, obstacle_vectors