コード例 #1
0
    def draw_avoid_obstacles_controller_to_frame(self):
        # Render the avoid-obstacles controller state onto the current frame.
        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # sensor readings form the detected boundary; repeating the first
        # vertex closes the drawn polyline into a polygon
        boundary = self.avoid_obstacles_controller.obstacle_vectors[:]
        boundary.append(boundary[0])
        boundary = linalg.rotate_and_translate_vectors(
            boundary, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines(
            [boundary], linewidth=0.005, color="black", alpha=1.0)

        # the computed avoid-obstacles heading, scaled to the standard
        # display length and anchored at the robot's estimated pose
        heading = linalg.scale(
            linalg.unit(self.avoid_obstacles_controller.ao_heading_vector),
            VECTOR_LEN)
        heading_line = linalg.rotate_and_translate_vectors(
            [[0.0, 0.0], heading], robot_theta, robot_pos)
        self.viewer.current_frame.add_lines(
            [heading_line], linewidth=0.02, color="red", alpha=1.0)
コード例 #2
0
    def draw_avoid_obstacles_controller_to_frame(self):
        # Render the avoid-obstacles controller state onto the current frame.
        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()
        frame = self.viewer.current_frame

        # detected environment boundary (sensor readings); append the first
        # vertex again so the polyline closes into a polygon
        boundary = self.avoid_obstacles_controller.obstacle_vectors[:]
        boundary.append(boundary[0])
        boundary = linalg.rotate_and_translate_vectors(boundary, robot_theta, robot_pos)
        frame.add_lines([boundary], linewidth=0.005, color="black", alpha=1.0)

        # the computed avoid-obstacles heading, scaled to display length
        heading = linalg.scale(linalg.unit(self.avoid_obstacles_controller.ao_heading_vector), VECTOR_LEN)
        heading_line = linalg.rotate_and_translate_vectors([[0.0, 0.0], heading], robot_theta, robot_pos)
        frame.add_lines([heading_line], linewidth=0.02, color="red", alpha=1.0)
コード例 #3
0
    def _draw_follow_wall_controller_to_frame_by_side(self, side):
        """Draw the follow-wall controller state for one wall side.

        :param side: FWDIR_LEFT or FWDIR_RIGHT, selecting which wall
                     estimate to render
        :raises ValueError: if side is not a recognized direction indicator
        """
        # NOTE(review): removed a large block of commented-out drawing code
        # (perpendicular/parallel component vectors) and the two locals that
        # only the dead code referenced.
        if side == FWDIR_LEFT:
            surface_line = self.follow_wall_controller.l_wall_surface
            distance_vector = self.follow_wall_controller.l_distance_vector
            fw_heading_vector = self.follow_wall_controller.l_fw_heading_vector
        elif side == FWDIR_RIGHT:
            surface_line = self.follow_wall_controller.r_wall_surface
            distance_vector = self.follow_wall_controller.r_distance_vector
            fw_heading_vector = self.follow_wall_controller.r_fw_heading_vector
        else:
            # ValueError is more specific than Exception and still compatible
            # with any caller catching Exception
            raise ValueError("unrecognized argument: follow-wall direction indicator")

        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # draw the estimated wall surface
        surface_line = linalg.rotate_and_translate_vectors(surface_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([surface_line], linewidth=0.01, color="black", alpha=1.0)

        # draw the measuring line from the robot to the wall
        range_line = [[0.0, 0.0], distance_vector]
        range_line = linalg.rotate_and_translate_vectors(range_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([range_line], linewidth=0.005, color="black", alpha=1.0)

        # draw the computed follow-wall vector, scaled to display length
        fw_heading_vector = linalg.scale(linalg.unit(fw_heading_vector), VECTOR_LEN)
        vector_line = [[0.0, 0.0], fw_heading_vector]
        vector_line = linalg.rotate_and_translate_vectors(vector_line, robot_theta, robot_pos)
        self.viewer.current_frame.add_lines([vector_line], linewidth=0.02, color="orange", alpha=1.0)
コード例 #4
0
    def draw_robot_to_frame(self):
        # Record the robot's latest position on its traverse path, then render
        # the supervisor state, IR sensors, robot body, and (optionally) path.
        self.traverse_path.append(self.robot.pose.vposition())

        # internal (supervisor) state
        self.supervisor_view.draw_supervisor_to_frame()

        # IR sensors, only when invisibles are shown
        if self.viewer.show_invisibles:
            for sensor_view in self.ir_sensor_views:
                sensor_view.draw_proximity_sensor_to_frame()

        # robot body: bottom plate first ...
        frame = self.viewer.current_frame
        frame.add_polygons([self.robot.global_geometry.vertices],
                           color="blue", alpha=0.5)

        # ... then the decorative top plate at the robot's true pose
        robot_pos, robot_theta = self.robot.pose.vunpack()
        top_plate = linalg.rotate_and_translate_vectors(
            K3_TOP_PLATE, robot_theta, robot_pos)
        frame.add_polygons([top_plate], color="black", alpha=0.5)

        # traverse path, only when invisibles are shown
        if self.viewer.show_invisibles:
            self._draw_traverse_path_to_frame()
コード例 #5
0
  def draw_robot_to_frame( self ):
    # Record the robot's latest position on its traverse path, then render
    # the supervisor state, IR sensors, robot body, and (optionally) path.
    self.traverse_path.append( self.robot.pose.vposition() )

    # internal ( supervisor ) state
    self.supervisor_view.draw_supervisor_to_frame()

    # IR sensors, only when invisibles are drawn
    if self.viewer.draw_invisibles:
      for sensor_view in self.ir_sensor_views:
        sensor_view.draw_proximity_sensor_to_frame()

    # robot body: bottom plate first ...
    frame = self.viewer.current_frame
    frame.add_polygons( [ self.robot.global_geometry.vertexes ],
                        color = "blue",
                        alpha = 0.5 )

    # ... then the decorative top plate at the robot's true pose
    robot_pos, robot_theta = self.robot.pose.vunpack()
    top_plate = linalg.rotate_and_translate_vectors( K3_TOP_PLATE, robot_theta, robot_pos )
    frame.add_polygons( [ top_plate ],
                        color = "black",
                        alpha = 0.5 )

    # traverse path, only when invisibles are drawn
    if self.viewer.draw_invisibles:
      self._draw_traverse_path_to_frame()
コード例 #6
0
    def draw_proximity_sensor_to_frame(self):
        # Render this proximity sensor's view cone onto the current frame.
        sensor = self.proximity_sensor
        sensor_pos, sensor_theta = sensor.pose.vunpack()

        # view cone: the origin plus three points on the max-range arc,
        # spanning phi_view centered on the sensor axis
        r = sensor.max_range
        half_phi = sensor.phi_view / 2
        cone = [
            [0.0, 0.0],
            [r * cos(-half_phi), r * sin(-half_phi)],
            [r, 0.0],
            [r * cos(half_phi), r * sin(half_phi)],
        ]
        cone = linalg.rotate_and_translate_vectors(cone, sensor_theta, sensor_pos)

        # shade the cone by detection: faint with no target, more opaque as
        # target_delta decreases
        if self.proximity_sensor.target_delta is not None:
            alpha = 0.9 - 0.8 * self.proximity_sensor.target_delta
        else:
            alpha = 0.1

        self.viewer.current_frame.add_polygons([cone], color="red", alpha=alpha)
コード例 #7
0
    def draw_proximity_sensor_to_frame(self, frame):
        """
        Draws the proximity sensor to a frame
        :param frame: The frame to be used
        """
        sensor = self.proximity_sensor
        sensor_pos, sensor_theta = sensor.pose.vunpack()

        # view cone: the origin plus three points on the max-range arc,
        # spanning cone_angle centered on the sensor axis
        r = sensor.max_range
        half_angle = self.cone_angle / 2
        cone = [
            [0.0, 0.0],
            [r * cos(-half_angle), r * sin(-half_angle)],
            [r, 0.0],
            [r * cos(half_angle), r * sin(half_angle)]
        ]
        cone = linalg.rotate_and_translate_vectors(cone, sensor_theta, sensor_pos)

        # fully visible on a positive detection, faint otherwise
        alpha = 1 if self.proximity_sensor.target_delta is not None else 0.4

        frame.add_polygons([cone], color="red", alpha=alpha)
コード例 #8
0
    def draw_proximity_sensor_to_frame(self):
        """Render this proximity sensor's view cone onto the current frame."""
        proximity_sensor = self.proximity_sensor

        # grab proximity sensor pose values
        sensor_pos, sensor_theta = proximity_sensor.pose.vunpack()

        # build the sensor cone: origin plus three points on the max-range
        # arc spanning phi_view centered on the sensor axis
        r = proximity_sensor.max_range
        phi = proximity_sensor.phi_view
        sensor_cone_poly = [
            [0.0, 0.0],
            [r * cos(-phi / 2), r * sin(-phi / 2)],
            [r, 0.0],
            [r * cos(phi / 2), r * sin(phi / 2)],
        ]
        sensor_cone_poly = linalg.rotate_and_translate_vectors(sensor_cone_poly, sensor_theta, sensor_pos)

        # shade the sensor cone according to positive detection
        # ("is not None" is the correct identity check; "!= None" relies on
        # __ne__ and is flagged by PEP 8)
        if self.proximity_sensor.target_delta is not None:
            alpha = 0.9 - 0.8 * self.proximity_sensor.target_delta
        else:
            alpha = 0.1

        # add the sensor cone to the frame
        self.viewer.current_frame.add_polygons([sensor_cone_poly], color="red", alpha=alpha)
コード例 #9
0
    def draw_go_to_goal_controller_to_frame(self):
        # Render the computed go-to-goal heading onto the current frame.
        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

        # scale the unit heading to display length, then move it into world
        # coordinates at the robot's estimated pose
        heading = linalg.scale(
            linalg.unit(self.go_to_goal_controller.gtg_heading_vector), VECTOR_LEN)
        heading_line = linalg.rotate_and_translate_vectors(
            [[0.0, 0.0], heading], robot_theta, robot_pos)
        self.viewer.current_frame.add_lines(
            [heading_line], linewidth=0.02, color="dark green", alpha=1.0)
コード例 #10
0
ファイル: Polygon.py プロジェクト: yixinglin/sobot-rimulator
 def get_transformation_to_pose(self, pose):
     """Return a copy of this polygon transformed to the given pose.

     :param pose: The resulting pose
     :return: A new Polygon with this polygon's vertexes rotated and
              translated to the pose
     """
     pos, theta = pose.vunpack()
     transformed = linalg.rotate_and_translate_vectors(self.vertexes, theta, pos)
     return Polygon(transformed)
コード例 #11
0
    def _draw_follow_wall_controller_to_frame_by_side(self, side):
        # Render the follow-wall controller state for the given wall side
        # (FWDIR_LEFT or FWDIR_RIGHT).
        if side == FWDIR_LEFT:
            surface_line = self.follow_wall_controller.l_wall_surface
            distance_vector = self.follow_wall_controller.l_distance_vector
            fw_heading_vector = self.follow_wall_controller.l_fw_heading_vector
        elif side == FWDIR_RIGHT:
            surface_line = self.follow_wall_controller.r_wall_surface
            distance_vector = self.follow_wall_controller.r_distance_vector
            fw_heading_vector = self.follow_wall_controller.r_fw_heading_vector
        else:
            raise Exception(
                "unrecognized argument: follow-wall direction indicator")

        robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()
        frame = self.viewer.current_frame

        # the estimated wall surface
        frame.add_lines(
            [linalg.rotate_and_translate_vectors(surface_line, robot_theta, robot_pos)],
            linewidth=0.01, color="black", alpha=1.0)

        # the measuring line from the robot to the wall
        range_line = linalg.rotate_and_translate_vectors(
            [[0.0, 0.0], distance_vector], robot_theta, robot_pos)
        frame.add_lines([range_line], linewidth=0.005, color="black", alpha=1.0)

        # the computed follow-wall heading, scaled to display length
        heading = linalg.scale(linalg.unit(fw_heading_vector), VECTOR_LEN)
        heading_line = linalg.rotate_and_translate_vectors(
            [[0.0, 0.0], heading], robot_theta, robot_pos)
        frame.add_lines([heading_line], linewidth=0.02, color="orange", alpha=1.0)
コード例 #12
0
  def draw_go_to_goal_controller_to_frame( self ):
    # Render the computed go-to-goal heading onto the current frame.
    robot_pos, robot_theta = self.supervisor.estimated_pose.vunpack()

    # scale the unit heading to display length, then move it into world
    # coordinates at the robot's estimated pose
    heading = linalg.scale( linalg.unit( self.go_to_goal_controller.gtg_heading_vector ), VECTOR_LEN )
    heading_line = linalg.rotate_and_translate_vectors( [ [ 0.0, 0.0 ], heading ], robot_theta, robot_pos )
    self.viewer.current_frame.add_lines( [ heading_line ],
                                         linewidth = 0.02,
                                         color = "dark green",
                                         alpha = 1.0 )
コード例 #13
0
 def get_transformation_to_pose( self, pose ):
   # Return a copy of this line segment transformed to the given pose.
   pos, theta = pose.vunpack()
   transformed = linalg.rotate_and_translate_vectors( self.vertexes, theta, pos )
   return LineSegment( transformed )
コード例 #14
0
 def get_transformation_to_pose( self, pose ):
   # Build a new LineSegment whose vertexes are this segment's vertexes
   # rotated and translated to the given pose.
   target_pos, target_theta = pose.vunpack()
   return LineSegment( linalg.rotate_and_translate_vectors( self.vertexes, target_theta, target_pos ) )
コード例 #15
0
ファイル: polygon.py プロジェクト: timeeehd/sobot-rimulator
 def get_transformation_to_pose(self, pose):
     """Return a new Polygon: this polygon's vertices moved to the given pose."""
     pos, theta = pose.vunpack()
     transformed = linalg.rotate_and_translate_vectors(self.vertices, theta, pos)
     return Polygon(transformed)