Example #1
    def __init__(self,
                 robot_pose=(0, 0, 0),
                 visible=True,
                 default_color=cnames['yellow'],
                 element_dict=None,
                 min_view_dist=0.3,
                 max_view_dist=1.0):
        # Avoid a shared mutable default argument
        self.element_dict = {} if element_dict is None else element_dict
        # Define nominal viewcone
        self.min_view_dist = min_view_dist  # [m]
        self.max_view_dist = max_view_dist  # [m]
        self.view_angle = math.pi / 2  # [rad]
        viewcone_pts = [
            (0, 0),
            (self.max_view_dist * math.cos(self.view_angle / 2),
             self.max_view_dist * math.sin(self.view_angle / 2)),
            (self.max_view_dist * math.cos(-self.view_angle / 2),
             self.max_view_dist * math.sin(-self.view_angle / 2)),
            (0, 0),
        ]

        # Create SoftMax model for camera
        self.detection_model = camera_model_2D(self.min_view_dist,
                                               self.max_view_dist)

        # Instantiate Sensor superclass object
        update_rate = 1  # [Hz]
        has_physical_dimensions = True
        super(Camera, self).__init__(update_rate, has_physical_dimensions)

        # Set the ideal and actual viewcones
        self.ideal_viewcone = MapObject(
            'Ideal viewcone',
            viewcone_pts,
            visible=False,
            blocks_camera=False,
            color_str='pink',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.viewcone = MapObject(
            'Viewcone',
            viewcone_pts,
            alpha=0.2,
            visible=True,
            blocks_camera=False,
            color_str='lightyellow',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.view_pose = (0, 0, 0)

        # <>TODO: Add in and test an offset of (-0.1,-0.1)
        self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
        self._move_viewcone(robot_pose)

        # Set up the VB fusion parameters
        self.vb = VariationalBayes()
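
The three nonzero vertices above form a triangular wedge approximation of the 90-degree cone; a quick standalone check of that geometry (independent of the class):

import math

max_view_dist = 1.0
view_angle = math.pi / 2  # [rad]

viewcone_pts = ([(0, 0)]
                + [(max_view_dist * math.cos(a), max_view_dist * math.sin(a))
                   for a in (view_angle / 2, -view_angle / 2)]
                + [(0, 0)])
# -> [(0, 0), (0.707, 0.707), (0.707, -0.707), (0, 0)] (approximately)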
Example #2
def main():
    # Imports the demo needs; the ShapeLayer module path is a best guess
    # from the layout of the cops_and_robots.map_tools package.
    import matplotlib.animation as animation
    import matplotlib.pyplot as plt

    from cops_and_robots.map_tools.map_elements import MapArea, MapObject
    from cops_and_robots.map_tools.shape_layer import ShapeLayer

    fig = plt.figure()
    ax = fig.add_subplot(111)

    area1 = MapArea('Area1', [1, 1], pose=[1, 1, 0], visible=True, color_str='blue')
    area2 = MapArea('Area2', [1, 1], pose=[-1, 1, 0], visible=True, color_str='lightblue')
    object1 = MapObject('Object1', [1, 1], pose=[0, 1, 0], visible=True, color_str='red')
    object2 = MapObject('Object2', [1, 1], pose=[0, -1, 0], visible=True, color_str='magenta')

    static_elements = [area1, object1]
    dynamic_elements = [area2, object2]
    information_elements = []
    elements = {'static': static_elements, 'dynamic': dynamic_elements,
                'information': information_elements}
    invisible_elements = [area2, object1]

    bounds = [-5, -5, 5, 5]
    sl = ShapeLayer(elements, bounds=bounds)
    ax.set_xlim(bounds[0], bounds[2])
    ax.set_ylim(bounds[1], bounds[3])
    # sl = ShapeLayer(elements, invisible_elements=invisible_elements, bounds=[-5, -5, 5, 5])

    ani = animation.FuncAnimation(sl.fig, sl.update,
                                  frames=range(100),
                                  interval=100,
                                  repeat=True,
                                  blit=False)

    plt.show()
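
A standard entry-point guard (not in the original excerpt) makes the demo runnable as a script:

if __name__ == '__main__':
    main()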
Example #3
    def __init__(self, robot_pose=(0, 0, 0), visible=True,
                 default_color=cnames['yellow'], element_dict=None,
                 min_view_dist=0.3, max_view_dist=1.0):
        # Avoid a shared mutable default argument
        self.element_dict = {} if element_dict is None else element_dict
        # Define nominal viewcone
        self.min_view_dist = min_view_dist  # [m]
        self.max_view_dist = max_view_dist  # [m]
        self.view_angle = math.pi / 2  # [rad]
        viewcone_pts = [(0, 0),
                        (self.max_view_dist * math.cos(self.view_angle / 2),
                         self.max_view_dist * math.sin(self.view_angle / 2)),
                        (self.max_view_dist * math.cos(-self.view_angle / 2),
                         self.max_view_dist * math.sin(-self.view_angle / 2)),
                        (0, 0),
                        ]

        # Create SoftMax model for camera
        self.detection_model = camera_model_2D(self.min_view_dist,
                                               self.max_view_dist)

        # Instantiate Sensor superclass object
        update_rate = 1  # [Hz]
        has_physical_dimensions = True
        super(Camera, self).__init__(update_rate, has_physical_dimensions)

        # Set the ideal and actual viewcones
        self.ideal_viewcone = MapObject('Ideal viewcone',
                                        viewcone_pts,
                                        visible=False,
                                        blocks_camera=False,
                                        color_str='pink',
                                        pose=robot_pose,
                                        has_relations=False,
                                        centroid_at_origin=False,
                                        )
        self.viewcone = MapObject('Viewcone',
                                  viewcone_pts,
                                  alpha=0.2,
                                  visible=True,
                                  blocks_camera=False,
                                  color_str='lightyellow',
                                  pose=robot_pose,
                                  has_relations=False,
                                  centroid_at_origin=False,
                                  )
        self.view_pose = (0, 0, 0)

        # <>TODO: Add in and test an offset of (-0.1,-0.1)
        self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
        self._move_viewcone(robot_pose)

        # Set up the VB fusion parameters
        self.vb = VariationalBayes()
Example #4
def set_up_fleming(map_):
    """Set up a map as the generic Fleming space configuration.

    """
    # Make vicon field space object
    field_w = 7.5  # [m] field width
    field = MapArea('Field', [field_w, field_w], has_relations=False)

    # Make wall objects
    l = 1.15  # [m] wall length
    w = 0.1524  # [m] wall width
    wall_shape = [l, w]

    poses = np.array([[-7, -1.55, 1],
                      [-7, -1.55 - l, 1],
                      [-7 + l/2 + w/2, -1, 0],
                      [-7 + 3*l/2 + w/2, -1, 0],
                      [-7 + 5*l/2 + w/2, -1, 0],
                      [-2, -1.55, 1],
                      [-2 + 1*l/2 + w/2, -1, 0],
                      [-2 + 3*l/2 + w/2, -1, 0],
                      [-2 + 5*l/2 + w/2, -1, 0],
                      [-7.45 + 1*l/2 + w/2, 1.4, 0],
                      [-7.45 + 3*l/2 + w/2, 1.4, 0],
                      [-7.45 + 5*l/2 + w/2, 1.4, 0],
                      [-7.45 + 7*l/2 + w/2, 1.4, 0],
                      [-7.45 + 9*l/2 + w/2, 1.4, 0],
                      [l/2 + w/2, 1.4, 0],
                      [3*l/2 + w/2, 1.4, 0],
                      [0, 1.4 + l/2, 1],
                      [0, 1.4 + 3*l/2, 1],
                     ])

    # The third column holds 90-degree orientation flags; scale them to degrees
    poses = poses * np.array([1, 1, 90])

    n_walls = poses.shape[0]
    walls = []
    for i in range(n_walls):
        name = 'Wall ' + str(i)
        pose = poses[i, :]
        wall = MapObject(name, wall_shape, pose=pose, color_str='sienna',
                         has_relations=False, map_bounds=map_.bounds)
        walls.append(wall)

    # Make rectangular objects (desk, bookcase, etc)
    labels = ['Bookcase', 'Desk', 'Chair', 'Filing Cabinet',
              'Dining Table', 'Mars Poster', 'Cassini Poster',
              'Fridge', 'Checkers Table', 'Fern']
    colors = ['sandybrown', 'sandybrown', 'brown', 'black',
              'brown', 'bisque', 'black',
              'black', 'sandybrown', 'sage']
    poses = np.array([[0, -1.2, 270],  # Bookcase
                      [-5.5, -2, 0],  # Desk
                      [3, -2, 270],  # Chair
                      [-4, -1.32, 270],  # Filing Cabinet
                      [-8.24, -2.15, 270],  # Dining Table
                      [-4.38, 3.67, 270],  # Mars Poster
                      [1.38, 3.67, 270],  # Cassini Poster
                      [-9.1, 3.3, 315],  # Fridge
                      [2.04, 2.66, 270],  # Checkers Table
                      [-2.475, 1.06, 270],  # Fern
                     ])
    sizes = np.array([[0.18, 0.38],  # Bookcase
                      [0.61, 0.99],  # Desk
                      [0.46, 0.41],  # Chair
                      [0.5, 0.37],  # Filing Cabinet
                      [1.17, 0.69],  # Dining Table
                      [0.05, 0.84],  # Mars Poster
                      [0.05, 0.56],  # Cassini Poster
                      [0.46, 0.46],  # Fridge
                      [0.5, 0.5],  # Checkers Table
                      [0.5, 0.5],  # Fern
                     ])

    landmarks = []
    for i, pose in enumerate(poses):
        landmark = MapObject(labels[i], sizes[i], pose=pose,
                             color_str=colors[i], map_bounds=map_.bounds)
        landmarks.append(landmark)

    # Add walls to map
    for wall in walls:
        map_.add_obj(wall)

    # Add landmarks to map
    for landmark in landmarks:
        map_.add_obj(landmark)

    # Create areas
    labels = ['Study', 'Library', 'Kitchen', 'Billiard Room', 'Hallway',
              'Dining Room']
    colors = ['aquamarine', 'lightcoral', 'goldenrod', 'sage',
              'cornflowerblue', 'orchid']
    points = np.array([[[-7.0, -3.33], [-7.0, -1], [-2, -1], [-2, -3.33]],
                       [[-2, -3.33], [-2, -1], [4.0, -1], [4.0, -3.33]],
                       [[-9.5, 1.4], [-9.5, 3.68], [0, 3.68], [0, 1.4]],
                       [[0, 1.4], [0, 3.68], [4, 3.68], [4, 1.4]],
                       [[-9.5, -1], [-9.5, 1.4], [4, 1.4], [4, -1]],
                       [[-9.5, -3.33], [-9.5, -1], [-7, -1], [-7, -3.33]],
                       ])

    for i, pts in enumerate(points):
        centroid = [pts[0, 0] + np.abs(pts[2, 0] - pts[0, 0]) / 2,
                    pts[0, 1] + np.abs(pts[1, 1] - pts[0, 1]) / 2, 0]
        area = MapArea(name=labels[i], shape_pts=pts, pose=centroid,
                       color_str=colors[i], map_bounds=map_.bounds)
        map_.add_area(area)

        # Relate landmarks and areas
        for landmark in landmarks:
            if area.shape.contains(Point(landmark.pose)):
                area.contained_objects[landmark.name] = landmark
                landmark.container_area = area
                landmark.define_relations(map_.bounds)
        area.define_relations(map_.bounds)

    # <>TODO: Include area demarcations
    map_.feasible_layer.define_feasible_regions(map_.static_elements)
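
A minimal invocation sketch for the function above; the `Map` import path and constructor arguments are assumptions inferred from how `map_` is used (``map_.bounds``, ``map_.add_obj``, ``map_.feasible_layer``):

from cops_and_robots.map_tools.map import Map  # assumed module path

fleming = Map(bounds=[-9.5, -3.33, 4, 3.68])  # bounds inferred from the areas
set_up_fleming(fleming)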
Example #5
class Camera(Sensor):
    """A conic sensor mounted on a robot.

    The camera provides a viewcone from the point of view of the robot which
    rescales based on its environment (e.g. if it's in front of a wall).

    .. image:: img/classes_Camera.png

    Parameters
    ----------
    robot_pose : array_like, optional
        The cop's initial [x, y, theta] (defaults to [0, 0, 0]).
    visible : bool, optional
        Whether or not the view cone is shown. Default is True.
    default_color : cnames
        Default color to display all camera sensors as. Defaults to yellow.
    element_dict : dict, optional
        Map elements, keyed by 'static' and 'dynamic', used to rescale the
        viewcone. Defaults to an empty dict.
    min_view_dist : float, optional
        Minimum detection distance in meters. Default is 0.3.
    max_view_dist : float, optional
        Maximum detection distance in meters. Default is 1.0.

    """
    def __init__(self, robot_pose=(0, 0, 0), visible=True,
                 default_color=cnames['yellow'], element_dict=None,
                 min_view_dist=0.3, max_view_dist=1.0):
        # Avoid a shared mutable default argument
        self.element_dict = {} if element_dict is None else element_dict
        # Define nominal viewcone
        self.min_view_dist = min_view_dist  # [m]
        self.max_view_dist = max_view_dist  # [m]
        self.view_angle = math.pi / 2  # [rad]
        viewcone_pts = [(0, 0),
                        (self.max_view_dist * math.cos(self.view_angle / 2),
                         self.max_view_dist * math.sin(self.view_angle / 2)),
                        (self.max_view_dist * math.cos(-self.view_angle / 2),
                         self.max_view_dist * math.sin(-self.view_angle / 2)),
                        (0, 0),
                        ]

        # Create SoftMax model for camera
        self.detection_model = camera_model_2D(self.min_view_dist,
                                               self.max_view_dist)

        # Instantiate Sensor superclass object
        update_rate = 1  # [Hz]
        has_physical_dimensions = True
        super(Camera, self).__init__(update_rate, has_physical_dimensions)

        # Set the ideal and actual viewcones
        self.ideal_viewcone = MapObject('Ideal viewcone',
                                        viewcone_pts,
                                        visible=False,
                                        blocks_camera=False,
                                        color_str='pink',
                                        pose=robot_pose,
                                        has_relations=False,
                                        centroid_at_origin=False,
                                        )
        self.viewcone = MapObject('Viewcone',
                                  viewcone_pts,
                                  alpha=0.2,
                                  visible=True,
                                  blocks_camera=False,
                                  color_str='lightyellow',
                                  pose=robot_pose,
                                  has_relations=False,
                                  centroid_at_origin=False,
                                  )
        self.view_pose = (0, 0, 0)

        # <>TODO: Add in and test an offset of (-0.1,-0.1)
        self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
        self._move_viewcone(robot_pose)

        # Set up the VB fusion parameters
        self.vb = VariationalBayes()

    def update_viewcone(self, robot_pose):
        """Update the camera's viewcone position and scale.

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        self._move_viewcone(robot_pose)
        self._rescale_viewcone(robot_pose)

        # Translate detection model
        self.detection_model.move(self.view_pose)

    def _move_viewcone(self, robot_pose):
        """Move the viewcone based on the robot's pose

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        pose = (robot_pose[0] + self.offset[0],
                robot_pose[1] + self.offset[1],
                robot_pose[2]
                )

        # Reset the view shape
        self.viewcone.shape = self.ideal_viewcone.shape
        transform = tuple(np.subtract(pose, self.view_pose))
        self.ideal_viewcone.move_relative(transform,
                                          rotation_pt=self.view_pose[0:2])
        self.viewcone.move_relative(transform, rotation_pt=self.view_pose[0:2])
        self.view_pose = pose

    def _rescale_viewcone(self, robot_pose):
        """Rescale the viewcone based on intersecting map objects.

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        scale = 1.0  # default: keep the ideal viewcone at full size

        blocking_shapes = []
        possible_elements = []

        try:
            possible_elements += self.element_dict['static']
        except KeyError:
            logging.debug('No static elements')
        try:
            possible_elements += self.element_dict['dynamic']
        except KeyError:
            logging.debug('No dynamic elements')

        for element in possible_elements:
            if element.blocks_camera:
                blocking_shapes.append(element.shape)

        for shape in blocking_shapes:
            if self.viewcone.shape.intersects(shape):
                # <>TODO: Use shadows instead of rescaling viewcone
                # calculate shadows for all shapes touching viewcone
                # origin = self.viewcone.project(map_object.shape)
                # shadow = affinity.scale(...) #map portion invisible to the view
                # self.viewcone = self.viewcone.difference(shadow)

                distance = Point(self.view_pose[0:2]).distance(shape)
                scale_ = distance / self.max_view_dist * 1.3  # <>TODO: why the 1.3?
                if scale_ < scale:
                    scale = scale_

        self.viewcone.shape = affinity.scale(self.ideal_viewcone.shape,
                                             xfact=scale,
                                             yfact=scale,
                                             origin=self.view_pose[0:2])
        # else:
        #     self.viewcone.shape = self.ideal_viewcone.shape

    def detect(self, filter_type, particles=None, prior=None):
        """Update a fusion engine's probability from camera detections.

        Parameters
        ----------
        filter_type : {'particle','gauss sum'}
            The type of filter to update.
        particles : array_like, optional
            The particle list, assuming [p, x, y], where p is the particle's
            associated probability and x and y are position data.
        prior : GaussianMixture, optional
            The prior belief, used when `filter_type` is not 'particle'.

        """
        if filter_type == 'particle':
            self._detect_particles(particles)
        else:
            posterior = self._detect_probability(prior)
            return posterior

    def _detect_particles(self, particles):
        """Update particle weights based on the sensor model.

        Parameters
        ----------
        particles : array_like
            The particle list, assuming [p, x, y], where p is the particle's
            associated probability and x and y are position data.
        """

        # Update particle probabilities in view cone frame
        for i, particle in enumerate(particles):
            if self.viewcone.shape.contains(Point(particle[1:3])):
                particles[i, 0] *= (1 - self.detection_model.probability(
                    state=particle[1:3], class_='Detection'))

        # Renormalize
        particles[:, 0] /= sum(particles[:, 0])

    def _detect_probability(self, prior):
        """Update a Gaussian mixture prior from a 'No Detection' measurement."""
        mu, sigma, beta = self.vb.update(measurement='No Detection',
                                         likelihood=self.detection_model,
                                         prior=prior,
                                         use_LWIS=True,
                                         poly=self.detection_model.poly
                                         )
        gm = GaussianMixture(beta, mu, sigma)
        gm.camera_viewcone = self.detection_model.poly  # for plotting
        return gm
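
A minimal usage sketch for the class above, assuming the module's own imports (``math``, ``numpy``, shapely, ``cnames``, ``camera_model_2D``, ``VariationalBayes``) resolve; the particle layout [p, x, y] follows `_detect_particles`:

import numpy as np

camera = Camera(robot_pose=(0, 0, 0), min_view_dist=0.3, max_view_dist=1.0)
camera.update_viewcone((0.5, 0.0, 45))  # move, rescale, shift detection model

particles = np.array([[0.25, 0.4, 0.1],   # [p, x, y]
                      [0.25, 0.8, 0.4],
                      [0.25, -0.5, 0.2],
                      [0.25, 1.5, 1.5]])
camera.detect('particle', particles=particles)  # reweights in place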
Example #6
class Camera(Sensor):
    """A conic sensor mounted on a robot.

    The camera provides a viewcone from the point of view of the robot which
    rescales based on its environment (e.g. if it's in front of a wall).

    .. image:: img/classes_Camera.png

    Parameters
    ----------
    robot_pose : array_like, optional
        The cop's initial [x, y, theta] (defaults to [0, 0, 0]).
    visible : bool, optional
        Whether or not the view cone is shown. Default is True.
    default_color : cnames
        Default color to display all camera sensors as. Defaults to yellow.
    element_dict : dict, optional
        Map elements, keyed by 'static' and 'dynamic', used to rescale the
        viewcone. Defaults to an empty dict.
    min_view_dist : float, optional
        Minimum detection distance in meters. Default is 0.3.
    max_view_dist : float, optional
        Maximum detection distance in meters. Default is 1.0.

    """
    def __init__(self,
                 robot_pose=(0, 0, 0),
                 visible=True,
                 default_color=cnames['yellow'],
                 element_dict=None,
                 min_view_dist=0.3,
                 max_view_dist=1.0):
        # Avoid a shared mutable default argument
        self.element_dict = {} if element_dict is None else element_dict
        # Define nominal viewcone
        self.min_view_dist = min_view_dist  # [m]
        self.max_view_dist = max_view_dist  # [m]
        self.view_angle = math.pi / 2  # [rad]
        viewcone_pts = [
            (0, 0),
            (self.max_view_dist * math.cos(self.view_angle / 2),
             self.max_view_dist * math.sin(self.view_angle / 2)),
            (self.max_view_dist * math.cos(-self.view_angle / 2),
             self.max_view_dist * math.sin(-self.view_angle / 2)),
            (0, 0),
        ]

        # Create SoftMax model for camera
        self.detection_model = camera_model_2D(self.min_view_dist,
                                               self.max_view_dist)

        # Instantiate Sensor superclass object
        update_rate = 1  # [Hz]
        has_physical_dimensions = True
        super(Camera, self).__init__(update_rate, has_physical_dimensions)

        # Set the ideal and actual viewcones
        self.ideal_viewcone = MapObject(
            'Ideal viewcone',
            viewcone_pts,
            visible=False,
            blocks_camera=False,
            color_str='pink',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.viewcone = MapObject(
            'Viewcone',
            viewcone_pts,
            alpha=0.2,
            visible=True,
            blocks_camera=False,
            color_str='lightyellow',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.view_pose = (0, 0, 0)

        # <>TODO: Add in and test an offset of (-0.1,-0.1)
        self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
        self._move_viewcone(robot_pose)

        # Set up the VB fusion parameters
        self.vb = VariationalBayes()

    def update_viewcone(self, robot_pose):
        """Update the camera's viewcone position and scale.

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        self._move_viewcone(robot_pose)
        self._rescale_viewcone(robot_pose)

        # Translate detection model
        self.detection_model.move(self.view_pose)

    def _move_viewcone(self, robot_pose):
        """Move the viewcone based on the robot's pose

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        pose = (robot_pose[0] + self.offset[0], robot_pose[1] + self.offset[1],
                robot_pose[2])

        # Reset the view shape
        self.viewcone.shape = self.ideal_viewcone.shape
        transform = tuple(np.subtract(pose, self.view_pose))
        self.ideal_viewcone.move_relative(transform,
                                          rotation_pt=self.view_pose[0:2])
        self.viewcone.move_relative(transform, rotation_pt=self.view_pose[0:2])
        self.view_pose = pose

    def _rescale_viewcone(self, robot_pose):
        """Rescale the viewcone based on intersecting map objects.

        Parameters
        ----------
        robot_pose : array_like
            The robot's current [x, y, theta].
        """
        scale = 1.0  # default: keep the ideal viewcone at full size

        blocking_shapes = []
        possible_elements = []

        try:
            possible_elements += self.element_dict['static']
        except KeyError:
            logging.debug('No static elements')
        try:
            possible_elements += self.element_dict['dynamic']
        except KeyError:
            logging.debug('No dynamic elements')

        for element in possible_elements:
            if element.blocks_camera:
                blocking_shapes.append(element.shape)

        for shape in blocking_shapes:
            if self.viewcone.shape.intersects(shape):
                # <>TODO: Use shadows instead of rescaling viewcone
                # calculate shadows for all shapes touching viewcone
                # origin = self.viewcone.project(map_object.shape)
                # shadow = affinity.scale(...) #map portion invisible to the view
                # self.viewcone = self.viewcone.difference(shadow)

                distance = Point(self.view_pose[0:2]).distance(shape)
                scale_ = distance / self.max_view_dist * 1.3  # <>TODO: why the 1.3?
                if scale_ < scale:
                    scale = scale_

        self.viewcone.shape = affinity.scale(self.ideal_viewcone.shape,
                                             xfact=scale,
                                             yfact=scale,
                                             origin=self.view_pose[0:2])
        # else:
        #     self.viewcone.shape = self.ideal_viewcone.shape

    def detect(self, filter_type, particles=None, prior=None):
        """Update a fusion engine's probability from camera detections.

        Parameters
        ----------
        filter_type : {'particle','gauss sum'}
            The type of filter to update.
        particles : array_like, optional
            The particle list, assuming [p, x, y], where p is the particle's
            associated probability and x and y are position data.
        prior : GaussianMixture, optional
            The prior belief, used when `filter_type` is not 'particle'.

        """
        if filter_type == 'particle':
            self._detect_particles(particles)
        else:
            posterior = self._detect_probability(prior)
            return posterior

    def _detect_particles(self, particles):
        """Update particle weights based on the sensor model.

        Parameters
        ----------
        particles : array_like
            The particle list, assuming [p, x, y], where p is the particle's
            associated probability and x and y are position data.
        """

        # Update particle probabilities in view cone frame
        for i, particle in enumerate(particles):
            if self.viewcone.shape.contains(Point(particle[1:3])):
                particles[i, 0] *= (1 - self.detection_model.probability(
                    state=particle[1:3], class_='Detection'))

        # Renormalize
        particles[:, 0] /= sum(particles[:, 0])

    def _detect_probability(self, prior):
        """Update a Gaussian mixture prior from a 'No Detection' measurement."""
        mu, sigma, beta = self.vb.update(measurement='No Detection',
                                         likelihood=self.detection_model,
                                         prior=prior,
                                         use_LWIS=True,
                                         poly=self.detection_model.poly)
        gm = GaussianMixture(beta, mu, sigma)
        gm.camera_viewcone = self.detection_model.poly  # for plotting
        return gm
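
To make the `_rescale_viewcone` geometry concrete, here is a self-contained shapely sketch of the same computation (the 1.3 factor is kept from the code above; the wall placement is illustrative):

from shapely import affinity
from shapely.geometry import Point, Polygon

max_view_dist = 1.0
view_pose = (0.0, 0.0)

# Triangular 90-degree viewcone, as built in __init__
cone = Polygon([(0, 0), (0.707, 0.707), (0.707, -0.707)])
wall = Polygon([(0.5, -1), (0.6, -1), (0.6, 1), (0.5, 1)])

scale = 1.0
if cone.intersects(wall):
    distance = Point(view_pose).distance(wall)          # 0.5 here
    scale = min(scale, distance / max_view_dist * 1.3)  # 0.65
cone = affinity.scale(cone, xfact=scale, yfact=scale, origin=view_pose)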
Example #7
    goal_points = [
        (0, 0, 0),
        (2, 0, 0),
        (2, 0.5, 90),
        (2, 1.5, 90),
        (2, 1.7, 90),
    ]

    # Define Map and its objects
    bounds = (-5, -5, 5, 5)

    l = 1.2192  # [m] wall length
    w = 0.1524  # [m] wall width

    pose = (2.4, 0, 90)
    wall1 = MapObject('wall1', (l, w), pose=pose)
    pose = (2, 2.2, 0)
    wall2 = MapObject('wall2', (l, w), pose=pose)

    shape_layer = ShapeLayer(bounds=bounds)
    shape_layer.add_obj(wall1)
    shape_layer.add_obj(wall2)
    shape_layer.plot()

    # Define Particle Filter
    # target_pose = (10,10,0)
    # particle_filter = ParticleFilter('Roy', bounds=bounds)
    # particle_filter.update_viewcone(kinect,target_pose)

    # Move camera and update the camera
    for point in goal_points:
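        # The excerpt breaks off here; a hypothetical body, assuming a Camera
        # instance (e.g. ``kinect = Camera(robot_pose=point)``) exists:
        # kinect.update_viewcone(point)
        pass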
Example #8
    def __init__(self,
                 name,
                 pose=None,
                 pose_source='python',
                 color_str='darkorange',
                 map_cfg=None,
                 create_mission_planner=True,
                 goal_planner_cfg=None,
                 path_planner_cfg=None,
                 **kwargs):

        # Object attributes
        self.name = name
        self.pose_source = pose_source

        # Avoid shared mutable default arguments
        map_cfg = map_cfg or {}
        goal_planner_cfg = goal_planner_cfg or {}
        path_planner_cfg = path_planner_cfg or {}

        # Setup map
        self.map = Map(**map_cfg)

        # If pose is not given, randomly place in feasible layer.
        feasible_robot_generated = False
        if pose is None:
            while not feasible_robot_generated:
                x = random.uniform(self.map.bounds[0], self.map.bounds[2])
                y = random.uniform(self.map.bounds[1], self.map.bounds[3])
                if self.map.feasible_layer.pose_region.contains(Point([x, y])):
                    feasible_robot_generated = True
            theta = random.uniform(0, 359)
            pose = [x, y, theta]

        self.pose2D = Pose(self, pose, pose_source)
        self.pose_history = np.array(([0, 0, 0], self.pose2D.pose))
        if pose_source == 'python':
            self.publish_to_ROS = False
        else:
            self.publish_to_ROS = True

        # Setup planners
        if create_mission_planner:
            self.mission_planner = MissionPlanner(self)
        self.goal_planner = GoalPlanner(self, **goal_planner_cfg)
        # If pose_source is python, this robot is just in simulation
        if not self.publish_to_ROS:
            self.path_planner = PathPlanner(self, **path_planner_cfg)
            self.controller = Controller(self)

        # Define MapObject
        # <>TODO: fix this horrible hack
        if self.name == 'Deckard':
            # Square footprint rotated 45 degrees
            pose = [0, 0, -np.pi / 4]
            r = iRobotCreate.DIAMETER / 2
            n_sides = 4
            x = [r * np.cos(2 * np.pi * n / n_sides + pose[2]) + pose[0]
                 for n in range(n_sides)]
            y = [r * np.sin(2 * np.pi * n / n_sides + pose[2]) + pose[1]
                 for n in range(n_sides)]
            shape_pts = Polygon(zip(x, y)).exterior.coords
        else:
            shape_pts = Point([0, 0]).buffer(iRobotCreate.DIAMETER / 2)\
                .exterior.coords
        self.map_obj = MapObject(self.name, shape_pts[:], has_relations=False,
                                 blocks_camera=False, color_str=color_str)
        self.update_shape()
Example #9
class Robot(iRobotCreate):
    """Class definition for the generic robot object.

    .. image:: img/classes_Robot.png

    Parameters
    ----------
    name : str
        The robot's name.
    pose : array_like, optional
        The robot's initial [x, y, theta] in [m, m, degrees]. If not given,
        the robot is placed at a random feasible pose on the map.
    pose_source : str, optional
        Source of pose updates. 'python' (the default) keeps the robot in
        pure simulation; any other value publishes to ROS.
    color_str : str, optional
        Matplotlib color name for the robot's map object. Defaults to
        'darkorange'.
    map_cfg : dict, optional
        Configuration passed to the ``Map`` constructor.
    create_mission_planner : bool, optional
        Whether to create a ``MissionPlanner`` for this robot. Default is
        True.
    goal_planner_cfg, path_planner_cfg : dict, optional
        Configurations passed to the ``GoalPlanner`` and ``PathPlanner``
        constructors.
    **kwargs
        Arguments passed to the ``MapObject`` attribute.

    """

    def __init__(self,
                 name,
                 pose=None,
                 pose_source='python',
                 color_str='darkorange',
                 map_cfg=None,
                 create_mission_planner=True,
                 goal_planner_cfg=None,
                 path_planner_cfg=None,
                 **kwargs):

        # Object attributes
        self.name = name
        self.pose_source = pose_source

        # Avoid shared mutable default arguments
        map_cfg = map_cfg or {}
        goal_planner_cfg = goal_planner_cfg or {}
        path_planner_cfg = path_planner_cfg or {}

        # Setup map
        self.map = Map(**map_cfg)

        # If pose is not given, randomly place in feasible layer.
        feasible_robot_generated = False
        if pose is None:
            while not feasible_robot_generated:
                x = random.uniform(self.map.bounds[0], self.map.bounds[2])
                y = random.uniform(self.map.bounds[1], self.map.bounds[3])
                if self.map.feasible_layer.pose_region.contains(Point([x, y])):
                    feasible_robot_generated = True
            theta = random.uniform(0, 359)
            pose = [x, y, theta]

        self.pose2D = Pose(self, pose, pose_source)
        self.pose_history = np.array(([0, 0, 0], self.pose2D.pose))
        if pose_source == 'python':
            self.publish_to_ROS = False
        else:
            self.publish_to_ROS = True

        # Setup planners
        if create_mission_planner:
            self.mission_planner = MissionPlanner(self)
        self.goal_planner = GoalPlanner(self, **goal_planner_cfg)
        # If pose_source is python, this robot is just in simulation
        if not self.publish_to_ROS:
            self.path_planner = PathPlanner(self, **path_planner_cfg)
            self.controller = Controller(self)

        # Define MapObject
        # <>TODO: fix this horrible hack
        if self.name == 'Deckard':
            # Square footprint rotated 45 degrees
            pose = [0, 0, -np.pi / 4]
            r = iRobotCreate.DIAMETER / 2
            n_sides = 4
            x = [r * np.cos(2 * np.pi * n / n_sides + pose[2]) + pose[0]
                 for n in range(n_sides)]
            y = [r * np.sin(2 * np.pi * n / n_sides + pose[2]) + pose[1]
                 for n in range(n_sides)]
            shape_pts = Polygon(zip(x, y)).exterior.coords
        else:
            shape_pts = Point([0, 0]).buffer(iRobotCreate.DIAMETER / 2)\
                .exterior.coords
        self.map_obj = MapObject(self.name, shape_pts[:], has_relations=False,
                                 blocks_camera=False, color_str=color_str)
        self.update_shape()

    def update_shape(self):
        """Update the robot's map_obj.
        """
        self.map_obj.move_absolute(self.pose2D.pose)

    def update(self, i=0):
        """Update all primary functionality of the robot.

        This includes planning and movement for both cops and robbers,
        as well as sensing and map animations for cops.

        Parameters
        ----------
        i : int, optional
            The current animation frame. Default is 0 for non-animated robots.

        Returns
        -------
        tuple or None
            `None` if the robot does not generate an animation packet, or a
            tuple of all animation parameters otherwise.
        """
        if self.pose_source == 'tf':
            self.pose2D.tf_update()

        if self.mission_planner.mission_status != 'stopped':
            # Update statuses and planners
            self.mission_planner.update()
            self.goal_planner.update()
            if not self.publish_to_ROS:
                self.path_planner.update()
                self.controller.update()

            # Add to the pose history, update the map
            self.pose_history = np.vstack((self.pose_history,
                                           self.pose2D.pose[:]))
            self.update_shape()
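
A minimal construction sketch for the class above, assuming the module's own imports (``Map``, ``Pose``, the planners, ``iRobotCreate``, shapely) resolve; 'Roy' is an arbitrary robber name:

# Purely simulated robot: pose_source='python' keeps everything in-process
roy = Robot('Roy', pose=[0, 0.5, 0], pose_source='python')

# Step the robot through a few animation frames
for frame in range(10):
    roy.update(frame)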