Example #1
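This first example is the constructor of the package's Camera sensor. It defines a nominal wedge-shaped viewcone with a pi/2 rad field of view, builds a SoftMax detection model from the view-distance limits, registers ideal and actual viewcone MapObjects at the robot's pose, and creates the VariationalBayes object used later for fusion updates.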
    def __init__(self,
                 robot_pose=(0, 0, 0),
                 visible=True,
                 default_color=cnames['yellow'],
                 element_dict=None,
                 min_view_dist=0.3,
                 max_view_dist=1.0):
        # Use None as the default to avoid sharing one mutable dict
        # across all Camera instances
        if element_dict is None:
            element_dict = {}
        self.element_dict = element_dict
        # Define nominal viewcone
        self.min_view_dist = min_view_dist  # [m]
        self.max_view_dist = max_view_dist  # [m]
        self.view_angle = math.pi / 2  # [rad]
        viewcone_pts = [
            (0, 0),
            (self.max_view_dist * math.cos(self.view_angle / 2),
             self.max_view_dist * math.sin(self.view_angle / 2)),
            (self.max_view_dist * math.cos(-self.view_angle / 2),
             self.max_view_dist * math.sin(-self.view_angle / 2)),
            (0, 0),
        ]

        # Create SoftMax model for camera
        self.detection_model = camera_model_2D(self.min_view_dist,
                                               self.max_view_dist)

        # Instantiate Sensor superclass object
        update_rate = 1  # [Hz]
        has_physical_dimensions = True
        super(Camera, self).__init__(update_rate, has_physical_dimensions)

        # Set the ideal and actual viewcones
        self.ideal_viewcone = MapObject(
            'Ideal viewcone',
            viewcone_pts,
            visible=False,
            blocks_camera=False,
            color_str='pink',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.viewcone = MapObject(
            'Viewcone',
            viewcone_pts,
            alpha=0.2,
            visible=True,
            blocks_camera=False,
            color_str='lightyellow',
            pose=robot_pose,
            has_relations=False,
            centroid_at_origin=False,
        )
        self.view_pose = (0, 0, 0)

        # <>TODO: Add in and test an offset of (-0.1,-0.1)
        self.offset = (0, 0, 0)  # [m] offset (x,y,theta) from center of robot
        self._move_viewcone(robot_pose)

        # Set up the VB fusion parameters
        self.vb = VariationalBayes()
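For reference, the viewcone geometry above can be checked in isolation; this is a minimal standalone sketch (assuming the same defaults as above: a 1.0 m range and a pi/2 rad field of view):

# Standalone sketch of the nominal viewcone geometry used above
# (assumed values match the defaults: 1.0 m range, pi/2 rad field of view)
import math

max_view_dist = 1.0        # [m]
view_angle = math.pi / 2   # [rad], a 90-degree field of view
viewcone_pts = [
    (0, 0),
    (max_view_dist * math.cos(view_angle / 2),
     max_view_dist * math.sin(view_angle / 2)),
    (max_view_dist * math.cos(-view_angle / 2),
     max_view_dist * math.sin(-view_angle / 2)),
    (0, 0),
]
print(viewcone_pts)  # closed wedge: apex, two arc endpoints, apex again

The origin is repeated at the end so the wedge outline forms a closed ring.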
Example #2
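The second example exercises the same detection model in a standalone test: a three-component Gaussian mixture prior is repeatedly updated with VBIS 'No Detection' measurements while the camera slides along the x-axis, and the posterior contours, camera polygon, and mixture ellipses are redrawn at each step of a matplotlib animation.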
# Imports needed to run this example on its own. GaussianMixture,
# VariationalBayes and camera_model_2D are defined elsewhere in the
# same package.
import itertools
import logging

import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from descartes.patch import PolygonPatch


def camera_test(num_std=1, time_interval=1):
    # prior = fleming_prior()
    # prior = uniform_prior()
    # prior = GaussianMixture(1, np.zeros(2), np.eye(2))
    prior = GaussianMixture([1, 1, 1],
                            np.array([[-7, 0],
                                      [-3, 0],
                                      [1, 0]]),
                            np.eye(2)[None, :].repeat(3, axis=0))
    bounds = [-12.5, -3.5, 2.5, 3.5]

    min_view_dist = 0.3  # [m]
    max_view_dist = 1.0  # [m]
    detection_model = camera_model_2D(min_view_dist, max_view_dist)

    # 20 poses (x, y, theta): x sweeps from -10 to 3, y and theta stay 0
    ls = np.linspace(-10, 3, 20)
    trajectory = np.hstack((ls[:, None], np.zeros((20, 2))))

    class camera_tester(object):
        """docstring for merged_gm"""
        def __init__(self,
                     prior,
                     detection_model,
                     trajectory,
                     num_std=1,
                     bounds=None):
            self.fig = plt.figure(figsize=(16, 8))
            self.gm = prior
            self.detection_model = detection_model
            self.trajectory = itertools.cycle(trajectory)
            self.vb = VariationalBayes()
            self.num_std = num_std
            if bounds is None:
                self.bounds = [-5, -5, 5, 5]
            else:
                self.bounds = bounds

        def update(self, i=0):
            self.camera_pose = next(self.trajectory)
            logging.info('Moving to pose {}.'.format(self.camera_pose))
            self.detection_model.move(self.camera_pose)

            # Do a VBIS update, conditioning on a 'No Detection' measurement
            mu, sigma, beta = self.vb.update(measurement='No Detection',
                                             likelihood=self.detection_model,
                                             prior=self.gm,
                                             use_LWIS=True,
                                             poly=self.detection_model.poly,
                                             num_std=self.num_std)
            self.gm = GaussianMixture(weights=beta,
                                      means=mu,
                                      covariances=sigma)
            # Log what's going on
            logging.info(self.gm)
            logging.info('Weight sum: {}'.format(beta.sum()))

            self.remove()
            self.plot()

        def plot(self):
            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            self.contourf = self.ax.contourf(self.xx,
                                             self.yy,
                                             self.gm.pdf(self.pos),
                                             levels=self.levels,
                                             cmap=plt.get_cmap('jet'))
            # Plot camera
            self.cam_patch = PolygonPatch(self.detection_model.poly,
                                          facecolor='none',
                                          linewidth=2,
                                          edgecolor='white')
            self.ax.add_patch(self.cam_patch)

            # Plot ellipses
            self.ellipse_patches = self.gm.plot_ellipses(
                poly=self.detection_model.poly)

        def plot_setup(self):
            # Define gridded space for graphing
            min_x, max_x = self.bounds[0], self.bounds[2]
            min_y, max_y = self.bounds[1], self.bounds[3]
            res = 30  # [grid cells per meter]
            # Use a float step so this also works under Python 2's
            # integer division
            self.xx, self.yy = np.mgrid[min_x:max_x:1.0 / res,
                                        min_y:max_y:1.0 / res]
            pos = np.empty(self.xx.shape + (2, ))
            pos[:, :, 0] = self.xx
            pos[:, :, 1] = self.yy
            self.pos = pos

            # Plot setup
            self.ax = self.fig.add_subplot(111)

            self.ax.set_title('VBIS with camera detection test')
            plt.axis('scaled')
            self.ax.set_xlim([min_x, max_x])
            self.ax.set_ylim([min_y, max_y])

            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            cax = self.contourf = self.ax.contourf(self.xx,
                                                   self.yy,
                                                   self.gm.pdf(self.pos),
                                                   levels=self.levels,
                                                   cmap=plt.get_cmap('jet'))
            self.fig.colorbar(cax)

        def remove(self):
            if hasattr(self, 'cam_patch'):
                self.cam_patch.remove()
                del self.cam_patch

            if hasattr(self, 'ellipse_patches'):
                for patch in self.ellipse_patches:
                    patch.remove()
                del self.ellipse_patches

            if hasattr(self, 'contourf'):
                for collection in self.contourf.collections:
                    collection.remove()
                del self.contourf

    tester = camera_tester(prior, detection_model, trajectory, num_std,
                           bounds)
    logging.info('Initial GM:')
    logging.info(prior)

    ani = animation.FuncAnimation(tester.fig,
                                  tester.update,
                                  interval=time_interval,
                                  repeat=True,
                                  blit=False,
                                  init_func=tester.plot_setup)

    plt.show()
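To run the animation, a driver along these lines should suffice; the logging setup is an assumption (without it, the logging.info output stays hidden), and note that FuncAnimation interprets interval in milliseconds:

# Driver sketch; the logging configuration is an assumption, not part of
# the original source.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    camera_test(num_std=1, time_interval=100)  # interval is in milliseconds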