def update(self, i=0):
    """Regenerate, replot, and re-merge the GMs for one animation frame.

    Args:
        i: frame index (unused; required by matplotlib's FuncAnimation).

    NOTE(review): ``weights``, ``means`` and ``covariances`` are free
    variables here — presumably bound in an enclosing scope in the full
    source; confirm before reuse.
    """
    # Regenerate GMs
    merged_gauss_2d = GaussianMixture(weights, means, covariances,
                                      max_num_mixands=self.num_mixands)
    matlab_merged_gauss_2d = self.parent.matlab_gm(
        weights, means, covariances, max_num_mixands=self.num_mixands)

    # Replot GMs (remove the previous frame's contours first)
    title = 'Python Merged GM ({} mixands)'.format(
        merged_gauss_2d.weights.size)
    if hasattr(self, 'old_contour'):
        merged_gauss_2d.contourf = self.old_contour
        merged_gauss_2d.plot_remove()
    self.old_contour = merged_gauss_2d.plot(ax=self.axes[1],
                                            levels=self.levels,
                                            title=title)

    title = 'Matlab Merged GM ({} mixands)'.format(
        matlab_merged_gauss_2d.weights.size)
    if hasattr(self, 'old_matlab_contour'):
        matlab_merged_gauss_2d.contourf = self.old_matlab_contour
        matlab_merged_gauss_2d.plot_remove()
    self.old_matlab_contour = matlab_merged_gauss_2d.plot(
        ax=self.axes[2], levels=self.levels, title=title)

    # Decrement mixands (with wrapping back to 1 after hitting the max).
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() truncates identically.
    if self.num_mixands == self.max_num_mixands:
        self.num_mixands = 1
    elif int(self.num_mixands * self.mixand_rate) < self.max_num_mixands:
        self.num_mixands = int(self.num_mixands * self.mixand_rate)
    else:
        self.num_mixands = self.max_num_mixands
Ejemplo n.º 2
0
    def clear_probability_from_objects(self, target, obj):
        """Remove probability mass located inside *obj* from target filters.

        Performs a VB update with a 'Not Inside' measurement against the
        object's 'Inside' binary relation model.

        Args:
            target: name of a single filter to update, or None to update
                every filter except the 'combined' one.
            obj: map object providing (or able to define) relation models.
        """
        logging.info('Clearing probability from {}'.format(obj.name))
        fusion_engine = self.robot.fusion_engine
        vb = VariationalBayes()

        if not hasattr(obj, 'relations'):
            obj.define_relations()

        # Shared likelihood for every filter updated below.
        likelihood = obj.relations.binary_models['Inside']

        def _clear(filter_):
            # One 'Not Inside' VB update, replacing the filter's GM in place.
            mu, sigma, beta = vb.update(
                measurement='Not Inside',
                likelihood=likelihood,
                prior=filter_.probability,
                use_LWIS=False,
            )
            filter_.probability = GaussianMixture(beta, mu, sigma)

        if target is not None:
            _clear(fusion_engine.filters[target])
        else:
            # Update all but combined filters
            for name, filter_ in fusion_engine.filters.iteritems():
                if name != 'combined':
                    _clear(filter_)
Ejemplo n.º 3
0
    def _detect_probability(self, prior):
        """Fuse a 'No Detection' measurement over the camera viewcone."""
        mu, sigma, beta = self.vb.update(
            measurement='No Detection',
            likelihood=self.detection_model,
            prior=prior,
            use_LWIS=True,
            poly=self.detection_model.poly,
        )
        posterior = GaussianMixture(beta, mu, sigma)
        # Attach the viewcone polygon so callers can plot it.
        posterior.camera_viewcone = self.detection_model.poly
        return posterior
Ejemplo n.º 4
0
    def lwis_update(self, prior):
        """Lightweight importance sampling (LWIS) moment-matching update.

        Samples from an importance distribution built from the first mixand
        of *prior*, reweights the samples by the measurement likelihood, and
        returns the weighted sample mean and covariance of the corrected
        posterior.

        NOTE(review): this excerpt references several names that are not
        defined locally (num_samples, likelihood, measurement, mu_VB,
        var_VB, log_c_hat) — presumably bound in an enclosing scope or as
        instance state in the full source; confirm before reuse.

        clustering:
            pairwise greedy merging - compare means, weights & variances
            salmond's method and runnals' method (better)

        """
        prior_mean = np.asarray(prior.means[0])
        prior_var = np.asarray(prior.covariances[0])

        # Importance distribution: a single Gaussian at the prior's first
        # mixand.
        q = GaussianMixture(1, prior_mean, prior_var)

        # Importance sampling correction
        w = np.zeros(num_samples)  # Importance weights
        x = q.rvs(size=num_samples)  # Sampled points
        x = np.asarray(x)
        # Select the discrete measurement class from the likelihood model.
        if hasattr(likelihood, 'subclasses'):
            measurement_class = likelihood.subclasses[measurement]
        else:
            measurement_class = likelihood.classes[measurement]

        # w_i = p(x_i) * P(z | x_i) / q(x_i), then normalized below.
        for i in range(num_samples):
            w[i] = prior.pdf(x[i]) \
                * measurement_class.probability(state=x[i])\
                / q.pdf(x[i])
        w /= np.sum(w)  # Normalize weights

        # Weighted sample mean.
        mu_hat = np.zeros_like(np.asarray(mu_VB))
        for i in range(num_samples):
            x_i = np.asarray(x[i])
            mu_hat = mu_hat + x_i.dot(w[i])

        # Weighted second moment, minus outer(mean, mean), gives covariance.
        var_hat = np.zeros_like(np.asarray(var_VB))
        for i in range(num_samples):
            x_i = np.asarray(x[i])
            var_hat = var_hat + w[i] * np.outer(x_i, x_i)
        var_hat -= np.outer(mu_hat, mu_hat)

        # Collapse one-element results to scalars for the univariate case.
        if mu_hat.size == 1 and mu_hat.ndim > 0:
            mu_lwis = mu_hat[0]
        else:
            mu_lwis = mu_hat
        if var_hat.size == 1:
            var_lwis = var_hat[0][0]
        else:
            var_lwis = var_hat

        logging.debug(
            'LWIS update found mean of {} and variance of {}.'.format(
                mu_lwis, var_lwis))

        return mu_lwis, var_lwis, log_c_hat
Ejemplo n.º 5
0
    def _detect_probability(self, prior):
        """Run a 'No Detection' VBIS update bounded by the camera polygon."""
        viewcone = self.detection_model.poly
        mu, sigma, beta = self.vb.update(measurement='No Detection',
                                         likelihood=self.detection_model,
                                         prior=prior,
                                         use_LWIS=True,
                                         poly=viewcone)
        gm = GaussianMixture(beta, mu, sigma)
        gm.camera_viewcone = viewcone  # for plotting
        return gm
Ejemplo n.º 6
0
    def detect_probability(self, prior):
        """Fuse a human's relational statement into the prior GM."""
        grounding_is_deckard = self.grounding.name.lower() == 'deckard'
        if not hasattr(self.grounding, 'relations') or grounding_is_deckard:
            logging.info(
                "Defining relations because {} didn't have any.".format(
                    self.grounding.name))
            self.grounding.define_relations()

        # Deckard's frame is mirrored, so swap left/right relations.
        if grounding_is_deckard:
            swap = {'Right': 'Left', 'Left': 'Right'}
            self.relation = swap.get(self.relation, self.relation)

        # Position update: build the measurement label.
        if self.target_name == 'nothing' and self.positivity == 'is not':
            label = 'a robot'
        elif self.target_name == 'nothing' or self.positivity == 'is not':
            label = 'Not ' + self.relation
        else:
            label = self.relation

        likelihood = self.grounding.relations.binary_models[self.relation]
        mu, sigma, beta = self.vb.update(measurement=label,
                                         likelihood=likelihood,
                                         prior=prior,
                                         use_LWIS=False)

        # Weight based on human possibly being wrong
        gm = GaussianMixture(beta, mu, sigma)
        alpha = self.false_alarm_prob / 2
        return prior.combine_gms(gm, alpha)
Ejemplo n.º 7
0
def compare_to_matlab(measurement='Near',
                      output_file='/Users/nick/Downloads/VBIS GM Fusion/'
                                  'nick_output.csv',
                      num_runs=30):
    """Run repeated VB updates on a fixed prior and dump results to CSV.

    The VB update is stochastic, so *num_runs* independent runs give a
    distribution of outputs to compare against MATLAB's implementation.

    Args:
        measurement: measurement label passed to the VB update.
        output_file: path of the CSV to write (one flattened row per run).
        num_runs: number of independent VB updates to perform.
    """
    # Five-component prior laid out in a shallow 'V' shape.
    prior = GaussianMixture(
        weights=[1, 1, 1, 1, 1],
        means=[
            [-2, -4],  # GM1 mean
            [-1, -2],  # GM2 mean
            [0, 0],  # GM3 mean
            [1, -2],  # GM4 mean
            [2, -4],  # GM5 mean
        ],
        covariances=[
            [[0.1, 0], [0, 0.1]],  # GM1 covariance
            [[0.2, 0], [0, 0.2]],  # GM2 covariance
            [[0.3, 0], [0, 0.3]],  # GM3 covariance
            [[0.2, 0], [0, 0.2]],  # GM4 covariance
            [[0.1, 0], [0, 0.1]],  # GM5 covariance
        ])

    # Define sensor likelihood
    brm = range_model()

    # BUGFIX: use a context manager so the file is closed even if an
    # update raises.
    with open(output_file, 'w') as file_:
        for i in range(num_runs):
            # Do a VBIS update
            logging.info('Starting VB update...')
            vb = VariationalBayes()
            mu_hat, var_hat, beta_hat = vb.update(measurement, brm, prior)

            # Flatten values into one row: [betas, means, covariances].
            flat = np.hstack((beta_hat, mu_hat.flatten(),
                              var_hat.flatten()))

            # Save flattened values
            np.savetxt(file_, np.atleast_2d(flat), delimiter=',')
Ejemplo n.º 8
0
        def update(self, i=0):
            """Advance the camera one pose and re-fuse the GM via VBIS.

            Args:
                i: animation frame index (unused; required by
                    matplotlib's FuncAnimation callback signature).
            """
            self.camera_pose = next(self.trajectory)
            logging.info('Moving to pose {}.'.format(self.camera_pose))
            self.detection_model.move(self.camera_pose)

            # Do a VBIS update
            # NOTE(review): 'detection_model' here is a free variable from
            # an enclosing scope (not self.detection_model) — confirm this
            # is intended in the full source.
            mu, sigma, beta = self.vb.update(measurement='No Detection',
                                             likelihood=detection_model,
                                             prior=self.gm,
                                             use_LWIS=True,
                                             poly=detection_model.poly,
                                             num_std=self.num_std)
            self.gm = GaussianMixture(weights=beta,
                                      means=mu,
                                      covariances=sigma)
            # Log what's going on
            logging.info(self.gm)
            logging.info('Weight sum: {}'.format(beta.sum()))

            # Clear the previous frame's artists before redrawing.
            self.remove()
            self.plot()
Ejemplo n.º 9
0
def test_voi():
    from cops_and_robots.robo_tools.robber import Robber
    from cops_and_robots.map_tools.map import Map
    from cops_and_robots.fusion.human import Human
    from cops_and_robots.fusion.gaussian_mixture import GaussianMixture
    import matplotlib.pyplot as plt
    from matplotlib.colors import cnames

    m = Map()
    pris = Robber('Pris')
    pris.map_obj.color = cnames['cornflowerblue']
    m.add_robber(pris.map_obj)
    zhora = Robber('Zhora')
    zhora.map_obj.color = cnames['cornflowerblue']
    m.add_robber(zhora.map_obj)
    roy = Robber('Roy')
    m.add_robber(roy.map_obj)
    h = Human(map_=m)
    m.add_human_sensor(h)

    prior = GaussianMixture(
        [0.1, 0.7, 0.2], [[3, -2], [-6, -2], [0, 0]],
        [[[1.5, 1.0], [1.0, 1.5]], [[2.5, -0.3], [-0.3, 2.5]],
         [[0.5, -0.3], [-0.3, 0.5]]])
    prior._discretize(bounds=m.bounds, grid_spacing=0.1)
    q = Questioner(human_sensor=h,
                   target_order=['Pris', 'Roy'],
                   target_weights=[11., 10.])

    m.setup_plot(show_human_interface=False)
    m.update()
    ax = m.axes['combined']

    # prior.plot(bounds=m.bounds, alpha=0.5, ax=ax)
    # m.update()
    # plt.show()
    q.weigh_questions({'Roy': prior})
    for qu in q.weighted_questions:
        print qu
Ejemplo n.º 10
0
def test_voi():
    from cops_and_robots.robo_tools.robber import Robber
    from cops_and_robots.map_tools.map import Map
    from cops_and_robots.fusion.human import Human
    from cops_and_robots.fusion.gaussian_mixture import GaussianMixture
    import matplotlib.pyplot as plt
    from matplotlib.colors import cnames

    m = Map()
    pris = Robber("Pris")
    pris.map_obj.color = cnames["cornflowerblue"]
    m.add_robber(pris.map_obj)
    zhora = Robber("Zhora")
    zhora.map_obj.color = cnames["cornflowerblue"]
    m.add_robber(zhora.map_obj)
    roy = Robber("Roy")
    m.add_robber(roy.map_obj)
    h = Human(map_=m)
    m.add_human_sensor(h)

    prior = GaussianMixture(
        [0.1, 0.7, 0.2],
        [[3, -2], [-6, -2], [0, 0]],
        [[[1.5, 1.0], [1.0, 1.5]], [[2.5, -0.3], [-0.3, 2.5]], [[0.5, -0.3], [-0.3, 0.5]]],
    )
    prior._discretize(bounds=m.bounds, grid_spacing=0.1)
    q = Questioner(human_sensor=h, target_order=["Pris", "Roy"], target_weights=[11.0, 10.0])

    m.setup_plot(show_human_interface=False)
    m.update()
    ax = m.axes["combined"]

    # prior.plot(bounds=m.bounds, alpha=0.5, ax=ax)
    # m.update()
    # plt.show()
    q.weigh_questions({"Roy": prior})
    for qu in q.weighted_questions:
        print qu
    def matlab_gm(self, weights, means, covariances, max_num_mixands):
        """Round-trip a GM through MATLAB's merger via .mat files.

        Writes the mixture to disk, blocks until the user confirms MATLAB
        has produced its output, then reads the merged mixture back.
        """
        savemat('matlab/gaussian_mixture/data/from_python.mat',
                {'weights': weights,
                 'means': means,
                 'covariances': covariances,
                 'max_num_mixands': max_num_mixands})
        raw_input("Hit enter when Matlab has created some output to use...")

        # Get weights, means, covariances from MATLAB.  Covariances come
        # back column-major ('F'), so reshape into (n, 2, 2) matrices.
        result = loadmat('matlab/gaussian_mixture/data/from_matlab.mat')
        merged_covs = result['covariances'].flatten('F').reshape(-1, 2, 2)
        return GaussianMixture(result['weights'][0],
                               result['means'],
                               merged_covs)
Ejemplo n.º 12
0
    def robber_detected(self, robber_pose):
        """Collapse the filter onto a detected robber's position.

        Replaces the probability with a tight Gaussian centered on the
        detected (x, y) location and marks the filter finished.
        """
        # <>TODO: Figure out better strategy when robber detected
        # Tight single-Gaussian belief at the detection point.
        tight_cov = 0.01 * np.eye(2)
        self.probability = GaussianMixture(1, robber_pose[0:2], tight_cov)
        self.finished = True
        # NOTE: attribute name 'recieved_human_update' [sic] is part of the
        # external interface; do not rename.
        self.recieved_human_update = False
            def update(self, i=0):
                """Regenerate, replot, and re-merge GMs for one frame.

                Args:
                    i: frame index (unused; required by FuncAnimation).

                NOTE(review): weights, means and covariances are free
                variables from an enclosing scope — confirm before reuse.
                """
                # Regenerate GMs
                merged_gauss_2d = GaussianMixture(
                    weights,
                    means,
                    covariances,
                    max_num_mixands=self.num_mixands)
                matlab_merged_gauss_2d = self.parent.matlab_gm(
                    weights,
                    means,
                    covariances,
                    max_num_mixands=self.num_mixands)

                # Replot GMs (removing the previous frame's contours first)
                title = 'Python Merged GM ({} mixands)'.format(
                    merged_gauss_2d.weights.size)
                if hasattr(self, 'old_contour'):
                    merged_gauss_2d.contourf = self.old_contour
                    merged_gauss_2d.plot_remove()
                self.old_contour = merged_gauss_2d.plot(ax=self.axes[1],
                                                        levels=self.levels,
                                                        title=title)

                title = 'Matlab Merged GM ({} mixands)'.format(
                    matlab_merged_gauss_2d.weights.size)
                if hasattr(self, 'old_matlab_contour'):
                    matlab_merged_gauss_2d.contourf = self.old_matlab_contour
                    matlab_merged_gauss_2d.plot_remove()
                self.old_matlab_contour = matlab_merged_gauss_2d.plot(
                    ax=self.axes[2], levels=self.levels, title=title)

                # Decrement mixands (wrapping back to 1 at the max).
                # BUGFIX: np.int was removed in NumPy 1.24; builtin int
                # truncates identically.
                if self.num_mixands == self.max_num_mixands:
                    self.num_mixands = 1
                elif int(self.num_mixands *
                         self.mixand_rate) < self.max_num_mixands:
                    self.num_mixands = int(self.num_mixands *
                                           self.mixand_rate)
                else:
                    self.num_mixands = self.max_num_mixands
Ejemplo n.º 14
0
def camera_test(num_std=1, time_interval=1):
    """Animate repeated 'No Detection' VBIS updates as a camera sweeps.

    A three-mixand GM prior is fused with a moving camera's detection model
    once per animation frame, so probability mass drains out of the camera's
    viewcone over time.

    Args:
        num_std: number of standard deviations used to window the update.
        time_interval: animation frame interval in milliseconds.
    """
    # Three unit-covariance mixands spread along y = 0.
    prior = GaussianMixture([1, 1, 1], np.array([
        [-7, 0],
        [-3, 0],
        [1, 0],
    ]),
                            np.eye(2)[None, :].repeat(3, axis=0))
    bounds = [-12.5, -3.5, 2.5, 3.5]

    min_view_dist = 0.3  # [m]
    max_view_dist = 1.0  # [m]
    detection_model = camera_model_2D(min_view_dist, max_view_dist)

    # Camera sweeps left-to-right along y = 0 with zero heading, cycling.
    trajectory = np.zeros((20, 2))
    ls = np.linspace(-10, 3, 20)
    trajectory = np.hstack((ls[:, None], trajectory))

    class camera_tester(object):
        """Holds figure state and performs one VBIS update per frame."""
        def __init__(self,
                     prior,
                     detection_model,
                     trajectory,
                     num_std=1,
                     bounds=None):
            self.fig = plt.figure(figsize=(16, 8))
            self.gm = prior
            self.detection_model = detection_model
            self.trajectory = itertools.cycle(trajectory)
            self.vb = VariationalBayes()
            self.num_std = num_std
            if bounds is None:
                self.bounds = [-5, -5, 5, 5]
            else:
                self.bounds = bounds

        def update(self, i=0):
            """Move the camera, fuse 'No Detection', and redraw."""
            self.camera_pose = next(self.trajectory)
            logging.info('Moving to pose {}.'.format(self.camera_pose))
            self.detection_model.move(self.camera_pose)

            # Do a VBIS update
            mu, sigma, beta = self.vb.update(measurement='No Detection',
                                             likelihood=detection_model,
                                             prior=self.gm,
                                             use_LWIS=True,
                                             poly=detection_model.poly,
                                             num_std=self.num_std)
            self.gm = GaussianMixture(weights=beta,
                                      means=mu,
                                      covariances=sigma)
            # Log what's going on
            logging.info(self.gm)
            logging.info('Weight sum: {}'.format(beta.sum()))

            self.remove()
            self.plot()

        def plot(self):
            """Draw posterior contours, the camera polygon, and ellipses."""
            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            self.contourf = self.ax.contourf(self.xx,
                                             self.yy,
                                             self.gm.pdf(self.pos),
                                             levels=self.levels,
                                             cmap=plt.get_cmap('jet'))
            # Plot camera
            self.cam_patch = PolygonPatch(self.detection_model.poly,
                                          facecolor='none',
                                          linewidth=2,
                                          edgecolor='white')
            self.ax.add_patch(self.cam_patch)

            # Plot ellipses
            self.ellipse_patches = self.gm.plot_ellipses(
                poly=self.detection_model.poly)

        def plot_setup(self):
            """Create the axes and the gridded space used for contouring."""
            # Define gridded space for graphing
            min_x, max_x = self.bounds[0], self.bounds[2]
            min_y, max_y = self.bounds[1], self.bounds[3]
            res = 30
            # BUGFIX: 1.0 / res — under Python 2 integer division,
            # 1 / res would be 0 and break the mgrid step; 1.0 / res is
            # identical where true division already applies.
            self.xx, self.yy = np.mgrid[min_x:max_x:1.0 / res,
                                        min_y:max_y:1.0 / res]
            pos = np.empty(self.xx.shape + (2, ))
            pos[:, :, 0] = self.xx
            pos[:, :, 1] = self.yy
            self.pos = pos

            # Plot setup
            self.ax = self.fig.add_subplot(111)

            self.ax.set_title('VBIS with camera detection test')
            plt.axis('scaled')
            self.ax.set_xlim([min_x, max_x])
            self.ax.set_ylim([min_y, max_y])

            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            cax = self.contourf = self.ax.contourf(self.xx,
                                                   self.yy,
                                                   self.gm.pdf(self.pos),
                                                   levels=self.levels,
                                                   cmap=plt.get_cmap('jet'))
            self.fig.colorbar(cax)

        def remove(self):
            """Strip the previous frame's artists before redrawing."""
            if hasattr(self, 'cam_patch'):
                self.cam_patch.remove()
                del self.cam_patch

            if hasattr(self, 'ellipse_patches'):
                for patch in self.ellipse_patches:
                    patch.remove()
                del self.ellipse_patches

            if hasattr(self, 'contourf'):
                for collection in self.contourf.collections:
                    collection.remove()
                del self.contourf

    gm = camera_tester(prior, detection_model, trajectory, num_std, bounds)
    logging.info('Initial GM:')
    logging.info(prior)

    ani = animation.FuncAnimation(gm.fig,
                                  gm.update,
                                  interval=time_interval,
                                  repeat=True,
                                  blit=False,
                                  init_func=gm.plot_setup)

    plt.show()
Ejemplo n.º 15
0
def gmm_sm_test(measurement='Outside'):
    """Run one VBIS update against a range-model likelihood and plot it.

    Shows the prior, the measurement likelihood, and the VBIS posterior in
    one figure.

    Args:
        measurement: measurement label to fuse (a class of the range model).
    """
    # Five-component prior laid out in a shallow 'V' shape.
    prior = GaussianMixture(
        weights=[1, 1, 1, 1, 1],
        means=[
            [-2, -4],  # GM1 mean
            [-1, -2],  # GM2 mean
            [0, 0],  # GM3 mean
            [1, -2],  # GM4 mean
            [2, -4],  # GM5 mean
        ],
        covariances=[
            [[0.1, 0], [0, 0.1]],  # GM1 covariance
            [[0.2, 0], [0, 0.2]],  # GM2 covariance
            [[0.3, 0], [0, 0.3]],  # GM3 covariance
            [[0.2, 0], [0, 0.2]],  # GM4 covariance
            [[0.1, 0], [0, 0.1]],  # GM5 covariance
        ])

    # Define sensor likelihood
    brm = range_model()

    # Do a VBIS update
    logging.info('Starting VB update...')
    vb = VariationalBayes()
    mu_hat, var_hat, beta_hat = vb.update(measurement,
                                          brm,
                                          prior,
                                          use_LWIS=True)
    vbis_posterior = GaussianMixture(weights=beta_hat,
                                     means=mu_hat,
                                     covariances=var_hat)

    # Define gridded space for graphing
    min_x, max_x = -5, 5
    min_y, max_y = -5, 5
    res = 100
    # BUGFIX: 1.0 / res — under Python 2 integer division, 1 / res is 0
    # and breaks the mgrid step; identical where true division applies.
    x_space, y_space = np.mgrid[min_x:max_x:1.0 / res,
                                min_y:max_y:1.0 / res]
    pos = np.empty(x_space.shape + (2, ))
    pos[:, :, 0] = x_space
    pos[:, :, 1] = y_space

    # Shared contour levels from each distribution's max density.
    levels_res = 50
    max_prior = np.max(prior.pdf(pos))
    prior_levels = np.linspace(0, max_prior, levels_res)

    brm.probability()
    max_lh = np.max(brm.probs)
    lh_levels = np.linspace(0, max_lh, levels_res)
    max_post = np.max(vbis_posterior.pdf(pos))
    post_levels = np.linspace(0, max_post, levels_res)

    # Plot results
    fig = plt.figure()
    likelihood_label = 'Likelihood of \'{}\''.format(measurement)

    prior_ax = plt.subplot2grid((2, 32), (0, 0), colspan=14)
    prior_cax = plt.subplot2grid((2, 32), (0, 14), colspan=1)
    prior_c = prior_ax.contourf(x_space,
                                y_space,
                                prior.pdf(pos),
                                levels=prior_levels)
    cbar = plt.colorbar(prior_c, cax=prior_cax)
    prior_ax.set_xlabel('x1')
    prior_ax.set_ylabel('x2')
    prior_ax.set_title('Prior Distribution')

    lh_ax = plt.subplot2grid((2, 32), (0, 17), colspan=14)
    lh_cax = plt.subplot2grid((2, 32), (0, 31), colspan=1)
    brm.classes[measurement].plot(ax=lh_ax,
                                  label=likelihood_label,
                                  ls='--',
                                  levels=lh_levels,
                                  show_plot=False,
                                  plot_3D=False)
    # plt.colorbar(sm.probs, cax=lh_cax)
    lh_ax.set_title(likelihood_label)

    posterior_ax = plt.subplot2grid((2, 32), (1, 0), colspan=31)
    posterior_cax = plt.subplot2grid((2, 32), (1, 31), colspan=1)
    posterior_c = posterior_ax.contourf(x_space,
                                        y_space,
                                        vbis_posterior.pdf(pos),
                                        levels=post_levels)
    plt.colorbar(posterior_c, cax=posterior_cax)
    posterior_ax.set_xlabel('x1')
    posterior_ax.set_ylabel('x2')
    posterior_ax.set_title('VBIS Posterior Distribution')

    logging.info(
        'Prior Weights: \n {} \n Means: \n {} \n Variances: \n {} \n'.format(
            prior.weights, prior.means, prior.covariances))
    logging.info(
        'Posterior Weights: \n {} \n Means: \n {} \n Variances: \n {} \n'.
        format(vbis_posterior.weights, vbis_posterior.means,
               vbis_posterior.covariances))

    plt.show()
Ejemplo n.º 16
0
def comparison_2d():
    """Compare a 2-D VB update against Nisar's reference values, then plot.

    Runs a VB update and a VBIS update on a single-Gaussian prior with the
    intrinsic-space likelihood, logs the difference from reference results,
    and plots prior, likelihood, and posterior.
    """
    # Define prior
    prior_mean = np.array([2.3, 1.2])
    prior_var = np.array([[2, 0.6], [0.6, 2]])
    prior = GaussianMixture(1, prior_mean, prior_var)

    # Define sensor likelihood
    sm = intrinsic_space_model()
    measurement = 'Front'
    measurement_i = sm.classes[measurement].id

    # Do a VB update with neutral initial conditions.
    init_mean = np.zeros((1, 2))
    init_var = np.eye(2)
    init_alpha = 0.5
    init_xi = np.ones(5)

    vb = VariationalBayes()
    vb_mean, vb_var, _ = vb.vb_update(measurement, sm, prior, init_mean,
                                      init_var, init_alpha, init_xi)

    # Reference values from Nisar's implementation for regression checking.
    nisar_vb_mean = np.array([1.795546121012238, 2.512627005425541])
    nisar_vb_var = np.array([[0.755723395661314, 0.091742424424428],
                             [0.091742424424428, 0.747611340151417]])
    diff_vb_mean = vb_mean - nisar_vb_mean
    diff_vb_var = vb_var - nisar_vb_var
    logging.info(
        'Nisar\'s VB update had mean difference: \n {}\n and var difference: \n {}\n'
        .format(diff_vb_mean, diff_vb_var))

    vb_mean, vb_var, _ = vb.vbis_update(measurement, sm, prior, init_mean,
                                        init_var, init_alpha, init_xi)
    vb_posterior = GaussianMixture(1, vb_mean, vb_var)

    # Define gridded space for graphing
    min_x, max_x = -5, 5
    min_y, max_y = -5, 5
    res = 200
    # BUGFIX: 1.0 / res — under Python 2 integer division, 1 / res is 0
    # and breaks the mgrid step; identical where true division applies.
    x_space, y_space = np.mgrid[min_x:max_x:1.0 / res,
                                min_y:max_y:1.0 / res]
    pos = np.empty(x_space.shape + (2, ))
    pos[:, :, 0] = x_space
    pos[:, :, 1] = y_space

    # Contour levels scaled to each distribution's max density.
    levels_res = 30
    max_prior = np.max(prior.pdf(pos))
    prior_levels = np.linspace(0, max_prior, levels_res)

    sm.probability()
    max_lh = np.max(sm.probs)
    lh_levels = np.linspace(0, max_lh, levels_res)

    max_post = np.max(vb_posterior.pdf(pos))
    post_levels = np.linspace(0, max_post, levels_res)

    # Plot results
    fig = plt.figure()
    likelihood_label = 'Likelihood of \'{}\''.format(measurement)

    prior_ax = plt.subplot2grid((2, 32), (0, 0), colspan=14)
    prior_cax = plt.subplot2grid((2, 32), (0, 14), colspan=1)
    prior_c = prior_ax.contourf(x_space,
                                y_space,
                                prior.pdf(pos),
                                levels=prior_levels)
    cbar = plt.colorbar(prior_c, cax=prior_cax)
    prior_ax.set_xlabel('x1')
    prior_ax.set_ylabel('x2')
    prior_ax.set_title('Prior Distribution')

    lh_ax = plt.subplot2grid((2, 32), (0, 17), colspan=14)
    lh_cax = plt.subplot2grid((2, 32), (0, 31), colspan=1)
    sm.classes[measurement].plot(ax=lh_ax,
                                 label=likelihood_label,
                                 plot_3D=False,
                                 levels=lh_levels)
    # plt.colorbar(sm.probs, cax=lh_cax)
    lh_ax.set_title(likelihood_label)

    posterior_ax = plt.subplot2grid((2, 32), (1, 0), colspan=31)
    posterior_cax = plt.subplot2grid((2, 32), (1, 31), colspan=1)
    posterior_c = posterior_ax.contourf(x_space,
                                        y_space,
                                        vb_posterior.pdf(pos),
                                        levels=post_levels)
    plt.colorbar(posterior_c, cax=posterior_cax)
    posterior_ax.set_xlabel('x1')
    posterior_ax.set_ylabel('x2')
    posterior_ax.set_title('VB Posterior Distribution')

    plt.show()
Ejemplo n.º 17
0
def comparison_1d():
    """Compare VB and VBIS 1D updates against Nisar's reference values.

    Runs both update types on a 'Slow' speed measurement with a fixed
    Gaussian prior, logs the differences from precomputed reference
    results, and plots the likelihood, the prior, and both posteriors.
    """
    # Prior: a single 1D Gaussian mixand
    mean0, var0 = 0.3, 0.01
    lo, hi, n_points = -5, 5, 10000
    prior = GaussianMixture(1, mean0, var0)
    x_space = np.linspace(lo, hi, n_points)

    # Sensor likelihood: multimodal softmax speed model
    sm = speed_model()
    measurement = 'Slow'
    measurement_i = sm.class_labels.index(measurement)  # also validates label

    # Initial variational parameters shared by both updates
    init_mean, init_var = 0, 1
    init_alpha, init_xi = 0.5, np.ones(4)
    vb = VariationalBayes()

    # --- Plain VB update, compared to Nisar's reference result ---
    vb_mean, vb_var, _ = vb.vb_update(measurement, sm, prior, init_mean,
                                      init_var, init_alpha, init_xi)
    vb_posterior = GaussianMixture(1, vb_mean, vb_var)
    nisar_vb_mean, nisar_vb_var = 0.131005297841171, 6.43335516254277e-05
    diff_vb_mean = vb_mean - nisar_vb_mean
    diff_vb_var = vb_var - nisar_vb_var
    logging.info(
        'Nisar\'s VB update had mean difference {} and var difference {}\n'.
        format(diff_vb_mean, diff_vb_var))

    # --- VBIS update, compared to Nisar's reference result ---
    vbis_mean, vbis_var, _ = vb.vbis_update(measurement, sm, prior, init_mean,
                                            init_var, init_alpha, init_xi)
    vbis_posterior = GaussianMixture(1, vbis_mean, vbis_var)
    nisar_vbis_mean, nisar_vbis_var = 0.154223416817080, 0.00346064073274943
    diff_vbis_mean = vbis_mean - nisar_vbis_mean
    diff_vbis_var = vbis_var - nisar_vbis_var
    logging.info(
        'Nisar\'s VBIS update had mean difference {} and var difference {}\n'.
        format(diff_vbis_mean, diff_vbis_var))

    # --- Plot likelihood, prior and both posteriors on one axis ---
    likelihood_label = 'Likelihood of \'{}\''.format(measurement)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    sm.classes[measurement].plot(ax=ax,
                                 fill_between=False,
                                 label=likelihood_label,
                                 ls='--')
    ax.plot(x_space, prior.pdf(x_space), lw=1, label='prior pdf', c='grey',
            ls='--')

    def _shade(posterior, label, color):
        # Posterior curve with a translucent fill beneath it.
        y = posterior.pdf(x_space)
        ax.plot(x_space, y, lw=2, label=label, c=color)
        ax.fill_between(x_space, 0, y, alpha=0.2, facecolor=color)

    _shade(vb_posterior, 'VB posterior', 'r')
    _shade(vbis_posterior, 'VBIS Posterior', 'g')

    ax.set_title('VBIS Update')
    ax.legend()
    ax.set_xlim([0, 0.4])
    ax.set_ylim([0, 7])
    plt.show()
Ejemplo n.º 18
0
    def update(self,
               measurement,
               likelihood,
               prior,
               use_LWIS=False,
               poly=None,
               num_std=1):
        """VB update using Gaussian mixtures and multimodal softmax.

        This uses Variational Bayes with Importance Sampling (VBIS) for
        each mixand-softmax pair available.

        Parameters
        ----------
        measurement : str
            Label of the observed softmax class.
        likelihood : Softmax
            Multimodal softmax model; each subclass of its `measurement`
            class is fused with each prior mixand.
        prior : GaussianMixture
            Prior belief over the state.
        use_LWIS : bool
            Forwarded to `vbis_update` to skip the VB step.
        poly : polygon, optional
            If given, only prior mixands whose `num_std`-sigma ellipses
            intersect `poly` are updated; the rest pass through unchanged.
        num_std : int
            Number of standard deviations for the intersection ellipses.

        Returns
        -------
        (mu_hat, var_hat, beta_hat) when no mixand intersects `poly`.
        NOTE(review): the main path below builds mu_hat/var_hat/beta_hat
        but ends without a return statement -- looks like truncated
        source; confirm against the original implementation.
        """
        # If we have a polygon, update only the mixands intersecting with it
        if poly is None:
            update_intersections_only = False
        else:
            update_intersections_only = True

        # h indexes the next free slot in the posterior parameter arrays
        h = 0
        relevant_subclasses = likelihood.classes[measurement].subclasses
        num_relevant_subclasses = len(relevant_subclasses)

        # Use intersecting priors only
        if update_intersections_only:
            other_priors = prior.copy()
            weights = []
            means = []
            covariances = []
            mixand_ids = []
            ellipses = prior.std_ellipses(num_std)

            any_intersection = False
            for i, ellipse in enumerate(ellipses):
                try:
                    has_intersection = poly.intersects(ellipse)
                except ValueError:
                    # Degenerate geometry: conservatively assume overlap
                    logging.warn('Null geometry error! Defaulting to true.')
                    has_intersection = True

                if has_intersection:
                    # Get parameters for intersecting priors
                    mixand_ids.append(i)
                    weights.append(prior.weights[i])
                    means.append(prior.means[i])
                    covariances.append(prior.covariances[i])
                    any_intersection = True

            if not any_intersection:
                # Nothing overlaps the polygon: return the prior unchanged
                logging.debug('No intersection with any ellipse.')
                mu_hat = other_priors.means
                var_hat = other_priors.covariances
                beta_hat = other_priors.weights
                return mu_hat, var_hat, beta_hat

            # Remove these from the other priors
            other_priors.weights = \
                np.delete(other_priors.weights, mixand_ids, axis=0)
            other_priors.means = \
                np.delete(other_priors.means, mixand_ids, axis=0)
            other_priors.covariances = \
                np.delete(other_priors.covariances, mixand_ids, axis=0)

            # Retain total weight of intersection weights for renormalization
            max_intersecion_weight = sum(weights)

            # Create new prior
            prior = GaussianMixture(weights, means, covariances)
            logging.debug(
                'Using only mixands {} for VBIS fusion. Total weight {}'.
                format(mixand_ids, max_intersecion_weight))

        # Parameters for all new mixands (one slot per subclass/mixand pair)
        K = num_relevant_subclasses * prior.weights.size
        mu_hat = np.zeros((K, prior.means.shape[1]))
        var_hat = np.zeros(
            (K, prior.covariances.shape[1], prior.covariances.shape[2]))
        log_beta_hat = np.zeros(K)  # Weight estimates

        for u, mixand_weight in enumerate(prior.weights):
            mix_sm_corr = 0

            # Check to see if the mixand is completely contained within
            # the softmax class (i.e. doesn't need an update)
            mixand = GaussianMixture(1, prior.means[u], prior.covariances[u])
            mixand_samples = mixand.rvs(self.num_mixand_samples)
            p_hat_ru_samples = likelihood.classes[measurement].probability(
                state=mixand_samples)
            mix_sm_corr = np.sum(p_hat_ru_samples) / self.num_mixand_samples

            if mix_sm_corr > self.mix_sm_corr_thresh:
                logging.debug(
                    'Mixand {}\'s correspondence with {} was {},'
                    'above the threshold of {}, so VBIS was skipped.'.format(
                        u, measurement, mix_sm_corr, self.mix_sm_corr_thresh))

                # Append the prior's parameters to the mixand parameter lists
                mu_hat[h, :] = prior.means[u]
                var_hat[h, :] = prior.covariances[u]
                log_beta_hat[h] = np.log(mixand_weight)

                h += 1
                continue

            # Otherwise complete the full VBIS update
            # (sorted for deterministic subclass order; Python 2 iteritems)
            ordered_subclasses = iter(sorted(relevant_subclasses.iteritems()))
            for label, subclass in ordered_subclasses:

                # Compute \hat{P}_s(r|u)
                mixand_samples = mixand.rvs(self.num_mixand_samples)
                p_hat_ru_samples = subclass.probability(state=mixand_samples)
                p_hat_ru_sampled = np.sum(
                    p_hat_ru_samples) / self.num_mixand_samples

                mu_vbis, var_vbis, log_c_hat = \
                    self.vbis_update(label, subclass.softmax_collection,
                                     mixand, use_LWIS=use_LWIS)

                # Compute log odds of r given u
                if np.isnan(log_c_hat):  # from LWIS update
                    log_p_hat_ru = np.log(p_hat_ru_sampled)
                else:
                    # Take the larger of the VB and sampled estimates
                    log_p_hat_ru = np.max(
                        (log_c_hat, np.log(p_hat_ru_sampled)))

                # Find log of P(u,r|D_k) \approxequal \hat{B}_{ur}
                log_beta_vbis = np.log(mixand_weight) + log_p_hat_ru

                # Symmetrize var_vbis
                var_vbis = 0.5 * (var_vbis.T + var_vbis)

                # Update estimate values
                log_beta_hat[h] = log_beta_vbis
                mu_hat[h, :] = mu_vbis
                var_hat[h, :] = var_vbis
                h += 1

        # Renormalize and truncate (based on weight threshold);
        # subtracting the max first keeps exp() numerically stable
        log_beta_hat = log_beta_hat - np.max(log_beta_hat)
        unnormalized_beta_hats = np.exp(log_beta_hat)
        beta_hat = np.exp(log_beta_hat) / np.sum(np.exp(log_beta_hat))

        # Reattach untouched prior values
        if update_intersections_only:
            # Rescale by the intersecting mixands' original total weight
            beta_hat = unnormalized_beta_hats * max_intersecion_weight
            beta_hat = np.hstack((other_priors.weights, beta_hat))
            mu_hat = np.vstack((other_priors.means, mu_hat))
            var_hat = np.concatenate((other_priors.covariances, var_hat))

            # Shrink mu, var and beta if necessary (h slots actually used)
            h += other_priors.weights.size
            beta_hat = beta_hat[:h]
            mu_hat = mu_hat[:h]
            var_hat = var_hat[:h]

            beta_hat /= beta_hat.sum()
        else:
            # Shrink mu, var and beta if necessary
            beta_hat = beta_hat[:h]
            mu_hat = mu_hat[:h]
            var_hat = var_hat[:h]

        # Threshold based on weights
        mu_hat = mu_hat[beta_hat > self.weight_threshold, :]
        var_hat = var_hat[beta_hat > self.weight_threshold, :]
        beta_hat = beta_hat[beta_hat > self.weight_threshold]

        # Check if covariances are positive semidefinite
        for i, var in enumerate(var_hat):
            try:
                assert np.all(np.linalg.det(var) > 0)
            except AssertionError, e:  # Python 2 except syntax
                logging.warn('Following variance is not positive '
                             'semidefinite: \n{}'.format(var))
                # Fall back to a small isotropic covariance
                var_hat[i] = np.eye(var.shape[0]) * 10**-3
    def test_merging(self, num_mixands=100, max_num_mixands=10, spread=4,
                     speak=False):
        """Visually compare Python and Matlab Gaussian mixture merging.

        Generates a random 2D GM with `num_mixands` components, merges it
        down to `max_num_mixands` with both the Python GaussianMixture
        implementation and the Matlab reference, and plots original /
        Python-merged / Matlab-merged side by side.  If `max_num_mixands`
        is None, an animation sweeps the target mixand count from 1 up
        to `num_mixands` instead.  `speak` is accepted for interface
        compatibility but currently unused.
        """
        if max_num_mixands is None:
            animate = True
            max_num_mixands = num_mixands  # animation sweeps up to full size
        else:
            animate = False

        # Generate the unmerged and merged gaussians
        weights, means, covariances = generate_random_params(num_mixands,
                                                             ndims=2,
                                                             spread=spread)

        # Copies protect the shared parameter arrays from in-place merging
        unmerged_gauss_2d = GaussianMixture(weights.copy(),
                                            means.copy(),
                                            covariances.copy(),
                                            max_num_mixands=len(weights))
        merged_gauss_2d = GaussianMixture(weights.copy(),
                                          means.copy(),
                                          covariances.copy(),
                                          max_num_mixands=max_num_mixands)
        matlab_merged_gauss_2d = self.matlab_gm(weights.copy(),
                                                means.copy(),
                                                covariances.copy(),
                                                max_num_mixands)
        mixtures = {'unmerged': unmerged_gauss_2d,
                    'merged': merged_gauss_2d,
                    'matlab merged': matlab_merged_gauss_2d
                    }

        # Setup figure and shared contour levels (20% headroom over peak)
        fig = plt.figure(figsize=(18,6))
        axes = []
        _, max_1 = unmerged_gauss_2d.max_point_by_grid()
        _, max_2 = merged_gauss_2d.max_point_by_grid()
        _, max_3 = matlab_merged_gauss_2d.max_point_by_grid()
        max_prob = np.max((max_1, max_2, max_3))
        levels = np.linspace(0, max_prob * 1.2, 50)

        # Plot all three
        ax = fig.add_subplot(131)
        axes.append(ax)
        title = 'Original GM ({} mixands)'\
                .format(unmerged_gauss_2d.weights.size)
        unmerged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        ax = fig.add_subplot(132)
        axes.append(ax)
        title = 'Python Merged GM ({} mixands)'\
                .format(merged_gauss_2d.weights.size)
        merged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        ax = fig.add_subplot(133)
        axes.append(ax)
        title = 'Matlab Merged GM ({} mixands)'\
                .format(matlab_merged_gauss_2d.weights.size)
        matlab_merged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        # Add a colorbar shared by all three subplots
        fig.subplots_adjust(right=0.85)
        cbar_ax = fig.add_axes([0.875, 0.1, 0.025, 0.8])
        fig.colorbar(unmerged_gauss_2d.contourf, cax=cbar_ax)

        class GMAnimation(object):
            """Animation state: re-merges the GM at a growing mixand count."""
            def __init__(self, parent, mixand_rate=2, levels=None, axes=None):
                self.max_num_mixands = mixtures['unmerged'].weights.size
                self.num_mixands = 1  # start fully merged
                self.mixand_rate = mixand_rate  # growth factor per frame
                self.levels = levels
                self.axes = axes
                self.parent = parent  # gives access to matlab_gm

            def update(self,i=0):
                # Regenerate GMs at the current target mixand count
                merged_gauss_2d = GaussianMixture(weights, means, covariances,
                                                  max_num_mixands=self.num_mixands)
                matlab_merged_gauss_2d = self.parent.matlab_gm(weights,
                                                   means,
                                                   covariances,
                                                   max_num_mixands=self.num_mixands)

                # Replot GMs, clearing the previous frame's contours first
                title = 'Python Merged GM ({} mixands)'.format(merged_gauss_2d.weights.size)
                if hasattr(self,'old_contour'):
                    merged_gauss_2d.contourf = self.old_contour
                    merged_gauss_2d.plot_remove()
                self.old_contour = merged_gauss_2d.plot(ax=self.axes[1], levels=self.levels, title=title)

                title = 'Matlab Merged GM ({} mixands)'.format(matlab_merged_gauss_2d.weights.size)
                if hasattr(self,'old_matlab_contour'):
                    matlab_merged_gauss_2d.contourf = self.old_matlab_contour
                    matlab_merged_gauss_2d.plot_remove()
                self.old_matlab_contour = matlab_merged_gauss_2d.plot(ax=self.axes[2], levels=self.levels, title=title)

                # Grow the mixand count geometrically, wrapping back to 1.
                # int() replaces the np.int alias removed in NumPy >= 1.24;
                # truncation behavior is identical.
                if self.num_mixands == self.max_num_mixands:
                    self.num_mixands = 1
                elif int(self.num_mixands * self.mixand_rate) < self.max_num_mixands:
                    self.num_mixands = int(self.num_mixands * self.mixand_rate)
                else:
                    self.num_mixands = self.max_num_mixands

            def compare_results():
                pass

        if animate:
            gm_ani = GMAnimation(self, mixand_rate=2, levels=levels, axes=axes)
            # Keep a reference so the animation isn't garbage collected
            ani = animation.FuncAnimation(fig, gm_ani.update,
                interval=100,
                repeat=True,
                blit=False,
                )
        else:
            self.diff(merged_gauss_2d, matlab_merged_gauss_2d)
        plt.show()
        self.check_diff()
    def test_merging(self,
                     num_mixands=100,
                     max_num_mixands=10,
                     spread=4,
                     speak=False):
        """Visually compare Python and Matlab Gaussian mixture merging.

        Generates a random 2D GM with `num_mixands` components, merges it
        down to `max_num_mixands` with both the Python GaussianMixture
        implementation and the Matlab reference, and plots original /
        Python-merged / Matlab-merged side by side.  If `max_num_mixands`
        is None, an animation sweeps the target mixand count from 1 up
        to `num_mixands` instead.  `speak` is accepted for interface
        compatibility but currently unused.
        """
        if max_num_mixands is None:
            animate = True
            max_num_mixands = num_mixands  # animation sweeps up to full size
        else:
            animate = False

        # Generate the unmerged and merged gaussians
        weights, means, covariances = generate_random_params(num_mixands,
                                                             ndims=2,
                                                             spread=spread)

        # Copies protect the shared parameter arrays from in-place merging
        unmerged_gauss_2d = GaussianMixture(weights.copy(),
                                            means.copy(),
                                            covariances.copy(),
                                            max_num_mixands=len(weights))
        merged_gauss_2d = GaussianMixture(weights.copy(),
                                          means.copy(),
                                          covariances.copy(),
                                          max_num_mixands=max_num_mixands)
        matlab_merged_gauss_2d = self.matlab_gm(weights.copy(), means.copy(),
                                                covariances.copy(),
                                                max_num_mixands)
        mixtures = {
            'unmerged': unmerged_gauss_2d,
            'merged': merged_gauss_2d,
            'matlab merged': matlab_merged_gauss_2d
        }

        # Setup figure and shared contour levels (20% headroom over peak)
        fig = plt.figure(figsize=(18, 6))
        axes = []
        _, max_1 = unmerged_gauss_2d.max_point_by_grid()
        _, max_2 = merged_gauss_2d.max_point_by_grid()
        _, max_3 = matlab_merged_gauss_2d.max_point_by_grid()
        max_prob = np.max((max_1, max_2, max_3))
        levels = np.linspace(0, max_prob * 1.2, 50)

        # Plot all three
        ax = fig.add_subplot(131)
        axes.append(ax)
        title = 'Original GM ({} mixands)'\
                .format(unmerged_gauss_2d.weights.size)
        unmerged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        ax = fig.add_subplot(132)
        axes.append(ax)
        title = 'Python Merged GM ({} mixands)'\
                .format(merged_gauss_2d.weights.size)
        merged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        ax = fig.add_subplot(133)
        axes.append(ax)
        title = 'Matlab Merged GM ({} mixands)'\
                .format(matlab_merged_gauss_2d.weights.size)
        matlab_merged_gauss_2d.plot(ax=ax, levels=levels, title=title)

        # Add a colorbar shared by all three subplots
        fig.subplots_adjust(right=0.85)
        cbar_ax = fig.add_axes([0.875, 0.1, 0.025, 0.8])
        fig.colorbar(unmerged_gauss_2d.contourf, cax=cbar_ax)

        class GMAnimation(object):
            """Animation state: re-merges the GM at a growing mixand count."""
            def __init__(self, parent, mixand_rate=2, levels=None, axes=None):
                self.max_num_mixands = mixtures['unmerged'].weights.size
                self.num_mixands = 1  # start fully merged
                self.mixand_rate = mixand_rate  # growth factor per frame
                self.levels = levels
                self.axes = axes
                self.parent = parent  # gives access to matlab_gm

            def update(self, i=0):
                # Regenerate GMs at the current target mixand count
                merged_gauss_2d = GaussianMixture(
                    weights,
                    means,
                    covariances,
                    max_num_mixands=self.num_mixands)
                matlab_merged_gauss_2d = self.parent.matlab_gm(
                    weights,
                    means,
                    covariances,
                    max_num_mixands=self.num_mixands)

                # Replot GMs, clearing the previous frame's contours first
                title = 'Python Merged GM ({} mixands)'.format(
                    merged_gauss_2d.weights.size)
                if hasattr(self, 'old_contour'):
                    merged_gauss_2d.contourf = self.old_contour
                    merged_gauss_2d.plot_remove()
                self.old_contour = merged_gauss_2d.plot(ax=self.axes[1],
                                                        levels=self.levels,
                                                        title=title)

                title = 'Matlab Merged GM ({} mixands)'.format(
                    matlab_merged_gauss_2d.weights.size)
                if hasattr(self, 'old_matlab_contour'):
                    matlab_merged_gauss_2d.contourf = self.old_matlab_contour
                    matlab_merged_gauss_2d.plot_remove()
                self.old_matlab_contour = matlab_merged_gauss_2d.plot(
                    ax=self.axes[2], levels=self.levels, title=title)

                # Grow the mixand count geometrically, wrapping back to 1.
                # int() replaces the np.int alias removed in NumPy >= 1.24;
                # truncation behavior is identical.
                if self.num_mixands == self.max_num_mixands:
                    self.num_mixands = 1
                elif int(self.num_mixands *
                         self.mixand_rate) < self.max_num_mixands:
                    self.num_mixands = int(self.num_mixands *
                                           self.mixand_rate)
                else:
                    self.num_mixands = self.max_num_mixands

            def compare_results():
                pass

        if animate:
            gm_ani = GMAnimation(self, mixand_rate=2, levels=levels, axes=axes)
            # Keep a reference so the animation isn't garbage collected
            ani = animation.FuncAnimation(
                fig,
                gm_ani.update,
                interval=100,
                repeat=True,
                blit=False,
            )
        else:
            self.diff(merged_gauss_2d, matlab_merged_gauss_2d)
        plt.show()
        self.check_diff()
Ejemplo n.º 21
0
    def _update_combined(self, sensors, robbers):
        """Update the `combined` filter.

        Merges the per-robber filters into the single `combined` filter:
        for particle filters, by concatenating particle arrays; for
        Gaussian-mixture filters, by stacking the mixand parameters of
        every unfinished filter into one mixture.

        Parameters
        ----------
        sensors : dict
            A collection of all sensors to be updated.
        robbers : dict
            Robber robots whose per-name filters feed the combined filter.
        """
        if self.filter_type == 'particle':

            # Remove all particles from combined filter
            # <>TODO: correct number of particles based on state
            self.filters['combined'].particles = np.zeros((1, 5))

            # Add all particles from missing robots to combined filter
            for robber in robbers.values():
                self.filters['combined'].n_particles += \
                    self.filters[robber.name].n_particles
                self.filters['combined'].particles = \
                    np.append(self.filters['combined'].particles,
                              self.filters[robber.name].particles,
                              axis=0)
            # Recount directly from the array (includes the zero seed row)
            self.filters['combined'].n_particles = \
                len(self.filters['combined'].particles)

            # Reset the human sensor
            sensors['human'].utterance = ''
            sensors['human'].target = ''
        else:
            # Gaussian-mixture filters: count mixands across every
            # unfinished per-robber filter to pre-allocate arrays.
            # (Python 2 iteritems.)
            num_mixands = 0
            all_done = True
            for label, filter_ in self.filters.iteritems():
                if label == 'combined' or filter_.finished:
                    continue
                all_done = False
                num_mixands += filter_.probability.num_mixands
                # <>TODO: Don't find ndims each time
                ndims = filter_.probability.ndims
            if all_done:
                # Every target found: collapse to a tight default GM
                # <>TODO: Fix hardcoded dimensions, clean it up
                gm = GaussianMixture(1, [0, 0], 0.01 * np.eye(2))
                self.filters['combined'].probability = gm
                return
            weights = np.empty((num_mixands))
            means = np.empty((num_mixands, ndims))
            covariances = np.empty((num_mixands, ndims, ndims))

            # Load parameter arrays, one contiguous slice per filter
            i = 0
            for label, filter_ in self.filters.iteritems():
                if label == 'combined' or filter_.finished:
                    continue
                size = filter_.probability.num_mixands
                weights[i:i + size] = filter_.probability.weights
                means[i:i + size] = filter_.probability.means
                covariances[i:i + size] = filter_.probability.covariances
                i += size

            combined_gm = GaussianMixture(weights,
                                          means,
                                          covariances,
                                          max_num_mixands=num_mixands)
            self.filters['combined'].probability = combined_gm
Ejemplo n.º 22
0
    class camera_tester(object):
        """Interactive tester: fuse repeated 'No Detection' camera updates.

        Steps a detection model along a cyclic trajectory and performs a
        VBIS update of the Gaussian-mixture belief at each pose,
        replotting the posterior and the camera footprint each frame.
        """
        def __init__(self,
                     prior,
                     detection_model,
                     trajectory,
                     num_std=1,
                     bounds=None):
            # prior: GaussianMixture belief, replaced on every update
            # detection_model: camera model exposing .move() and .poly
            # trajectory: iterable of camera poses, cycled forever
            # num_std: sigma-ellipse count forwarded to the VB update
            # bounds: [min_x, min_y, max_x, max_y] plot window
            self.fig = plt.figure(figsize=(16, 8))
            self.gm = prior
            self.detection_model = detection_model
            self.trajectory = itertools.cycle(trajectory)
            self.vb = VariationalBayes()
            self.num_std = num_std
            if bounds is None:
                self.bounds = [-5, -5, 5, 5]
            else:
                self.bounds = bounds

        def update(self, i=0):
            """Advance one pose, fuse a 'No Detection' measurement, replot."""
            self.camera_pose = next(self.trajectory)
            logging.info('Moving to pose {}.'.format(self.camera_pose))
            self.detection_model.move(self.camera_pose)

            # Do a VBIS update
            # NOTE(review): `likelihood`/`poly` use the enclosing scope's
            # `detection_model`, not self.detection_model -- same object
            # at construction time, but confirm this is intended.
            mu, sigma, beta = self.vb.update(measurement='No Detection',
                                             likelihood=detection_model,
                                             prior=self.gm,
                                             use_LWIS=True,
                                             poly=detection_model.poly,
                                             num_std=self.num_std)
            self.gm = GaussianMixture(weights=beta,
                                      means=mu,
                                      covariances=sigma)
            # Log what's going on
            logging.info(self.gm)
            logging.info('Weight sum: {}'.format(beta.sum()))

            self.remove()
            self.plot()

        def plot(self):
            """Draw the posterior contours, camera polygon and ellipses."""
            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            self.contourf = self.ax.contourf(self.xx,
                                             self.yy,
                                             self.gm.pdf(self.pos),
                                             levels=self.levels,
                                             cmap=plt.get_cmap('jet'))
            # Plot camera
            self.cam_patch = PolygonPatch(self.detection_model.poly,
                                          facecolor='none',
                                          linewidth=2,
                                          edgecolor='white')
            self.ax.add_patch(self.cam_patch)

            # Plot ellipses
            self.ellipse_patches = self.gm.plot_ellipses(
                poly=self.detection_model.poly)

        def plot_setup(self):
            """One-time figure/axis/grid setup; call before plotting."""
            # Define gridded space for graphing
            # NOTE(review): `1 / res` assumes true division (e.g.
            # `from __future__ import division` under Python 2) -- under
            # classic Py2 integer division the step would be 0; confirm.
            min_x, max_x = self.bounds[0], self.bounds[2]
            min_y, max_y = self.bounds[1], self.bounds[3]
            res = 30
            self.xx, self.yy = np.mgrid[min_x:max_x:1 / res,
                                        min_y:max_y:1 / res]
            pos = np.empty(self.xx.shape + (2, ))
            pos[:, :, 0] = self.xx
            pos[:, :, 1] = self.yy
            self.pos = pos

            # Plot setup
            self.ax = self.fig.add_subplot(111)

            self.ax.set_title('VBIS with camera detection test')
            plt.axis('scaled')
            self.ax.set_xlim([min_x, max_x])
            self.ax.set_ylim([min_y, max_y])

            levels_res = 50
            self.levels = np.linspace(0, np.max(self.gm.pdf(self.pos)),
                                      levels_res)
            cax = self.contourf = self.ax.contourf(self.xx,
                                                   self.yy,
                                                   self.gm.pdf(self.pos),
                                                   levels=self.levels,
                                                   cmap=plt.get_cmap('jet'))
            self.fig.colorbar(cax)

        def remove(self):
            """Remove previously drawn artists before the next frame."""
            if hasattr(self, 'cam_patch'):
                self.cam_patch.remove()
                del self.cam_patch

            if hasattr(self, 'ellipse_patches'):
                for patch in self.ellipse_patches:
                    patch.remove()
                del self.ellipse_patches

            if hasattr(self, 'contourf'):
                for collection in self.contourf.collections:
                    collection.remove()
                del self.contourf
Ejemplo n.º 23
0
    def vbis_update(self,
                    measurement,
                    likelihood,
                    prior,
                    init_mean=0,
                    init_var=1,
                    init_alpha=0.5,
                    init_xi=1,
                    num_samples=None,
                    use_LWIS=False):
        """VB update with importance sampling for Gaussian and Softmax.

        Runs a variational Bayes update (or, with `use_LWIS`, skips it
        and uses the prior mean directly), then corrects the result with
        self-normalized importance sampling from a Gaussian proposal
        centered on the VB mean with the prior's covariance.

        Parameters
        ----------
        measurement : str
            Label of the observed softmax (sub)class.
        likelihood : Softmax
            Softmax model containing `measurement` among its classes or
            subclasses.
        prior : GaussianMixture
            Prior; its first mean/covariance seed the proposal.
        init_mean, init_var, init_alpha, init_xi :
            Initial variational parameters forwarded to `vb_update`.
        num_samples : int, optional
            Importance sample count; defaults to
            `self.num_importance_samples`.
        use_LWIS : bool
            If True, skip the VB step (`log_c_hat` is returned as NaN).

        Returns
        -------
        (mu_post_vbis, var_post_vbis, log_c_hat)
            Posterior mean and covariance (collapsed to scalars in the
            1D case) and the VB log normalizer (NaN under LWIS).
        """
        if num_samples is None:
            num_samples = self.num_importance_samples

        if use_LWIS:
            # LWIS: proposal mean comes straight from the prior
            q_mu = np.asarray(prior.means[0])
            log_c_hat = np.nan  # flags the skipped VB step to callers
        else:
            # Use VB update to find the proposal mean
            q_mu, var_VB, log_c_hat = self.vb_update(measurement, likelihood,
                                                     prior, init_mean,
                                                     init_var, init_alpha,
                                                     init_xi)

        # Proposal covariance always comes from the prior
        q_var = np.asarray(prior.covariances[0])

        # Importance distribution
        q = GaussianMixture(1, q_mu, q_var)

        # Sampled points from the proposal
        # (was preceded by a dead `w = np.zeros(...)` that was always
        # overwritten below -- removed)
        x = np.asarray(q.rvs(size=num_samples))
        if hasattr(likelihood, 'subclasses'):
            measurement_class = likelihood.subclasses[measurement]
        else:
            measurement_class = likelihood.classes[measurement]

        # Self-normalized importance weights
        w = prior.pdf(x) * measurement_class.probability(state=x) / q.pdf(x)
        w /= np.sum(w)  # Normalize weights

        # Weighted sample mean
        mu_hat = np.sum(x.T * w, axis=-1)

        # Weighted second moment minus outer product of the mean
        # <>TODO: optimize this
        var_hat = np.zeros_like(np.asarray(q_var))
        for i in range(num_samples):
            x_i = np.asarray(x[i])
            var_hat = var_hat + w[i] * np.outer(x_i, x_i)
        var_hat -= np.outer(mu_hat, mu_hat)

        # Ensure properly formatted output (scalars for the 1D case)
        if mu_hat.size == 1 and mu_hat.ndim > 0:
            mu_post_vbis = mu_hat[0]
        else:
            mu_post_vbis = mu_hat
        if var_hat.size == 1:
            var_post_vbis = var_hat[0][0]
        else:
            var_post_vbis = var_hat

        logging.debug(
            'VBIS update found mean of {} and variance of {}.'.format(
                mu_post_vbis, var_post_vbis))

        return mu_post_vbis, var_post_vbis, log_c_hat

    def remove(self):
        """Remove plotted contour collections and ellipse patches, if any.

        Safe to call when nothing has been drawn yet; cached references
        are deleted so the next `plot` starts clean.
        """
        _absent = object()  # sentinel distinguishes 'missing' from None

        contourf = getattr(self, 'contourf', _absent)
        if contourf is not _absent:
            for coll in contourf.collections:
                coll.remove()
            del self.contourf

        patches = getattr(self, 'ellipse_patches', _absent)
        if patches is not _absent:
            for ellipse in patches:
                ellipse.remove()
            del self.ellipse_patches


if __name__ == '__main__':
    # Unit-covariance 2D Gaussian centered at the origin
    d = GaussianMixture(1, [0, 0], [[1, 0], [0, 1]])
    filter_ = type('test', (object, ), {'probability': d})()
    pl = ProbabilityLayer(d, z_levels=50, alpha=1)

    # Means trace a diamond around the origin (first point repeated to
    # close the loop); the layer cycles through them indefinitely.
    centers = [[2, 0], [1, 1], [0, 2], [-1, 1], [-2, 0],
               [-1, -1], [0, -2], [1, -1], [2, 0]]
    test_probability = [GaussianMixture(1, c, [[1, 0], [0, 1]])
                        for c in centers]
    pl.test_probability = itertools.cycle(test_probability)