def state_list():
    return [
        randvars.Categorical(
            probabilities=np.ones(10) / 10, support=np.random.rand(10, 2)
        )
        for _ in range(20)
    ]
Example #2
def test_pmf_valueerror():
    """If a PMF has string-valued support, its pmf cannot be evaluated at an integer.

    This value error is intended to guard against the issue presented in
    https://stackoverflow.com/questions/45020217/numpy-where-function-throws-a-futurewarning-returns-scalar-instead-of-list
    """
    categ = randvars.Categorical(probabilities=[0.5, 0.5], support=["a", "b"])
    with pytest.raises(ValueError):
        categ.pmf(2)
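
For contrast, a minimal sketch of pmf evaluation on numeric support, assuming (as the tests above suggest) that the pmf of a support point is its probability and that any other point evaluates to zero:

import numpy as np
from probnum import randvars

numeric_categ = randvars.Categorical(
    probabilities=[0.5, 0.5], support=np.array([0.0, 1.0])
)
np.testing.assert_almost_equal(numeric_categ.pmf(1.0), 0.5)
np.testing.assert_almost_equal(numeric_categ.pmf(2.0), 0.0)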
Example #3
def test_pmf_zero(categ):
    """Make a new Categorical RV that excludes the final point and check that the pmf
    rightfully evaluates to zero."""

    new_categ = randvars.Categorical(
        support=categ.support[:-1],
        probabilities=categ.probabilities[:-1],
    )
    zero_pmf_value = new_categ.pmf(x=categ.support[-1])
    np.testing.assert_almost_equal(zero_pmf_value, 0.0)
    def filter_step(self, start, stop, randvar, data):
        """Perform a particle filter step.

        This method implements sequential importance (re)sampling.

        It consists of the following steps:
        1. Propagating the "past" particles through the dynamics model.
        2. Computing a "proposal" random variable.
        This is either the prior dynamics model or the output of a filter step
        of an (approximate) Gaussian filter.
        3. Sample from the proposal random variable. This is the "new" particle.
        4. Propagate the particle through the measurement model.
        This is required in order to evaluate the PDF of the resulting RV at
        the data. If this is small, the weight of the particle will be small.
        5. Compute weights ("event probabilities") of the new particle.
        This requires evaluating the PDFs of all three RVs (dynamics, proposal, measurement).

        After this is done for all particles, the weights are normalized in order to sum to 1.
        If the effective number of particles is low, the particles are resampled.
        """
        new_weights = randvar.probabilities.copy()
        new_support = randvar.support.copy()

        for idx, (particle, weight) in enumerate(zip(new_support,
                                                     new_weights)):

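            # Step 1: propagate the "past" particle through the dynamics model.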
            dynamics_rv, _ = self.dynamics_model.forward_realization(
                particle, t=start, dt=(stop - start))
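            # Steps 2-5: build the proposal, sample the new particle, and compute its weight.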
            proposal_state, proposal_weight = self.compute_new_particle(
                data, stop, dynamics_rv)
            new_support[idx] = proposal_state
            new_weights[idx] = proposal_weight

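        # Normalize the weights so that they sum to 1.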
        new_weights = new_weights / np.sum(new_weights)
        new_rv = randvars.Categorical(
            support=new_support,
            probabilities=new_weights,
            random_state=self.random_state,
        )

        if self.with_resampling:
            if effective_number_of_events(
                    new_rv) < self.min_effective_num_of_particles:
                new_rv = new_rv.resample()

        return new_rv, {}
    def filter_generator(self, regression_problem: problems.RegressionProblem):
        """Apply Particle filtering to a data set.

        Parameters
        ----------
        regression_problem
            Regression problem.

        Yields
        ------
        curr_rv
            Filtering random variable at each grid point.
        info_dict
            Dictionary containing filtering information.

        See Also
        --------
        RegressionProblem: a regression problem data class
        """

        dataset, times = regression_problem.observations, regression_problem.locations

        # Initialize: condition on first data point using the initial random
        # variable as a dynamics rv (i.e. as the current prior).
        particles_and_weights = np.array(
            [
                self.compute_new_particle(dataset[0], times[0], self.initrv)
                for _ in range(self.num_particles)
            ],
            dtype=object,
        )
        particles = np.stack(particles_and_weights[:, 0], axis=0)
        weights = np.stack(particles_and_weights[:, 1], axis=0)
        weights = np.array(weights) / np.sum(weights)
        curr_rv = randvars.Categorical(support=particles,
                                       probabilities=weights,
                                       random_state=self.random_state)
        yield curr_rv, {}

        for idx in range(1, len(times)):
            curr_rv, info_dict = self.filter_step(
                start=times[idx - 1],
                stop=times[idx],
                randvar=curr_rv,
                data=dataset[idx],
            )
            yield curr_rv, info_dict
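
A hedged usage sketch for this generator (the particle filter instance pf and the regression_problem object below are hypothetical, not defined in these examples): consume the yielded categorical random variables one grid point at a time, for instance to track the particle-weighted mean.

# Hypothetical objects: `pf` exposes filter_generator as defined above, and
# `regression_problem` bundles locations and observations.
filtered_means = []
for curr_rv, info_dict in pf.filter_generator(regression_problem):
    # Weighted average of the particles: one state estimate per grid point.
    filtered_means.append(curr_rv.probabilities @ curr_rv.support)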
Example #6
def test_effective_number_of_events():
    weights = np.random.rand(10)
    categ = randvars.Categorical(support=np.random.rand(10, 2),
                                 probabilities=weights / np.sum(weights))
    ess = filtsmooth.particle.effective_number_of_events(categ)
    assert 0 < ess < 10
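
The test above only brackets the effective sample size between 0 and the number of particles. A common definition consistent with those bounds is the inverse sum of squared weights; the sketch below assumes this is what effective_number_of_events computes (it equals the number of particles for uniform weights and approaches 1 when a single weight dominates).

import numpy as np

def effective_sample_size(probabilities):
    # 1 / sum(w_i**2): maximal for uniform weights, close to 1 when a single
    # weight dominates.
    return 1.0 / np.sum(np.asarray(probabilities) ** 2)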
Example #7
def categ(probabilities, support):
    return randvars.Categorical(probabilities=probabilities, support=support)
Example #8
    def filter_generator(
        self,
        regression_problem: problems.TimeSeriesRegressionProblem,
    ):
        """Apply Particle filtering to a data set.

        Parameters
        ----------
        regression_problem
            Regression problem.

        Yields
        ------
        curr_rv
            Filtering random variable at each grid point.
        info_dict
            Dictionary containing filtering information.

        See Also
        --------
        TimeSeriesRegressionProblem: a regression problem data class
        """

        # It is not clear at the moment how to handle this.
        if not np.all(np.diff(regression_problem.locations) > 0):
            raise ValueError(
                "Particle filtering cannot handle repeating time points currently."
            )

        initarg = self.prior_process.initarg
        t_old = self.prior_process.initarg

        # If the initial time of the prior equals the location of the first data point,
        # the initial set of particles is overwritten. Here, we set them to unimportant values.
        # If the initial time of the prior is NOT the location of the first data point,
        # we have to sample an initial set of particles.
        weights = np.ones(self.num_particles) / self.num_particles
        particle_set_shape = (
            self.num_particles, ) + self.prior_process.initrv.shape
        if regression_problem.locations[0] == initarg:
            particles = np.nan * np.ones(particle_set_shape)
        else:
            particles = self.prior_process.initrv.sample(
                rng=self.rng, size=(self.num_particles, ))

        for t, data, measmod in regression_problem:

            dt = t - t_old
            new_particles = particles.copy()
            new_weights = weights.copy()

            # Capture the inputs in a variable for more compact code layout
            inputs = measmod, particles, weights, data, t_old, dt, t
            if t == initarg:
                particle_generator = self.importance_rv_generator_initial_time(
                    *inputs)
            else:
                particle_generator = self.importance_rv_generator(*inputs)

            for idx, (importance_rv, dynamics_rv, p,
                      w) in enumerate(particle_generator):

                # Importance sampling step
                new_particle = importance_rv.sample(rng=self.rng)
                meas_rv, _ = measmod.forward_realization(new_particle, t=t)
                loglikelihood = meas_rv.logpdf(data)
                log_correction_factor = (
                    self.importance_distribution.log_correction_factor(
                        proposal_state=new_particle,
                        importance_rv=importance_rv,
                        dynamics_rv=dynamics_rv,
                        old_weight=w,
                    ))
                new_weight = np.exp(loglikelihood + log_correction_factor)

                new_particles[idx] = new_particle
                new_weights[idx] = new_weight

            weights = new_weights / np.sum(new_weights)
            particles = new_particles
            new_rv = randvars.Categorical(support=particles,
                                          probabilities=weights)

            if self.with_resampling:
                N = effective_number_of_events(new_rv)
                if N < self.min_effective_num_of_particles:
                    new_rv = new_rv.resample(rng=self.rng)
            yield new_rv, {}
            t_old = t
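
For intuition about the weight update new_weight = np.exp(loglikelihood + log_correction_factor): in sequential importance sampling, the unnormalized new weight is proportional to p(data | new particle) * p(new particle | old particle) / q(new particle) * old weight, so the correction factor bundles everything except the measurement likelihood. Below is a minimal sketch of the bootstrap special case, where the importance distribution equals the dynamics model, the dynamics and proposal log-densities cancel, and only the previous weight remains; the meas_rv object is assumed to expose logpdf as in the code above.

import numpy as np

def bootstrap_log_correction_factor(old_weight):
    # With importance_rv == dynamics_rv, the correction reduces to the log of
    # the previous weight.
    return np.log(old_weight)

def bootstrap_weight(meas_rv, data, old_weight):
    # Unnormalized weight: measurement likelihood times the previous weight.
    return np.exp(meas_rv.logpdf(data) + bootstrap_log_correction_factor(old_weight))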