Example #1
0
    def predict(self, time, true_value=None):
        """Propagate the particle cloud from the filter's current time to ``time``.

        Each constituent process propagates its own slice of the state
        particles (vectorised when the process supports it, otherwise
        particle-by-particle). If a predicted-observation sampler is
        configured, predicted observation particles are drawn and a KDE is
        fitted over them.

        Parameters
        ----------
        time
            The time to predict to. Must not precede the current time.
        true_value
            Currently unused (see TODO below).

        Raises
        ------
        ValueError
            If ``time`` precedes the filter's current time.
        """
        # TODO Use true_value
        if time < self._time:
            raise ValueError('Predicting the past (current time=%s, prediction time=%s)' % (self._time, time))
        # Fixed: was `self.time`, inconsistent with `self._time` used everywhere
        # else in this method; without a `time` property this raised
        # AttributeError instead of taking the no-op early return.
        if time == self._time:
            print('Predicting the present - nothing to do')
            return
        # NOTE(review): copying prior -> resampled when the resampled particles
        # are NOT up to date looks inverted at first glance, but presumably this
        # means "no resampling happened since the last predict, so start from
        # the prior" — confirm against the resampling step.
        if not self._resampled_particles_uptodate:
            self._resampled_particles[:] = self._prior_particles[:]
        row = 0
        self._prior_particles = np.empty((self._particle_count, self._state_dim))
        # Each process owns a contiguous band of state columns [row, row+process_dim).
        for p in self._processes:
            process_dim = p.process_dim
            if npu.is_vectorised(p.propagate):
                self._prior_particles[:, row:row+process_dim] = p.propagate(self._time, self._resampled_particles[:, row:row+process_dim], time)
            else:
                for i in range(self._particle_count):
                    # Expose the particle index so non-vectorised callbacks can
                    # query it via the filter object.
                    self._current_particle_idx = i
                    self._prior_particles[i, row:row+process_dim] = npu.to_ndim_1(p.propagate(self._time, self._resampled_particles[i, row:row+process_dim], time))
                self._current_particle_idx = None
            row += process_dim

        self._time = time

        # Invalidate caches that depended on the previous prior.
        self._resampled_particles_uptodate = False
        self._cached_prior_mean = None
        self._cached_prior_var = None
        
        # TODO Vectorise
        # TODO using fft kde - assumes all weights are equal!
        # TODO This only works when self._state_dim == 1
        if self._predicted_observation_sampler is not None:
            if npu.is_vectorised(self._predicted_observation_sampler):
                self.predicted_observation_particles = self._predicted_observation_sampler(self._prior_particles, self)
            else:
                self.predicted_observation_particles = np.empty((self._particle_count, self._observation_dim))
                for i in range(self._particle_count):
                    self._current_particle_idx = i
                    self.predicted_observation_particles[i,:] = self._predicted_observation_sampler(self._prior_particles[i,:], self)
                self._current_particle_idx = None
            # Weighted mean of the predicted observation particles.
            self.predicted_observation = np.average(self.predicted_observation_particles, weights=self._weights, axis=0)
            self.predicted_observation_kde = sm.nonparametric.KDEUnivariate(self.predicted_observation_particles)
            #fft=False, weights=self._weights
            self.predicted_observation_kde.fit()
            # import matplotlib.pyplot as plt
            # fig = plt.figure()
            #x_grid = np.linspace(-4.5, 3.5, 1000)
            #plt.plot(x_grid, kde.evaluate(x_grid))
            #plt.show()
            # Innovation variance = sample variance inflated by the squared KDE bandwidth.
            self.innovationvar = np.var(self.predicted_observation_particles) + self.predicted_observation_kde.bw * self.predicted_observation_kde.bw

        self._state_distr = EmpiricalDistr(particles=self._prior_particles)
Example #2
0
    def _weight(self, observation):
        """Compute, store and normalise particle weights for ``observation``.

        Updates the unnormalised weights via the configured weighting
        function (vectorised when supported), normalises them, refreshes the
        effective sample size and the running log-likelihood, and invalidates
        the cached posterior moments.
        """
        if self._predicted_observation_sampler is not None:
            self.innovation = observation - self.predicted_observation

        if npu.is_vectorised(self._weighting_func):
            # One call over the whole particle array.
            self._unnormalised_weights = npu.to_ndim_1(self._weighting_func(observation, self._prior_particles, self))
            total = np.sum(self._unnormalised_weights)
        else:
            # Particle-by-particle evaluation, accumulating the sum as we go.
            total = 0.
            for idx in range(self._particle_count):
                self._current_particle_idx = idx
                self._unnormalised_weights[idx] = npu.toscalar(self._weighting_func(observation, self._prior_particles[idx,:], self))
                total += self._unnormalised_weights[idx]
            self._current_particle_idx = None

        if total < ParticleFilter.MIN_WEIGHT_SUM:
            # Deliberately warn-and-continue; the normalisation below may be
            # numerically fragile in this case.
            warnings.warn('The sum of weights is less than MIN_WEIGHT_SUM')
            #self._unnormalised_weights[:] = 1. / self._particle_count
            #weight_sum = 1.

        self._weights = self._unnormalised_weights / total

        self.effective_sample_size = 1. / np.sum(np.square(self._weights))

        self.log_likelihood += np.log(np.sum(self._unnormalised_weights) / self._particle_count)

        self._last_observation = observation

        # Posterior moments must be recomputed after reweighting.
        self._cached_posterior_mean = None
        self._cached_posterior_var = None
Example #3
0
 def solver(a, b, f):
     """Apply ``f`` to ``a`` and ``b``: one vectorised call if ``f`` supports
     it, otherwise element-wise into an (n, 1) result array."""
     if not npu.is_vectorised(f):
         n = np.shape(a)[0]
         out = np.empty((n, 1))
         for j in range(n):
             out[j] = f(a[j], b[j])
         return out
     return f(a, b)
Example #4
0
    def test_vectorised(self):
        """Verify that npu.is_vectorised detects the @vectorised decorator and
        that a dispatching solver makes one vectorised call versus n scalar
        calls, with identical results either way."""
        plain_calls = 0

        def plain_func(a, b):
            nonlocal plain_calls
            plain_calls += 1
            return a + b

        vec_calls = 0

        @vectorised
        def vec_func(a, b):
            nonlocal vec_calls
            vec_calls += 1
            return a + b

        def solve(a, b, f):
            # Vectorised functions get a single call over the full arrays;
            # plain ones are applied row by row.
            if not npu.is_vectorised(f):
                count = np.shape(a)[0]
                out = np.empty((count, 1))
                for idx in range(count):
                    out[idx] = f(a[idx], b[idx])
                return out
            return f(a, b)

        self.assertFalse(npu.is_vectorised(plain_func))
        self.assertTrue(npu.is_vectorised(vec_func))
        lhs = npu.col(14., 2., 429.)
        rhs = npu.col(42., 1., 5.)
        expected = np.array([[56.], [3.], [434.]])
        npt.assert_almost_equal(solve(lhs, rhs, plain_func), expected)
        self.assertEqual(plain_calls, 3)
        npt.assert_almost_equal(solve(lhs, rhs, vec_func), expected)
        self.assertEqual(vec_calls, 1)