def test_are_views_of_same(self):
    a = np.array([[429., 5.], [2., 14.]])
    b = npu.to_ndim_1(a, copy=False)
    self.assertTrue(npu.are_views_of_same(b, a))
    self.assertTrue(npu.are_views_of_same(a, b))
    b = a.T
    self.assertTrue(npu.are_views_of_same(b, a))
    self.assertTrue(npu.are_views_of_same(a, b))
    b = npu.to_ndim_1(a, copy=True)
    self.assertFalse(npu.are_views_of_same(b, a))
    self.assertFalse(npu.are_views_of_same(a, b))
def test_is_view_of(self):
    a = np.array([[429., 5.], [2., 14.]])
    b = npu.to_ndim_1(a, copy=False)
    self.assertTrue(npu.is_view_of(b, a))
    self.assertFalse(npu.is_view_of(a, b))
    b = a.T
    self.assertTrue(npu.is_view_of(b, a))
    self.assertFalse(npu.is_view_of(a, b))
    b = npu.to_ndim_1(a, copy=True)
    self.assertFalse(npu.is_view_of(b, a))
    self.assertFalse(npu.is_view_of(a, b))
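# For intuition, a plain-NumPy cross-check (an illustration only, not part of
# the API under test): np.shares_memory reports overlapping storage and is
# symmetric, whereas the assertions above show that is_view_of is directional.
import numpy as np

a = np.array([[429., 5.], [2., 14.]])
assert np.shares_memory(a.T, a)           # a.T is a view onto a's buffer
assert not np.shares_memory(a.copy(), a)  # a copy owns fresh storage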
def multinomial_resample(empirical_distr, target_particle_count=None, random_state=None):
    if target_particle_count is None:
        target_particle_count = empirical_distr.particle_count
    if random_state is None:
        random_state = rnd.random_state()
    # Draw each particle's multiplicity from a multinomial over the normalised
    # weights, using the resolved random_state so resampling is reproducible
    counts = random_state.multinomial(target_particle_count, npu.to_ndim_1(empirical_distr.normalized_weights))
    assert np.sum(counts) == target_particle_count
    particle_idx = 0
    resampled_particles = np.empty((target_particle_count, np.shape(empirical_distr.particles)[1]))
    for i in range(empirical_distr.particle_count):
        for _ in range(counts[i]):
            resampled_particles[particle_idx,:] = npu.to_ndim_1(empirical_distr.particle(i))
            particle_idx += 1
    # Resampling equalises the weights
    return EmpiricalDistr(particles=resampled_particles, weights=np.ones((target_particle_count,)))
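# A minimal usage sketch for multinomial_resample. It assumes EmpiricalDistr
# accepts raw (not necessarily normalised) weights and normalises them
# internally, as its normalized_weights attribute above suggests.
particles = np.array([[0.], [1.], [2.]])
weights = np.array([0.2, 0.3, 0.5])
distr = EmpiricalDistr(particles=particles, weights=weights)
resampled = multinomial_resample(distr, target_particle_count=1000)
# All resampled weights are equal; the multiplicity of each original particle
# is multinomially distributed around 200, 300 and 500 respectively.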
def _weight(self, observation):
    if self._predicted_observation_sampler is not None:
        self.innovation = observation - self.predicted_observation
    weight_sum = 0.
    if npu.is_vectorised(self._weighting_func):
        self._unnormalised_weights = npu.to_ndim_1(self._weighting_func(observation, self._prior_particles, self))
        weight_sum = np.sum(self._unnormalised_weights)
    else:
        for i in range(self._particle_count):
            self._current_particle_idx = i
            self._unnormalised_weights[i] = npu.toscalar(self._weighting_func(observation, self._prior_particles[i,:], self))
            weight_sum += self._unnormalised_weights[i]
        self._current_particle_idx = None
    if weight_sum < ParticleFilter.MIN_WEIGHT_SUM:
        warnings.warn('The sum of weights is less than MIN_WEIGHT_SUM')
        # self._unnormalised_weights[:] = 1. / self._particle_count
        # weight_sum = 1.
    self._weights = self._unnormalised_weights / weight_sum
    # Effective sample size: 1 / sum(w_i^2), ranges from 1 to particle_count
    self.effective_sample_size = 1. / np.sum(np.square(self._weights))
    # Marginal likelihood contribution of this observation
    self.log_likelihood += np.log(np.sum(self._unnormalised_weights) / self._particle_count)
    self._last_observation = observation
    self._cached_posterior_mean = None
    self._cached_posterior_var = None
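# The effective sample size computed above is the standard 1 / sum(w_i^2)
# diagnostic. A quick plain-NumPy illustration of its range, from perfectly
# balanced weights down to a fully degenerate filter:
import numpy as np

w = np.full(4, 0.25)              # balanced weights
print(1. / np.sum(np.square(w)))  # 4.0: every particle is effective
w = np.array([1., 0., 0., 0.])    # all mass on one particle
print(1. / np.sum(np.square(w)))  # 1.0: the filter has degenerated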
def _initialise(self):
    # TODO Vectorise
    for i in range(self._particle_count):
        self._current_particle_idx = i
        self._prior_particles[i,:] = npu.to_ndim_1(self._state_distr.sample())
        self._resampled_particles[i,:] = self._prior_particles[i,:]
    self._current_particle_idx = None
    # No observation has been weighted yet, so start with uniform weights
    self._unnormalised_weights[:] = np.nan
    self._weights[:] = 1. / self._particle_count
def predict(self, time, true_value=None):
    # TODO Use true_value
    if time < self._time:
        raise ValueError('Predicting the past (current time=%s, prediction time=%s)' % (self._time, time))
    if time == self._time:
        print('Predicting the present - nothing to do')
        return
    if not self._resampled_particles_uptodate:
        self._resampled_particles[:] = self._prior_particles[:]
    # Propagate each process's block of the stacked state, column block by
    # column block
    row = 0
    self._prior_particles = np.empty((self._particle_count, self._state_dim))
    for p in self._processes:
        process_dim = p.process_dim
        if npu.is_vectorised(p.propagate):
            self._prior_particles[:, row:row+process_dim] = p.propagate(self._time, self._resampled_particles[:, row:row+process_dim], time)
        else:
            for i in range(self._particle_count):
                self._current_particle_idx = i
                self._prior_particles[i, row:row+process_dim] = npu.to_ndim_1(p.propagate(self._time, self._resampled_particles[i, row:row+process_dim], time))
            self._current_particle_idx = None
        row += process_dim
    self._time = time
    self._resampled_particles_uptodate = False
    self._cached_prior_mean = None
    self._cached_prior_var = None

    # TODO Vectorise
    # TODO using fft kde - assumes all weights are equal!
    # TODO This only works when self._state_dim == 1
    if self._predicted_observation_sampler is not None:
        if npu.is_vectorised(self._predicted_observation_sampler):
            self.predicted_observation_particles = self._predicted_observation_sampler(self._prior_particles, self)
        else:
            self.predicted_observation_particles = np.empty((self._particle_count, self._observation_dim))
            for i in range(self._particle_count):
                self._current_particle_idx = i
                self.predicted_observation_particles[i,:] = self._predicted_observation_sampler(self._prior_particles[i,:], self)
            self._current_particle_idx = None
        self.predicted_observation = np.average(self.predicted_observation_particles, weights=self._weights, axis=0)
        self.predicted_observation_kde = sm.nonparametric.KDEUnivariate(self.predicted_observation_particles)  # fft=False, weights=self._weights
        self.predicted_observation_kde.fit()
        # import matplotlib.pyplot as plt
        # fig = plt.figure()
        # x_grid = np.linspace(-4.5, 3.5, 1000)
        # plt.plot(x_grid, kde.evaluate(x_grid))
        # plt.show()
        # Innovation variance: sample variance plus the KDE bandwidth correction
        self.innovationvar = np.var(self.predicted_observation_particles) + self.predicted_observation_kde.bw * self.predicted_observation_kde.bw
    self._state_distr = EmpiricalDistr(particles=self._prior_particles)
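# For reference, a hypothetical process satisfying the contract that predict
# relies on: propagate receives the current time, that process's block of
# particle state and the target time, and returns the propagated state. The
# class name and dynamics are illustrative only, and since the mechanism
# behind npu.is_vectorised is not shown here, this sketch assumes the
# per-particle (non-vectorised) path.
class RandomWalkProcess:
    process_dim = 1  # occupies one column of the stacked state

    def propagate(self, time, state, target_time):
        dt = target_time - time
        # One Euler step: diffuse the state by a zero-mean Gaussian increment
        return state + np.sqrt(dt) * np.random.standard_normal(np.shape(state))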
def test_to_ndim_1(self):
    # Scalars and singleton sequences of any nesting flatten to shape (1,)
    for v in [429., [429.], [[429.]],
              np.array(429.), np.array([429.]), np.array([[429.]])]:
        r = npu.to_ndim_1(v)
        self.assertTrue(checks.is_numpy_array(r))
        self.assertEqual(np.shape(r), (1,))
        npt.assert_almost_equal(r, np.array([429.]))
    for v in [[429., 5.], [[429., 5.]], [[[429.], [5.]]],
              np.array([429., 5.]), np.array([[429., 5.]]), np.array([[[429.], [5.]]])]:
        r = npu.to_ndim_1(v)
        self.assertTrue(checks.is_numpy_array(r))
        self.assertEqual(np.shape(r), (2,))
        npt.assert_almost_equal(r, np.array([429., 5.]))
    for v in [[429., 5., 2., 14.], [[429., 5., 2., 14.]], [[429., 5.], [2., 14.]], [[429.], [5.], [2.], [14.]],
              np.array([429., 5., 2., 14.]), np.array([[429., 5., 2., 14.]]),
              np.array([[429., 5.], [2., 14.]]), np.array([[[429.], [5.], [2.], [14.]]])]:
        r = npu.to_ndim_1(v)
        self.assertTrue(checks.is_numpy_array(r))
        self.assertEqual(np.shape(r), (4,))
        npt.assert_almost_equal(r, np.array([429., 5., 2., 14.]))
    npt.assert_equal(npu.to_ndim_1(None), np.array([None]))
    # copy=False returns a view of an ndarray argument, so writes propagate...
    a = np.array([2., 14.])
    b = npu.to_ndim_1(a, copy=False)
    b[1] = 42.
    npt.assert_almost_equal(b, np.array([2., 42.]))
    npt.assert_almost_equal(a, np.array([2., 42.]))
    # ...but a list argument is always converted, so the original is untouched
    a = [2., 14.]
    b = npu.to_ndim_1(a, copy=False)
    b[1] = 42.
    npt.assert_almost_equal(b, np.array([2., 42.]))
    npt.assert_almost_equal(a, np.array([2., 14.]))
    a = [2., 14.]
    b = npu.to_ndim_1(a, copy=True)
    b[1] = 42.
    npt.assert_almost_equal(b, np.array([2., 42.]))
    npt.assert_almost_equal(a, np.array([2., 14.]))
def multivariate_normal(mean=None, cov=None, size=None, ndim=None, random_state=None):
    global _rs
    # Infer the dimension from whichever of mean/cov is given; default to 1
    if ndim is None:
        if mean is not None:
            ndim = np.size(mean)
        elif cov is not None:
            ndim = npu.nrow(cov)
        else:
            ndim = 1
    # Fill in standard-normal defaults for whichever parameter is missing
    if mean is None:
        mean = npu.ndim_1_of(ndim, 0.)
    if cov is None:
        cov = np.eye(ndim, ndim)
    mean = npu.to_ndim_1(mean)
    cov = npu.to_ndim_2(cov)
    npc.check_size(mean, ndim)
    npc.check_nrow(cov, ndim)
    npc.check_square(cov)
    if random_state is None:
        random_state = _rs()
    return random_state.multivariate_normal(mean, cov, size)
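# A usage sketch: drawing 1000 samples from a correlated two-dimensional
# Gaussian via the function above.
mean = [0., 1.]
cov = [[1., .5],
       [.5, 2.]]
sample = multivariate_normal(mean=mean, cov=cov, size=1000)
assert np.shape(sample) == (1000, 2)
# np.mean(sample, axis=0) approximates mean; np.cov(sample.T) approximates cov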
def multivariate_lognormal(mean_of_log=None, cov_of_log=None, size=None, ndim=None, random_state=None):
    global _rs
    # Infer the dimension from whichever parameter is given; default to 1
    if ndim is None:
        if mean_of_log is not None:
            ndim = np.size(mean_of_log)
        elif cov_of_log is not None:
            ndim = npu.nrow(cov_of_log)
        else:
            ndim = 1
    # Fill in standard-normal defaults for the log-space parameters
    if mean_of_log is None:
        mean_of_log = npu.ndim_1_of(ndim, 0.)
    if cov_of_log is None:
        cov_of_log = np.eye(ndim, ndim)
    mean_of_log = npu.to_ndim_1(mean_of_log)
    cov_of_log = npu.to_ndim_2(cov_of_log)
    npc.check_size(mean_of_log, ndim)
    npc.check_nrow(cov_of_log, ndim)
    npc.check_square(cov_of_log)
    if random_state is None:
        random_state = _rs()
    # Sample the underlying normal in log space and exponentiate
    normal = random_state.multivariate_normal(mean_of_log, cov_of_log, size)
    return np.exp(normal)
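# Correspondingly for the lognormal: the parameters describe the distribution
# of the log of the samples, so the draws themselves are strictly positive.
sample = multivariate_lognormal(mean_of_log=[0., 0.], cov_of_log=np.eye(2), size=1000)
assert np.all(sample > 0.)
# np.log(sample) is approximately standard bivariate normal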