def rejuvenate(self, t, xp, w):
    """Resample the particle system, then apply k MCMC moves to rejuvenate it.

    :param t: current time index.
    :param xp: current particles.
    :param w: current (normalised) weights, an ``rs.Weights`` object.
    :returns: the rejuvenated particles and fresh uniform weights.
    """
    # Draw ancestors according to the current weights and select them.
    anc = rs.resampling(self.resampling_scheme, w.W)
    particles = xp[anc]
    # Rejuvenate with k MCMC moves; progress bar only in verbose mode.
    for _ in tqdm(range(self.k), disable=not self.verbose):
        particles = self.model.MCMC(t, particles)
    # Record this generation; log-weights are uniform after resampling.
    self.logging.compact_particle_history.add(lw=np.zeros(len(particles)),
                                              ancestor=anc,
                                              last_particles=particles)
    ut.memory_tracker_add(t)
    return particles, rs.Weights(lw=np.zeros(len(xp)))
def resample_move(self):
    """One resample-move step.

    Resamples when the ESS drops below ``N * ESSrmin``; afterwards the
    particles are mutated through the FK model's kernel ``M`` — except
    when no resampling occurred and the FK model mutates only after
    resampling, in which case nothing happens.
    """
    self.rs_flag = self.aux.ESS < self.N * self.ESSrmin
    if self.rs_flag:  # resampling step
        self.A = rs.resampling(self.resampling, self.aux.W)
        self.Xp = self.X[self.A]
        self.reset_weights()
    elif self.fk.mutate_only_after_resampling:
        # No resampling and the model only mutates after resampling.
        return
    else:
        # Keep the particles, with identity ancestors.
        self.A = np.arange(self.N)
        self.Xp = self.X
    self.X = self.fk.M(self.t, self.Xp)
def array_resampling(resampling_scheme: str, W: np.ndarray, M: int) -> Tuple:  # tested
    """Resampling scheme for multidimensional numpy weight arrays.

    :param resampling_scheme: name of the scheme (e.g. ``'multinomial'``).
    :param W: a numpy array of weights, summing to 1.
    :param M: number of draws.
    :returns: a tuple of numpy arrays indicating the chosen elements
    """
    # Fast path: one multinomial draw needs no ravel/unravel round-trip.
    if resampling_scheme == 'multinomial' and M == 1:
        return multinomial_sampling(W)
    # General case: resample over the flattened weights, then map the
    # flat indices back to multidimensional coordinates.
    flat_idx = rs.resampling(resampling_scheme, np.ravel(W), M)
    return np.unravel_index(flat_idx, W.shape)
def resample_move(self):
    """Resample (when the FK model says it is time), then move the particles.

    Unlike the ESS-threshold variant, the decision is delegated to
    ``fk.time_to_resample``; the mutation kernel ``M`` runs every step.
    """
    self.rs_flag = self.fk.time_to_resample(self)
    if self.rs_flag:  # resampling step
        # We always resample self.N particles, even if smc.X has a
        # different size (example: waste-free).
        self.A = rs.resampling(self.resampling, self.aux.W, M=self.N)
        self.Xp = self.X[self.A]
        self.reset_weights()
    else:
        self.A = np.arange(self.N)
        self.Xp = self.X
    self.X = self.fk.M(self.t, self.Xp)
def M(self, t: int, xp: np.ndarray, w: rs.Weights) -> typing.Tuple[np.ndarray, rs.Weights]:
    """Propagate particles one step, resampling first if the ESS ratio is low.

    :param t: current time index.
    :param xp: current particles.
    :param w: current weights.
    :returns: the propagated particles and the (possibly reset) weights.
    """
    ess_ratio = ut.ESS_ratio(w)  # assumed pure; computed once and reused
    resampling_needed = ess_ratio < self.ESSrmin
    # Per-step diagnostics.
    step_log = GenericParticleFilterPunctualLogging()
    step_log.ESS_ratio = ess_ratio
    step_log.ESS = w.ESS
    step_log.resampled = resampling_needed
    self.logging.punctual_logging[t] = step_log
    if resampling_needed:
        ancestors = rs.resampling(self.resampling_mode, w.W)
        xp = xp[ancestors]
        w = rs.Weights(lw=np.zeros(len(xp)))
        # Compact history is recorded only on resampling steps, where
        # the ancestor indices exist.
        self.logging.compact_particle_history.add(lw=w.lw,
                                                  ancestor=ancestors,
                                                  last_particles=xp)
    x = self.fk_model.M(t, xp)
    return x, w
def chosen_outer_ancestors(self) -> np.ndarray:
    """Return outer-level ancestor indices.

    Resampled according to the outer weights when resampling is needed,
    otherwise the identity mapping ``0..M2-1``.
    """
    if not self.outer_resampling_needed:
        return np.arange(self.M2)
    return rs.resampling(scheme=self.outer_resampling_mode,
                         W=self.w.outer_weights.W)
""" TV distance between two discrete distributions. x, y: the weights """ return 0.5 * sum(abs(x - y)) results = {key: np.zeros((ntrials, len(taus))) for key in rs_schemes} for i in range(ntrials): x = stats.norm.rvs(size=N) for j, tau in enumerate(taus): lw = -.5 * tau * (bias - x)**2 W = rs.exp_and_normalise(lw) for scheme in rs_schemes: A = rs.resampling(scheme, W) counts = np.bincount(A, minlength=N) # counts start at 0 results[scheme][i, j] = tv_distance(W, counts / N) # PLOTS # ===== savefigs = True plt.style.use('ggplot') sb.set_palette(sb.dark_palette("lightgray", n_colors=4, reverse=True)) # Actual figure plt.figure() for k, scheme in enumerate(rs_schemes): plt.plot(taus, np.mean(results[scheme], axis=0), label=scheme, linewidth=3) plt.legend()