def test_Inference(self):
    # ===== Distributions ===== #
    dist = Normal(0., 1.)
    mvn = Independent(Normal(torch.zeros(2), torch.ones(2)), 1)

    # ===== Define model ===== #
    linear = AffineProcess((f, g), (1., 0.25), dist, dist)
    model = LinearGaussianObservations(linear, scale=0.1)

    mv_linear = AffineProcess((fmvn, gmvn), (0.5, 0.25), mvn, mvn)
    mvnmodel = LinearGaussianObservations(mv_linear, torch.eye(2), scale=0.1)

    # ===== Test for multiple models ===== #
    priors = Exponential(1.), LogNormal(0., 1.)

    hidden1d = AffineProcess((f, g), priors, dist, dist)
    oned = LinearGaussianObservations(hidden1d, 1., scale=0.1)

    hidden2d = AffineProcess((fmvn, gmvn), priors, mvn, mvn)
    twod = LinearGaussianObservations(hidden2d, torch.eye(2), scale=0.1 * torch.ones(2))

    particles = 1000

    # ===== Run inference ===== #
    # Pairs of (true model with fixed parameters, model with priors to be inferred)
    for trumod, model in [(model, oned), (mvnmodel, twod)]:
        x, y = trumod.sample_path(1000)

        algs = [
            (NESS, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (NESS, {'particles': particles, 'filter_': UKF(model.copy())}),
            (SMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (SMC2FW, {'particles': particles, 'filter_': APF(model.copy(), 200)}),
            (NESSMC2, {'particles': particles, 'filter_': APF(model.copy(), 200)})
        ]

        for alg, props in algs:
            alg = alg(**props).initialize()
            alg = alg.fit(y)

            w = normalize(alg._w_rec if hasattr(alg, '_w_rec') else torch.ones(particles))

            tru_params = trumod.hidden.theta._cont + trumod.observable.theta._cont
            inf_params = alg.filter.ssm.hidden.theta._cont + alg.filter.ssm.observable.theta._cont

            for trup, p in zip(tru_params, inf_params):
                if not p.trainable:
                    continue

                kde = p.get_kde(weights=w)

                transed = p.bijection.inv(trup)
                densval = kde.logpdf(transed.numpy().reshape(-1, 1))
                priorval = p.distr.log_prob(trup)

                # The weighted posterior KDE should place more density at the true
                # parameter value than the prior does
                assert (densval > priorval.numpy()).all()
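# The tests in this module reference module-level mean/scale helpers (f, g, fmvn, gmvn)
# that are not shown here. A minimal sketch of plausible definitions, assuming a linear
# hidden process whose scale does not depend on the state; the actual helpers used by
# the test suite may differ in form:
def f(x, alpha, sigma):
    # mean function of the 1D hidden AffineProcess
    return alpha * x


def g(x, alpha, sigma):
    # scale function: constant diffusion, independent of the state
    return sigma


def fmvn(x, alpha, sigma):
    # mean function of the 2D hidden process; broadcasting covers the extra dimension
    return alpha * x


def gmvn(x, alpha, sigma):
    # scale function of the 2D hidden process
    return sigma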
def test_SequentialAlgorithms(self):
    particles = 1000

    # Pairs of (true model used to simulate data, model whose parameters are inferred),
    # in one and two dimensions
    for true_model, model in [(make_model(False), make_model(True)), (make_model(False, 2), make_model(True, 2))]:
        x, y = true_model.sample_path(1000)

        algs = [
            (NESS, {"particles": particles, "filter_": APF(model.copy(), 200)}),
            (NESS, {"particles": particles, "filter_": UKF(model.copy())}),
            (SMC2, {"particles": particles, "filter_": APF(model.copy(), 125)}),
            (SMC2FW, {"particles": particles, "filter_": APF(model.copy(), 200)}),
            (NESSMC2, {"particles": particles, "filter_": APF(model.copy(), 200)}),
        ]

        for alg_type, props in algs:
            alg = alg_type(**props)
            state = alg.fit(y)

            w = state.normalized_weights()

            zipped = zip(true_model.hidden.functional_parameters(), alg.filter.ssm.parameters_and_priors())

            for true_p, (p, prior) in zipped:
                kde = gaussian_kde(prior.get_unconstrained(p).squeeze().numpy(), weights=w.numpy())

                inverse_true_value = prior.bijection.inv(true_p)

                posterior_log_prob = kde.logpdf(inverse_true_value.numpy().reshape(-1, 1))
                prior_log_prob = prior.unconstrained_prior.log_prob(inverse_true_value)

                # The particle approximation of the posterior should concentrate more
                # density at the true parameter value than the prior does
                assert (posterior_log_prob > prior_log_prob.numpy()).all()
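# `make_model` is defined elsewhere in the test module. A rough sketch of what it could
# look like, reusing the building blocks from test_Inference above (the real helper
# targets a newer pyfilter API and may wrap distributions and priors differently):
def make_model(trainable, dim=1):
    if dim == 1:
        inc_dist = Normal(0., 1.)
        funcs, obs_matrix = (f, g), 1.
    else:
        inc_dist = Independent(Normal(torch.zeros(dim), torch.ones(dim)), 1)
        funcs, obs_matrix = (fmvn, gmvn), torch.eye(dim)

    # priors when the parameters are to be inferred, fixed values when simulating data
    params = (Exponential(1.), LogNormal(0., 1.)) if trainable else (1., 0.25)

    hidden = AffineProcess(funcs, params, inc_dist, inc_dist)
    return LinearGaussianObservations(hidden, obs_matrix, scale=0.1)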
def test_StateDict(self):
    # ===== Define model ===== #
    norm = Normal(0., 1.)
    linear = AffineProcess((f, g), (1., 1.), norm, norm)
    linearobs = AffineObservations((fo, go), (1., 1.), norm)
    model = StateSpaceModel(linear, linearobs)

    # ===== Define filter ===== #
    filt = SISR(model, 100).initialize()

    # ===== Get statedict ===== #
    sd = filt.state_dict()

    # ===== Verify that we don't save multiple instances ===== #
    assert '_model' in sd and '_model' not in sd['_proposal']

    newfilt = SISR(model, 1000).load_state_dict(sd)
    assert newfilt._w_old is not None and newfilt.ssm is newfilt._proposal._model

    # ===== Test same with UKF and verify that we save UT ===== #
    ukf = UKF(model).initialize()
    sd = ukf.state_dict()

    assert '_model' in sd and '_model' not in sd['_ut']
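# `fo` and `go` (the observation mean/scale functions passed to AffineObservations above)
# are likewise defined elsewhere; a hypothetical pair consistent with how they are used:
def fo(x, alpha, sigma):
    # observation mean: a scaled version of the hidden state
    return alpha * x


def go(x, alpha, sigma):
    # observation scale: constant noise level
    return sigma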
def test_SDE(self):
    def f(x, a, s):
        return -a * x

    def g(x, a, s):
        return s

    em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.), dt=1e-2, num_steps=10)
    model = LinearGaussianObservations(em, scale=1e-3)

    x, y = model.sample_path(500)

    # Constructing SISR with its default proposal is expected to fail for this model
    with self.assertRaises(NotImplementedError):
        SISR(model, 200)

    for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
        filt = filt.initialize().longfilter(y)

        means = filt.filtermeans
        if isinstance(filt, UKF):
            means = means[:, 0]

        self.assertLess(torch.std(x - means), 5e-2)
def test_SDE(self):
    def f(x, a, s):
        return -a * x

    def g(x, a, s):
        return s

    dt = 1e-2
    norm = DistributionWrapper(Normal, loc=0.0, scale=sqrt(dt))

    em = AffineEulerMaruyama((f, g), (0.02, 0.15), norm, norm, dt=dt, num_steps=10)
    model = LinearGaussianObservations(em, scale=1e-3)

    x, y = model.sample_path(500)

    for filt in [SISR(model, 500, proposal=prop.Bootstrap()), UKF(model)]:
        result = filt.longfilter(y)

        means = result.filter_means
        if isinstance(filt, UKF):
            means = means[:, 0]

        self.assertLess(torch.std(x - means), 5e-2)
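# Why the increment distribution in the test above uses scale=sqrt(dt): under an
# Euler-Maruyama discretization, dX_t = f(X_t) dt + g(X_t) dW_t with dW_t ~ N(0, dt),
# so each Gaussian increment has standard deviation sqrt(dt). A standalone sketch of
# one such step, independent of the pyfilter classes exercised above (the function
# name and signature here are illustrative only):
def euler_maruyama_step(x, a, s, dt):
    drift = -a * x * dt                                 # f(x) = -a * x, as in the test
    diffusion = s * (dt ** 0.5) * torch.randn_like(x)   # g(x) = s, dW ~ N(0, dt)
    return x + drift + diffusion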