def test_sgvb(self):
    """Smoke test: SGVB inference runs end-to-end on simulated AR(2) data."""
    torch.manual_seed(123)
    true_params = dict(μ=1.5, ρ1=0.2, ρ2=0.1, σ=1.5)
    model = AR2(input_length=100)
    y = model.simulate(**true_params)
    fit = sgvb(model, y, max_iters=200, quiet=True)
    # exercising summary() checks that post-fit reporting also works
    fit.summary()
def test_training_loop(self):
    """The SGVB training loop should run on a small local-level model."""
    torch.manual_seed(123)
    model = LocalLevelModel(input_length=20)
    y, z = model.simulate(γ=0., η=2., σ=1.5, ρ=0.85)
    # both the observed and latent series should match the input length
    for series in (y, z):
        self.assertEqual(20, len(series))
    fit = sgvb(model, y, max_iters=8, quiet=True)
    self.assertIsInstance(fit, MVNPosterior)
def test_plots(self):
    """Plot methods of a fitted UnivariateGaussian run without displaying.

    Bug fix: the original called ``patch("ptvi.model.plt.show", fit.plot_x())``,
    which evaluates the plot call eagerly as the *replacement object* and never
    starts the patcher — so ``plt.show`` was not actually suppressed. Using
    ``patch`` as a context manager activates it while each plot method runs.
    """
    torch.manual_seed(123)
    m = UnivariateGaussian()
    N, μ0, σ0 = 100, 5., 5.
    y = m.simulate(N=N, μ=μ0, σ=σ0)
    fit = sgvb(m, y, max_iters=100, quiet=True)
    with patch("ptvi.model.plt.show"):
        fit.plot_marg_post("μ")
        fit.plot_data()
        fit.plot_elbos()
def test_sgvb_gpu_double(self):
    """SGVB on AR(2) data simulated in float64 on the CUDA device."""
    torch.manual_seed(123)
    true_params = dict(μ=1.5, ρ1=0.2, ρ2=0.1, σ=1.5)
    model = AR2(input_length=100, dtype=torch.float64, device=cuda)
    y = model.simulate(**true_params)
    # simulated data should inherit the model's dtype and device
    self.assertEqual(torch.float64, y.dtype)
    self.assertEqual(cuda.type, y.device.type)
    fit = sgvb(model, y, max_iters=10, quiet=True)
    fit.summary()  # smoke test of post-fit reporting
def test_smoke_test_sgvb(self):
    """End-to-end smoke test of sgvb() on a univariate Gaussian model."""
    model = UnivariateGaussian()
    torch.manual_seed(123)
    y = model.simulate(N=100, μ=5., σ=5.)
    fit = sgvb(
        model, y, max_iters=16, num_draws=1, sim_entropy=True, quiet=True
    )
    self.assertIsInstance(fit, MVNPosterior)
def test_training_loop_double_gpu(self):
    """Training loop runs with float64 tensors on the CUDA device."""
    torch.manual_seed(123)
    model = LocalLevelModel(input_length=20, dtype=torch.float64, device=cuda)
    y, z = model.simulate(γ=0., η=2., σ=1.5, ρ=0.85)
    # observed and latent series both inherit length, device, and dtype
    for series in (y, z):
        self.assertEqual(20, len(series))
        self.assertEqual(cuda.type, series.device.type)
        self.assertEqual(torch.float64, series.dtype)
    fit = sgvb(model, y, max_iters=8, quiet=True)
    self.assertIsInstance(fit, MVNPosterior)
def test_smoke_test_sgvb_gpu_if_available(self):
    """Smoke test of sgvb() with data simulated on the CUDA device."""
    model = UnivariateGaussian(device=cuda)
    torch.manual_seed(123)
    y = model.simulate(N=100, μ=5., σ=5.)
    self.assertEqual(cuda.type, y.device.type)
    fit = sgvb(
        model, y, max_iters=16, num_draws=1, sim_entropy=True, quiet=True
    )
    self.assertIsInstance(fit, MVNPosterior)
def test_training(self):
    """Simulate from the filtered local-level model and fit it with SGVB."""
    model = FilteredLocalLevelModel(input_length=50)
    true_params = dict(γ=0., η=2., ρ=0.95, σ=1.5)
    torch.manual_seed(123)  # data seed
    y, z = model.simulate(**true_params)
    # both series are tensors of the configured input length
    for series in (y, z):
        self.assertIsInstance(series, torch.Tensor)
        self.assertEqual((50,), series.shape)
    torch.manual_seed(123)  # algorithm seed
    fit = sgvb(model, y, max_iters=8, quiet=True)
    self.assertIsInstance(fit, MVNPosterior)
def test_plots(self):
    """Plot methods of a fitted local-level model run without displaying.

    Bug fix: the original called ``patch("ptvi.model.plt.show", fit.plot_x())``,
    which evaluates the plot call eagerly as the *replacement object* and never
    starts the patcher — so ``plt.show`` was not actually suppressed. Using
    ``patch`` as a context manager activates it while each plot method runs.
    """
    torch.manual_seed(123)
    m = LocalLevelModel(input_length=20)
    y, z = m.simulate(γ=0., η=2., σ=1.5, ρ=0.85)
    fit = sgvb(m, y, max_iters=100, quiet=True)
    with patch("ptvi.model.plt.show"):
        fit.plot_sample_paths()
        fit.plot_pred_ci(fc_steps=2, true_y=y)
        fit.plot_marg_post("η")
        fit.plot_data()
        fit.plot_elbos()
        fit.plot_latent(true_z=z, include_data=True)
def test_plots_and_forecasts_gpu(self):
    """Plot/forecast methods run for a float64 CUDA-fitted AR(2) model.

    Bug fix: the original called ``patch("ptvi.model.plt.show", fit.plot_x())``,
    which evaluates the plot call eagerly as the *replacement object* and never
    starts the patcher — so ``plt.show`` was not actually suppressed. Using
    ``patch`` as a context manager activates it while each plot method runs.
    """
    torch.manual_seed(123)
    true_params = dict(μ=1.5, ρ1=0.2, ρ2=0.1, σ=1.5)
    model = AR2(input_length=20, dtype=torch.float64, device=cuda)
    y = model.simulate(**true_params)
    fit = sgvb(model, y, max_iters=10, quiet=True)
    with patch("ptvi.model.plt.show"):
        fit.plot_sample_paths()
        fit.plot_sample_paths(fc_steps=2)
        fit.plot_pred_ci(fc_steps=2, true_y=y)
        fit.plot_marg_post("σ")
        fit.plot_data()
        fit.plot_elbos()
def test_outputs(self):
    """Sampled paths have the right shape, no NaNs; summary lists all params."""
    torch.manual_seed(123)
    m = LocalLevelModel(input_length=20)
    y, z = m.simulate(γ=0., η=2., σ=1.5, ρ=0.85)
    # sampling methods are stochastic: a smoke test is the best we can do
    fit = sgvb(m, y, max_iters=100, quiet=True)
    for fc_steps in (0, 10):
        paths = fit.sample_paths(N=10, fc_steps=fc_steps)
        self.assertEqual((10, 20 + fc_steps), paths.shape)
        self.assertEqual(0, torch.sum(torch.isnan(paths)))
    summ = fit.summary()
    self.assertTrue(all(summ.index == ["γ", "η", "σ", "ρ"]))
def conditional(ctx, datafile, t, outfile, n, a, b, c, maxiters):
    """Forecast stoch vol model and compute log score, conditional on T observations.

    Reads the first ``t`` observations from ``datafile`` (a csv with ``y`` and
    ``z`` columns), draws ``n`` one-step-ahead variates from the true model
    conditional on the true latent state z_T and the assumed parameters
    (a, b, c), fits an SVModel with SGVB, and scores the fitted forecast
    density against those draws. Results are written to ``outfile`` as JSON.

    Example:

        stochvol --data_seed=123 --algo_seed=123 conditional experiment.csv 200 SV00200.json --N=100 --a=1. --b=0. --c=0.8
    """
    # NOTE(review): assert is stripped under ``python -O``; a click error
    # would be a sturdier guard for user input — leaving as-is here.
    assert t > 1
    start_date, start_time = str(datetime.today()), time()
    click.echo(_DIVIDER)
    click.echo("Stochastic volatility model: conditional score estimation")
    click.echo(_DIVIDER)
    true_params = dict(a=a, b=b, c=c)
    # seeds come from the CLI group context (set by the top-level command)
    algo_seed = ctx.obj["algo_seed"]
    data_seed = ctx.obj["data_seed"]
    data = pd.read_csv(datafile)
    click.echo(f"Started at: {start_date}")
    click.echo(f"Reading {t}/{len(data)} observations from {datafile}.")
    click.echo(f"True parameters assumed to be a={a}, b={b}, c={c}")
    # draw N variates from p(y_T+1 | z_T+1, a, b, c)
    click.echo(
        f"Drawing {n} variates from p(y_T+1, z_T+1 | z_T, a, b, c) with "
        f"data_seed={data_seed}"
    )
    torch.manual_seed(data_seed)
    # rebind the scalar parameters as tensors for the arithmetic below
    a, b, c = map(torch.tensor, (a, b, c))
    # z_{T+1} = b + c*z_T + ε, ε ~ N(0, 1); data["z"][t-1] is z_T (0-based)
    z_next = b + c * data["z"][t - 1] + Normal(0, 1).sample((n,))
    # y_{T+1} | z_{T+1} ~ N(0, exp(a) * exp(z_{T+1}/2))
    y_next = Normal(0, torch.exp(a) * torch.exp(z_next / 2)).sample()
    y_next_list = y_next.cpu().numpy().squeeze().tolist()  # for saving
    # perform inference
    y = data["y"][:t]
    model = SVModel(input_length=t)
    click.echo(repr(model))
    torch.manual_seed(algo_seed)
    fit = sgvb(model, y, max_iters=maxiters)
    click.echo("Inference summary:")
    click.echo(fit.summary(true=true_params))
    click.echo(f"Generating {n} forecast draws from q...")
    # filter to get p(z_T | y, θ) then project z_{T+1}, z_{T+2}, ...
    forecast, fc_draws = fit.forecast(steps=1)
    fc_draws_list = fc_draws.squeeze().tolist()
    # evaluate the forecast density at the held-out draws; zero-density
    # draws are dropped before taking logs, so the mean may effectively
    # be over fewer than n points when the density underflows
    dens = forecast.pdf(y_next)
    scores = np.log(dens[dens > 0])
    score = np.mean(scores)
    score_se = np.std(scores)
    click.echo(f"Forecast log score = {score:.4f} nats (sd = {score_se:.4f}, n = {n})")
    click.echo(f"Writing results to {outfile} in JSON format.")
    y_list = data["y"][:t].tolist()
    z_list = data["z"][:t].tolist()
    # everything needed to reproduce and post-process the experiment
    summary = {
        "method": "VSMC",
        "algo_seed": algo_seed,
        "data_seed": data_seed,
        "datafile": datafile,
        "t": t,
        "outfile": outfile,
        "fc_draws": fc_draws_list,
        "score": score,
        "score_se": score_se,
        "n": n,
        "y_next": y_next_list,
        "start_date": start_date,
        "elapsed": time() - start_time,
        "true_params": true_params,
        "full_length": len(data),
        "max_iters": maxiters,
        "inference_results": str(fit.summary()),
        "y": y_list,
        "z": z_list,
    }
    with open(outfile, "w", encoding="utf8") as ofilep:
        json.dump(summary, ofilep, indent=4, sort_keys=True)
    click.echo(f"Done in {time() - start_time:.1f} seconds.")
    click.echo(_DIVIDER)
def test_training_loop(self):
    """SGVB training loop runs on simulated stochastic-volatility data.

    Consistency fix: every other test in this file seeds the RNG before
    simulating so the data (and hence the test) is reproducible; this one
    did not, making it the only non-deterministic test here.
    """
    torch.manual_seed(123)
    m = StochVolModel(input_length=50)
    y, b = m.simulate(λ=0.5, σ=0.5, φ=0.95)
    fit = sgvb(m, y, max_iters=5, quiet=True)
    self.assertIsInstance(fit, MVNPosterior)