Example #1
    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()

        def model():
            mu_latent = pyro.sample("mu_latent", dist.normal, self.mu0,
                                    torch.pow(self.lam0, -0.5))
            pyro.observe("obs", dist.normal, self.data, mu_latent,
                         torch.pow(self.lam, -0.5))
            return mu_latent

        def guide():
            mu_q = pyro.param(
                "mu_q",
                Variable(self.analytic_mu_n.data + 0.134 * torch.ones(2),
                         requires_grad=True))
            log_sig_q = pyro.param(
                "log_sig_q",
                Variable(self.analytic_log_sig_n.data - 0.14 * torch.ones(2),
                         requires_grad=True))
            sig_q = torch.exp(log_sig_q)
            normal = dist.normal if reparameterized else fakes.nonreparameterized_normal
            pyro.sample("mu_latent", normal, mu_q, sig_q)

        adam = optim.Adam({"lr": .001})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

        # compare the fitted variational parameters against the analytic posterior
        mu_error = param_mse("mu_q", self.analytic_mu_n)
        log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)

        assert_equal(0.0, mu_error, prec=0.05)
        assert_equal(0.0, log_sig_error, prec=0.05)
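
These snippets come from a Pyro 0.x-era test suite and omit their shared scaffolding. A minimal sketch of the imports and helpers they rely on is given below; param_mse and assert_equal are reconstructed from how they are called above, so treat the exact definitions as assumptions rather than the original harness:

    # minimal scaffold assumed by the examples (reconstructed, not the original file)
    import time

    import numpy as np
    import pytest
    import torch
    from torch.autograd import Variable

    import pyro
    import pyro.distributions as dist
    import pyro.optim as optim
    from pyro.infer import SVI

    def param_mse(name, target):
        # mean squared error between a fitted Pyro param and its analytic target
        return float(torch.mean(torch.pow(pyro.param(name) - target, 2.0)))

    def assert_equal(expected, actual, prec):
        # check that two scalars agree up to an absolute precision
        assert abs(expected - actual) <= prec, \
            "expected %s, got %s (prec %s)" % (expected, actual, prec)
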
Example #2
    def test_elbo_with_transformed_distribution(self):
        pyro.clear_param_store()

        def model():
            zero = Variable(torch.zeros(1))
            one = Variable(torch.ones(1))
            mu_latent = pyro.sample("mu_latent", dist.normal, self.mu0,
                                    torch.pow(self.tau0, -0.5))
            bijector = AffineExp(torch.pow(self.tau, -0.5), mu_latent)
            x_dist = TransformedDistribution(dist.normal, bijector)
            pyro.observe("obs0", x_dist, self.data[0], zero, one)
            pyro.observe("obs1", x_dist, self.data[1], zero, one)
            return mu_latent

        def guide():
            mu_q_log = pyro.param(
                "mu_q_log",
                Variable(self.log_mu_n.data + 0.17, requires_grad=True))
            tau_q_log = pyro.param(
                "tau_q_log",
                Variable(self.log_tau_n.data - 0.143, requires_grad=True))
            mu_q, tau_q = torch.exp(mu_q_log), torch.exp(tau_q_log)
            pyro.sample("mu_latent", dist.normal, mu_q, torch.pow(tau_q, -0.5))

        adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(12001):
            svi.step()

        mu_error = param_abs_error("mu_q_log", self.log_mu_n)
        tau_error = param_abs_error("tau_q_log", self.log_tau_n)
        assert_equal(0.0, mu_error, prec=0.05)
        assert_equal(0.0, tau_error, prec=0.05)
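
param_abs_error plays the same role as param_mse above but measures absolute error; a sketch consistent with its call sites (the exact reduction is an assumption):

    def param_abs_error(name, target):
        # summed absolute error between a named Pyro param and its target
        return float(torch.sum(torch.abs(target - pyro.param(name))))
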
Example #3
    def test_elbo_nonreparameterized(self):
        pyro.clear_param_store()

        def model():
            lambda_latent = pyro.sample("lambda_latent", dist.gamma, self.alpha0, self.beta0)
            pyro.observe("obs0", dist.exponential, self.data[0], lambda_latent)
            pyro.observe("obs1", dist.exponential, self.data[1], lambda_latent)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param(
                "alpha_q_log",
                Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
            beta_q_log = pyro.param(
                "beta_q_log",
                Variable(self.log_beta_n.data - 0.143, requires_grad=True))
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("lambda_latent", dist.gamma, alpha_q, beta_q)

        adam = optim.Adam({"lr": .0003, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(10001):
            svi.step()

        alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
        beta_error = param_abs_error("beta_q_log", self.log_beta_n)
        self.assertEqual(0.0, alpha_error, prec=0.08)
        self.assertEqual(0.0, beta_error, prec=0.08)
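
The targets log_alpha_n and log_beta_n are the logs of the exact posterior parameters: an Exponential likelihood with a Gamma(alpha0, beta0) prior on its rate has the conjugate posterior Gamma(alpha0 + n, beta0 + sum(x)). A sketch of how a fixture could compute them (the hyperparameter and data values are assumptions; only the conjugacy formula is fixed):

    # conjugate Gamma-Exponential posterior: Gamma(alpha0 + n, beta0 + sum(x))
    alpha0 = Variable(torch.Tensor([1.0]))           # assumed prior shape
    beta0 = Variable(torch.Tensor([1.0]))            # assumed prior rate
    data = Variable(torch.Tensor([2.0, 5.0]))        # assumed observations (self.data)
    n = data.size(0)
    log_alpha_n = torch.log(alpha0 + n)              # self.log_alpha_n
    log_beta_n = torch.log(beta0 + torch.sum(data))  # self.log_beta_n
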
Example #4
    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()
        pt_guide = LogNormalNormalGuide(self.log_mu_n.data + 0.17,
                                        self.log_tau_n.data - 0.143)

        def model():
            mu_latent = pyro.sample("mu_latent", dist.normal, self.mu0,
                                    torch.pow(self.tau0, -0.5))
            sigma = torch.pow(self.tau, -0.5)
            pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent,
                         sigma)
            pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent,
                         sigma)
            return mu_latent

        def guide():
            pyro.module("mymodule", pt_guide)
            mu_q = torch.exp(pt_guide.mu_q_log)
            tau_q = torch.exp(pt_guide.tau_q_log)
            sigma = torch.pow(tau_q, -0.5)
            normal = dist.normal if reparameterized else fakes.nonreparameterized_normal
            pyro.sample("mu_latent", normal, mu_q, sigma)

        adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

        mu_error = param_abs_error("mymodule$$$mu_q_log", self.log_mu_n)
        tau_error = param_abs_error("mymodule$$$tau_q_log", self.log_tau_n)
        assert_equal(0.0, mu_error, prec=0.07)
        assert_equal(0.0, tau_error, prec=0.07)
Example #5
    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()

        def model():
            mu_latent = pyro.sample("mu_latent", dist.normal,
                                    self.mu0, torch.pow(self.lam0, -0.5))
            pyro.map_data("aaa", self.data, lambda i,
                          x: pyro.observe(
                              "obs_%d" % i, dist.normal,
                              x, mu_latent, torch.pow(self.lam, -0.5)),
                          batch_size=self.batch_size)
            return mu_latent

        def guide():
            mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.134 * torch.ones(2),
                                               requires_grad=True))
            log_sig_q = pyro.param("log_sig_q", Variable(
                                   self.analytic_log_sig_n.data - 0.14 * torch.ones(2),
                                   requires_grad=True))
            sig_q = torch.exp(log_sig_q)
            pyro.sample("mu_latent", dist.Normal(mu_q, sig_q, reparameterized=reparameterized))
            pyro.map_data("aaa", self.data, lambda i, x: None,
                          batch_size=self.batch_size)

        adam = optim.Adam({"lr": .001})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

        # compare the fitted variational parameters against the analytic posterior
        mu_error = param_mse("mu_q", self.analytic_mu_n)
        log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)

        self.assertEqual(0.0, mu_error, prec=0.05)
        self.assertEqual(0.0, log_sig_error, prec=0.05)
Example #6
    def do_inference(self):
        pyro.clear_param_store()
        pyro.util.set_rng_seed(0)
        t0 = time.time()

        adam = optim.Adam({"lr": 0.005, "betas": (0.95, 0.999)})
        svi = SVI(self.model,
                  self.guide,
                  adam,
                  loss="ELBO",
                  trace_graph=False,
                  analytic_kl=False)
        losses = []

        for k in range(100001):
            loss = svi.step(data)
            losses.append(loss)

            if k % 20 == 0 and k > 20:
                t_k = time.time()
                print("[epoch %05d] mean elbo: %.5f     elapsed time: %.4f" %
                      (k, -np.mean(losses[-100:]), t_k - t0))
                print("[W] %.2f %.2f %.2f %.2f   %.2f %.2f %.2f %.2f   %.2f %.2f %.2f %.2f" % (\
                      self.get_w_stats("top") + self.get_w_stats("mid") + self.get_w_stats("bottom") ))
                print("[Z] %.2f %.2f %.2f %.2f   %.2f %.2f %.2f %.2f   %.2f %.2f %.2f %.2f" % (\
                      self.get_z_stats("top") + self.get_z_stats("mid") + self.get_z_stats("bottom") ))

        return losses
Example #7
    def test(self):
        pyro.clear_param_store()
        pyro.util.set_rng_seed(1)
        print("*** multinomial dirichlet ***   [reparameterized = %s]" % self.use_rep)
        print("alpha0: ", self.alpha0.data.numpy())
        print("alphap: ", np.exp(self.log_alpha_n.data.numpy()))

        def model():
            p_latent = pyro.sample("p_latent", dist.dirichlet, self.alpha0)
            with pyro.iarange('observe_data'):
                pyro.sample('obs', dist.multinomial, p_latent, 1, obs=self.data)

        def guide():
            alpha_q_log = pyro.param("alpha_q_log", Variable(self.log_alpha_n.data +
                                                             self.noise*torch.randn(self.N),
                                                             requires_grad=True))
            alpha_q = torch.exp(alpha_q_log)
            if self.use_rep:
                pyro.sample("p_latent", dist.dirichlet, alpha_q)
            else:
                pyro.sample("p_latent", NonRepDirichlet(alpha_q))

        adam = optim.Adam({"lr": .0003, "betas": (0.95, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(25001):
            svi.step()

            if k % 500 == 0:
                alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
                min_alpha = np.min(pyro.param("alpha_q_log").data.numpy())
                print("[%04d]: %.4f %.4f" % (k, alpha_error, min_alpha))
Example #8
    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()
        pt_guide = LogNormalNormalGuide(self.log_mu_n.data + 0.17,
                                        self.log_tau_n.data - 0.143)

        def model():
            mu_latent = pyro.sample("mu_latent", dist.normal,
                                    self.mu0, torch.pow(self.tau0, -0.5))
            sigma = torch.pow(self.tau, -0.5)
            pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
            pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
            return mu_latent

        def guide():
            pyro.module("mymodule", pt_guide)
            mu_q, tau_q = torch.exp(pt_guide.mu_q_log), torch.exp(pt_guide.tau_q_log)
            sigma = torch.pow(tau_q, -0.5)
            pyro.sample("mu_latent", dist.Normal(mu_q, sigma, reparameterized=reparameterized))

        adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

        mu_error = param_abs_error("mymodule$$$mu_q_log", self.log_mu_n)
        tau_error = param_abs_error("mymodule$$$tau_q_log", self.log_tau_n)
        self.assertEqual(0.0, mu_error, prec=0.07)
        self.assertEqual(0.0, tau_error, prec=0.07)
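
LogNormalNormalGuide itself is not part of these excerpts. From its usage (the constructor takes initial values and the guide reads mu_q_log and tau_q_log from it) it is presumably a small nn.Module along the following lines; this is a reconstruction, not the original class:

    import torch.nn as nn

    class LogNormalNormalGuide(nn.Module):
        # container for the log-space variational parameters used by the guide
        def __init__(self, mu_q_log_init, tau_q_log_init):
            super(LogNormalNormalGuide, self).__init__()
            self.mu_q_log = nn.Parameter(mu_q_log_init)
            self.tau_q_log = nn.Parameter(tau_q_log_init)

Registering it via pyro.module("mymodule", pt_guide) is what yields the prefixed parameter names like "mymodule$$$mu_q_log" queried after training.
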
Example #9
    def test_elbo_nonreparameterized(self):
        pyro.clear_param_store()

        def model():
            p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
            pyro.map_data("aaa",
                          self.data, lambda i, x: pyro.observe(
                              "obs_{}".format(i), dist.bernoulli, x, p_latent),
                          batch_size=self.batch_size)
            return p_latent

        def guide():
            alpha_q_log = pyro.param("alpha_q_log",
                                     Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
            beta_q_log = pyro.param("beta_q_log",
                                    Variable(self.log_beta_n.data - 0.143, requires_grad=True))
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("p_latent", dist.beta, alpha_q, beta_q)
            pyro.map_data("aaa", self.data, lambda i, x: None, batch_size=self.batch_size)

        adam = optim.Adam({"lr": .001, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(10001):
            svi.step()

        # compare the fitted variational parameters against the analytic posterior
        alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
        beta_error = param_abs_error("beta_q_log", self.log_beta_n)

        self.assertEqual(0.0, alpha_error, prec=0.08)
        self.assertEqual(0.0, beta_error, prec=0.08)
Example #10
    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()
        beta = dist.beta if reparameterized else fakes.nonreparameterized_beta

        def model():
            p_latent = pyro.sample("p_latent", beta, self.alpha0, self.beta0)
            pyro.observe("obs", dist.bernoulli, self.data, p_latent)
            return p_latent

        def guide():
            alpha_q_log = pyro.param(
                "alpha_q_log",
                Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
            beta_q_log = pyro.param(
                "beta_q_log",
                Variable(self.log_beta_n.data - 0.143, requires_grad=True))
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("p_latent", beta, alpha_q, beta_q)

        adam = optim.Adam({"lr": .001, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

        alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
        beta_error = param_abs_error("beta_q_log", self.log_beta_n)
        assert_equal(0.0, alpha_error, prec=0.08)
        assert_equal(0.0, beta_error, prec=0.08)
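
As in the Gamma-Exponential case, the targets follow from conjugacy: a Beta(alpha0, beta0) prior with Bernoulli observations has the posterior Beta(alpha0 + successes, beta0 + failures). A sketch of the target computation (prior and data values are assumptions):

    # conjugate Beta-Bernoulli posterior
    alpha0 = Variable(torch.Tensor([1.0]))                 # assumed prior
    beta0 = Variable(torch.Tensor([1.0]))
    data = Variable(torch.Tensor([0.0, 1.0, 1.0, 1.0]))    # assumed observations (self.data)
    log_alpha_n = torch.log(alpha0 + torch.sum(data))                  # successes
    log_beta_n = torch.log(beta0 + data.size(0) - torch.sum(data))     # failures
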
Example #11
    def test_extra_samples(self):
        pyro.clear_param_store()

        adam = optim.Adam({"lr": .001})
        svi = SVI(self.model, self.guide, adam, loss="ELBO", trace_graph=False)

        with pytest.warns(Warning):
            svi.step()
Example #12
    def test_duplicate_obs_name(self):
        pyro.clear_param_store()

        adam = optim.Adam({"lr": .001})
        svi = SVI(self.duplicate_obs, self.guide, adam, loss="ELBO", trace_graph=False)

        with pytest.raises(RuntimeError):
            svi.step()
Example #13
    def do_test_fixedness(self, fixed_tags):
        pyro.clear_param_store()

        def model():
            alpha_p_log = pyro.param("alpha_p_log",
                                     Variable(self.alpha_p_log_0.clone(),
                                              requires_grad=True),
                                     tags="model")
            beta_p_log = pyro.param("beta_p_log",
                                    Variable(self.beta_p_log_0.clone(),
                                             requires_grad=True),
                                    tags="model")
            alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
            lambda_latent = pyro.sample("lambda_latent", dist.gamma, alpha_p,
                                        beta_p)
            pyro.observe("obs", dist.poisson, self.data, lambda_latent)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param("alpha_q_log",
                                     Variable(self.alpha_q_log_0.clone(),
                                              requires_grad=True),
                                     tags="guide")
            beta_q_log = pyro.param("beta_q_log",
                                    Variable(self.beta_q_log_0.clone(),
                                             requires_grad=True),
                                    tags="guide")
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("lambda_latent", dist.gamma, alpha_q, beta_q)

        def per_param_args(module_name, param_name, tags):
            # parameters in a fixed tag group get a zero learning rate; the rest train
            if tags in fixed_tags:
                return {'lr': 0.0}
            else:
                return {'lr': 0.01}

        adam = optim.Adam(per_param_args)
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for _ in range(3):
            svi.step()

        model_unchanged = (torch.equal(pyro.param("alpha_p_log").data, self.alpha_p_log_0)) and\
                          (torch.equal(pyro.param("beta_p_log").data, self.beta_p_log_0))
        guide_unchanged = (torch.equal(pyro.param("alpha_q_log").data, self.alpha_q_log_0)) and\
                          (torch.equal(pyro.param("beta_q_log").data, self.beta_q_log_0))
        model_changed = not model_unchanged
        guide_changed = not guide_unchanged
        error = ('model' in fixed_tags and model_changed) or ('guide' in fixed_tags and guide_changed)
        return (not error)
Example #14
    def do_test_fixedness(self, fixed_tags):
        pyro.clear_param_store()

        def model():
            alpha_p_log = pyro.param(
                "alpha_p_log", Variable(
                    self.alpha_p_log_0.clone(), requires_grad=True), tags="model")
            beta_p_log = pyro.param(
                "beta_p_log", Variable(
                    self.beta_p_log_0.clone(), requires_grad=True), tags="model")
            alpha_p, beta_p = torch.exp(alpha_p_log), torch.exp(beta_p_log)
            lambda_latent = pyro.sample("lambda_latent", dist.gamma, alpha_p, beta_p)
            pyro.observe("obs", dist.poisson, self.data, lambda_latent)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param(
                "alpha_q_log", Variable(
                    self.alpha_q_log_0.clone(), requires_grad=True), tags="guide")
            beta_q_log = pyro.param(
                "beta_q_log", Variable(
                    self.beta_q_log_0.clone(), requires_grad=True), tags="guide")
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            pyro.sample("lambda_latent", dist.gamma, alpha_q, beta_q)

        def per_param_args(module_name, param_name, tags):
            # parameters in a fixed tag group get a zero learning rate; the rest train
            if tags in fixed_tags:
                return {'lr': 0.0}
            else:
                return {'lr': 0.01}

        adam = optim.Adam(per_param_args)
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for _ in range(3):
            svi.step()

        model_unchanged = (torch.equal(pyro.param("alpha_p_log").data, self.alpha_p_log_0)) and\
                          (torch.equal(pyro.param("beta_p_log").data, self.beta_p_log_0))
        guide_unchanged = (torch.equal(pyro.param("alpha_q_log").data, self.alpha_q_log_0)) and\
                          (torch.equal(pyro.param("beta_q_log").data, self.beta_q_log_0))
        model_changed = not model_unchanged
        guide_changed = not guide_unchanged
        error = ('model' in fixed_tags and model_changed) or ('guide' in fixed_tags and guide_changed)
        return (not error)
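
do_test_fixedness returns True exactly when no parameter in a fixed tag group moved, so a driver for it (the test names and tag combinations here are assumptions) might read:

    def test_model_fixedness(self):
        assert self.do_test_fixedness(fixed_tags=["model"])

    def test_guide_fixedness(self):
        assert self.do_test_fixedness(fixed_tags=["guide"])

    def test_model_and_guide_fixedness(self):
        # with both groups frozen, no parameter should move at all
        assert self.do_test_fixedness(fixed_tags=["model", "guide"])
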
Example #15
    def test(self):
        pyro.clear_param_store()
        pyro.util.set_rng_seed(5)
        print("*** exponential gamma ***   [reparameterized = %s]" % self.use_rep)
        print("        log_alpha log_beta   mean_error  var_error")

        def model():
            lambda_latent = pyro.sample("lambda_latent", dist.gamma, self.alpha0, self.beta0)
            with pyro.iarange('observe_data'):
                pyro.observe('obs', dist.exponential, self.data, lambda_latent)
            return lambda_latent

        def guide():
            alpha_q_log = pyro.param(
                "alpha_q_log", Variable(self.log_alpha_n.data + noise(), requires_grad=True))
            beta_q_log = pyro.param(
                "beta_q_log", Variable(self.log_beta_n.data - noise(), requires_grad=True))
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            if self.use_rep:
                pyro.sample("lambda_latent", dist.gamma, alpha_q, beta_q)
            else:
                pyro.sample("lambda_latent", NonRepGamma(alpha_q, beta_q))

        if self.use_rep:
            adam = optim.Adam({"lr": .0005, "betas": (0.95, 0.999)})
        else:
            adam = optim.Adam({"lr": .0005, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(15001):
            svi.step()

            if k % 500 == 0:
                alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
                beta_error = param_abs_error("beta_q_log", self.log_beta_n)
                mean_error = gamma_mean_error("alpha_q_log", "beta_q_log",
                                              self.log_alpha_n, self.log_beta_n)
                var_error = gamma_var_error("alpha_q_log", "beta_q_log",
                                            self.log_alpha_n, self.log_beta_n)
                print("[%04d]: %.4f    %.4f     %.4f      %.4f" % (k, alpha_error, beta_error,
                                                                   mean_error, var_error))
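
gamma_mean_error and gamma_var_error compare the moments implied by the fitted log-parameters with the analytic ones; for Gamma(alpha, beta) in the rate parameterization the mean is alpha/beta and the variance alpha/beta**2. A sketch consistent with the call sites (the exact error metric is an assumption):

    def gamma_mean_error(alpha_q_name, beta_q_name, log_alpha_n, log_beta_n):
        # |alpha_q/beta_q - alpha_n/beta_n|, the absolute error in the Gamma mean
        alpha_q, beta_q = torch.exp(pyro.param(alpha_q_name)), torch.exp(pyro.param(beta_q_name))
        alpha_n, beta_n = torch.exp(log_alpha_n), torch.exp(log_beta_n)
        return float(torch.abs(alpha_q / beta_q - alpha_n / beta_n))

    def gamma_var_error(alpha_q_name, beta_q_name, log_alpha_n, log_beta_n):
        # same comparison for the variance alpha / beta**2
        alpha_q, beta_q = torch.exp(pyro.param(alpha_q_name)), torch.exp(pyro.param(beta_q_name))
        alpha_n, beta_n = torch.exp(log_alpha_n), torch.exp(log_beta_n)
        return float(torch.abs(alpha_q / beta_q ** 2 - alpha_n / beta_n ** 2))
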
Example #16
    def test_elbo_with_transformed_distribution(self):
        pyro.clear_param_store()

        def model():
            zero = Variable(torch.zeros(1))
            one = Variable(torch.ones(1))
            mu_latent = pyro.sample("mu_latent", dist.normal,
                                    self.mu0, torch.pow(self.tau0, -0.5))
            bijector = AffineExp(torch.pow(self.tau, -0.5), mu_latent)
            x_dist = TransformedDistribution(dist.normal, bijector)
            pyro.observe("obs0", x_dist, self.data[0], zero, one)
            pyro.observe("obs1", x_dist, self.data[1], zero, one)
            return mu_latent

        def guide():
            mu_q_log = pyro.param(
                "mu_q_log",
                Variable(self.log_mu_n.data + 0.17, requires_grad=True))
            tau_q_log = pyro.param(
                "tau_q_log",
                Variable(self.log_tau_n.data - 0.143, requires_grad=True))
            mu_q, tau_q = torch.exp(mu_q_log), torch.exp(tau_q_log)
            pyro.sample("mu_latent", dist.normal, mu_q, torch.pow(tau_q, -0.5))

        adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(12001):
            svi.step()

        mu_error = param_abs_error("mu_q_log", self.log_mu_n)
        tau_error = param_abs_error("tau_q_log", self.log_tau_n)
        self.assertEqual(0.0, mu_error, prec=0.05)
        self.assertEqual(0.0, tau_error, prec=0.05)
Example #17
    def test(self, use_rep=False, lr=0.001, beta1=0.90, beta2=0.999,
             verbose=False, seed=1, noise_epsilon=0.6, report_frequencies=(5000, 10000, 15000)):
        pyro.clear_param_store()
        pyro.util.set_rng_seed(seed)
        if verbose:
            print("*** poisson gamma ***   [reparameterized = %s]" % use_rep)

        def guide(obs=None):
            alpha_q_log = pyro.param("alpha_q_log",
                Variable(self.log_alpha_n.data + noise(eps=noise_epsilon), requires_grad=True))
            beta_q_log = pyro.param("beta_q_log",
                Variable(self.log_beta_n.data - noise(eps=noise_epsilon), requires_grad=True))
            alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
            if use_rep:
                pyro.sample("lambda_latent", dist.gamma, alpha_q, beta_q)
            else:
                pyro.sample("lambda_latent", NonRepGamma(alpha_q, beta_q))

        adam = optim.Adam({"lr": lr, "betas": (beta1, beta2)})
        svi = SVI(self.model, guide, adam, loss="ELBO", trace_graph=False)
        svi_eval = SVI(self.model, guide, adam, loss="ELBO", trace_graph=False, num_particles=500)
        results = []

        for k in range(report_frequencies[-1]):
            svi.step(self.data)

            if (k+1) in report_frequencies:
                alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
                beta_error = param_abs_error("beta_q_log", self.log_beta_n)
                mean_error = gamma_mean_error("alpha_q_log", "beta_q_log",
                                              self.log_alpha_n, self.log_beta_n)
                var_error = gamma_var_error("alpha_q_log", "beta_q_log",
                                            self.log_alpha_n, self.log_beta_n)
                if verbose:
                    print("[%04d]: %.4f    %.4f     %.4f      %.4f" % (k, alpha_error, beta_error,
                                                                       mean_error, var_error))

                elbo = -svi_eval.evaluate_loss(self.data)
                results.append((elbo, alpha_error, beta_error))

        return results
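
Here, too, the targets come from conjugacy: a Gamma(alpha0, beta0) prior on a Poisson rate, after observing n counts x_i, has the posterior Gamma(alpha0 + sum(x), beta0 + n). A sketch of the fixture computation (hyperparameter and count values are assumptions):

    # conjugate Poisson-Gamma posterior: Gamma(alpha0 + sum(x), beta0 + n)
    alpha0 = Variable(torch.Tensor([1.0]))             # assumed prior
    beta0 = Variable(torch.Tensor([1.0]))
    data = Variable(torch.Tensor([1.0, 2.0, 3.0]))     # assumed Poisson counts (self.data)
    log_alpha_n = torch.log(alpha0 + torch.sum(data))  # self.log_alpha_n
    log_beta_n = torch.log(beta0 + data.size(0))       # self.log_beta_n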