Example #1
File: test_nuts.py (project: zyxue/pyro)
def test_gaussian_mixture_model(jit):
    K, N = 3, 1000

    def gmm(data):
        mix_proportions = pyro.sample("phi", dist.Dirichlet(torch.ones(K)))
        with pyro.plate("num_clusters", K):
            cluster_means = pyro.sample(
                "cluster_means", dist.Normal(torch.arange(float(K)), 1.))
        with pyro.plate("data", data.shape[0]):
            assignments = pyro.sample("assignments",
                                      dist.Categorical(mix_proportions))
            pyro.sample("obs",
                        dist.Normal(cluster_means[assignments], 1.),
                        obs=data)
        return cluster_means

    true_cluster_means = torch.tensor([1., 5., 10.])
    true_mix_proportions = torch.tensor([0.1, 0.3, 0.6])
    cluster_assignments = dist.Categorical(true_mix_proportions).sample(
        torch.Size((N, )))
    data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample()
    nuts_kernel = NUTS(gmm,
                       max_plate_nesting=1,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=300, warmup_steps=100).run(data)
    posterior = mcmc_run.marginal(["phi", "cluster_means"]).empirical
    assert_equal(posterior["phi"].mean.sort()[0],
                 true_mix_proportions,
                 prec=0.05)
    assert_equal(posterior["cluster_means"].mean.sort()[0],
                 true_cluster_means,
                 prec=0.2)
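All the snippets in this section share the same preamble; a minimal sketch of the assumed imports (these examples are written against the pre-1.0 Pyro API, in which MCMC, NUTS, and HMC lived under pyro.infer.mcmc and MCMC exposed .run() and .marginal()):

# Assumed common imports for the examples in this section (pre-1.0 Pyro API).
import torch
import pyro
import pyro.distributions as dist
from pyro.infer.mcmc import MCMC, NUTS, HMC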
Example #2
def run_pyro_nuts(data, pfile, n_samples, params):

    # Import the model and, if present, the transformed_data function from the Pyro module.

    model = import_by_string(pfile + ".model")
    assert model is not None, "model couldn't be imported"
    transformed_data = import_by_string(pfile + ".transformed_data")
    if transformed_data is not None:
        transformed_data(data)  # applied for its side effect on data

    nuts_kernel = NUTS(model, step_size=0.0855)
    mcmc_run = MCMC(nuts_kernel,
                    num_samples=n_samples,
                    warmup_steps=int(n_samples / 2))
    posteriors = {k: [] for k in params}

    for trace, _ in mcmc_run._traces(data, params):
        for k in posteriors:
            posteriors[k].append(trace.nodes[k]['value'])

    #posteriors["sigma"] = list(map(torch.exp, posteriors["log_sigma"]))
    #del posteriors["log_sigma"]

    posterior_means = {
        k: torch.mean(torch.stack(posteriors[k]), 0)
        for k in posteriors
    }
    return posterior_means
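import_by_string is an external helper that is not shown here; a plausible sketch using the standard importlib machinery (the dotted "module.attr" convention is inferred from the call sites above):

import importlib

def import_by_string(dotted_path):
    # Hypothetical helper: resolve "package.module.attr" to the attribute,
    # returning None when the module or attribute does not exist.
    module_path, _, attr_name = dotted_path.rpartition('.')
    try:
        return getattr(importlib.import_module(module_path), attr_name)
    except (ImportError, AttributeError):
        return None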
Example #3
def test_hmc(model_class, X, y, kernel, likelihood):
    if model_class is SparseGPRegression or model_class is VariationalSparseGP:
        gp = model_class(X, y, kernel, X, likelihood)
    else:
        gp = model_class(X, y, kernel, likelihood)

    kernel.set_prior("variance",
                     dist.Uniform(torch.tensor(0.5), torch.tensor(1.5)))
    kernel.set_prior("lengthscale",
                     dist.Uniform(torch.tensor(1.0), torch.tensor(3.0)))

    hmc_kernel = HMC(gp.model, step_size=1)
    mcmc_run = MCMC(hmc_kernel, num_samples=10)

    post_trace = defaultdict(list)
    for trace, _ in mcmc_run._traces():
        variance_name = param_with_module_name(kernel.name, "variance")
        post_trace["variance"].append(trace.nodes[variance_name]["value"])
        lengthscale_name = param_with_module_name(kernel.name, "lengthscale")
        post_trace["lengthscale"].append(
            trace.nodes[lengthscale_name]["value"])
        if model_class is VariationalGP:
            f_name = param_with_module_name(gp.name, "f")
            post_trace["f"].append(trace.nodes[f_name]["value"])
        if model_class is VariationalSparseGP:
            u_name = param_with_module_name(gp.name, "u")
            post_trace["u"].append(trace.nodes[u_name]["value"])

    for param in post_trace:
        param_mean = torch.mean(torch.stack(post_trace[param]), 0)
        logger.info("Posterior mean - {}".format(param))
        logger.info(param_mean)
Example #4
File: test_nuts.py (project: zyxue/pyro)
def test_nuts_conjugate_gaussian(fixture, num_samples, warmup_steps,
                                 expected_means, expected_precs, mean_tol,
                                 std_tol):
    pyro.get_param_store().clear()
    nuts_kernel = NUTS(fixture.model)
    mcmc_run = MCMC(nuts_kernel, num_samples, warmup_steps).run(fixture.data)
    for i in range(1, fixture.chain_len + 1):
        param_name = 'loc_' + str(i)
        marginal = mcmc_run.marginal(param_name).empirical[param_name]
        latent_loc = marginal.mean
        latent_std = marginal.variance.sqrt()
        expected_mean = torch.ones(fixture.dim) * expected_means[i - 1]
        expected_std = 1 / torch.sqrt(
            torch.ones(fixture.dim) * expected_precs[i - 1])

        # Actual vs expected posterior means for the latents
        logger.debug('Posterior mean (actual) - {}'.format(param_name))
        logger.debug(latent_loc)
        logger.debug('Posterior mean (expected) - {}'.format(param_name))
        logger.debug(expected_mean)
        assert_equal(rmse(latent_loc, expected_mean).item(),
                     0.0,
                     prec=mean_tol)

        # Actual vs expected posterior precisions for the latents
        logger.debug('Posterior std (actual) - {}'.format(param_name))
        logger.debug(latent_std)
        logger.debug('Posterior std (expected) - {}'.format(param_name))
        logger.debug(expected_std)
        assert_equal(rmse(latent_std, expected_std).item(), 0.0, prec=std_tol)
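rmse is a test helper; a minimal sketch consistent with how it is called here (a scalar root-mean-square error between two tensors) would be:

def rmse(t1, t2):
    # Hypothetical helper: root-mean-square error between two tensors.
    return ((t1 - t2) ** 2).mean().sqrt()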
Example #5
def test_mcmc_interface():
    data = torch.tensor([1.0])
    kernel = PriorKernel(normal_normal_model)
    mcmc = MCMC(kernel=kernel, num_samples=800, warmup_steps=100).run(data)
    marginal = mcmc.marginal().empirical["_RETURN"]
    assert_equal(marginal.sample_size, 800)
    sample_mean = marginal.mean
    sample_std = marginal.variance.sqrt()
    assert_equal(sample_mean, torch.tensor([0.0]), prec=0.08)
    assert_equal(sample_std, torch.tensor([1.0]), prec=0.08)
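PriorKernel and normal_normal_model are test fixtures that are not shown. Judging from the assertions (mean 0, standard deviation 1 on the "_RETURN" site), a stand-in model could look like the sketch below; since PriorKernel, per its name, proposes from the prior, the empirical "_RETURN" marginal should match the standard-normal prior on loc regardless of the data.

def normal_normal_model(data):
    # Hypothetical fixture: standard-normal prior on the location with a
    # unit-scale Normal likelihood; the returned value is the "_RETURN" site.
    loc = pyro.sample("loc", dist.Normal(torch.zeros(1), torch.ones(1)))
    pyro.sample("obs", dist.Normal(loc, torch.ones(1)), obs=data)
    return loc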
Example #6
def test_dirichlet_categorical(jit):
    def model(data):
        concentration = torch.tensor([1.0, 1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Dirichlet(concentration))
        pyro.sample("obs", dist.Categorical(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.1, 0.6, 0.3])
    data = dist.Categorical(true_probs).sample(sample_shape=(torch.Size((2000,))))
    hmc_kernel = HMC(model, trajectory_length=1, jit_compile=jit, ignore_jit_warnings=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=200, warmup_steps=100).run(data)
    posterior = mcmc_run.marginal('p_latent').empirical['p_latent']
    assert_equal(posterior.mean, true_probs, prec=0.02)
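This posterior is also available in closed form: a Dirichlet prior is conjugate to the Categorical likelihood, so the exact posterior is Dirichlet(concentration + counts). A quick reference computation, assuming the variables above are in scope:

# Exact conjugate posterior mean for comparison: Dirichlet(1 + counts) over
# 2000 observations, which approaches the empirical class frequencies.
counts = torch.bincount(data.long(), minlength=3).float()
exact_posterior_mean = (1.0 + counts) / (3.0 + counts.sum())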
Example #7
def test_gamma_normal(jit):
    def model(data):
        rate = torch.tensor([1.0, 1.0])
        concentration = torch.tensor([1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Gamma(concentration, rate))
        pyro.sample("obs", dist.Normal(3, p_latent), obs=data)
        return p_latent

    true_std = torch.tensor([0.5, 2])
    data = dist.Normal(3, true_std).sample(sample_shape=(torch.Size((2000,))))
    hmc_kernel = HMC(model, trajectory_length=1, step_size=0.03, adapt_step_size=False,
                     jit_compile=jit, ignore_jit_warnings=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=200, warmup_steps=200).run(data)
    posterior = mcmc_run.marginal(['p_latent']).empirical['p_latent']
    assert_equal(posterior.mean, true_std, prec=0.05)
Example #8
def test_beta_bernoulli(jit):
    def model(data):
        alpha = torch.tensor([1.1, 1.1])
        beta = torch.tensor([1.1, 1.1])
        p_latent = pyro.sample('p_latent', dist.Beta(alpha, beta))
        with pyro.plate("data", data.shape[0], dim=-2):
            pyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(sample_shape=(torch.Size((1000,))))
    hmc_kernel = HMC(model, trajectory_length=1, max_plate_nesting=2,
                     jit_compile=jit, ignore_jit_warnings=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=800, warmup_steps=500).run(data)
    posterior = mcmc_run.marginal(["p_latent"]).empirical["p_latent"]
    assert_equal(posterior.mean, true_probs, prec=0.05)
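For reference, this model is conjugate as well: a Beta prior with a Bernoulli likelihood yields a Beta(alpha + successes, beta + failures) posterior per dimension. A quick check, assuming the variables above are in scope:

# Exact conjugate posterior mean, per dimension, for the Beta-Bernoulli model.
successes = data.sum(0)
exact_posterior_mean = (1.1 + successes) / (1.1 + 1.1 + data.shape[0])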
Example #9
File: test_nuts.py (project: zyxue/pyro)
def test_gamma_normal(jit, use_multinomial_sampling):
    def model(data):
        rate = torch.tensor([1.0, 1.0])
        concentration = torch.tensor([1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Gamma(concentration, rate))
        pyro.sample("obs", dist.Normal(3, p_latent), obs=data)
        return p_latent

    true_std = torch.tensor([0.5, 2])
    data = dist.Normal(3, true_std).sample(sample_shape=(torch.Size((2000, ))))
    nuts_kernel = NUTS(model,
                       use_multinomial_sampling=use_multinomial_sampling,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=200, warmup_steps=100).run(data)
    posterior = mcmc_run.marginal('p_latent').empirical['p_latent']
    assert_equal(posterior.mean, true_std, prec=0.05)
Example #10
File: test_nuts.py (project: zyxue/pyro)
def test_beta_bernoulli(step_size, adapt_step_size, adapt_mass_matrix,
                        full_mass):
    def model(data):
        alpha = torch.tensor([1.1, 1.1])
        beta = torch.tensor([1.1, 1.1])
        p_latent = pyro.sample("p_latent", dist.Beta(alpha, beta))
        pyro.sample("obs", dist.Bernoulli(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(
        sample_shape=(torch.Size((1000, ))))
    nuts_kernel = NUTS(model, step_size, adapt_step_size, adapt_mass_matrix,
                       full_mass)
    mcmc_run = MCMC(nuts_kernel, num_samples=500, warmup_steps=100).run(data)
    posterior = mcmc_run.marginal(sites='p_latent').empirical['p_latent']
    assert_equal(posterior.mean, true_probs, prec=0.02)
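For readability, the positional NUTS call above can also be written with keywords; this is the same call, assuming the NUTS signature of this Pyro version:

nuts_kernel = NUTS(model,
                   step_size=step_size,
                   adapt_step_size=adapt_step_size,
                   adapt_mass_matrix=adapt_mass_matrix,
                   full_mass=full_mass)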
Example #11
def test_logistic_regression(step_size, trajectory_length, num_steps,
                             adapt_step_size, adapt_mass_matrix, full_mass):
    dim = 3
    data = torch.randn(2000, dim)
    true_coefs = torch.arange(1., dim + 1.)
    labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()

    def model(data):
        coefs_mean = torch.zeros(dim)
        coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(dim)))
        y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
        return y

    hmc_kernel = HMC(model, step_size, trajectory_length, num_steps,
                     adapt_step_size, adapt_mass_matrix, full_mass)
    mcmc_run = MCMC(hmc_kernel, num_samples=500, warmup_steps=100, disable_progbar=True).run(data)
    beta_posterior = mcmc_run.marginal(['beta']).empirical['beta']
    assert_equal(rmse(true_coefs, beta_posterior.mean).item(), 0.0, prec=0.1)
Example #12
def bernoulli_beta_hmc(**kwargs):
    def model(data):
        alpha = pyro.param('alpha', torch.tensor([1.1, 1.1]))
        beta = pyro.param('beta', torch.tensor([1.1, 1.1]))
        p_latent = pyro.sample("p_latent", dist.Beta(alpha, beta))
        pyro.sample("obs", dist.Bernoulli(p_latent), obs=data)
        return p_latent

    pyro.set_rng_seed(0)
    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(
        sample_shape=(torch.Size((1000, ))))
    kernel = kwargs.pop('kernel')
    num_samples = kwargs.pop('num_samples')
    mcmc_kernel = kernel(model, **kwargs)
    mcmc_run = MCMC(mcmc_kernel, num_samples=num_samples,
                    warmup_steps=100).run(data)
    return mcmc_run.marginal('p_latent').empirical
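A hypothetical invocation of this helper: the kernel class and sample count are popped from **kwargs, and everything else is forwarded to the kernel constructor.

# Hypothetical call: HMC kernel, 200 posterior samples; trajectory_length is
# forwarded to HMC. The returned empirical is indexed by site name.
marginal = bernoulli_beta_hmc(kernel=HMC, num_samples=200, trajectory_length=1)
print(marginal['p_latent'].mean)  # expected to be close to [0.9, 0.1]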
Example #13
File: test_nuts.py (project: zyxue/pyro)
def test_gaussian_hmm(jit, num_steps):
    dim = 4

    def model(data):
        initialize = pyro.sample("initialize", dist.Dirichlet(torch.ones(dim)))
        with pyro.plate("states", dim):
            transition = pyro.sample("transition",
                                     dist.Dirichlet(torch.ones(dim, dim)))
            emission_loc = pyro.sample(
                "emission_loc", dist.Normal(torch.zeros(dim), torch.ones(dim)))
            emission_scale = pyro.sample(
                "emission_scale",
                dist.LogNormal(torch.zeros(dim), torch.ones(dim)))
        x = None
        with ignore_jit_warnings([("Iterating over a tensor", RuntimeWarning)]):
            for t, y in pyro.markov(enumerate(data)):
                x = pyro.sample(
                    "x_{}".format(t),
                    dist.Categorical(
                        initialize if x is None else transition[x]),
                    infer={"enumerate": "parallel"})
                pyro.sample("y_{}".format(t),
                            dist.Normal(emission_loc[x], emission_scale[x]),
                            obs=y)

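    # Warm-start helper: fit a MAP estimate of the global latents with SVI
    # (AutoDelta guide, TraceEnum_ELBO) and reuse its final execution trace
    # as the initial trace for NUTS below.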
    def _get_initial_trace():
        guide = AutoDelta(poutine.block(
            model,
            expose_fn=lambda msg: (not msg["name"].startswith("x") and
                                   not msg["name"].startswith("y"))))
        elbo = TraceEnum_ELBO(max_plate_nesting=1)
        svi = SVI(model, guide, optim.Adam({"lr": .01}), elbo,
                  num_steps=100).run(data)
        return svi.exec_traces[-1]

    def _generate_data():
        transition_probs = torch.rand(dim, dim)
        emissions_loc = torch.arange(dim, dtype=torch.get_default_dtype())
        emissions_scale = 1.
        state = torch.tensor(1)
        obs = [dist.Normal(emissions_loc[state], emissions_scale).sample()]
        for _ in range(num_steps):
            state = dist.Categorical(transition_probs[state]).sample()
            obs.append(
                dist.Normal(emissions_loc[state], emissions_scale).sample())
        return torch.stack(obs)

    data = _generate_data()
    nuts_kernel = NUTS(model,
                       max_plate_nesting=1,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    if num_steps == 30:
        nuts_kernel.initial_trace = _get_initial_trace()
    MCMC(nuts_kernel, num_samples=5, warmup_steps=5).run(data)
Example #14
def test_bernoulli_latent_model(jit):
    def model(data):
        y_prob = pyro.sample("y_prob", dist.Beta(1.0, 1.0))
        y = pyro.sample("y", dist.Bernoulli(y_prob))
        with pyro.plate("data", data.shape[0]):
            z = pyro.sample("z", dist.Bernoulli(0.65 * y + 0.1))
            pyro.sample("obs", dist.Normal(2. * z, 1.), obs=data)
        pyro.sample("nuisance", dist.Bernoulli(0.3))

    N = 2000
    y_prob = torch.tensor(0.3)
    y = dist.Bernoulli(y_prob).sample(torch.Size((N,)))
    z = dist.Bernoulli(0.65 * y + 0.1).sample()
    data = dist.Normal(2. * z, 1.0).sample()
    hmc_kernel = HMC(model, trajectory_length=1, max_plate_nesting=1,
                     jit_compile=jit, ignore_jit_warnings=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=600, warmup_steps=200).run(data)
    posterior = mcmc_run.marginal("y_prob").empirical["y_prob"].mean
    assert_equal(posterior, y_prob, prec=0.05)
Example #15
File: test_nuts.py (project: zyxue/pyro)
def test_gamma_beta(jit):
    def model(data):
        alpha_prior = pyro.sample('alpha', dist.Gamma(concentration=1.,
                                                      rate=1.))
        beta_prior = pyro.sample('beta', dist.Gamma(concentration=1., rate=1.))
        pyro.sample('x',
                    dist.Beta(concentration1=alpha_prior,
                              concentration0=beta_prior),
                    obs=data)

    true_alpha = torch.tensor(5.)
    true_beta = torch.tensor(1.)
    data = dist.Beta(concentration1=true_alpha,
                     concentration0=true_beta).sample(torch.Size((5000, )))
    nuts_kernel = NUTS(model, jit_compile=jit, ignore_jit_warnings=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=500, warmup_steps=200).run(data)
    posterior = mcmc_run.marginal(['alpha', 'beta']).empirical
    assert_equal(posterior['alpha'].mean, true_alpha, prec=0.06)
    assert_equal(posterior['beta'].mean, true_beta, prec=0.05)
Example #16
def test_num_chains(num_chains, cpu_count, monkeypatch):
    monkeypatch.setattr(torch.multiprocessing, 'cpu_count', lambda: cpu_count)
    kernel = PriorKernel(normal_normal_model)
    available_cpu = max(1, cpu_count - 1)
    with optional(pytest.warns(UserWarning), available_cpu < num_chains):
        mcmc = MCMC(kernel, num_samples=10, num_chains=num_chains)
    assert mcmc.num_chains == min(num_chains, available_cpu)
    if mcmc.num_chains == 1:
        assert isinstance(mcmc.sampler, _SingleSampler)
    else:
        assert isinstance(mcmc.sampler, _ParallelSampler)
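optional is a small test utility that conditionally enters a context manager; a plausible sketch matching its use above (entering pytest.warns only when a warning is expected):

from contextlib import contextmanager

@contextmanager
def optional(context_manager, condition):
    # Hypothetical helper: enter the given context manager only when the
    # condition holds; otherwise run the body unwrapped.
    if condition:
        with context_manager:
            yield
    else:
        yield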
Example #17
File: test_nuts.py (project: zyxue/pyro)
def test_bernoulli_latent_model(jit):
    @poutine.broadcast
    def model(data):
        y_prob = pyro.sample("y_prob", dist.Beta(1., 1.))
        with pyro.plate("data", data.shape[0]):
            y = pyro.sample("y", dist.Bernoulli(y_prob))
            z = pyro.sample("z", dist.Bernoulli(0.65 * y + 0.1))
            pyro.sample("obs", dist.Normal(2. * z, 1.), obs=data)

    N = 2000
    y_prob = torch.tensor(0.3)
    y = dist.Bernoulli(y_prob).sample(torch.Size((N, )))
    z = dist.Bernoulli(0.65 * y + 0.1).sample()
    data = dist.Normal(2. * z, 1.0).sample()
    nuts_kernel = NUTS(model,
                       max_plate_nesting=1,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=600, warmup_steps=200).run(data)
    posterior = mcmc_run.marginal("y_prob").empirical["y_prob"]
    assert_equal(posterior.mean, y_prob, prec=0.05)
Example #18
File: test_nuts.py (project: zyxue/pyro)
def test_logistic_regression(jit, use_multinomial_sampling):
    dim = 3
    data = torch.randn(2000, dim)
    true_coefs = torch.arange(1., dim + 1.)
    labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()

    def model(data):
        coefs_mean = torch.zeros(dim)
        coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(dim)))
        y = pyro.sample('y',
                        dist.Bernoulli(logits=(coefs * data).sum(-1)),
                        obs=labels)
        return y

    nuts_kernel = NUTS(model,
                       use_multinomial_sampling=use_multinomial_sampling,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=500, warmup_steps=100).run(data)
    posterior = mcmc_run.marginal('beta').empirical['beta']
    assert_equal(rmse(true_coefs, posterior.mean).item(), 0.0, prec=0.1)
Example #19
def test_categorical_dirichlet():
    def model(data):
        concentration = torch.tensor([1.0, 1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Dirichlet(concentration))
        pyro.sample("obs", dist.Categorical(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.1, 0.6, 0.3])
    data = dist.Categorical(true_probs).sample(
        sample_shape=(torch.Size((2000, ))))
    nuts_kernel = NUTS(model, adapt_step_size=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=200, warmup_steps=100).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites='p_latent')
    assert_equal(posterior.mean, true_probs, prec=0.02)
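The remaining examples use EmpiricalMarginal instead of the marginal(...).empirical[...] accessor seen earlier. Assuming the pre-1.0 API these snippets are written against, the two spellings build the same single-site empirical distribution:

from pyro.infer import EmpiricalMarginal

# Apparently equivalent single-site marginals (an assumption based on usage
# throughout this section):
posterior = EmpiricalMarginal(mcmc_run, sites='p_latent')
posterior = mcmc_run.marginal('p_latent').empirical['p_latent']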
Example #20
def test_normal_gamma():
    def model(data):
        rate = torch.tensor([1.0, 1.0])
        concentration = torch.tensor([1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Gamma(concentration, rate))
        pyro.sample("obs", dist.Normal(3, p_latent), obs=data)
        return p_latent

    true_std = torch.tensor([0.5, 2])
    data = dist.Normal(3, true_std).sample(sample_shape=(torch.Size((2000, ))))
    nuts_kernel = NUTS(model, step_size=0.01)
    mcmc_run = MCMC(nuts_kernel, num_samples=200, warmup_steps=100).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites='p_latent')
    assert_equal(posterior.mean, true_std, prec=0.05)
Example #21
def test_bernoulli_beta_with_dual_averaging():
    def model(data):
        alpha = torch.tensor([1.1, 1.1])
        beta = torch.tensor([1.1, 1.1])
        p_latent = pyro.sample("p_latent", dist.Beta(alpha, beta))
        pyro.sample("obs", dist.Bernoulli(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(
        sample_shape=(torch.Size((1000, ))))
    nuts_kernel = NUTS(model, adapt_step_size=True)
    mcmc_run = MCMC(nuts_kernel, num_samples=500, warmup_steps=100).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites="p_latent")
    assert_equal(posterior.mean, true_probs, prec=0.03)
Example #22
def test_bernoulli_beta_with_dual_averaging():
    def model(data):
        alpha = torch.tensor([1.1, 1.1])
        beta = torch.tensor([1.1, 1.1])
        p_latent = pyro.sample('p_latent', dist.Beta(alpha, beta))
        pyro.sample('obs', dist.Bernoulli(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(
        sample_shape=(torch.Size((1000, ))))
    hmc_kernel = HMC(model, trajectory_length=1, adapt_step_size=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=800, warmup_steps=500).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites='p_latent')
    assert_equal(posterior.mean, true_probs, prec=0.05)
Example #23
def test_logistic_regression():
    dim = 3
    true_coefs = torch.arange(1., dim + 1.)
    data = torch.randn(2000, dim)
    labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()

    def model(data):
        coefs_mean = torch.zeros(dim)
        coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(dim)))
        y = pyro.sample('y',
                        dist.Bernoulli(logits=(coefs * data).sum(-1)),
                        obs=labels)
        return y

    nuts_kernel = NUTS(model, step_size=0.0855)
    mcmc_run = MCMC(nuts_kernel, num_samples=500, warmup_steps=100).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites='beta')
    assert_equal(rmse(true_coefs, posterior.mean).item(), 0.0, prec=0.1)
Example #24
def test_logistic_regression_with_dual_averaging():
    dim = 3
    true_coefs = torch.arange(1., dim + 1.)
    data = torch.randn(2000, dim)
    labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()

    def model(data):
        coefs_mean = torch.zeros(dim)
        coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(dim)))
        y = pyro.sample('y',
                        dist.Bernoulli(logits=(coefs * data).sum(-1)),
                        obs=labels)
        return y

    hmc_kernel = HMC(model, trajectory_length=1, adapt_step_size=True)
    mcmc_run = MCMC(hmc_kernel, num_samples=500, warmup_steps=100).run(data)
    posterior = EmpiricalMarginal(mcmc_run, sites='beta')
    assert_equal(rmse(posterior.mean, true_coefs).item(), 0.0, prec=0.1)