Example #1
    def test_saasbo_sample(self):
        for use_saas, use_input_warping in product((False, True), repeat=2):
            with torch.random.fork_rng():
                torch.manual_seed(0)
                X = torch.randn(3, 2)
                Y = torch.randn(3, 1)
                Yvar = torch.randn(3, 1)
                kernel = NUTS(pyro_model, max_tree_depth=1)
                mcmc = MCMC(kernel, warmup_steps=0, num_samples=1)
                mcmc.run(
                    X,
                    Y,
                    Yvar,
                    use_input_warping=use_input_warping,
                    use_saas=use_saas,
                )
                samples = mcmc.get_samples()
                if use_saas:
                    self.assertTrue("kernel_tausq" in samples)
                    self.assertTrue("_kernel_inv_length_sq" in samples)
                    self.assertTrue("lengthscale" not in samples)
                else:
                    self.assertTrue("kernel_tausq" not in samples)
                    self.assertTrue("_kernel_inv_length_sq" not in samples)
                    self.assertTrue("lengthscale" in samples)
                if use_input_warping:
                    self.assertIn("c0", samples)
                    self.assertIn("c1", samples)
                else:
                    self.assertNotIn("c0", samples)
                    self.assertNotIn("c1", samples)
Example #2
def test_structured_mass():
    def model(cov):
        w = pyro.sample("w", dist.Normal(0, 1000).expand([2]).to_event(1))
        x = pyro.sample("x", dist.Normal(0, 1000).expand([1]).to_event(1))
        y = pyro.sample("y", dist.Normal(0, 1000).expand([1]).to_event(1))
        z = pyro.sample("z", dist.Normal(0, 1000).expand([1]).to_event(1))
        wxyz = torch.cat([w, x, y, z])
        pyro.sample("obs", dist.MultivariateNormal(torch.zeros(5), cov), obs=wxyz)

    w_cov = torch.tensor([[1.5, 0.5], [0.5, 1.5]])
    xy_cov = torch.tensor([[2., 1.], [1., 3.]])
    z_var = torch.tensor([2.5])
    cov = torch.zeros(5, 5)
    cov[:2, :2] = w_cov
    cov[2:4, 2:4] = xy_cov
    cov[4, 4] = z_var

    # smoke tests
    for dense_mass in [True, False]:
        kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True, full_mass=dense_mass)
        mcmc = MCMC(kernel, num_samples=1, warmup_steps=1)
        mcmc.run(cov)
        assert kernel.inverse_mass_matrix[("w", "x", "y", "z")].dim() == 1 + int(dense_mass)

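    # full_mass given as a list of site-name tuples requests dense mass-matrix
    # blocks for those groups ("w" alone; "x" and "y" jointly), while remaining
    # sites such as "z" keep diagonal entries, as the assert_close checks verify.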
    kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True, full_mass=[("w",), ("x", "y")])
    mcmc = MCMC(kernel, num_samples=1, warmup_steps=1000)
    mcmc.run(cov)
    assert_close(kernel.inverse_mass_matrix[("w",)], w_cov, atol=0.5, rtol=0.5)
    assert_close(kernel.inverse_mass_matrix[("x", "y")], xy_cov, atol=0.5, rtol=0.5)
    assert_close(kernel.inverse_mass_matrix[("z",)], z_var, atol=0.5, rtol=0.5)
Example #3
def test_arrowhead_mass():
    def model(prec):
        w = pyro.sample("w", dist.Normal(0, 1000).expand([2]).to_event(1))
        x = pyro.sample("x", dist.Normal(0, 1000).expand([1]).to_event(1))
        y = pyro.sample("y", dist.Normal(0, 1000).expand([1]).to_event(1))
        z = pyro.sample("z", dist.Normal(0, 1000).expand([2]).to_event(1))
        wyxz = torch.cat([w, y, x, z])
        pyro.sample("obs", dist.MultivariateNormal(torch.zeros(6), precision_matrix=prec), obs=wyxz)

    A = torch.randn(6, 12)
    prec = A @ A.t() * 0.1

    # smoke tests
    for dense_mass in [True, False]:
        kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True, full_mass=dense_mass)
        mcmc = MCMC(kernel, num_samples=1, warmup_steps=1)
        mcmc.run(prec)
        assert kernel.inverse_mass_matrix[("w", "x", "y", "z")].dim() == 1 + int(dense_mass)

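    # With an ArrowheadMassMatrix adapter, the sites listed in full_mass form the
    # dense "top" rows of the arrowhead structure and the remaining sites ("z")
    # contribute only a diagonal "bottom" block, as the shape assertions verify.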
    kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True, full_mass=[("w",), ("y", "x")])
    kernel.mass_matrix_adapter = ArrowheadMassMatrix()
    mcmc = MCMC(kernel, num_samples=1, warmup_steps=1000)
    mcmc.run(prec)
    assert ("w", "y", "x", "z") in kernel.inverse_mass_matrix
    mass_matrix = kernel.mass_matrix_adapter.mass_matrix[("w", "y", "x", "z")]
    assert mass_matrix.top.shape == (4, 6)
    assert mass_matrix.bottom_diag.shape == (2,)
    assert_close(mass_matrix.top, prec[:4], atol=0.2, rtol=0.2)
    assert_close(mass_matrix.bottom_diag, prec.diag()[4:], atol=0.2, rtol=0.2)
Example #4
def test_gaussian_mixture_model(jit):
    K, N = 3, 1000

    def gmm(data):
        mix_proportions = pyro.sample("phi", dist.Dirichlet(torch.ones(K)))
        with pyro.plate("num_clusters", K):
            cluster_means = pyro.sample(
                "cluster_means", dist.Normal(torch.arange(float(K)), 1.0)
            )
        with pyro.plate("data", data.shape[0]):
            assignments = pyro.sample("assignments", dist.Categorical(mix_proportions))
            pyro.sample("obs", dist.Normal(cluster_means[assignments], 1.0), obs=data)
        return cluster_means

    true_cluster_means = torch.tensor([1.0, 5.0, 10.0])
    true_mix_proportions = torch.tensor([0.1, 0.3, 0.6])
    cluster_assignments = dist.Categorical(true_mix_proportions).sample(
        torch.Size((N,))
    )
    data = dist.Normal(true_cluster_means[cluster_assignments], 1.0).sample()
    nuts_kernel = NUTS(
        gmm, max_plate_nesting=1, jit_compile=jit, ignore_jit_warnings=True
    )
    mcmc = MCMC(nuts_kernel, num_samples=300, warmup_steps=100)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(samples["phi"].mean(0).sort()[0], true_mix_proportions, prec=0.05)
    assert_equal(
        samples["cluster_means"].mean(0).sort()[0], true_cluster_means, prec=0.2
    )
Example #5
def test_gaussian_hmm(num_steps):
    dim = 4

    def model(data):
        initialize = pyro.sample("initialize", dist.Dirichlet(torch.ones(dim)))
        with pyro.plate("states", dim):
            transition = pyro.sample("transition", dist.Dirichlet(torch.ones(dim, dim)))
            emission_loc = pyro.sample(
                "emission_loc", dist.Normal(torch.zeros(dim), torch.ones(dim))
            )
            emission_scale = pyro.sample(
                "emission_scale", dist.LogNormal(torch.zeros(dim), torch.ones(dim))
            )
        x = None
        with ignore_jit_warnings([("Iterating over a tensor", RuntimeWarning)]):
            for t, y in pyro.markov(enumerate(data)):
                x = pyro.sample(
                    "x_{}".format(t),
                    dist.Categorical(initialize if x is None else transition[x]),
                    infer={"enumerate": "parallel"},
                )
                pyro.sample(
                    "y_{}".format(t),
                    dist.Normal(emission_loc[x], emission_scale[x]),
                    obs=y,
                )

    def _get_initial_trace():
        guide = AutoDelta(
            poutine.block(
                model,
                expose_fn=lambda msg: not msg["name"].startswith("x")
                and not msg["name"].startswith("y"),
            )
        )
        elbo = TraceEnum_ELBO(max_plate_nesting=1)
        svi = SVI(model, guide, optim.Adam({"lr": 0.01}), elbo)
        for _ in range(100):
            svi.step(data)
        return poutine.trace(guide).get_trace(data)

    def _generate_data():
        transition_probs = torch.rand(dim, dim)
        emissions_loc = torch.arange(dim, dtype=torch.Tensor().dtype)
        emissions_scale = 1.0
        state = torch.tensor(1)
        obs = [dist.Normal(emissions_loc[state], emissions_scale).sample()]
        for _ in range(num_steps):
            state = dist.Categorical(transition_probs[state]).sample()
            obs.append(dist.Normal(emissions_loc[state], emissions_scale).sample())
        return torch.stack(obs)

    data = _generate_data()
    nuts_kernel = NUTS(
        model, max_plate_nesting=1, jit_compile=True, ignore_jit_warnings=True
    )
    if num_steps == 30:
        nuts_kernel.initial_trace = _get_initial_trace()
    mcmc = MCMC(nuts_kernel, num_samples=5, warmup_steps=5)
    mcmc.run(data)
Example #6
def test_beta_binomial(hyperpriors):
    def model(data):
        with pyro.plate("plate_0", data.shape[-1]):
            alpha = (
                pyro.sample("alpha", dist.HalfCauchy(1.))
                if hyperpriors
                else torch.tensor([1., 1.])
            )
            beta = (
                pyro.sample("beta", dist.HalfCauchy(1.))
                if hyperpriors
                else torch.tensor([1., 1.])
            )
            beta_binom = BetaBinomialPair()
            with pyro.plate("plate_1", data.shape[-2]):
                probs = pyro.sample("probs", beta_binom.latent(alpha, beta))
                with pyro.plate("data", data.shape[0]):
                    pyro.sample("binomial",
                                beta_binom.conditional(
                                    probs=probs, total_count=total_count),
                                obs=data)

    true_probs = torch.tensor([[0.7, 0.4], [0.6, 0.4]])
    total_count = torch.tensor([[1000, 600], [400, 800]])
    num_samples = 80
    data = dist.Binomial(
        total_count=total_count,
        probs=true_probs).sample(sample_shape=(torch.Size((10, ))))
    hmc_kernel = NUTS(collapse_conjugate(model),
                      jit_compile=True,
                      ignore_jit_warnings=True)
    mcmc = MCMC(hmc_kernel, num_samples=num_samples, warmup_steps=50)
    mcmc.run(data)
    samples = mcmc.get_samples()
    posterior = posterior_replay(model, samples, data, num_samples=num_samples)
    assert_equal(posterior["probs"].mean(0), true_probs, prec=0.05)
Example #7
def test_gamma_poisson(hyperpriors):
    def model(data):
        with pyro.plate("latent_dim", data.shape[1]):
            alpha = (
                pyro.sample("alpha", dist.HalfCauchy(1.0))
                if hyperpriors
                else torch.tensor([1.0, 1.0])
            )
            beta = (
                pyro.sample("beta", dist.HalfCauchy(1.0))
                if hyperpriors
                else torch.tensor([1.0, 1.0])
            )
            gamma_poisson = GammaPoissonPair()
            rate = pyro.sample("rate", gamma_poisson.latent(alpha, beta))
            with pyro.plate("data", data.shape[0]):
                pyro.sample("obs", gamma_poisson.conditional(rate), obs=data)

    true_rate = torch.tensor([3.0, 10.0])
    num_samples = 100
    data = dist.Poisson(rate=true_rate).sample(sample_shape=(torch.Size((100,))))
    hmc_kernel = NUTS(
        collapse_conjugate(model), jit_compile=True, ignore_jit_warnings=True
    )
    mcmc = MCMC(hmc_kernel, num_samples=num_samples, warmup_steps=50)
    mcmc.run(data)
    samples = mcmc.get_samples()
    posterior = posterior_replay(model, samples, data, num_samples=num_samples)
    assert_equal(posterior["rate"].mean(0), true_rate, prec=0.3)
Example #8
def sample(draws=500,
           model=None,
           warmup_steps=None,
           num_chains=1,
           kernel='nuts'):
    """Markov-chain Monte Carlo sampling.

  Sampling should be run within the context of a model or the model should be passed as an argument `model` explicitly.
  Number of samples is given by `draws` which defaults to `500`.
  Warm-up steps are assumed to be 30% of sample count.
  MCMC kernel can be selected by setting `kernel`. `hmc` and `nuts` are available.
  `pmpyro.inference.sample` returns a trace of samples

  """
    # get model from context
    if model is None:
        model = Context.get_context()
    stfn = model.stfn  # get stochastic function from model
    data = model.args  # get data
    # make nuts kernel
    kernels = {'nuts': NUTS(stfn, adapt_step_size=True), 'hmc': HMC(stfn)}
    # if not num_chains:    # figure out number of chains
    #   num_chains = max(os.cpu_count() -1, 2)
    if not warmup_steps:  # figure out warm-up steps
        warmup_steps = int(0.3 * draws)
    # run MCMC
    mcmc = MCMC(kernels[kernel],
                num_samples=draws,
                warmup_steps=warmup_steps,
                num_chains=num_chains)
    mcmc.run(*data)
    # get num samples
    num_samples = num_chains * draws
    return mcmc.get_samples()
Example #9
def test_nuts_conjugate_gaussian(
    fixture,
    num_samples,
    warmup_steps,
    expected_means,
    expected_precs,
    mean_tol,
    std_tol,
):
    pyro.get_param_store().clear()
    nuts_kernel = NUTS(fixture.model)
    mcmc = MCMC(nuts_kernel, num_samples, warmup_steps)
    mcmc.run(fixture.data)
    samples = mcmc.get_samples()
    for i in range(1, fixture.chain_len + 1):
        param_name = "loc_" + str(i)
        latent = samples[param_name]
        latent_loc = latent.mean(0)
        latent_std = latent.std(0)
        expected_mean = torch.ones(fixture.dim) * expected_means[i - 1]
        expected_std = 1 / torch.sqrt(torch.ones(fixture.dim) * expected_precs[i - 1])

        # Actual vs expected posterior means for the latents
        logger.debug("Posterior mean (actual) - {}".format(param_name))
        logger.debug(latent_loc)
        logger.debug("Posterior mean (expected) - {}".format(param_name))
        logger.debug(expected_mean)
        assert_equal(rmse(latent_loc, expected_mean).item(), 0.0, prec=mean_tol)

        # Actual vs expected posterior precisions for the latents
        logger.debug("Posterior std (actual) - {}".format(param_name))
        logger.debug(latent_std)
        logger.debug("Posterior std (expected) - {}".format(param_name))
        logger.debug(expected_std)
        assert_equal(rmse(latent_std, expected_std).item(), 0.0, prec=std_tol)
Example #10
def numpyro_schools_model(data, draws, chains):
    """Centered eight schools implementation in NumPyro."""
    import jax
    import numpyro
    import numpyro.distributions as dist
    from numpyro.mcmc import MCMC, NUTS

    def model():
        mu = numpyro.sample("mu", dist.Normal(0, 5))
        tau = numpyro.sample("tau", dist.HalfCauchy(5))
        # TODO: use numpyro.plate or `sample_shape` kwargs instead of  # pylint: disable=fixme
        # multiplying with np.ones(J) in future versions of NumPyro
        theta = numpyro.sample("theta",
                               dist.Normal(mu * np.ones(data["J"]), tau))
        numpyro.sample("obs", dist.Normal(theta, data["sigma"]), obs=data["y"])

    mcmc = MCMC(
        NUTS(model),
        num_warmup=draws,
        num_samples=draws,
        num_chains=chains,
        chain_method="sequential",
    )
    mcmc.run(jax.random.PRNGKey(0), collect_fields=("z", "diverging"))
    return mcmc
Example #11
class BinomialQuadraticApproximator():
    def __init__(self, X, N, n_steps, learning_rate, prior_type, infer_type):
        self.X = X
        self.N = N
        self.n_steps = n_steps
        self.prior_type = prior_type
        self.infer_type = infer_type

        self.optimiser = pyro.optim.Adam({'lr': learning_rate})
        self.map_guide = AutoLaplaceApproximation(self.model)

    def plot(self):
        plt.subplot(3, 1, 1)
        plt.plot(self.losses)
        plt.title('losses')

        plt.subplot(3, 1, 2)
        plt.plot(self._posterior_approximate_mean)
        plt.title('Posterior Mean')

        plt.subplot(3, 1, 3)
        plt.plot(self._posterior_approximate_scale)
        plt.title('Posterior Scale (Variance)')

    def train(self, **kwargs):
        pyro.clear_param_store()
        if self.infer_type == 'svi':
            self._svi_trainer(**kwargs)
        elif self.infer_type == 'mcmc':
            self._mcmc_trainer(**kwargs)
        

    def _svi_trainer(self):
        svi = SVI(self.model, self.map_guide, self.optimiser, Trace_ELBO())
        self.losses = []
        self._posterior_approximate_mean = []
        self._posterior_approximate_scale = []
        for step in range(self.n_steps):
            loss = svi.step(self.X)
            self.losses.append(loss)
            quadratic_approximation = self.map_guide.laplace_approximation(self.X).get_posterior()
            self._posterior_approximate_mean.append(quadratic_approximation.loc.item())
            self._posterior_approximate_scale.append(quadratic_approximation.scale_tril.item())

            if step % 50 == 0:
                print('[iter {}]  loss: {:.4f}'.format(step, loss))

    def _mcmc_trainer(self, step_size=0.1, num_samples=1000, warmup_steps=100):
        mcmc_kernel = NUTS(self.model, step_size=step_size)
        self.mcmc = MCMC(mcmc_kernel, num_samples=num_samples, warmup_steps=warmup_steps)
        self.mcmc.run(self.X)
        

    def model(self, data):
        if self.prior_type == 'uniform':
            p = pyro.sample('p', dist.Uniform(0, 1))
        elif self.prior_type == 'beta':
            p = pyro.sample('p', dist.Beta(10.0, 10.0))
        return pyro.sample('obs', dist.Binomial(total_count=self.N, probs=p), obs=self.X)
Example #12
def train_nuts(model, data, num_warmup, num_samples, num_chains=1, **kwargs):
    _kwargs = dict(adapt_step_size=True, adapt_mass_matrix=True, jit_compile=True)
    _kwargs.update(kwargs)
    print(_kwargs)
    kernel = NUTS(model, **_kwargs)
    engine = MCMC(kernel, num_samples, num_warmup, num_chains=num_chains)
    engine.run(data, training=True)
    return engine
Example #13
def run_inference(
    pyro_model: Callable[[Tensor, Tensor, Tensor, bool, str, float], None],
    X: Tensor,
    Y: Tensor,
    Yvar: Tensor,
    num_samples: int = 512,
    warmup_steps: int = 1024,
    thinning: int = 16,
    use_input_warping: bool = False,
    max_tree_depth: int = 6,
    use_saas: bool = False,
    disable_progbar: bool = False,
) -> Tensor:
    start = time.time()
    try:
        from pyro.infer.mcmc import NUTS, MCMC
    except ImportError:  # pragma: no cover
        raise RuntimeError("Cannot call run_inference without pyro installed!")
    kernel = NUTS(
        pyro_model,
        jit_compile=True,
        full_mass=True,
        ignore_jit_warnings=True,
        max_tree_depth=max_tree_depth,
    )
    mcmc = MCMC(
        kernel,
        warmup_steps=warmup_steps,
        num_samples=num_samples,
        disable_progbar=disable_progbar,
    )
    mcmc.run(
        # there is an issue with jit-compilation and cuda
        # for now, we run MCMC on the CPU.
        X.cpu(),
        Y.cpu(),
        Yvar.cpu(),
        use_input_warping=use_input_warping,
        use_saas=use_saas,
    )
    # this prints the summary
    orig_std_out = sys.stdout.write
    sys.stdout.write = logger.info
    mcmc.summary()
    sys.stdout.write = orig_std_out
    logger.info(f"MCMC elapsed time: {time.time() - start}")
    samples = mcmc.get_samples()
    if use_saas:  # compute the lengthscale for saas and throw away everything else
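        # kernel_tausq is the global shrinkage term and _kernel_inv_length_sq holds
        # the per-dimension terms; their product is the inverse squared lengthscale,
        # so lengthscale = (1 / (tausq * inv_length_sq)).sqrt(), computed below.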
        inv_length_sq = (samples["kernel_tausq"].unsqueeze(-1) *
                         samples["_kernel_inv_length_sq"])
        samples["lengthscale"] = (1.0 /
                                  inv_length_sq).sqrt()  # pyre-ignore [16]
        del samples["kernel_tausq"], samples["_kernel_inv_length_sq"]
    # thin
    for k, v in samples.items():
        # apply thinning and move back to X's device
        samples[k] = v[::thinning].to(device=X.device)
    return samples
Example #14
def fit_fully_bayesian_model_nuts(
    model: SaasFullyBayesianSingleTaskGP,
    max_tree_depth: int = 6,
    warmup_steps: int = 512,
    num_samples: int = 256,
    thinning: int = 16,
    disable_progbar: bool = False,
) -> None:
    r"""Fit a fully Bayesian model using the No-U-Turn-Sampler (NUTS)


    Args:
        model: SaasFullyBayesianSingleTaskGP to be fitted.
        max_tree_depth: Maximum tree depth for NUTS
        warmup_steps: The number of burn-in steps for NUTS.
        num_samples:  The number of MCMC samples. Note that with thinning,
            num_samples / thinning samples are retained.
        thinning: The amount of thinning. Every nth sample is retained.
        disable_progbar: A boolean indicating whether to print the progress
            bar and diagnostics during MCMC.

    Example:
        >>> gp = SaasFullyBayesianSingleTaskGP(train_X, train_Y)
        >>> fit_fully_bayesian_model_nuts(gp)
    """
    model.train()

    # Do inference with NUTS
    nuts = NUTS(
        model.pyro_model.sample,
        jit_compile=True,
        full_mass=True,
        ignore_jit_warnings=True,
        max_tree_depth=max_tree_depth,
    )
    mcmc = MCMC(
        nuts,
        warmup_steps=warmup_steps,
        num_samples=num_samples,
        disable_progbar=disable_progbar,
    )
    mcmc.run()

    # Get final MCMC samples from the Pyro model
    mcmc_samples = model.pyro_model.postprocess_mcmc_samples(
        mcmc_samples=mcmc.get_samples())
    for k, v in mcmc_samples.items():
        mcmc_samples[k] = v[::thinning]

    # Load the MCMC samples back into the BoTorch model
    model.load_mcmc_samples(mcmc_samples)
    model.eval()
Example #15
    def test_pyro_sampling(self):
        try:
            import pyro  # noqa
            from pyro.infer.mcmc import NUTS, MCMC
        except ImportError:
            return
        train_x, test_x, train_y, test_y = self._get_data(cuda=False)
        likelihood = GaussianLikelihood(
            noise_constraint=gpytorch.constraints.Positive())
        gp_model = ExactGPModel(train_x, train_y, likelihood)

        # Register normal GPyTorch priors
        gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1),
                                            "constant")
        gp_model.covar_module.base_kernel.register_prior(
            "lengthscale_prior", UniformPrior(0.01, 0.5), "lengthscale")
        gp_model.covar_module.register_prior("outputscale_prior",
                                             UniformPrior(1, 2), "outputscale")
        likelihood.register_prior("noise_prior", UniformPrior(0.05, 0.3),
                                  "noise")

        def pyro_model(x, y):
            with gpytorch.settings.fast_computations(False, False, False):
                sampled_model = gp_model.pyro_sample_from_prior()
                output = sampled_model.likelihood(sampled_model(x))
                pyro.sample("obs", output, obs=y)
            return y

        nuts_kernel = NUTS(pyro_model, adapt_step_size=True)
        mcmc_run = MCMC(nuts_kernel,
                        num_samples=3,
                        warmup_steps=20,
                        disable_progbar=True)
        mcmc_run.run(train_x, train_y)

        gp_model.pyro_load_from_samples(mcmc_run.get_samples())

        gp_model.eval()
        expanded_test_x = test_x.unsqueeze(-1).repeat(3, 1, 1)
        output = gp_model(expanded_test_x)

        self.assertEqual(output.mean.size(0), 3)

        # All 3 samples should do reasonably well on a noiseless dataset.
        self.assertLess(
            torch.norm(output.mean[0] - test_y) / test_y.norm(), 0.2)
        self.assertLess(
            torch.norm(output.mean[1] - test_y) / test_y.norm(), 0.2)
        self.assertLess(
            torch.norm(output.mean[2] - test_y) / test_y.norm(), 0.2)
Example #16
    def test_gp_kernels(self):
        torch.manual_seed(0)
        X = torch.randn(3, 2)
        Y = torch.randn(3, 1)
        Yvar = torch.randn(3, 1)
        kernel = NUTS(single_task_pyro_model, max_tree_depth=1)
        with self.assertRaises(ValueError):
            mcmc = MCMC(kernel, warmup_steps=0, num_samples=1)
            mcmc.run(
                X,
                Y,
                Yvar,
                gp_kernel="some_kernel_we_dont_support",
            )
Example #17
def test_dirichlet_categorical(jit):
    def model(data):
        concentration = torch.tensor([1.0, 1.0, 1.0])
        p_latent = pyro.sample("p_latent", dist.Dirichlet(concentration))
        pyro.sample("obs", dist.Categorical(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.1, 0.6, 0.3])
    data = dist.Categorical(true_probs).sample(sample_shape=(torch.Size((2000,))))
    nuts_kernel = NUTS(model, jit_compile=jit, ignore_jit_warnings=True)
    mcmc = MCMC(nuts_kernel, num_samples=200, warmup_steps=100)
    mcmc.run(data)
    samples = mcmc.get_samples()
    posterior = samples["p_latent"]
    assert_equal(posterior.mean(0), true_probs, prec=0.02)
Example #18
def test_gamma_beta(jit):
    def model(data):
        alpha_prior = pyro.sample('alpha', dist.Gamma(concentration=1., rate=1.))
        beta_prior = pyro.sample('beta', dist.Gamma(concentration=1., rate=1.))
        pyro.sample('x', dist.Beta(concentration1=alpha_prior, concentration0=beta_prior), obs=data)

    true_alpha = torch.tensor(5.)
    true_beta = torch.tensor(1.)
    data = dist.Beta(concentration1=true_alpha, concentration0=true_beta).sample(torch.Size((5000,)))
    nuts_kernel = NUTS(model, jit_compile=jit, ignore_jit_warnings=True)
    mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=200)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(samples["alpha"].mean(0), true_alpha, prec=0.08)
    assert_equal(samples["beta"].mean(0), true_beta, prec=0.05)
    def test_pyro_sampling(self):
        try:
            import pyro
            from pyro.infer.mcmc import NUTS, MCMC
        except ImportError:
            return
        train_x, test_x, train_y, test_y = self._get_data(cuda=False)
        likelihood = GaussianLikelihood(
            noise_constraint=gpytorch.constraints.Positive())
        gp_model = ExactGPModel(train_x, train_y, likelihood)

        # Register normal GPyTorch priors
        gp_model.mean_module.register_prior("mean_prior", UniformPrior(-1, 1),
                                            "constant")
        gp_model.covar_module.base_kernel.register_prior(
            "lengthscale_prior", UniformPrior(0.01, 0.2), "lengthscale")
        gp_model.covar_module.register_prior("outputscale_prior",
                                             UniformPrior(1, 2), "outputscale")
        likelihood.register_prior("noise_prior", LogNormalPrior(-1.5, 0.1),
                                  "noise")

        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

        def pyro_model(x, y):
            gp_model.pyro_sample_from_prior()
            output = gp_model(x)
            loss = mll.pyro_factor(output, y)
            return y

        nuts_kernel = NUTS(pyro_model, adapt_step_size=True)
        mcmc_run = MCMC(nuts_kernel, num_samples=3, warmup_steps=20)
        mcmc_run.run(train_x, train_y)

        gp_model.pyro_load_from_samples(mcmc_run.get_samples())

        gp_model.eval()
        expanded_test_x = test_x.unsqueeze(-1).repeat(3, 1, 1)
        output = gp_model(expanded_test_x)

        self.assertEqual(output.mean.size(0), 3)

        # All 3 samples should do reasonably well on a noiseless dataset.
        self.assertLess(
            torch.norm(output.mean[0] - test_y) / test_y.norm(), 0.2)
        self.assertLess(
            torch.norm(output.mean[1] - test_y) / test_y.norm(), 0.2)
        self.assertLess(
            torch.norm(output.mean[2] - test_y) / test_y.norm(), 0.2)
Example #20
def test_beta_bernoulli(step_size, adapt_step_size, adapt_mass_matrix, full_mass):
    def model(data):
        alpha = torch.tensor([1.1, 1.1])
        beta = torch.tensor([1.1, 1.1])
        p_latent = pyro.sample("p_latent", dist.Beta(alpha, beta))
        pyro.sample("obs", dist.Bernoulli(p_latent), obs=data)
        return p_latent

    true_probs = torch.tensor([0.9, 0.1])
    data = dist.Bernoulli(true_probs).sample(sample_shape=(torch.Size((1000,))))
    nuts_kernel = NUTS(model, step_size=step_size, adapt_step_size=adapt_step_size,
                       adapt_mass_matrix=adapt_mass_matrix, full_mass=full_mass)
    mcmc = MCMC(nuts_kernel, num_samples=400, warmup_steps=200)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(samples["p_latent"].mean(0), true_probs, prec=0.02)
Example #21
def pyro_centered_schools(data, draws, chains):
    """Centered eight schools implementation in Pyro.

    Note there is not really a deterministic node in pyro, so I do not
    know how to do a non-centered implementation.
    """
    import torch
    from pyro.infer.mcmc import MCMC, NUTS

    del chains
    y = torch.Tensor(data["y"]).type(torch.Tensor)
    sigma = torch.Tensor(data["sigma"]).type(torch.Tensor)

    nuts_kernel = NUTS(_pyro_conditioned_model, adapt_step_size=True)
    posterior = MCMC(  # pylint:disable=not-callable
        nuts_kernel, num_samples=draws, warmup_steps=500
    ).run(_pyro_centered_model, sigma, y)

    # This block lets the posterior be pickled
    for trace in posterior.exec_traces:
        for node in trace.nodes.values():
            node.pop("fn", None)
    posterior.kernel = None
    posterior.run = None
    posterior.logger = None
    if hasattr(posterior, "sampler"):
        posterior.sampler = None
    return posterior
Example #22
    def _train_hmc(self, train_loader, n_samples, warmup, step_size, num_steps,
                   device):

        print("\n == HMC training ==")
        pyro.clear_param_store()

        num_batches = int(len(train_loader.dataset) / train_loader.batch_size)
        batch_samples = int(n_samples / num_batches) + 1
        print("\nn_batches=", num_batches, "\tbatch_samples =", batch_samples)

        kernel = HMC(self.model, step_size=step_size, num_steps=num_steps)
        mcmc = MCMC(kernel=kernel,
                    num_samples=batch_samples,
                    warmup_steps=warmup,
                    num_chains=1)

        start = time.time()
        for x_batch, y_batch in train_loader:
            x_batch = x_batch.to(device)
            labels = y_batch.to(device).argmax(-1)
            mcmc.run(x_batch, labels)

        execution_time(start=start, end=time.time())

        self.posterior_predictive = {}
        posterior_samples = mcmc.get_samples(n_samples)
        state_dict_keys = list(self.basenet.state_dict().keys())

        if DEBUG:
            print("\n", list(posterior_samples.values())[-1])

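        # Turn each posterior draw into a concrete network: the sampled weight
        # tensors are mapped positionally onto the base network's state_dict keys
        # and loaded into a fresh copy of the network.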
        for model_idx in range(n_samples):
            net_copy = copy.deepcopy(self.basenet)

            model_dict = OrderedDict({})
            for weight_idx, weights in enumerate(posterior_samples.values()):
                model_dict.update(
                    {state_dict_keys[weight_idx]: weights[model_idx]})

            net_copy.load_state_dict(model_dict)
            self.posterior_predictive.update({str(model_idx): net_copy})

        if DEBUG:
            print("\n", weights[model_idx])

        self.save()
Example #23
def test_gamma_normal(jit, use_multinomial_sampling):
    def model(data):
        rate = torch.tensor([1.0, 1.0])
        concentration = torch.tensor([1.0, 1.0])
        p_latent = pyro.sample('p_latent', dist.Gamma(rate, concentration))
        pyro.sample("obs", dist.Normal(3, p_latent), obs=data)
        return p_latent

    true_std = torch.tensor([0.5, 2])
    data = dist.Normal(3, true_std).sample(sample_shape=(torch.Size((2000, ))))
    nuts_kernel = NUTS(model,
                       use_multinomial_sampling=use_multinomial_sampling,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc = MCMC(nuts_kernel, num_samples=200, warmup_steps=100)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(samples["p_latent"].mean(0), true_std, prec=0.05)
Example #24
def run(param):
    nn_model, p_tgt, save_fn, args = param
    if (not args.overwrite) and os.path.isfile(save_fn):
        print(save_fn + ' already exists!')
        return
    if not os.path.isfile(save_fn):
        fo = open(save_fn,
                  'w')  # write the file first to signal working on it.
        fo.write('\n')
        fo.close()
    nuts = NUTS(program_arbitrary)
    mcmc = MCMC(nuts,
                num_samples=args.num_samples,
                warmup_steps=args.num_warmups,
                num_chains=args.num_chains)
    mcmc.run(nn_model, p_tgt)
    zs = mcmc.get_samples()['z'].detach().cpu().numpy()
    np.savetxt(save_fn, zs)
Example #25
def test_bernoulli_latent_model(jit):
    @poutine.broadcast
    def model(data):
        y_prob = pyro.sample("y_prob", dist.Beta(1., 1.))
        with pyro.plate("data", data.shape[0]):
            y = pyro.sample("y", dist.Bernoulli(y_prob))
            z = pyro.sample("z", dist.Bernoulli(0.65 * y + 0.1))
            pyro.sample("obs", dist.Normal(2. * z, 1.), obs=data)

    N = 2000
    y_prob = torch.tensor(0.3)
    y = dist.Bernoulli(y_prob).sample(torch.Size((N,)))
    z = dist.Bernoulli(0.65 * y + 0.1).sample()
    data = dist.Normal(2. * z, 1.0).sample()
    nuts_kernel = NUTS(model, max_plate_nesting=1, jit_compile=jit, ignore_jit_warnings=True)
    mcmc = MCMC(nuts_kernel, num_samples=600, warmup_steps=200)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(samples["y_prob"].mean(0), y_prob, prec=0.05)
Example #26
def test_logistic_regression(jit, use_multinomial_sampling):
    dim = 3
    data = torch.randn(2000, dim)
    true_coefs = torch.arange(1., dim + 1.)
    labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample()

    def model(data):
        coefs_mean = torch.zeros(dim)
        coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(dim)))
        y = pyro.sample('y', dist.Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
        return y

    nuts_kernel = NUTS(model,
                       use_multinomial_sampling=use_multinomial_sampling,
                       jit_compile=jit,
                       ignore_jit_warnings=True)
    mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=100)
    mcmc.run(data)
    samples = mcmc.get_samples()
    assert_equal(rmse(true_coefs, samples["beta"].mean(0)).item(), 0.0, prec=0.1)
Example #27
def monte_carlo(y):
    pyro.clear_param_store()

    # create a simple Hamiltonian Monte Carlo kernel with a step size of 0.1
    hmc_kernel = HMC(conditioned_model, step_size=.1)
    # create a Markov chain Monte Carlo sampler with the hmc_kernel,
    # 500 samples, and 100 warmup iterations
    mcmc = MCMC(hmc_kernel, num_samples=500, warmup_steps=100)
    mcmc.run(model, y)

    sample_dict = mcmc.get_samples(num_samples=5000)
    plt.figure(figsize=(8, 6))
    sns.distplot(sample_dict["p"].numpy())
    plt.xlabel("Observed probability value")
    plt.ylabel("Observed frequency")
    plt.show()
    mcmc.summary(prob=0.95)

    return sample_dict
Example #28
    def _train_hmc(self, train_loader, n_samples, warmup, step_size, num_steps, savedir, device):
        print("\n == fullBNN HMC training ==")
        pyro.clear_param_store()

        num_batches = int(len(train_loader.dataset)/train_loader.batch_size)
        batch_samples = int(n_samples/num_batches)+1
        print("\nn_batches =",num_batches,"\tbatch_samples =", batch_samples)

        # kernel = HMC(self.model, step_size=step_size, num_steps=num_steps)
        kernel = NUTS(self.model, adapt_step_size=True)
        mcmc = MCMC(kernel=kernel, num_samples=batch_samples, warmup_steps=warmup, num_chains=1)

        self.posterior_samples = []
        state_dict_keys = list(self.basenet.state_dict().keys())
        start = time.time()

        for x_batch, y_batch in train_loader:
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device).argmax(-1)
            mcmc.run(x_batch, y_batch)

            posterior_samples = mcmc.get_samples(batch_samples)
            # print('module$$$model.1.weight:\n', posterior_samples['module$$$model.1.weight'][:,0,:5])

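            # For every draw in this batch, load the sampled weights into a fresh
            # copy of the base network (mapped positionally onto its state_dict
            # keys) and keep that copy as one posterior sample.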
            for sample_idx in range(batch_samples):
                net_copy = copy.deepcopy(self.basenet)

                model_dict = OrderedDict({})
                for weight_idx, weights in enumerate(posterior_samples.values()):
                    model_dict.update({state_dict_keys[weight_idx]: weights[sample_idx]})
                
                net_copy.load_state_dict(model_dict)
                self.posterior_samples.append(net_copy)

        execution_time(start=start, end=time.time())     
        self.save(savedir)
Example #29
def mcmc(model,
         obs,
         num_samples,
         kernel='HMC',
         kernel_params={},
         mcmc_params={},
         sites=['theta']):
    # NOTE: requires differentiable model

    model_conditioned = partial(model, obs=obs)

    if kernel.upper() == 'HMC':
        mcmc_kernel = HMC(model_conditioned, **kernel_params)
    elif kernel.upper() == 'NUTS':
        mcmc_kernel = NUTS(model_conditioned, **kernel_params)
    else:
        raise NotImplementedError

    mcmc = MCMC(mcmc_kernel, num_samples, **mcmc_params)
    mcmc_run = mcmc.run()
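    # Consuming the return value of mcmc.run() with EmpiricalMarginal follows the
    # older (pre-1.0) Pyro MCMC API; on current Pyro, run() returns None and
    # samples are retrieved via mcmc.get_samples().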

    posterior = pyro.infer.EmpiricalMarginal(mcmc_run, sites=sites)

    return posterior
Example #30
class BayesianGP(object):
    def __init__(self, x: np.ndarray, y: np.ndarray):
        """
        :param x: [N x D]
        :param y: [N]
        """

        x, y = TensorType(x), TensorType(y)
        assert x.ndimension() == 2
        assert y.ndimension() == 1
        assert x.shape[0] == y.numel()

        self.x = x
        self.y = y
        self.n_samples = 32
        self._xform = ExpTransform()

        # Length scales for the kernel
        self.raw_scales_prior = Normal(zeros(self.dx), ones(self.dx))
        # Kernel variance
        self.raw_variance_prior = Normal(zeros(1), ones(1))
        # Jitter, aka Gaussian likelihood's variance
        self.raw_jitter_prior = Normal(-3.0 + zeros(1), ones(1))
        # For the constant ("bias") mean function
        self.bias_prior = Normal(zeros(1), ones(1))

        self._mcmc = None

    @property
    def dx(self):
        """
        Input dimension
        """
        return self.x.shape[1]

    @property
    def n(self):
        """
        Number of data
        """
        return self.y.numel()

    def fit(self):
        mcmc_kernel = NUTS(self._prior_model)
        self._mcmc = MCMC(mcmc_kernel,
                          num_samples=self.n_samples,
                          warmup_steps=128)
        self._mcmc.run()

    def predict_f(self, x_test, diag=True):
        return self._predict(x_test, diag, False)

    def predict_y(self, x_test, diag=True):
        return self._predict(x_test, diag, True)

    def append_data(self, x_new, y_new):
        """
        Add new input-output pair(s) to the model

        :param x_new: inputs
        :type x_new: np.ndarray
        :param y_new: outputs
        :type y_new: np.ndarray
        """

        self.x = torch.cat((self.x, TensorType(np.atleast_2d(x_new))))
        self.y = torch.cat((self.y, TensorType(y_new.flatten())))

    def _prior_model(self):
        scales, variance, jitter, bias = self._get_samples()
        if self.n > 0:
            kyy = _rbf(self.x, self.x, scales, variance) + jitter * eye(self.n)
            try:
                ckyy = _jitchol(kyy)
                sample(
                    "output",
                    MultivariateNormal(bias + zeros(self.n), scale_tril=ckyy),
                    obs=self.y,
                )
            except RuntimeError:  # Cholesky fails?
                # "No chance"
                sample("output", Delta(zeros(1)), obs=ones(1))

    def _posterior_model(self, x_test, diag, with_jitter):
        """
        Return means & (co)variance samples.
        """

        assert self.n > 0, "Need at least one training datum for posterior"

        scales, variance, jitter, bias = self._get_samples()
        kyy = _rbf(self.x, self.x, scales, variance) + jitter * eye(self.n)
        ckyy = _jitchol(kyy)
        kys = _rbf(self.x, x_test, scales, variance)

        alpha = _trtrs(kys, ckyy)
        beta = _trtrs(self.y[:, None] - bias, ckyy)

        mean = (alpha.t() @ beta).flatten() + bias
        if diag:
            kss = _rbf_diag(x_test, variance)
            cov = kss - torch.sum(alpha**2, dim=0)
            if with_jitter:
                cov = cov + jitter
            # Guard against numerically-negative variances?
            cov = cov - (torch.clamp(cov, max=0.0)).detach()
        else:
            kss = _rbf(x_test, x_test, scales, variance)
            cov = kss - alpha.t() @ alpha
            if with_jitter:
                cov = cov + jitter * eye(*cov.shape)
            # Numerically-negative variances?...

        sample("mean", Delta(mean))
        sample("cov", Delta(cov))

    def _posterior_model_no_data(self, x_test, diag, with_jitter):
        """
        When the conditioning set is empty
        """

        scales, variance, jitter, bias = self._get_samples()
        if diag:
            cov = _rbf_diag(x_test, variance)
            if with_jitter:
                cov = cov + jitter
        else:
            cov = _rbf(x_test, x_test, scales, variance)
            if with_jitter:
                cov = cov + jitter * eye(x_test.shape[0])
        mean = torch.zeros(x_test.shape[0]) + bias

        sample("mean", Delta(mean))
        sample("cov", Delta(cov))

    def _get_samples(self):
        scales = self._xform(sample("raw_scales", self.raw_scales_prior))
        variance = self._xform(sample("raw_variance", self.raw_variance_prior))
        jitter = self._xform(sample("raw_jitter", self.raw_jitter_prior))
        bias = sample("bias", self.bias_prior)

        return scales, variance, jitter, bias

    @_input_as_tensor
    def _predict(self, x_test: TensorType, diag, with_jitter):
        """
        Return predictive mean [N* x 1] and either predictive variance [N* x 1]
        or covariance [N* x N*]

        :return: (TensorType, TensorType) mean & (co)variance
        """

        model = self._posterior_model if self.n > 0 else self._posterior_model_no_data
        samples = Predictive(model, self._mcmc.get_samples()).get_samples(
            x_test, diag, with_jitter)

        means, covs = samples["mean"], samples["cov"]

        mean = means.mean(dim=0)
        # Law of total (co)variance:
        if diag:
            cov = means.var(dim=0) + covs.mean(dim=0)
        else:
            d_mean = (means - mean)[:, :, None]
            cov_of_means = (d_mean @ torch.transpose(d_mean, 1, 2)).sum(
                dim=0) / (means.shape[0] - 1)
            mean_of_covs = covs.mean(dim=0)
            cov = cov_of_means + mean_of_covs

        # Make sure the shapes are right:
        if len(mean.shape) == 1:
            mean = mean[:, None]
        if len(cov.shape) == 1:
            cov = cov[:, None]

        return mean, cov