Example #1
    class pd_prior:
        """Wrapper for the pytorch prior such that it returns pandas samples."""

        def __init__(self, lower, upper, parameter_names):
            self.lower = torch.as_tensor(lower, dtype=torch.float32)
            self.upper = torch.as_tensor(upper, dtype=torch.float32)
            self.names = parameter_names
            self.numerical_prior = BoxUniform(self.lower, self.upper)

        def sample(self, sample_shape):
            numerical_sample = self.numerical_prior.sample(sample_shape).numpy()
            return pd.DataFrame(numerical_sample, columns=self.names)

        def log_prob(self, theta):
            # BoxUniform.log_prob expects a tensor, so convert the DataFrame first.
            numerical_theta = torch.as_tensor(theta.to_numpy(), dtype=torch.float32)
            return self.numerical_prior.log_prob(numerical_theta)
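
A minimal usage sketch; the imports are assumptions, since the snippet does not show them:

    import torch
    import pandas as pd
    from sbi.utils import BoxUniform

    prior = pd_prior(lower=[0.0, 0.0], upper=[1.0, 2.0], parameter_names=["a", "b"])
    samples = prior.sample((5,))         # pandas DataFrame with columns "a" and "b"
    log_probs = prior.log_prob(samples)  # torch tensor of shape (5,)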
Example #2
def test_running_sbc(method, prior, model="mdn"):
    """Tests running inference and then SBC and obtaining nltp."""

    num_dim = 2
    if prior == "boxuniform":
        prior = BoxUniform(-torch.ones(num_dim), torch.ones(num_dim))
    else:
        prior = MultipleIndependent(
            [Uniform(-torch.ones(1), torch.ones(1)) for _ in range(num_dim)]
        )

    num_simulations = 100
    max_num_epochs = 1
    num_sbc_runs = 2

    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    def simulator(theta):
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    inferer = method(prior, show_progress_bars=False, density_estimator=model)

    theta, x = simulate_for_sbi(simulator, prior, num_simulations)

    _ = inferer.append_simulations(theta, x).train(
        training_batch_size=100, max_num_epochs=max_num_epochs
    )
    posterior = inferer.build_posterior()

    thetas = prior.sample((num_sbc_runs,))
    xs = simulator(thetas)

    run_sbc(thetas, xs, posterior, num_workers=1, num_posterior_samples=10)

    # Check nltp
    get_nltp(thetas, xs, posterior)
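
The test can be invoked directly; a minimal sketch (picking SNPE is an assumption, any sbi trainer with this constructor signature works):

    from sbi.inference import SNPE

    test_running_sbc(method=SNPE, prior="boxuniform", model="mdn")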
Example #3
def test_mnle_on_device(device):
    # Generate mixed data: one continuous and one binary column.
    num_simulations = 100
    theta = torch.rand(num_simulations, 2)
    x = torch.cat(
        (torch.rand(num_simulations, 1), torch.randint(0, 2, (num_simulations, 1))),
        dim=1,
    )

    # Train and infer.
    prior = BoxUniform(torch.zeros(2), torch.ones(2))
    trainer = MNLE(prior=prior, device=device)
    trainer.append_simulations(theta, x).train(max_num_epochs=1)

    # Test sampling on device. Build the posterior from the already-trained
    # trainer instead of appending the same simulations and training again.
    posterior = trainer.build_posterior()
    posterior.sample((1,), x=x[0], show_progress_bars=False)
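
The device argument suggests a pytest parametrization; a hypothetical one (the cuda entry requires a GPU):

    import pytest

    @pytest.mark.parametrize("device", ("cpu", "cuda:0"))
    def test_mnle_on_device(device): ...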
Example #4
def test_plot_summary(method, tmp_path):
    num_dim = 1
    prior = BoxUniform(low=-2 * torch.ones(num_dim),
                       high=2 * torch.ones(num_dim))

    summary_writer = SummaryWriter(tmp_path)

    def linear_gaussian(theta):
        return theta + 1.0 + torch.randn_like(theta) * 0.1

    simulator, prior = prepare_for_sbi(linear_gaussian, prior)

    inference = method(prior=prior, summary_writer=summary_writer)
    theta, x = simulate_for_sbi(simulator, proposal=prior, num_simulations=6)
    if method == SNRE:
        train_kwargs = dict(max_num_epochs=5, validation_fraction=0.5, num_atoms=2)
    else:
        train_kwargs = dict(max_num_epochs=1)
    _ = inference.append_simulations(theta, x).train(**train_kwargs)
    fig, axes = plot_summary(inference)
    assert isinstance(fig, Figure) and isinstance(axes[0], Axes)
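
`tmp_path` is pytest's built-in temporary-directory fixture; the method argument suggests a parametrization along these lines (the exact trainer list is an assumption):

    import pytest
    from sbi.inference import SNLE, SNPE, SNRE

    @pytest.mark.parametrize("method", (SNPE, SNLE, SNRE))
    def test_plot_summary(method, tmp_path): ...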
Example #5
def test_mnle_api(sampler):

    # Generate mixed data.
    num_simulations = 100
    theta = torch.rand(num_simulations, 2)
    x = torch.cat(
        (torch.rand(num_simulations, 1), torch.randint(0, 2, (num_simulations, 1))),
        dim=1,
    )

    # Train and infer.
    prior = BoxUniform(torch.zeros(2), torch.ones(2))
    x_o = x[0]
    # Build estimator manually.
    density_estimator = likelihood_nn(model="mnle", tail_bound=2.0)
    trainer = MNLE(density_estimator=density_estimator)
    mnle = trainer.append_simulations(theta, x).train(max_num_epochs=1)

    # Test different samplers.
    posterior = trainer.build_posterior(prior=prior, sample_with=sampler)
    posterior.set_default_x(x_o)
    if sampler == "vi":
        posterior.train()
    posterior.sample((1,), show_progress_bars=False)

    # MNLE should work with the default potential as well.
    potential_fn, parameter_transform = likelihood_estimator_based_potential(
        mnle, prior, x_o)
    posterior = MCMCPosterior(
        potential_fn,
        proposal=prior,
        theta_transform=parameter_transform,
        init_strategy="proposal",
    )
    posterior.sample((1,), show_progress_bars=False)
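
The sampler argument selects the posterior's sampling backend; a hypothetical parametrization:

    import pytest

    @pytest.mark.parametrize("sampler", ("mcmc", "rejection", "vi"))
    def test_mnle_api(sampler): ...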
Example #6
def set_up_model(seed_data=7,
                 prior="Uniform",
                 theta_true=torch.tensor([0.01, 0.5, 1, 0.01]).log(),
                 method="not_snpla"):
    if prior == "Uniform":
        if method == "snpla":
            prior_dist = Uniform(low=-5 * torch.ones(4), high=2 * torch.ones(4))
        else:
            prior_dist = BoxUniform(low=-5 * torch.ones(4), high=2 * torch.ones(4))
    else:
        raise ValueError("Prior dist not valid!")

    # Set up the Lotka-Volterra model with the prior.
    model = LotkaVolterra.LotkaVolterra(prior_dist)

    # Generate observed data with a fixed seed.
    torch.manual_seed(seed_data)
    x_o = model.gen_single(theta_true)

    return x_o, model, theta_true
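
A minimal usage sketch (torch, Uniform, BoxUniform, and the repo-specific LotkaVolterra module are assumed to be imported):

    x_o, model, theta_true = set_up_model(seed_data=7, method="snpla")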
Example #7
def test_smcabc_inference_on_linear_gaussian(
    num_dim,
    prior_type: str,
    lra=False,
    sass=False,
    sass_expansion_degree=1,
    kde=False,
    kde_bandwidth="cv",
    transform=False,
    num_simulations=20000,
):
    x_o = zeros((1, num_dim))
    num_samples = 1000
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_type == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
        gt_posterior = true_posterior_linear_gaussian_mvn_prior(
            x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov)
        target_samples = gt_posterior.sample((num_samples,))
    elif prior_type == "uniform":
        prior = BoxUniform(-ones(num_dim), ones(num_dim))
        target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
            x_o[0], likelihood_shift, likelihood_cov, prior, num_samples)
    else:
        raise ValueError("Wrong prior string.")

    def simulator(theta):
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    infer = SMC(
        simulator, prior, simulation_batch_size=10000, algorithm_variant="C"
    )

    phat = infer(
        x_o,
        num_particles=1000,
        num_initial_pop=5000,
        epsilon_decay=0.5,
        num_simulations=num_simulations,
        distance_based_decay=True,
        return_summary=False,
        lra=lra,
        sass=sass,
        sass_fraction=0.5,
        sass_expansion_degree=sass_expansion_degree,
        kde=kde,
        kde_kwargs=dict(
            bandwidth=kde_bandwidth,
            transform=biject_to(prior.support) if transform else None,
        ),
    )

    check_c2st(
        phat.sample((num_samples,)) if kde else phat,
        target_samples,
        alg=f"SMCABC-{prior_type}prior-lra{lra}-sass{sass}-kde{kde}-{kde_bandwidth}",
    )

    if kde:
        samples = phat.sample((10,))
        phat.log_prob(samples)
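
A sketch of a direct invocation; note it runs 20000 simulations by default, so it is slow (the argument choice here is illustrative):

    test_smcabc_inference_on_linear_gaussian(num_dim=2, prior_type="gaussian", kde=True)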
Example #8
    os.chdir('/home/samwiq/spa/seq-posterior-approx-w-nf-dev')
else:
    os.chdir(
        '/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev'
    )

sys.path.append('./')

print(os.getcwd())

id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)

import two_moons.functions as func
import algorithms.spa as spa

prior = BoxUniform(low=-2 * torch.ones(2), high=2 * torch.ones(2))
x_o, model = func.set_up_model(prior)
dim = 2

flow_lik, flow_post = func.set_up_networks(seed)

optimizer_lik = torch.optim.Adam(flow_lik.parameters())
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=1e-3)
decay_rate_post = 0.95

# Test prior predictive sampling and sampling for a given theta.

nbr_rounds = 5
prob_prior_decay_rate = 0.8
prob_prior = spa.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
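
`spa.calc_prob_prior` is specific to this repository; a plausible sketch of such a geometrically decaying schedule (an assumption, not the repo's actual implementation):

    def calc_prob_prior(nbr_rounds, decay_rate):
        # Probability of sampling from the prior in each round, decaying geometrically.
        return [decay_rate**i for i in range(nbr_rounds)]

    calc_prob_prior(5, 0.8)  # approx. [1.0, 0.8, 0.64, 0.51, 0.41]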
Example #9
def test_gaussian_transforms(snpe_method: str, plot_results: bool = False):
    """
    Tests whether the the product between proposal and posterior is computed correctly.

    For SNPE-C, this initializes two MoGs with two components each. It then evaluates
    their product by simply multiplying the probabilities of the two. The result is
    compared to the product of two MoGs as implemented in APT.

    For SNPE-A, it initializes a MoG with two compontents and one Gaussian (with one
    component). It then devices the MoG by the Gaussian and compares it to the
    transformation in SNPE-A.

    Args:
        snpe_method: String indicating whether to test snpe-a or snpe-c.
        plot_results: Whether to plot the products of the distributions.
    """
    class MoG:
        def __init__(self, means, covariances, logits):
            self._means = means
            self._covariances = covariances
            self._logits = logits

        def log_prob(self, theta):
            # Note: despite the name, this returns the mixture density itself,
            # i.e. the weighted sum of the component probabilities.
            probs = zeros(theta.shape[0])
            for m, c, l in zip(self._means, self._covariances, self._logits):
                mvn = MultivariateNormal(m, c)
                probs += torch.exp(mvn.log_prob(theta)) * l
            return probs

    # Build a grid on which to evaluate the densities.
    bound = 5.0
    theta_range = torch.linspace(-bound, bound, 100)
    theta1_grid, theta2_grid = torch.meshgrid(theta_range, theta_range)
    theta_grid = torch.stack([theta1_grid, theta2_grid])
    theta_grid_flat = torch.reshape(theta_grid, (2, 100**2))

    # Generate two MoGs.
    means1 = torch.tensor([[2.0, 2.0], [-2.0, -2.0]])
    covs1 = torch.stack([0.5 * torch.eye(2), torch.eye(2)])
    weights1 = torch.tensor([0.3, 0.7])

    if snpe_method == "snpe_c":
        means2 = torch.tensor([[2.0, -2.2], [-2.0, 1.9]])
        covs2 = torch.stack([0.6 * torch.eye(2), 0.9 * torch.eye(2)])
        weights2 = torch.tensor([0.6, 0.4])
    elif snpe_method == "snpe_a":
        means2 = torch.tensor([[-0.2, -0.4]])
        covs2 = torch.stack([3.5 * torch.eye(2)])
        weights2 = torch.tensor([1.0])

    mog1 = MoG(means1, covs1, weights1)
    mog2 = MoG(means2, covs2, weights2)

    # Evaluate the product of their pdfs by evaluating them separately and multiplying.
    probs1_raw = mog1.log_prob(theta_grid_flat.T)
    probs1 = torch.reshape(probs1_raw, (100, 100))

    probs2_raw = mog2.log_prob(theta_grid_flat.T)
    probs2 = torch.reshape(probs2_raw, (100, 100))

    if snpe_method == "snpe_c":
        probs_mult = probs1 * probs2

        # Set up a SNPE object in order to use the
        # `_automatic_posterior_transformation()`.
        prior = BoxUniform(-5 * ones(2), 5 * ones(2))
        # Testing new z-score arg options.
        density_estimator = posterior_nn(
            "mdn", z_score_theta=None, z_score_x=None
        )
        inference = SNPE(prior=prior, density_estimator=density_estimator)
        theta_ = torch.rand(100, 2)
        x_ = torch.rand(100, 2)
        _ = inference.append_simulations(theta_, x_).train(max_num_epochs=1)
        inference._set_state_for_mog_proposal()

        precs1 = torch.inverse(covs1)
        precs2 = torch.inverse(covs2)

        # `.unsqueeze(0)` is needed because the method requires a batch dimension.
        logits_pp, means_pp, _, covs_pp = inference._automatic_posterior_transformation(
            torch.log(weights1.unsqueeze(0)),
            means1.unsqueeze(0),
            precs1.unsqueeze(0),
            torch.log(weights2.unsqueeze(0)),
            means2.unsqueeze(0),
            precs2.unsqueeze(0),
        )

    elif snpe_method == "snpe_a":
        probs_mult = probs1 / probs2

        prior = BoxUniform(-5 * ones(2), 5 * ones(2))

        inference = SNPE_A(prior=prior)
        theta_ = torch.rand(100, 2)
        x_ = torch.rand(100, 2)
        density_estimator = inference.append_simulations(theta_, x_).train(
            max_num_epochs=1
        )
        wrapped_density_estimator = SNPE_A_MDN(
            flow=density_estimator, proposal=prior, prior=prior, device="cpu"
        )

        precs1 = torch.inverse(covs1)
        precs2 = torch.inverse(covs2)

        # `.unsqueeze(0)` is needed because the method requires a batch dimension.
        (
            logits_pp,
            means_pp,
            _,
            covs_pp,
        ) = wrapped_density_estimator._proposal_posterior_transformation(
            torch.log(weights2.unsqueeze(0)),
            means2.unsqueeze(0),
            precs2.unsqueeze(0),
            torch.log(weights1.unsqueeze(0)),
            means1.unsqueeze(0),
            precs1.unsqueeze(0),
        )

    # Normalize weights.
    logits_pp_norm = logits_pp - torch.logsumexp(
        logits_pp, dim=-1, keepdim=True)
    weights_pp = torch.exp(logits_pp_norm)

    # Evaluate the product of the two distributions.
    mog_apt = MoG(means_pp[0], covs_pp[0], weights_pp[0])

    probs_apt_raw = mog_apt.log_prob(theta_grid_flat.T)
    probs_apt = torch.reshape(probs_apt_raw, (100, 100))

    # Compute the error between the two methods.
    norm_probs_mult = probs_mult / torch.max(probs_mult)
    norm_probs_apt = probs_apt / torch.max(probs_apt)
    error = torch.abs(norm_probs_mult - norm_probs_apt)

    assert torch.max(error) < 1e-5

    if plot_results:
        _, ax = plt.subplots(1, 4, figsize=(16, 4))

        ax[0].imshow(probs1, extent=[-bound, bound, -bound, bound])
        ax[0].set_title("p_1")
        ax[1].imshow(probs2, extent=[-bound, bound, -bound, bound])
        ax[1].set_title("p_2")
        ax[2].imshow(probs_mult, extent=[-bound, bound, -bound, bound])
        ax[3].imshow(probs_apt, extent=[-bound, bound, -bound, bound])
        if snpe_method == "snpe_c":
            ax[2].set_title("p_1 * p_2")
            ax[3].set_title("APT")
        elif snpe_method == "snpe_a":
            ax[2].set_title("p_1 / p_2")
            ax[3].set_title("SNPE-A")

        plt.show()
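
A direct invocation sketch (the surrounding test module's imports of torch, matplotlib, and the sbi trainers are assumed):

    test_gaussian_transforms("snpe_c")
    test_gaussian_transforms("snpe_a", plot_results=True)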
Example #10
def __init__(self, lower, upper, parameter_names):
    self.lower = torch.as_tensor(lower, dtype=torch.float32)
    self.upper = torch.as_tensor(upper, dtype=torch.float32)
    self.names = parameter_names
    self.numerical_prior = BoxUniform(self.lower, self.upper)
Example #11
import pytest
import torch
from torch.distributions import LogNormal, MultivariateNormal, Uniform
from torch.distributions.transforms import (
    AffineTransform,
    ComposeTransform,
    ExpTransform,
    IndependentTransform,
    SigmoidTransform,
)

from sbi.utils import BoxUniform, MultipleIndependent, mcmc_transform, process_prior
from tests.user_input_checks_test import UserNumpyUniform


@pytest.mark.parametrize(
    "prior, target_transform",
    (
        (Uniform(-torch.ones(1), torch.ones(1)), SigmoidTransform),
        (BoxUniform(-torch.ones(2), torch.ones(2)), SigmoidTransform),
        (UserNumpyUniform(torch.zeros(2), torch.ones(2)), SigmoidTransform),
        (MultivariateNormal(torch.zeros(2), torch.eye(2)), AffineTransform),
        (LogNormal(loc=torch.zeros(1), scale=torch.ones(1)), ExpTransform),
    ),
)
def test_transforms(prior, target_transform):

    if isinstance(prior, UserNumpyUniform):
        prior, *_ = process_prior(
            prior,
            dict(lower_bound=torch.zeros(2), upper_bound=torch.ones(2)),
        )

    transform = mcmc_transform(prior)
    core_transform = transform._inv
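
The fragment is truncated here; one plausible way the check could conclude (an assumption, not necessarily the original test body):

    # Unwrap batched and composed transforms before comparing types.
    if isinstance(core_transform, IndependentTransform):
        core_transform = core_transform.base_transform
    if isinstance(core_transform, ComposeTransform):
        core_transform = core_transform.parts[0]
    assert isinstance(core_transform, target_transform)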