Code Example #1
def test_linearGaussian_simulator(D: int, N: int):
    """Test linear Gaussian simulator.

    Args:
        D: parameter dimension
        N: number of samples
    """

    true_parameters = torch.zeros(D)
    num_simulations = N
    parameters = true_parameters.repeat(num_simulations).reshape(-1, D)
    xs = linear_gaussian(parameters, torch.zeros(D), torch.eye(D))

    # Check shapes.
    assert parameters.shape == torch.Size(
        (N, D)
    ), f"wrong shape of parameters: {parameters.shape} != {torch.Size((N, D))}"
    assert xs.shape == torch.Size([N, D])

    # Check mean and std.
    assert torch.allclose(
        xs.mean(axis=0), true_parameters,
        atol=5e-2), f"Expected mean of zero, obtained {xs.mean(axis=0)}"
    assert torch.allclose(
        xs.std(axis=0), torch.ones(D),
        atol=5e-2), f"Expected std of one, obtained {xs.std(axis=0)}"
Code Example #2
def test_example_posterior(snpe_method: type):
    """Return an inferred `NeuralPosterior` for interactive examination."""
    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    if snpe_method == SNPE_A:
        extra_kwargs = dict(final_round=True)
    else:
        extra_kwargs = dict()

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = snpe_method(prior, show_progress_bars=False, **extra_kwargs)
    theta, x = simulate_for_sbi(simulator,
                                prior,
                                1000,
                                simulation_batch_size=10,
                                num_workers=6)
    _ = inference.append_simulations(theta, x).train()

    posterior = inference.build_posterior().set_default_x(x_o)
    assert posterior is not None
Code Example #3
    def linear_gaussian_nan(theta,
                            likelihood_shift=likelihood_shift,
                            likelihood_cov=likelihood_cov):
        # Emit NaNs for all rows whose first parameter is negative.
        condition = theta[:, 0] < 0.0
        x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
        x[condition] = float("nan")

        return x
Code Example #4
    def linear_gaussian_nan(
        theta, likelihood_shift=likelihood_shift, likelihood_cov=likelihood_cov
    ):
        x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
        # Set nan randomly.
        x[torch.rand(x.shape) < (percent_nans * 1.0 / x.shape[1])] = float("nan")

        return x
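Code Examples #3 and #4 build simulators that deliberately emit NaNs, to exercise the handling of invalid simulations. As a hedged, plain-PyTorch illustration (not the library's internal routine), invalid rows can be masked out before training:

import torch

# Hypothetical simulator outputs that may contain NaNs.
theta = torch.randn(100, 2)
x = torch.randn(100, 2)
x[torch.rand(x.shape) < 0.05] = float("nan")

# Keep only rows of x in which every entry is finite.
is_valid = torch.isfinite(x).all(dim=1)
theta_clean, x_clean = theta[is_valid], x[is_valid]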
Code Example #5
def test_c2st_snl_on_linearGaussian(num_dim: int, prior_str: str, set_seed):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        num_dim: parameter dimension of the gaussian model
        prior_str: one of "gaussian" or "uniform"
        set_seed: fixture for manual seeding
    """

    x_o = zeros((1, num_dim))
    num_samples = 500

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
        gt_posterior = true_posterior_linear_gaussian_mvn_prior(
            x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov)
        target_samples = gt_posterior.sample((num_samples, ))
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))
        target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
            x_o,
            likelihood_shift,
            likelihood_cov,
            prior=prior,
            num_samples=num_samples)

    simulator = lambda theta: linear_gaussian(theta, likelihood_shift,
                                              likelihood_cov)

    infer = SNL(
        *prepare_for_sbi(simulator, prior),
        mcmc_method="slice_np",
        show_progress_bars=False,
    )

    posterior = infer(num_rounds=1,
                      num_simulations_per_round=1000).set_default_x(x_o)

    samples = posterior.sample(sample_shape=(num_samples, ),
                               mcmc_parameters={"thin": 3})

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg=f"snle_a-{prior_str}-prior")

    # TODO: we do not have a test for SNL log_prob(). This is because the output
    # TODO: density is not normalized, so KLd does not make sense.
    if prior_str == "uniform":
        # Check whether the returned probability outside of the support is zero.
        posterior_prob = get_prob_outside_uniform_prior(posterior, num_dim)
        assert (
            posterior_prob == 0.0
        ), "The posterior probability outside of the prior support is not zero"
Code Example #6
def test_c2st_snl_on_linearGaussian_different_dims(set_seed):
    """Test whether SNL infers well a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x. This
    test also acts as the only functional test for SNL not marked as slow.

    Args:
        set_seed: fixture for manual seeding
    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim

    x_o = ones(1, x_dim)
    num_samples = 1000
    num_simulations = 5000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(x_dim)
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o[0],
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )

    simulator = lambda theta: linear_gaussian(theta,
                                              likelihood_shift,
                                              likelihood_cov,
                                              num_discarded_dims=discard_dims)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNL(
        prior,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior()
    samples = posterior.sample((num_samples, ),
                               x=x_o,
                               mcmc_parameters={"thin": 3})

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="snle_a")
Code Example #7
def test_c2st_multi_round_snl_on_linearGaussian(num_trials: int):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st."""

    num_dim = 2
    x_o = zeros((num_trials, num_dim))
    num_samples = 500
    num_simulations_per_round = 600 * num_trials

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = SNLE(show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations_per_round,
                                simulation_batch_size=50)
    likelihood_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = likelihood_estimator_based_potential(
        prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
    posterior1 = MCMCPosterior(
        proposal=prior,
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        thin=5,
        num_chains=20,
    )

    theta, x = simulate_for_sbi(simulator,
                                posterior1,
                                num_simulations_per_round,
                                simulation_batch_size=50)
    likelihood_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = likelihood_estimator_based_potential(
        prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
    posterior = MCMCPosterior(
        proposal=prior,
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        thin=5,
        num_chains=20,
    )

    samples = posterior.sample(sample_shape=(num_samples, ))

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg="multi-round-snl")
Code Example #8
def test_c2st_multi_round_snl_on_linearGaussian(set_seed):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros((1, num_dim))
    num_samples = 500

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator = lambda theta: linear_gaussian(theta, likelihood_shift,
                                              likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNL(
        prior,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                750,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior1 = inference.build_posterior(mcmc_method="slice_np_vectorized",
                                           mcmc_parameters={
                                               "thin": 5,
                                               "num_chains": 20
                                           }).set_default_x(x_o)

    theta, x = simulate_for_sbi(simulator,
                                posterior1,
                                750,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior().copy_hyperparameters_from(
        posterior1)

    samples = posterior.sample(sample_shape=(num_samples, ),
                               mcmc_parameters={"thin": 3})

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg="multi-round-snl")
Code Example #9
File: linearGaussian_snpe_test.py  Project: bkmi/sbi
def test_api_snpe_c_posterior_correction(sample_with, mcmc_method, prior_str):
    """Test that leakage correction applied to sampling works, with both MCMC and
    rejection.

    """

    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = SNPE_C(prior, show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator, prior, 1000)
    posterior_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = posterior_estimator_based_potential(
        posterior_estimator, prior, x_o)
    if sample_with == "mcmc":
        posterior = MCMCPosterior(
            potential_fn=potential_fn,
            theta_transform=theta_transform,
            proposal=prior,
            method=mcmc_method,
        )
    elif sample_with == "rejection":
        posterior = RejectionPosterior(
            potential_fn=potential_fn,
            proposal=prior,
            theta_transform=theta_transform,
        )

    # Posterior should be corrected for leakage even if num_rounds just 1.
    samples = posterior.sample((10, ))

    # Evaluate the samples to check correction factor.
    _ = posterior.log_prob(samples)
Code Example #10
def test_api_snpe_c_posterior_correction(snpe_method: type, sample_with,
                                         mcmc_method, prior_str, set_seed):
    """Test that leakage correction applied to sampling works, with both MCMC and
    rejection.

    Args:
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = snpe_method(
        prior,
        simulation_batch_size=50,
        sample_with=sample_with,
        mcmc_method=mcmc_method,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 1000)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)

    posterior = inference.build_posterior()
    posterior = posterior.set_sample_with(sample_with).set_mcmc_method(
        mcmc_method)

    # Posterior should be corrected for leakage even if num_rounds just 1.
    samples = posterior.sample((10, ), x=x_o)

    # Evaluate the samples to check correction factor.
    posterior.log_prob(samples, x=x_o)
Code Example #11
def test_embedding_net_api(method, num_dim: int, embedding_net: str):
    """Tests the API when using a preconfigured embedding net."""

    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    theta = prior.sample((1000, ))
    x = linear_gaussian(theta, likelihood_shift, likelihood_cov)

    if embedding_net == "mlp":
        embedding = FCEmbedding(input_dim=num_dim)
    else:
        raise NameError

    if method == "SNPE":
        density_estimator = posterior_nn("maf", embedding_net=embedding)
        inference = SNPE(prior,
                         density_estimator=density_estimator,
                         show_progress_bars=False)
    elif method == "SNLE":
        density_estimator = likelihood_nn("maf", embedding_net=embedding)
        inference = SNLE(prior,
                         density_estimator=density_estimator,
                         show_progress_bars=False)
    elif method == "SNRE":
        classifier = classifier_nn("resnet", embedding_net_x=embedding)
        inference = SNRE(prior,
                         classifier=classifier,
                         show_progress_bars=False)
    else:
        raise NameError

    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior().set_default_x(x_o)

    s = posterior.sample((1, ))
    _ = posterior.potential(s)
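The embedding net used above is sbi's FCEmbedding, but the embedding_net argument of posterior_nn/likelihood_nn accepts any torch.nn.Module that maps raw x to features. A minimal hand-rolled alternative, assuming num_dim inputs as in the test (layer sizes are hypothetical):

import torch.nn as nn

num_dim = 2  # matches the test's parameter above
custom_embedding = nn.Sequential(
    nn.Linear(num_dim, 32),
    nn.ReLU(),
    nn.Linear(32, 10),  # 10 output features, chosen arbitrarily
)
# Plugged in exactly like FCEmbedding in the example above:
# density_estimator = posterior_nn("maf", embedding_net=custom_embedding)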
Code Example #12
def test_c2st_multi_round_snl_on_linearGaussian(set_seed):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros((1, num_dim))
    num_samples = 500

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator = lambda theta: linear_gaussian(theta, likelihood_shift,
                                              likelihood_cov)

    infer = SNL(
        *prepare_for_sbi(simulator, prior),
        simulation_batch_size=50,
        mcmc_method="slice",
        show_progress_bars=False,
    )

    posterior1 = infer(num_simulations=500).set_default_x(x_o)
    posterior = infer(num_simulations=500,
                      proposal=posterior1).set_default_x(x_o)

    samples = posterior.sample(sample_shape=(num_samples, ),
                               mcmc_parameters={"thin": 3})

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg="multi-round-snl")
Code Example #13
def test_c2st_sre_on_linearGaussian(set_seed):
    """Test whether SRE infers well a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x. This
    test also acts as the only functional test for SRE not marked as slow.

    Args:
        set_seed: fixture for manual seeding
    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim
    num_samples = 1000
    num_simulations = 1000

    likelihood_shift = -1.0 * ones(
        x_dim
    )  # likelihood_mean will be likelihood_shift+theta
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(
            theta, likelihood_shift, likelihood_cov, num_discarded_dims=discard_dims
        ),
        prior,
    )
    inference = SNRE_B(
        prior,
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(
        simulator, prior, num_simulations, simulation_batch_size=100
    )
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior(mcmc_method="slice_np_vectorized")

    num_trials = 1
    x_o = zeros(num_trials, x_dim)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o,
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )
    samples = posterior.sample(
        (num_samples,),
        x=x_o,
        mcmc_parameters={"thin": 5, "num_chains": 2},
    )

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=f"snre-{num_trials}trials")
Code Example #14
File: linearGaussian_mdn.py  Project: agramfort/sbi
 def simulator(theta: Tensor) -> Tensor:
     return linear_gaussian(theta, likelihood_shift, likelihood_cov)
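The file name suggests this simulator feeds an MDN-based SNPE run. The surrounding file isn't shown, so the setup below is a hedged sketch that only uses APIs appearing elsewhere on this page; all names and values are assumptions consistent with the other examples:

import torch
from torch.distributions import MultivariateNormal
from sbi import utils
from sbi.inference import SNPE, prepare_for_sbi, simulate_for_sbi

num_dim = 2
likelihood_shift = -1.0 * torch.ones(num_dim)
likelihood_cov = 0.3 * torch.eye(num_dim)
prior = MultivariateNormal(torch.zeros(num_dim), torch.eye(num_dim))

# `simulator` is the def above; prepare_for_sbi standardizes its inputs/outputs.
simulator_wrapped, prior = prepare_for_sbi(simulator, prior)
inference = SNPE(prior,
                 density_estimator=utils.posterior_nn("mdn"),
                 show_progress_bars=False)
theta, x = simulate_for_sbi(simulator_wrapped, prior, 1000)
_ = inference.append_simulations(theta, x).train()
posterior = inference.build_posterior()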
Code Example #15
def test_c2st_snpe_on_linearGaussian(snpe_method, num_dim: int, prior_str: str,
                                     num_trials: int, set_seed):
    """Test whether SNPE infers well a simple example with available ground truth.

    Args:
        set_seed: fixture for manual seeding
    """

    x_o = zeros(num_trials, num_dim)
    num_samples = 1000
    num_simulations = 2500

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
        gt_posterior = true_posterior_linear_gaussian_mvn_prior(
            x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
        target_samples = gt_posterior.sample((num_samples, ))
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))
        target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
            x_o,
            likelihood_shift,
            likelihood_cov,
            prior=prior,
            num_samples=num_samples)

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = snpe_method(prior, show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=1000)
    _ = inference.append_simulations(theta, x).train(training_batch_size=100)
    posterior = inference.build_posterior().set_default_x(x_o)
    samples = posterior.sample((num_samples, ))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="snpe_c")

    map_ = posterior.map(num_init_samples=1_000)

    # Checks for log_prob()
    if prior_str == "gaussian":
        # For the Gaussian prior, we compute the KLd between ground truth and posterior.
        dkl = get_dkl_gaussian_prior(posterior, x_o[0], likelihood_shift,
                                     likelihood_cov, prior_mean, prior_cov)

        max_dkl = 0.15

        assert (
            dkl < max_dkl
        ), f"D-KL={dkl} is more than 2 stds above the average performance."

        assert ((map_ - gt_posterior.mean)**2).sum() < 0.5

    elif prior_str == "uniform":
        # Check whether the returned probability outside of the support is zero.
        posterior_prob = get_prob_outside_uniform_prior(
            posterior, prior, num_dim)
        assert (
            posterior_prob == 0.0
        ), "The posterior probability outside of the prior support is not zero"

        # Check whether normalization (i.e. scaling up the density due
        # to leakage into regions without prior support) scales up the density by the
        # correct factor.
        (
            posterior_likelihood_unnorm,
            posterior_likelihood_norm,
            acceptance_prob,
        ) = get_normalization_uniform_prior(posterior, prior, x_o)
        # The acceptance probability should be *exactly* the ratio of the unnormalized
        # and the normalized likelihood. However, we allow for an error margin of 1%,
        # since the estimation of the acceptance probability is random (based on
        # rejection sampling).
        assert (
            acceptance_prob * 0.99 < posterior_likelihood_unnorm /
            posterior_likelihood_norm < acceptance_prob * 1.01
        ), "Normalizing the posterior density using the acceptance probability failed."

        assert ((map_ - ones(num_dim))**2).sum() < 0.5
Code Example #16
def test_c2st_posterior_ensemble_on_linearGaussian(inference_method):
    """Test whether NeuralPosteriorEnsemble infers well a simple example with available
    ground truth.

    """

    num_trials = 1
    num_dim = 2
    x_o = zeros(num_trials, num_dim)
    num_samples = 1000
    num_simulations = 4000 if inference_method == "SNRE_A" else 2000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)

    # train ensemble components
    ensemble_size = 2
    posteriors = [
        infer(simulator, prior, inference_method, num_simulations)
        for i in range(ensemble_size)
    ]

    # create ensemble
    posterior = NeuralPosteriorEnsemble(posteriors)
    posterior.set_default_x(x_o)

    # test sampling and evaluation.
    if inference_method == "SNLE_A" or inference_method == "SNRE_A":
        samples = posterior.sample((num_samples, ),
                                   num_chains=20,
                                   method="slice_np_vectorized")
    else:
        samples = posterior.sample((num_samples, ))
    _ = posterior.potential(samples)

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples,
               target_samples,
               alg="{} posterior ensemble".format(inference_method))

    map_ = posterior.map(init_method=samples, show_progress_bars=False)
    assert ((map_ - gt_posterior.mean)**2).sum() < 0.5

    # Checks for log_prob()
    # For the Gaussian prior, we compute the KLd between ground truth and posterior.
    # This step is skipped for NLE since the probabilities are not normalised.
    if "snpe" in inference_method.lower() or "snre" in inference_method.lower(
    ):
        dkl = get_dkl_gaussian_prior(
            posterior,
            x_o[0],
            likelihood_shift,
            likelihood_cov,
            prior_mean,
            prior_cov,
            num_samples=num_samples,
        )
        max_dkl = 0.15
        assert (
            dkl < max_dkl
        ), f"D-KL={dkl} is more than 2 stds above the average performance."

    # test individual log_prob and map
    posterior.log_prob(samples, individually=True)
Code Example #17
def test_c2st_snl_on_linearGaussian_different_dims_and_trials(
        num_dim: int, prior_str: str, set_seed):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        num_dim: parameter dimension of the gaussian model
        prior_str: one of "gaussian" or "uniform"
        set_seed: fixture for manual seeding
    """
    num_samples = 500
    num_simulations = 7500
    trials_to_test = [1, 5, 10]

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    # Use increased cov to avoid too small posterior cov for many trials.
    likelihood_cov = 0.8 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = SNL(prior, show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()

    # Test inference amortized over trials.
    for num_trials in trials_to_test:
        x_o = zeros((num_trials, num_dim))
        if prior_str == "gaussian":
            gt_posterior = true_posterior_linear_gaussian_mvn_prior(
                x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
            target_samples = gt_posterior.sample((num_samples, ))
        else:
            target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
                x_o,
                likelihood_shift,
                likelihood_cov,
                prior=prior,
                num_samples=num_samples,
            )
        posterior = inference.build_posterior(
            mcmc_method="slice_np_vectorized").set_default_x(x_o)

        samples = posterior.sample(sample_shape=(num_samples, ),
                                   mcmc_parameters={
                                       "thin": 3,
                                       "num_chains": 2
                                   })

        # Check performance based on c2st accuracy.
        check_c2st(samples,
                   target_samples,
                   alg=f"snle_a-{prior_str}-prior-{num_trials}-trials")

        map_ = posterior.map(num_init_samples=1_000, init_method="prior")

        # TODO: we do not have a test for SNL log_prob(). This is because the output
        # TODO: density is not normalized, so KLd does not make sense.
        if prior_str == "uniform":
            # Check whether the returned probability outside of the support is zero.
            posterior_prob = get_prob_outside_uniform_prior(
                posterior, prior, num_dim)
            assert (
                posterior_prob == 0.0
            ), "The posterior probability outside of the prior support is not zero"

            assert ((map_ - ones(num_dim))**2).sum() < 0.5
        else:
            assert ((map_ - gt_posterior.mean)**2).sum() < 0.5
Code Example #18
 def simulator(theta):
     # Discard trailing dims so x has lower dimension than theta.
     return linear_gaussian(theta,
                            likelihood_shift,
                            likelihood_cov,
                            num_discarded_dims=discard_dims)
Code Example #19
def test_c2st_snl_on_linearGaussian():
    """Test whether SNL infers well a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x. This
    test also acts as the only functional test for SNL not marked as slow.

    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim

    x_o = zeros(1, x_dim)
    num_samples = 1000
    num_simulations = 3100

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(x_dim)
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o,
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )
    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta,
                                      likelihood_shift,
                                      likelihood_cov,
                                      num_discarded_dims=discard_dims),
        prior,
    )
    density_estimator = likelihood_nn("maf", num_transforms=3)
    inference = SNLE(density_estimator=density_estimator,
                     show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=50)
    likelihood_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = likelihood_estimator_based_potential(
        prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
    posterior = MCMCPosterior(
        proposal=prior,
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        method="slice_np_vectorized",
        num_chains=5,
        thin=10,
    )
    samples = posterior.sample((num_samples, ))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="snle_a")
Code Example #20
def test_c2st_and_map_snl_on_linearGaussian_different(num_dim: int,
                                                      prior_str: str):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        num_dim: parameter dimension of the gaussian model
        prior_str: one of "gaussian" or "uniform"

    """
    num_samples = 500
    num_simulations = 3000
    trials_to_test = [1]

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    # Use increased cov to avoid too small posterior cov for many trials.
    likelihood_cov = 0.8 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    density_estimator = likelihood_nn("maf", num_transforms=3)
    inference = SNLE(density_estimator=density_estimator,
                     show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=10000)
    likelihood_estimator = inference.append_simulations(theta, x).train()

    # Test inference amortized over trials.
    for num_trials in trials_to_test:
        x_o = zeros((num_trials, num_dim))
        if prior_str == "gaussian":
            gt_posterior = true_posterior_linear_gaussian_mvn_prior(
                x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
            target_samples = gt_posterior.sample((num_samples, ))
        elif prior_str == "uniform":
            target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
                x_o,
                likelihood_shift,
                likelihood_cov,
                prior=prior,
                num_samples=num_samples,
            )
        else:
            raise ValueError(f"Wrong prior_str: '{prior_str}'.")

        potential_fn, theta_transform = likelihood_estimator_based_potential(
            prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
        posterior = MCMCPosterior(
            proposal=prior,
            potential_fn=potential_fn,
            theta_transform=theta_transform,
            method="slice_np_vectorized",
            thin=5,
            num_chains=5,
        )

        samples = posterior.sample(sample_shape=(num_samples, ))

        # Check performance based on c2st accuracy.
        check_c2st(samples,
                   target_samples,
                   alg=f"snle_a-{prior_str}-prior-{num_trials}-trials")

        map_ = posterior.map(num_init_samples=1_000,
                             init_method="proposal",
                             show_progress_bars=False)

        # TODO: we do not have a test for SNL log_prob(). This is because the output
        # TODO: density is not normalized, so KLd does not make sense.
        if prior_str == "uniform":
            # Check whether the returned probability outside of the support is zero.
            posterior_prob = get_prob_outside_uniform_prior(
                posterior, prior, num_dim)
            assert (
                posterior_prob == 0.0
            ), "The posterior probability outside of the prior support is not zero"

            assert ((map_ - ones(num_dim))**2).sum() < 0.5
        else:
            assert ((map_ - gt_posterior.mean)**2).sum() < 0.5
Code Example #21
 def simulator(theta):
     # Randomly flip the sign of the shift: x|theta is a two-component mixture.
     if torch.rand(1) > 0.5:
         return linear_gaussian(theta, likelihood_shift, likelihood_cov)
     else:
         return linear_gaussian(theta, -likelihood_shift, likelihood_cov)
Code Example #22
def test_c2st_snpe_on_linearGaussian_different_dims(set_seed):
    """Test whether SNPE B/C infer well a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x. It
    also implicitly tests simulation_batch_size=1, whether the prior can be `None`,
    and whether we can stop and resume training.

    Args:
        set_seed: fixture for manual seeding
    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim

    x_o = zeros(1, x_dim)
    num_samples = 1000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(x_dim)
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o,
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta,
                                      likelihood_shift,
                                      likelihood_cov,
                                      num_discarded_dims=discard_dims),
        prior,
    )
    # Test whether prior can be `None`.
    inference = SNPE_C(prior=None,
                       density_estimator="maf",
                       show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                2000,
                                simulation_batch_size=1)  # type: ignore
    inference = inference.append_simulations(theta, x)
    _ = inference.train(
        max_num_epochs=10)  # Test whether we can stop and resume.
    _ = inference.train(resume_training=True)
    posterior = inference.build_posterior()
    samples = posterior.sample((num_samples, ), x=x_o)

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="snpe_c")
Code Example #23
 def simulator(theta):
     return linear_gaussian(theta, likelihood_shift, likelihood_cov)
Code Example #24
def test_c2st_multi_round_snpe_on_linearGaussian(method_str: str, set_seed):
    """Test whether SNPE B/C infer well a simple example with available ground truth.

    Args:
        set_seed: fixture for manual seeding.
    """

    num_dim = 2
    x_o = zeros((1, num_dim))
    num_samples = 1000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    if method_str == "snpe_c_non_atomic":
        density_estimator = utils.posterior_nn("mdn", num_components=5)
        method_str = "snpe_c"
    elif method_str == "snpe_a":
        density_estimator = "mdn_snpe_a"
    else:
        density_estimator = "maf"

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    creation_args = dict(
        simulator=simulator,
        prior=prior,
        density_estimator=density_estimator,
        show_progress_bars=False,
    )

    if method_str == "snpe_b":
        inference = SNPE_B(**creation_args)
        theta, x = simulate_for_sbi(simulator,
                                    prior,
                                    500,
                                    simulation_batch_size=10)
        _ = inference.append_simulations(theta, x).train()
        posterior1 = inference.build_posterior().set_default_x(x_o)
        theta, x = simulate_for_sbi(simulator,
                                    posterior1,
                                    1000,
                                    simulation_batch_size=10)
        _ = inference.append_simulations(theta, x, proposal=posterior1).train()
        posterior = inference.build_posterior().set_default_x(x_o)
    elif method_str == "snpe_c":
        inference = SNPE_C(**creation_args)
        theta, x = simulate_for_sbi(simulator,
                                    prior,
                                    500,
                                    simulation_batch_size=50)
        _ = inference.append_simulations(theta, x).train()
        posterior1 = inference.build_posterior().set_default_x(x_o)
        theta, x = simulate_for_sbi(simulator,
                                    posterior1,
                                    1000,
                                    simulation_batch_size=50)
        _ = inference.append_simulations(theta, x, proposal=posterior1).train()
        posterior = inference.build_posterior().set_default_x(x_o)
    elif method_str == "snpe_a":
        inference = SNPE_A(**creation_args)
        proposal = prior
        final_round = False
        num_rounds = 3
        for r in range(num_rounds):
            if r == 2:
                final_round = True
            theta, x = simulate_for_sbi(simulator,
                                        proposal,
                                        500,
                                        simulation_batch_size=50)
            inference = inference.append_simulations(theta,
                                                     x,
                                                     proposal=proposal)
            _ = inference.train(max_num_epochs=200, final_round=final_round)
            posterior = inference.build_posterior().set_default_x(x_o)
            proposal = posterior

    samples = posterior.sample((num_samples, ))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=method_str)
Code Example #25
File: test_simulator_utils.py  Project: boyali/sbi
def numpy_linear_gaussian(theta):
    """Linear Gaussian simulator wrapped to get and return numpy."""
    return linear_gaussian(torch.as_tensor(theta, dtype=torch.float32)).numpy()
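This wrapper converts between numpy arrays and torch tensors by hand; prepare_for_sbi (used throughout the examples above) performs the same kind of wrapping for you. A hedged usage sketch, assuming the single-argument linear_gaussian default used in that project and an illustrative 2-dimensional box prior:

import torch
from sbi import utils
from sbi.inference import prepare_for_sbi

prior = utils.BoxUniform(-2.0 * torch.ones(2), 2.0 * torch.ones(2))
simulator, prior = prepare_for_sbi(numpy_linear_gaussian, prior)

theta = prior.sample((5,))
x = simulator(theta)  # a torch.Tensor, although the wrapped function uses numpy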
Code Example #26
def test_c2st_sre_on_linearGaussian():
    """Test whether SRE infers well a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x. This
    test also acts as the only functional test for SRE not marked as slow.

    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim
    num_samples = 1000
    num_simulations = 2100

    likelihood_shift = -1.0 * ones(
        x_dim
    )  # likelihood_mean will be likelihood_shift+theta
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(
            theta, likelihood_shift, likelihood_cov, num_discarded_dims=discard_dims
        ),
        prior,
    )
    inference = SNRE_B(
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(
        simulator, prior, num_simulations, simulation_batch_size=100
    )
    ratio_estimator = inference.append_simulations(theta, x).train()

    num_trials = 1
    x_o = zeros(num_trials, x_dim)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o,
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )
    potential_fn, theta_transform = ratio_estimator_based_potential(
        ratio_estimator=ratio_estimator, prior=prior, x_o=x_o
    )
    posterior = MCMCPosterior(
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        proposal=prior,
        thin=5,
        num_chains=2,
    )
    samples = posterior.sample((num_samples,))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=f"snre-{num_trials}trials")