Example No. 1
def test_c2st_multi_round_snl_on_linearGaussian(num_trials: int):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st."""

    num_dim = 2
    x_o = zeros((num_trials, num_dim))
    num_samples = 500
    num_simulations_per_round = 600 * num_trials

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o, likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = SNLE(show_progress_bars=False)

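    # Round 1: simulate from the prior and train the likelihood estimator.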
    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations_per_round,
                                simulation_batch_size=50)
    likelihood_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = likelihood_estimator_based_potential(
        prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
    posterior1 = MCMCPosterior(
        proposal=prior,
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        thin=5,
        num_chains=20,
    )

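    # Round 2: simulate from the round-1 posterior and train on the combined data.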
    theta, x = simulate_for_sbi(simulator,
                                posterior1,
                                num_simulations_per_round,
                                simulation_batch_size=50)
    likelihood_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = likelihood_estimator_based_potential(
        prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
    posterior = MCMCPosterior(
        proposal=prior,
        potential_fn=potential_fn,
        theta_transform=theta_transform,
        thin=5,
        num_chains=20,
    )

    samples = posterior.sample(sample_shape=(num_samples, ))

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg="multi-round-snl")
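
The two rounds above repeat the same simulate/train/build steps. A minimal sketch of the same pipeline written as a loop (round count and names mirror this example; it is an illustration, not part of the test):

    proposal = prior
    for _ in range(2):
        theta, x = simulate_for_sbi(simulator,
                                    proposal,
                                    num_simulations_per_round,
                                    simulation_batch_size=50)
        likelihood_estimator = inference.append_simulations(theta, x).train()
        potential_fn, theta_transform = likelihood_estimator_based_potential(
            prior=prior, likelihood_estimator=likelihood_estimator, x_o=x_o)
        # Each round's posterior becomes the next round's proposal.
        proposal = MCMCPosterior(
            proposal=prior,
            potential_fn=potential_fn,
            theta_transform=theta_transform,
            thin=5,
            num_chains=20,
        )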
Example No. 2
def test_c2st_multi_round_snl_on_linearGaussian(set_seed):
    """Test SNL on linear Gaussian, comparing to ground truth posterior via c2st.

    Args:
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros((1, num_dim))
    num_samples = 500

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov)
    target_samples = gt_posterior.sample((num_samples, ))

    simulator = lambda theta: linear_gaussian(theta, likelihood_shift,
                                              likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNL(
        prior,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                750,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior1 = inference.build_posterior(mcmc_method="slice_np_vectorized",
                                           mcmc_parameters={
                                               "thin": 5,
                                               "num_chains": 20
                                           }).set_default_x(x_o)

    theta, x = simulate_for_sbi(simulator,
                                posterior1,
                                750,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior().copy_hyperparameters_from(
        posterior1)

    samples = posterior.sample(sample_shape=(num_samples, ),
                               mcmc_parameters={"thin": 3})

    # Check performance based on c2st accuracy.
    check_c2st(samples, target_samples, alg="multi-round-snl")
Example No. 3
def test_api_sre_on_linearGaussian(num_dim: int):
    """Test inference API of SRE with linear Gaussian model.

    Avoids intense computation for fast testing of API etc.

    Args:
        num_dim: parameter dimension of the Gaussian model
    """

    prior = MultivariateNormal(loc=zeros(num_dim), covariance_matrix=eye(num_dim))

    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = SNRE_B(
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 1000, simulation_batch_size=50)
    ratio_estimator = inference.append_simulations(theta, x).train(max_num_epochs=5)

    for num_trials in [1, 2]:
        x_o = zeros(num_trials, num_dim)
        potential_fn, theta_transform = ratio_estimator_based_potential(
            ratio_estimator=ratio_estimator, prior=prior, x_o=x_o
        )
        posterior = MCMCPosterior(
            potential_fn=potential_fn,
            theta_transform=theta_transform,
            proposal=prior,
            num_chains=2,
        )
        posterior.sample(sample_shape=(10,))
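
These test functions receive their arguments from pytest. A hypothetical parametrization for the test above (the decorator values are illustrative, not necessarily the suite's actual ones):

    import pytest

    @pytest.mark.parametrize("num_dim", (1, 3))
    def test_api_sre_on_linearGaussian(num_dim: int):
        ...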
Example No. 4
def test_api_sre_sampling_methods(mcmc_method: str, prior_str: str, set_seed):
    """Test leakage correction both for MCMC and rejection sampling.

    Args:
        mcmc_method: which mcmc method to use for sampling
        prior_str: one of "gaussian" or "uniform"
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros(num_dim)
    if prior_str == "gaussian":
        prior = MultivariateNormal(loc=zeros(num_dim),
                                   covariance_matrix=eye(num_dim))
    else:
        prior = utils.BoxUniform(low=-1.0 * ones(num_dim), high=ones(num_dim))

    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = SRE(
        prior,
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                200,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior(mcmc_method=mcmc_method)

    posterior.sample(sample_shape=(10, ), x=x_o)
Example No. 5
def test_train_with_different_data_and_training_device(
    snpe_method: type, data_device, training_device
):

    assert torch.cuda.is_available(), "GPU-geared test requires a CUDA device, but none is available"

    num_dim = 2

    # simulator, prior = prepare_for_sbi(user_simulator, user_prior)
    prior_ = MultivariateNormal(
        loc=torch.zeros(num_dim), covariance_matrix=torch.eye(num_dim)
    )
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior_)

    inference = snpe_method(
        prior,
        density_estimator="mdn_snpe_a" if snpe_method == SNPE_A else "maf",
        show_progress_bars=False,
        device=training_device,
    )

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    theta, x = theta.to(data_device), x.to(data_device)
    inference = inference.append_simulations(theta, x)

    _ = inference.train(max_num_epochs=2)

    # Check for default device for inference object
    weights_device = next(inference._neural_net.parameters()).device
    assert torch.device(training_device) == weights_device

    _ = inference.build_posterior()
Example No. 6
def test_log_prob_with_different_x(snpe_method: type):

    num_dim = 2

    prior = MultivariateNormal(loc=zeros(num_dim),
                               covariance_matrix=eye(num_dim))
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = snpe_method(prior)
    theta, x = simulate_for_sbi(simulator, prior, 1000)
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior()

    _ = posterior.sample((10, ), x=ones(1, num_dim))
    theta = posterior.sample((10, ), ones(1, num_dim))
    posterior.log_prob(theta, x=ones(num_dim))
    posterior.log_prob(theta, x=ones(1, num_dim))
    posterior = posterior.set_default_x(ones(1, num_dim))
    posterior.log_prob(theta, x=None)
    posterior.sample((10, ), x=None)

    # Both must fail due to batch size of x > 1.
    with pytest.raises(ValueError):
        posterior.log_prob(theta, x=ones(2, num_dim))
    with pytest.raises(ValueError):
        posterior.sample(2, x=ones(2, num_dim))
Example No. 7
def test_api_sre_on_linearGaussian(num_dim: int):
    """Test inference API of SRE with linear Gaussian model.

    Avoids intense computation for fast testing of API etc.

    Args:
        num_dim: parameter dimension of the Gaussian model
    """

    prior = MultivariateNormal(loc=zeros(num_dim), covariance_matrix=eye(num_dim))

    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = SNRE_B(
        prior,
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 1000, simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior()

    for num_trials in [1, 2]:
        x_o = zeros(num_trials, num_dim)
        posterior.sample(sample_shape=(10,), x=x_o, mcmc_parameters={"num_chains": 2})
Example No. 8
def test_log_prob_with_different_x(snpe_method: type, x_o_batch_dim: bool):

    num_dim = 2

    prior = MultivariateNormal(loc=zeros(num_dim),
                               covariance_matrix=eye(num_dim))
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = snpe_method(prior=prior)
    theta, x = simulate_for_sbi(simulator, prior, 1000)
    posterior_estimator = inference.append_simulations(
        theta, x).train(max_num_epochs=3)

    if x_o_batch_dim == 0:
        x_o = ones(num_dim)
    elif x_o_batch_dim == 1:
        x_o = ones(1, num_dim)
    elif x_o_batch_dim == 2:
        x_o = ones(2, num_dim)
    else:
        raise NotImplementedError

    posterior = DirectPosterior(posterior_estimator=posterior_estimator,
                                prior=prior).set_default_x(x_o)
    samples = posterior.sample((10, ))
    _ = posterior.log_prob(samples)
Example No. 9
def test_api_snl_on_linearGaussian(num_dim: int, set_seed):
    """Test API for inference on linear Gaussian model using SNL.

    Avoids expensive computations by training on few simulations and generating few
    posterior samples.

    Args:
        num_dim: parameter dimension of the Gaussian model
        set_seed: fixture for manual seeding
    """
    num_samples = 10
    x_o = zeros(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = SNL(
        prior,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                1000,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior().set_default_x(x_o)

    posterior.sample(sample_shape=(num_samples, ),
                     x=x_o,
                     mcmc_parameters={"thin": 3})
Example No. 10
def test_nograd_after_inference_train(inference_method) -> None:

    num_dim = 2
    prior_ = BoxUniform(-torch.ones(num_dim), torch.ones(num_dim))
    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior_)

    inference = inference_method(
        prior,
        **(
            dict(classifier="resnet")
            if inference_method in [SNRE_A, SNRE_B]
            else dict(
                density_estimator=(
                    "mdn_snpe_a" if inference_method == SNPE_A else "maf"
                )
            )
        ),
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 32)
    inference = inference.append_simulations(theta, x)

    posterior_estimator = inference.train(max_num_epochs=2)

    def check_no_grad(model):
        for p in model.parameters():
            assert p.grad is None

    check_no_grad(posterior_estimator)
    check_no_grad(inference._neural_net)
Example No. 11
def example_posterior():
    """Return an inferred `NeuralPosterior` for interactive examination."""
    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    def simulator(theta):
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNPE_C(
        prior,
        show_progress_bars=False,
    )
    theta, x = simulate_for_sbi(simulator,
                                prior,
                                1000,
                                simulation_batch_size=10,
                                num_workers=6)
    _ = inference.append_simulations(theta, x).train()
    return inference.build_posterior().set_default_x(x_o)
Example No. 12
def run_snpe(total_runs=10, num_generation=6, seed=46, nde='maf'):
    torch.manual_seed(seed)
    num_workers = 16  # for parallel execution of simulations
    Ndata = 3000
    use_embedding_net = True
    use_mcmc = False  # becomes very slow, but can handle leakage
    result_posterior = []
    store_time = []

    prior = utils.BoxUniform(low=torch.tensor([-2.0, -1.0]),
                             high=torch.tensor([2.0, 1.0]))

    simulator_sbi, prior = prepare_for_sbi(simulator, prior)
    
    x_o = np.load("target_ts.npy")
    x_o = torch.tensor(x_o)
    x_o = x_o.reshape((1,100))

    # NN for the summary statistic
    embedding_net = SummaryNet()
    
    for run in range(total_runs):
        print(f"starting run {run}")

        theta_store = []
        time_ticks = []
        posteriors = []
        proposal = prior

        if use_embedding_net:
            neural_posterior = utils.posterior_nn(model=nde,
                                                  embedding_net=embedding_net,
                                                  hidden_features=10,
                                                  num_transforms=2)
        else:
            neural_posterior = utils.posterior_nn(model=nde,
                                                  hidden_features=10,
                                                  num_transforms=2)
        
        inference = SNPE_C(prior=prior, density_estimator=neural_posterior)
    
        for i in range(num_generation):
            print(f"starting round {i}")
            time_begin = time.time()
            theta, x = simulate_for_sbi(simulator_sbi, proposal, num_simulations=Ndata, num_workers=num_workers)
            
            density_estimator = inference.append_simulations(theta, x, proposal=proposal).train()
            posterior = inference.build_posterior(density_estimator, sample_with_mcmc=use_mcmc)
            print("building post done")
            posteriors.append(posterior)
            proposal = posterior.set_default_x(x_o)

            posterior_samples = posterior.sample((Ndata,), x=x_o).numpy()
            print("Post samples done")
            theta_store.append(posterior_samples)
            time_ticks.append(time.time() - time_begin)
        
        result_posterior.append(theta_store)
        store_time.append(time_ticks)
    return np.asarray(result_posterior), np.asarray(store_time), posteriors
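
A hypothetical driver for run_snpe; `simulator`, `SummaryNet`, and the file "target_ts.npy" are assumed to be available elsewhere in the script:

    # Shapes follow run_snpe's bookkeeping: one block of posterior samples and
    # one wall-clock measurement per (run, generation) pair.
    samples, times, posteriors = run_snpe(total_runs=2, num_generation=3)
    print(samples.shape)  # (2, 3, Ndata, theta_dim)
    print(times.shape)    # (2, 3)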
Example No. 13
def test_inference_with_user_sbi_problems(
    snpe_method: type, user_simulator: Callable, user_prior
):
    """
    Test inference with combinations of user defined simulators, priors and x_os.
    """

    simulator, prior = prepare_for_sbi(user_simulator, user_prior)
    inference = snpe_method(
        prior,
        density_estimator="mdn_snpe_a" if snpe_method == SNPE_A else "maf",
        show_progress_bars=False,
    )

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=2)

    # Build posterior.
    if snpe_method == SNPE_A:
        if not isinstance(prior, (MultivariateNormal, BoxUniform, DirectPosterior)):
            with pytest.raises(AssertionError):
                # SNPE-A does not support priors yet.
                _ = inference.build_posterior()
        else:
            _ = inference.build_posterior()
    else:
        _ = inference.build_posterior()
Example No. 14
def test_mdn_with_1D_uniform_prior():
    """
    Note: we have this test because, for 1D uniform priors, MDN log-prob evaluation
    returns a batch_size x batch_size tensor. This is probably because Uniform does
    not allow an event dimension > 1, and somewhere in pyknos it is used as if this
    were possible. Casting to BoxUniform solves it.
    """
    num_dim = 1
    x_o = torch.tensor([[1.0]])
    num_samples = 100

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior = Uniform(low=torch.zeros(num_dim), high=torch.ones(num_dim))

    def simulator(theta: Tensor) -> Tensor:
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNPE(prior, density_estimator="mdn")

    theta, x = simulate_for_sbi(simulator, prior, 100)
    _ = inference.append_simulations(theta, x).train(training_batch_size=50)
    posterior = inference.build_posterior().set_default_x(x_o)
    samples = posterior.sample((num_samples,))
    log_probs = posterior.log_prob(samples)

    assert log_probs.shape == torch.Size([num_samples])
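
The shape issue described in the docstring comes from torch's Uniform treating a 1D bound as a batch dimension, whereas sbi's BoxUniform (an Independent-wrapped Uniform) treats it as the event dimension. A minimal sketch of the difference:

    import torch
    from torch.distributions import Uniform
    from sbi.utils import BoxUniform

    theta = torch.rand(10, 1)
    u = Uniform(torch.zeros(1), torch.ones(1))
    b = BoxUniform(torch.zeros(1), torch.ones(1))
    print(u.log_prob(theta).shape)  # torch.Size([10, 1]) -- per dimension
    print(b.log_prob(theta).shape)  # torch.Size([10])    -- per sample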
Example No. 15
def mdn_inference_with_different_methods(method, set_seed):

    num_dim = 2
    x_o = torch.tensor([[1.0, 0.0]])
    num_samples = 500
    num_simulations = 2000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    gt_posterior = true_posterior_linear_gaussian_mvn_prior(
        x_o[0], likelihood_shift, likelihood_cov, prior_mean, prior_cov
    )
    target_samples = gt_posterior.sample((num_samples,))

    def simulator(theta: Tensor) -> Tensor:
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = method(prior, density_estimator="mdn")

    theta, x = simulate_for_sbi(simulator, prior, num_simulations)
    _ = inference.append_simulations(theta, x).train(training_batch_size=50)
    posterior = inference.build_posterior().set_default_x(x_o)

    samples = posterior.sample((num_samples,))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=f"{method}")
Example No. 16
def test_example_posterior(snpe_method: type):
    """Return an inferred `NeuralPosterior` for interactive examination."""
    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    if snpe_method == SNPE_A:
        extra_kwargs = dict(final_round=True)
    else:
        extra_kwargs = dict()

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = snpe_method(prior, show_progress_bars=False, **extra_kwargs)
    theta, x = simulate_for_sbi(simulator,
                                prior,
                                1000,
                                simulation_batch_size=10,
                                num_workers=6)
    _ = inference.append_simulations(theta, x).train()

    posterior = inference.build_posterior().set_default_x(x_o)
    assert posterior is not None
Example No. 17
def test_c2st_sre_on_linearGaussian_different_dims(set_seed):
    """Test whether SRE correctly infers a simple example with available ground truth.

    This example has a different number of parameters theta than dimensions of x.
    This test also acts as the only functional test for SRE not marked as slow.

    Args:
        set_seed: fixture for manual seeding
    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim

    x_o = ones(1, x_dim)
    num_samples = 1000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(x_dim)
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o[0],
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )

    def simulator(theta):
        return linear_gaussian(theta,
                               likelihood_shift,
                               likelihood_cov,
                               num_discarded_dims=discard_dims)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SRE(
        prior,
        classifier="resnet",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                5000,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior()
    samples = posterior.sample((num_samples, ),
                               x=x_o,
                               mcmc_parameters={"thin": 3})

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="sre")
Example No. 18
def test_c2st_snpe_on_linearGaussian_different_dims(set_seed):
    """Test whether SNPE B/C infer well a simple example with available ground truth.

    This example has different number of parameters theta than number of x. Also
    this implicitly tests simulation_batch_size=1.

    Args:
        set_seed: fixture for manual seeding
    """

    theta_dim = 3
    x_dim = 2
    discard_dims = theta_dim - x_dim

    x_o = zeros(1, x_dim)
    num_samples = 1000

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(x_dim)
    likelihood_cov = 0.3 * eye(x_dim)

    prior_mean = zeros(theta_dim)
    prior_cov = eye(theta_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    target_samples = samples_true_posterior_linear_gaussian_mvn_prior_different_dims(
        x_o[0],
        likelihood_shift,
        likelihood_cov,
        prior_mean,
        prior_cov,
        num_discarded_dims=discard_dims,
        num_samples=num_samples,
    )

    def simulator(theta):
        return linear_gaussian(theta,
                               likelihood_shift,
                               likelihood_cov,
                               num_discarded_dims=discard_dims)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNPE_C(
        prior,
        density_estimator="maf",
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                2000,
                                simulation_batch_size=1)  # type: ignore
    _ = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior()
    samples = posterior.sample((num_samples, ), x=x_o)

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg="snpe_c")
Example No. 19
def test_training_and_mcmc_on_device(method, model, device):
    """Test training on devices.

    This test does not check training speeds.

    """
    device = process_device(device)

    num_dim = 2
    num_samples = 10
    num_simulations = 500
    max_num_epochs = 5

    x_o = zeros(1, num_dim)
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    prior_mean = zeros(num_dim)
    prior_cov = eye(num_dim)
    prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)

    def simulator(theta):
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    if method == SNPE:
        kwargs = dict(density_estimator=utils.posterior_nn(model=model))
        mcmc_kwargs = dict(
            sample_with_mcmc=True,
            mcmc_method="slice_np",
        )
    elif method == SNLE:
        kwargs = dict(density_estimator=utils.likelihood_nn(model=model))
        mcmc_kwargs = dict(mcmc_method="slice")
    elif method == SNRE:
        kwargs = dict(classifier=utils.classifier_nn(model=model))
        mcmc_kwargs = dict(mcmc_method="slice_np_vectorized")
    else:
        raise ValueError()

    inferer = method(prior, show_progress_bars=False, device=device, **kwargs)

    proposals = [prior]

    # Test for two rounds.
    for r in range(2):
        theta, x = simulate_for_sbi(simulator,
                                    proposal=prior,
                                    num_simulations=num_simulations)
        _ = inferer.append_simulations(theta,
                                       x).train(training_batch_size=100,
                                                max_num_epochs=max_num_epochs)
        posterior = inferer.build_posterior(**mcmc_kwargs).set_default_x(x_o)
        proposals.append(posterior)

    proposals[-1].sample(sample_shape=(num_samples, ), x=x_o, **mcmc_kwargs)
Example No. 20
def test_inference_with_restriction_estimator():

    # likelihood_mean will be likelihood_shift+theta
    num_dim = 3
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)
    x_o = zeros(1, num_dim)
    num_samples = 500

    def linear_gaussian_nan(theta,
                            likelihood_shift=likelihood_shift,
                            likelihood_cov=likelihood_cov):
        condition = theta[:, 0] < 0.0
        x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
        x[condition] = float("nan")

        return x

    prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))
    target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
        x_o,
        likelihood_shift=likelihood_shift,
        likelihood_cov=likelihood_cov,
        num_samples=num_samples,
        prior=prior,
    )

    simulator, prior = prepare_for_sbi(linear_gaussian_nan, prior)
    restriction_estimator = RestrictionEstimator(prior=prior)
    proposals = [prior]
    num_rounds = 2

    for r in range(num_rounds):
        theta, x = simulate_for_sbi(simulator, proposals[-1], 1000)
        restriction_estimator.append_simulations(theta, x)
        if r < num_rounds - 1:
            _ = restriction_estimator.train()
        proposals.append(restriction_estimator.restrict_prior())

    all_theta, all_x, _ = restriction_estimator.get_simulations()

    # Any method can be used in combination with the `RejectionEstimator`.
    inference = SNPE_C(prior=prior)
    posterior_estimator = inference.append_simulations(all_theta,
                                                       all_x).train()

    # Build posterior.
    posterior = DirectPosterior(
        prior=prior,
        posterior_estimator=posterior_estimator).set_default_x(x_o)

    samples = posterior.sample((num_samples, ))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=f"{SNPE_C}")
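
A quick, illustrative check of the invalid region that the restriction estimator learns to exclude (reusing the local linear_gaussian_nan from above; the test thetas are made up):

    theta_test = torch.tensor([[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    x_test = linear_gaussian_nan(theta_test)
    print(torch.isnan(x_test).any(dim=1))  # tensor([ True, False])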
Example No. 21
def test_inference_with_user_sbi_problems(user_simulator: Callable, user_prior):
    """
    Test inference with combinations of user defined simulators, priors and x_os.
    """

    simulator, prior = prepare_for_sbi(user_simulator, user_prior)
    inference = SNPE_C(prior, density_estimator="maf", show_progress_bars=False)

    # Run inference.
    theta, x = simulate_for_sbi(simulator, prior, 100)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=2)
    _ = inference.build_posterior()
Example No. 22
def test_api_snpe_c_posterior_correction(
    sample_with_mcmc, mcmc_method, prior_str, set_seed
):
    """Test that leakage correction applied to sampling works, with both MCMC and
    rejection.

    Args:
        sample_with_mcmc: whether to sample with MCMC instead of rejection
        mcmc_method: which mcmc method to use for sampling
        prior_str: one of "gaussian" or "uniform"
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    def simulator(theta):
        return linear_gaussian(theta, likelihood_shift, likelihood_cov)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNPE_C(
        prior,
        density_estimator="maf",
        simulation_batch_size=50,
        sample_with_mcmc=sample_with_mcmc,
        mcmc_method=mcmc_method,
        show_progress_bars=False,
    )

    theta, x = simulate_for_sbi(simulator, prior, 1000)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior()
    posterior = posterior.set_sample_with_mcmc(sample_with_mcmc).set_mcmc_method(
        mcmc_method
    )

    # Posterior should be corrected for leakage even if num_rounds just 1.
    samples = posterior.sample((10,), x=x_o)

    # Evaluate the samples to check correction factor.
    posterior.log_prob(samples, x=x_o)
Example No. 23
def test_api_snl_sampling_methods(sampling_method: str, prior_str: str,
                                  set_seed):
    """Runs SNL on linear Gaussian and tests sampling from posterior via mcmc.

    Args:
        sampling_method: which sampling method to use (an mcmc method or rejection)
        prior_str: use gaussian or uniform prior
        set_seed: fixture for manual seeding
    """

    num_dim = 2
    num_samples = 10
    num_trials = 2
    # HMC with uniform prior needs good likelihood.
    num_simulations = 10000 if sampling_method == "hmc" else 1000
    x_o = zeros((num_trials, num_dim))
    # Test for multiple chains is cheap when vectorized.
    num_chains = 3 if sampling_method == "slice_np_vectorized" else 1
    if sampling_method == "rejection":
        sample_with = "rejection"
    else:
        sample_with = "mcmc"

    if prior_str == "gaussian":
        prior = MultivariateNormal(loc=zeros(num_dim),
                                   covariance_matrix=eye(num_dim))
    else:
        prior = utils.BoxUniform(-1.0 * ones(num_dim), ones(num_dim))

    simulator, prior = prepare_for_sbi(diagonal_linear_gaussian, prior)
    inference = SNL(prior, show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator,
                                prior,
                                num_simulations,
                                simulation_batch_size=50)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=5)
    posterior = inference.build_posterior(
        sample_with=sample_with,
        mcmc_method=sampling_method).set_default_x(x_o)

    posterior.sample(
        sample_shape=(num_samples, ),
        x=x_o,
        mcmc_parameters={
            "thin": 3,
            "num_chains": num_chains
        },
    )
Example No. 24
def test_api_snpe_c_posterior_correction(sample_with, mcmc_method, prior_str):
    """Test that leakage correction applied to sampling works, with both MCMC and
    rejection.

    """

    num_dim = 2
    x_o = zeros(1, num_dim)

    # likelihood_mean will be likelihood_shift+theta
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)

    if prior_str == "gaussian":
        prior_mean = zeros(num_dim)
        prior_cov = eye(num_dim)
        prior = MultivariateNormal(loc=prior_mean, covariance_matrix=prior_cov)
    else:
        prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    simulator, prior = prepare_for_sbi(
        lambda theta: linear_gaussian(theta, likelihood_shift, likelihood_cov),
        prior)
    inference = SNPE_C(prior, show_progress_bars=False)

    theta, x = simulate_for_sbi(simulator, prior, 1000)
    posterior_estimator = inference.append_simulations(theta, x).train()
    potential_fn, theta_transform = posterior_estimator_based_potential(
        posterior_estimator, prior, x_o)
    if sample_with == "mcmc":
        posterior = MCMCPosterior(
            potential_fn=potential_fn,
            theta_transform=theta_transform,
            proposal=prior,
            method=mcmc_method,
        )
    elif sample_with == "rejection":
        posterior = RejectionPosterior(
            potential_fn=potential_fn,
            proposal=prior,
            theta_transform=theta_transform,
        )

    # Posterior should be corrected for leakage even if num_rounds just 1.
    samples = posterior.sample((10, ))

    # Evaluate the samples to check correction factor.
    _ = posterior.log_prob(samples)
Example No. 25
def test_inference_with_2d_x(embedding, method):

    num_dim = 2
    num_samples = 10
    num_simulations = 100

    prior = utils.BoxUniform(zeros(num_dim), torch.ones(num_dim))

    simulator, prior = prepare_for_sbi(simulator_2d, prior)

    theta_o = torch.ones(1, num_dim)

    if method == SNPE:
        net_provider = utils.posterior_nn(
            model="mdn",
            embedding_net=embedding(),
        )
        sample_kwargs = {"sample_with_mcmc": True}
        num_trials = 1
    elif method == SNLE:
        net_provider = utils.likelihood_nn(model="mdn",
                                           embedding_net=embedding())
        sample_kwargs = {}
        num_trials = 2
    else:
        net_provider = utils.classifier_nn(
            model="mlp",
            embedding_net_x=embedding(),
        )
        sample_kwargs = {
            "mcmc_method": "slice_np_vectorized",
            "mcmc_parameters": {
                "num_chains": 2
            },
        }
        num_trials = 2

    inference = method(prior, net_provider, show_progress_bars=False)
    theta, x = simulate_for_sbi(simulator, prior, num_simulations)
    _ = inference.append_simulations(theta, x).train(training_batch_size=100,
                                                     max_num_epochs=10)
    x_o = simulator(theta_o.repeat(num_trials, 1))
    posterior = inference.build_posterior(**sample_kwargs).set_default_x(x_o)

    posterior.log_prob(
        posterior.sample((num_samples, ), show_progress_bars=False))
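
The embedding argument above is a callable that returns a torch module mapping 2D observations to summary features. A minimal sketch of such a network (architecture and sizes are illustrative):

    import torch.nn as nn

    class Embedding2D(nn.Module):
        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(
                nn.Conv2d(1, 4, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Flatten(),
                nn.LazyLinear(8),
            )

        def forward(self, x):
            # x: (batch, H, W); add the channel dim that Conv2d expects.
            return self.net(x.unsqueeze(1))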
Example No. 26
def test_inference_with_nan_simulator(method: type, exclude_invalid_x: bool,
                                      percent_nans: float, set_seed):

    # likelihood_mean will be likelihood_shift+theta
    num_dim = 3
    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.3 * eye(num_dim)
    x_o = zeros(1, num_dim)
    num_samples = 500
    num_simulations = 2000

    def linear_gaussian_nan(theta,
                            likelihood_shift=likelihood_shift,
                            likelihood_cov=likelihood_cov):
        x = linear_gaussian(theta, likelihood_shift, likelihood_cov)
        # Set nan randomly.
        x[torch.rand(x.shape) < (percent_nans * 1.0 /
                                 x.shape[1])] = float("nan")

        return x

    prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))
    target_samples = samples_true_posterior_linear_gaussian_uniform_prior(
        x_o,
        likelihood_shift=likelihood_shift,
        likelihood_cov=likelihood_cov,
        num_samples=num_samples,
        prior=prior,
    )

    simulator, prior = prepare_for_sbi(linear_gaussian_nan, prior)
    inference = method(prior)

    theta, x = simulate_for_sbi(simulator, prior, num_simulations)
    _ = inference.append_simulations(
        theta, x).train(exclude_invalid_x=exclude_invalid_x)

    posterior = inference.build_posterior().set_default_x(x_o)

    samples = posterior.sample((num_samples, ))

    # Compute the c2st and assert it is near chance level of 0.5.
    check_c2st(samples, target_samples, alg=f"{method}")
Example No. 27
def test_restricted_prior_log_prob(prior):
    """Test whether the log-prob method of the restricted prior works appropriately."""
    def simulator(theta):
        perturbed_theta = theta + 0.5 * torch.randn(2)
        perturbed_theta[theta[:, 0] < 0.8] = torch.as_tensor(
            [float("nan"), float("nan")])
        return perturbed_theta

    if prior == "uniform":
        prior = utils.BoxUniform(-2 * torch.ones(2), 2 * torch.ones(2))
    else:
        prior = MultivariateNormal(torch.zeros(2), torch.eye(2))
    theta, x = simulate_for_sbi(simulator, prior, 1000)

    restriction_estimator = RestrictionEstimator(prior=prior)
    restriction_estimator.append_simulations(theta, x)
    _ = restriction_estimator.train(max_num_epochs=40)
    restricted_prior = restriction_estimator.restrict_prior()

    def integrate_grid(distribution):
        resolution = 500
        range_ = 4
        x = torch.linspace(-range_, range_, resolution)
        y = torch.linspace(-range_, range_, resolution)
        X, Y = torch.meshgrid(x, y)
        xy = torch.stack([X, Y])
        xy = torch.reshape(xy, (2, resolution**2)).T
        dist_on_grid = torch.exp(distribution.log_prob(xy))
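        # Riemann sum: mean density over the grid times the domain's total area.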
        integral = torch.sum(dist_on_grid) / resolution**2 * (2 * range_)**2
        return integral

    integral_restricted = integrate_grid(restricted_prior)
    error = torch.abs(integral_restricted - torch.as_tensor(1.0))
    assert error < 0.01, "The restricted prior does not integrate to one."

    theta = prior.sample((10_000, ))
    restricted_prior_probs = torch.exp(restricted_prior.log_prob(theta))

    valid_thetas = restricted_prior.predict(theta).bool()
    assert torch.all(restricted_prior_probs[valid_thetas] > 0.0
                     ), "Accepted thetas have zero probability."
    assert torch.all(restricted_prior_probs[torch.logical_not(valid_thetas)] ==
                     0.0), "Rejected thetas have non-zero probability."
Example No. 28
def torch_num_params(N):
    simulator, prior = get_simulator(N, 0.01, 1)
    simulator, prior = prepare_for_sbi(simulator, prior)
    density_estimator_build_fun = posterior_nn(
        model="maf",
        hidden_features=50,
        num_transforms=3,
        z_score_x=False,
        z_score_theta=False,
        support_map=True,
    )
    theta, x = simulate_for_sbi(simulator,
                                proposal=prior,
                                num_simulations=10,
                                show_progress_bar=False)
    maf = density_estimator_build_fun(theta, x)
    num_params = 0
    for param in maf.parameters():
        num_params += np.prod(param.shape)
    return num_params
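
Illustrative usage (get_simulator is assumed to be defined elsewhere):

    print(torch_num_params(10))  # parameter count of the 3-transform MAF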
Example No. 29
def flexible():
    num_dim = 3
    x_o = torch.ones(1, num_dim)
    prior_mean = torch.zeros(num_dim)
    prior_cov = torch.eye(num_dim)
    simulator = diagonal_linear_gaussian

    # flexible interface
    prior = torch.distributions.MultivariateNormal(
        loc=prior_mean, covariance_matrix=prior_cov
    )
    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = SNPE(prior)

    theta, x = simulate_for_sbi(simulator, proposal=prior, num_simulations=500)
    density_estimator = inference.append_simulations(theta, x).train()
    posterior = inference.build_posterior(density_estimator)
    posterior.sample((100,), x=x_o)

    return posterior
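
Illustrative usage of the returned posterior (assuming torch is imported as in the example; the alternative observation is made up):

    posterior = flexible()
    samples = posterior.sample((100,), x=torch.zeros(1, 3))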
Example No. 30
def test_plot_summary(method, tmp_path):
    num_dim = 1
    prior = BoxUniform(low=-2 * torch.ones(num_dim),
                       high=2 * torch.ones(num_dim))

    summary_writer = SummaryWriter(tmp_path)

    def linear_gaussian(theta):
        return theta + 1.0 + torch.randn_like(theta) * 0.1

    simulator, prior = prepare_for_sbi(linear_gaussian, prior)

    inference = method(prior=prior, summary_writer=summary_writer)
    theta, x = simulate_for_sbi(simulator, proposal=prior, num_simulations=6)
    train_kwargs = (dict(
        max_num_epochs=5, validation_fraction=0.5, num_atoms=2)
                    if method == SNRE else dict(max_num_epochs=1))
    _ = inference.append_simulations(theta, x).train(**train_kwargs)
    fig, axes = plot_summary(inference)
    assert isinstance(fig, Figure) and isinstance(axes[0], Axes)