Code example #1
import sys

import matplotlib.pyplot as plt
import torch
from sbi.analysis import pairplot


def use_pairplot(samples_filename, true_params, output_filename="data/infer.png"):
    # Load posterior samples that were saved with torch.save().
    try:
        samples = torch.load(samples_filename)
    except FileNotFoundError:
        print("no input file!")
        sys.exit(1)

    # Pairplot of the six inferred parameters; the ground-truth values
    # (true_params) are overlaid as red markers.
    fig, axes = pairplot(samples,
                         labels=['SG', 'GS', 'CS', 'SC', 'GG', 'CC'],
                         figsize=(10, 8),
                         points=true_params,
                         points_offdiag={'markersize': 6},
                         points_colors='r')
    fig.savefig(output_filename, dpi=150)
    plt.close(fig)
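
A minimal usage sketch for code example #1, assuming the samples file was produced with torch.save() and that the ground-truth parameter values are known; the file path data/samples.pt and the numbers below are placeholders, not values from the original code:

import torch

# Hypothetical ground-truth values for the six parameters ('SG', 'GS', 'CS', 'SC', 'GG', 'CC').
true_params = torch.tensor([[0.5, 1.2, 0.3, 0.8, 1.0, 0.7]])

# "data/samples.pt" is a placeholder path to previously saved posterior samples.
use_pairplot("data/samples.pt", true_params, output_filename="data/infer.png")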
Code example #2
import numpy as np
import torch
from scipy.stats import gaussian_kde
from torch import eye, ones, zeros

from sbi import analysis, utils
from sbi.inference import SNPE_A, prepare_for_sbi, simulate_for_sbi
from sbi.simulators.linear_gaussian import linear_gaussian


def test_sample_conditional(snpe_method: type, set_seed):
    """
    Test whether sampling from the conditional gives the same results as evaluating.

    This compares samples that get smoothed with a Gaussian kde to evaluating the
    conditional log-probability with `eval_conditional_density`.

    `eval_conditional_density` is itself tested in `sbiutils_test.py`. Here, we use
    a bimodal posterior to test the conditional.
    """

    num_dim = 3
    dim_to_sample_1 = 0
    dim_to_sample_2 = 2

    x_o = zeros(1, num_dim)

    likelihood_shift = -1.0 * ones(num_dim)
    likelihood_cov = 0.1 * eye(num_dim)

    prior = utils.BoxUniform(-2.0 * ones(num_dim), 2.0 * ones(num_dim))

    def simulator(theta):
        if torch.rand(1) > 0.5:
            return linear_gaussian(theta, likelihood_shift, likelihood_cov)
        else:
            return linear_gaussian(theta, -likelihood_shift, likelihood_cov)

    if snpe_method == SNPE_A:
        net = utils.posterior_nn("mdn_snpe_a", hidden_features=20)
    else:
        net = utils.posterior_nn("maf", hidden_features=20)

    simulator, prior = prepare_for_sbi(simulator, prior)
    inference = snpe_method(prior,
                            density_estimator=net,
                            show_progress_bars=False)

    # We need a pretty big dataset to properly model the bimodality.
    theta, x = simulate_for_sbi(simulator, prior, 10000)
    _ = inference.append_simulations(theta, x).train(max_num_epochs=50)

    posterior = inference.build_posterior().set_default_x(x_o)
    samples = posterior.sample((50, ))

    # Evaluate the conditional density by drawing samples and smoothing with a Gaussian
    # kde.
    cond_samples = posterior.sample_conditional(
        (500, ),
        condition=samples[0],
        dims_to_sample=[dim_to_sample_1, dim_to_sample_2])
    _ = analysis.pairplot(
        cond_samples,
        limits=[[-2, 2], [-2, 2], [-2, 2]],
        fig_size=(2, 2),
        diag="kde",
        upper="kde",
    )

    limits = [[-2, 2], [-2, 2], [-2, 2]]

    density = gaussian_kde(cond_samples.numpy().T, bw_method="scott")

    X, Y = np.meshgrid(
        np.linspace(limits[0][0], limits[0][1], 50),
        np.linspace(limits[1][0], limits[1][1], 50),
    )
    positions = np.vstack([X.ravel(), Y.ravel()])
    sample_kde_grid = np.reshape(density(positions).T, X.shape)

    # Evaluate the conditional with eval_conditional_density.
    eval_grid = analysis.eval_conditional_density(
        posterior,
        condition=samples[0],
        dim1=dim_to_sample_1,
        dim2=dim_to_sample_2,
        limits=torch.tensor([[-2, 2], [-2, 2], [-2, 2]]),
    )

    # Compare the two densities.
    sample_kde_grid = sample_kde_grid / np.sum(sample_kde_grid)
    eval_grid = eval_grid / torch.sum(eval_grid)

    error = np.abs(sample_kde_grid - eval_grid.numpy())

    max_err = np.max(error)
    assert max_err < 0.0025
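
Code example #2 is a pytest test: `snpe_method` and `set_seed` are injected by pytest rather than passed by hand. A minimal sketch of that wiring, assuming a `set_seed` fixture and parametrization over the SNPE variants (the exact fixture and parameter list in the sbi test suite may differ):

import pytest
import torch

from sbi.inference import SNPE_A, SNPE_C


@pytest.fixture
def set_seed():
    # Fix the global RNG so the stochastic simulator and training are reproducible.
    torch.manual_seed(0)


@pytest.mark.parametrize("snpe_method", (SNPE_A, SNPE_C))
def test_sample_conditional(snpe_method: type, set_seed):
    ...  # body as in code example #2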