Example #1
def test_sample_energy_multi_temperature(ctx):
    dim = 1000
    torch.manual_seed(123445)
    temperature = torch.tensor([0.5, 1.0, 2.0], **ctx)[..., None]  # shape (3, 1): one temperature per sample
    n_samples = len(temperature)
    mean = torch.ones(dim, **ctx)
    normal_distribution = NormalDistribution(dim,
                                             mean=mean,
                                             cov=torch.eye(dim, **ctx))

    samples = normal_distribution.sample(n_samples, temperature=temperature)

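    # temperature scales the covariance, so the per-sample variance should match T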
    assert samples.shape == torch.Size([n_samples, dim])
    assert samples.mean().item() == pytest.approx(1.0, abs=5e-2, rel=0)
    assert as_numpy(samples.var(dim=1)) == pytest.approx(as_numpy(
        temperature.flatten()),
                                                         abs=0.2,
                                                         rel=0)

    x = torch.randn(1, dim, **ctx).expand(n_samples, dim)
    energy = normal_distribution.energy(x, temperature=temperature)
    energy_t0 = energy[1]  # energy at the reference temperature T = 1.0
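    # check u_T(x) = u_1(x) / T + const (cf. Example #11)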
    for i in [0, 2]:
        du = energy[i] - energy_t0 / temperature[i]
        du = as_numpy(du)
        assert du.std() < 1e-5
Example #2
def test_distribution_energy(dim, device, dtype):
    """compare torch's normal distribution with bgflow's normal distribution"""
    n_samples = 7
    mean, cov = _random_mean_cov(dim, device, dtype)
    samples = torch.randn((n_samples, dim)).to(device, dtype)
    normal_trch = TorchDistribution(
        MultivariateNormal(loc=mean, covariance_matrix=cov))
    normal_bgtrch = NormalDistribution(dim, mean, cov)
    assert torch.allclose(normal_trch.energy(samples),
                          normal_bgtrch.energy(samples),
                          rtol=2e-2,
                          atol=1e-2)
Example #3
def test_hutchinson_estimator_reset_noise(rademacher):
    # Test that the noise vector is reset to handle a different batch shape
    dim = 10
    time_independent_dynamics = TimeIndependentDynamics(
        DenseNet([dim, 16, 16, dim], activation=torch.nn.Tanh()))
    hutchinson_estimator = HutchinsonEstimator(rademacher)
    normal_distribution = NormalDistribution(dim)

    x = normal_distribution.sample(100)
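    # the first call draws and caches noise matching this batch shape (100, dim)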
    _, _ = hutchinson_estimator(time_independent_dynamics, None, x)
    x = normal_distribution.sample(10)
    hutchinson_estimator.reset_noise()
    # this will fail if the noise is not reset
    _, _ = hutchinson_estimator(time_independent_dynamics, None, x)
Example #4
def test_kernel_dynamics(n_particles, n_dimensions, use_checkpoints, device):
    # Build flow with kernel dynamics and run initial config.

    dim = n_particles * n_dimensions
    n_samples = 100
    prior = NormalDistribution(dim).to(device)
    latent = prior.sample(n_samples)

    d_max = 8
    mus = torch.linspace(0, d_max, 10).to(device)
    gammas = 0.3 * torch.ones(len(mus))

    mus_time = torch.linspace(0, 1, 5).to(device)
    gammas_time = 0.3 * torch.ones(len(mus_time))

    kernel_dynamics = KernelDynamics(n_particles,
                                     n_dimensions,
                                     mus,
                                     gammas,
                                     optimize_d_gammas=True,
                                     optimize_t_gammas=True,
                                     mus_time=mus_time,
                                     gammas_time=gammas_time)

    flow = DiffEqFlow(dynamics=kernel_dynamics).to(device)

    if not use_checkpoints:
        pytest.importorskip("torchdiffeq")

        samples, dlogp = flow(latent)
        latent2, ndlogp = flow.forward(samples, inverse=True)

        assert samples.shape == torch.Size([n_samples, dim])
        assert dlogp.shape == torch.Size([n_samples, 1])
        # assert (latent - latent2).abs().mean() < 0.002
        # assert (latent - samples).abs().mean() > 0.01
        # assert (dlogp + ndlogp).abs().mean() < 0.002

    else:
        pytest.importorskip("anode")
        flow._use_checkpoints = True
        options = {"Nt": 20, "method": "RK4"}
        flow._kwargs = options

        samples, dlogp = flow(latent)
        latent2, ndlogp = flow.forward(samples, inverse=True)

        assert samples.shape == torch.Size([n_samples, dim])
        assert dlogp.shape == torch.Size([n_samples, 1])
Example #5
def test_normal_distribution(device, dtype, dim, n_samples, temperature):
    """Test sampling of the Normal Distribution."""

    cov = torch.tensor([[1, 0.3], [0.3, 2]], device=device, dtype=dtype)  # hard-coded 2x2, so the test assumes dim == 2
    mean = torch.ones(dim).to(cov)  # place mean on cov's device and dtype
    normal_distribution = NormalDistribution(dim, mean=mean, cov=cov)

    samples = normal_distribution.sample(n_samples, temperature=temperature)
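    # the sample std grows with sqrt(temperature), so scale the tolerance accordingly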
    tol = 0.2 * np.sqrt(temperature)
    assert samples.shape == torch.Size([n_samples, dim])
    assert samples.mean(dim=0).cpu().numpy() == pytest.approx(np.ones(dim),
                                                              abs=tol,
                                                              rel=0)
    assert np.cov(samples.cpu().numpy(), rowvar=False) == pytest.approx(
        cov.cpu().numpy() * temperature, abs=tol, rel=0)
Example #6
def test_sample_product_with_temperature(ctx):
    normal = NormalDistribution(dim=100, mean=torch.zeros(100, **ctx))
    product = ProductDistribution([normal, normal])
    x1, y1 = product.sample(20, temperature=1.)
    x2, y2 = product.sample(20, temperature=100.)

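    # sampling at temperature T scales the std by sqrt(T), so the ratio should be sqrt(1/100) = 0.1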
    assert (x1.std() / x2.std()).item() == pytest.approx(0.1, abs=0.05)
    assert (y1.std() / y2.std()).item() == pytest.approx(0.1, abs=0.05)
Example #7
def test_normalization_2d(ctx, temperature):
    """check the normalization constant at different temperatures"""
    if isinstance(temperature, torch.Tensor):
        temperature = temperature.to(**ctx)
    dim = 2
    n_samples = 2
    cov = torch.tensor([[1, 0.3], [0.3, 2]], **ctx)
    mean = torch.ones(dim, **ctx)
    normal_distribution = NormalDistribution(dim, mean=mean, cov=cov)
    samples = normal_distribution.sample(n_samples, temperature=temperature)

    tt = temperature[..., None] if isinstance(temperature,
                                              torch.Tensor) else temperature
    ref = torch.distributions.MultivariateNormal(loc=mean,
                                                 covariance_matrix=tt * cov)
    logp = as_numpy(ref.log_prob(samples))[..., None]
    u = as_numpy(normal_distribution.energy(samples, temperature=temperature))
    atol = 4e-3 if ctx["dtype"] == torch.float32 else 1e-5
    assert u == pytest.approx(-logp, abs=atol)
Example #8
def test_hutchinson_estimator(dim, rademacher):
    # Test trace estimation of the Hutchinson estimator for small dimensions, where it is less noisy
    n_batch = 1024
    time_independent_dynamics = TimeIndependentDynamics(
        DenseNet([dim, 16, 16, dim], activation=torch.nn.Tanh()))
    hutchinson_estimator = HutchinsonEstimator(rademacher)
    normal_distribution = NormalDistribution(dim)
    x = normal_distribution.sample(n_batch)
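    # Hutchinson's identity: tr(J) = E[eps^T J eps] for noise eps with E[eps eps^T] = I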
    y, trace = hutchinson_estimator(time_independent_dynamics, None, x)
    brute_force_trace = brute_force_jacobian_trace(y, x)
    if rademacher and dim == 1:
        # Hutchinson is exact for Rademacher noise and dim=1
        assert torch.allclose(trace.mean(),
                              -brute_force_trace.mean(),
                              atol=1e-6)
    else:
        assert torch.allclose(trace.mean(),
                              -brute_force_trace.mean(),
                              atol=1e-1)
Example #9
def test_distribution_samples(dim, sample_shape, device, dtype):
    """compare torch's normal distribution with bgflow's normal distribution"""
    mean, cov = _random_mean_cov(dim, device, dtype)
    normal_trch = TorchDistribution(
        MultivariateNormal(loc=mean, covariance_matrix=cov))
    normal_bgtrch = NormalDistribution(dim, mean, cov)
    samples_trch = normal_trch.sample(sample_shape)
    target_shape = torch.Size([sample_shape]) if isinstance(
        sample_shape, int) else sample_shape
    assert samples_trch.size() == target_shape + torch.Size([dim])
    if isinstance(sample_shape, int):
        samples_bgtrch = normal_bgtrch.sample(sample_shape)
        # to make sure both sample from the same distribution, compare all pairwise divergence estimates
        for p in [normal_trch, normal_bgtrch]:
            for q in [normal_trch, normal_bgtrch]:
                for x in [samples_bgtrch, samples_trch]:
                    for y in [samples_bgtrch, samples_trch]:
                        div = torch.mean((-p.energy(x) + q.energy(y)))
                        assert torch.abs(div) < 5e-2
Example #10
def test_bar(ctx, method, compute_uncertainty):
    pytest.importorskip("pymbar")
    dim = 1
    energy1 = NormalDistribution(dim, mean=torch.zeros(dim, **ctx))
    energy2 = NormalDistribution(dim, mean=0.2 * torch.ones(dim, **ctx))
    samples1 = energy1.sample(10000)
    samples2 = energy2.sample(20000)

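    # energy2 is shifted by +1.0 below, so the exact free-energy difference is 1 (both Gaussians are normalized)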
    free_energy, uncertainty = bennett_acceptance_ratio(
        forward_work=(1.0 + energy2.energy(samples1)) -
        energy1.energy(samples1),
        reverse_work=energy1.energy(samples2) -
        (1.0 + energy2.energy(samples2)),
        implementation=method,
        compute_uncertainty=compute_uncertainty)
    assert free_energy.item() == pytest.approx(1., abs=1e-2)
    if compute_uncertainty:
        assert uncertainty.item() < 1e-2
    else:
        assert uncertainty is None
Example #11
def test_normalization_1d(ctx, sigma, temperature):
    if sigma == 1:
        # to check without the cov argument
        normal_1 = NormalDistribution(dim=1).to(**ctx)
    else:
        normal_1 = NormalDistribution(dim=1,
                                      cov=torch.tensor([[sigma**2]])).to(**ctx)
    normal_t = NormalDistribution(dim=1,
                                  cov=torch.tensor([[temperature * sigma**2]
                                                    ])).to(**ctx)
    nbins = 10000
    xmax = 3 * sigma * np.sqrt(temperature)
    x = torch.linspace(-xmax, xmax, nbins, **ctx)[..., None]
    dx = 2 * xmax / nbins
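    # the grid spans +/- 3*sigma*sqrt(T), covering ~99.7% of the probability mass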
    u1 = as_numpy(normal_1.energy(x))
    ut1 = as_numpy(normal_1.energy(x, temperature=temperature))
    ut = as_numpy(normal_t.energy(x))
    atol = 1e-4 if ctx["dtype"] is torch.float32 else 1e-5
    # check that u_T = u_1 / T + const
    assert (u1 / temperature - ut).std() == pytest.approx(0.0, abs=atol)
    assert ut == pytest.approx(ut1, abs=atol)
    # check that integral(exp(-u)) dx = 1
    assert (np.exp(-ut1) * dx).sum() == pytest.approx(1., abs=1e-2)
Example #12
def test_energy(device, dtype):
    """Test energy of the Normal Distribution."""

    dim = 2
    cov = torch.tensor([[1, 0.0], [0.0, 1]], device=device, dtype=dtype)
    mean = torch.ones(dim).to(cov)

    normal_distribution = NormalDistribution(dim, mean=mean, cov=cov)
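    # closed form: u(x) = 0.5 * (x - mean)^T cov^{-1} (x - mean) + 0.5 * log((2*pi)^dim * det(cov))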
    energy = normal_distribution.energy(torch.tensor([1, 2]).to(mean))
    assert energy.cpu().numpy() == pytest.approx(dim / 2 * np.log(2 * np.pi) +
                                                 0.5 * 1,
                                                 abs=1e-3,
                                                 rel=0)

    cov2 = torch.tensor([[1, 0.0], [0.0, 2]]).to(cov)
    normal_distribution2 = NormalDistribution(dim, mean=mean, cov=cov2)
    energy = normal_distribution2.energy(torch.tensor([1, 2]).to(cov))
    assert energy.cpu().numpy() == pytest.approx(dim / 2 * np.log(2 * np.pi) +
                                                 0.5 * 1 / 2 + 0.5 * np.log(2),
                                                 abs=1e-2,
                                                 rel=0)
Example #13
def test_bar_uncertainty(ctx):
    """test consistency with the reference implementation"""
    pytest.importorskip("pymbar")
    dim = 1
    energy1 = NormalDistribution(dim, mean=torch.zeros(dim, **ctx))
    energy2 = NormalDistribution(
        dim, mean=0.2 * torch.ones(dim, **ctx))  # the +1.0 shift below scales its Boltzmann weight by exp(-1)
    samples1 = energy1.sample(1000)
    samples2 = energy2.sample(2000)

    free_energy1, uncertainty1 = bennett_acceptance_ratio(
        forward_work=(1.0 + energy2.energy(samples1)) -
        energy1.energy(samples1),
        reverse_work=energy1.energy(samples2) -
        (1.0 + energy2.energy(samples2)),
        implementation="torch")
    free_energy2, uncertainty2 = bennett_acceptance_ratio(
        forward_work=(1.0 + energy2.energy(samples1)) -
        energy1.energy(samples1),
        reverse_work=energy1.energy(samples2) -
        (1.0 + energy2.energy(samples2)),
        implementation="pymbar")
    assert free_energy1.item() == pytest.approx(free_energy2.item(), rel=1e-3)
    assert uncertainty1.item() == pytest.approx(uncertainty2.item(), rel=1e-3)
Example #14
def test_bar_no_convergence(ctx, method, warn):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        pytest.importorskip("pymbar")
        dim = 1
        energy1 = NormalDistribution(dim, mean=-1e20 * torch.ones(dim, **ctx))
        energy2 = NormalDistribution(dim, mean=1e20 * torch.ones(dim, **ctx))
        samples1 = energy1.sample(5)
        samples2 = energy2.sample(5)
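        # the means are ~2e20 apart, so the work distributions do not overlap and BAR cannot converge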

        if warn:
            # check that the warning is raised
            with pytest.warns(UserWarning, match="BAR could not"):
                free_energy, uncertainty = bennett_acceptance_ratio(
                    forward_work=(1.0 + energy2.energy(samples1)) -
                    energy1.energy(samples1),
                    reverse_work=energy1.energy(samples2) -
                    (1.0 + energy2.energy(samples2)),
                    implementation=method,
                    warn=warn)
        else:
            free_energy, uncertainty = bennett_acceptance_ratio(
                forward_work=(1.0 + energy2.energy(samples1)) -
                energy1.energy(samples1),
                reverse_work=energy1.energy(samples2) -
                (1.0 + energy2.energy(samples2)),
                implementation=method,
                warn=warn)
        assert np.isnan(free_energy.item())
        assert np.isnan(uncertainty.item())
Example #15
import pytest
import torch
from bgflow.distribution import NormalDistribution
from bgflow.nn.flow import DiffEqFlow
from bgflow.nn.flow.dynamics import BlackBoxDynamics, TimeIndependentDynamics
from bgflow.nn.flow.estimator import BruteForceEstimator

dim = 1
n_samples = 100
prior = NormalDistribution(dim)
latent = prior.sample(n_samples)


class SimpleDynamics(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, xs):
        dxs = -xs  # simple linear dynamics: dx/dt = -x
        return dxs


def make_black_box_flow():
    black_box_dynamics = BlackBoxDynamics(
        dynamics_function=TimeIndependentDynamics(SimpleDynamics()),
        divergence_estimator=BruteForceEstimator()
    )

    flow = DiffEqFlow(
        dynamics=black_box_dynamics
    )
    return flow
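

A minimal usage sketch, assuming torchdiffeq is installed; it mirrors the calls in Example #4 and reuses the module-level latent, n_samples, and dim defined above:

flow = make_black_box_flow()
samples, dlogp = flow(latent)  # integrate the ODE forward: latent -> samples
latent2, ndlogp = flow.forward(samples, inverse=True)  # integrate backward
assert samples.shape == torch.Size([n_samples, dim])
assert dlogp.shape == torch.Size([n_samples, 1])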