Code example #1
import numpy as np
import torch

import probflow as pf
import probflow.utils.ops as ops  # ops module path assumed for these snippets


def test_zeros():
    """Tests zeros"""

    pf.set_backend("pytorch")

    # Single element
    zeros = ops.zeros([1])
    assert isinstance(zeros, torch.Tensor)
    assert zeros.ndim == 1
    assert zeros.shape[0] == 1
    assert zeros.numpy() == 0.0

    # 1D
    zeros = ops.zeros([5])
    assert isinstance(zeros, torch.Tensor)
    assert zeros.ndim == 1
    assert zeros.shape[0] == 5
    assert all(zeros.numpy() == 0.0)

    # 2D
    zeros = ops.zeros([5, 4])
    assert isinstance(zeros, torch.Tensor)
    assert zeros.ndim == 2
    assert zeros.shape[0] == 5
    assert zeros.shape[1] == 4
    assert np.all(zeros.numpy() == 0.0)

    # 3D
    zeros = ops.zeros([5, 4, 3])
    assert isinstance(zeros, torch.Tensor)
    assert zeros.ndim == 3
    assert zeros.shape[0] == 5
    assert zeros.shape[1] == 4
    assert zeros.shape[2] == 3
    assert np.all(zeros.numpy() == 0.0)
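The test above only pins down the interface of ops.zeros. For context, a
minimal backend-dispatching implementation could look like the sketch below;
this is an illustrative assumption (including the _BACKEND stand-in for the
state set by pf.set_backend), not ProbFlow's actual code.

import torch

_BACKEND = "pytorch"  # stand-in for the backend state set by pf.set_backend()


def zeros(shape):
    """Return a tensor of zeros with the given shape on the active backend"""
    if _BACKEND == "pytorch":
        return torch.zeros(shape)
    import tensorflow as tf

    return tf.zeros(shape)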
Code example #2
def is_close(a, b, tol=1e-3):
    """Helper used below: approximate scalar equality (tolerance assumed)"""
    return np.abs(a - b) < tol


def test_mean():
    """Tests mean"""

    pf.set_backend('pytorch')

    # Should take the mean along the last dimension by default
    ones = torch.ones([5, 4, 3])
    val = ops.mean(ones)
    assert isinstance(val, torch.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 4
    assert np.all(val.numpy() == 1.0)

    # But that can be changed with the axis kwarg
    ones = torch.ones([5, 4, 3])
    val = ops.mean(ones, axis=1)
    assert isinstance(val, torch.Tensor)
    assert val.ndim == 2
    assert val.shape[0] == 5
    assert val.shape[1] == 3
    assert np.all(val.numpy() == 1.0)

    # Actually test values
    val = ops.mean(torch.Tensor([0.9, 1.9, 2.1, 3.1]))
    assert is_close(val.numpy(), 2.0)
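A reduction with the behavior these assertions expect, a mean over the last
axis unless an axis kwarg is given, can be sketched as follows; this is an
assumed illustration for the PyTorch backend, not ProbFlow's implementation.

import torch


def mean(x, axis=-1):
    """Mean along an axis, defaulting to the last axis"""
    return torch.mean(x, dim=axis)

With this sketch, mean(torch.ones([5, 4, 3])) has shape [5, 4], and
mean(torch.ones([5, 4, 3]), axis=1) has shape [5, 3], matching the tests.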
Code example #3
def test_add_col_of():
    """Tests add_col_of"""
    pf.set_backend('pytorch')
    a = torch.randn([2, 3, 5])
    val = ops.add_col_of(a, 1)
    assert val.ndim == 3
    assert val.shape[0] == 2
    assert val.shape[1] == 3
    assert val.shape[2] == 6
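The shape assertions (the last axis grows from 5 to 6) imply that add_col_of
appends one column of a constant value along the last axis. A minimal sketch
under that reading, assuming PyTorch tensors:

import torch


def add_col_of(x, val):
    """Append a column of `val` along the last axis: [..., N] -> [..., N + 1]"""
    col = val * torch.ones(list(x.shape[:-1]) + [1], dtype=x.dtype)
    return torch.cat([x, col], dim=-1)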
Code example #4
def test_kl_divergence():
    """Tests kl_divergence"""

    pf.set_backend("pytorch")

    # Divergence between a distribution and itself should be 0
    dist = torch.distributions.normal.Normal(0, 1)
    assert ops.kl_divergence(dist, dist).numpy() == 0.0

    # Divergence between two different distributions should be >0
    d1 = torch.distributions.normal.Normal(0, 1)
    d2 = torch.distributions.normal.Normal(1, 1)
    assert ops.kl_divergence(d1, d2).numpy() > 0.0

    # Divergence between two more dissimilar distributions should be larger
    d1 = torch.distributions.normal.Normal(0, 1)
    d2 = torch.distributions.normal.Normal(1, 1)
    d3 = torch.distributions.normal.Normal(2, 1)
    assert (
        ops.kl_divergence(d1, d2).numpy() < ops.kl_divergence(d1, d3).numpy()
    )

    # Should auto-convert probflow distributions
    dist = pf.Normal(0, 1)
    assert ops.kl_divergence(dist, dist).numpy() == 0.0
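On the PyTorch backend, a kl_divergence with the tested behavior could simply
delegate to torch.distributions, unwrapping ProbFlow distribution objects
first. The unwrap step below is an assumption about how a ProbFlow
distribution exposes its backend object, not the library's real accessor.

import torch


def kl_divergence(p, q):
    def unwrap(d):
        # Assumed: calling a ProbFlow distribution yields its backend
        # torch.distributions object; torch distributions pass through.
        if isinstance(d, torch.distributions.Distribution):
            return d
        return d()

    return torch.distributions.kl.kl_divergence(unwrap(p), unwrap(q))

For the normals in the test this gives kl_divergence(d1, d2) = 0.5 and
kl_divergence(d1, d3) = 2.0 (the KL divergence between N(0, 1) and N(mu, 1)
is mu**2 / 2), so the ordering assertion holds.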