Exemplo n.º 1
0
# -*- coding: utf-8 -*-
"""
Tests for dit.util.testing
"""

import pytest

from hypothesis import find, given

import numpy as np

from dit.multivariate import coinformation
from dit.utils.testing import distributions, distribution_structures, markov_chains


@given(dist=distributions(alphabets=1))
def test_distributions1(dist):
    """
    Check that the distributions strategy with a single alphabet
    produces a distribution over a lone binary alphabet.
    """
    expected = ((0, 1),)
    assert dist.alphabet == expected


@given(dist=distributions(alphabets=(2, 2)))
def test_distributions2(dist):
    """
    Check that the distributions strategy with two size-2 alphabets
    produces a distribution over two binary alphabets.
    """
    expected = ((0, 1), (0, 1))
    assert dist.alphabet == expected

Exemplo n.º 2
0
    secrecy_capacity_skar,
    necessary_intrinsic_mutual_information,
    minimal_intrinsic_mutual_information,
    # reduced_intrinsic_mutual_information,
    intrinsic_mutual_information,
    upper_intrinsic_mutual_information)

eps = 1e-4


@settings(deadline=None,
          timeout=unlimited,
          min_satisfying_examples=3,
          max_examples=5,
          suppress_health_check=[HealthCheck.hung_test])
@given(dist=distributions(alphabets=(2, ) * 3))
def test_hierarchy(dist):
    """
    Test that the secrecy bounds are ordered correctly:
        0 <= lower intrinsic MI <= secrecy capacity <= necessary intrinsic MI.

    The distribution is over three binary variables; variables 0 and 1 are
    the communicating parties and variable 2 is the eavesdropper.
    """
    limi = lower_intrinsic_mutual_information(dist, [[0], [1]], [2])
    sc = secrecy_capacity_skar(dist, [[0], [1]], [2])
    nimi = necessary_intrinsic_mutual_information(dist, [[0], [1]], [2])
    mimi = minimal_intrinsic_mutual_information(dist, [[0], [1]], [2])
    # rimi = reduced_intrinsic_mutual_information(dist, [[0], [1]], [2])
    imi = intrinsic_mutual_information(dist, [[0], [1]], [2])
    uimi = upper_intrinsic_mutual_information(dist, [[0], [1]], [2])

    # NOTE(review): mimi, imi, and uimi are computed but never asserted in
    # this excerpt — presumably further ordering asserts were truncated.
    # `eps` gives slack for numerical optimization error.
    assert 0 <= limi + eps
    assert limi <= sc + eps
    assert sc <= nimi + eps
@pytest.mark.parametrize(('rvs', 'crvs'), [
    ([[0], [1]], []),
    ([[0], [1]], [2]),
])
@pytest.mark.parametrize('dist', [dyadic, triadic])
def test_maximum_correlation(dist, rvs, crvs):
    """
    The maximum correlation of these distributions is known to be 1.
    """
    rho = maximum_correlation(dist, rvs, crvs)
    assert rho == pytest.approx(1.0)


@pytest.mark.parametrize('rvs', [['X', 'Y', 'Z'], ['X']])
def test_maximum_correlation_failure(rvs):
    """
    maximum_correlation requires exactly two groups of random variables;
    any other number should raise a ditException.
    """
    pytest.raises(ditException, maximum_correlation, dyadic, rvs)


@given(dist1=distributions(alphabets=((2, 4),)*2, nondegenerate=True),
       dist2=distributions(alphabets=((2, 4),)*2, nondegenerate=True))
def test_maximum_correlation_tensorization(dist1, dist2):
    """
    Test the tensorization property of maximum correlation:
        rho(X X' : Y Y') = max(rho(X:Y), rho(X':Y'))
    where (X, Y) and (X', Y') are independent pairs.
    """
    # Use the `@` operator rather than calling the __matmul__ dunder directly;
    # they are equivalent, and operator syntax is the idiomatic form.
    mixed = dist1 @ dist2
    # In the product distribution, variables (0, 2) come from dist1/dist2's
    # first coordinates and (1, 3) from their second coordinates.
    rho_mixed = maximum_correlation(mixed, [[0, 2], [1, 3]])
    rho_a = maximum_correlation(dist1, [[0], [1]])
    rho_b = maximum_correlation(dist2, [[0], [1]])
    # Loose tolerance: both sides come from numerical optimization.
    assert rho_mixed == pytest.approx(max(rho_a, rho_b), abs=1e-4)
Exemplo n.º 4
0
    relative_entropy,
    variational_distance,
)
from dit.helpers import normalize_pmfs
from dit.multivariate import (
    entropy as H,
    total_correlation as I,
    gk_common_information as K,
    wyner_common_information as C,
    exact_common_information as G,
)

epsilon = 1e-4


@given(dist=distributions())
def test_entropy_upper_bound(dist):
    """
    The entropy of a distribution is at most the log of its number of outcomes:
        H(X) <= log2(|X|)
    """
    bound = np.log2(len(dist.outcomes))
    assert H(dist) <= bound + epsilon


@given(dist1=distributions(alphabets=(10, )),
       dist2=distributions(alphabets=(10, )))
def test_pinskers_inequality(dist1, dist2):
    """
    DKL(p||q) >= V(p||q)**2 / (2log(2))
    """
Exemplo n.º 5
0
def test_distributions3():
    """
    The distributions strategy with size-bounded alphabets (2, 2) should
    always produce two binary alphabets.
    """
    example = distributions(alphabets=((2, 2), (2, 2))).example()
    assert example.alphabet == ((0, 1), (0, 1))
    Test multivariate, with rv names
    """
    icmi = IMI.intrinsic_caekl_mutual_information(dist6, ['W', 'X', 'Y'], 'Z')
    assert icmi == pytest.approx(0)


def test_imi_fail():
    """
    Omitting the conditional (eavesdropper) variable should raise
    a ditException.
    """
    pytest.raises(ditException,
                  IMI.intrinsic_total_correlation, dist1, [[0], [1], [2]])


# Fixed: pytest-rerunfailures expects the keyword `reruns`, not `rerun`
# (the sibling test below already uses `reruns=5`).
@pytest.mark.flaky(reruns=5)
@given(dist=distributions(alphabets=((2, 4),)*3))
def test_bounds(dist):
    """
    The intrinsic total correlation is bounded above by both the mutual
    information and the conditional mutual information:
        I[X:Y v Z] <= I[X:Y]
        I[X:Y v Z] <= I[X:Y|Z]
    """
    imi = IMI.intrinsic_total_correlation(dist, [[0], [1]], [2])
    mi = total_correlation(dist, [[0], [1]])
    cmi = total_correlation(dist, [[0], [1]], [2])
    # Small slack for numerical optimization error.
    assert imi <= mi + 1e-10
    assert imi <= cmi + 1e-10


@pytest.mark.parametrize(('dist', 'val'), [(intrinsic_1, 0.0), (intrinsic_2, 1.5), (intrinsic_3, 1.3932929108738521)])
def test_1(dist, val):
    """
Exemplo n.º 7
0
def test_distributions1():
    """
    With alphabets=1 the strategy yields a single binary alphabet.
    """
    example = distributions(alphabets=1).example()
    assert example.alphabet == ((0, 1),)
Exemplo n.º 8
0
                             relative_entropy,
                             variational_distance,
                            )
from dit.helpers import normalize_pmfs
from dit.multivariate import (entropy as H,
                              total_correlation as I,
                              gk_common_information as K,
                              wyner_common_information as C,
                              exact_common_information as G,
                             )


epsilon = 1e-4


@given(dist=distributions())
def test_entropy_upper_bound(dist):
    """
    Entropy never exceeds the log of the support size:
        H(X) <= log2(|X|)
    """
    n_outcomes = len(dist.outcomes)
    assert H(dist) <= np.log2(n_outcomes) + epsilon


@given(dist1=distributions(alphabets=(10,)), dist2=distributions(alphabets=(10,)))
def test_pinskers_inequality(dist1, dist2):
    """
    DKL(p||q) >= V(p||q)**2 / (2log(2))
    """
    dkl = relative_entropy(dist1, dist2)
Exemplo n.º 9
0
                              dual_total_correlation as B,
                              wyner_common_information as C,
                              exact_common_information as G,
                              functional_common_information as F,
                              mss_common_information as M,
                              )

from dit.utils.testing import distributions

epsilon = 1e-4


@pytest.mark.slow
@pytest.mark.flaky(reruns=5)
@settings(max_examples=5)
@given(dist=distributions(alphabets=(2,)*2))
def test_cis1(dist):
    """
    Test that the common informations are ordered correctly for random
    two-binary-variable distributions:
        K <= J <= B <= C <= G
    (Gács–Körner <= ... <= dual total correlation <= Wyner <= exact.)
    `epsilon` gives slack for numerical optimization error.
    """
    k = K(dist)
    j = J(dist)
    b = B(dist)
    c = C(dist)
    g = G(dist)
    # NOTE(review): f (functional) and m (MSS) common informations are
    # computed but never asserted in this excerpt — presumably the chain
    # continued (g <= f <= m) and was truncated.
    f = F(dist)
    m = M(dist)
    assert k <= j + epsilon
    assert j <= b + epsilon
    assert b <= c + epsilon
    assert c <= g + epsilon
Exemplo n.º 10
0
def test_distributions3():
    """
    Alphabet-size ranges of (2, 2) for both variables always give
    binary alphabets.
    """
    d = distributions(alphabets=((2, 2), (2, 2))).example()
    assert d.alphabet == ((0, 1), (0, 1))
Exemplo n.º 11
0
def test_distributions1():
    """
    A single binary alphabet is produced when alphabets=1.
    """
    d = distributions(alphabets=1).example()
    assert d.alphabet == ((0, 1),)