Example #1
def test_dkl_3(args):
    """
    Test that an exception is raised when p has outcomes that q doesn't have.
    """
    first, second, rvs, crvs = args
    with pytest.raises(ditException):
        kullback_leibler_divergence(first, second, rvs, crvs)
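For context, a minimal usage sketch of kullback_leibler_divergence itself, assuming dit's Distribution constructor and its default base-2 logarithm; the two distributions are illustrative, not dit's test fixtures:

import dit
from dit.divergences import kullback_leibler_divergence

p = dit.Distribution(['0', '1'], [1 / 2, 1 / 2])
q = dit.Distribution(['0', '1'], [1 / 4, 3 / 4])

# D(p || q) = 0.5*log2(0.5/0.25) + 0.5*log2(0.5/0.75), roughly 0.2075 bits.
print(kullback_leibler_divergence(p, q))

# KL divergence is asymmetric: D(q || p) generally differs from D(p || q).
print(kullback_leibler_divergence(q, p))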
Example #2
def test_divergences_to_kl():
    """
    Tests that the generalized divergences properly fall back to KL for the appropriate values of alpha, and not otherwise.
    """
    test_dists = [get_dists_2(), get_dists_3()]
    for dists in test_dists:
        for dist1 in dists:
            for dist2 in dists:
                assert_almost_equal(alpha_divergence(dist1, dist2, alpha=-1),
                                    kullback_leibler_divergence(dist2, dist1))
                if dist1 != dist2:
                    assert_not_equal(alpha_divergence(dist1, dist2, alpha=0),
                                     kullback_leibler_divergence(dist2, dist1))
                for divergence in [
                        renyi_divergence, tsallis_divergence, alpha_divergence,
                        hellinger_divergence
                ]:
                    assert_almost_equal(
                        divergence(dist1, dist2, alpha=1),
                        kullback_leibler_divergence(dist1, dist2))
                    if dist1 != dist2:
                        assert_not_equal(
                            alpha_divergence(dist1, dist2, alpha=0),
                            kullback_leibler_divergence(dist2, dist1))
                        assert_not_equal(
                            alpha_divergence(dist1, dist2, alpha=2),
                            kullback_leibler_divergence(dist2, dist1))
Example #3
def test_divergences_to_kl(dists):
    """
    Tests that the generalized divergences properly fall back to KL for the appropriate values of alpha, and not otherwise.
    """
    for dist1, dist2 in combinations(dists, 2):
        assert alpha_divergence(dist1, dist2, alpha=-1) == pytest.approx(kullback_leibler_divergence(dist2, dist1))

        assert alpha_divergence(dist1, dist2, alpha=0) != pytest.approx(kullback_leibler_divergence(dist2, dist1))
        assert alpha_divergence(dist1, dist2, alpha=2) != pytest.approx(kullback_leibler_divergence(dist2, dist1))
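The pytest-style rewrite above receives dists as an argument, presumably a fixture. A minimal sketch of such a fixture, with hypothetical distributions over a shared alphabet (not the ones dit's suite actually uses):

import pytest
import dit

@pytest.fixture
def dists():
    # Hypothetical fixture: distinct distributions with full support on
    # the same alphabet, so every pairwise KL divergence is finite.
    return [
        dit.Distribution(['0', '1'], [1 / 2, 1 / 2]),
        dit.Distribution(['0', '1'], [1 / 4, 3 / 4]),
        dit.Distribution(['0', '1'], [1 / 3, 2 / 3]),
    ]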
Example #4
def test_dkl_2():
    """
    Test that DKL(d, d) = 0.
    """
    ds = get_dists()
    for d in ds:
        yield assert_almost_equal, kullback_leibler_divergence(d, d), 0
Example #5
def test_divergences_to_kl2(dists, divergence):
    """
    Tests that the generalized divergences properly fall back to KL for the appropriate values of alpha.
    """
    for dist1, dist2 in combinations(dists, 2):
        assert divergence(dist1, dist2, alpha=1) == pytest.approx(
            kullback_leibler_divergence(dist1, dist2))
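test_divergences_to_kl2 additionally receives a divergence argument. A plausible parametrized fixture, assuming the generalized divergences are importable from dit.divergences as in the examples above:

import pytest
from dit.divergences import (alpha_divergence, hellinger_divergence,
                             renyi_divergence, tsallis_divergence)

@pytest.fixture(params=[alpha_divergence, hellinger_divergence,
                        renyi_divergence, tsallis_divergence])
def divergence(request):
    # The test runs once per generalized divergence; each must reduce
    # to the KL divergence at alpha=1.
    return request.param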
Example #6
def test_divergences_to_kl():
    """
    Tests that the generalized divergences properly fall back to KL for the appropriate values of alpha, and not otherwise.
    """
    test_dists = [get_dists_2(), get_dists_3()]
    for dists in test_dists:
        for dist1, dist2 in product(dists, repeat=2):
            assert_almost_equal(alpha_divergence(dist1, dist2, alpha=-1), kullback_leibler_divergence(dist2, dist1))

            if dist1 is not dist2:
                assert_not_equal(alpha_divergence(dist1, dist2, alpha=0), kullback_leibler_divergence(dist2, dist1))

            for divergence in divergences:

                assert_almost_equal(divergence(dist1, dist2, alpha=1), kullback_leibler_divergence(dist1, dist2))

                if dist1 is not dist2:
                    assert_not_equal(alpha_divergence(dist1, dist2, alpha=0), kullback_leibler_divergence(dist2, dist1))
                    assert_not_equal(alpha_divergence(dist1, dist2, alpha=2), kullback_leibler_divergence(dist2, dist1))
Example #7
def lautum_information(dist, rvs=None, crvs=None, rv_mode=None):
    """
    Computes the lautum information.

    Parameters
    ----------
    dist : Distribution
        The distribution from which the lautum information is calculated.
    rvs : list, None
        A list of lists. Each inner list specifies the indexes of the random
        variables used to calculate the lautum information. If None, then the
        lautum information is calculated over all random variables, which is
        equivalent to passing `rvs=dist.rvs`.
    crvs : list, None
        A single list of indexes specifying the random variables to condition
        on. If None, then no variables are conditioned on.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', then the elements are interpreted as random variable
        If `None`, then the value of `dist._rv_mode` is consulted, which
        defaults to 'indices'.

    Returns
    -------
    L : float
        The lautum information.

    Examples
    --------
    >>> outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
    >>> pmf = [3/16, 1/16, 1/16, 3/16, 1/16, 3/16, 3/16, 1/16]
    >>> d = dit.Distribution(outcomes, pmf)
    >>> dit.other.lautum_information(d)
    0.20751874963942196
    >>> dit.other.lautum_information(d, rvs=[[0], [1]])
    0.0

    Raises
    ------
    ditException
        Raised if `dist` is not a joint distribution or if `rvs` or `crvs`
        contain non-existent random variables.
    """
    rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)

    pd = product_distribution(dist, rvs=rvs + [crvs], rv_mode=rv_mode)
    L = kullback_leibler_divergence(pd,
                                    dist,
                                    rvs=rvs,
                                    crvs=crvs,
                                    rv_mode=rv_mode)
    return L
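Note the argument order in the final call: lautum information is the KL divergence of the product-of-marginals distribution from the joint, i.e. the KL divergence taken in the opposite direction from the one defining mutual information. A quick usage sketch reusing the docstring's example:

import dit
from dit.other import lautum_information

outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
pmf = [3 / 16, 1 / 16, 1 / 16, 3 / 16, 1 / 16, 3 / 16, 3 / 16, 1 / 16]
d = dit.Distribution(outcomes, pmf)

# KL(product || joint); per the docstring: 0.20751874963942196
print(lautum_information(d))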
Example #8
def test_dkl_1():
    """
    Test against several known values.
    """
    d1, d2, d3, d4, d5 = get_dists()
    tests = [([d1, d3], 0.5849625007211563),
             ([d1, d4], 0.0294468445267841),
             ([d1, d3, [0]], 0.5849625007211563),
             ([d1, d4, [0]], 0.0294468445267841),
             ([d4, d1, [0]], 0.029049405545331419),
             ([d4, d5], 0.029049405545331419),
             ([d5, d4], 0.0294468445267841),
             ([d4, d5, [0], [1]], 0),
             ([d4, d5, [1], [0]], 0),
             ([d1, d2], np.inf),
             ([d2, d1], np.inf),
             ([d3, d1], np.inf)]
    for args, val in tests:
        yield assert_almost_equal, kullback_leibler_divergence(*args), val
Example #9
def test_dkl_1(args, expected):
    """
    Test against several known values.
    """
    assert kullback_leibler_divergence(*args) == pytest.approx(expected)
Example #10
def test_dkl_2(d):
    """
    Test that DKL(d, d) = 0.
    """
    assert kullback_leibler_divergence(d, d) == pytest.approx(0)