def test_coi1():
    """
    Test I for xor.
    """
    d = n_mod_m(3, 2)
    # Coinformation of the 3-bit xor distribution is -1 bit, whether the
    # variables are given implicitly or explicitly.
    assert I(d) == pytest.approx(-1.0)
    assert I(d, [[0], [1], [2]]) == pytest.approx(-1.0)
    # Conditioning on the third variable changes the picture entirely.
    assert I(d, [[0], [1], [2]], [2]) == pytest.approx(0.0)
    assert I(d, [[0], [1]], [2]) == pytest.approx(1.0)
def test_coi1():
    """
    Test I for xor.
    """
    dist = n_mod_m(3, 2)
    # Unconditional coinformation, with and without explicit variable lists.
    assert I(dist) == pytest.approx(-1.0)
    assert I(dist, [[0], [1], [2]]) == pytest.approx(-1.0)
    # Conditional coinformation given the last variable.
    crvs = [2]
    assert I(dist, [[0], [1], [2]], crvs) == pytest.approx(0.0)
    assert I(dist, [[0], [1]], crvs) == pytest.approx(1.0)
def getTE(timeseriesa, timeseriesb, history=1):
    """
    Compute the pairwise transfer entropies between two time series.

    Parameters
    ----------
    timeseriesa, timeseriesb : sequence
        Two time series of symbols (e.g. ints) of equal length.
    history : int
        Markov order, i.e. the number of past time steps conditioned on.

    Returns
    -------
    list of float
        [TE, backTE, 0, 0] where TE is the transfer entropy from
        `timeseriesa` to `timeseriesb` and backTE is the reverse direction.
        The last two slots are placeholders for the intrinsic transfer
        entropies, which are currently not computed; they are kept so the
        return shape matches the documented five-quantity interface.
    """
    # Pair the two series so each time step is an (a_t, b_t) tuple.
    pairofnodes = list(zip(timeseriesa, timeseriesb))
    # Empirical distribution over sliding windows of length history + 1.
    distribution = distribution_from_data(pairofnodes, history + 1, base='linear')
    # Reshape each window into (a-history, b-history, a-now, b-now).
    dist = dit.distconst.modify_outcomes(
        distribution,
        lambda o: (tuple(oo[0] for oo in o[:-1]),
                   tuple(oo[1] for oo in o[:-1]),
                   o[-1][0],
                   o[-1][1]))
    I0, J0, I1, J1 = [0], [1], [2], [3]
    # TE(a -> b): information shared between b's present and a's past,
    # conditioned on b's own past.
    TE = I(dist, [J1, I0], J0)
    # TE(b -> a): the reverse direction.
    backTE = I(dist, [I1, J0], I0)
    return [TE, backTE, 0, 0]
def test_data_processing_inequality(dist):
    """
    given X - Y - Z: I(X:Z) <= I(X:Y)
    """
    mi_near = I(dist, [[0], [1]])  # I(X:Y)
    mi_far = I(dist, [[0], [2]])   # I(X:Z)
    assert mi_far - mi_near <= epsilon
def test_mi_hc(dist):
    """
    given U - X - Y: I[U:Y] <= s*(X||Y)*I[U:X]
    """
    lhs = I(dist, [[0], [2]])
    hc = hypercontractivity_coefficient(dist, [[1], [2]], niter=20)
    rhs = hc * I(dist, [[0], [1]])
    assert lhs <= rhs + epsilon
def test_zhang_yeung_inequality(dist):
    """
    2I(C:D) <= I(A:B)+I(A:CD)+3I(C:D|A)+I(C:D|B)
    """
    lhs = 2 * I(dist, [[2], [3]])
    rhs = (I(dist, [[0], [1]])
           + I(dist, [[0], [2, 3]])
           + 3 * I(dist, [[2], [3]], [0])
           + I(dist, [[2], [3]], [1]))
    assert lhs <= rhs + epsilon
def test_giant_bit2(n, k):
    """
    tests for the giant bit coinformation
    """
    expected = np.log2(k)
    assert I(giant_bit(n, k)) == pytest.approx(expected)
def test_caekl_2(d):
    """
    Ensure that it reduces to the mutual information for bivariate
    distributions reduced from multivariate.
    """
    pair = [[0], [1]]
    coinfo = I(d, pair)
    caekl = J(d, pair)
    assert coinfo == pytest.approx(caekl)
def test_iidsum():
    """
    Test against known value.
    """
    dist = iid_sum(2, 6)
    expected = 1.8955230821535425
    assert I(dist, [[0], [1]], [2]) == pytest.approx(expected)
def test_sp2():
    """
    Test all possible info measures, with rv_names.
    """
    d = n_mod_m(4, 2)
    d.set_rv_names('xyzw')
    ip = ShannonPartition(d)
    # Yield-style test generation was removed in pytest 4; assert directly
    # inside the loop instead.
    for meas in all_info_measures('xyzw'):
        assert ip[meas] == pytest.approx(I(d, meas[0], meas[1]))
def test_max_correlation_mutual_information(dist):
    """
    (p_min * rho(X:Y))^2 <= (2 ln 2)I(X:Y)
    """
    rvs = [[0], [1]]
    p_min = dist.marginal([0]).pmf.min()
    lhs = (p_min * maximum_correlation(dist, rvs)) ** 2
    rhs = 2 * np.log(2) * I(dist, rvs)
    assert lhs <= rhs + epsilon
def test_mincoinfo_1():
    """
    Test mincoinfo
    """
    d = uniform(['000', '111'])
    optimizer = MinCoInfoOptimizer(d, [[0], [1], [2]])
    optimizer.optimize()
    minimized = optimizer.construct_dist()
    assert I(minimized) == pytest.approx(-1)
def test_mis1(d):
    """
    Test that all the mutual informations match for bivariate distributions.
    """
    # Evaluate every mutual-information variant, then check each consecutive
    # pair agrees (same chain of comparisons as asserting i~t, t~b, b~j, j~ii).
    results = [measure(d) for measure in (I, T, B, J, II)]
    for first, second in zip(results, results[1:]):
        assert first == pytest.approx(second)
def _measure(d, sources, target):
    """
    I_ME*, the maximum entropy distribution satisfying the * assumption
    of BROJA.

    Parameters
    ----------
    d : Distribution
        The distribution to compute i_mes for.
    sources : iterable of iterables
        The source variables.
    target : iterable
        The target variable.

    Returns
    -------
    i_mes : float
        The value of I_ME*.
    """
    # Each constrained marginal couples one source with the target.
    marginals = [source + target for source in sources]
    return I(maxent_dist(d, marginals))
def test_coi8():
    """
    Test that I fails on ScalarDistributions
    """
    scalar = SD([1 / 3] * 3)
    with pytest.raises(ditException):
        I(scalar)
def test_coi7():
    """
    Test that H = I for one variable
    """
    dist = D("ABC", [1 / 3, 1 / 3, 1 / 3])
    assert H(dist) == pytest.approx(I(dist))
def test_coi4():
    """
    Test conditional I, with and without names.
    """
    d = n_mod_m(4, 2)
    assert I(d, [[0], [1], [2]], [3]) == pytest.approx(-1.0)
    d.set_rv_names("XYZW")
    # Same query addressed via rv names must give the same value.
    assert I(d, [['X'], ['Y'], ['Z']], ['W']) == pytest.approx(-1.0)
def test_sp2(meas):
    """
    Test all possible info measures, with rv_names
    """
    dist = n_mod_m(4, 2)
    dist.set_rv_names('xyzw')
    partition = ShannonPartition(dist)
    rvs, crvs = meas[0], meas[1]
    assert partition[meas] == pytest.approx(I(dist, rvs, crvs))
def test_coi7():
    """
    Test that H = I for one variable.
    """
    outcomes = "ABC"
    pmf = [1 / 3] * 3
    d = D(outcomes, pmf)
    # For a single variable the coinformation degenerates to the entropy.
    assert H(d) == pytest.approx(I(d))
def test_coi6():
    """
    Test conditional I, with and without names.
    """
    d = n_mod_m(4, 2)
    # The first parity bit is fully determined by the other three, so the
    # conditional coinformation vanishes.
    assert I(d, [[0]], [1, 2, 3]) == pytest.approx(0.0)
    d.set_rv_names("XYZW")
    assert I(d, [['X']], ['Y', 'Z', 'W']) == pytest.approx(0.0)
def test_caekl_1(d):
    """
    Ensure that it reduces to the mutual information for bivariate
    distributions.
    """
    caekl = J(d)
    assert I(d) == pytest.approx(caekl)
def test_coi2():
    """
    Test I for larger parity distribution.
    """
    d = n_mod_m(4, 2)
    assert I(d) == pytest.approx(1.0)
def test_sp1(meas):
    """
    Test all possible info measures
    """
    dist = n_mod_m(4, 2)
    partition = ShannonPartition(dist)
    coinfo = I(dist, meas[0], meas[1])
    assert partition[meas] == pytest.approx(coinfo)
def test_coi2():
    """
    Test I for larger parity distribution
    """
    parity = n_mod_m(4, 2)
    coinfo = I(parity)
    assert coinfo == pytest.approx(1.0)
def test_shannon_inequality(dist):
    """
    I(X:Y|Z) >= 0
    """
    cmi = I(dist, [[0], [1]], [2])
    assert cmi + epsilon >= 0
def test_sp1():
    """
    Test all possible info measures.
    """
    d = n_mod_m(4, 2)
    ip = ShannonPartition(d)
    # Yield-style test generation was removed in pytest 4; assert directly
    # inside the loop instead.
    for meas in all_info_measures(range(4)):
        assert ip[meas] == pytest.approx(I(d, meas[0], meas[1]))
def test_coi4():
    """
    Test conditional I, with and without names
    """
    dist = n_mod_m(4, 2)
    expected = pytest.approx(-1.0)
    assert I(dist, [[0], [1], [2]], [3]) == expected
    dist.set_rv_names("XYZW")
    assert I(dist, [['X'], ['Y'], ['Z']], ['W']) == expected
def test_coi6():
    """
    Test conditional I, with and without names
    """
    dist = n_mod_m(4, 2)
    assert I(dist, [[0]], [1, 2, 3]) == pytest.approx(0.0)
    dist.set_rv_names("XYZW")
    named = I(dist, [['X']], ['Y', 'Z', 'W'])
    assert named == pytest.approx(0.0)
def test_coi3():
    """
    Test I for subsets of variables, with and without names
    """
    dist = n_mod_m(4, 2)
    first_three = [[0], [1], [2]]
    assert I(dist, first_three) == pytest.approx(0.0)
    dist.set_rv_names("XYZW")
    assert I(dist, [['X'], ['Y'], ['Z']]) == pytest.approx(0.0)
def test_coi3():
    """
    Test I for subsets of variables, with and without names.
    """
    d = n_mod_m(4, 2)
    assert I(d, [[0], [1], [2]]) == pytest.approx(0.0)
    d.set_rv_names("XYZW")
    # Same query addressed via rv names must give the same value.
    assert I(d, [['X'], ['Y'], ['Z']]) == pytest.approx(0.0)