def test_entropy():
    """Entropy of uniform variables is log2(arity); list/varargs forms agree."""
    a, b = make_variables("A B", 2)
    c, d = make_variables("C D", 4)
    dist = UniformDist(a, b, c, d)

    # A binary uniform variable carries exactly one bit...
    assert dist.entropy(a) == 1
    # ...and a four-state uniform variable carries two.
    assert dist.entropy(c) == 2
    # Passing a list of variables is equivalent to passing them as varargs.
    assert dist.entropy([a, b]) == dist.entropy(a, b)
def test_uniform():
    """A UniformDist spans the full product space with maximal marginal entropy."""
    a, b = make_variables("A B", 4)
    c = Variable("C", 2)
    dist = UniformDist(a, b, c)

    # One probability cell per joint assignment of (A, B, C).
    assert dist.probabilities.size == 4 * 4 * 2

    # Each marginal entropy equals log2 of the variable's arity.
    for variable, bits in [(a, 2.0), (b, 2.0), (c, 1.0)]:
        assert dist.entropy(variable) == bits
def test_ay_polani():
    """Reproduce Examples 3, 5.1 and 5.2 from Ay & Polani on causal information flow."""
    w, x, y, z = make_variables("W X Y Z", 2)
    wdist = UniformDist(w)

    # --- Ay & Polani, Example 3: W branches into X and Y, which XOR into Z ---
    branch = Equation('BR', [w], [x, y], equations.branch_same_)
    xor_eq = Equation('XOR', [x, y], [z], equations.xor_)
    measure = MeasureCause(CausalGraph([branch, xor_eq]), wdist)

    # See the table on p29.
    assert measure.mutual_info(x, y) == 1
    assert measure.mutual_info(x, y, w) == 0
    assert measure.mutual_info(w, z, y) == 0
    assert measure.causal_flow(x, y) == 0
    assert measure.causal_flow(x, y, w) == 0
    assert measure.causal_flow(w, z, y) == 1

    # --- Ay & Polani, Example 5.1: Z simply copies X ---
    def copy_first_(i1, i2, o1):
        o1[i1] = 1.0

    copy_eq = Equation('COPYX', [x, y], [z], copy_first_)
    measure = MeasureCause(CausalGraph([branch, copy_eq]), wdist)

    # See paragraph at top of page 30.
    assert measure.mutual_info(x, z, y) == 0
    assert measure.causal_flow(x, z, y) == 1
    assert measure.causal_flow(x, z) == 1

    # --- Ay & Polani, Example 5.2: Z is random when X != Y, else XOR ---
    def random_sometimes_(i1, i2, o1):
        if i1 != i2:
            o1[:] = .5
        else:
            equations.xor_(i1, i2, o1)

    rand_eq = Equation('RAND', [x, y], [z], random_sometimes_)
    measure = MeasureCause(CausalGraph([branch, rand_eq]), wdist)

    # See pg 30: flow is (3/4) * log2(4/3); compare with tolerance, not ==.
    expected = 3.0 / 4.0 * log2(4.0 / 3.0)
    assert_allclose(measure.causal_flow(x, z, y), expected)
def test_controlled_diamond():
    """Show average SAD separating paths that mutual information cannot.

    This example can move us from a correlation case to a diamond.
    """
    c1, c2, s1, s2, s3, s4, a1 = make_variables('c1 c2 s1 s2 s3 s4 a1', 2)

    same = Equation('SAME', [c1], [s1], equations.same_)
    branch = Equation('SAMEB', [c2], [s2, s3], equations.branch_same_)
    and_gate = Equation('AND', [s1, s2], [s4], equations.and_)
    or_gate = Equation('OR', [s3, s4], [a1], equations.or_)
    net = CausalGraph([same, branch, and_gate, or_gate])

    # Let's just use Uniform over the two root causes.
    measure = MeasureCause(net, UniformDist(c1, c2))

    # Mutual info is pretty useless, as it is the same across these...
    assert measure.mutual_info(s2, a1) == measure.mutual_info(s3, a1)
    # ...but average SAD distinguishes the two signal paths.
    assert measure.average_sad(s2, a1) < measure.average_sad(s3, a1)
def test_distribution():
    """Intervening on A changes what C reveals about B under C = A xor B.

    Observationally, with A uniform, C carries no information about B;
    clamping A via a do-intervention makes C fully determine B.
    """
    a, b, c = make_variables("A B C", 2)
    eq1 = Equation('xor', [a, b], [c], equations.xor_)
    net = CausalGraph([eq1])
    ab = UniformDist(a, b)

    j_obs = net.generate_joint(ab)
    # Intervene: clamp A to state 0 while B stays uniform.
    j_do_a = net.generate_joint(ab, do_dist=JointDistByState({a: 0}))

    assert j_obs.mutual_info(b, c) == 0
    # Very different under "Doing": with A fixed, C = 0 xor B = B.
    assert j_do_a.mutual_info(b, c) == 1.0
def test_mutual_info():
    """Independent uniform variables share no mutual information."""
    a, b = make_variables("A B", 2)
    c, d = make_variables("C D", 4)
    dist = UniformDist(a, b, c, d)

    # Both the list form and the varargs form should report independence.
    assert dist.mutual_info([a, b], c) == 0
    assert dist.mutual_info(a, b, c) == 0
def test_joint_list_args():
    """joint() accepts a list of variables or varargs interchangeably.

    Renamed from ``test_joint``: a second ``test_joint`` defined later in
    this module shadowed this one, so pytest never collected or ran it.
    """
    a, b, c, d = make_variables("A B C D", 2)
    dist = UniformDist(a, b, c, d)
    assert_frame_equal(
        dist.joint([a, b]).probabilities,
        dist.joint(a, b).probabilities)
def test_joint():
    """Probabilities from joint() match whether given a list or varargs."""
    a, b, c, d = make_variables("A B C D", 2)
    dist = UniformDist(a, b, c, d)

    via_list = dist.joint([a, b]).probabilities
    via_varargs = dist.joint(a, b).probabilities
    # DataFrame comparison: values, index and columns must all agree.
    assert_frame_equal(via_list, via_varargs)