Example #1
def pr_box(eta=1, name=False):
    """
    The Popescu-Rohrlich box, or PR box, is the canonical non-signalling, non-local probability
    distribution used in the study of superquantum correlations. It has two space-like separated
    inputs, X and Y, and two associated outputs, A and B.

    `eta` is the noise level of this correlation. For 0 <= eta <= 1/2 the box can be realized
    classically. For 1/2 < eta <= 1/sqrt(2) the box can be realized quantum-mechanically.

    Parameters
    ----------
    eta : float, 0 <= eta <= 1
        The noise level of the box. Defaults to 1.

    name : bool
        Whether to set rv names or not. Defaults to False.

    Returns
    -------
    pr : Distribution
        The PR box distribution.
    """
    outcomes = list(product([0, 1], repeat=4))
    pmf = [ ((1+eta)/16 if (x*y == a^b) else (1-eta)/16) for x, y, a, b in outcomes ]
    pr = Distribution(outcomes, pmf)

    if name:
        pr.set_rv_names("XYAB")

    return pr
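A minimal usage sketch (not part of the original example; it assumes `dit` is installed and the `pr_box` above, together with its `product` and `Distribution` imports, is in scope): the PR box is non-signalling, so every single-party marginal comes out uniform.

from itertools import product   # needed by pr_box above

from dit import Distribution    # needed by pr_box above

pr = pr_box(eta=1.0)            # the noiseless PR box
# Non-signalling sanity check: each single-party marginal is uniform.
for i in range(4):
    print(i, pr.marginal([i]).pmf)   # each line should show [0.5 0.5]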
Example #2
def test_parse_rvs2():
    outcomes = ['00', '11']
    pmf = [1/2]*2
    d = Distribution(outcomes, pmf)
    d.set_rv_names('XY')
    with pytest.raises(ditException):
        parse_rvs(d, ['X', 'Y', 'Z'])
Example #3
def test_pr_1():
    """
    Test that the eta=0 PR box is the uniform distribution over all sixteen four-bit outcomes.
    """
    d1 = Distribution(list(product([0, 1], repeat=4)), [1/16]*16)
    d2 = pr_box(0.0)
    assert d1.is_approx_equal(d2)
Example #4
def test_init11():
    outcomes = ["0", "1"]
    pmf = [1 / 2, 1 / 2]
    d = Distribution(outcomes, pmf)
    sd = ScalarDistribution.from_distribution(d)
    # Different sample space representations
    assert_false(d.is_approx_equal(sd))
Example #5
def test_to_dict():
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    dd = d.to_dict()
    for o, p in dd.items():
        yield assert_almost_equal, d[o], p
Example #6
def test_init12():
    outcomes = ['0', '1']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    sd = ScalarDistribution.from_distribution(d, base=10)
    d.set_base(10)
    # Different sample space representations
    assert_false(d.is_approx_equal(sd))
Example #7
def test_K4():
    outcomes = ['00', '01', '10', '11', '22', '33']
    pmf = [1/8, 1/8, 1/8, 1/8, 1/4, 1/4]
    d = Distribution(outcomes, pmf)
    assert_almost_equal(K(d), 1.5)
    assert_almost_equal(K(d, [[0],[1]]), 1.5)
    d.set_rv_names("XY")
    assert_almost_equal(K(d, [['X'],['Y']]), 1.5)
Example #8
def test_K1():
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_almost_equal(K(d), 1.0)
    assert_almost_equal(K(d, [[0],[1]]), 1.0)
    d.set_rv_names("XY")
    assert_almost_equal(K(d, [['X'],['Y']]), 1.0)
Example #9
def test_K1():
    """ Test K for dependent events """
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert K(d) == pytest.approx(1.0)
    assert K(d, [[0], [1]]) == pytest.approx(1.0)
    d.set_rv_names("XY")
    assert K(d, [['X'], ['Y']]) == pytest.approx(1.0)
Example #10
def test_K3():
    """ Test K for mixed events """
    outcomes = ['00', '01', '11']
    pmf = [1/3, 1/3, 1/3]
    d = Distribution(outcomes, pmf)
    assert_almost_equal(K(d), 0.0)
    assert_almost_equal(K(d, [[0], [1]]), 0.0)
    d.set_rv_names("XY")
    assert_almost_equal(K(d, [['X'], ['Y']]), 0.0)
Example #11
def test_K4():
    """ Test K in a canonical example """
    outcomes = ['00', '01', '10', '11', '22', '33']
    pmf = [1/8, 1/8, 1/8, 1/8, 1/4, 1/4]
    d = Distribution(outcomes, pmf)
    assert K(d) == pytest.approx(1.5)
    assert K(d, [[0], [1]]) == pytest.approx(1.5)
    d.set_rv_names("XY")
    assert K(d, [['X'], ['Y']]) == pytest.approx(1.5)
Example #12
def test_K3():
    """ Test K for mixed events """
    outcomes = ['00', '01', '11']
    pmf = [1/3, 1/3, 1/3]
    d = Distribution(outcomes, pmf)
    assert K(d) == pytest.approx(0.0)
    assert K(d, [[0], [1]]) == pytest.approx(0.0)
    d.set_rv_names("XY")
    assert K(d, [['X'], ['Y']]) == pytest.approx(0.0)
Example #13
def test_K2():
    """ Test conditional K for dependent events """
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_almost_equal(K(d, [[0], [1]], [0]), 0.0)
    assert_almost_equal(K(d, [[0], [1]], [1]), 0.0)
    d.set_rv_names("XY")
    assert_almost_equal(K(d, [['X'], ['Y']], ['X']), 0.0)
    assert_almost_equal(K(d, [['X'], ['Y']], ['Y']), 0.0)
Example #14
def test_really_big_words():
    """
    Test to ensure that large but sparse outcomes are fast.
    """
    outcomes = ['01'*45, '10'*45]
    pmf = [1/2]*2
    d = Distribution(outcomes, pmf)
    d = d.coalesce([range(30), range(30, 60), range(60, 90)])
    new_outcomes = (('10'*15,)*3, ('01'*15,)*3)
    assert_equal(d.outcomes, new_outcomes)
Example #15
def test_insert_join():
    """ Test insert_join """
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    assert_raises(IndexError, insert_join, d, 5, [[0], [1]])

    for idx in range(d.outcome_length()):
        d2 = insert_join(d, idx, [[0], [1]])
        m = d2.marginal([idx])
        npt.assert_allclose(d2.pmf, m.pmf)
Example #16
def test_K5():
    outcomes = ['000', '010', '100', '110', '221', '331']
    pmf = [1/8, 1/8, 1/8, 1/8, 1/4, 1/4]
    d = Distribution(outcomes, pmf)
    assert_almost_equal(K(d, [[0],[1]]), 1.5)
    assert_almost_equal(K(d), 1.0)
    assert_almost_equal(K(d, [[0],[1],[2]]), 1.0)
    d.set_rv_names("XYZ")
    assert_almost_equal(K(d, [['X'],['Y']]), 1.5)
    assert_almost_equal(K(d, [['X'],['Y'],['Z']]), 1.0)
    assert_almost_equal(K(d, ['X', 'Y'], ['Z']), 0.5)
    assert_almost_equal(K(d, ['XY', 'YZ']), 2.0)
Example #17
def test_K5():
    """ Test K on subvariables and conditionals """
    outcomes = ['000', '010', '100', '110', '221', '331']
    pmf = [1/8, 1/8, 1/8, 1/8, 1/4, 1/4]
    d = Distribution(outcomes, pmf)
    assert K(d, [[0], [1]]) == pytest.approx(1.5)
    assert K(d) == pytest.approx(1.0)
    assert K(d, [[0], [1], [2]]) == pytest.approx(1.0)
    d.set_rv_names("XYZ")
    assert K(d, [['X'], ['Y']]) == pytest.approx(1.5)
    assert K(d, [['X'], ['Y'], ['Z']]) == pytest.approx(1.0)
    assert K(d, ['X', 'Y'], ['Z']) == pytest.approx(0.5)
    assert K(d, ['XY', 'YZ']) == pytest.approx(2.0)
Example #18
def test_pr_2():
    """
    Test that the eta=1 (noiseless) PR box is supported exactly on outcomes where a XOR b equals x AND y.
    """
    d1 = Distribution([(0, 0, 0, 0),
                       (0, 0, 1, 1),
                       (0, 1, 0, 0),
                       (0, 1, 1, 1),
                       (1, 0, 0, 0),
                       (1, 0, 1, 1),
                       (1, 1, 0, 1),
                       (1, 1, 1, 0)], [1/8]*8)
    d2 = pr_box(1.0, name=True)
    assert d1.is_approx_equal(d2)
Example #19
def test_disequilibrium4():
    """
    Test that uniform Distributions have zero disequilibrium.
    """
    for n in range(2, 11):
        d = Distribution.from_distribution(uniform(n))
        yield assert_almost_equal, disequilibrium(d), 0
Example #20
def test_LMPR_complexity3():
    """
    Test that uniform Distributions have zero complexity.
    """
    for n in range(2, 11):
        d = Distribution.from_distribution(uniform(n))
        yield assert_almost_equal, LMPR_complexity(d), 0
Example #21
def test_join():
    """ Test join """
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    d2 = join(d, [[0], [1]])
    assert d2.outcomes == (0, 1, 2, 3)
    assert np.allclose(d2.pmf, d.pmf)
Example #22
def test_meet():
    """ Test meet """
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    d2 = meet(d, [[0], [1]])
    assert d2.outcomes == (0, )
    assert np.allclose(d2.pmf, [1])
Example #23
def test_fci1():
    """
    Test known values.
    """
    d = Distribution(['000', '011', '101', '110'], [1 / 4] * 4)
    assert F(d) == pytest.approx(2.0)
    assert F(d, [[0], [1]]) == pytest.approx(0.0)
    assert F(d, [[0], [1]], [2]) == pytest.approx(1.0)
Example #24
def Rdn():
    """
    A distribution with redundant information.
    """
    pmf = [1 / 2] * 2
    outcomes = ['000', '111']
    d = Distribution(outcomes, pmf)
    return d
Example #25
def Subtle():
    """
    The Subtle distribution.
    """
    pmf = [1 / 3] * 3
    outcomes = [('0', '0', '00'), ('1', '1', '11'), ('0', '1', '01')]
    d = Distribution(outcomes, pmf)
    return d
Example #26
def test_disequilibrium6(n):
    """
    Test that peaked Distributions have non-zero disequilibrium.
    """
    d = ScalarDistribution([1] + [0] * (n - 1))
    d.make_dense()
    d = Distribution.from_distribution(d)
    assert disequilibrium(d) > 0
Example #27
def test_simple_rd_5():
    """
    Test against a known result, using the Blahut-Arimoto method.
    """
    dist = Distribution(['0', '1'], [1/2, 1/2])
    rd = RDCurve(dist, beta_num=10, method='ba')
    for r, d in zip(rd.rates, rd.distortions):
        assert r == pytest.approx(1 - entropy(d))
Example #28
def Xor():
    """
    A distribution with synergistic information: [0] xor [1] = [2].
    """
    pmf = [1 / 4] * 4
    outcomes = ['000', '011', '101', '110']
    d = Distribution(outcomes, pmf)
    return d
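A quick check of the synergy claim (a sketch, not part of the original example; it assumes `dit.shannon.mutual_information` is available): each input alone is independent of the output, while both inputs together determine it completely.

import numpy as np
from dit.shannon import mutual_information

d = Xor()
# Neither input by itself says anything about the output ...
assert np.isclose(mutual_information(d, [0], [2]), 0.0)
assert np.isclose(mutual_information(d, [1], [2]), 0.0)
# ... but jointly the two inputs pin the output down completely (1 bit).
assert np.isclose(mutual_information(d, [0, 1], [2]), 1.0)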
Example #29
def test_LMPR_complexity5(n):
    """
    Test that peaked Distributions have zero complexity.
    """
    d = ScalarDistribution([1] + [0] * (n - 1))
    d.make_dense()
    d = Distribution.from_distribution(d)
    assert LMPR_complexity(d) == pytest.approx(0)
Example #30
def test_dist_iter1():
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    for o in d:
        assert o in outcomes
    for o1, o2 in zip(d, outcomes):
        assert o1 == o2
Example #31
def test_simple_rd_1():
    """
    Test against a known result, using scipy.
    """
    dist = Distribution(['0', '1'], [1/2, 1/2])
    rd = RDCurve(dist, beta_num=10)
    for r, d in zip(rd.rates, rd.distortions):
        assert r == pytest.approx(1 - entropy(d))
Example #32
def test_rd():
    """
    Test specific RD optimizer.
    """
    dist = Distribution(['0', '1'], [1 / 2, 1 / 2])
    rd = RateDistortionHamming.functional()
    r, d = rd(dist, beta=0.0)
    assert d == pytest.approx(0.5, abs=1e-5)
Example #33
def test_to_string8():
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0])
    s = d.to_string(show_mask='!')
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 1 (mask: 2)
RV Names:       None

x    p(x)
0!   0.5
1!   0.5"""
    assert_equal(s, s_)
Example #34
@classmethod  # assumed: `cls` as the first parameter implies a classmethod decorator
def generate_individual(cls, input_dist: dit.Distribution, conditional: np.ndarray, nudge_size: float,
                        mutations_per_step: int, start_mutation_size: float,
                        change_mutation_size: float, timestamp: int):
    new_distribution = input_dist.copy()
    instance = cls(input_dist, new_distribution, conditional, nudge_size, mutations_per_step, start_mutation_size,
                   change_mutation_size, timestamp)
    instance.mutate()
    return instance
Example #35
def test_meet_sigalg():
    """ Test meet_sigalg """
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    sigalg = frozenset([frozenset([]), frozenset(outcomes)])
    meeted = meet_sigalg(d, [[0], [1]])
    assert sigalg == meeted
Example #36
def test_join_sigalg():
    """ Test join_sigalg """
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    sigalg = frozenset([frozenset(_) for _ in powerset(outcomes)])
    joined = join_sigalg(d, [[0], [1]])
    assert sigalg == joined
Example #37
def test_LMPR_complexity5(n):
    """
    Test that peaked Distributions have zero complexity.
    """
    d = ScalarDistribution([1] + [0]*(n-1))
    d.make_dense()
    d = Distribution.from_distribution(d)
    assert LMPR_complexity(d) == pytest.approx(0)
Example #38
def test_disequilibrium6(n):
    """
    Test that peaked Distributions have non-zero disequilibrium.
    """
    d = ScalarDistribution([1] + [0]*(n-1))
    d.make_dense()
    d = Distribution.from_distribution(d)
    assert disequilibrium(d) > 0
Example #39
def test_dist_iter2():
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    for o in reversed(d):
        assert o in outcomes
    for o1, o2 in zip(reversed(d), reversed(outcomes)):
        assert o1 == o2
Example #40
def test_to_string8():
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0])
    s = d.to_string(show_mask='!')
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 1 (mask: 2)
RV Names:       None

x    p(x)
0!   0.5
1!   0.5"""
    assert s == s_
Example #41
def ImperfectRdn():
    """
    Like Rdn(), but with a small off-diagonal term.
    """
    pmf = [.499, .5, .001]
    outcomes = [('0', '0', '0'), ('1', '1', '1'), ('0', '1', '0')]
    d = Distribution(outcomes, pmf)
    return d
Example #42
def test_renyi_entropy_2(alpha):
    """
    Test the Renyi entropy of joint distributions.
    """
    d = Distribution(['00', '11', '22', '33'], [1 / 4] * 4)
    assert renyi_entropy(d, alpha) == pytest.approx(2)
    assert renyi_entropy(d, alpha, [0]) == pytest.approx(2)
    assert renyi_entropy(d, alpha, [1]) == pytest.approx(2)
Example #43
def test_LMPR_complexity4():
    """
    Test that peaked Distributions have zero complexity.
    """
    for n in range(2, 11):
        d = ScalarDistribution([1] + [0]*(n-1))
        d.make_dense()
        d = Distribution.from_distribution(d)
        yield assert_almost_equal, LMPR_complexity(d), 0
Example #44
def test_pid_gh4():
    """
    Test igh on a generic trivariate source distribution.
    """
    events = ['0000', '0010', '0100', '0110', '1000', '1010', '1100', '1111']
    d = Distribution(events, [1 / 8] * 8)
    gho = GHOptimizer(d, [[0], [1], [2]], [3])
    res = gho.optimize()
    assert -res.fun == pytest.approx(0.03471177057967193, abs=1e-3)
Example #45
def test_prepare_string3():
    outcomes = [(0, 0), (0, 1), (1, 0), (1, 1)]
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    s_ = """Class:          Distribution
Alphabet:       (0, 1) for all rvs
Base:           linear
Outcome Class:  tuple
Outcome Length: 2
RV Names:       None

x    p(x)
00   0.25
01   0.25
10   0.25
11   0.25"""
    s = d.to_string(str_outcomes=True)
    assert_equal(s, s_)
Example #46
def test_dfts4():
    """
    Test inferring a distribution from a time-series.
    """
    gm = golden_mean()
    ts = np.array([next(gm) for _ in range(1000000)]).reshape(1000000, 1)
    d1 = dist_from_timeseries(ts)
    d2 = Distribution([((0,), 0), ((0,), 1), ((1,), 0)], [1 / 3, 1 / 3, 1 / 3])
    assert d1.is_approx_equal(d2, atol=1e-3)
Example #47
def test_dfts2():
    """
    Test inferring a distribution from a time-series.
    """
    gm = golden_mean()
    ts = [next(gm) for _ in range(1000000)]
    d1 = dist_from_timeseries(ts, base=None)
    d2 = Distribution([((0,), 0), ((0,), 1), ((1,), 0)], [np.log2(1 / 3)] * 3, base=2)
    assert d1.is_approx_equal(d2, atol=1e-2)
Example #48
def test_dfts3():
    """
    Test inferring a distribution from a time-series.
    """
    gm = golden_mean()
    ts = [next(gm) for _ in range(1000000)]
    d1 = dist_from_timeseries(ts, history_length=0)
    d2 = Distribution([(0,), (1,)], [2 / 3, 1 / 3])
    assert d1.is_approx_equal(d2, atol=1e-3)
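The golden_mean() generator used by these time-series tests is not shown in any example here. A plausible sketch of such a generator (an assumption inferred from the expected distributions above, not the test suite's actual helper): the golden-mean process is a binary Markov chain that never emits two 1s in a row, whose stationary marginal is [2/3, 1/3], matching test_dfts3.

import random

def golden_mean():
    """Yield the golden-mean process: after a 0, emit 0 or 1 with equal
    probability; after a 1, always emit 0 (no two consecutive 1s)."""
    symbol = 0
    while True:
        symbol = random.choice([0, 1]) if symbol == 0 else 0
        yield symbol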
Example #49
def test_to_string5():
    # Basic with marginal and mask
    outcomes = ['00', '01', '10', '11']
    pmf = [1 / 4] * 4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0])
    s = d.to_string(show_mask=True)
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 1 (mask: 2)
RV Names:       None

x    p(x)
0*   0.5
1*   0.5"""
    assert_equal(s, s_)
Example #50
def test_pid_preceq3():
    """
    Test ipreceq on a generic trivariate source distribution.
    """
    events = ['0000', '0010', '0100', '0110', '1000', '1010', '1100', '1111']
    d = Distribution(events, [1 / 8] * 8)
    ko = KolchinskyOptimizer(d, [[0], [1], [2]], [3])
    res = ko.optimize()
    assert -res.fun == pytest.approx(0.13795718192252743, abs=1e-3)
Example #51
    def _assign_information(self):
        # get the PID atoms
        keep_sets = PID_sets(self.gate.k)
        string_sets = []
        # the way we're accessing the variables later means we need these as
        # strings. This way we can ensure that all of the strings we use as
        # keys match in our final output
        for ks in keep_sets:
            new_set_entry = []
            for tup in ks:
                new_set_entry.append(str(tup))
            string_sets.append(set(new_set_entry))

        print(string_sets)

        # put these in a dictionary so we can have zeros for missing values
        info_sets = {str(k): 0 for k in keep_sets}

        # calculate entropy of the output distribution for normalization
        output_dist = Distribution(self.gate.outputs,
                                   [1 / 2**self.gate.k] * 2**self.gate.k)
        output_entropy = entropy(output_dist)

        # in some cases we can end up with more coverage than we should have
        # so we will normalize by the total coverage
        coverage_sum = 0

        # ok now we can gather our inputs into redundancies
        # make a label for the redundancy atom by finding input groups
        # that appear in the transition
        for t, trans in enumerate(self._distributed):
            # only have to do this if there are, in fact, literals somewhere
            if len(trans.keys()) > 0:
                redundant = set(trans.keys())
                print('Transition', t)
                print(redundant)
                # find the index of the set that matches this one
                for si, key in enumerate(string_sets):
                    if key == redundant:
                        info_i = si

                # every coverage value matches in a single transition so we can
                # use any of them
                coverage = max(trans.values())
                info_sets[str(keep_sets[info_i])] = coverage
                coverage_sum += coverage

            # if there are no literals we just have to avoid a division by zero
            # and move on
            else:
                coverage_sum = 1

        # normalize to the gate entropy and by coverage
        for k in info_sets:
            info_sets[k] = info_sets[k] / coverage_sum * output_entropy

        return info_sets
Example #52
def test_renyi_entropy_2():
    """
    Test the Renyi entropy of joint distributions.
    """
    d = Distribution(['00', '11', '22', '33'], [1/4]*4)
    for alpha in [0, 1/2, 1, 2, 5, np.inf]:
        yield assert_almost_equal, renyi_entropy(d, alpha), 2
        yield assert_almost_equal, renyi_entropy(d, alpha, [0]), 2
        yield assert_almost_equal, renyi_entropy(d, alpha, [1]), 2
Example #53
def test_disequilibrium6():
    """
    Test that peaked Distributions have non-zero disequilibrium.
    """
    for n in range(2, 11):
        d = ScalarDistribution([1] + [0]*(n-1))
        d.make_dense()
        d = Distribution.from_distribution(d)
        yield assert_greater, disequilibrium(d), 0
Example #54
def test_to_string4():
    # Basic with marginal
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0])
    s = d.to_string()
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 1
RV Names:       None

x   p(x)
0   0.5
1   0.5"""
    assert s == s_
Example #55
def test_prepare_string3():
    outcomes = [(0, 0), (0, 1), (1, 0), (1, 1)]
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    s_ = """Class:          Distribution
Alphabet:       (0, 1) for all rvs
Base:           linear
Outcome Class:  tuple
Outcome Length: 2
RV Names:       None

x    p(x)
00   0.25
01   0.25
10   0.25
11   0.25"""
    s = d.to_string(str_outcomes=True)
    assert s == s_
Example #56
def test_to_string4():
    # Basic with marginal
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0])
    s = d.to_string()
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 1
RV Names:       None

x   p(x)
0   0.5
1   0.5"""
    assert_equal(s, s_)
Example #57
def test_tsallis_entropy_1(q):
    """
    Test the pseudo-additivity property.
    """
    d = Distribution(['00', '01', '02', '10', '11', '12'], [1/6]*6)
    S_AB = tsallis_entropy(d, q)
    S_A = tsallis_entropy(d, q, [0])
    S_B = tsallis_entropy(d, q, [1])
    pa_prop = S_A + S_B + (1-q)*S_A*S_B
    assert S_AB == pytest.approx(pa_prop)
Example #58
def test_to_string2():
    # Test with exact.
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    s = d.to_string(exact=True)
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 2
RV Names:       None

x    p(x)
00   1/4
01   1/4
10   1/4
11   1/4"""
    assert_equal(s, s_)
Example #59
def test_to_string1():
    # Basic
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    s = d.to_string()
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           linear
Outcome Class:  str
Outcome Length: 2
RV Names:       None

x    p(x)
00   0.25
01   0.25
10   0.25
11   0.25"""
    assert_equal(s, s_)
Example #60
def test_to_string9():
    # Basic
    outcomes = ['00', '01', '10', '11']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d.set_base(2)
    s = d.to_string()
    s_ = """Class:          Distribution
Alphabet:       ('0', '1') for all rvs
Base:           2
Outcome Class:  str
Outcome Length: 2
RV Names:       None

x    log p(x)
00   -2.0
01   -2.0
10   -2.0
11   -2.0"""
    assert_equal(s, s_)