Example 1
def test_special():
    """
    With a specialty you can re-roll 10s. 1s on the reroll do *not* count as
    botches.
    """
    die = storyteller.special(6, rerolls=4)
    probs = {
        # 1 on the first roll
        -1: 0.1,
        # 2-5 on the first roll
        0: 0.4,
        # 6-9 on the first roll, or 1-5 on the second roll
        1: 0.4 + 0.1 * 0.5,
        # 6-9 on the second roll, or 1-5 on the third roll
        2: 0.4 * 0.1 + 0.5 * 0.01,
        3: 0.4 * 0.01 + 0.5 * 0.001,
        4: 0.4 * 0.001 + 0.5 * 0.0001,
    }
    probs[5] = 1 - sum(probs.values())
    assert die.is_close(DRV(probs))
    assert sum(die.to_dict().values()) == 1
    revised_die = storyteller.revised_special(6, rerolls=4).apply(
        storyteller.total)
    assert revised_die.is_close(DRV(probs))
    assert sum(revised_die.to_dict().values()) == 1
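
A standalone cross-check of the probability table above (standard library only; the helper and its name are mine, not part of omnidice), enumerating the reroll chain exactly with Fractions:

from fractions import Fraction

def specialty(rerolls, first=True):
    """Exact distribution of net successes for one die at difficulty 6:
    1 is a botch only on the first roll, 6-9 is a success, and a 10 is a
    success plus a reroll (up to `rerolls` times)."""
    dist = {}
    for face in range(1, 11):
        if face == 1:
            outcome = {-1 if first else 0: Fraction(1)}
        elif face < 6:
            outcome = {0: Fraction(1)}
        elif face < 10 or rerolls == 0:
            outcome = {1: Fraction(1)}
        else:
            outcome = {1 + k: p for k, p in specialty(rerolls - 1, False).items()}
        for k, p in outcome.items():
            dist[k] = dist.get(k, Fraction(0)) + p * Fraction(1, 10)
    return dist

dist = specialty(4)
assert dist[-1] == Fraction(1, 10)
assert dist[1] == Fraction(45, 100)    # 0.4 + 0.1 * 0.5
assert dist[2] == Fraction(45, 1000)   # 0.4 * 0.1 + 0.5 * 0.01
assert sum(dist.values()) == 1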
Example 2
def test_pool_params():
    """Although not used in the examples, test the parameters."""
    assert ore.pool(1, hd=1).apply(ore.Match.get_matches).is_same(
        ore.matches(1, hd=1))
    drv = ore.pool(2, difficulty=8).apply(ore.Match.get_matches)
    assert drv.is_same(
        DRV({
            (): Fraction(97, 100),
            (ore.Match(2, 8), ): Fraction(1, 100),
            (ore.Match(2, 9), ): Fraction(1, 100),
            (ore.Match(2, 10), ): Fraction(1, 100),
        }))
    drv = ore.pool(2, hd=1, difficulty=8).apply(ore.Match.get_matches)
    assert drv.is_same(
        DRV({
            (): Fraction(79, 100),
            (ore.Match(2, 8), ): Fraction(1, 100),
            (ore.Match(2, 9), ): Fraction(1, 100),
            (ore.Match(3, 10), ): Fraction(1, 100),
            # 18 ways to roll a 10 plus something that isn't a 10
            (ore.Match(2, 10), ): Fraction(18, 100),
        }))
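
The last case (two dice plus a hard die at difficulty 8) can be confirmed by brute force over the 100 equally likely rolls. A standalone sketch, assuming the usual ORE reading that a match is two or more dice showing the same value at or above the difficulty, and that the hard die always shows 10:

from collections import Counter
from fractions import Fraction
from itertools import product

counts = Counter()
for a, b in product(range(1, 11), repeat=2):    # the two ordinary d10s
    roll = [a, b, 10]                           # the hard die always shows 10
    matches = tuple(sorted(
        (roll.count(value), value)              # (width, height)
        for value in set(roll)
        if roll.count(value) >= 2 and value >= 8
    ))
    counts[matches] += 1

dist = {match: Fraction(n, 100) for match, n in counts.items()}
assert dist[()] == Fraction(79, 100)
assert dist[((2, 10),)] == Fraction(18, 100)
assert dist[((3, 10),)] == Fraction(1, 100)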
Example 3
def test_matmul():
    """
    The @ operator can be used with an integer or DRV on the left, and a DRV
    (but not an integer) on the right.
    """
    drv = DRV({1: 0.5, 2: 0.5})
    assert (1 @ drv).to_dict() == drv.to_dict()
    with pytest.raises(TypeError):
        1.0 @ drv
    with pytest.raises(TypeError):
        drv @ 1
    with pytest.raises(TypeError):
        drv @ 1.0
    assert (drv @ drv).to_dict() == {1: 0.25, 2: 0.375, 3: 0.25, 4: 0.125}
    # The difference with a non-int-valued DRV is that you can't put it on
    # the left of the @ operator.
    float_drv = DRV({1.0: 0.5, 2.0: 0.5})
    assert (1 @ float_drv).to_dict() == float_drv.to_dict()
    with pytest.raises(TypeError):
        1.0 @ float_drv
    with pytest.raises(TypeError):
        float_drv @ 1
    with pytest.raises(TypeError):
        float_drv @ 1.0
    with pytest.raises(TypeError):
        float_drv @ float_drv
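
For context, n @ drv is the sum of n independent draws from drv, which is why the left operand must be an integer or an integer-valued DRV. The figures asserted for drv @ drv above follow by conditioning on the left-hand draw; a standalone sketch of that calculation (helper names are mine):

from collections import Counter
from itertools import product

def sum_of(n, pmf):
    """Distribution of the sum of n independent draws from pmf (value -> prob)."""
    out = Counter()
    for values in product(pmf, repeat=n):
        prob = 1.0
        for value in values:
            prob *= pmf[value]
        out[sum(values)] += prob
    return dict(out)

pmf = {1: 0.5, 2: 0.5}
# drv @ drv: the left-hand draw chooses how many right-hand draws to sum.
mixture = Counter()
for n, weight in pmf.items():
    for value, prob in sum_of(n, pmf).items():
        mixture[value] += weight * prob
assert dict(mixture) == {1: 0.25, 2: 0.375, 3: 0.25, 4: 0.125}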
Example 4
def test_tree():
    """
    Extra tests for DRV expression trees, mainly for code coverage.
    """
    # Test the case of a postfix applied to a DRV with no expression tree.
    drv = DRV({1: Fraction(1, 2), 2: Fraction(1, 2)})
    assert repr(drv.faster()) == 'DRV({1: 0.5, 2: 0.5})'
    assert drv.faster().to_dict() == drv.to_dict()

    class Addable(object):
        def __init__(self, value):
            self.value = value

        def __add__(self, other):
            return self.value

    # Test the case of adding None to a DRV with an expression tree. This
    # requires a manually-specified tree because the "usual" ways of
    # constructing a DRV that would have a tree don't result in anything
    # you can add None to.
    drv = DRV({
        Addable(1): Fraction(1, 2),
        Addable(2): Fraction(1, 2)
    },
              tree=Atom('MyCoin()'))
    assert repr(drv + None) == '(MyCoin() + None)'

    # Test the same thing without the expression tree, for comparison
    drv = DRV({Addable(1): Fraction(1, 2), Addable(2): Fraction(1, 2)})
    assert repr(drv + None) == 'DRV({1: Fraction(1, 2), 2: Fraction(1, 2)})'
Example 5
def test_pool_examples():
    """
    Check that the examples of using pool() do at least vaguely work.
    """

    # First example of penalty die
    def penalty(result):
        return PlainResult(*(tuple(result)[1:]))

    penalised = ore.pool(1).apply(penalty).apply(ore.Match.get_matches)
    assert penalised.is_same(DRV({(): 1}))
    penalised = ore.pool(2).apply(penalty).apply(
        ore.Match.get_matches_or_highest)
    assert penalised.apply(len).is_same(DRV({1: 1}))
    assert penalised.apply(lambda x: x[0]).is_same(
        keep_lowest(1, d10,
                    count=2).apply(lambda x: ore.Match(1, next(iter(x)))))

    # Second example of penalty die
    def penalty(result):
        matches = sorted(
            ore.Match.get_all_sets(result),
            key=lambda x: (x.width, x.height),
        )
        matches[0] = ore.Match(matches[0].width - 1, matches[0].height)
        return PlainResult(*(die for match in matches
                             for die in [match.height] * match.width))

    penalised = ore.pool(1).apply(penalty).apply(ore.Match.get_matches)
    assert penalised.is_same(DRV({(): 1}))
    penalised = ore.pool(3).apply(penalty).apply(ore.Match.get_matches)
    # Discarding from the narrowest match of 3 dice doesn't affect your chance
    # of success! Width 3 becomes width 2, and width 2 means there's an
    # unmatched third die to discard.
    assert p(penalised.apply(len) > 0) == p(ore.matches(3).apply(len) > 0)
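
The final assertion, that discarding a die from the narrowest matched set of three dice leaves the chance of at least one match unchanged, can be confirmed by enumerating all 1000 rolls. A standalone sketch (treating unmatched dice as width-1 sets, as the comment above describes):

from collections import Counter
from itertools import product

def has_match(roll):
    return any(count >= 2 for count in Counter(roll).values())

def penalised_has_match(roll):
    counts = Counter(roll)
    # Discard one die from the narrowest set, breaking ties by lowest height;
    # unmatched dice count as width-1 sets.
    narrowest = min(counts, key=lambda value: (counts[value], value))
    counts[narrowest] -= 1
    return any(count >= 2 for count in counts.values())

rolls = list(product(range(1, 11), repeat=3))
assert sum(map(penalised_has_match, rolls)) == sum(map(has_match, rolls))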
Example 6
def test_apply():
    """
    For calculations not supported by operator overloading, you can use the
    apply() function to re-map the generated values. It can be a many-to-one
    mapping, and can return a DRV.
    """
    d6 = DRV({x: 1 / 6 for x in range(1, 7)})
    assert d6.apply(lambda x: x @ d6, allow_drv=True).is_close(d6 @ d6)
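
What allow_drv=True does here: each value x is mapped to the distribution of x @ d6, and the results are combined into one mixture weighted by the probability of each x, which is exactly how d6 @ d6 is defined. A standalone sketch of that flattening using exact Fractions (helper names are mine):

from collections import Counter
from fractions import Fraction
from itertools import product

def sum_of(n, pmf):
    """Distribution of the sum of n independent draws from pmf."""
    out = Counter()
    for values in product(pmf, repeat=n):
        prob = Fraction(1)
        for value in values:
            prob *= pmf[value]
        out[sum(values)] += prob
    return dict(out)

d6_pmf = {x: Fraction(1, 6) for x in range(1, 7)}
# Map each face x to the distribution of x @ d6, then flatten the mixture.
flattened = Counter()
for x, weight in d6_pmf.items():
    for value, prob in sum_of(x, d6_pmf).items():
        flattened[value] += weight * prob

assert sum(flattened.values()) == 1
assert flattened[1] == Fraction(1, 36)    # roll 1, then a single 1
assert flattened[2] == Fraction(7, 216)   # 1 then 2, or 2 then (1, 1)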
Example 7
def test_weighted():
    """
    You can compute a DRV from disjoint cases.
    """
    var = DRV({1: 0.5, 2: 0.5})
    var2 = DRV.weighted_average((
        (var, 0.5),
        (var + 2, 0.5),
    ))
    # So, var2 should be uniformly distributed
    assert var2.is_same(DRV({x: 0.25 for x in range(1, 5)}))
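
weighted_average combines disjoint cases: each entry pairs a distribution with the probability of that case, and every value ends up with the weight-scaled sum of its probabilities across the cases. A minimal standalone version of the combination used above (not the library's implementation):

from collections import Counter

def weighted_average(cases):
    out = Counter()
    for pmf, weight in cases:
        for value, prob in pmf.items():
            out[value] += weight * prob
    return dict(out)

var_pmf = {1: 0.5, 2: 0.5}
shifted = {value + 2: prob for value, prob in var_pmf.items()}   # var + 2
assert weighted_average([(var_pmf, 0.5), (shifted, 0.5)]) == {x: 0.25 for x in range(1, 5)}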
Example 8
def test_allow_highest():
    """
    Check that allow_highest has the correct effect.
    """
    assert ore.matches(1).is_same(DRV({(): 1}))
    assert ore.matches(1, allow_highest=False).is_same(DRV({(): 1}))
    allowed = ore.matches(1, allow_highest=True)
    assert allowed.apply(lambda x: x[0].width).is_same(DRV({1: 1}))
    assert allowed.apply(lambda x: x[0].height).is_same(d10)
    # This isn't documented, but the repr() format for Matches is
    # "widthxheight", as used in the rules.
    assert allowed.apply(repr).is_same(d10.apply(lambda x: f'(1x{x},)'))
Example 9
def test_p():
    """
    The p function returns the probability that a boolean DRV is True.
    """
    coins = (10 @ DRV({0: 0.5, 1: 0.5}))
    assert drv.p(coins <= 0) == 0.5**10
    assert drv.p(coins >= 10) == 0.5**10
    assert drv.p(coins >= 5) > 0.5
    assert drv.p(coins >= 5) + drv.p(coins < 5) == 1
    # Non-boolean input is rejected, even though 0 == False and 1 == True
    with pytest.raises(TypeError):
        drv.p(coins)
    # It still works when True (or False) is missing.
    assert drv.p(DRV({False: 1})) == 0
    assert drv.p(DRV({True: 1})) == 1
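
The asserted numbers are plain binomial facts about ten fair coins; for example P(total >= 5) exceeds 0.5 because, by symmetry, it equals (1 + P(total = 5)) / 2. A quick standalone check with math.comb:

from math import comb

def at_least(k, n=10):
    """P(at least k heads from n fair coins)."""
    return sum(comb(n, i) for i in range(k, n + 1)) / 2**n

assert at_least(10) == 0.5**10
assert at_least(5) == (1 + comb(10, 5) / 2**10) / 2
assert at_least(5) > 0.5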
Example 10
def test_sample():
    """
    A DRV with float probabilities uses random(); with Fraction probabilities
    it uses randrange().
    """
    drv = DRV({True: 0.5, False: 0.5})
    mock = Mock()
    mock.random.return_value = 0.3
    mock.randrange.side_effect = TypeError()
    assert drv.sample(random=mock) is True

    drv = DRV({True: Fraction(1, 2), False: Fraction(1, 2)})
    mock = Mock()
    mock.randrange.return_value = 0
    mock.random.side_effect = TypeError()
    assert drv.sample(random=mock) is True
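
The test above only pins down which source of randomness gets consulted, not how the draw is made. For context, a common way to implement such a sample is inverse-CDF sampling; the sketch below is illustrative and not necessarily how omnidice does it:

import random

def sample(pmf, rng=random):
    """Inverse-CDF sampling sketch: walk the cumulative probabilities until
    they pass a uniform draw from rng.random()."""
    u = rng.random()
    cumulative = 0
    for value, prob in pmf.items():
        cumulative += prob
        if u < cumulative:
            return value
    return value   # guard against floating-point shortfall in the total

assert sample({True: 0.5, False: 0.5}) in (True, False)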
Example 11
def test_matmul_non_numeric():
    """
    The @ operator still works if the possible values aren't numbers, provided
    they can be added together using the + operator.
    """
    coin = DRV({'H': 0.5, 'T': 0.5})
    assert (2 @ coin).to_dict() == {x: 0.25 for x in ('HH', 'TT', 'HT', 'TH')}
Example 12
def test_equality():
    """
    Equality operators are already tested by dice_tests.py, but here we check
    some corner cases.
    """
    # Impossible values are excluded.
    var = DRV({'H': 0.5, 'T': 0.5})
    assert (var == 'H').to_dict() == {True: 0.5, False: 0.5}
    assert (var == 'X').to_dict() == {False: 1}
    cheat = DRV({'H': 1})
    assert (cheat == 'H').to_dict() == {True: 1}
    assert (cheat == 'X').to_dict() == {False: 1}
    # No boolean conversion
    with pytest.raises(ValueError):
        var in [cheat, var]
    with pytest.raises(ValueError):
        1 in [cheat, var]
Example 13
def test_convolve_switch():
    """
    There's a switch to enable/disable the numpy.convolve optimisation.

    This feature is used by scripts/convolve_performance.py, which isn't run
    as part of the tests, so we should at least test that it's available,
    enabled by default, and the code runs either with or without it.
    """
    assert drv.CONVOLVE_OPTIMISATION
    # This test doesn't even ensure that the optimisation is used, just that
    # flipping the switch doesn't immediately fail.
    with patch('omnidice.drv.CONVOLVE_OPTIMISATION', True):
        result1 = (10 @ DRV({1: 0.5, 2: 0.5})).to_dict()
    with patch('omnidice.drv.CONVOLVE_OPTIMISATION', False):
        result2 = (10 @ DRV({1: 0.5, 2: 0.5})).to_dict()
    assert result1.keys() == result2.keys()
    assert list(result1.values()) == list(map(pytest.approx, result2.values()))
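
The optimisation rests on the fact that the PMF of the sum of two independent integer-valued variables is the discrete convolution of their PMFs, so numpy.convolve on dense probability vectors computes the whole addition in one call. A standalone sketch of that equivalence, assuming both supports are consecutive integers starting at 1:

import numpy as np

def add_via_convolve(p, q):
    """p, q: probability vectors for the values 1..len(p) and 1..len(q).
    Returns the PMF of the sum, whose support is 2..len(p) + len(q)."""
    return {value: float(prob) for value, prob in enumerate(np.convolve(p, q), start=2)}

d2 = [0.5, 0.5]
assert add_via_convolve(d2, d2) == {2: 0.25, 3: 0.5, 4: 0.25}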
Example 14
def test_dice():
    """The dice() function returns the summarised data from the rolls."""
    with pytest.raises(ValueError):
        opend6.dice(0)
    # One die is a wild die.
    assert opend6.dice(1).is_same(opend6.wild_die)
    # Two dice is wild + regular, and we need to test that addition is correct.
    expected = DRV.weighted_average((
        (DRV({0: 1}), Fraction(1, 6)),
        (d6 + d6.explode().given(lambda x: x != 1), Fraction(5, 6)),
    ))
    _check(
        opend6.dice(2),
        expected,
        single=False,
        totals=d6 + d6.explode(),
        highest=pool(d6, d6).apply(max),
    )
Example 15
def test_keep_lowest():
    """
    Roll N, keep the worst K of some DRV.
    """
    pool = pools.keep_lowest(2, d6, count=3)
    assert p(pool == pools.PlainResult(6, 6)) == Fraction(1, 216)
    # There are three ways each to get 1, 1, x for x = 2..6, plus 1, 1, 1.
    assert p(pool == pools.PlainResult(1, 1)) == Fraction(16, 216)
    pool0 = pools.keep_lowest(0, d6, count=10)
    assert pool0.is_same(DRV({pools.PlainResult(): 1}))
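
The 16/216 figure is easy to confirm by enumerating all 216 rolls: keep the two lowest of three d6s and count the rolls that leave (1, 1). A standalone check:

from fractions import Fraction
from itertools import product

rolls = list(product(range(1, 7), repeat=3))
hits = sum(tuple(sorted(roll)[:2]) == (1, 1) for roll in rolls)
assert Fraction(hits, len(rolls)) == Fraction(16, 216)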
Example 16
def _check(
        result,
        values,
        botch=Fraction(1, 6),
        single=True,
        totals=None,
        highest=None,
):
    if totals is None:
        totals = values
    if highest is None:
        highest = totals
    assert result.apply(type).is_same(DRV({opend6.Result: 1}))
    assert result.apply(int).is_same(values)
    # Test the fields of Result
    assert result.apply(lambda x: x.total).is_same(totals)
    assert result.apply(lambda x: x.highest).is_same(highest)
    assert p(result.apply(lambda x: x.botch)) == botch
    assert result.apply(lambda x: x.singleton).is_same(DRV({single: 1}))
Example 17
def test_convolve():
    """
    There is an optimisation which uses numpy.convolve for large additions.
    Run some bigger jobs, to make sure it all works correctly.
    """
    def check(result):
        result = result.to_dict()
        assert set(result) == set(range(2, 2001))
        for idx in range(2, 1002):
            assert result[idx] == pytest.approx((idx - 1) / 1E6)
        for idx in range(1002, 2001):
            assert result[idx] == pytest.approx((2001 - idx) / 1E6)

    d1000 = DRV({idx: 0.001 for idx in range(1, 1001)})
    check(d1000 + d1000)
    floaty = d1000.apply(float)
    check(floaty + floaty)
    sparse = d1000.apply(lambda x: x * 1000)
    check((sparse + sparse).apply(lambda x: x // 1000))
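
The values expected by check() form the triangular distribution of the sum of two uniform d1000s: there are s - 1 ordered pairs adding to s for s <= 1001, and 2001 - s above that, out of 10**6. The same triangle on a smaller scale, for two d4s:

from collections import Counter
from itertools import product

counts = Counter(a + b for a, b in product(range(1, 5), repeat=2))
assert counts == {2: 1, 3: 2, 4: 3, 5: 4, 6: 3, 7: 2, 8: 1}
for total, ways in counts.items():
    assert ways == (total - 1 if total <= 5 else 9 - total)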
Example 18
def test_standard(target):
    """Difficulty 2 - 10"""
    expected = DRV({
        -1: 0.1,
        0: (target - 2) / 10,
        1: (11 - target) / 10,
    })
    die = storyteller.standard(target)
    assert die.is_close(expected)
    assert die.is_same(die.apply(storyteller.total))
    revised_die = storyteller.revised_standard(target)
    assert revised_die.apply(storyteller.total).is_close(expected)
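
The expected distribution encodes one d10 against a target: a 1 is a botch (-1), faces 2 to target - 1 are failures (target - 2 of them), and faces target to 10 are successes (11 - target of them). A standalone enumeration confirming both formulas for every difficulty:

from fractions import Fraction

for target in range(2, 11):
    failures = sum(1 for face in range(2, 11) if face < target)
    successes = sum(1 for face in range(2, 11) if face >= target)
    assert Fraction(failures, 10) == Fraction(target - 2, 10)
    assert Fraction(successes, 10) == Fraction(11 - target, 10)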
Example 19
def test_hard():
    """
    Hard dice can be added to the pool.
    """
    # With d+hd, there's only one possible way to get a match
    assert ore.matches(d=1, hd=1).is_close(
        DRV({
            (ore.Match(2, 10), ): 1 / 10,
            (): 9 / 10,
        }))
    # 2d + hd is still small enough to figure out easily...
    assert ore.matches(d=2, hd=1).is_close(
        DRV({
            (ore.Match(3, 10), ): 1 / 100,
            (ore.Match(2, 10), ): 18 / 100,
            **{(ore.Match(2, n), ): 1 / 100
               for n in range(1, 10)},
            (): 72 / 100,
        }))
    # 3d + hd: now we can get two matches, a pair of 10s plus a pair of
    # something else.
    result = p(ore.matches(d=3, hd=1).apply(len) == 2)
    assert result == pytest.approx(9 * 3 / 1000)
Example 20
def test_given():
    """
    Conditional probability distribution, given that some predicate is true.
    """
    var = DRV({x: 0.125 for x in range(8)})
    var_odd = var.given(lambda x: x % 2 != 0)
    var_even = var.given(lambda x: x % 2 == 0)
    assert p(var_odd == 2) == 0
    assert p(var_odd == 1) == 0.25
    assert p(var_even == 2) == 0.25
    assert p(var_even == 1) == 0
    var_square = var.given(lambda x: int(sqrt(x))**2 == x)
    assert p(var_square == 0) == pytest.approx(1 / 3)
    with pytest.raises(ZeroDivisionError):
        var.given(lambda x: x == 8)
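
given(predicate) is conditional probability: drop the values that fail the predicate and rescale the survivors so they sum to 1 again, which is also why an impossible condition ends in ZeroDivisionError. A minimal standalone version (not the library's implementation):

def given(pmf, predicate):
    kept = {value: prob for value, prob in pmf.items() if predicate(value)}
    scale = 1 / sum(kept.values())   # an impossible condition divides by zero here
    return {value: prob * scale for value, prob in kept.items()}

var_pmf = {x: 0.125 for x in range(8)}
assert given(var_pmf, lambda x: x % 2 == 0) == {0: 0.25, 2: 0.25, 4: 0.25, 6: 0.25}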
Example 21
def test_more_dice(reg):
    """
    Test with higher numbers of dice. Three dice is wild + 2 @ regular, etc.
    """
    def remove_highest(values):
        return sum(values) - max(values)

    expected = DRV.weighted_average((
        (pool(d6, count=reg).apply(remove_highest), Fraction(1, 6)),
        (reg @ d6 + d6.explode().given(lambda x: x != 1), Fraction(5, 6)),
    ))
    _check(
        opend6.dice(reg + 1),
        expected,
        single=False,
        totals=reg @ d6 + d6.explode(),
        highest=pool(d6, count=reg + 1).apply(max),
    )
Example 22
def test_bad_probabilities():
    """
    Probabilities passed to the constructor must be between 0 and 1.
    """
    DRV({1: 1.0})
    DRV({1: 1})
    with pytest.raises(ValueError):
        DRV({1: -0.5, 2: 0.5, 3: 0.5, 4: 0.5})
    with pytest.raises(ValueError):
        DRV({1: -0.00000000001, 2: 0.5, 3: 0.5})
    with pytest.raises(ValueError):
        DRV({1: 1.00000000001})
    # They don't have to add up to exactly 1, though
    DRV({1: 0.333, 2: 0.333, 3: 0.333})
Example 23
def test_dice_char():
    """The char parameter adds dice bought with Character Points"""

    # I don't think the rules explicitly say what happens on a botch, if an
    # exploded character die is the highest score rolled. I'm going to assume
    # that the botch cancels the 6.
    def cancel(value):
        return max(value - 6, 0)

    expected = DRV.weighted_average((
        (d6.explode().apply(cancel), Fraction(1, 6)),
        (d6.explode() + d6.explode().given(lambda x: x != 1), Fraction(5, 6)),
    ))
    _check(
        opend6.dice(1, char=1),
        expected,
        single=False,
        totals=2 @ d6.explode(),
        highest=pool(d6, d6).apply(max),
    )
Example 24
def test_keep_highest():
    """
    Roll N, keep the best K of some DRV.
    """
    pool = pools.keep_highest(2, d6, count=3)
    # There are three ways each to get 6, 6, x for x = 1..5, plus 6, 6, 6.
    assert p(pool == pools.PlainResult(6, 6)) == Fraction(16, 216)
    assert p(pool == pools.PlainResult(1, 1)) == Fraction(1, 216)
    # count=1000 acts as a performance test: if the implementation tries to
    # compute all possibilities and then restrict to 0 dice, it will fail.
    pool0 = pools.keep_highest(0, d6, count=1000)
    assert pool0.is_same(DRV({pools.PlainResult(): 1}))
    # Examples from docs
    poolA = pools.keep_highest(2, d6) + d6 + d6
    poolB = pools.pool(d6, count=3)
    poolC = pools.keep_highest(2, d6, count=3)
    poolD = pools.pool(d6, result_type=pools.KeepHighest(2)) + d6 + d6
    assert poolA.is_same(poolB)
    assert not poolA.is_same(poolC)
    assert poolD.is_same(poolC)
Example 25
def test_revised_botch():
    """
    The botch rule is different in Revised. Now if you roll at least one
    success, you cannot botch, even if it's cancelled out by 1s.
    """
    # With one or two dice, Revised makes no difference from 1st/2nd.
    die = storyteller.revised_standard(6).apply(storyteller.total)
    probs = {
        -1: 0.1,
        0: 0.4,
        1: 0.5,
    }
    assert die.is_close(DRV(probs))
    roll2 = (2 @ storyteller.revised_standard(6)).apply(storyteller.total)
    # With 2 dice, the only rolls that botch are 1+1 .. 1+5 and 2+1 .. 5+1
    assert p(roll2 < 0) == pytest.approx(0.09)
    # It's three dice where the difference kicks in: the case of 2 botches and
    # one success has probability 0.1 * 0.1 * 0.5 * 3 = 0.015. In Revised
    # that's not a botch.
    old_roll = 3 @ storyteller.standard(6)
    new_roll = (3 @ storyteller.revised_standard(6)).apply(storyteller.total)
    assert p(old_roll == -1) == pytest.approx(p(new_roll == -1) + 0.015)
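
The 0.015 in the comment is the chance that three dice show exactly two 1s and one success at difficulty 6, which is exactly the gap asserted between the old and Revised rolls. A standalone count over the 1000 rolls:

from fractions import Fraction
from itertools import product

rolls = list(product(range(1, 11), repeat=3))
hits = sum(
    roll.count(1) == 2 and sum(die >= 6 for die in roll) == 1
    for roll in rolls
)
assert Fraction(hits, len(rolls)) == Fraction(15, 1000)   # 0.1 * 0.1 * 0.5 * 3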
Example 26
def _cached_pool_hd(d: int, hd: int) -> DRV:
    pl = _cached_pool(d)
    if hd > 0:
        pl += Pool(DRV({10: 1}), count=hd)
    return pl
Example 27
def test_is_same():
    """
    The is_same() method tells you whether two objects represent the same
    distribution.
    """
    small = DRV({0: 0.75, 1: 0.25})
    big = DRV({1: 0.75, 2: 0.25})
    booley = DRV({False: 0.75, True: 0.25})
    fraction = DRV({0: Fraction(3, 4), 1: Fraction(1, 4)})
    extra = DRV({0: 0.75, 2: 0, 1: 0.25})
    unordered = DRV({1: 0.25, 0: 0.75})
    approx = DRV({0: 0.75 + 1e-10, 1: 0.25 - 1e-10})
    assert small.is_same(small)
    assert (small + 1).is_same(big)
    assert not small.is_same(big)
    assert small.is_same(booley)
    assert small.is_same(fraction)
    assert small.is_same(extra)
    assert small.is_same(unordered)
    assert not small.is_same(approx)
Example 28
def plain(pl: DRV) -> DRV:
    """
    Convert all possible values to :obj:`PlainResult`, thus removing any
    special behaviour of the result_type.
    """
    return pl.apply(PlainResult._from)
Example 29
def test_is_close():
    """
    The is_close() method tells you whether two objects represent approximately
    the same distribution.
    """
    small = DRV({0: 0.75, 1: 0.25})
    big = DRV({1: 0.75, 2: 0.25})
    booley = DRV({False: 0.75, True: 0.25})
    fraction = DRV({0: Fraction(3, 4), 1: Fraction(1, 4)})
    extra = DRV({0: 0.75, 2: 0, 1: 0.25})
    unordered = DRV({1: 0.25, 0: 0.75})
    approx = DRV({0: 0.75 + 1e-10, 1: 0.25 - 1e-10})
    assert not small.is_close(big)
    assert small.is_close(approx)
    assert not small.is_close(approx, rel_tol=1e-12)
    # It's down to rounding errors whether or not they're close with absolute
    # tolerance 1e-10. (In fact they aren't.) Just test either side of it.
    assert not small.is_close(approx, abs_tol=5e-11, rel_tol=0)
    assert small.is_close(approx, abs_tol=2e-10, rel_tol=0)
    everything = [small, big, booley, fraction, extra, unordered, approx]
    for a, b in itertools.product(everything, repeat=2):
        if a.is_same(b):
            assert a.is_close(b), (a, b)
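
The tolerances here behave like math.isclose (a per-value comparison with relative and absolute tolerances); whether that is literally how is_close is implemented is an assumption, but the same pattern can be reproduced directly:

import math

a, b = 0.75, 0.75 + 1e-10
assert math.isclose(a, b)                               # default rel_tol=1e-09
assert not math.isclose(a, b, rel_tol=1e-12)
assert not math.isclose(a, b, rel_tol=0, abs_tol=5e-11)
assert math.isclose(a, b, rel_tol=0, abs_tol=2e-10)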
Example 30
def _add_extra(explode: Union[bool, int], result: DRV, blown: Any) -> DRV:
    extra = _get_extra(explode)
    if type(blown) != int:
        extra = extra.apply(type(blown))
    extra += blown
    return result.apply(lambda x: extra if x == blown else x, allow_drv=True)