def test_cannot_define_a_setting_with_default_not_valid():
    with pytest.raises(InvalidArgument):
        Settings.define_setting(
            u'kittens',
            default=8, description=u'Kittens are pretty great',
            options=(1, 2, 3, 4),
        )
Example 2
def test_define_setting_then_loading_profile():
    x = Settings()
    Settings.define_setting(
        u'fun_times',
        default=3, description=u'Something something spoon',
        options=(1, 2, 3, 4),
    )
    Settings.register_profile('hi', Settings(fun_times=2))
    assert x.fun_times == 3
    assert Settings.get_profile('hi').fun_times == 2
Example 3
def test_provided_kwargs_are_defaults():
    @given(hello=booleans(), world=booleans(), settings=Settings(strict=False))
    def greet(hello, **kwargs):
        assert hello == u'salve'
        assert kwargs == {u'world': u'mundi'}

    greet(u'salve', world=u'mundi')
Example 4
def test_seeds_off_random():
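    # With no explicit Random given, find() seeds off the global `random`
    # module, so restoring the global state must reproduce the same example.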
    s = Settings(max_shrinks=0, database=None)
    r = random.getstate()
    x = find(st.integers(), lambda x: True, settings=s)
    random.setstate(r)
    y = find(st.integers(), lambda x: True, settings=s)
    assert x == y
Example 5
def test_can_run_without_database():
    @given(integers(), settings=Settings(database=None))
    def test_blah(x):
        assert False

    with raises(AssertionError):
        test_blah()
Example 6
def test_contains_the_test_function_name_in_the_exception_string():

    calls = [0]

    @given(integers(), settings=Settings(max_iterations=10, max_examples=10))
    def this_has_a_totally_unique_name(x):
        calls[0] += 1
        assume(False)

    with raises(Unsatisfiable) as e:
        this_has_a_totally_unique_name()
    print(u'Called %d times' % tuple(calls))

    assert this_has_a_totally_unique_name.__name__ in e.value.args[0]

    calls2 = [0]

    class Foo(object):
        @given(integers(),
               settings=Settings(max_iterations=10, max_examples=10))
        def this_has_a_unique_name_and_lives_on_a_class(self, x):
            calls2[0] += 1
            assume(False)

    with raises(Unsatisfiable) as e:
        Foo().this_has_a_unique_name_and_lives_on_a_class()
    print(u'Called %d times' % tuple(calls2))

    assert (Foo.this_has_a_unique_name_and_lives_on_a_class.__name__
            ) in e.value.args[0]
Example 7
    def __init__(self, test_function, settings=None, random=None, database_key=None):
        self._test_function = test_function
        self.settings = settings or Settings()
        self.shrinks = 0
        self.call_count = 0
        self.event_call_counts = Counter()
        self.valid_examples = 0
        self.random = random or Random(getrandbits(128))
        self.database_key = database_key
        self.status_runtimes = {}

        self.all_drawtimes = []
        self.all_runtimes = []

        self.events_to_strings = WeakKeyDictionary()

        self.target_selector = TargetSelector(self.random)

        self.interesting_examples = {}
        # We use call_count because there may be few possible valid_examples.
        self.first_bug_found_at = None
        self.last_bug_found_at = None

        self.shrunk_examples = set()

        self.health_check_state = None

        self.used_examples_from_database = False
        self.tree = DataTree()

        # We want to be able to get the ConjectureData object that results
        # from running a buffer without recalculating, especially during
        # shrinking where we need to know about the structure of the
        # executed test case.
        self.__data_cache = LRUReusedCache(CACHE_SIZE)
Example 8
def test_can_simplify_text_through_a_morpher(rnd):
    m = find(morphers,
             lambda x: bool(x.become(s.text())),
             random=rnd,
             settings=Settings(database=None))
    with BuildContext():
        assert m.become(s.text()) == u'0'
Example 9
def test_flatmap_retrieve_from_db():
    constant_float_lists = floats(0, 1).flatmap(lambda x: lists(just(x)))

    track = []

    db = ExampleDatabase()

    @given(constant_float_lists, settings=Settings(database=db))
    def record_and_test_size(xs):
        track.append(xs)
        assert sum(xs) < 1

    with pytest.raises(AssertionError):
        record_and_test_size()

    assert track
    example = track[-1]

    while track:
        track.pop()

    with pytest.raises(AssertionError):
        record_and_test_size()

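    # The failing example was saved to the database, so the rerun replays
    # it first and the recorded example reappears at track[0].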
    assert track[0] == example
Example 10
def minimal(definition,
            condition=None,
            settings=None,
            timeout_after=10,
            random=None):
    settings = Settings(settings, max_examples=50000, database=None)

    runtime = []

    def wrapped_condition(x):
        if timeout_after is not None:
            if runtime:
                runtime[0] += TIME_INCREMENT
                if runtime[0] >= timeout_after:
                    raise Timeout()
        if condition is None:
            result = True
        else:
            result = condition(x)
        if result and not runtime:
            runtime.append(0.0)
        return result

    return find(definition,
                wrapped_condition,
                settings=settings,
                random=random)
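
For context, a typical call to this helper (in the style of test_small_sum_lists further down this page) might look like:

import hypothesis.strategies as st

# Shrink towards the smallest list of integers whose sum reaches 10,
# using the `minimal` helper defined above.
xs = minimal(st.lists(st.integers()), lambda xs: sum(xs) >= 10)
assert sum(xs) >= 10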
Example 11
    def __init__(
        self, test_function, settings=None, random=None,
        database_key=None,
    ):
        self._test_function = test_function
        self.settings = settings or Settings()
        self.last_data = None
        self.changed = 0
        self.shrinks = 0
        self.call_count = 0
        self.event_call_counts = Counter()
        self.valid_examples = 0
        self.start_time = time.time()
        self.random = random or Random(getrandbits(128))
        self.database_key = database_key
        self.status_runtimes = {}
        self.events_to_strings = WeakKeyDictionary()

        # Tree nodes are stored in an array to prevent heavy nesting of data
        # structures. Branches are dicts mapping bytes to child nodes (which
        # will in general only be partially populated). Leaves are
        # ConjectureData objects that have been previously seen as the result
        # of following that path.
        self.tree = [{}]

        # A node is dead if there is nothing left to explore past that point.
        # Recursively, a node is dead if either it is a leaf or every byte
        # leads to a dead node when starting from here.
        self.dead = set()
        self.forced = {}
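
A minimal sketch (not code from this snippet) of how a buffer would be walked through the flat tree described in the comments above, assuming branch dicts map a byte to the child's index in the node array:

def walk(tree, buffer):
    # Start at the root (index 0) and follow one byte at a time,
    # allocating empty branch nodes for unexplored paths.
    i = 0
    for b in buffer:
        node = tree[i]
        if not isinstance(node, dict):
            return node  # leaf: a previously-seen ConjectureData
        if b not in node:
            node[b] = len(tree)
            tree.append({})
        i = node[b]
    return tree[i]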
Example 12
def minimal(definition,
            condition=None,
            settings=None,
            timeout_after=10,
            random=None):
    settings = Settings(
        settings,
        max_examples=50000,
        max_iterations=100000,
        max_shrinks=5000,
        database=None,
    )

    runtime = [0.0]

    def wrapped_condition(x):
        runtime[0] += TIME_INCREMENT
        if runtime[0] >= timeout_after:
            raise Timeout()

        if condition is None:
            return True
        return condition(x)

    return find(
        definition,
        wrapped_condition,
        settings=settings,
        random=random,
    )
Example 13
class CoinPayoutHelperTestcase(unittest.TestCase):
    """ Tests for :mod:`coin_payout_helper`"""
    @given(st_coins(), st.floats(0, 1), st.floats(0, 1), settings=Settings(max_examples=1000))
    # the following two fixed testcases are already enough for full code coverage
    @example([], 1, .2)
    @example([(100, 1), (50, 2), (20, 3), (10, 1), (5, 0), (2, 0), (1, 0)], 1, 0.01)
    # another side-case: all zeroes
    @example([(100, 0), (50, 0), (20, 0), (10, 0), (5, 0), (2, 0), (1, 0)], 1, .2)
    def test_get_possible_payout(self, coins, requested_fraction, coin_limit_fraction):
        """ test :meth:`coin_payout_helper.get_possible_payout()` for a given state of available coins"""
        total = sum([value * count for (value, count) in coins])
        num_coins = sum([count for (value, count) in coins])
        (payout_infinite_coin_number, allowed_remaining) = coin_payout_helper.get_possible_payout(coins, max_number_of_coins=int(1e9))
        # print coins, total, (payout_infinite_coin_number, allowed_remaining)
        # the limit on the number of coins is not strictly guaranteed, so it isn't tested here.
        (payout_limited_coin_number, _) = coin_payout_helper.get_possible_payout(coins, max_number_of_coins=round(coin_limit_fraction * (num_coins + 20)))

        self.assertGreaterEqual(payout_limited_coin_number, 0)
        self.assertGreaterEqual(payout_infinite_coin_number, 0)
        self.assertLessEqual(payout_limited_coin_number, payout_infinite_coin_number)
        self.assertLessEqual(payout_infinite_coin_number, total)

        # pick request, simulate payout
        requested = round(requested_fraction * total)
        paid_out = simulate_payout(coins, requested)
        self.assertLessEqual(paid_out, requested)
        if requested > payout_infinite_coin_number:
            # requested too much, only the maximum is guaranteed
            self.assertGreaterEqual(paid_out, payout_infinite_coin_number - allowed_remaining)
        else:
            self.assertGreaterEqual(paid_out, requested - allowed_remaining)
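
simulate_payout is not defined in this snippet; a plausible greedy sketch, assuming coins is a list of (value, count) pairs and the payout may never exceed the request:

def simulate_payout(coins, requested):
    # Hypothetical stand-in for the helper used above: pay with the
    # largest coins first, never exceeding `requested`.
    paid = 0
    for value, count in sorted(coins, reverse=True):
        while count > 0 and paid + value <= requested:
            paid += value
            count -= 1
    return paid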
Example 14
def test_still_tears_down_on_failed_reify():
    x = HasSetupAndTeardown()
    with pytest.raises(AttributeError):
        with Settings(perform_health_check=False):
            x.fail_in_reify()
    assert x.setups > 0
    assert x.teardowns == x.setups
Example 15
def test_raises_unsatisfiable_if_all_false():
    @given(integers(), settings=Settings(max_examples=50))
    def test_assume_false(x):
        assume(False)

    with pytest.raises(Unsatisfiable):
        test_assume_false()
Example 16
def test_can_explicitly_pass_settings():
    try:
        FailsEventually.TestCase.settings.stateful_step_count = 15
        run_state_machine_as_test(FailsEventually,
                                  settings=Settings(stateful_step_count=2))
    finally:
        FailsEventually.TestCase.settings.stateful_step_count = 5
Example 17
def test_saves_failing_example_in_database():
    db = ExampleDatabase(":memory:")
    with raises(AssertionError):
        run_state_machine_as_test(
            DepthMachine, settings=Settings(database=db, max_examples=100)
        )
    assert any(list(db.data.values()))
Example 18
def minimal(definition,
            condition=None,
            settings=None,
            timeout_after=10,
            random=None):
    settings = Settings(
        settings,
        max_examples=50000,
        max_iterations=100000,
        max_shrinks=5000,
        database=None,
        timeout=timeout_after,
    )

    condition = condition or (lambda x: True)

    @timeout(timeout_after * 1.20)
    def run():
        return find(
            definition,
            condition,
            settings=settings,
            random=random,
        )

    return run()
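
The @timeout decorator used above is not shown either; a minimal Unix-only sketch of such a helper, built on signal.setitimer (an assumption, not the original implementation):

import signal
from functools import wraps

def timeout(seconds):
    # Hypothetical helper: interrupt the wrapped call if it runs
    # longer than `seconds` (main thread only; relies on SIGALRM).
    def decorate(fn):
        @wraps(fn)
        def wrapped(*args, **kwargs):
            def handler(signum, frame):
                raise TimeoutError('timed out after %r seconds' % (seconds,))
            old_handler = signal.signal(signal.SIGALRM, handler)
            signal.setitimer(signal.ITIMER_REAL, seconds)
            try:
                return fn(*args, **kwargs)
            finally:
                signal.setitimer(signal.ITIMER_REAL, 0)
                signal.signal(signal.SIGALRM, old_handler)
        return wrapped
    return decorate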
Example 19
def test_times_out():
    with pytest.raises(Timeout) as e:
        find(integers(),
             lambda x: time.sleep(0.05) or False,
             settings=Settings(timeout=0.01))

    e.value.args[0]  # just accessing the exception message must not raise
Example 20
def test_given_warns_on_use_of_non_strategies(recwarn):
    @given(bool, settings=Settings(strict=False))
    def test(x):
        pass

    test()
    assert recwarn.pop(DeprecationWarning) is not None
Example 21
def test_can_derandomize():
    @fails
    @given(integers(), settings=Settings(derandomize=True))
    def test_blah(x):
        assert x > 0

    test_blah()
Example 22
def test_raises_timeout_on_slow_test():
    @given(integers(), settings=Settings(timeout=0.01))
    def test_is_slow(x):
        time.sleep(0.02)

    with pytest.raises(Timeout):
        test_is_slow()
Example 23
def test_raises_when_no_example():
    settings = Settings(
        max_examples=20,
        min_satisfying_examples=0,
    )
    with pytest.raises(NoSuchExample):
        find(integers(), lambda x: False, settings=settings)
Example 24
def test_can_collectively_minimize(spec):
    """This should generally exercise strategies' strictly_simpler heuristic by
    putting us in a state where example cloning is required to get to the
    answer fast enough."""

    if spec.template_upper_bound < 2:
        return
    n = 10

    def distinct_reprs(x):
        result = set()
        for t in x:
            result.add(repr(t))
            if len(result) >= 2:
                return True
        return False

    try:
        xs = find(
            lists(spec, min_size=n, max_size=n),
            distinct_reprs,
            settings=Settings(
                timeout=3.0, average_list_length=3, max_examples=2000))
        assert len(xs) == n
        assert len(set(map(repr, xs))) == 2
    except NoSuchExample:
        pass
Example 25
    def run_test():
        if condition is None:

            def _condition(x):
                return True

            condition_string = ""
        else:
            _condition = condition
            condition_string = strip_lambda(
                reflection.get_pretty_function_description(condition)
            )

        def test_function(data):
            try:
                value = data.draw(specifier)
            except UnsatisfiedAssumption:
                data.mark_invalid()
            if not _condition(value):
                data.mark_invalid()
            if predicate(value):
                data.mark_interesting()

        successes = 0
        actual_runs = 0
        for actual_runs in range(1, RUNS + 1):
            # We choose the max_examples a bit larger than default so that we
            # run at least 100 examples outside of the small example generation
            # part of the generation phase.
            runner = ConjectureRunner(
                test_function,
                settings=Settings(
                    max_examples=150,
                    phases=no_shrink,
                    suppress_health_check=suppress_health_check,
                ),
            )
            runner.run()
            if runner.interesting_examples:
                successes += 1
                if successes >= required_runs:
                    return

            # If we reach a point where it's impossible to hit our target even
            # if every remaining attempt were to succeed, give up early and
            # report failure.
            if (required_runs - successes) > (RUNS - actual_runs):
                break

        event = reflection.get_pretty_function_description(predicate)
        if condition is not None:
            event += "|"
            event += condition_string

        raise HypothesisFalsified(
            f"P({event}) ~ {successes} / {actual_runs} = "
            f"{successes / actual_runs:.2f} < {required_runs / RUNS:.2f}; "
            "rejected"
        )
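
strip_lambda is not part of this snippet; a plausible sketch, assuming it trims the "lambda x:" prefix from a pretty-printed predicate so only the body appears in the event label:

def strip_lambda(description):
    # Hypothetical helper: turn "lambda x: x > 0" into "x > 0".
    head, sep, body = description.partition(':')
    if sep and head.lstrip().startswith('lambda'):
        return body.strip()
    return description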
Example 26
    def __init__(
        self,
        test_function,
        settings=None,
        random=None,
        database_key=None,
        ignore_limits=False,
    ):
        self._test_function = test_function
        self.settings = settings or Settings()
        self.shrinks = 0
        self.finish_shrinking_deadline = None
        self.call_count = 0
        self.valid_examples = 0
        self.random = random or Random(getrandbits(128))
        self.database_key = database_key
        self.ignore_limits = ignore_limits

        # Global dict of per-phase statistics, and a list of per-call stats
        # which transfer to the global dict at the end of each phase.
        self.statistics = {}
        self.stats_per_test_case = []

        self.events_to_strings = WeakKeyDictionary()

        self.interesting_examples = {}
        # We use call_count because there may be few possible valid_examples.
        self.first_bug_found_at = None
        self.last_bug_found_at = None

        self.shrunk_examples = set()

        self.health_check_state = None

        self.tree = DataTree()

        self.best_observed_targets = defaultdict(lambda: NO_SCORE)
        self.best_examples_of_observed_targets = {}

        # We keep the pareto front in the example database if we have one. This
        # is only marginally useful at present, but speeds up local development
        # because it means that large targets will be quickly surfaced in your
        # testing.
        if self.database_key is not None and self.settings.database is not None:
            self.pareto_front = ParetoFront(self.random)
            self.pareto_front.on_evict(self.on_pareto_evict)
        else:
            self.pareto_front = None

        # We want to be able to get the ConjectureData object that results
        # from running a buffer without recalculating, especially during
        # shrinking where we need to know about the structure of the
        # executed test case.
        self.__data_cache = LRUReusedCache(CACHE_SIZE)

        # We ensure that the test has this much stack space remaining, no matter
        # the size of the stack when called, to de-flake RecursionErrors (#2494).
        self.__recursion_limit = sys.getrecursionlimit()
        self.__pending_call_explanation = None
Example 27
def test_small_sum_lists():
    xs = minimal(
        lists(floats()),
        lambda x: len(x) >= 100 and sum(t for t in x
                                        if float(u'inf') > t >= 0) >= 1,
        settings=Settings(average_list_length=200),
    )
    assert 1.0 <= sum(t for t in xs if t >= 0) <= 1.5
Example 28
def test_all_minimal_elements_reify(spec):
    random = Random(
        hashlib.md5((show(spec) +
                     ':test_all_minimal_elements_round_trip_via_the_database'
                     ).encode('utf-8')).digest())
    strat = strategy(spec, Settings(average_list_length=2))
    for elt in minimal_elements(strat, random):
        strat.reify(elt)
Example 29
def test_finds_non_reversible_floats():
    t = minimal(
        lists(floats()),
        lambda xs: not math.isnan(sum(xs)) and sum(xs) != sum(reversed(xs)),
        timeout_after=40,
        settings=Settings(database=None))
    assert len(repr(t)) <= 200
    print(t)
Example 30
def test_can_simplify_lists_of_morphers_of_single_type():
    ms = find(s.lists(morphers),
              lambda x: sum(t.become(s.integers()) for t in x) >= 100,
              settings=Settings(database=None))

    with BuildContext():
        ls = [t.become(s.integers()) for t in ms]
    assert sum(ls) == 100
Example 31
def test_can_define_settings():
    test_description = u'This is a setting just for these tests'
    assert not Settings.default.strict

    x = Settings()
    assert not x.strict

    Settings.define_setting(
        u'a_setting_just_for_these_tests',
        default=3,
        description=test_description,
    )

    assert test_description in Settings.a_setting_just_for_these_tests.__doc__

    assert x.a_setting_just_for_these_tests == 3
    assert Settings().a_setting_just_for_these_tests == 3
Example 32
def test_when_set_to_no_simplifies_runs_failing_example_twice():
    failing = [0]

    @given(integers(), settings=Settings(max_shrinks=0, max_examples=200))
    def foo(x):
        if x > 11:
            note('Lo')
            failing[0] += 1
            assert False

    with Settings(verbosity=Verbosity.normal):
        with raises(AssertionError):
            with capture_out() as out:
                foo()
    assert failing == [2]
    assert u'Falsifying example' in out.getvalue()
    assert u'Lo' in out.getvalue()
Example 34
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.

# END HEADER

from __future__ import division, print_function, absolute_import, \
    unicode_literals

import pytest
from hypothesis import Settings

TEST_DESCRIPTION = 'This is a setting just for these tests'

Settings.define_setting(
    'a_setting_just_for_these_tests',
    default=3,
    description=TEST_DESCRIPTION,
)


def test_has_docstrings():
    assert TEST_DESCRIPTION in Settings.a_setting_just_for_these_tests.__doc__


def setup_function(fn):
    try:
        delattr(Settings.default, 'a_setting_just_for_these_tests')
    except AttributeError:
        pass

Example 35
def teardown_function(fn):
    Settings.load_profile('default')
Example 36
def setup_function(fn):
    Settings.load_profile('nonstrict')
Example 37
def test_picks_up_changes_to_defaults_when_switching_profiles():
    original_default = Settings.default.max_examples
    Settings.load_profile('nonstrict')
    Settings.register_profile('test_settings', Settings())
    Settings.load_profile('test_settings')

    Settings.register_profile('other_test_settings', Settings())
    Settings.default.max_examples = 18
    assert Settings.default.max_examples == 18
    Settings.load_profile('other_test_settings')
    assert Settings.default.max_examples == original_default
    Settings.load_profile('test_settings')
    assert Settings.default.max_examples == 18
Example 38
from decimal import Decimal
from fractions import Fraction
from random import Random

import pytest

import hypothesis.specifiers as s
import hypothesis.strategies as st
from hypothesis import find, Settings, strategy
from hypothesis.errors import InvalidArgument
from tests.common.basic import Bitfields
from hypothesis.internal.compat import text_type, binary_type, \
    integer_types
from hypothesis.searchstrategy.narytree import Leaf, Branch, NAryTree

original_profile = Settings.default

Settings.register_profile(
    'nonstrict', Settings(strict=False)
)


def setup_function(fn):
    Settings.load_profile('nonstrict')


def teardown_function(fn):
    Settings.load_profile('default')


@pytest.mark.parametrize(u'typ', [
    complex, float, bool, Random, type(None), text_type, binary_type,
    Decimal, Fraction,
])
Example 39
from __future__ import division, print_function, absolute_import

import gc
import os
import warnings
from tempfile import mkdtemp

import pytest

from hypothesis import Settings
from hypothesis.settings import set_hypothesis_home_dir

warnings.filterwarnings(u'error', category=UnicodeWarning)

set_hypothesis_home_dir(mkdtemp())

Settings.default.timeout = -1
Settings.default.strict = True

Settings.register_profile(
    'speedy', Settings(
        timeout=1, max_examples=5,
    ))
Settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))


@pytest.fixture(scope=u'function', autouse=True)
def some_fixture():
    gc.collect()
Example 40
def setup_function(fn):
    Settings.load_profile('nonstrict')
    warnings.simplefilter('always', HypothesisDeprecationWarning)
Example 41
def teardown_function(fn):
    Settings.load_profile('default')
    warnings.simplefilter('once', HypothesisDeprecationWarning)
Example 42
def test_cannot_set_non_settings():
    s = Settings()
    with pytest.raises(AttributeError):
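        # 'databas_file' is deliberately misspelled: assigning a name that
        # is not a defined setting must raise AttributeError.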
        s.databas_file = 'some_file'
Example 43
import os

from hypothesis import Settings, Verbosity


Settings.register_profile("dev", Settings(max_examples=10))
Settings.register_profile(
    "debug",
    Settings(max_examples=10, verbosity=Verbosity.verbose),
)
Settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'dev'))
Example 44
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from hypothesis import Settings


class IgnoreImplicitWait:
    def __init__(self, driver, default_wait):
        self._driver = driver
        self._default_wait = default_wait

        self._driver.implicitly_wait(0)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._driver.implicitly_wait(self._default_wait)


Settings.register_profile('selenium', Settings(max_examples=1, timeout=0))
Settings.load_profile('selenium')


class SeleniumTestCase(StaticLiveServerTestCase):
    selenium_implicit_wait = 30
    _driver = None

    def __init__(self, *args, **kwargs):
        self.browser_tag = _browser_tag
        super().__init__(*args, **kwargs)

    @property
    def driver(self):
        if not SeleniumTestCase._driver:
            if _use_remote_driver:
Example 45
"""Message passing with lazy connection handling and encryption"""
try:  # pragma: no cover
    import os
    from hypothesis import Settings
    Settings.register_profile("ci", Settings(
        max_examples=10000
    ))
    Settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default'))
except (ImportError, AttributeError):  # pragma: no cover
    pass

import asyncio
import ssl

from .chirp import Chirp  # noqa
from .const import Config, MessageType  # noqa
from .struct import Message, SelfConnectError  # noqa

SendErrors = (
    ssl.SSLError,
    asyncio.TimeoutError,
    asyncio.IncompleteReadError,
    ConnectionRefusedError,
    ConnectionResetError,
    BrokenPipeError,
)
"""Convenience tuple to except all common send errors."""

__all__ = (
    'Chirp',
    'Message',