Example #1
def test_distribution_is_correctly_translated(inter, rnd):
    assert inter == sorted(inter)
    lower, c1, c2, upper = inter
    d = ConjectureData(
        draw_bytes=lambda data, n, distribution: distribution(rnd, n),
        max_length=10 ** 6
    )
    assert d.draw(interval(lower, upper, c1, lambda r: c2)) == c2
    assert d.draw(interval(lower, upper, c2, lambda r: c1)) == c1
def test_going_from_interesting_to_invalid_is_flaky():
    tree = DataTree()
    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)

    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.conclude_test(Status.INVALID)
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    blob = hbytes(blob)
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(f, random=Random(0), settings=settings(
        suppress_health_check=HealthCheck.all(), timeout=unlimited,
        phases=(Phase.shrink,), database=None, verbosity=Verbosity.debug
    ), database_key=None)

    runner.test_function(ConjectureData.for_buffer(blob))

    assert runner.interesting_examples

    v, = runner.interesting_examples.values()

    shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING)

    shrinker.clear_passes()
    shrinker.add_new_pass('minimize_individual_blocks')

    shrinker.shrink()

    v = shrinker.shrink_target

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
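    # Worked illustration (added for clarity, not part of the original test,
    # and assuming the sizes are the power-of-two bit widths described above,
    # e.g. 8, 16, 32, ...): for n == 300, n.bit_length() == 9, so
    # bits_needed == 10, the smallest size >= 10 is 16, bytes_needed == 2,
    # and the expected buffer length is 3 + 2 == 5.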
    bits_needed = 1 + n.bit_length()
    actual_bits_needed = min(
        [s for s in WideRangeIntStrategy.sizes if s >= bits_needed])
    bytes_needed = actual_bits_needed // 8
    # 3 extra bytes: two for the sampler, one for the capping value.
    assert len(v.buffer) == 3 + bytes_needed
def test_changing_n_bits_is_flaky_in_prefix():
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)

    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    with pytest.raises(Flaky):
        data.draw_bits(2)
def test_concluding_with_overrun_at_prefix_is_not_flaky():
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)

    data = ConjectureData.for_buffer(b"", observer=tree.new_observer())
    with pytest.raises(StopTest):
        data.conclude_test(Status.OVERRUN)
def test_changing_value_of_forced_is_flaky():
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1, forced=1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)

    data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer())

    with pytest.raises(Flaky):
        data.draw_bits(1, forced=0)
def test_coin_biased_towards_falsehood():
    p = 1.0 / 500

    for i in range(255):
        assert not cu.biased_coin(ConjectureData.for_buffer([i]), p)

    second_order = [
        cu.biased_coin(ConjectureData.for_buffer([255, i]), p) for i in range(255)
    ]
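    # With a leading byte of 255 the coin falls through to drawing a second
    # byte; the assertions below check that both outcomes are then reachable.
    # (This comment is an added reading of the test, not from the original.)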

    assert False in second_order
    assert True in second_order
def test_will_avoid_exhausted_branches_for_necessary_prefix():
    tree = DataTree()
    data = ConjectureData.for_buffer([0], observer=tree.new_observer())
    data.draw_bits(1)
    data.freeze()

    data = ConjectureData.for_buffer([1, 1], observer=tree.new_observer())
    data.draw_bits(1)
    data.draw_bits(8)
    data.freeze()

    assert list(tree.find_necessary_prefix_for_novelty()) == [1]
def test_extending_past_conclusion_is_flaky():
    tree = DataTree()
    data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer())
    data.draw_bits(1)
    with pytest.raises(StopTest):
        data.conclude_test(Status.INTERESTING)

    data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer())
    data.draw_bits(1)

    with pytest.raises(Flaky):
        data.draw_bits(1)
def test_draw_write_round_trip(f):
    d = ConjectureData.for_buffer(hbytes(10))
    flt.write_float(d, f)
    d2 = ConjectureData.for_buffer(d.buffer)
    g = flt.draw_float(d2)

    # f == f is False only for NaN, so the value-equality check is skipped
    # for NaN; the bit-level comparison below covers that case.
    if f == f:
        assert f == g

    assert float_to_int(f) == float_to_int(g)

    d3 = ConjectureData.for_buffer(d2.buffer)
    flt.draw_float(d3)
    assert d3.buffer == d2.buffer
 def test_foreign_key_primary(self, buf):
     # Regression test for #1307
     company_strategy = from_model(Company, name=just("test"))
     strategy = from_model(
         CompanyExtension, company=company_strategy, self_modifying=just(2)
     )
     try:
         ConjectureData.for_buffer(buf).draw(strategy)
     except HypothesisException:
         reject()
     # Draw again with the same buffer. This will cause a duplicate
     # primary key.
     ConjectureData.for_buffer(buf).draw(strategy)
     assert CompanyExtension.objects.all().count() == 1
def test_child_becomes_exhausted_after_split():
    tree = DataTree()
    data = ConjectureData.for_buffer([0, 0], observer=tree.new_observer())
    data.draw_bits(8)
    data.draw_bits(8, forced=0)
    data.freeze()

    data = ConjectureData.for_buffer([1, 0], observer=tree.new_observer())
    data.draw_bits(8)
    data.draw_bits(8)
    data.freeze()

    assert not tree.is_exhausted
    assert tree.root.transition.children[0].is_exhausted
def test_avoids_zig_zag_trap(p):
    b, marker, lower_bound = p

    random.seed(0)

    b = hbytes(b)
    marker = hbytes(marker)

    n_bits = 8 * (len(b) + 1)

    def test_function(data):
        m = data.draw_bits(n_bits)
        if m < lower_bound:
            data.mark_invalid()
        n = data.draw_bits(n_bits)
        if data.draw_bytes(len(marker)) != marker:
            data.mark_invalid()
        if abs(m - n) == 1:
            data.mark_interesting()

    runner = ConjectureRunner(
        test_function, database_key=None, settings=settings(
            base_settings,
            phases=(Phase.generate, Phase.shrink)
        )
    )

    runner.test_function(ConjectureData.for_buffer(
        b + hbytes([0]) + b + hbytes([1]) + marker))

    assert runner.interesting_examples

    runner.run()

    v, = runner.interesting_examples.values()

    data = ConjectureData.for_buffer(v.buffer)

    m = data.draw_bits(n_bits)
    n = data.draw_bits(n_bits)
    assert m == lower_bound
    if m == 0:
        assert n == 1
    else:
        assert n == m - 1

    budget = 2 * n_bits * ceil(log(n_bits, 2)) + 2

    assert runner.shrinks <= budget
def test_will_generate_novel_prefix_to_avoid_exhausted_branches():
    tree = DataTree()
    data = ConjectureData.for_buffer([1], observer=tree.new_observer())
    data.draw_bits(1)
    data.freeze()

    data = ConjectureData.for_buffer([0, 1], observer=tree.new_observer())
    data.draw_bits(1)
    data.draw_bits(8)
    data.freeze()

    prefix = list(tree.generate_novel_prefix(Random(0)))

    assert len(prefix) == 2
    assert prefix[0] == 0
def test_can_observe_draws():
    class LoggingObserver(DataObserver):
        def __init__(self):
            self.log = []

        def draw_bits(self, *args):
            self.log.append(("draw",) + args)

        def conclude_test(self, *args):
            assert x.frozen
            self.log.append(("concluded",) + args)

    observer = LoggingObserver()
    x = ConjectureData.for_buffer(hbytes([1, 2, 3]), observer=observer)

    x.draw_bits(1)
    x.draw_bits(7, forced=10)
    x.draw_bits(8)
    with pytest.raises(StopTest):
        x.conclude_test(Status.INTERESTING, interesting_origin="neat")

    assert observer.log == [
        ("draw", 1, False, 1),
        ("draw", 7, True, 10),
        ("draw", 8, False, 3),
        ("concluded", Status.INTERESTING, "neat"),
    ]
def test_exhaustive_enumeration(prefix, bits, seed):
    seen = set()

    def f(data):
        if prefix:
            data.write(hbytes(prefix))
            assert len(data.buffer) == len(prefix)
        k = data.draw_bits(bits)
        assert k not in seen
        seen.add(k)

    size = 2 ** bits

    seen_prefixes = set()

    runner = ConjectureRunner(
        f, settings=settings(database=None, max_examples=size),
        random=Random(seed),
    )
    with pytest.raises(RunIsComplete):
        runner.cached_test_function(b'')
        for _ in hrange(size):
            p = runner.generate_novel_prefix()
            assert p not in seen_prefixes
            seen_prefixes.add(p)
            data = ConjectureData.for_buffer(
                hbytes(p + hbytes(2 + len(prefix))))
            runner.test_function(data)
            assert data.status == Status.VALID
            node = 0
            for b in data.buffer:
                node = runner.tree[node][b]
            assert node in runner.dead
    assert len(seen) == size
def test_exhaustive_enumeration_of_partial_buffer():
    seen = set()

    def f(data):
        k = data.draw_bytes(2)
        assert k[1] == 0
        assert k not in seen
        seen.add(k)

    seen_prefixes = set()

    runner = ConjectureRunner(
        f, settings=settings(database=None, max_examples=256, buffer_size=2),
        random=Random(0),
    )
    with pytest.raises(RunIsComplete):
        runner.cached_test_function(b'')
        for _ in hrange(256):
            p = runner.generate_novel_prefix()
            assert p not in seen_prefixes
            seen_prefixes.add(p)
            data = ConjectureData.for_buffer(hbytes(p + hbytes(2)))
            runner.test_function(data)
            assert data.status == Status.VALID
            node = 0
            for b in data.buffer:
                node = runner.tree[node][b]
            assert node in runner.dead
    assert len(seen) == 256
Example #18
 def incorporate_new_buffer(self, buffer):
     if buffer in self.seen:
         return False
     assert self.last_data.status == Status.INTERESTING
     if (
         self.settings.timeout > 0 and
         time.time() >= self.start_time + self.settings.timeout
     ):
         self.exit_reason = ExitReason.timeout
         raise RunIsComplete()
     buffer = buffer[:self.last_data.index]
     if sort_key(buffer) >= sort_key(self.last_data.buffer):
         return False
     assert sort_key(buffer) <= sort_key(self.last_data.buffer)
     data = ConjectureData.for_buffer(buffer)
     self.test_function(data)
     if self.consider_new_test_data(data):
         self.shrinks += 1
         self.last_data = data
         if self.shrinks >= self.settings.max_shrinks:
             self.exit_reason = ExitReason.max_shrinks
             raise RunIsComplete()
         self.last_data = data
         self.changed += 1
         return True
     return False
Example #19
    def _run(self):
        self.last_data = None
        self.start_time = time.time()

        self.reuse_existing_examples()
        self.generate_new_examples()

        if (
            Phase.shrink not in self.settings.phases or
            not self.interesting_examples
        ):
            self.exit_with(ExitReason.finished)

        for prev_data in sorted(
            self.interesting_examples.values(),
            key=lambda d: sort_key(d.buffer)
        ):
            assert prev_data.status == Status.INTERESTING
            data = ConjectureData.for_buffer(prev_data.buffer)
            self.test_function(data)
            if data.status != Status.INTERESTING:
                self.exit_with(ExitReason.flaky)

        while len(self.shrunk_examples) < len(self.interesting_examples):
            target, self.last_data = min([
                (k, v) for k, v in self.interesting_examples.items()
                if k not in self.shrunk_examples],
                key=lambda kv: (sort_key(kv[1].buffer), sort_key(repr(kv[0]))),
            )
            self.debug('Shrinking %r' % (target,))
            assert self.last_data.interesting_origin == target
            self.shrink()
            self.shrunk_examples.add(target)
        self.exit_with(ExitReason.finished)
def test_saves_data_while_shrinking(monkeypatch):
    key = b'hi there'
    n = 5
    db = InMemoryExampleDatabase()
    assert list(db.fetch(key)) == []
    seen = set()

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples',
        lambda runner: runner.test_function(
            ConjectureData.for_buffer([255] * 10)))

    def f(data):
        x = data.draw_bytes(10)
        if sum(x) >= 2000 and len(seen) < n:
            seen.add(hbytes(x))
        if hbytes(x) in seen:
            data.mark_interesting()
    runner = ConjectureRunner(
        f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(seen) == n
    in_db = non_covering_examples(db)
    assert in_db.issubset(seen)
    assert in_db == seen
def cleanup():
    files = glob('hypothesis-examples/raw/*')
    Random().shuffle(files)

    for f in files:
        try:
            with open(f, 'rb') as i:
                data = ConjectureData.for_buffer(i.read())
        except FileNotFoundError:
            continue

        with tempfile.TemporaryDirectory() as d:
            prog = os.path.join(d, 'test.c')

            try:
                gen(data, prog)
            except StopTest:
                continue

            name = os.path.basename(f)

            if not is_interesting(prog):
                print(f"Removing {name}")
                rmf(os.path.join(raw, name))
                rmf(os.path.join(programs, name + '.c'))
def sample(seed, count, max_size):
    random = Random(seed)

    names = os.listdir(raw)
    names.sort()
    random.shuffle(names)

    printed = 0

    for n in names:
        with open(os.path.join(raw, n), 'rb') as i:
            buffer = i.read()
        if max_size > 0 and len(buffer) > max_size:
            continue

        initial_data = ConjectureData.for_buffer(buffer)
        prog = os.path.join(programs, n + '.c')
        with open(prog, 'r') as i:
            already_good = is_valid(prog)

        if not already_good:
            try:
                gen(initial_data, prog)
            except StopTest:
                continue

        if not interesting_reason(prog, printing=False):
            continue

        print(n)
        printed += 1
        if printed >= count:
            break
def test_draw_past_end_sets_overflow():
    x = ConjectureData.for_buffer(bytes(5))
    with pytest.raises(StopTest) as e:
        x.draw_bytes(6)
    assert e.value.testcounter == x.testcounter
    assert x.frozen
    assert x.status == Status.OVERRUN
def test_can_get_odd_number_of_bits():
    counts = Counter()
    for i in range(256):
        x = cu.getrandbits(ConjectureData.for_buffer([i]), 3)
        assert 0 <= x <= 7
        counts[x] += 1
    assert len(set(counts.values())) == 1
def test_database_uses_values_from_secondary_key():
    key = b'key'
    database = InMemoryExampleDatabase()

    def f(data):
        if data.draw_bits(8) >= 5:
            data.mark_interesting()
        else:
            data.mark_invalid()

    runner = ConjectureRunner(f, settings=settings(
        max_examples=1, buffer_size=1024,
        database=database, suppress_health_check=HealthCheck.all(),
    ), database_key=key)

    for i in range(10):
        database.save(runner.secondary_key, hbytes([i]))

    runner.test_function(ConjectureData.for_buffer(hbytes([10])))
    assert runner.interesting_examples

    assert len(set(database.fetch(key))) == 1
    assert len(set(database.fetch(runner.secondary_key))) == 10

    runner.clear_secondary_key()

    assert len(set(database.fetch(key))) == 1
    assert set(
        map(int_from_bytes, database.fetch(runner.secondary_key))
    ) == set(range(6, 11))

    v, = runner.interesting_examples.values()

    assert list(v.buffer) == [5]
def test_compact_blocks_during_generation():
    d = ConjectureData.for_buffer([1] * 10)
    for _ in hrange(5):
        d.draw_bits(1)
    assert len(list(d.blocks)) == 5
    for _ in hrange(5):
        d.draw_bits(1)
    assert len(list(d.blocks)) == 10
def test_can_write_empty_string():
    d = ConjectureData.for_buffer([1, 1, 1])
    d.draw_bits(1)
    d.write(hbytes())
    d.draw_bits(1)
    d.draw_bits(0, forced=0)
    d.draw_bits(1)
    assert d.buffer == hbytes([1, 1, 1])
Example #28
 def new_buffer(self):
     self.last_data = ConjectureData(
         max_length=self.settings.buffer_size,
         draw_bytes=lambda data, n, distribution:
         distribution(self.random, n)
     )
     self.test_function(self.last_data)
     self.last_data.freeze()
def test_prescreen_with_masked_byte_agrees_with_results(byte_a, byte_b):
    def f(data):
        data.draw_bits(2)

    runner = ConjectureRunner(f)

    data_a = ConjectureData.for_buffer(hbytes([byte_a]))
    data_b = ConjectureData.for_buffer(hbytes([byte_b]))

    runner.test_function(data_a)
    prescreen_b = runner.prescreen_buffer(hbytes([byte_b]))
    # Always test buffer B, to check whether the prescreen was correct.
    runner.test_function(data_b)

    # If the prescreen passed, then the buffers should be different.
    # If it failed, then the buffers should be the same.
    assert prescreen_b == (data_a.buffer != data_b.buffer)
def test_detects_too_small_block_starts():
    def f(data):
        data.draw_bytes(8)
        data.mark_interesting()
    runner = ConjectureRunner(f, settings=settings(database=None))
    r = ConjectureData.for_buffer(hbytes(8))
    runner.test_function(r)
    assert r.status == Status.INTERESTING
    assert not runner.prescreen_buffer(hbytes([255] * 7))
Example #31
def test_drawing_certain_coin_still_writes():
    data = ConjectureData.for_buffer([0, 1])
    assert not data.buffer
    assert cu.biased_coin(data, 1)
    assert data.buffer
Example #32
def test_does_draw_data_for_empty_range():
    data = ConjectureData.for_buffer(b"\1")
    assert cu.integer_range(data, 1, 1) == 1
    data.freeze()
    assert data.buffer == b"\0"
Example #33
def test_can_draw_arbitrary_fractions(p, b):
    try:
        cu.biased_coin(ConjectureData.for_buffer(b), p)
    except StopTest:
        reject()
Example #34
def test_uniform_float_can_draw_1():
    d = ConjectureData.for_buffer(bytes([255] * 7))
    assert cu.fractional_float(d) == 1.0
    assert len(d.buffer) == 7
Example #35
def find(
    specifier,  # type: SearchStrategy
    condition,  # type: Callable[[Any], bool]
    settings=None,  # type: Settings
    random=None,   # type: Any
    database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``."""
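    # Illustrative usage (an added, hedged example; assumes
    # `hypothesis.strategies` is imported as `st`):
    #     find(st.integers(), lambda x: x >= 100)  # shrinks to 100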
    settings = settings or Settings(
        max_examples=2000,
        max_shrinks=2000,
    )
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__
            ))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(
                    u'Tried non-satisfying example %s' % (nicerepr(result),))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u'Found satisfying example %s' % (last_repr[0],))
                    last_data[0] = data
                elif (
                    sort_key(hbytes(data.buffer)) <
                    sort_key(last_data[0].buffer)
                ) and nicerepr(result) != last_repr[0]:
                    last_repr[0] = nicerepr(result)
                    report(u'Shrunk example to %s' % (last_repr[0],))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()
    start = time.time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.interesting_examples:
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            return data.draw(search)
    if runner.valid_examples == 0 and (
        runner.exit_reason != ExitReason.finished
    ):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((  # pragma: no cover
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))

        else:
            raise Unsatisfiable(
                'Unable to satisfy assumptions of %s.' %
                (get_pretty_function_description(condition),)
            )

    raise NoSuchExample(get_pretty_function_description(condition))
Example #36
def test_choice():
    assert cu.choice(ConjectureData.for_buffer([1]), [1, 2, 3]) == 2
Example #37
def test_restricted_bits():
    assert (cu.integer_range(ConjectureData.for_buffer([1, 0, 0, 0, 0]),
                             lower=0,
                             upper=2**64 - 1) == 0)
Example #38
def test_drawing_impossible_coin_still_writes():
    data = ConjectureData.for_buffer([1, 0])
    assert not data.buffer
    assert not cu.biased_coin(data, 0)
    assert data.buffer
Example #39
        def wrapped_test(*arguments, **kwargs):
            # Tell pytest to omit the body of this function from tracebacks
            __tracebackhide__ = True

            test = wrapped_test.hypothesis.inner_test

            if getattr(test, "is_hypothesis_test", False):
                raise InvalidArgument(
                    (
                        "You have applied @given to the test %s more than once, which "
                        "wraps the test several times and is extremely slow. A "
                        "similar effect can be gained by combining the arguments "
                        "of the two calls to given. For example, instead of "
                        "@given(booleans()) @given(integers()), you could write "
                        "@given(booleans(), integers())"
                    )
                    % (test.__name__,)
                )

            settings = wrapped_test._hypothesis_internal_use_settings

            random = get_random_for_wrapped_test(test, wrapped_test)

            processed_args = process_arguments_to_given(
                wrapped_test, arguments, kwargs, given_kwargs, argspec, settings,
            )
            arguments, kwargs, test_runner, search_strategy = processed_args

            runner = getattr(search_strategy, "runner", None)
            if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
                msg = (
                    "You have applied @given to the method %s, which is "
                    "used by the unittest runner but is not itself a test."
                    "  This is not useful in any way." % test.__name__
                )
                fail_health_check(settings, msg, HealthCheck.not_a_test_method)
            if bad_django_TestCase(runner):  # pragma: no cover
                # Covered by the Django tests, but not the pytest coverage task
                raise InvalidArgument(
                    "You have applied @given to a method on %s, but this "
                    "class does not inherit from the supported versions in "
                    "`hypothesis.extra.django`.  Use the Hypothesis variants "
                    "to ensure that each example is run in a separate "
                    "database transaction." % qualname(type(runner))
                )

            state = StateForActualGivenExecution(
                test_runner, search_strategy, test, settings, random, wrapped_test,
            )

            reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure

            # If there was a @reproduce_failure decorator, use it to reproduce
            # the error (or complain that we couldn't). Either way, this will
            # always raise some kind of error.
            if reproduce_failure is not None:
                expected_version, failure = reproduce_failure
                if expected_version != __version__:
                    raise InvalidArgument(
                        (
                            "Attempting to reproduce a failure from a different "
                            "version of Hypothesis. This failure is from %s, but "
                            "you are currently running %r. Please change your "
                            "Hypothesis version to a matching one."
                        )
                        % (expected_version, __version__)
                    )
                try:
                    state.execute_once(
                        ConjectureData.for_buffer(decode_failure(failure)),
                        print_example=True,
                        is_final=True,
                    )
                    raise DidNotReproduce(
                        "Expected the test to raise an error, but it "
                        "completed successfully."
                    )
                except StopTest:
                    raise DidNotReproduce(
                        "The shape of the test data has changed in some way "
                        "from where this blob was defined. Are you sure "
                        "you're running the same test?"
                    )
                except UnsatisfiedAssumption:
                    raise DidNotReproduce(
                        "The test data failed to satisfy an assumption in the "
                        "test. Have you added it since this blob was "
                        "generated?"
                    )

            # There was no @reproduce_failure, so start by running any explicit
            # examples from @example decorators.

            execute_explicit_examples(state, wrapped_test, arguments, kwargs)

            # If there were any explicit examples, they all ran successfully.
            # The next step is to use the Conjecture engine to run the test on
            # many different inputs.

            if not (
                Phase.reuse in settings.phases or Phase.generate in settings.phases
            ):
                return

            try:
                if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
                    subTest = runner.subTest
                    try:
                        runner.subTest = fake_subTest
                        state.run_engine()
                    finally:
                        runner.subTest = subTest
                else:
                    state.run_engine()
            except BaseException as e:
                # The exception caught here should either be an actual test
                # failure (or MultipleFailures), or some kind of fatal error
                # that caused the engine to stop.

                generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
                with local_settings(settings):
                    if not (state.failed_normally or generated_seed is None):
                        if running_under_pytest:
                            report(
                                "You can add @seed(%(seed)d) to this test or "
                                "run pytest with --hypothesis-seed=%(seed)d "
                                "to reproduce this failure." % {"seed": generated_seed}
                            )
                        else:
                            report(
                                "You can add @seed(%d) to this test to "
                                "reproduce this failure." % (generated_seed,)
                            )
                    # The dance here is to avoid showing users long tracebacks
                    # full of Hypothesis internals they don't care about.
                    # We have to do this inline, to avoid adding another
                    # internal stack frame just when we've removed the rest.
                    #
                    # Using a variable for our trimmed error ensures that the line
                    # which will actually appear in tracebacks is as clear as
                    # possible - "raise the_error_hypothesis_found".
                    the_error_hypothesis_found = e.with_traceback(
                        get_trimmed_traceback()
                    )
                    raise the_error_hypothesis_found
Example #40
        def wrapped_test(*arguments, **kwargs):
            # Tell pytest to omit the body of this function from tracebacks
            __tracebackhide__ = True

            test = wrapped_test.hypothesis.inner_test

            if getattr(test, 'is_hypothesis_test', False):
                note_deprecation(
                    ('You have applied @given to test: %s more than once. In '
                     'future this will be an error. Applying @given twice '
                     'wraps the test twice, which can be extremely slow. A '
                     'similar effect can be gained by combining the arguments '
                     'of the two calls to given. For example, instead of '
                     '@given(booleans()) @given(integers()), you could write '
                     '@given(booleans(), integers())') % (test.__name__, ))

            settings = wrapped_test._hypothesis_internal_use_settings

            random = get_random_for_wrapped_test(test, wrapped_test)

            if infer in generator_kwargs.values():
                hints = get_type_hints(test)
            for name in [
                    name for name, value in generator_kwargs.items()
                    if value is infer
            ]:
                if name not in hints:
                    raise InvalidArgument(
                        'passed %s=infer for %s, but %s has no type annotation'
                        % (name, test.__name__, name))
                generator_kwargs[name] = st.from_type(hints[name])

            processed_args = process_arguments_to_given(
                wrapped_test, arguments, kwargs, generator_arguments,
                generator_kwargs, argspec, test, settings)
            arguments, kwargs, test_runner, search_strategy = processed_args

            runner = getattr(search_strategy, 'runner', None)
            if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
                msg = ('You have applied @given to the method %s, which is '
                       'used by the unittest runner but is not itself a test.'
                       '  This is not useful in any way.' % test.__name__)
                fail_health_check(settings, msg, HealthCheck.not_a_test_method)
            if bad_django_TestCase(runner):  # pragma: no cover
                # Covered by the Django tests, but not the pytest coverage task
                raise InvalidArgument(
                    'You have applied @given to a method on %s, but this '
                    'class does not inherit from the supported versions in '
                    '`hypothesis.extra.django`.  Use the Hypothesis variants '
                    'to ensure that each example is run in a separate '
                    'database transaction.' % qualname(type(runner)))

            state = StateForActualGivenExecution(
                test_runner,
                search_strategy,
                test,
                settings,
                random,
                had_seed=wrapped_test._hypothesis_internal_use_seed)

            reproduce_failure = \
                wrapped_test._hypothesis_internal_use_reproduce_failure

            if reproduce_failure is not None:
                expected_version, failure = reproduce_failure
                if expected_version != __version__:
                    raise InvalidArgument(
                        ('Attempting to reproduce a failure from a different '
                         'version of Hypothesis. This failure is from %s, but '
                         'you are currently running %r. Please change your '
                         'Hypothesis version to a matching one.') %
                        (expected_version, __version__))
                try:
                    state.execute(
                        ConjectureData.for_buffer(decode_failure(failure)),
                        print_example=True,
                        is_final=True,
                    )
                    raise DidNotReproduce(
                        'Expected the test to raise an error, but it '
                        'completed successfully.')
                except StopTest:
                    raise DidNotReproduce(
                        'The shape of the test data has changed in some way '
                        'from where this blob was defined. Are you sure '
                        "you're running the same test?")
                except UnsatisfiedAssumption:
                    raise DidNotReproduce(
                        'The test data failed to satisfy an assumption in the '
                        'test. Have you added it since this blob was '
                        'generated?')

            execute_explicit_examples(test_runner, test, wrapped_test,
                                      settings, arguments, kwargs)

            if settings.max_examples <= 0:
                return

            if not (Phase.reuse in settings.phases
                    or Phase.generate in settings.phases):
                return

            try:
                if isinstance(runner, TestCase) and hasattr(runner, 'subTest'):
                    subTest = runner.subTest
                    try:
                        setattr(runner, 'subTest', fake_subTest)
                        state.run()
                    finally:
                        setattr(runner, 'subTest', subTest)
                else:
                    state.run()
            except BaseException:
                generated_seed = \
                    wrapped_test._hypothesis_internal_use_generated_seed
                if generated_seed is not None and not state.failed_normally:
                    with local_settings(settings):
                        if running_under_pytest:
                            report(
                                'You can add @seed(%(seed)d) to this test or '
                                'run pytest with --hypothesis-seed=%(seed)d '
                                'to reproduce this failure.' %
                                {'seed': generated_seed})
                        else:
                            report('You can add @seed(%d) to this test to '
                                   'reproduce this failure.' %
                                   (generated_seed, ))
                raise
Example #41
def test_too_small_to_be_useful_coin():
    assert not cu.biased_coin(ConjectureData.for_buffer([1]), 0.5**65)
Example #42
    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        if global_force_seed is None:
            database_key = str_to_bytes(fully_qualified_name(self.test))
        else:
            database_key = None
        self.start_time = benchmark_time()
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        try:
            runner.run()
        finally:
            self.used_examples_from_database = \
                runner.used_examples_from_database
        note_engine_for_statistics(runner)
        run_time = benchmark_time() - self.start_time

        self.used_examples_from_database = runner.used_examples_from_database

        if runner.used_examples_from_database:
            if self.settings.derandomize:
                note_deprecation((
                    'In future derandomize will imply database=None, but your '
                    'test: %s is currently using examples from the database. '
                    'To get the future behaviour, update your settings to '
                    'include database=None.') % (self.test.__name__, ))
            if self.__had_seed:
                note_deprecation(
                    ('In future use of @seed will imply database=None in your '
                     'settings, but your test: %s is currently using examples '
                     'from the database. To get the future behaviour, update '
                     'your settings for this test to include database=None.') %
                    (self.test.__name__, ))

        timed_out = runner.exit_reason == ExitReason.timeout
        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True)
        else:
            if runner.valid_examples == 0:
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for %s. Only found %d examples in %.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        'Unable to satisfy assumptions of hypothesis %s.' %
                        (get_pretty_function_description(self.test), ))

        if not self.falsifying_examples:
            return

        self.failed_normally = True

        flaky = 0

        for falsifying_example in self.falsifying_examples:
            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert falsifying_example.__expected_exception is not None
            try:
                self.execute(ran_example,
                             print_example=True,
                             is_final=True,
                             expected_failure=(
                                 falsifying_example.__expected_exception,
                                 falsifying_example.__expected_traceback,
                             ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.')
            except BaseException:
                if len(self.falsifying_examples) <= 1:
                    raise
                report(traceback.format_exc())
            finally:  # pragma: no cover
                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail - and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob is not PrintSettings.NEVER:
                    failure_blob = encode_failure(falsifying_example.buffer)
                    # Have to use the example we actually ran, not the original
                    # falsifying example! Otherwise we won't catch problems
                    # where the repr of the generated example doesn't parse.
                    can_use_repr = ran_example.can_reproduce_example_from_repr
                    if (self.settings.print_blob is PrintSettings.ALWAYS or
                        (self.settings.print_blob is PrintSettings.INFER
                         and self.settings.verbosity >= Verbosity.normal
                         and not can_use_repr and len(failure_blob) < 200)):
                        report((
                            '\n'
                            'You can reproduce this example by temporarily '
                            'adding @reproduce_failure(%r, %r) as a decorator '
                            'on your test case') % (
                                __version__,
                                failure_blob,
                            ))
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ('Hypothesis found %d distinct failures, but %d of them '
                 'exhibited some sort of flaky behaviour.') %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(('Hypothesis found %d distinct failures.') %
                                   (len(self.falsifying_examples), ))
def test_none_strategy_does_not_draw():
    data = ConjectureData.for_buffer(b"")
    s = none()
    assert s.do_draw(data) is None
def test_just_strategy_does_not_draw():
    data = ConjectureData.for_buffer(b"")
    s = just("hello")
    assert s.do_draw(data) == "hello"
Example #45
 def new_conjecture_data_for_buffer(self, buffer):
     return ConjectureData.for_buffer(buffer,
                                      observer=self.tree.new_observer())
Example #46
def test_integer_range_lower_equals_upper():
    data = ConjectureData.for_buffer([0])

    assert cu.integer_range(data, lower=0, upper=0, center=0) == 0

    assert len(data.buffer) == 1
Example #47
def test_assert_biased_coin_always_treats_one_as_true():
    assert cu.biased_coin(ConjectureData.for_buffer([0, 1]), p=1.0 / 257)
Example #48
def test_center_in_middle_above():
    assert (cu.integer_range(ConjectureData.for_buffer([1, 0]),
                             lower=0,
                             upper=10,
                             center=5) == 5)
Example #49
def test_biased_coin_can_be_forced():
    assert cu.biased_coin(ConjectureData.for_buffer([0]), p=0.5, forced=True)
    assert not cu.biased_coin(
        ConjectureData.for_buffer([1]), p=0.5, forced=False)
Example #50
    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        database_key = str_to_bytes(fully_qualified_name(self.test))
        start_time = time.time()
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )
        runner.run()
        note_engine_for_statistics(runner)
        run_time = time.time() - start_time
        timed_out = (self.settings.timeout > 0
                     and run_time >= self.settings.timeout)
        if runner.last_data is None:
            return
        if runner.last_data.status == Status.INTERESTING:
            self.falsifying_example = runner.last_data.buffer
            if self.settings.database is not None:
                self.settings.database.save(database_key,
                                            self.falsifying_example)
        else:
            if runner.valid_examples < min(
                    self.settings.min_satisfying_examples,
                    self.settings.max_examples,
            ) and not (runner.exit_reason == ExitReason.finished
                       and self.at_least_one_success):
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for '
                         '%s. Only found %d examples in ' + '%.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        ('Unable to satisfy assumptions of hypothesis '
                         '%s. Only %d examples considered '
                         'satisfied assumptions') % (
                             get_pretty_function_description(self.test),
                             runner.valid_examples,
                         ))

        if self.falsifying_example is None:
            return

        assert self.last_exception is not None

        try:
            with self.settings:
                self.test_runner(
                    ConjectureData.for_buffer(self.falsifying_example),
                    reify_and_execute(self.search_strategy,
                                      self.test,
                                      print_example=True,
                                      is_final=True))
        except (UnsatisfiedAssumption, StopTest):
            report(traceback.format_exc())
            raise Flaky('Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.')

        report(
            'Failed to reproduce exception. Expected: \n' +
            self.last_exception, )

        filter_message = (
            'Unreliable test data: Failed to reproduce a failure '
            'and then when it came to recreating the example in '
            'order to print the test data with a flaky result '
            'the example was filtered out (by e.g. a '
            'call to filter in your strategy) when we didn\'t '
            'expect it to be.')

        try:
            self.test_runner(
                ConjectureData.for_buffer(self.falsifying_example),
                reify_and_execute(self.search_strategy,
                                  test_is_flaky(self.test,
                                                self.repr_for_last_exception),
                                  print_example=True,
                                  is_final=True))
        except (UnsatisfiedAssumption, StopTest):
            raise Flaky(filter_message)
Example #51
def test_fractional_float():
    assert cu.fractional_float(ConjectureData.for_buffer([0] * 8)) == 0.0
Example #52
def find(specifier, condition, settings=None, random=None, database_key=None):
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument('Expected SearchStrategy but got %r of type %s' %
                              (specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' %
                           (nicerepr(result), ))
                else:
                    report(lambda: u'Shrunk example to %s' %
                           (nicerepr(result), ))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    start = time.time()
    runner = ConjectureRunner(
        template_condition,
        settings=settings,
        random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.last_data.status == Status.INTERESTING:
        data = ConjectureData.for_buffer(runner.last_data.buffer)
        with BuildContext(data):
            return data.draw(search)
    if (runner.valid_examples <= settings.min_satisfying_examples
            and runner.exit_reason != ExitReason.finished):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout(
                ('Ran out of time before finding enough valid examples for '
                 '%s. Only %d valid examples found in %.2f seconds.') %
                (get_pretty_function_description(condition),
                 runner.valid_examples, run_time))

        else:
            raise Unsatisfiable(
                ('Unable to satisfy assumptions of '
                 '%s. Only %d examples considered satisfied assumptions') % (
                     get_pretty_function_description(condition),
                     runner.valid_examples,
                 ))

    raise NoSuchExample(get_pretty_function_description(condition))
Example #53
def test_sampler_shrinks():
    sampler = cu.Sampler([4.0, 8.0, 1.0, 1.0, 0.5])
    assert sampler.sample(ConjectureData.for_buffer([0] * 3)) == 0
Example #54
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    blob = hbytes(blob)
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(
        f,
        random=Random(0),
        settings=settings(
            suppress_health_check=HealthCheck.all(),
            phases=(Phase.shrink,),
            database=None,
            verbosity=Verbosity.debug,
        ),
        database_key=None,
    )

    runner.cached_test_function(blob)

    assert runner.interesting_examples

    (v,) = runner.interesting_examples.values()

    shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING)

    shrinker.fixate_shrink_passes(["minimize_individual_blocks"])

    v = shrinker.shrink_target

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
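    # Worked illustration (added for clarity, not part of the original test,
    # and assuming the sizes are the power-of-two bit widths described above,
    # e.g. 8, 16, 32, ...): for n == 300, n.bit_length() == 9, so
    # bits_needed == 10, the smallest size >= 10 is 16, bytes_needed == 2,
    # and the expected buffer length is 3 + 2 == 5.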
    bits_needed = 1 + n.bit_length()
    actual_bits_needed = min(
        [s for s in WideRangeIntStrategy.sizes if s >= bits_needed]
    )
    bytes_needed = actual_bits_needed // 8
    # 3 extra bytes: two for the sampler, one for the capping value.
    assert len(v.buffer) == 3 + bytes_needed
Example #55
def test_sampler_does_not_draw_minimum_if_zero():
    sampler = cu.Sampler([0, 2, 47])
    assert sampler.sample(ConjectureData.for_buffer([0, 0])) != 0
Example #56
def test_uniform_float_shrinks_to_zero():
    d = ConjectureData.for_buffer(bytes(7))
    assert cu.fractional_float(d) == 0.0
    assert len(d.buffer) == 7
Example #57
def test_fixed_size_bytes_just_draw_bytes():
    from hypothesis.internal.conjecture.data import ConjectureData
    x = ConjectureData.for_buffer(b'foo')
    assert x.draw(binary(min_size=3, max_size=3)) == b'foo'
Example #58
def perform_health_checks(random, settings, test_runner, search_strategy):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    if not settings.perform_health_check:
        return
    if not Settings.default.perform_health_check:
        return

    health_check_random = Random(random.getrandbits(128))
    # We "pre warm" the health check with one draw to give it some
    # time to calculate any cached data. This prevents the case
    # where the first draw of the health check takes ages because
    # of loading unicode data the first time.
    data = ConjectureData(max_length=settings.buffer_size,
                          draw_bytes=lambda data, n, distribution:
                          distribution(health_check_random, n))
    with Settings(settings, verbosity=Verbosity.quiet):
        try:
            test_runner(
                data,
                reify_and_execute(
                    search_strategy,
                    lambda *args, **kwargs: None,
                ))
        except BaseException:
            pass
    count = 0
    overruns = 0
    filtered_draws = 0
    start = time.time()
    while (count < 10 and time.time() < start + 1 and filtered_draws < 50
           and overruns < 20):
        try:
            data = ConjectureData(max_length=settings.buffer_size,
                                  draw_bytes=lambda data, n, distribution:
                                  distribution(health_check_random, n))
            with Settings(settings, verbosity=Verbosity.quiet):
                test_runner(
                    data,
                    reify_and_execute(
                        search_strategy,
                        lambda *args, **kwargs: None,
                    ))
            count += 1
        except UnsatisfiedAssumption:
            filtered_draws += 1
        except StopTest:
            if data.status == Status.INVALID:
                filtered_draws += 1
            else:
                assert data.status == Status.OVERRUN
                overruns += 1
        except InvalidArgument:
            raise
        except Exception:
            escalate_hypothesis_internal_error()
            if (HealthCheck.exception_in_generation
                    in settings.suppress_health_check):
                raise
            report(traceback.format_exc())
            if test_runner is default_new_style_executor:
                fail_health_check(
                    settings,
                    'An exception occurred during data '
                    'generation in initial health check. '
                    'This indicates a bug in the strategy. '
                    'This could either be a Hypothesis bug or '
                    "an error in a function you've passed to "
                    'it to construct your data.',
                    HealthCheck.exception_in_generation,
                )
            else:
                fail_health_check(
                    settings,
                    'An exception occurred during data '
                    'generation in initial health check. '
                    'This indicates a bug in the strategy. '
                    'This could either be a Hypothesis bug or '
                    'an error in a function you\'ve passed to '
                    'it to construct your data. Additionally, '
                    'you have a custom executor, which means '
                    'that this could be your executor failing '
                    'to handle a function which returns None. ',
                    HealthCheck.exception_in_generation,
                )
    if overruns >= 20 or (not count and overruns > 0):
        fail_health_check(
            settings,
            ('Examples routinely exceeded the max allowable size. '
             '(%d examples overran while generating %d valid ones)'
             '. Generating examples this large will usually lead to'
             ' bad results. You should try setting average_size or '
             'max_size parameters on your collections and turning '
             'max_leaves down on recursive() calls.') % (overruns, count),
            HealthCheck.data_too_large)
    if filtered_draws >= 50 or (not count and filtered_draws > 0):
        fail_health_check(
            settings, ('It looks like your strategy is filtering out a lot '
                       'of data. Health check found %d filtered examples but '
                       'only %d good ones. This will make your tests much '
                       'slower, and also will probably distort the data '
                       'generation quite a lot. You should adapt your '
                       'strategy to filter less. This can also be caused by '
                       'a low max_leaves parameter in recursive() calls') %
            (filtered_draws, count), HealthCheck.filter_too_much)
    runtime = time.time() - start
    if runtime > 1.0 or count < 10:
        fail_health_check(
            settings,
            ('Data generation is extremely slow: Only produced '
             '%d valid examples in %.2f seconds (%d invalid ones '
             'and %d exceeded maximum size). Try decreasing '
             "size of the data you're generating (with e.g."
             'average_size or max_leaves parameters).') %
            (count, runtime, filtered_draws, overruns),
            HealthCheck.too_slow,
        )
Example #59
 def new_conjecture_data(self, draw_bytes):
     return ConjectureData(
         draw_bytes=draw_bytes,
         max_length=self.settings.buffer_size,
         observer=self.tree.new_observer(),
     )
Example #60
def test_integer_range_center_default():
    assert (cu.integer_range(ConjectureData.for_buffer([0]),
                             lower=0,
                             upper=10,
                             center=None) == 0)