def test_saves_data_while_shrinking(monkeypatch):
    """Every distinct interesting example discovered during shrinking ends up
    saved in the database, and nothing else does.

    Generation is patched to emit a single all-255 buffer, so every later
    interesting example is found by the shrinker rather than by generation.
    """
    key = b'hi there'
    n = 5
    db = InMemoryExampleDatabase()
    # Sanity check: the database starts empty for this key.
    assert list(db.fetch(key)) == []
    seen = set()
    # Replace generation with a single maximal buffer; all further progress
    # must come from shrinking it.
    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples',
        lambda runner: runner.test_function(
            ConjectureData.for_buffer([255] * 10)))

    def f(data):
        x = data.draw_bytes(10)
        # Accept up to n large buffers as interesting, then freeze the set so
        # shrinking can only land on already-seen members.
        if sum(x) >= 2000 and len(seen) < n:
            seen.add(hbytes(x))
        if hbytes(x) in seen:
            data.mark_interesting()

    runner = ConjectureRunner(
        f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(seen) == n
    # The saved (non-covering) examples must be exactly those we accepted.
    in_db = non_covering_examples(db)
    assert in_db.issubset(seen)
    assert in_db == seen
def test_does_not_use_database_when_seed_is_forced(monkeypatch):
    """When a global seed is forced, the example database must never be read."""
    monkeypatch.setattr(core, "global_force_seed", 42)
    db = InMemoryExampleDatabase()
    # Poison fetch: any attempt to read from the database will raise.
    db.fetch = None

    @settings(database=db)
    @given(st.integers())
    def inner(i):
        pass

    inner()
def test_backend_returns_what_you_put_in(xs):
    """Everything saved under a key is fetched back, each value exactly once."""
    db = InMemoryExampleDatabase()
    expected = {}
    for k, v in xs:
        if k not in expected:
            expected[k] = set()
        expected[k].add(v)
        db.save(k, v)
    for k, vals in expected.items():
        fetched = list(db.fetch(k))
        # fetch must not return duplicates...
        assert len(set(fetched)) == len(fetched)
        # ...and must return precisely the distinct values saved.
        assert set(fetched) == vals
def test_replays_slipped_examples_once_initial_bug_is_fixed(fix):
    """After "fixing" one of two recorded bugs, replay finds the other.

    ``fix`` is the exception class whose bug gets fixed between runs; the
    second run must then raise the remaining one.
    """
    target = []
    bug_fixed = False

    @settings(database=InMemoryExampleDatabase(), max_examples=500)
    @given(st.integers())
    def test(i):
        if abs(i) < 1000:
            return
        # First large value seen becomes the TypeError trigger.
        if not target:
            target.append(i)
        if i == target[0]:
            if bug_fixed and fix == TypeError:
                return
            raise TypeError()
        # Second large value seen becomes the ValueError trigger.
        if len(target) == 1:
            target.append(i)
        if bug_fixed and fix == ValueError:
            return
        if i == target[1]:
            raise ValueError()

    with pytest.raises(MultipleFailures):
        test()
    bug_fixed = True
    # With one bug fixed, only the other exception should be replayed.
    with pytest.raises(ValueError if fix == TypeError else TypeError):
        test()
def test_handles_flaky_tests_where_only_one_is_flaky():
    """A single flaky failure reports Flaky; once stable, both bugs surface."""
    flaky_fixed = False
    target = []
    flaky_failed_once = [False]

    @settings(database=InMemoryExampleDatabase(), max_examples=1000)
    @given(st.integers())
    def test(i):
        if abs(i) < 1000:
            return
        # First large value: a stable TypeError bug.
        if not target:
            target.append(i)
        if i == target[0]:
            raise TypeError()
        # The ValueError bug fails only once until "fixed" -> flaky.
        if flaky_failed_once[0] and not flaky_fixed:
            return
        if len(target) == 1:
            target.append(i)
        if i == target[1]:
            flaky_failed_once[0] = True
            raise ValueError()

    with pytest.raises(Flaky):
        test()
    flaky_fixed = True
    # Once the second bug reproduces reliably, both failures are reported.
    with pytest.raises(MultipleFailures):
        test()
def test_will_shrink_covering_examples():
    """Covering (tagged) examples are shrunk: the best survives in the
    database, superseded ones are removed."""
    best = [None]
    replaced = []

    def tagged(data):
        b = hbytes(data.draw_bytes(4))
        if any(b):
            data.add_tag('nonzero')
            # Track the smallest tagged buffer and everything it displaced.
            if best[0] is None:
                best[0] = b
            elif b < best[0]:
                replaced.append(best[0])
                best[0] = b

    db = InMemoryExampleDatabase()
    runner = ConjectureRunner(tagged, settings=settings(
        max_examples=100, phases=no_shrink, buffer_size=1024,
        database=db,
    ), database_key=b'stuff')
    runner.run()
    saved = set(all_values(db))
    assert best[0] in saved
    # Displaced covering examples must have been evicted.
    for r in replaced:
        assert r not in saved
def test_will_save_covering_examples():
    """One example per distinct tag assigned during the run is saved."""
    tags = {}

    def tagged(data):
        b = hbytes(data.draw_bytes(4))
        if b in tags:
            tag = tags[b]
        else:
            # Hand out at most ten distinct tags; later buffers get none.
            tag = len(tags) if len(tags) < 10 else None
            if tag is not None:
                tags[b] = tag
        if tag is not None:
            data.add_tag(tag)

    db = InMemoryExampleDatabase()
    runner = ConjectureRunner(
        tagged,
        settings=settings(
            max_examples=100, phases=no_shrink, buffer_size=1024,
            database=db,
        ),
        database_key=b'stuff',
    )
    runner.run()
    assert len(all_values(db)) == len(tags)
def test_terminates_shrinks(n, monkeypatch):
    """Shrinking stops after MAX_SHRINKS successful shrinks, each of which is
    recorded under the secondary key."""
    from hypothesis.internal.conjecture import engine

    db = InMemoryExampleDatabase()

    def generate_new_examples(self):
        # Always start from the maximal buffer so the (slow) shrinker has the
        # longest possible way down.
        def draw_bytes(data, n):
            return hbytes([255] * n)

        self.test_function(ConjectureData(
            draw_bytes=draw_bytes, max_length=self.settings.buffer_size))

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples', generate_new_examples)
    # Cap the number of shrinks at the parametrized value.
    monkeypatch.setattr(engine, 'MAX_SHRINKS', n)

    runner = ConjectureRunner(slow_shrinker(), settings=settings(
        max_examples=5000, database=db, timeout=unlimited,
    ), random=Random(0), database_key=b'key')
    runner.run()
    last_data, = runner.interesting_examples.values()
    assert last_data.status == Status.INTERESTING
    assert runner.shrinks == n
    # Each successful shrink leaves one entry under the secondary key.
    in_db = set(db.data[runner.secondary_key])
    assert len(in_db) == n
def test_does_print_on_reuse_from_database():
    """@seed is printed for fresh health-check failures but not when a
    failure is replayed from the database."""
    passes_healthcheck = False
    database = InMemoryExampleDatabase()

    @settings(database=database)
    @given(st.integers())
    def test(i):
        assume(passes_healthcheck)
        raise ValueError()

    # First run: everything is filtered out, so the health check fails and
    # the seed is reported.
    with capture_out() as o:
        with pytest.raises(FailedHealthCheck):
            test()
    assert '@seed' in o.getvalue()

    passes_healthcheck = True

    # Second run: a real failure is found and saved; no seed banner since the
    # failure is reproducible from the database.
    with capture_out() as o:
        with pytest.raises(ValueError):
            test()
    assert all_values(database)
    assert '@seed' not in o.getvalue()

    passes_healthcheck = False

    # Back to failing the health check: the seed is printed again.
    with capture_out() as o:
        with pytest.raises(FailedHealthCheck):
            test()
    assert '@seed' in o.getvalue()
def test_respects_max_examples_in_database_usage():
    """Replaying from the database is bounded by max_examples."""
    key = b'a database key'
    database = InMemoryExampleDatabase()
    do_we_care = True
    counter = [0]

    def check(x):
        # Count every invocation so we can verify how many replays happened.
        counter[0] += 1
        return do_we_care and has_a_non_zero_byte(x)

    def stuff():
        try:
            find(
                st.binary(min_size=100), check,
                settings=settings(database=database, max_examples=10),
                database_key=key)
        except NoSuchExample:
            pass

    stuff()
    # The first run saved more examples than max_examples allows replaying.
    assert len(all_values(database)) > 10
    do_we_care = False
    counter[0] = 0
    stuff()
    # Only max_examples (10) database entries may be replayed.
    assert counter == [10]
def test_terminates_shrinks(n, monkeypatch):
    """Shrinking stops after MAX_SHRINKS successful shrinks, each recorded
    under the secondary database key."""
    from hypothesis.internal.conjecture import engine

    db = InMemoryExampleDatabase()

    def generate_new_examples(self):
        # Start from a maximal buffer so the slow shrinker has maximum room.
        self.cached_test_function([255] * 1000)

    monkeypatch.setattr(
        ConjectureRunner, "generate_new_examples", generate_new_examples
    )
    # Cap successful shrinks at the parametrized value.
    monkeypatch.setattr(engine, "MAX_SHRINKS", n)
    runner = ConjectureRunner(
        slow_shrinker(),
        settings=settings(max_examples=5000, database=db),
        random=Random(0),
        database_key=b"key",
    )
    runner.run()
    (last_data,) = runner.interesting_examples.values()
    assert last_data.status == Status.INTERESTING
    assert runner.shrinks == n
    # One secondary-key entry per successful shrink.
    in_db = set(db.data[runner.secondary_key])
    assert len(in_db) == n
def test_clears_out_database_as_things_get_boring():
    """Once stored examples stop being interesting, repeated runs eventually
    drain the database to empty."""
    key = b'a database key'
    database = InMemoryExampleDatabase()
    do_we_care = True

    def stuff():
        try:
            find(
                st.binary(min_size=50),
                lambda x: do_we_care and has_a_non_zero_byte(x),
                settings=settings(database=database, max_examples=10),
                database_key=key)
        except NoSuchExample:
            pass

    stuff()
    assert len(all_values(database)) > 1
    # Flip the predicate off: from now on nothing in the database is useful.
    do_we_care = False
    stuff()
    initial = len(all_values(database))
    assert initial > 0
    # Each boring run should discard some entries; within `initial` runs the
    # database must be empty, otherwise the test fails via the for/else.
    for _ in range(initial):
        stuff()
        keys = len(all_values(database))
        if not keys:
            break
    else:
        assert False
def test_reuse_phase_runs_for_max_examples_if_generation_is_disabled():
    """With only the reuse phase enabled, exactly max_examples stored
    entries are replayed."""
    with deterministic_PRNG():
        db = InMemoryExampleDatabase()
        # Stock the database with 256 distinct one-byte examples.
        for byte in range(256):
            db.save(b"key", bytes([byte]))

        replayed = set()

        def record(data):
            replayed.add(data.draw_bits(8))

        runner = ConjectureRunner(
            record,
            settings=settings(
                max_examples=100, database=db, phases=[Phase.reuse]
            ),
            database_key=b"key",
        )
        runner.run()
        # Only max_examples of the 256 saved entries get replayed.
        assert len(replayed) == 100
def test_saves_incremental_steps_in_database():
    """A successful find leaves more than one incremental shrink step saved."""
    key = b'a database key'
    db = InMemoryExampleDatabase()
    find(
        st.binary(min_size=10),
        has_a_non_zero_byte,
        settings=settings(database=db),
        database_key=key,
    )
    # Intermediate shrinks, not just the final example, should be present.
    assert len(all_values(db)) > 1
def test_database_clears_secondary_key():
    """clear_secondary_key drops everything stored under the secondary key
    while leaving the primary key's entry untouched."""
    key = b"key"
    db = InMemoryExampleDatabase()

    def predicate(data):
        # Only the exact byte 10 is interesting; all seeded secondary values
        # are invalid, so none of them can replace it.
        if data.draw_bits(8) == 10:
            data.mark_interesting()
        else:
            data.mark_invalid()

    runner = ConjectureRunner(
        predicate,
        settings=settings(
            max_examples=1,
            buffer_size=1024,
            database=db,
            suppress_health_check=HealthCheck.all(),
        ),
        database_key=key,
    )

    # Seed ten distinct values under the secondary key.
    for byte in range(10):
        db.save(runner.secondary_key, hbytes([byte]))

    runner.cached_test_function([10])
    assert runner.interesting_examples

    assert len(set(db.fetch(key))) == 1
    assert len(set(db.fetch(runner.secondary_key))) == 10

    runner.clear_secondary_key()

    assert len(set(db.fetch(key))) == 1
    assert len(set(db.fetch(runner.secondary_key))) == 0
def test_database_uses_values_from_secondary_key():
    """Clearing the secondary key keeps only values that are still
    interesting, and shrinking proceeds from them to the minimal example."""
    key = b'key'
    database = InMemoryExampleDatabase()

    def f(data):
        # Bytes >= 5 are interesting; smaller ones are invalid.
        if data.draw_bits(8) >= 5:
            data.mark_interesting()
        else:
            data.mark_invalid()

    runner = ConjectureRunner(f, settings=settings(
        max_examples=1, buffer_size=1024, database=database,
        suppress_health_check=HealthCheck.all(),
    ), database_key=key)

    # Seed bytes 0..9 under the secondary key.
    for i in range(10):
        database.save(runner.secondary_key, hbytes([i]))

    runner.test_function(ConjectureData.for_buffer(hbytes([10])))
    assert runner.interesting_examples

    assert len(set(database.fetch(key))) == 1
    assert len(set(database.fetch(runner.secondary_key))) == 10

    runner.clear_secondary_key()

    assert len(set(database.fetch(key))) == 1
    # Only the still-interesting seeds (>= 5, plus the shrunk 10... values
    # 6..10 here) survive the clearing pass.
    assert set(
        map(int_from_bytes, database.fetch(runner.secondary_key))
    ) == set(range(6, 11))

    v, = runner.interesting_examples.values()
    # The final interesting example is the minimal one: the byte 5.
    assert list(v.buffer) == [5]
def __init__(self):
    """Set up one in-memory and two directory-based databases, the latter
    sharing a directory so cross-instance visibility is exercised."""
    super().__init__()
    self.tempd = tempfile.mkdtemp()
    shared_dir = os.path.join(self.tempd, "examples")
    self.dbs = [
        DirectoryBasedExampleDatabase(shared_dir),
        InMemoryExampleDatabase(),
        DirectoryBasedExampleDatabase(shared_dir),
    ]
def __init__(self):
    """Build the set of databases under comparison: directory-based (twice,
    sharing one directory), in-memory, and SQLite."""
    super(DatabaseComparison, self).__init__()
    self.tempd = tempfile.mkdtemp()
    shared_dir = os.path.join(self.tempd, 'examples')
    dbs = [
        DirectoryBasedExampleDatabase(shared_dir),
        InMemoryExampleDatabase(),
    ]
    dbs.append(SQLiteExampleDatabase(':memory:'))
    # Second directory-based instance deliberately reuses the same directory.
    dbs.append(DirectoryBasedExampleDatabase(shared_dir))
    self.dbs = dbs
def test_saves_on_interrupt():
    """A KeyboardInterrupt mid-run must not lose what was already found."""

    def interrupts(data):
        raise KeyboardInterrupt()

    db = InMemoryExampleDatabase()
    runner = ConjectureRunner(
        interrupts, settings=settings(database=db), database_key=b'key'
    )

    with pytest.raises(KeyboardInterrupt):
        runner.run()
    # The interrupted example should still have been written out.
    assert db.data
def test_runs_full_set_of_examples():
    """The runner produces exactly max_examples valid examples."""

    def draws_everything(data):
        data.draw_bits(64)

    runner = ConjectureRunner(
        draws_everything,
        settings=settings(TEST_SETTINGS, database=InMemoryExampleDatabase()),
        database_key=b"stuff",
    )
    runner.run()
    assert runner.valid_examples == TEST_SETTINGS.max_examples
def test_clears_out_its_database_on_shrinking(
    initial_attempt, skip_target, monkeypatch
):
    """After a run, only secondary-key entries that are still interesting
    (byte >= 127) remain, and the primary key holds a single example."""
    def generate_new_examples(self):
        # Generation produces exactly one buffer, the parametrized attempt.
        self.test_function(
            ConjectureData.for_buffer(hbytes([initial_attempt])))

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples', generate_new_examples)
    key = b'key'
    db = InMemoryExampleDatabase()

    def f(data):
        if data.draw_bits(8) >= 127:
            data.mark_interesting()

    runner = ConjectureRunner(
        f, settings=settings(database=db, max_examples=256), database_key=key,
        random=Random(0),
    )
    # Seed every byte value (optionally skipping the target 127) under the
    # secondary key before running.
    for n in hrange(256):
        if n != 127 or not skip_target:
            db.save(runner.secondary_key, hbytes([n]))
    runner.run()
    assert len(runner.interesting_examples) == 1
    # Uninteresting (< 127) secondary entries must have been cleared out.
    for b in db.fetch(runner.secondary_key):
        assert b[0] >= 127
    assert len(list(db.fetch(runner.database_key))) == 1
def __init__(self):
    """Build the databases under comparison; the SQLite backend is
    deprecated, so constructing it must emit a deprecation warning."""
    super(DatabaseComparison, self).__init__()
    self.tempd = tempfile.mkdtemp()
    shared_dir = os.path.join(self.tempd, 'examples')
    dbs = [
        DirectoryBasedExampleDatabase(shared_dir),
        InMemoryExampleDatabase(),
        DirectoryBasedExampleDatabase(shared_dir),
    ]
    with validate_deprecation():
        dbs.append(SQLiteExampleDatabase(':memory:'))
    self.dbs = dbs
def test_clears_out_everything_smaller_than_the_interesting_example(): target = None # We retry the test run a few times to get a large enough initial # set of examples that we're not going to explore them all in the # initial run. last_sum = [None] database = InMemoryExampleDatabase() seen = set() @settings( database=database, verbosity=Verbosity.quiet, max_examples=100, timeout=unlimited, suppress_health_check=[HealthCheck.hung_test], ) @given(st.binary(min_size=10, max_size=10)) def test(b): if target is not None: if len(seen) < 30: seen.add(b) if b in seen: return if b >= target: raise ValueError() return b = hbytes(b) s = sum(b) if ( (last_sum[0] is None and s > 1000) or (last_sum[0] is not None and s >= last_sum[0] - 1) ): last_sum[0] = s raise ValueError() with pytest.raises(ValueError): test() saved = non_covering_examples(database) assert len(saved) > 30 target = sorted(saved)[len(saved) // 2] with pytest.raises(ValueError): test() saved = non_covering_examples(database) assert target in saved or target in seen for s in saved: assert s >= target
def test_smaller_interesting_dominates_larger_valid():
    """An interesting result with a smaller buffer dominates a merely valid
    result with a larger one."""

    def test(data):
        if data.draw_bits(8) == 0:
            data.mark_interesting()

    runner = ConjectureRunner(
        test,
        settings=settings(TEST_SETTINGS, database=InMemoryExampleDatabase()),
        database_key=b"stuff",
    )
    interesting = runner.cached_test_function([0]).as_result()
    merely_valid = runner.cached_test_function([1]).as_result()
    assert dominance(interesting, merely_valid) == DominanceRelation.LEFT_DOMINATES
def test_saves_data_while_shrinking():
    """Every distinct interesting example found while shrinking is saved in
    the database, and nothing else is."""
    key = b'hi there'
    n = 5
    db = InMemoryExampleDatabase()
    # Sanity check: the database starts empty for this key.
    assert list(db.fetch(key)) == []
    seen = set()

    def f(data):
        x = data.draw_bytes(512)
        # Accept up to n large buffers as interesting, then freeze the set so
        # shrinking can only land on already-seen members.
        if sum(x) >= 5000 and len(seen) < n:
            seen.add(hbytes(x))
        if hbytes(x) in seen:
            data.mark_interesting()

    runner = ConjectureRunner(f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(seen) == n
    in_db = non_covering_examples(db)
    # NOTE: the subset check is implied by the equality below; kept as-is.
    assert in_db.issubset(seen)
    assert in_db == seen
def test_includes_right_hand_side_targets_in_dominance():
    """A target observation on only the right-hand result blocks dominance."""

    def test(data):
        if data.draw_bits(8):
            data.target_observations[""] = 10

    runner = ConjectureRunner(
        test,
        settings=settings(TEST_SETTINGS, database=InMemoryExampleDatabase()),
        database_key=b"stuff",
    )
    without_target = runner.cached_test_function([0]).as_result()
    with_target = runner.cached_test_function([1]).as_result()
    assert dominance(without_target, with_target) == DominanceRelation.NO_DOMINANCE
def test_multiplexed_dbs_read_and_write_all():
    """MultiplexedDatabase fans writes out to, and reads back from, every
    wrapped database."""
    first = InMemoryExampleDatabase()
    second = InMemoryExampleDatabase()
    multi = MultiplexedDatabase(first, second)

    first.save(b"a", b"aa")
    second.save(b"b", b"bb")
    multi.save(b"c", b"cc")
    # Moving via the multiplexer applies the move to both databases.
    multi.move(b"a", b"b", b"aa")

    for database in (first, second, multi):
        assert not set(database.fetch(b"a"))
        assert set(database.fetch(b"c")) == {b"cc"}

    # Fetching via the multiplexer yields one hit per underlying database.
    fetched = list(multi.fetch(b"b"))
    assert len(fetched) == 2
    assert set(fetched) == {b"aa", b"bb"}

    multi.delete(b"c", b"cc")
    for database in (first, second, multi):
        assert not set(database.fetch(b"c"))
def test_will_save_when_reuse_not_in_phases():
    """Failures are written to the database even when the reuse phase is
    disabled -- saving is independent of replaying."""
    db = InMemoryExampleDatabase()
    assert not db.data

    @settings(database=db, phases=(Phase.generate,))
    @given(st.integers())
    def always_fails(i):
        raise ValueError()

    with pytest.raises(ValueError):
        always_fails()

    # Exactly one non-coverage key should exist, holding one saved example.
    non_coverage = [v for k, v in db.data.items() if b"coverage" not in k]
    (saved,) = non_coverage
    assert len(saved) == 1
def test_readonly_db_is_not_writable():
    """ReadOnlyDatabase ignores save/delete/move but reads through to the
    wrapped database."""
    inner = InMemoryExampleDatabase()
    wrapped = ReadOnlyDatabase(inner)
    inner.save(b"key", b"value")
    inner.save(b"key", b"value2")

    # None of these write operations may reach the wrapped database.
    wrapped.delete(b"key", b"value")
    wrapped.move(b"key", b"key2", b"value2")
    wrapped.save(b"key", b"value3")

    assert sorted(wrapped.fetch(b"key")) == [b"value", b"value2"]
    assert list(wrapped.fetch(b"key2")) == []
def test_clears_out_everything_smaller_than_the_interesting_example():
    """After raising the failure threshold, every example still in the
    database is at or above the chosen target."""
    in_clearing = False
    target = [None]
    for _ in range(5):
        # We retry the test run a few times to get a large enough initial
        # set of examples that we're not going to explore them all in the
        # initial run.
        cache = {}
        seen = set()
        database = InMemoryExampleDatabase()

        @settings(database=database, verbosity=Verbosity.quiet,
                  max_examples=100)
        @given(st.binary(min_size=10, max_size=10))
        def test(i):
            if not in_clearing:
                # Generation phase: fail on 90% of "complex" buffers so the
                # database fills with interesting examples.
                if len([b for b in i if b > 1]) >= 8:
                    assert cache.setdefault(i, len(cache) % 10 != 9)
            elif len(seen) <= 20:
                seen.add(i)
            else:
                # Clearing phase: pick the median unseen saved example as the
                # target, then fail only at or above it.
                if target[0] is None:
                    remainder = sorted([s for s in saved if s not in seen])
                    target[0] = remainder[len(remainder) // 2]
                assert i in seen or i < target[0]

        with pytest.raises(AssertionError):
            test()
        saved = all_values(database)
        if len(saved) > 30:
            break
    else:
        assert False, 'Never generated enough examples while shrinking'
    in_clearing = True
    with pytest.raises(AssertionError):
        test()
    saved = all_values(database)
    # Everything below the target must have been cleared out.
    for s in saved:
        assert s >= target[0]
def test_uses_tags_in_calculating_pareto_front():
    """Structural tags distinguish otherwise-comparable results, yielding a
    two-element pareto front."""
    with deterministic_PRNG():

        def test(data):
            # Half of the inputs open an extra example block, giving them a
            # different structural signature.
            if data.draw_bits(1):
                data.start_example(11)
                data.draw_bits(8)
                data.stop_example()

        runner = ConjectureRunner(
            test,
            settings=settings(
                max_examples=10, database=InMemoryExampleDatabase()
            ),
            database_key=b"stuff",
        )
        runner.run()

        assert len(runner.pareto_front) == 2
def test_replays_both_failing_values():
    """Two distinct failures are both replayed from the database on every
    subsequent run."""
    first_failure = [None]

    @settings(database=InMemoryExampleDatabase())
    @given(st.integers())
    def test(i):
        if abs(i) < 1000:
            return
        # Remember the first large value; it raises TypeError, while every
        # other large value raises ValueError -- two distinct bugs.
        if first_failure[0] is None:
            first_failure[0] = i
        if first_failure[0] == i:
            raise TypeError()
        raise ValueError()

    # Both the fresh run and the database-replayed run see both failures.
    for _ in range(2):
        with pytest.raises(MultipleFailures):
            test()
def test_fuzz_one_input_does_not_add_redundant_entries_to_database(buffers, db_size):
    """fuzz_one_input saves failing examples without duplicating entries.

    The two failure sites below are deliberately on different lines: b"XX"
    fails at the assert, everything else at the raise, so they count as
    distinct failures for deduplication purposes.
    """
    db = InMemoryExampleDatabase()
    seen = []

    @given(st.binary(min_size=2, max_size=2))
    @settings(database=db)
    def test(s):
        seen.append(s)
        assert s != b"XX"
        raise AssertionError

    for buf in buffers:
        with pytest.raises(AssertionError):
            test.hypothesis.fuzz_one_input(buf)

    # All saved examples live under a single key, with no redundant entries.
    (saved_examples,) = db.data.values()
    assert seen == buffers
    assert len(saved_examples) == db_size
def test_garbage_collects_the_database():
    """A later run that finds nothing interesting garbage-collects most of
    the previously saved shrink trail."""
    key = b'hi there'
    n = 200
    db = InMemoryExampleDatabase()
    local_settings = settings(database=db, max_shrinks=n, timeout=unlimited)

    # First run: the slow shrinker performs n shrinks, saving each step plus
    # the final example (hence n + 1 entries).
    runner = ConjectureRunner(
        slow_shrinker(), settings=local_settings, database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(non_covering_examples(db)) == n + 1

    # Second run: nothing is interesting any more, so replayed entries are
    # collected -- but not all of them at once.
    runner = ConjectureRunner(
        lambda data: data.draw_bytes(4),
        settings=local_settings, database_key=key)
    runner.run()
    assert 0 < len(non_covering_examples(db)) < n
def test_will_shrink_covering_examples():
    """With shrinking of failures disabled, the covering example for a tag is
    still shrunk: only the minimal tagged buffer remains saved."""
    seen = [hbytes([255] * 4)]

    def tagged(data):
        # Track the lexicographically smallest buffer ever drawn; every call
        # carries the same tag, so the database keeps one covering example.
        seen[0] = min(seen[0], hbytes(data.draw_bytes(4)))
        data.add_tag(0)

    db = InMemoryExampleDatabase()
    runner = ConjectureRunner(tagged, settings=settings(
        max_examples=100, max_iterations=10000, max_shrinks=0,
        buffer_size=1024, database=db,
    ), database_key=b'stuff')
    runner.run()
    assert all_values(db) == set(seen)
def test_can_delete_keys():
    """Deleting one value under a key leaves the other values intact."""
    db = InMemoryExampleDatabase()
    for value in (b"bar", b"baz"):
        db.save(b"foo", value)
    db.delete(b"foo", b"bar")
    assert list(db.fetch(b"foo")) == [b"baz"]
def test_can_delete_keys():
    """A deleted value no longer comes back from fetch."""
    backend = InMemoryExampleDatabase()
    backend.save(b'foo', b'bar')
    backend.save(b'foo', b'baz')
    backend.delete(b'foo', b'bar')
    remaining = list(backend.fetch(b'foo'))
    assert remaining == [b'baz']