def set_up_hypothesis() -> None:
    default_settings = settings(
        # Turn off the health checks because setUp/tearDown are too slow
        suppress_health_check=[HealthCheck.too_slow],
        # Turn off the example database; we don't have a way to persist this
        # or share this across runs, so we don't derive any benefit from it at
        # this time.
        database=None,
    )

    # Configure Hypothesis to run faster when iterating locally
    settings.register_profile(
        "dev", settings(default_settings, max_examples=5, timeout=0)
    )
    # ... and use the defaults (which have more combinations) when running
    # on CI, which we want to be more deterministic.
    settings.register_profile(
        "ci", settings(default_settings, derandomize=True, timeout=120)
    )

    # Use the dev profile by default, but use the ci profile on sandcastle.
    settings.load_profile(
        "ci" if is_sandcastle() else os.getenv("HYPOTHESIS_PROFILE", "dev")
    )

    # We need to set a global (but non-conflicting) path to store some state
    # during hypothesis example runs. We want to avoid putting this state in
    # the repo.
    set_hypothesis_home_dir(tempfile.mkdtemp(prefix="eden_hypothesis."))
    atexit.register(cleanup_tmp_dir, pathlib.Path(hypothesis_home_dir()))
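# A minimal sketch of the same profile pattern using only the public
# hypothesis API; the profile names here are illustrative, and the
# is_sandcastle() helper above is project-specific.
import os

from hypothesis import HealthCheck, settings

settings.register_profile("dev", settings(max_examples=5))
settings.register_profile(
    "ci",
    settings(derandomize=True, suppress_health_check=[HealthCheck.too_slow]),
)

# Pick the profile from the environment, e.g. `HYPOTHESIS_PROFILE=ci pytest`.
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))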
def test_garbage_collects_the_database():
    key = b'hi there'
    n = 200
    db = ExampleDatabase(':memory:')
    assert list(db.fetch(key)) == []
    seen = set()
    go = True

    def f(data):
        x = hbytes(data.draw_bytes(512))
        if not go:
            return
        if sum(x) >= 5000 and len(seen) < n:
            seen.add(x)
        if x in seen:
            data.mark_interesting()

    runner = TestRunner(
        f, settings=settings(database=db, max_shrinks=2 * n),
        database_key=key)
    runner.run()
    assert runner.last_data.status == Status.INTERESTING
    assert len(seen) == n
    assert set(db.fetch(key)) == seen
    go = False
    runner = TestRunner(
        f, settings=settings(database=db, max_shrinks=2 * n),
        database_key=key)
    runner.run()
    assert 0 < len(set(db.fetch(key))) < n
def run():
    filterwarnings('error')
    filterwarnings('ignore', category=ImportWarning)
    filterwarnings('ignore', category=FutureWarning, module='pandas._version')

    # Fixed in recent versions but allowed by pytest=3.0.0; see #1630
    filterwarnings('ignore', category=DeprecationWarning, module='pluggy')

    # See https://github.com/numpy/numpy/pull/432
    filterwarnings('ignore', message='numpy.dtype size changed')
    filterwarnings('ignore', message='numpy.ufunc size changed')

    # Imported by Pandas in version 1.9, but fixed in later versions.
    filterwarnings(
        'ignore',
        message='Importing from numpy.testing.decorators is deprecated'
    )
    filterwarnings(
        'ignore',
        message='Importing from numpy.testing.nosetester is deprecated'
    )

    new_home = mkdtemp()
    set_hypothesis_home_dir(new_home)
    assert settings.default.database.path.startswith(new_home)

    charmap()
    assert os.path.exists(charmap_file()), charmap_file()
    assert isinstance(settings, type)

    # We do a smoke test here before we mess around with settings.
    x = settings()

    import hypothesis._settings as settings_module

    for s in settings_module.all_settings.values():
        v = getattr(x, s.name)
        # Check if it has a dynamically defined default and if so skip
        # comparison.
        if getattr(settings, s.name).show_default:
            assert v == s.default, '%r == x.%s != s.%s == %r' % (
                v, s.name, s.name, s.default,
            )

    settings.register_profile('default', settings(
        max_examples=10 if IN_COVERAGE_TESTS else not_set,
        timeout=unlimited,
    ))
    settings.register_profile('speedy', settings(
        max_examples=5,
    ))
    settings.register_profile('debug', settings(verbosity=Verbosity.debug))
    settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))
def test_can_generate_both_zeros_when_in_interval(l, r):
    interval = st.floats(l, r)
    find(
        interval, lambda x: assume(x == 0) and math.copysign(1, x) == 1,
        settings=settings(max_iterations=20000))
    find(
        interval, lambda x: assume(x == 0) and math.copysign(1, x) == -1,
        settings=settings(max_iterations=20000))
def example(self, random=None):
    """Provide an example of the sort of value that this strategy
    generates. This is biased to be slightly simpler than is typical
    for values from this strategy, for clarity purposes.

    This method shouldn't be taken too seriously. It's here for
    interactive exploration of the API, not for any sort of real
    testing.

    This method is part of the public API.
    """
    from hypothesis import find, settings
    try:
        return find(
            self,
            lambda x: True,
            random=random,
            settings=settings(
                max_shrinks=0, max_iterations=1000, database=None
            )
        )
    except (NoSuchExample, Unsatisfiable):
        raise NoExamples(
            u'Could not find any valid examples in 100 tries'
        )
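# Usage sketch for the public example() method shown above: it draws a
# single illustrative value and is meant for interactive exploration,
# not for use inside tests.
import hypothesis.strategies as st

print(st.integers().example())
print(st.lists(st.booleans(), max_size=3).example())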
def test_database_clears_secondary_key():
    key = b"key"
    database = InMemoryExampleDatabase()

    def f(data):
        if data.draw_bits(8) == 10:
            data.mark_interesting()
        else:
            data.mark_invalid()

    runner = ConjectureRunner(
        f,
        settings=settings(
            max_examples=1,
            buffer_size=1024,
            database=database,
            suppress_health_check=HealthCheck.all(),
        ),
        database_key=key,
    )

    for i in range(10):
        database.save(runner.secondary_key, hbytes([i]))

    runner.cached_test_function([10])
    assert runner.interesting_examples

    assert len(set(database.fetch(key))) == 1
    assert len(set(database.fetch(runner.secondary_key))) == 10

    runner.clear_secondary_key()

    assert len(set(database.fetch(key))) == 1
    assert len(set(database.fetch(runner.secondary_key))) == 0
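# The database contract the test above relies on is small. A sketch using
# only the public InMemoryExampleDatabase methods; note that secondary_key
# in the test is an internal attribute of the runner, not of the database.
from hypothesis.database import InMemoryExampleDatabase

db = InMemoryExampleDatabase()
key = b"some-test-key"

# save/fetch/delete operate on byte-string values under byte-string keys.
db.save(key, b"\x00\x01")
db.save(key, b"\x02")
assert set(db.fetch(key)) == {b"\x00\x01", b"\x02"}

db.delete(key, b"\x02")
assert set(db.fetch(key)) == {b"\x00\x01"}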
def test_exhaustive_enumeration(prefix, bits, seed):
    seen = set()

    def f(data):
        if prefix:
            data.write(hbytes(prefix))
            assert len(data.buffer) == len(prefix)
        k = data.draw_bits(bits)
        assert k not in seen
        seen.add(k)

    size = 2 ** bits

    seen_prefixes = set()

    runner = ConjectureRunner(
        f, settings=settings(database=None, max_examples=size),
        random=Random(seed),
    )
    with pytest.raises(RunIsComplete):
        runner.cached_test_function(b'')
        for _ in hrange(size):
            p = runner.generate_novel_prefix()
            assert p not in seen_prefixes
            seen_prefixes.add(p)
            data = ConjectureData.for_buffer(
                hbytes(p + hbytes(2 + len(prefix))))
            runner.test_function(data)
            assert data.status == Status.VALID
            node = 0
            for b in data.buffer:
                node = runner.tree[node][b]
            assert node in runner.dead
    assert len(seen) == size
def test_can_simplify_hard_recursive_data_into_boolean_alternative(rnd):
    """This test forces us to exercise the simplification through redrawing
    functionality, thus testing that we can deal with bad templates."""

    def leaves(ls):
        if isinstance(ls, (bool,) + integer_types):
            return [ls]
        else:
            return sum(map(leaves, ls), [])

    def hard(base):
        return recursive(
            base, lambda x: lists(x, max_size=5),
            max_leaves=20)

    r = find(
        hard(booleans()) |
        hard(booleans()) |
        hard(booleans()) |
        hard(integers()) |
        hard(booleans()),
        lambda x: len(leaves(x)) >= 3 and
        any(isinstance(t, bool) for t in leaves(x)),
        random=rnd, settings=settings(
            database=None, max_examples=5000, max_shrinks=1000))
    lvs = leaves(r)
    assert lvs == [False] * 3
    assert all(isinstance(v, bool) for v in lvs), repr(lvs)
def test_clears_out_its_database_on_shrinking(
    initial_attempt, skip_target, monkeypatch
):
    def generate_new_examples(self):
        self.test_function(
            ConjectureData.for_buffer(hbytes([initial_attempt])))

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples', generate_new_examples)

    key = b'key'
    db = InMemoryExampleDatabase()

    def f(data):
        if data.draw_bits(8) >= 127:
            data.mark_interesting()

    runner = ConjectureRunner(
        f, settings=settings(database=db, max_examples=256),
        database_key=key, random=Random(0),
    )

    for n in hrange(256):
        if n != 127 or not skip_target:
            db.save(runner.secondary_key, hbytes([n]))
    runner.run()
    assert len(runner.interesting_examples) == 1
    for b in db.fetch(runner.secondary_key):
        assert b[0] >= 127
    assert len(list(db.fetch(runner.database_key))) == 1
def test_can_delete_intervals(monkeypatch):
    def generate_new_examples(self):
        self.test_function(
            ConjectureData.for_buffer(hbytes([255] * 10 + [1, 3])))

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples', generate_new_examples)
    monkeypatch.setattr(
        Shrinker, 'shrink', fixate(Shrinker.adaptive_example_deletion)
    )

    def f(data):
        while True:
            n = data.draw_bits(8)
            if n == 255:
                continue
            elif n == 1:
                break
            else:
                data.mark_invalid()
        if data.draw_bits(8) == 3:
            data.mark_interesting()

    runner = ConjectureRunner(f, settings=settings(database=None))
    runner.run()
    x, = runner.interesting_examples.values()
    assert x.buffer == hbytes([1, 3])
def test_run_nothing():
    def f(data):
        assert False

    runner = ConjectureRunner(f, settings=settings(phases=()))
    runner.run()
    assert runner.call_count == 0
def test_zeroes_bytes_above_bound():
    def f(data):
        if data.draw_bits(1):
            x = data.draw_bytes(9)
            assert not any(x[4:8])

    ConjectureRunner(f, settings=settings(buffer_size=10)).run()
def test_terminates_shrinks(n, monkeypatch):
    from hypothesis.internal.conjecture import engine

    db = InMemoryExampleDatabase()

    def generate_new_examples(self):
        def draw_bytes(data, n):
            return hbytes([255] * n)

        self.test_function(self.new_conjecture_data(draw_bytes))

    monkeypatch.setattr(
        ConjectureRunner, "generate_new_examples", generate_new_examples
    )
    monkeypatch.setattr(engine, "MAX_SHRINKS", n)

    runner = ConjectureRunner(
        slow_shrinker(),
        settings=settings(max_examples=5000, database=db),
        random=Random(0),
        database_key=b"key",
    )
    runner.run()
    last_data, = runner.interesting_examples.values()
    assert last_data.status == Status.INTERESTING
    assert runner.shrinks == n
    in_db = set(db.data[runner.secondary_key])
    assert len(in_db) == n
def test_database_uses_values_from_secondary_key():
    key = b'key'
    database = InMemoryExampleDatabase()

    def f(data):
        if data.draw_bits(8) >= 5:
            data.mark_interesting()
        else:
            data.mark_invalid()

    runner = ConjectureRunner(f, settings=settings(
        max_examples=1, buffer_size=1024,
        database=database, suppress_health_check=HealthCheck.all(),
    ), database_key=key)

    for i in range(10):
        database.save(runner.secondary_key, hbytes([i]))

    runner.test_function(ConjectureData.for_buffer(hbytes([10])))

    assert runner.interesting_examples

    assert len(set(database.fetch(key))) == 1
    assert len(set(database.fetch(runner.secondary_key))) == 10

    runner.clear_secondary_key()

    assert len(set(database.fetch(key))) == 1
    assert set(
        map(int_from_bytes, database.fetch(runner.secondary_key))
    ) == set(range(6, 11))

    v, = runner.interesting_examples.values()

    assert list(v.buffer) == [5]
def test_bordering_on_a_leap_year():
    x = minimal(
        datetimes(min_year=2002, max_year=2005),
        lambda x: x.month == 2 and x.day == 29,
        settings=settings(database=None, max_examples=10 ** 7)
    )
    assert x.year == 2004
def test_seeds_off_random():
    s = settings(max_shrinks=0, database=None)
    r = random.getstate()
    x = find(st.integers(), lambda x: True, settings=s)
    random.setstate(r)
    y = find(st.integers(), lambda x: True, settings=s)
    assert x == y
def test_can_collectively_minimize(spec):
    """This should generally exercise strategies' strictly_simpler heuristic
    by putting us in a state where example cloning is required to get to the
    answer fast enough."""
    if spec.template_upper_bound < 2:
        return

    n = 10

    def distinct_reprs(x):
        result = set()
        for t in x:
            result.add(repr(t))
            if len(result) >= 2:
                return True
        return False

    try:
        xs = find(
            lists(spec, min_size=n, max_size=n),
            distinct_reprs,
            settings=settings(
                timeout=3.0, max_examples=2000))
        assert len(xs) == n
        assert 2 <= len(set((map(repr, xs)))) <= 3
    except NoSuchExample:
        pass
def test_contains_the_test_function_name_in_the_exception_string():
    look_for_one = settings(
        max_examples=1, suppress_health_check=HealthCheck.all())

    @given(integers())
    @look_for_one
    def this_has_a_totally_unique_name(x):
        reject()

    with raises(Unsatisfiable) as e:
        this_has_a_totally_unique_name()
    assert this_has_a_totally_unique_name.__name__ in e.value.args[0]

    class Foo(object):
        @given(integers())
        @look_for_one
        def this_has_a_unique_name_and_lives_on_a_class(self, x):
            reject()

    with raises(Unsatisfiable) as e:
        Foo().this_has_a_unique_name_and_lives_on_a_class()
    assert (
        Foo.this_has_a_unique_name_and_lives_on_a_class.__name__
    ) in e.value.args[0]
def test_will_shrink_covering_examples():
    best = [None]
    replaced = []

    def tagged(data):
        b = hbytes(data.draw_bytes(4))
        if any(b):
            data.add_tag('nonzero')
            if best[0] is None:
                best[0] = b
            elif b < best[0]:
                replaced.append(best[0])
                best[0] = b

    db = InMemoryExampleDatabase()
    runner = ConjectureRunner(tagged, settings=settings(
        max_examples=100, max_iterations=10000, max_shrinks=0,
        buffer_size=1024,
        database=db,
    ), database_key=b'stuff')
    runner.run()
    saved = set(all_values(db))
    assert best[0] in saved
    for r in replaced:
        assert r not in saved
def test_can_cover_without_a_database_key():
    def tagged(data):
        data.add_tag(0)

    runner = ConjectureRunner(tagged, settings=settings(), database_key=None)
    runner.run()
    assert len(runner.covering_examples) == 1
def test_saves_data_while_shrinking():
    key = b'hi there'
    n = 5
    db = ExampleDatabase(':memory:')
    assert list(db.fetch(key)) == []
    seen = set()

    def f(data):
        x = data.draw_bytes(512)
        if sum(x) >= 5000 and len(seen) < n:
            seen.add(hbytes(x))
        if hbytes(x) in seen:
            data.mark_interesting()

    runner = ConjectureRunner(
        f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.last_data.status == Status.INTERESTING
    assert len(seen) == n
    in_db = set(
        v
        for vs in db.data.values()
        for v in vs
    )
    assert in_db.issubset(seen)
    assert in_db == seen
def test_garbage_collects_the_database():
    key = b'hi there'
    n = 200
    db = ExampleDatabase(':memory:')

    local_settings = settings(
        database=db, max_shrinks=n, timeout=unlimited)

    runner = ConjectureRunner(
        slow_shrinker(), settings=local_settings, database_key=key)
    runner.run()
    assert runner.last_data.status == Status.INTERESTING

    def in_db():
        return set(
            v
            for vs in db.data.values()
            for v in vs
        )

    assert len(in_db()) == n + 1

    runner = ConjectureRunner(
        lambda data: data.draw_bytes(4),
        settings=local_settings, database_key=key)
    runner.run()

    assert 0 < len(in_db()) < n
def test_still_tears_down_on_failed_reify():
    x = HasSetupAndTeardown()
    with pytest.raises(AttributeError):
        with settings(perform_health_check=False):
            x.fail_in_reify()
    assert x.setups > 0
    assert x.teardowns == x.setups
def test_debug_data(capsys):
    buf = [0, 1, 2]

    def f(data):
        for x in hbytes(buf):
            if data.draw_bits(8) != x:
                data.mark_invalid()
            data.start_example(1)
            data.stop_example()
        data.mark_interesting()

    runner = ConjectureRunner(
        f,
        settings=settings(
            max_examples=5000,
            buffer_size=1024,
            database=None,
            suppress_health_check=HealthCheck.all(),
            verbosity=Verbosity.debug,
        ),
    )
    runner.cached_test_function(buf)
    runner.run()

    out, _ = capsys.readouterr()
    assert re.match(u"\\d+ bytes \\[.*\\] -> ", out)
    assert "INTERESTING" in out
def test_can_handle_unicode_repr():
    def foo(x):
        pass

    from hypothesis import settings
    with settings(strict=False):
        assert arg_string(foo, [Snowman()], {}) == 'x=☃'
        assert arg_string(foo, [], {'x': Snowman()}) == 'x=☃'
def test_exhaustive_enumeration_of_partial_buffer():
    seen = set()

    def f(data):
        k = data.draw_bytes(2)
        assert k[1] == 0
        assert k not in seen
        seen.add(k)

    seen_prefixes = set()

    runner = ConjectureRunner(
        f, settings=settings(database=None, max_examples=256, buffer_size=2),
        random=Random(0),
    )
    with pytest.raises(RunIsComplete):
        runner.cached_test_function(b'')
        for _ in hrange(256):
            p = runner.generate_novel_prefix()
            assert p not in seen_prefixes
            seen_prefixes.add(p)
            data = ConjectureData.for_buffer(hbytes(p + hbytes(2)))
            runner.test_function(data)
            assert data.status == Status.VALID
            node = 0
            for b in data.buffer:
                node = runner.tree[node][b]
            assert node in runner.dead
    assert len(seen) == 256
def test_saves_data_while_shrinking(monkeypatch):
    key = b'hi there'
    n = 5
    db = InMemoryExampleDatabase()
    assert list(db.fetch(key)) == []
    seen = set()

    monkeypatch.setattr(
        ConjectureRunner, 'generate_new_examples',
        lambda runner: runner.test_function(
            ConjectureData.for_buffer([255] * 10)))

    def f(data):
        x = data.draw_bytes(10)
        if sum(x) >= 2000 and len(seen) < n:
            seen.add(hbytes(x))
        if hbytes(x) in seen:
            data.mark_interesting()

    runner = ConjectureRunner(
        f, settings=settings(database=db), database_key=key)
    runner.run()
    assert runner.interesting_examples
    assert len(seen) == n
    in_db = non_covering_examples(db)
    assert in_db.issubset(seen)
    assert in_db == seen
def test_can_simplify_text_through_a_morpher(rnd):
    m = find(
        morphers, lambda x: bool(x.become(s.text())), random=rnd,
        settings=settings(database=None)
    )
    with BuildContext():
        assert m.become(s.text()) == u'0'
def test_can_use_recursive_data_in_sets(rnd):
    nested_sets = st.recursive(
        st.booleans(),
        lambda js: st.frozensets(js, average_size=2.0),
        max_leaves=10
    )
    nested_sets.example(rnd)

    def flatten(x):
        if isinstance(x, bool):
            return frozenset((x,))
        else:
            result = frozenset()
            for t in x:
                result |= flatten(t)
                if len(result) == 2:
                    break
            return result

    assert rnd is not None
    x = find(
        nested_sets, lambda x: len(flatten(x)) == 2, random=rnd,
        settings=settings(database=None, max_shrinks=1000, max_examples=1000))
    assert x in (
        frozenset((False, True)),
        frozenset((False, frozenset((True,)))),
        frozenset((frozenset((False, True)),))
    )
def executor_test_settings(func):
    if hu.is_sandcastle() or hu.is_travis():
        return settings(
            max_examples=CI_MAX_EXAMPLES,
            timeout=CI_TIMEOUT
        )(func)
    else:
        return func
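# Hypothetical usage of the decorator above: local runs keep their settings,
# while CI hosts get the tightened CI_MAX_EXAMPLES/CI_TIMEOUT profile. The
# test name and strategy here are illustrative.
import hypothesis.strategies as st
from hypothesis import given


@executor_test_settings
@given(st.integers())
def test_executor_roundtrip(x):
    assert isinstance(x, int)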
        return True
    elif os.getenv('TW_JOB_USER') == 'sandcastle':
        return True
    return False


def is_travis():
    return 'TRAVIS' in os.environ


hypothesis.settings.register_profile(
    "sandcastle",
    hypothesis.settings(
        derandomize=True,
        suppress_health_check=[hypothesis.HealthCheck.too_slow],
        database=None,
        min_satisfying_examples=1,
        max_examples=100,
        verbosity=hypothesis.Verbosity.verbose))

hypothesis.settings.register_profile(
    "dev",
    hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.too_slow],
        database=None,
        max_examples=10,
        min_satisfying_examples=1,
        verbosity=hypothesis.Verbosity.verbose))

hypothesis.settings.register_profile(
    "debug",
    hypothesis.settings(
def reduce_with_hypothesis_base(experiment, name, suppress_intervals):
    experiment = EXPERIMENTS[experiment]

    base_predicate = experiment.calculate_error_predicate(
        info(experiment, name))
    generated = generate(experiment, name)
    classified = base_predicate(generated)
    assert classified == Classification.INTERESTING, (
        classified, base_predicate)
    results, predicate = tracking_predicate(base_predicate)

    generation_stats = {c: ExampleStatistics() for c in Status}

    input_to_outputs = []

    def test_function(data):
        if suppress_intervals:
            start = data.start_example
            stop = data.stop_example

            def start_example(label):
                if data.depth >= 0:
                    data.depth += 1
                    if data.depth > data.max_depth:
                        data.max_depth = data.depth
                else:
                    start(label)

            def stop_example(discard=False):
                if data.depth >= 1:
                    data.depth -= 1
                else:
                    stop(discard)

            data.start_example = start_example
            data.stop_example = stop_example

        generation_start = time.monotonic()
        try:
            try:
                source = data.draw(experiment.generator)
            except UnsatisfiedAssumption:
                data.mark_invalid()
            finally:
                generation_time = time.monotonic() - generation_start
            result = predicate(source)
            input_to_outputs.append(
                (encode_failure(data.buffer).decode('ascii'),
                 source, result.name))
            if trace_memory_usage:
                display_top(tracemalloc.take_snapshot())
            if result == Classification.INTERESTING:
                data.mark_interesting()
            elif result in (
                Classification.INVALIDCHEAP,
                Classification.INVALIDEXPENSIVE,
            ):
                data.mark_invalid()
        finally:
            generation_stats[data.status].record(
                size=len(data.buffer), runtime=generation_time)

    buffer = raw_buffer(experiment, name)

    runner = eng.ConjectureRunner(
        test_function,
        settings=settings(
            database=None,
            max_examples=1,
            suppress_health_check=HealthCheck.all(),
            deadline=None,
            verbosity=Verbosity.debug,
            buffer_size=BUFFER_SIZE,
        ),
        random=Random(int.from_bytes(hashlib.sha1(buffer).digest(), "big")),
    )

    def debug_data(data):
        runner.debug(
            f"DATA {hashlib.sha1(data.buffer).hexdigest()[:8]}: "
            f"{len(data.buffer)} bytes, {data.status.name}"
        )

    runner.debug_data = debug_data

    runner.cached_test_function(buffer)
    assert runner.interesting_examples

    results.start()
    runner.shrink_interesting_examples()
    results.finish()

    v, = runner.interesting_examples.values()

    return {
        "final": {
            "buffer": base64.b64encode(v.buffer).decode("ascii"),
            "generated": buffer_to_value(experiment, v.buffer),
        },
        "reductionstats": attr.asdict(results),
        "input_to_outputs": input_to_outputs,
        "generationstats": {
            k.name: attr.asdict(v) for k, v in generation_stats.items()
        },
    }
@main.command()
def show_stats():
    data = os.path.join(ROOT, "data")
    mkdirp(data)
    with open(os.path.join(data, "reduction-stats.jsons"), "w") as o:
        for e in EXPERIMENTS:
            for s in calculate_stats(e):
                t = json.dumps(s)
                print(t)
                print(t, file=o)


@main.command()
@click.argument('experiment')
@click.argument('source')
@click.option('--buffer-size', default=settings().buffer_size)
@click.option('--count', default=200)
def import_corpus(experiment, source, buffer_size, count):
    random.seed(0)
    targets = os.listdir(source)
    targets.sort()
    random.shuffle(targets)

    try:
        os.unlink(os.path.join(CORPORA, experiment + '.tar'))
    except FileNotFoundError:
        pass

    shutil.rmtree(os.path.join(CORPORA, experiment), ignore_errors=True)
    mkdirp(os.path.join(CORPORA, experiment))
def test_can_find_with_db_eq_none():
    find(s.integers(), bool,
         settings=settings(database=None, max_examples=100))
def pytest_configure(config):
    # HealthCheck.too_slow causes more trouble than good -- especially in CIs.
    settings.register_profile(
        "patience", settings(suppress_health_check=[HealthCheck.too_slow]))
    settings.load_profile("patience")
    @rule()
    def empty_big(self):
        self.big = 0

    @rule()
    def pour_small_into_big(self):
        old_big = self.big
        self.big = min(5, self.big + self.small)
        self.small = self.small - (self.big - old_big)

    @rule()
    def pour_big_into_small(self):
        old_small = self.small
        self.small = min(3, self.small + self.big)
        self.big = self.big - (self.small - old_small)

    @invariant()
    def physics_of_jugs(self):
        assert 0 <= self.small <= 3
        assert 0 <= self.big <= 5

    @invariant()
    def die_hard_problem_not_solved(self):
        note("> small: {s} big: {b}".format(s=self.small, b=self.big))
        assert self.big != 4


# The default of 200 is sometimes not enough for Hypothesis to find
# a falsifying example.
with settings(max_examples=2000):
    DieHardTest = DieHardProblem.TestCase
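# For orientation, a minimal self-contained skeleton of the kind of
# rule-based state machine the methods above belong to; the names are
# illustrative, and the settings are attached via the attribute form that
# also appears later in this collection.
from hypothesis import settings
from hypothesis.stateful import RuleBasedStateMachine, invariant, rule


class Counter(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.value = 0

    @rule()
    def increment(self):
        self.value += 1

    @invariant()
    def non_negative(self):
        assert self.value >= 0


CounterTest = Counter.TestCase
CounterTest.settings = settings(max_examples=500)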
def test_step_will_be_negative(size):
    find_any(
        st.slices(size), lambda x: (x.step or 1) < 0,
        settings(max_examples=10**6))
class Matrices(SearchStrategy):
    def __init__(self, elements, size):
        super().__init__()
        self.__length = st.integers(0, ceil(size**0.5))
        self.__elements = elements

    def do_draw(self, data):
        n = data.draw(self.__length)
        m = data.draw(self.__length)

        return [data.draw(self.__elements) for _ in range(n * m)]


LOTS = 10**6

TRIAL_SETTINGS = settings(max_examples=LOTS, database=None)


@pytest.mark.parametrize(
    "seed", [2282791295271755424, 1284235381287210546, 14202812238092722246, 26097]
)
@pytest.mark.parametrize("size", [5, 10, 20])
@pytest.mark.parametrize("p", [0.01, 0.1])
@pytest.mark.parametrize("strategy_class", [LinearLists, Matrices])
def test_minimal_poisoned_containers(seed, size, p, strategy_class, monkeypatch):
    elements = Poisoned(p)
    strategy = strategy_class(elements, size)

    def test_function(data):
        v = data.draw(strategy)
        data.output = repr(v)
def test_stop_will_equal_size(size):
    find_any(
        st.slices(size), lambda x: x.stop == size,
        settings(max_examples=10**6))
    RevisionType,
    TimestampWithTimezone,
)
from swh.storage.algos.revisions_walker import get_revisions_walker
from swh.storage.algos.snapshot import snapshot_get_latest
from swh.web.common.utils import browsers_supported_image_mimes
from swh.web.tests.data import get_tests_data

# Module dedicated to the generation of input data for tests through
# the use of hypothesis.
# Some of these data are sampled from a test archive created and populated
# in the swh.web.tests.data module.

# Set the swh-web hypothesis profile if none has been explicitly set
hypothesis_default_settings = settings.get_profile("default")
if repr(settings()) == repr(hypothesis_default_settings):
    settings.load_profile("swh-web")


# The following strategies exploit the hypothesis capabilities


def _filter_checksum(cs):
    generated_checksums = get_tests_data()["generated_checksums"]
    if not int.from_bytes(cs, byteorder="little") or cs in generated_checksums:
        return False
    generated_checksums.add(cs)
    return True


def _known_swh_object(object_type):
    return sampled_from(get_tests_data()[object_type])
    @rule(target=objects, strat=strategies, data=data())
    def get_example(self, strat, data):
        data.draw(strat)

    @rule(target=strategies, left=integers(), right=integers())
    def integer_range(self, left, right):
        left, right = sorted((left, right))
        return integers(left, right)

    @rule(strat=strategies)
    def repr_is_good(self, strat):
        assert u' at 0x' not in repr(strat)


MAIN = __name__ == u'__main__'

TestHypothesis = HypothesisSpec.TestCase

TestHypothesis.settings = settings(
    TestHypothesis.settings,
    stateful_step_count=10 if PYPY else 50,
    max_shrinks=500,
    timeout=unlimited,
    min_satisfying_examples=0,
    verbosity=max(TestHypothesis.settings.verbosity, Verbosity.verbose),
    max_examples=10000 if MAIN else 200,
)

if MAIN:
    TestHypothesis().runTest()
from helpers.messagemock import message_mock, message_bridge
from helpers.fixtures import *  # noqa: F403
from glimpsebrowser.utils import qtutils, standarddir, usertypes, utils, version
from glimpsebrowser.misc import objects
from glimpsebrowser.qt import sip
import glimpsebrowser.app  # To register commands


ON_CI = 'CI' in os.environ
_glimpse_scheme_handler = None


# Set hypothesis settings
hypothesis.settings.register_profile('default',
                                     hypothesis.settings(deadline=600))
hypothesis.settings.register_profile('ci', hypothesis.settings(deadline=None))
hypothesis.settings.load_profile('ci' if ON_CI else 'default')


def _apply_platform_markers(config, item):
    """Apply a skip marker to a given item."""
    markers = [
        ('posix', not utils.is_posix, "Requires a POSIX os"),
        ('windows', not utils.is_windows, "Requires Windows"),
        ('linux', not utils.is_linux, "Requires Linux"),
        ('mac', not utils.is_mac, "Requires macOS"),
        ('not_mac', utils.is_mac, "Skipped on macOS"),
        ('not_frozen', getattr(sys, 'frozen', False),
         "Can't be run when frozen"),
def run():
    filterwarnings("error")
    filterwarnings("ignore", category=ImportWarning)
    filterwarnings("ignore", category=FutureWarning, module="pandas._version")

    # Fixed in recent versions but allowed by pytest=3.0.0; see #1630
    filterwarnings("ignore", category=DeprecationWarning, module="pluggy")

    # See https://github.com/numpy/numpy/pull/432
    filterwarnings("ignore", message="numpy.dtype size changed")
    filterwarnings("ignore", message="numpy.ufunc size changed")

    # See https://github.com/HypothesisWorks/hypothesis/issues/1674
    filterwarnings(
        "ignore",
        message=(
            "The virtualenv distutils package at .+ appears to be in the "
            "same location as the system distutils?"
        ),
        category=UserWarning,
    )

    # Imported by Pandas in version 1.9, but fixed in later versions.
    filterwarnings(
        "ignore", message="Importing from numpy.testing.decorators is deprecated")
    filterwarnings(
        "ignore", message="Importing from numpy.testing.nosetester is deprecated")

    # User-facing warning which does not apply to our own tests
    filterwarnings("ignore", category=NonInteractiveExampleWarning)

    new_home = mkdtemp()
    set_hypothesis_home_dir(new_home)
    assert settings.default.database.path.startswith(new_home)

    charmap()
    assert os.path.exists(charmap_file()), charmap_file()
    assert isinstance(settings, type)

    # We do a smoke test here before we mess around with settings.
    x = settings()

    from hypothesis import _settings as settings_module

    for s in settings_module.all_settings.values():
        v = getattr(x, s.name)
        # Check if it has a dynamically defined default and if so skip
        # comparison.
        if getattr(settings, s.name).show_default:
            assert v == s.default, "%r == x.%s != s.%s == %r" % (
                v, s.name, s.name, s.default,
            )

    settings.register_profile(
        "default", settings(max_examples=20 if IN_COVERAGE_TESTS else not_set))
    settings.register_profile("speedy", settings(max_examples=5))
    settings.register_profile("debug", settings(verbosity=Verbosity.debug))
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))
@flaky(min_passes=1, max_runs=2)
def test_can_time_out_in_simplify():
    def slow_always_true(x):
        time.sleep(0.1)
        return True

    start = time.time()
    find(s.lists(s.booleans()), slow_always_true,
         settings=settings(timeout=0.1, database=None))
    finish = time.time()
    run_time = finish - start
    assert run_time <= 0.4


some_normal_settings = settings()


def test_is_not_normally_default():
    assert settings.default is not some_normal_settings


@given(s.booleans())
@some_normal_settings
def test_settings_are_default_in_given(x):
    assert settings.default is some_normal_settings


def test_given_shrinks_pytest_helper_errors():
    final_value = [None]
def test_any_with_dotall_generate_newline_binary(pattern):
    find_any(
        st.from_regex(pattern), lambda s: s == b"\n",
        settings(max_examples=10**6))
def test_fullmatch_generates_example(pattern, matching_str):
    find_any(
        st.from_regex(pattern, fullmatch=True),
        lambda s: s == matching_str,
        settings(max_examples=10**6),
    )
def test_bordering_on_a_leap_year():
    x = find(
        datetimes(min_year=2002, max_year=2005),
        lambda x: x.month == 2 and x.day == 29,
        settings=settings(database=None, max_examples=10**7, timeout=-1))
    assert x.year == 2004
def test_can_generate(pattern, encode):
    if encode:
        pattern = pattern.encode("ascii")
    with local_settings(
            settings(suppress_health_check=[HealthCheck.data_too_large])):
        assert_all_examples(st.from_regex(pattern), re.compile(pattern).search)
def load_pyspark_profile():
    if settings()._current_profile != 'pyspark':
        # Logger("sparkle-hypothesis").warning(
        #     "Switching hypothesis profile to 'pyspark'")
        settings.load_profile("pyspark")
def load_default_profile():
    if settings()._current_profile != 'default':
        # Logger("sparkle-hypothesis").warning(
        #     "Switching hypothesis profile to 'default'")
        settings.load_profile('default')
def no_deadline(fn):
    try:
        return hypothesis.settings(deadline=None)(fn)
    except hypothesis.errors.InvalidArgument:
        return fn
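# Hypothetical usage of no_deadline: applied on top of @given so the wrapped
# test keeps running even on Hypothesis versions where the deadline setting
# is unavailable (the try/except above swallows InvalidArgument there).
import hypothesis
import hypothesis.strategies as st


@no_deadline
@hypothesis.given(st.integers())
def test_slow_property(x):
    # The body may legitimately exceed the default deadline.
    assert x == x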
    def add_value(self, v):
        writer.writerow(['add_value'])
        return v

    @rule(k=keys, v=values)
    def save(self, k, v):
        writer.writerow(['save'])
        self.model[k].add(v)
        self.database.save(k, v)

    @rule(k=keys, v=values)
    def delete(self, k, v):
        writer.writerow(['delete'])
        self.model[k].discard(v)
        # Comment this out to watch the test catch the divergence
        self.database.delete(k, v)

    @rule(k=keys)
    def values_agree(self, k):
        assert set(self.database.fetch(k)) == self.model[k]

    def teardown(self):
        writer.writerow(['teardown'])
        shutil.rmtree(self.tempd)


# Adjust any settings here; the defaults are 100 examples and 50 steps
DatabaseComparison.TestCase.settings = settings(
    max_examples=200, stateful_step_count=100,
    suppress_health_check=[HealthCheck.data_too_large])
TestDBComparison = DatabaseComparison.TestCase
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER

from __future__ import division, print_function, absolute_import

import os

import hypothesis.strategies as st
from hypothesis import find, settings, given

settings.register_profile('benchmarking', settings(
    database=None,
))

import pytest
import random


def setup_module():
    settings.load_profile('benchmarking')


def teardown_module():
    settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))


@st.composite
def sorted_three(draw):
def example(self, random=None):
    """Provide an example of the sort of value that this strategy
    generates. This is biased to be slightly simpler than is typical
    for values from this strategy, for clarity purposes.

    This method shouldn't be taken too seriously. It's here for
    interactive exploration of the API, not for any sort of real
    testing.

    This method is part of the public API.
    """
    context = _current_build_context.value
    if context is not None:
        if context.data is not None and context.data.depth > 0:
            note_deprecation(
                'Using example() inside a strategy definition is a bad '
                'idea. It will become an error in a future version of '
                "Hypothesis, but it's unlikely that it's doing what you "
                'intend even now. Instead consider using '
                'hypothesis.strategies.builds() or '
                '@hypothesis.strategies.composite to define your strategy.'
                ' See '
                'https://hypothesis.readthedocs.io/en/latest/data.html'
                '#hypothesis.strategies.builds or '
                'https://hypothesis.readthedocs.io/en/latest/data.html'
                '#composite-strategies for more details.'
            )
        else:
            note_deprecation(
                'Using example() inside a test function is a bad '
                'idea. It will become an error in a future version of '
                "Hypothesis, but it's unlikely that it's doing what you "
                'intend even now. Instead consider using '
                'hypothesis.strategies.data() to draw '
                'more examples during testing. See '
                'https://hypothesis.readthedocs.io/en/latest/data.html'
                '#drawing-interactively-in-tests for more details.'
            )

    from hypothesis import find, settings, Verbosity

    # Conjecture will always try the zero example first. This would result
    # in us producing the same example each time, which is boring, so we
    # deliberately skip the first example it feeds us.
    first = []

    def condition(x):
        if first:
            return True
        else:
            first.append(x)
            return False

    try:
        return find(
            self, condition, random=random, settings=settings(
                max_shrinks=0,
                database=None,
                verbosity=Verbosity.quiet,
            )
        )
    except (NoSuchExample, Unsatisfiable):
        # This can happen when a strategy has only one example. e.g.
        # st.just(x). In that case we wanted the first example after all.
        if first:
            return first[0]
        raise NoExamples(u'Could not find any valid examples in 100 tries')
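# The deprecation notes above recommend drawing values inside tests via
# st.data() instead of calling example(); a short sketch of that pattern.
from hypothesis import given, strategies as st


@given(st.data())
def test_draw_interactively(data):
    # Values drawn through `data` participate in shrinking and replay,
    # unlike values produced by example().
    n = data.draw(st.integers(min_value=0, max_value=10))
    xs = data.draw(st.lists(st.integers(), max_size=n))
    assert len(xs) <= n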
from helpers.messagemock import message_mock
from helpers.fixtures import *  # noqa: F403
from helpers import utils as testutils
from qutebrowser.utils import qtutils, standarddir, usertypes, utils, version
from qutebrowser.misc import objects, earlyinit
from qutebrowser.qt import sip
import qutebrowser.app  # To register commands


_qute_scheme_handler = None


# Set hypothesis settings
hypothesis.settings.register_profile(
    'default', hypothesis.settings(
        deadline=600,
        suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture],
    ))
hypothesis.settings.register_profile(
    'ci', hypothesis.settings(
        deadline=None,
        suppress_health_check=[
            hypothesis.HealthCheck.function_scoped_fixture,
            hypothesis.HealthCheck.too_slow,
        ]))
hypothesis.settings.load_profile('ci' if testutils.ON_CI else 'default')


def _apply_platform_markers(config, item):
    """Apply a skip marker to a given item."""
    markers = [
        ('posix', pytest.mark.skipif, not utils.is_posix,
def normalize(
    base_name,
    test_function,
    *,
    required_successes=100,
    allowed_to_update=False,
    max_dfas=10,
):
    """Attempt to ensure that this test function successfully normalizes -
    i.e. whenever it declares a test case to be interesting, we are able to
    shrink that to the same interesting test case (which logically should be
    the shortlex minimal interesting test case, though we may not be able to
    detect if it is).

    Will run until we have seen ``required_successes`` many interesting
    test cases in a row normalize to the same value.

    If ``allowed_to_update`` is True, whenever we fail to normalize we will
    learn a new DFA-based shrink pass that allows us to make progress. Any
    learned DFAs will be written back into the learned DFA file at the end
    of this function. If ``allowed_to_update`` is False, this will raise an
    error as soon as it encounters a failure to normalize.

    Additionally, if more than ``max_dfas`` DFAs are required to normalize
    this test function, this function will raise an error - it's essentially
    designed for small patches that other shrink passes don't cover, and if
    it's learning too many patches then you need a better shrink pass than
    this can provide.
    """
    # Need import inside the function to avoid circular imports
    from hypothesis.internal.conjecture.engine import BUFFER_SIZE, ConjectureRunner

    runner = ConjectureRunner(
        test_function,
        settings=settings(database=None, suppress_health_check=HealthCheck.all()),
        ignore_limits=True,
    )
    seen = set()

    dfas_added = 0

    found_interesting = False
    consecutive_successes = 0
    failures_to_find_interesting = 0
    while consecutive_successes < required_successes:
        attempt = runner.cached_test_function(b"", extend=BUFFER_SIZE)
        if attempt.status < Status.INTERESTING:
            failures_to_find_interesting += 1
            assert (
                found_interesting or failures_to_find_interesting <= 1000
            ), "Test function seems to have no interesting test cases"
            continue

        found_interesting = True

        target = attempt.interesting_origin

        def shrinking_predicate(d):
            return d.status == Status.INTERESTING and d.interesting_origin == target

        if target not in seen:
            seen.add(target)
            runner.shrink(attempt, shrinking_predicate)
            continue

        previous = fully_shrink(
            runner, runner.interesting_examples[target], shrinking_predicate
        )
        current = fully_shrink(runner, attempt, shrinking_predicate)

        if current.buffer == previous.buffer:
            consecutive_successes += 1
            continue

        consecutive_successes = 0

        if not allowed_to_update:
            raise FailedToNormalise(
                f"Shrinker failed to normalize {previous.buffer!r} to "
                f"{current.buffer!r} and we are not allowed to learn new DFAs."
            )

        if dfas_added >= max_dfas:
            raise FailedToNormalise(
                f"Test function is too hard to learn: Added {dfas_added} "
                "DFAs and still not done."
            )

        dfas_added += 1

        new_dfa = learn_a_new_dfa(
            runner, previous.buffer, current.buffer, shrinking_predicate
        )

        name = base_name + "-" + hashlib.sha256(
            repr(new_dfa).encode()).hexdigest()[:10]

        # If there is a name collision this DFA should already be being
        # used for shrinking, so we should have already been able to shrink
        # v further.
        assert name not in SHRINKING_DFAS
        SHRINKING_DFAS[name] = new_dfa

    if dfas_added > 0:
        # We've learned one or more DFAs in the course of normalising, so now
        # we update the file to record those for posterity.
        update_learned_dfas()
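# A hedged sketch of driving normalize() with a toy test function over the
# conjecture byte stream; the threshold and names are illustrative, and the
# exact interesting_origin semantics depend on the Hypothesis version.
def toy_test_function(data):
    if data.draw_bits(8) >= 200:
        data.mark_interesting()


normalize("toy", toy_test_function, required_successes=10)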
import pytest
import hypothesis
from PyQt5.QtCore import PYQT_VERSION

pytest.register_assert_rewrite('helpers')

from helpers import logfail
from helpers.logfail import fail_on_logging
from helpers.messagemock import message_mock
from helpers.fixtures import *
from qutebrowser.utils import qtutils


# Set hypothesis settings
hypothesis.settings.register_profile('default',
                                     hypothesis.settings(strict=True))
hypothesis.settings.load_profile('default')


def _apply_platform_markers(config, item):
    """Apply a skip marker to a given item."""
    markers = [
        ('posix', os.name != 'posix', "Requires a POSIX os"),
        ('windows', os.name != 'nt', "Requires Windows"),
        ('linux', not sys.platform.startswith('linux'), "Requires Linux"),
        ('osx', sys.platform != 'darwin', "Requires OS X"),
        ('not_osx', sys.platform == 'darwin', "Skipped on OS X"),
        ('not_frozen', getattr(sys, 'frozen', False),
         "Can't be run when frozen"),
        ('frozen', not getattr(sys, 'frozen', False),
         "Can only run when frozen"),
    data = runner.cached_test_function(attempt)
    assert data.status == Status.VALID
    assert attempt.startswith(data.buffer)


def test_exhaust_space():
    with deterministic_PRNG():
        runner = ConjectureRunner(
            lambda data: data.draw_bits(1), settings=TEST_SETTINGS
        )
        runner.run()
        assert runner.tree.is_exhausted
        assert runner.valid_examples == 2


SMALL_COUNT_SETTINGS = settings(TEST_SETTINGS, max_examples=500)


def test_discards_kill_branches():
    starts = set()

    with deterministic_PRNG():

        def test(data):
            assert runner.call_count <= 256
            while True:
                data.start_example(1)
                b = data.draw_bits(8)
                data.stop_example(b != 0)
                if len(data.buffer) == 1:
                    s = bytes(data.buffer)
from helpers import logfail
from helpers.logfail import fail_on_logging
from helpers.messagemock import message_mock, message_bridge
from helpers.fixtures import *  # noqa: F403
from helpers import utils as testutils
from qutebrowser.utils import qtutils, standarddir, usertypes, utils, version
from qutebrowser.misc import objects, earlyinit
from qutebrowser.qt import sip
import qutebrowser.app  # To register commands


_qute_scheme_handler = None


# Set hypothesis settings
hypothesis.settings.register_profile('default',
                                     hypothesis.settings(deadline=600))
hypothesis.settings.register_profile('ci', hypothesis.settings(deadline=None))
hypothesis.settings.load_profile('ci' if testutils.ON_CI else 'default')


def _apply_platform_markers(config, item):
    """Apply a skip marker to a given item."""
    markers = [
        ('posix', pytest.mark.skipif, not utils.is_posix,
         "Requires a POSIX os"),
        ('windows', pytest.mark.skipif, not utils.is_windows,
         "Requires Windows"),
        ('linux', pytest.mark.skipif, not utils.is_linux, "Requires Linux"),
        ('mac', pytest.mark.skipif, not utils.is_mac, "Requires macOS"),
        ('not_mac', pytest.mark.skipif, utils.is_mac, "Skipped on macOS"),
        ('not_frozen', pytest.mark.skipif, getattr(sys, 'frozen', False),
def test_can_minimize_float_arrays():
    x = find(
        arrays(float, 50), lambda t: t.sum() >= 1.0,
        settings=settings(database=None))
    assert 1.0 <= x.sum() <= 1.1