def capture_hypothesis_output() -> Generator[List[str], None, None]:
    """Collect everything Hypothesis reports into a list of strings.

    It allows us to have more granular control over Schemathesis output.

    Usage::

        @given(i=st.integers())
        def test(i):
            assert 0

        with capture_hypothesis_output() as output:
            test()  # hypothesis test
            # output == ["Falsifying example: test(i=0)"]
    """
    collected: List[str] = []

    def collect(value: str) -> None:
        # Drop messages that could be confusing in the Schemathesis context
        if not value.startswith(IGNORED_PATTERNS):
            collected.append(value)

    # ``with_reporter`` is untyped upstream, hence the ignore
    with with_reporter(collect):  # type: ignore
        yield collected
def pytest_runtest_call(item):
    """Hook wrapper: instrument Hypothesis tests with reporting and statistics.

    Non-Hypothesis items run unmodified, except that using ``@settings``
    without ``@given`` is rejected as a user error.
    """
    if not hasattr(item, "obj"):
        # Not a normal test item (e.g. a doctest); nothing to instrument.
        yield
    elif not is_hypothesis_test(item.obj):
        # If @given was not applied, check whether other hypothesis decorators
        # were applied, and fail if so
        if getattr(item.obj, "_hypothesis_internal_settings_applied", False):
            raise InvalidArgument(
                "Using `@settings` on a test without `@given` is completely pointless."
            )
        yield
    else:
        if item.get_closest_marker("parametrize") is not None:
            # Give every parametrized test invocation a unique database key
            item.obj.hypothesis.inner_test._hypothesis_internal_add_digest = item.nodeid.encode(
                "utf-8"
            )
        # Capture all Hypothesis report output while the test runs.
        store = StoringReporter(item.config)

        def note_statistics(stats):
            # Render the statistics now and stash them both globally and on
            # the item, keyed by nodeid, for the terminal summary.
            lines = [item.nodeid + ":", ""] + stats.get_description() + [""]
            gathered_statistics[item.nodeid] = lines
            item.hypothesis_statistics = lines

        with collector.with_value(note_statistics):
            with with_reporter(store):
                yield
        if store.results:
            # Attach captured report lines for later display on failure.
            item.hypothesis_report_information = list(store.results)
def pytest_pyfunc_call(pyfuncitem):
    """Hook wrapper: remember the last falsifying example Hypothesis printed."""
    from hypothesis.reporting import with_reporter

    reporter = StoringReporter()
    with with_reporter(reporter):
        yield
    if reporter.results:
        # The final reported line is the falsifying example itself.
        pyfuncitem.hypothesis_falsifying_example = reporter.results[-1]
def pytest_pyfunc_call(pyfuncitem):
    """Hook wrapper: collect all Hypothesis report output for this test."""
    from hypothesis.reporting import with_reporter

    reporter = StoringReporter(pyfuncitem.config)
    with with_reporter(reporter):
        yield
    if reporter.results:
        # Attach a copy of everything reported for later display.
        pyfuncitem.hypothesis_report_information = list(reporter.results)
def pytest_pyfunc_call(pyfuncitem):
    """Hook wrapper: collect all Hypothesis report output for this test."""
    from hypothesis.reporting import with_reporter

    reporter = StoringReporter()
    with with_reporter(reporter):
        yield
    if reporter.results:
        # Attach a copy of everything reported for later display.
        pyfuncitem.hypothesis_report_information = list(reporter.results)
def test_does_not_print_notes_if_all_succeed():
    """A passing test must produce no output, even if it calls note()."""
    @given(integers())
    def test(i):
        note('Hi there')

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    # Notes are only replayed for the failing example; success is silent.
    assert not captured.getvalue()
def capture_out():
    """Context manager: redirect stdout to a StringIO and yield the buffer."""
    previous = sys.stdout
    try:
        buffer = StringIO()
        sys.stdout = buffer
        # Route Hypothesis reporting through the default reporter so it
        # also lands in the captured stream.
        with with_reporter(default):
            yield buffer
    finally:
        # Always restore the real stdout, even on error.
        sys.stdout = previous
def test_can_seed_random():
    """Failures involving the random module report the seed used."""
    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with pytest.raises(AssertionError):
                @given(st.random_module())
                def test(r):
                    assert False

                test()
    # The seed must be reported so the failure is reproducible.
    assert 'random.seed(0)' in captured.getvalue()
def test_prints_output_by_default():
    """With the default reporter, failures print a falsifying example."""
    @given(integers())
    def test_int(x):
        assert False

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with pytest.raises(AssertionError):
                test_int()
    assert u'Falsifying example' in captured.getvalue()
def test_can_suppress_output():
    """The silent reporter suppresses the falsifying-example report."""
    @given(integers())
    def test_int(x):
        assert False

    with capture_out() as captured:
        with reporting.with_reporter(reporting.silent):
            with pytest.raises(AssertionError):
                test_int()
    assert u'Falsifying example' not in captured.getvalue()
def test_does_not_print_notes_if_all_succeed():
    """A passing test must produce no output, even if it calls note()."""
    @given(integers())
    @settings(verbosity=Verbosity.normal)
    def test(i):
        note("Hi there")

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    # Notes are only replayed for the failing example; success is silent.
    assert not captured.getvalue()
def execute_explicit_examples(state, wrapped_test, arguments, kwargs):
    """Run every ``@example`` attached to *wrapped_test* exactly once.

    Decorators are applied bottom-up, so ``reversed`` restores the textual
    (top-down) order. Report output is buffered per example so it can be
    replayed only on failure, or rewritten under verbose verbosity.
    """
    original_argspec = getfullargspec(state.test)
    for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
        # Start from keyword-only defaults, then layer the example on top.
        example_kwargs = dict(original_argspec.kwonlydefaults or {})
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    "example has too many arguments for test. "
                    "Expected at most %d but got %d"
                    % (len(original_argspec.args), len(example.args))
                )
            # Positional example args bind to the *last* positional
            # parameters of the test (earlier ones come from elsewhere).
            example_kwargs.update(
                dict(zip(original_argspec.args[-len(example.args) :], example.args))
            )
        else:
            example_kwargs.update(example.kwargs)
        if Phase.explicit not in state.settings.phases:
            # Explicit phase disabled: validate the example but don't run it.
            continue
        example_kwargs.update(kwargs)
        with local_settings(state.settings):
            fragments_reported = []

            def report_buffered():
                # Flush buffered report lines, clearing the buffer in place.
                for f in fragments_reported:
                    report(f)
                del fragments_reported[:]

            try:
                with with_reporter(fragments_reported.append):
                    state.execute_once(
                        ArtificialDataForExample(example_kwargs),
                        is_final=True,
                        print_example=True,
                    )
            except UnsatisfiedAssumption:
                # Odd though it seems, we deliberately support explicit examples that
                # are then rejected by a call to `assume()`. As well as iterative
                # development, this is rather useful to replay Hypothesis' part of
                # a saved failure when other arguments are supplied by e.g. pytest.
                # See https://github.com/HypothesisWorks/hypothesis/issues/2125
                pass
            except BaseException:
                # Show what we were trying before propagating the real error.
                report_buffered()
                raise
            if current_verbosity() >= Verbosity.verbose:
                prefix = "Falsifying example"
                assert fragments_reported[0].startswith(prefix)
                # Under verbose output this is just an attempt, not a failure.
                fragments_reported[0] = (
                    "Trying example" + fragments_reported[0][len(prefix) :]
                )
                report_buffered()
def test_can_seed_random():
    """Failures involving the random module report the seeder used."""
    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with pytest.raises(AssertionError):
                @given(st.random_module())
                def test(r):
                    raise AssertionError

                test()
    # The RandomSeeder repr must appear so the failure is reproducible.
    assert "RandomSeeder(0)" in captured.getvalue()
def test_error_in_strategy_produces_health_check_error():
    """A strategy that always raises triggers a health check failure."""
    def boom(x):
        raise ValueError()

    @given(st.integers().map(boom))
    def test(x):
        pass

    with raises(FailedHealthCheck) as excinfo:
        with reporting.with_reporter(reporting.default):
            test()
    # The message should not mention executors for a plain test.
    assert 'executor' not in excinfo.value.args[0]
def test_prints_notes_once_on_failure():
    """A note is replayed exactly once, for the final falsifying example."""
    @given(lists(integers()), settings=Settings(database=None))
    def test(xs):
        note('Hi there')
        assert sum(xs) > 100

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with raises(AssertionError):
                test()
    printed = captured.getvalue().strip().splitlines()
    # Exactly the falsifying-example line plus one replay of the note.
    assert len(printed) == 2
    assert 'Hi there' in printed
def test_examples_are_tried_in_order():
    """Explicit examples run in textual order, wherever the decorator sits."""
    @example(x=1)
    @example(x=2)
    @given(integers(), settings=Settings(max_examples=0))
    @example(x=3)
    def test(x):
        print(u"x -> %d" % (x,))

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    printed = captured.getvalue().splitlines()
    # Examples above @given first (top-down), then those below it.
    assert printed == [u"x -> 1", 'x -> 2', 'x -> 3']
def test_captures_output_from_child():
    """Report output produced in a forked child reaches the parent."""
    class TestForking(ForkingTestCase):
        @given(integers())
        def test_positive(self, x):
            assert x > 0

    with reporting.with_reporter(reporting.default):
        with capture_out() as captured:
            with pytest.raises(AssertionError):
                TestForking(u"test_positive").test_positive()
    printed = captured.getvalue()
    assert u"Falsifying example: test_positive" in printed
def test_captures_output_from_child_under_abnormal_exit():
    """Even a hard os._exit in the forked child must not lose the report."""
    class TestForking(ForkingTestCase):
        @given(integers())
        def test_death(self, x):
            os._exit(1)

    with reporting.with_reporter(reporting.default):
        with capture_out() as captured:
            with pytest.raises(AbnormalExit):
                TestForking(u"test_death").test_death()
    printed = captured.getvalue()
    assert u"Falsifying example: test_death" in printed
def test_does_not_print_on_explicit_examples_if_no_failure():
    """A passing explicit example is not reported as falsifying."""
    @example(1)
    @given(integers())
    def test_positive(x):
        assert x > 0

    with reporting.with_reporter(reporting.default):
        with pytest.raises(AssertionError):
            with capture_out() as captured:
                test_positive()
    printed = captured.getvalue()
    # The failure comes from a generated value, so the explicit example
    # (which passes) must not be announced as the falsifying one.
    assert "Falsifying example: test_positive(1)" not in printed
def test_captures_output_from_child_under_abnormal_exit():
    """Even a hard os._exit in the forked child must not lose the report."""
    class TestForking(ForkingTestCase):
        @given(integers())
        def test_death(self, x):
            os._exit(1)

    with reporting.with_reporter(reporting.default):
        with capture_out() as captured:
            with pytest.raises(AbnormalExit):
                TestForking('test_death').test_death()
    printed = captured.getvalue()
    assert 'Falsifying example: test_death' in printed
def test_does_not_report_zero_seed():
    """No seed is reported when Hypothesis did not reseed the RNG itself."""
    # Pin the global RNG so the test body fails deterministically.
    random.seed(0)
    zero_value = random.randint(0, 3)
    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with pytest.raises(AssertionError):
                @given(st.integers())
                def test(r):
                    assert random.randint(0, 3) != zero_value

                test()
    assert 'random.seed' not in captured.getvalue()
def test_prints_verbose_output_for_explicit_examples():
    """Verbose verbosity announces each explicit example as it is tried."""
    @settings(verbosity=Verbosity.verbose)
    @example("NOT AN INTEGER")
    @given(integers())
    def test_always_passes(x):
        pass

    with reporting.with_reporter(reporting.default):
        with capture_out() as captured:
            test_always_passes()
    printed = captured.getvalue()
    assert u"Trying example: test_always_passes(x='NOT AN INTEGER')" in printed
def test_prints_output_for_explicit_examples():
    """A failing explicit example is reported as the falsifying example."""
    @example(-1)
    @given(integers())
    def test_positive(x):
        assert x > 0

    with reporting.with_reporter(reporting.default):
        with pytest.raises(AssertionError):
            with capture_out() as captured:
                test_positive()
    printed = captured.getvalue()
    assert u'Falsifying example: test_positive(x=-1)' in printed
def test_does_not_print_on_explicit_examples_if_no_failure():
    """A passing explicit example is not reported as falsifying."""
    @example(1)
    @given(integers())
    def test_positive(x):
        assert x > 0

    with reporting.with_reporter(reporting.default):
        with pytest.raises(AssertionError):
            with capture_out() as captured:
                test_positive()
    printed = captured.getvalue()
    # The failure comes from a generated value, not the explicit example.
    assert u'Falsifying example: test_positive(1)' not in printed
def test_captures_output_from_child():
    """Report output produced in a forked child reaches the parent."""
    class TestForking(ForkingTestCase):
        @given(integers())
        def test_positive(self, x):
            assert x > 0

    with reporting.with_reporter(reporting.default):
        with capture_out() as captured:
            with pytest.raises(AssertionError):
                TestForking('test_positive').test_positive()
    printed = captured.getvalue()
    assert 'Falsifying example: test_positive' in printed
def test_prints_on_failure_by_default():
    """Failures print a minimal, shrunk falsifying example by default."""
    @given(integers(), integers(), settings=Settings(max_examples=200, timeout=-1))
    def test_ints_are_sorted(balthazar, evans):
        assume(evans >= 0)
        assert balthazar <= evans

    with raises(AssertionError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test_ints_are_sorted()
    printed = captured.getvalue()
    stripped = [line.strip() for line in printed.split(u"\n")]
    # Shrinking should land on the minimal counterexample (1, 0).
    assert u"Falsifying example: test_ints_are_sorted(balthazar=1, evans=0)" in stripped
def test_prints_notes_once_on_failure():
    """A note is replayed exactly once, for the final falsifying example."""
    @given(lists(integers()))
    @settings(database=None, verbosity=Verbosity.normal)
    def test(xs):
        note('Hi there')
        if sum(xs) <= 100:
            raise ValueError()

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with raises(ValueError):
                test()
    printed = captured.getvalue().strip().splitlines()
    assert printed.count('Hi there') == 1
def test_examples_are_tried_in_order():
    """Explicit examples run in textual order, wherever the decorator sits."""
    @example(x=1)
    @example(x=2)
    @given(integers())
    @settings(max_examples=0)
    @example(x=3)
    def test(x):
        print(u"x -> %d" % (x,))

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    printed = captured.getvalue().splitlines()
    # Examples above @given first (top-down), then those below it.
    assert printed == [u"x -> 1", 'x -> 2', 'x -> 3']
def test_captures_original_repr_of_example():
    """The report shows the argument as passed in, not as mutated."""
    @example(x=[])
    @given(integers())
    def test_mutation(x):
        x.append(1)
        assert not x

    with reporting.with_reporter(reporting.default):
        with pytest.raises(AssertionError):
            with capture_out() as captured:
                test_mutation()
    printed = captured.getvalue()
    # The test appended to x, but the repr is taken before execution.
    assert u'Falsifying example: test_mutation(x=[])' in printed
def test_suppressing_error_in_value_generation():
    """Suppressing the health check lets the strategy error propagate."""
    def boom(x):
        raise ValueError()

    @settings(suppress_health_check=[HealthCheck.exception_in_generation])
    @given(st.integers().map(boom))
    def test(x):
        pass

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with raises(ValueError):
                test()
    # The error is raised, not printed to the report stream.
    assert 'ValueError' not in captured.getvalue()
def pytest_runtest_call(item):
    """Hook wrapper: capture Hypothesis reports and statistics for *item*."""
    if not (hasattr(item, 'obj') and is_hypothesis_test(item.obj)):
        # Not a Hypothesis test; run it without instrumentation.
        yield
    else:
        # Capture all Hypothesis report output while the test runs.
        store = StoringReporter(item.config)

        def note_statistics(stats):
            # Stash the statistics object, keyed by nodeid, for the summary.
            gathered_statistics[item.nodeid] = stats

        with collector.with_value(note_statistics):
            with with_reporter(store):
                yield
        if store.results:
            # Attach captured report lines for later display on failure.
            item.hypothesis_report_information = list(store.results)
def test_given_twice_is_same():
    """Two data() strategies share a single numbered sequence of draws."""
    @given(st.data(), st.data())
    def test(data1, data2):
        data1.draw(st.integers())
        data2.draw(st.integers())
        raise ValueError()

    with raises(ValueError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test()
    printed = captured.getvalue()
    assert "Draw 1: 0" in printed
    assert "Draw 2: 0" in printed
def test_examples_are_tried_in_order():
    """Explicit examples run in textual order, wherever the decorator sits."""
    @example(x=1)
    @example(x=2)
    @given(integers())
    @settings(phases=[Phase.explicit])
    @example(x=3)
    def test(x):
        print(f"x -> {x}")

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    printed = captured.getvalue().splitlines()
    # Examples above @given first (top-down), then those below it.
    assert printed == ["x -> 1", "x -> 2", "x -> 3"]
def test_examples_are_tried_in_order():
    """Explicit examples run in textual order, wherever the decorator sits."""
    @example(x=1)
    @example(x=2)
    @given(integers())
    @settings(phases=[Phase.explicit])
    @example(x=3)
    def test(x):
        print_unicode(u"x -> %d" % (x,))

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            test()
    printed = captured.getvalue().splitlines()
    # Examples above @given first (top-down), then those below it.
    assert printed == [u"x -> 1", u"x -> 2", u"x -> 3"]
def test_error_in_strategy_produces_only_one_traceback():
    """A strategy error should be shown once, not repeated per attempt."""
    def boom(x):
        raise ValueError()

    with Settings(strict=False):
        @given(st.integers().map(boom))
        def test(x):
            pass

        with raises(ValueError):
            with reporting.with_reporter(reporting.default):
                with capture_out() as captured:
                    test()
    # One traceback: the name appears on the raise line and the error line.
    assert captured.getvalue().count('ValueError') == 2
def test_prints_on_failure_by_default():
    """Failures print a minimal, shrunk falsifying example by default."""
    @given(integers(), integers())
    def test_ints_are_sorted(balthazar, evans):
        assume(evans >= 0)
        assert balthazar <= evans

    with pytest.raises(AssertionError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test_ints_are_sorted()
    printed = captured.getvalue()
    stripped = [line.strip() for line in printed.split('\n')]
    # Shrinking should land on the minimal counterexample (1, 0).
    assert (
        'Falsifying example: test_ints_are_sorted(balthazar=1, evans=0)'
        in stripped)
def pytest_pyfunc_call(pyfuncitem):
    """Hook wrapper: capture Hypothesis reports and statistics for the test."""
    if not getattr(pyfuncitem.obj, 'is_hypothesis_test', False):
        # Not a Hypothesis test; run it without instrumentation.
        yield
    else:
        # Capture all Hypothesis report output while the test runs.
        store = StoringReporter(pyfuncitem.config)

        def note_statistics(stats):
            # Stash the statistics object, keyed by test name, for the summary.
            gathered_statistics[pyfuncitem.name] = stats

        with collector.with_value(note_statistics):
            with with_reporter(store):
                yield
        if store.results:
            # Attach captured report lines for later display on failure.
            pyfuncitem.hypothesis_report_information = list(store.results)
def test_error_in_strategy_produces_only_one_traceback():
    """A strategy error should be shown once, not repeated per attempt."""
    def boom(x):
        raise ValueError()

    with settings(strict=False):
        @given(st.integers().map(boom))
        def test(x):
            pass

        with raises(ValueError):
            with reporting.with_reporter(reporting.default):
                with capture_out() as captured:
                    test()
    # One traceback: the name appears on the raise line and the error line.
    assert captured.getvalue().count('ValueError') == 2
def test_given_twice_is_same():
    """Two data() strategies share a single numbered sequence of draws."""
    @given(st.data(), st.data())
    def test(data1, data2):
        data1.draw(st.integers())
        data2.draw(st.integers())
        raise ValueError()

    with raises(ValueError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test()
    printed = captured.getvalue()
    assert 'Draw 1: 0' in printed
    assert 'Draw 2: 0' in printed
def test_prints_on_failure_by_default():
    """Failures print a minimal, shrunk falsifying example by default."""
    @given(int, int)
    def test_ints_are_sorted(balthazar, evans):
        assume(evans >= 0)
        assert balthazar <= evans

    with pytest.raises(AssertionError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test_ints_are_sorted()
    printed = captured.getvalue()
    stripped = [line.strip() for line in printed.split('\n')]
    # Shrinking should land on the minimal counterexample (1, 0).
    assert (
        'Falsifying example: test_ints_are_sorted(balthazar=1, evans=0)'
        in stripped)
def pytest_runtest_call(item):
    """Hook wrapper: capture Hypothesis reports and statistics for *item*."""
    if not (hasattr(item, "obj") and is_hypothesis_test(item.obj)):
        # Not a Hypothesis test; run it without instrumentation.
        yield
    else:
        # Capture all Hypothesis report output while the test runs.
        store = StoringReporter(item.config)

        def note_statistics(stats):
            # Render the statistics now and stash them both globally and on
            # the item, keyed by nodeid, for the terminal summary.
            lines = [item.nodeid + ":", ""] + stats.get_description() + [""]
            gathered_statistics[item.nodeid] = lines
            item.hypothesis_statistics = lines

        with collector.with_value(note_statistics):
            with with_reporter(store):
                yield
        if store.results:
            # Attach captured report lines for later display on failure.
            item.hypothesis_report_information = list(store.results)
def test_prints_on_failure():
    """Interactive draws are numbered and echoed when the test fails."""
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(), min_size=1))
        y = data.draw(st.sampled_from(x))
        assert y in x
        x.remove(y)
        assert y not in x

    with raises(AssertionError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test()
    printed = captured.getvalue()
    # Failure requires a duplicate element, so the minimal list is [0, 0].
    assert 'Draw 1: [0, 0]' in printed
    assert 'Draw 2: 0' in printed
def test_prints_labels_if_given_on_failure():
    """Draw labels are echoed next to the draw numbers in the report."""
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(0, 10), min_size=2), label="Some numbers")
        y = data.draw(st.sampled_from(x), label="A number")
        assert y in x
        x.remove(y)
        assert y not in x

    with raises(AssertionError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test()
    printed = captured.getvalue()
    # Failure requires a duplicate element, so the minimal list is [0, 0].
    assert "Draw 1 (Some numbers): [0, 0]" in printed
    assert "Draw 2 (A number): 0" in printed
def test_prints_on_failure():
    """Interactive draws are numbered and echoed when the test fails."""
    @given(st.data())
    def test(data):
        x = data.draw(st.lists(st.integers(0, 10), min_size=2))
        y = data.draw(st.sampled_from(x))
        x.remove(y)
        if y in x:
            raise ValueError()

    with raises(ValueError):
        with capture_out() as captured:
            with reporting.with_reporter(reporting.default):
                test()
    printed = captured.getvalue()
    # Failure requires a duplicate element, so the minimal list is [0, 0].
    assert "Draw 1: [0, 0]" in printed
    assert "Draw 2: 0" in printed
def test_prints_note_in_failing_example():
    """Only the note from the failing example is replayed in the report."""
    @example(x=42)
    @example(x=43)
    @given(integers())
    def test(x):
        note("x -> %d" % (x,))
        assert x == 42

    with capture_out() as captured:
        with reporting.with_reporter(reporting.default):
            with pytest.raises(AssertionError):
                test()
    printed = captured.getvalue()
    print_unicode(printed)
    # x=43 fails and its note is replayed; x=42 passes, so its note is not.
    assert "x -> 43" in printed
    assert "x -> 42" not in printed