def process_arguments_to_given(wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs, argspec, test, settings):
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    search_strategy = st.tuples(
        st.just(arguments),
        st.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)))

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
def process_arguments_to_given(wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs, argspec, test, settings):
    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)))

    search_strategy = given_specifier

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
def process_arguments_to_given(
    wrapped_test,
    arguments,
    kwargs,
    generator_arguments,
    generator_kwargs,
    argspec,
    test,
    settings,
):
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    # We use TupleStrategy over tuples() here to avoid polluting
    # st.STRATEGY_CACHE with references (see #493), and because this is
    # trivial anyway if the fixed_dictionaries strategy is cacheable.
    search_strategy = TupleStrategy(
        (
            st.just(arguments),
            st.fixed_dictionaries(generator_kwargs).map(
                lambda args: dict(args, **kwargs)
            ),
        )
    )

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
def process_arguments_to_given(
    wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs,
    argspec, test, settings
):
    selfy = None
    arguments, kwargs = convert_positional_arguments(
        wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    search_strategy = st.tuples(
        st.just(arguments),
        st.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)
        )
    )

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
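# Context sketch (not part of the library source): every process_arguments_to_given
# variant above serves the public @given decorator, which is the only place a user
# normally touches this machinery. A minimal, hedged usage example, assuming a
# standard Hypothesis install and pytest-style test discovery:
from hypothesis import given
import hypothesis.strategies as st


@given(st.lists(st.integers()))
def test_reversing_twice_is_a_no_op(xs):
    # Hypothesis draws `xs` from the strategy, bundles it with any explicitly
    # passed arguments (as the helpers above do), and runs the test repeatedly.
    assert list(reversed(list(reversed(xs)))) == xs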
def wrapped_test(*arguments, **kwargs):
    settings = wrapped_test._hypothesis_internal_use_settings
    if wrapped_test._hypothesis_internal_use_seed is not None:
        random = Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        random = Random(function_digest(test))
    else:
        random = new_random()

    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]
    test_runner = new_style_executor(selfy)

    for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    "example has too many arguments for test. "
                    "Expected at most %d but got %d"
                    % (len(original_argspec.args), len(example.args))
                )
            example_kwargs = dict(
                zip(original_argspec.args[-len(example.args) :], example.args)
            )
        else:
            example_kwargs = example.kwargs
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        message_on_failure = "Falsifying example: %s(%s)" % (
            test.__name__,
            arg_string(test, arguments, example_kwargs),
        )
        try:
            with BuildContext() as b:
                test_runner(None, lambda data: test(*arguments, **example_kwargs))
        except BaseException:
            report(message_on_failure)
            for n in b.notes:
                report(n)
            raise

    if settings.max_examples <= 0:
        return

    arguments = tuple(arguments)

    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(lambda args: dict(args, **kwargs)),
    )

    def fail_health_check(message, label):
        if label in settings.suppress_health_check:
            return
        message += (
            "\nSee https://hypothesis.readthedocs.io/en/latest/health"
            "checks.html for more information about this. "
        )
        message += (
            "If you want to disable just this health check, add %s "
            "to the suppress_health_check settings for this test."
        ) % (label,)
        raise FailedHealthCheck(message)

    search_strategy = given_specifier
    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    perform_health_check = settings.perform_health_check
    perform_health_check &= Settings.default.perform_health_check

    from hypothesis.internal.conjecture.data import TestData, Status, StopTest

    if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
        return

    if perform_health_check:
        health_check_random = Random(random.getrandbits(128))
        # We "pre warm" the health check with one draw to give it some
        # time to calculate any cached data. This prevents the case
        # where the first draw of the health check takes ages because
        # of loading unicode data the first time.
        data = TestData(
            max_length=settings.buffer_size,
            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
        )
        with Settings(settings, verbosity=Verbosity.quiet):
            try:
                test_runner(
                    data,
                    reify_and_execute(search_strategy, lambda *args, **kwargs: None),
                )
            except BaseException:
                pass

        count = 0
        overruns = 0
        filtered_draws = 0
        start = time.time()
        while count < 10 and time.time() < start + 1 and filtered_draws < 50 and overruns < 20:
            try:
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    test_runner(
                        data,
                        reify_and_execute(search_strategy, lambda *args, **kwargs: None),
                    )
                count += 1
            except UnsatisfiedAssumption:
                filtered_draws += 1
            except StopTest:
                if data.status == Status.INVALID:
                    filtered_draws += 1
                else:
                    assert data.status == Status.OVERRUN
                    overruns += 1
            except InvalidArgument:
                raise
            except Exception:
                if HealthCheck.exception_in_generation in settings.suppress_health_check:
                    raise
                report(traceback.format_exc())
                if test_runner is default_new_style_executor:
                    fail_health_check(
                        "An exception occurred during data "
                        "generation in initial health check. "
                        "This indicates a bug in the strategy. "
                        "This could either be a Hypothesis bug or "
                        "an error in a function you've passed to "
                        "it to construct your data.",
                        HealthCheck.exception_in_generation,
                    )
                else:
                    fail_health_check(
                        "An exception occurred during data "
                        "generation in initial health check. "
                        "This indicates a bug in the strategy. "
                        "This could either be a Hypothesis bug or "
                        "an error in a function you've passed to "
                        "it to construct your data. Additionally, "
                        "you have a custom executor, which means "
                        "that this could be your executor failing "
                        "to handle a function which returns None. ",
                        HealthCheck.exception_in_generation,
                    )

        if overruns >= 20 or (not count and overruns > 0):
            fail_health_check(
                (
                    "Examples routinely exceeded the max allowable size. "
                    "(%d examples overran while generating %d valid ones)"
                    ". Generating examples this large will usually lead to"
                    " bad results. You should try setting average_size or "
                    "max_size parameters on your collections and turning "
                    "max_leaves down on recursive() calls."
                ) % (overruns, count),
                HealthCheck.data_too_large,
            )
        if filtered_draws >= 50 or (not count and filtered_draws > 0):
            fail_health_check(
                (
                    "It looks like your strategy is filtering out a lot "
                    "of data. Health check found %d filtered examples but "
                    "only %d good ones. This will make your tests much "
                    "slower, and also will probably distort the data "
                    "generation quite a lot. You should adapt your "
                    "strategy to filter less. This can also be caused by "
                    "a low max_leaves parameter in recursive() calls"
                ) % (filtered_draws, count),
                HealthCheck.filter_too_much,
            )
        runtime = time.time() - start
        if runtime > 1.0 or count < 10:
            fail_health_check(
                (
                    "Data generation is extremely slow: Only produced "
                    "%d valid examples in %.2f seconds (%d invalid ones "
                    "and %d exceeded maximum size). Try decreasing "
                    "size of the data you're generating (with e.g. "
                    "average_size or max_leaves parameters)."
                ) % (count, runtime, filtered_draws, overruns),
                HealthCheck.too_slow,
            )

    last_exception = [None]
    repr_for_last_exception = [None]

    def evaluate_test_data(data):
        try:
            result = test_runner(data, reify_and_execute(search_strategy, test))
            if result is not None and settings.perform_health_check:
                fail_health_check(
                    ("Tests run under @given should return None, but "
                     "%s returned %r instead.") % (test.__name__, result),
                    HealthCheck.return_value,
                )
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (HypothesisDeprecationWarning, FailedHealthCheck, StopTest):
            raise
        except Exception:
            last_exception[0] = traceback.format_exc()
            verbose_report(last_exception[0])
            data.mark_interesting()

    from hypothesis.internal.conjecture.engine import TestRunner

    falsifying_example = None
    database_key = str_to_bytes(fully_qualified_name(test))
    start_time = time.time()
    runner = TestRunner(
        evaluate_test_data, settings=settings, random=random, database_key=database_key
    )
    runner.run()
    run_time = time.time() - start_time
    timed_out = settings.timeout > 0 and run_time >= settings.timeout

    if runner.last_data is None:
        return

    if runner.last_data.status == Status.INTERESTING:
        falsifying_example = runner.last_data.buffer
        if settings.database is not None:
            settings.database.save(database_key, falsifying_example)
    else:
        if runner.valid_examples < min(
            settings.min_satisfying_examples, settings.max_examples
        ):
            if timed_out:
                raise Timeout(
                    (
                        "Ran out of time before finding a satisfying "
                        "example for "
                        "%s. Only found %d examples in " + "%.2fs."
                    ) % (
                        get_pretty_function_description(test),
                        runner.valid_examples,
                        run_time,
                    )
                )
            else:
                raise Unsatisfiable(
                    (
                        "Unable to satisfy assumptions of hypothesis "
                        "%s. Only %d examples considered "
                        "satisfied assumptions"
                    ) % (get_pretty_function_description(test), runner.valid_examples)
                )
        return

    assert last_exception[0] is not None

    try:
        with settings:
            test_runner(
                TestData.for_buffer(falsifying_example),
                reify_and_execute(search_strategy, test, print_example=True, is_final=True),
            )
    except (UnsatisfiedAssumption, StopTest):
        report(traceback.format_exc())
        raise Flaky(
            "Unreliable assumption: An example which satisfied "
            "assumptions on the first run now fails it."
        )

    report("Failed to reproduce exception. Expected: \n" + last_exception[0])

    filter_message = (
        "Unreliable test data: Failed to reproduce a failure "
        "and then when it came to recreating the example in "
        "order to print the test data with a flaky result "
        "the example was filtered out (by e.g. a "
        "call to filter in your strategy) when we didn't "
        "expect it to be."
    )

    try:
        test_runner(
            TestData.for_buffer(falsifying_example),
            reify_and_execute(
                search_strategy,
                test_is_flaky(test, repr_for_last_exception[0]),
                print_example=True,
                is_final=True,
            ),
        )
    except (UnsatisfiedAssumption, StopTest):
        raise Flaky(filter_message)
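# Context sketch (user-facing API, not library source): the explicit-examples loop
# near the top of wrapped_test replays cases registered via the public @example
# decorator (stored on hypothesis_explicit_examples) before any generated data is
# tried, subject to Phase.explicit. A minimal, hedged illustration:
from hypothesis import example, given
import hypothesis.strategies as st


@given(st.integers())
@example(0)  # always replayed exactly as written, ahead of generated examples
def test_abs_is_non_negative(x):
    assert abs(x) >= 0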
def wrapped_test(*arguments, **kwargs):
    settings = wrapped_test._hypothesis_internal_use_settings
    if wrapped_test._hypothesis_internal_use_seed is not None:
        random = Random(
            wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        random = Random(function_digest(test))
    else:
        random = new_random()

    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(
        wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]
    test_runner = new_style_executor(selfy)

    for example in reversed(getattr(
        wrapped_test, 'hypothesis_explicit_examples', ()
    )):
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' % (
                        len(original_argspec.args), len(example.args)))
            example_kwargs = dict(zip(
                original_argspec.args[-len(example.args):],
                example.args
            ))
        else:
            example_kwargs = example.kwargs
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        message_on_failure = 'Falsifying example: %s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs)
        )
        try:
            with BuildContext() as b:
                test_runner(
                    None,
                    lambda data: test(*arguments, **example_kwargs)
                )
        except BaseException:
            report(message_on_failure)
            for n in b.notes:
                report(n)
            raise

    if settings.max_examples <= 0:
        return

    arguments = tuple(arguments)

    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)
        )
    )

    def fail_health_check(message):
        message += (
            '\nSee http://hypothesis.readthedocs.org/en/latest/health'
            'checks.html for more information about this.'
        )
        raise FailedHealthCheck(message)

    search_strategy = given_specifier
    search_strategy.validate()

    perform_health_check = settings.perform_health_check
    perform_health_check &= Settings.default.perform_health_check

    from hypothesis.internal.conjecture.data import TestData, Status, \
        StopTest

    if perform_health_check:
        initial_state = getglobalrandomstate()
        health_check_random = Random(random.getrandbits(128))
        # We "pre warm" the health check with one draw to give it some
        # time to calculate any cached data. This prevents the case
        # where the first draw of the health check takes ages because
        # of loading unicode data the first time.
        data = TestData(
            max_length=settings.buffer_size,
            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n)
        )
        with Settings(settings, verbosity=Verbosity.quiet):
            try:
                test_runner(data, reify_and_execute(
                    search_strategy,
                    lambda *args, **kwargs: None,
                ))
            except BaseException:
                pass

        count = 0
        overruns = 0
        filtered_draws = 0
        start = time.time()
        while (
            count < 10 and time.time() < start + 1 and
            filtered_draws < 50 and overruns < 20
        ):
            try:
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    test_runner(data, reify_and_execute(
                        search_strategy,
                        lambda *args, **kwargs: None,
                    ))
                count += 1
            except UnsatisfiedAssumption:
                filtered_draws += 1
            except StopTest:
                if data.status == Status.INVALID:
                    filtered_draws += 1
                else:
                    assert data.status == Status.OVERRUN
                    overruns += 1
            except Exception:
                report(traceback.format_exc())
                if test_runner is default_new_style_executor:
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        "an error in a function you've passed to "
                        'it to construct your data.'
                    )
                else:
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        'an error in a function you\'ve passed to '
                        'it to construct your data. Additionally, '
                        'you have a custom executor, which means '
                        'that this could be your executor failing '
                        'to handle a function which returns None. '
                    )

        if overruns >= 20 or (
            not count and overruns > 0
        ):
            fail_health_check((
                'Examples routinely exceeded the max allowable size. '
                '(%d examples overran while generating %d valid ones)'
                '. Generating examples this large will usually lead to'
                ' bad results. You should try setting average_size or '
                'max_size parameters on your collections and turning '
                'max_leaves down on recursive() calls.') % (
                overruns, count
            ))
        if filtered_draws >= 50 or (
            not count and filtered_draws > 0
        ):
            fail_health_check((
                'It looks like your strategy is filtering out a lot '
                'of data. Health check found %d filtered examples but '
                'only %d good ones. This will make your tests much '
                'slower, and also will probably distort the data '
                'generation quite a lot. You should adapt your '
                'strategy to filter less. This can also be caused by '
                'a low max_leaves parameter in recursive() calls') % (
                filtered_draws, count
            ))
        runtime = time.time() - start
        if runtime > 1.0 or count < 10:
            fail_health_check((
                'Data generation is extremely slow: Only produced '
                '%d valid examples in %.2f seconds (%d invalid ones '
                'and %d exceeded maximum size). Try decreasing '
                "size of the data you're generating (with e.g. "
                'average_size or max_leaves parameters).'
            ) % (count, runtime, filtered_draws, overruns))
        if getglobalrandomstate() != initial_state:
            fail_health_check(
                'Data generation depends on global random module. '
                'This makes results impossible to replay, which '
                'prevents Hypothesis from working correctly. '
                'If you want to use methods from random, use '
                'randoms() from hypothesis.strategies to get an '
                'instance of Random you can use. Alternatively, you '
                'can use the random_module() strategy to explicitly '
                'seed the random module.'
            )

    last_exception = [None]
    repr_for_last_exception = [None]
    performed_random_check = [False]

    def evaluate_test_data(data):
        if perform_health_check and not performed_random_check[0]:
            initial_state = getglobalrandomstate()
            performed_random_check[0] = True
        else:
            initial_state = None
        try:
            result = test_runner(data, reify_and_execute(
                search_strategy, test,
            ))
            if result is not None and settings.perform_health_check:
                raise FailedHealthCheck((
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (test.__name__, result), settings)
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ):
            raise
        except Exception:
            last_exception[0] = traceback.format_exc()
            verbose_report(last_exception[0])
            data.mark_interesting()
        finally:
            if (
                initial_state is not None and
                getglobalrandomstate() != initial_state
            ):
                fail_health_check(
                    'Your test used the global random module. '
                    'This is unlikely to work correctly. You should '
                    'consider using the randoms() strategy from '
                    'hypothesis.strategies instead. Alternatively, '
                    'you can use the random_module() strategy to '
                    'explicitly seed the random module.')

    from hypothesis.internal.conjecture.engine import TestRunner

    falsifying_example = None
    database_key = str_to_bytes(fully_qualified_name(test))
    start_time = time.time()
    runner = TestRunner(
        evaluate_test_data,
        settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    run_time = time.time() - start_time
    timed_out = (
        settings.timeout > 0 and
        run_time >= settings.timeout
    )

    if runner.last_data.status == Status.INTERESTING:
        falsifying_example = runner.last_data.buffer
        if settings.database is not None:
            settings.database.save(
                database_key, falsifying_example
            )
    else:
        if runner.valid_examples < min(
            settings.min_satisfying_examples,
            settings.max_examples,
        ):
            if timed_out:
                raise Timeout((
                    'Ran out of time before finding a satisfying '
                    'example for '
                    '%s. Only found %d examples in ' +
                    '%.2fs.'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples, run_time
                ))
            else:
                raise Unsatisfiable((
                    'Unable to satisfy assumptions of hypothesis '
                    '%s. Only %d examples considered '
                    'satisfied assumptions'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples,))
        return

    assert last_exception[0] is not None

    try:
        with settings:
            test_runner(
                TestData.for_buffer(falsifying_example),
                reify_and_execute(
                    search_strategy, test,
                    print_example=True, is_final=True
                ))
    except (UnsatisfiedAssumption, StopTest):
        report(traceback.format_exc())
        raise Flaky(
            'Unreliable assumption: An example which satisfied '
            'assumptions on the first run now fails it.'
        )

    report(
        'Failed to reproduce exception. Expected: \n' +
        last_exception[0],
    )
    filter_message = (
        'Unreliable test data: Failed to reproduce a failure '
        'and then when it came to recreating the example in '
        'order to print the test data with a flaky result '
        'the example was filtered out (by e.g. a '
        'call to filter in your strategy) when we didn\'t '
        'expect it to be.'
    )
    try:
        test_runner(
            TestData.for_buffer(falsifying_example),
            reify_and_execute(
                search_strategy,
                test_is_flaky(test, repr_for_last_exception[0]),
                print_example=True, is_final=True
            ))
    except (UnsatisfiedAssumption, StopTest):
        raise Flaky(filter_message)
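# Usage sketch (an assumption about the public settings API of this era, not
# library source): the seeding branches at the top of each wrapped_test variant
# correspond to the derandomize setting; when enabled, the PRNG is seeded from a
# digest of the test function, so every run draws the same examples:
from hypothesis import given, settings
import hypothesis.strategies as st


@settings(derandomize=True)  # takes the random = Random(function_digest(test)) branch above
@given(st.integers(), st.integers())
def test_addition_is_commutative(a, b):
    assert a + b == b + a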
def wrapped_test(*arguments, **kwargs):
    settings = wrapped_test._hypothesis_internal_use_settings
    if wrapped_test._hypothesis_internal_use_seed is not None:
        random = Random(
            wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        random = Random(function_digest(test))
    else:
        random = new_random()

    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(
        wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]
    test_runner = new_style_executor(selfy)

    for example in reversed(getattr(
        wrapped_test, 'hypothesis_explicit_examples', ()
    )):
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' % (
                        len(original_argspec.args), len(example.args)))
            example_kwargs = dict(zip(
                original_argspec.args[-len(example.args):],
                example.args
            ))
        else:
            example_kwargs = example.kwargs
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        message_on_failure = 'Falsifying example: %s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs)
        )
        try:
            with BuildContext(None) as b:
                test_runner(
                    None,
                    lambda data: test(*arguments, **example_kwargs)
                )
        except BaseException:
            traceback.print_exc()
            report(message_on_failure)
            for n in b.notes:
                report(n)
            raise

    if settings.max_examples <= 0:
        return

    arguments = tuple(arguments)

    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)
        )
    )

    def fail_health_check(message, label):
        if label in settings.suppress_health_check:
            return
        message += (
            '\nSee https://hypothesis.readthedocs.io/en/latest/health'
            'checks.html for more information about this. '
        )
        message += (
            'If you want to disable just this health check, add %s '
            'to the suppress_health_check settings for this test.'
        ) % (label,)
        raise FailedHealthCheck(message)

    search_strategy = given_specifier
    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    perform_health_check = settings.perform_health_check
    perform_health_check &= Settings.default.perform_health_check

    from hypothesis.internal.conjecture.data import ConjectureData, \
        Status, StopTest

    if not (
        Phase.reuse in settings.phases or
        Phase.generate in settings.phases
    ):
        return

    if perform_health_check:
        health_check_random = Random(random.getrandbits(128))
        # We "pre warm" the health check with one draw to give it some
        # time to calculate any cached data. This prevents the case
        # where the first draw of the health check takes ages because
        # of loading unicode data the first time.
        data = ConjectureData(
            max_length=settings.buffer_size,
            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n)
        )
        with Settings(settings, verbosity=Verbosity.quiet):
            try:
                test_runner(data, reify_and_execute(
                    search_strategy,
                    lambda *args, **kwargs: None,
                ))
            except BaseException:
                pass

        count = 0
        overruns = 0
        filtered_draws = 0
        start = time.time()
        while (
            count < 10 and time.time() < start + 1 and
            filtered_draws < 50 and overruns < 20
        ):
            try:
                data = ConjectureData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    test_runner(data, reify_and_execute(
                        search_strategy,
                        lambda *args, **kwargs: None,
                    ))
                count += 1
            except UnsatisfiedAssumption:
                filtered_draws += 1
            except StopTest:
                if data.status == Status.INVALID:
                    filtered_draws += 1
                else:
                    assert data.status == Status.OVERRUN
                    overruns += 1
            except InvalidArgument:
                raise
            except Exception:
                if (
                    HealthCheck.exception_in_generation in
                    settings.suppress_health_check
                ):
                    raise
                report(traceback.format_exc())
                if test_runner is default_new_style_executor:
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        "an error in a function you've passed to "
                        'it to construct your data.',
                        HealthCheck.exception_in_generation,
                    )
                else:
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        'an error in a function you\'ve passed to '
                        'it to construct your data. Additionally, '
                        'you have a custom executor, which means '
                        'that this could be your executor failing '
                        'to handle a function which returns None. ',
                        HealthCheck.exception_in_generation,
                    )

        if overruns >= 20 or (
            not count and overruns > 0
        ):
            fail_health_check((
                'Examples routinely exceeded the max allowable size. '
                '(%d examples overran while generating %d valid ones)'
                '. Generating examples this large will usually lead to'
                ' bad results. You should try setting average_size or '
                'max_size parameters on your collections and turning '
                'max_leaves down on recursive() calls.') % (
                overruns, count
            ), HealthCheck.data_too_large)
        if filtered_draws >= 50 or (
            not count and filtered_draws > 0
        ):
            fail_health_check((
                'It looks like your strategy is filtering out a lot '
                'of data. Health check found %d filtered examples but '
                'only %d good ones. This will make your tests much '
                'slower, and also will probably distort the data '
                'generation quite a lot. You should adapt your '
                'strategy to filter less. This can also be caused by '
                'a low max_leaves parameter in recursive() calls') % (
                filtered_draws, count
            ), HealthCheck.filter_too_much)
        runtime = time.time() - start
        if runtime > 1.0 or count < 10:
            fail_health_check((
                'Data generation is extremely slow: Only produced '
                '%d valid examples in %.2f seconds (%d invalid ones '
                'and %d exceeded maximum size). Try decreasing '
                "size of the data you're generating (with e.g. "
                'average_size or max_leaves parameters).'
            ) % (count, runtime, filtered_draws, overruns),
                HealthCheck.too_slow,
            )

    last_exception = [None]
    repr_for_last_exception = [None]

    def evaluate_test_data(data):
        try:
            result = test_runner(data, reify_and_execute(
                search_strategy, test,
            ))
            if result is not None and settings.perform_health_check:
                fail_health_check((
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (test.__name__, result), HealthCheck.return_value)
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ):
            raise
        except Exception:
            last_exception[0] = traceback.format_exc()
            verbose_report(last_exception[0])
            data.mark_interesting()

    from hypothesis.internal.conjecture.engine import ConjectureRunner

    falsifying_example = None
    database_key = str_to_bytes(fully_qualified_name(test))
    start_time = time.time()
    runner = ConjectureRunner(
        evaluate_test_data,
        settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start_time
    timed_out = (
        settings.timeout > 0 and
        run_time >= settings.timeout
    )

    if runner.last_data is None:
        return

    if runner.last_data.status == Status.INTERESTING:
        falsifying_example = runner.last_data.buffer
        if settings.database is not None:
            settings.database.save(
                database_key, falsifying_example
            )
    else:
        if runner.valid_examples < min(
            settings.min_satisfying_examples,
            settings.max_examples,
        ):
            if timed_out:
                raise Timeout((
                    'Ran out of time before finding a satisfying '
                    'example for '
                    '%s. Only found %d examples in ' +
                    '%.2fs.'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples, run_time
                ))
            else:
                raise Unsatisfiable((
                    'Unable to satisfy assumptions of hypothesis '
                    '%s. Only %d examples considered '
                    'satisfied assumptions'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples,))
        return

    assert last_exception[0] is not None

    try:
        with settings:
            test_runner(
                ConjectureData.for_buffer(falsifying_example),
                reify_and_execute(
                    search_strategy, test,
                    print_example=True, is_final=True
                ))
    except (UnsatisfiedAssumption, StopTest):
        report(traceback.format_exc())
        raise Flaky(
            'Unreliable assumption: An example which satisfied '
            'assumptions on the first run now fails it.'
        )

    report(
        'Failed to reproduce exception. Expected: \n' +
        last_exception[0],
    )
    filter_message = (
        'Unreliable test data: Failed to reproduce a failure '
        'and then when it came to recreating the example in '
        'order to print the test data with a flaky result '
        'the example was filtered out (by e.g. a '
        'call to filter in your strategy) when we didn\'t '
        'expect it to be.'
    )
    try:
        test_runner(
            ConjectureData.for_buffer(falsifying_example),
            reify_and_execute(
                search_strategy,
                test_is_flaky(test, repr_for_last_exception[0]),
                print_example=True, is_final=True
            ))
    except (UnsatisfiedAssumption, StopTest):
        raise Flaky(filter_message)
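# Usage sketch (public decorators assumed from this era of the API, not library
# source): the fixed-seed and health-check branches exercised throughout the
# wrapped_test variants above are controlled from test code roughly like this:
from hypothesis import HealthCheck, given, seed, settings
import hypothesis.strategies as st


@seed(12345)  # feeds wrapped_test._hypothesis_internal_use_seed above
@settings(suppress_health_check=[HealthCheck.too_slow])  # label skipped by fail_health_check
@given(st.text())
def test_concatenation_doubles_length(s):
    assert len(s + s) == 2 * len(s)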