Example #1
def test_positional_errors_if_given_bad_kwargs():
    def foo(a):
        pass

    with raises(TypeError) as e:
        convert_positional_arguments(foo, (), {"b": 1})
    assert "unexpected keyword argument" in e.value.args[0]
Example #2
def test_positional_errors_if_given_duplicate_kwargs():
    def foo(a):
        pass

    with raises(TypeError) as e:
        convert_positional_arguments(foo, (2, ), {"a": 1})
    assert "multiple values" in e.value.args[0]
Example #3
def test_positional_errors_if_given_bad_kwargs():
    def foo(a):
        pass

    with pytest.raises(TypeError) as e:
        convert_positional_arguments(foo, (), {'b': 1})
    assert 'unexpected keyword argument' in e.value.args[0]
Example #4
def test_positional_errors_if_too_many_args():
    def foo(a):
        pass

    with pytest.raises(TypeError) as e:
        convert_positional_arguments(foo, (1, 2), {})
    assert '2 given' in e.value.args[0]
Example #5
def test_positional_errors_if_given_duplicate_kwargs():
    def foo(a):
        pass

    with pytest.raises(TypeError) as e:
        convert_positional_arguments(foo, (2,), {'a': 1})
    assert 'multiple values' in e.value.args[0]
Example #6
def test_positional_errors_if_too_many_args():
    def foo(a):
        pass

    with raises(TypeError) as e:
        convert_positional_arguments(foo, (1, 2), {})
    assert "2 given" in e.value.args[0]
Example #7
 def __repr__(self):
     if self.__representation is None:
         _args = self.__args
         _kwargs = self.__kwargs
         argspec = getfullargspec(self.__function)
         defaults = dict(argspec.kwonlydefaults or {})
         if argspec.defaults is not None:
             for name, value in zip(reversed(argspec.args),
                                    reversed(argspec.defaults)):
                 defaults[name] = value
         if len(argspec.args) > 1 or argspec.defaults:
             _args, _kwargs = convert_positional_arguments(
                 self.__function, _args, _kwargs)
         else:
             _args, _kwargs = convert_keyword_arguments(
                 self.__function, _args, _kwargs)
         kwargs_for_repr = dict(_kwargs)
         for k, v in defaults.items():
             if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
                 del kwargs_for_repr[k]
         self.__representation = '%s(%s)' % (
             self.__function.__name__,
             arg_string(
                 self.__function, _args, kwargs_for_repr, reorder=False),
         )
     return self.__representation
Example #8
 def __repr__(self):
     if self.__representation is None:
         _args = self.__args
         _kwargs = self.__kwargs
         argspec = getargspec(self.__function)
         defaults = {}
         if argspec.defaults is not None:
             for k in hrange(1, len(argspec.defaults) + 1):
                 defaults[argspec.args[-k]] = argspec.defaults[-k]
         if len(argspec.args) > 1 or argspec.defaults:
             _args, _kwargs = convert_positional_arguments(
                 self.__function, _args, _kwargs)
         else:
             _args, _kwargs = convert_keyword_arguments(
                 self.__function, _args, _kwargs)
         kwargs_for_repr = dict(_kwargs)
         for k, v in defaults.items():
             if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
                 del kwargs_for_repr[k]
         self.__representation = '%s(%s)' % (
             self.__function.__name__,
             arg_string(
                 self.__function, _args, kwargs_for_repr, reorder=False),
         )
     return self.__representation
Example #9
def process_arguments_to_given(wrapped_test, arguments, kwargs,
                               generator_arguments, generator_kwargs, argspec,
                               test, settings):
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments,
                                                     kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    search_strategy = st.tuples(
        st.just(arguments),
        st.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)))

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
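The conversion runs first in process_arguments_to_given so that, once every named positional lives in kwargs, a possible bound object can be looked up by the first parameter name. A hypothetical illustration of that lookup (the Suite class is invented for this sketch):

from hypothesis.internal.reflection import convert_positional_arguments

class Suite:
    def test_method(self, x):
        pass

# After conversion the bound object is reachable by name, which is what
# the `selfy = kwargs.get(argspec.args[0])` lookup above relies on.
inst = Suite()
args, kwargs = convert_positional_arguments(Suite.test_method, (inst, 1), {})
assert args == () and kwargs["self"] is inst and kwargs["x"] == 1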
Example #10
 def __repr__(self):
     if self.__representation is None:
         sig = signature(self.function)
         pos = [
             p for p in sig.parameters.values()
             if "POSITIONAL" in p.kind.name
         ]
         if len(pos) > 1 or any(p.default is not sig.empty for p in pos):
             _args, _kwargs = convert_positional_arguments(
                 self.function, self.__args, self.__kwargs)
         else:
             _args, _kwargs = convert_keyword_arguments(
                 self.function, self.__args, self.__kwargs)
         kwargs_for_repr = {
             k: v
             for k, v in _kwargs.items() if k not in sig.parameters
             or v is not sig.parameters[k].default
         }
         self.__representation = "{}({}){}".format(
             self.function.__name__,
             arg_string(self.function,
                        _args,
                        kwargs_for_repr,
                        reorder=False),
             "".join(map(_repr_filter, self.__filters)),
         )
     return self.__representation
Example #11
def process_arguments_to_given(wrapped_test, arguments, kwargs,
                               generator_arguments, generator_kwargs, argspec,
                               test, settings):
    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments,
                                                     kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]
    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)))

    search_strategy = given_specifier
    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
Example #12
def do_conversion_test(f, args, kwargs):
    result = f(*args, **kwargs)

    cargs, ckwargs = convert_keyword_arguments(f, args, kwargs)
    assert result == f(*cargs, **ckwargs)

    cargs2, ckwargs2 = convert_positional_arguments(f, args, kwargs)
    assert result == f(*cargs2, **ckwargs2)
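A hypothetical invocation of this round-trip helper (the add function is invented for the sketch): both converters must re-express the call without changing its result.

def add(a, b=2):
    return a + b

# Each call is rewritten by both converters and re-executed; the
# assertions inside do_conversion_test check the result is unchanged.
do_conversion_test(add, (1, 2), {})
do_conversion_test(add, (1,), {"b": 5})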
Example #13
 def calc_repr():
     _args = args
     _kwargs = kwargs
     _args, _kwargs = convert_positional_arguments(
         strategy_definition, _args, _kwargs)
     kwargs_for_repr = dict(_kwargs)
     for k, v in defaults.items():
         if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
             del kwargs_for_repr[k]
     return u'%s(%s)' % (strategy_definition.__name__,
                         arg_string(strategy_definition, _args,
                                    kwargs_for_repr))
Example #14
 def accept(*args, **kwargs):
     result = strategy_definition(*args, **kwargs)
     args, kwargs = convert_positional_arguments(strategy_definition, args,
                                                 kwargs)
     kwargs_for_repr = dict(kwargs)
     for k, v in defaults.items():
         if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
             del kwargs_for_repr[k]
     representation = u'%s(%s)' % (strategy_definition.__name__,
                                   arg_string(strategy_definition, args,
                                              kwargs_for_repr))
     return ReprWrapperStrategy(result, representation)
Example #15
 def calc_repr():
     _args = args
     _kwargs = kwargs
     _args, _kwargs = convert_positional_arguments(
         strategy_definition, _args, _kwargs)
     kwargs_for_repr = dict(_kwargs)
     for k, v in defaults.items():
         if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
             del kwargs_for_repr[k]
     return u'%s(%s)' % (
         strategy_definition.__name__,
         arg_string(strategy_definition, _args, kwargs_for_repr)
     )
Example #16
 def accept(*args, **kwargs):
     result = strategy_definition(*args, **kwargs)
     args, kwargs = convert_positional_arguments(
         strategy_definition, args, kwargs)
     kwargs_for_repr = dict(kwargs)
     for k, v in defaults.items():
         if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
             del kwargs_for_repr[k]
     representation = u'%s(%s)' % (
         strategy_definition.__name__,
         arg_string(strategy_definition, args, kwargs_for_repr)
     )
     return ReprWrapperStrategy(result, representation)
Example #17
def process_arguments_to_given(
    wrapped_test,
    arguments,
    kwargs,
    generator_arguments,
    generator_kwargs,
    argspec,
    test,
    settings,
):
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    # We use TupleStrategy over tuples() here to avoid polluting
    # st.STRATEGY_CACHE with references (see #493), and because this is
    # trivial anyway if the fixed_dictionaries strategy is cacheable.
    search_strategy = TupleStrategy(
        (
            st.just(arguments),
            st.fixed_dictionaries(generator_kwargs).map(
                lambda args: dict(args, **kwargs)
            ),
        )
    )

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
Example #18
 def inner(*args, **kwargs):
     if data.frozen:
         raise InvalidState(
             "This generated %s function can only be called within the "
             "scope of the @given that created it." %
             (nicerepr(self.like), ))
     if self.pure:
         args, kwargs = convert_positional_arguments(
             self.like, args, kwargs)
         key = (inner, args, frozenset(kwargs.items()))
         val = data.draw(SharedStrategy(base=self.returns, key=key))
     else:
         val = data.draw(self.returns)
     note("Called function: %s(%s) -> %r" % (nicerepr(
         self.like), arg_string(self.like, args, kwargs), val))
     return val
Example #19
 def inner(*args, **kwargs):
     if data.frozen:
         raise InvalidState(
             f"This generated {nicerepr(self.like)} function can only "
             "be called within the scope of the @given that created it."
         )
     if self.pure:
         args, kwargs = convert_positional_arguments(
             self.like, args, kwargs)
         key = (args, frozenset(kwargs.items()))
         cache = self._cache.setdefault(inner, {})
         if key not in cache:
             cache[key] = data.draw(self.returns)
             rep = repr_call(self.like, args, kwargs, reorder=False)
             note(f"Called function: {rep} -> {cache[key]!r}")
         return cache[key]
     else:
         val = data.draw(self.returns)
         rep = repr_call(self.like, args, kwargs, reorder=False)
         note(f"Called function: {rep} -> {val!r}")
         return val
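The conversion in the pure branch canonicalises the call before building the cache key, so a pure stub draws the same value however the caller spells its arguments. A standalone illustration of the idea (independent of the strategy machinery above):

from hypothesis.internal.reflection import convert_positional_arguments

def f(a, b=2):
    pass

# f(1) and f(a=1) normalise to the same (args, kwargs) pair, hence
# would produce the same cache key.
k1 = convert_positional_arguments(f, (1,), {})
k2 = convert_positional_arguments(f, (), {"a": 1})
assert k1 == k2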
Example #20
def process_arguments_to_given(
    wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs,
    argspec, test, settings
):
    selfy = None
    arguments, kwargs = convert_positional_arguments(
        wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    test_runner = new_style_executor(selfy)

    arguments = tuple(arguments)

    search_strategy = st.tuples(
        st.just(arguments),
        st.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs)
        )
    )

    if selfy is not None:
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    return arguments, kwargs, test_runner, search_strategy
Example #21
        def wrapped_test(*arguments, **kwargs):
            if settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = provided_random or new_random()

            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            for arg in hypothesis_owned_arguments:
                try:
                    value = kwargs[arg]
                except KeyError:
                    continue
                if not isinstance(value, HypothesisProvided):
                    note_deprecation(
                        'Passing in explicit values to override Hypothesis '
                        'provided values is deprecated and will no longer '
                        'work in Hypothesis 2.0. If you need to do this, '
                        'extract a common function and call that from a '
                        'Hypothesis based test.', settings
                    )

            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in reversed(getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            lambda: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = strategy(given_specifier, settings)
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and bad_draws < 50
                ):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(reify_and_execute(
                                search_strategy,
                                search_strategy.draw_template(
                                    health_check_random,
                                    search_strategy.draw_parameter(
                                        health_check_random,
                                    )),
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (
                        filtered_draws, count
                    ))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.'
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds. Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        record_repr=record_repr,
                    ))
                    if result is not None:
                        note_deprecation((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead. '
                            'In Hypothesis 2.0 this will become an error.'
                        ) % (test.__name__, result), settings)
                    return False
                except HypothesisDeprecationWarning:
                    raise
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (
                        not warned_random[0] and
                        perform_health_check and
                        getglobalrandomstate() != initial_state
                    ):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.'
                        )
            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage, start_time=start,
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
Example #22
def test_converter_notices_missing_kwonly_args():
    def f(*, a, b=2):
        pass

    with pytest.raises(TypeError):
        assert convert_positional_arguments(f, (), dict())
Example #23
def test_positional_errors_if_too_many_args():
    def foo(a):
        pass

    with raises(TypeError, match="2 given"):
        convert_positional_arguments(foo, (1, 2), {})
Example #24
def test_converter_handles_kwonly_args():
    def f(*, a, b=2):
        pass

    out = convert_positional_arguments(f, (), {"a": 1})
    assert out == ((), {"a": 1, "b": 2})
Example #25
def test_converter_handles_kwonly_args():
    def f(*, a, b=2):
        pass

    out = convert_positional_arguments(f, (), dict(a=1))
    assert out == ((), dict(a=1, b=2))
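Following on from the two kw-only tests above, a sketch of how *args interacts with the conversion; this exact case is not covered by the examples here, so the expected value is an assumption based on the documented contract that only variadic arguments stay positional.

from hypothesis.internal.reflection import convert_positional_arguments

def g(*args, a, b=2):
    pass

# g has no named positional parameters, so the variadic values stay in
# the positional tuple; the kw-only default b=2 is filled in, as in the
# kw-only examples above.
out = convert_positional_arguments(g, (10, 11), {"a": 1})
assert out == ((10, 11), {"a": 1, "b": 2})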
Example #26
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)
            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    test_runner(
                        lambda: test(*arguments, **example_kwargs)
                    )
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        always_print=settings.max_shrinks <= 0,
                        record_repr=record_repr,
                    ))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True
                ))

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True
                ))
Example #27
        def wrapped_test(*arguments, **kwargs):
            selfy = None
            arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)
            # Because we converted all kwargs to given into real args and
            # error if we have neither args nor kwargs, this should always
            # be valid
            assert argspec.args
            selfy = kwargs.get(argspec.args[0])
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(wrapped_test, "hypothesis_explicit_examples", ()):
                if example.args:
                    example_kwargs = dict(zip(argspec.args[-len(example.args) :], example.args))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = "Falsifying example: %s(%s)" % (
                    test.__name__,
                    arg_string(test, arguments, example_kwargs),
                )
                try:
                    test_runner(lambda: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(isinstance(x, HypothesisProvided) for xs in (arguments, kwargs.values()) for x in xs):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict((k, convert_to_specifier(v)) for (k, v) in kwargs.items())),
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(
                        reify_and_execute(
                            search_strategy, xs, test, always_print=settings.max_shrinks <= 0, record_repr=record_repr
                        )
                    )
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example, settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(search_strategy, falsifying_template, test, print_example=True))

                report("Failed to reproduce exception. Expected: \n" + last_exception[0])

                test_runner(
                    reify_and_execute(
                        search_strategy,
                        falsifying_template,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True,
                    )
                )
Example #28
def test_positional_errors_if_too_few_args():
    def foo(a, b, c):
        pass

    with pytest.raises(TypeError):
        convert_positional_arguments(foo, (1, 2), {})
Example #29
def test_positional_does_not_error_if_extra_args_are_kwargs():
    def foo(a, b, c):
        pass

    convert_positional_arguments(foo, (1, 2), {"c": 3})
Example #30
def test_positional_errors_if_too_few_args():
    def foo(a, b, c):
        pass

    with raises(TypeError):
        convert_positional_arguments(foo, (1, 2), {})
Example #31
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = executor(selfy)

            for example in reversed(
                    getattr(wrapped_test, 'hypothesis_explicit_examples', ())):
                if example.args:
                    example_kwargs = dict(
                        zip(original_argspec.args[-len(example.args):],
                            example.args))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs))
                try:
                    with BuildContext() as b:
                        test_runner(lambda: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)))

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.')
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = given_specifier
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (count < 10 and time.time() < start + 1
                       and filtered_draws < 50 and bad_draws < 50):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(
                                reify_and_execute(
                                    search_strategy,
                                    search_strategy.draw_template(
                                        health_check_random,
                                        search_strategy.draw_parameter(
                                            health_check_random, )),
                                    lambda *args, **kwargs: None,
                                ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.')
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ')
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (filtered_draws, count))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.')
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        ('Data generation is extremely slow: Only produced '
                         '%d valid examples in %.2f seconds. Try decreasing '
                         "size of the data yo're generating (with e.g."
                         'average_size or max_leaves parameters).') %
                        (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.')

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(
                        reify_and_execute(
                            search_strategy,
                            xs,
                            test,
                            record_repr=record_repr,
                        ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck(
                            ('Tests run under @given should return None, but '
                             '%s returned %r instead.') %
                            (test.__name__, result), settings)
                    return False
                except (HypothesisDeprecationWarning, FailedHealthCheck,
                        UnsatisfiedAssumption):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (not warned_random[0] and perform_health_check
                            and getglobalrandomstate() != initial_state):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            with settings:
                falsifying_template = None
                try:
                    falsifying_template = best_satisfying_template(
                        search_strategy,
                        random,
                        is_template_example,
                        settings,
                        storage,
                        start_time=start,
                    )
                except NoSuchExample:
                    return

                assert last_exception[0] is not None

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test,
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    report(traceback.format_exc())
                    raise Flaky(
                        'Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.')

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0], )

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test_is_flaky(
                                              test,
                                              repr_for_last_exception[0]),
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    raise Flaky(
                        'Unreliable test data: Failed to reproduce a failure '
                        'and then when it came to recreating the example in '
                        'order to print the test data with a flaky result '
                        'the example was filtered out (by e.g. a '
                        'call to filter in your strategy) when we didn\'t '
                        'expect it to be.')
Example #32
def test_positional_errors_if_given_duplicate_kwargs():
    def foo(a):
        pass

    with raises(TypeError, match="multiple values"):
        convert_positional_arguments(foo, (2, ), {"a": 1})
Example #33
def test_positional_errors_if_given_bad_kwargs():
    def foo(a):
        pass

    with raises(TypeError, match="unexpected keyword argument"):
        convert_positional_arguments(foo, (), {"b": 1})
Example #34
def test_positional_does_not_error_if_extra_args_are_kwargs():
    def foo(a, b, c):
        pass

    convert_positional_arguments(foo, (1, 2), {'c': 3})
Example #35
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, \
                StopTest

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except Exception:
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ))
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls') % (
                        filtered_draws, count
                    ))
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns))
                if getglobalrandomstate() != initial_state:
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )
            last_exception = [None]
            repr_for_last_exception = [None]
            performed_random_check = [False]

            def evaluate_test_data(data):
                if perform_health_check and not performed_random_check[0]:
                    initial_state = getglobalrandomstate()
                    performed_random_check[0] = True
                else:
                    initial_state = None
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), settings)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()
                finally:
                    if (
                        initial_state is not None and
                        getglobalrandomstate() != initial_state
                    ):
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in %.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
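
The evaluate_test_data function in this example is the bridge between test outcomes and the Conjecture engine: failed assumptions mark the data invalid, engine control-flow exceptions propagate, and any other exception marks the data interesting for shrinking. A condensed sketch of that mapping, with stand-in exception classes:

import traceback

class UnsatisfiedAssumption(Exception):
    pass

class StopTest(Exception):
    pass

def evaluate(run, data, last_exception):
    try:
        run()
        return False                  # test passed: nothing interesting
    except UnsatisfiedAssumption:
        data.mark_invalid()           # assume() failed: discard this data
    except StopTest:
        raise                         # engine control flow: pass through
    except Exception:
        last_exception[0] = traceback.format_exc()
        data.mark_interesting()       # real failure: keep it and shrink
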
Exemplo n.º 41
0
def test_converter_handles_kwonly_args():
    def f(*, a, b=2):
        pass

    out = convert_positional_arguments(f, (), dict(a=1))
    assert out == ((), dict(a=1, b=2))
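
The keyword-only behaviour this test expects also falls out of the standard binding machinery once defaults are applied, as in this illustrative stand-in:

import inspect

def convert_with_defaults(fn, args, kwargs):
    bound = inspect.signature(fn).bind(*args, **kwargs)
    # apply_defaults() fills in unbound parameters, including the
    # keyword-only b=2 below.
    bound.apply_defaults()
    return (), dict(bound.arguments)

def f(*, a, b=2):
    pass

assert convert_with_defaults(f, (), dict(a=1)) == ((), dict(a=1, b=2))
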
Exemplo n.º 42
0
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext(None) as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    traceback.print_exc()
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    '\nSee https://hypothesis.readthedocs.io/en/latest/health'
                    'checks.html for more information about this. '
                )
                message += (
                    'If you want to disable just this health check, add %s '
                    'to the suppress_health_check settings for this test.'
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import ConjectureData, \
                Status, StopTest
            if not (
                Phase.reuse in settings.phases or
                Phase.generate in settings.phases
            ):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = ConjectureData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = ConjectureData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if (
                            HealthCheck.exception_in_generation in
                            settings.suppress_health_check
                        ):
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.',
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ',
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ), HealthCheck.data_too_large)
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls') % (
                        filtered_draws, count
                    ), HealthCheck.filter_too_much)
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        fail_health_check((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), HealthCheck.return_value)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import ConjectureRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = ConjectureRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            note_engine_for_statistics(runner)
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in %.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        ConjectureData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    ConjectureData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
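
The health check in this version runs under a joint budget: up to one second of wall-clock time, stopping early once ten valid draws arrive or once filtering or overruns clearly dominate, with each verdict suppressible via suppress_health_check. A self-contained toy of that control flow (thresholds mirror the excerpt; the exception classes are stand-ins):

import time

class Filtered(Exception):
    pass

class Overrun(Exception):
    pass

def health_check(draw_one, deadline=1.0, wanted=10,
                 max_filtered=50, max_overruns=20):
    # Count outcomes until enough valid draws arrive or one failure
    # mode dominates, then report which limit was hit.
    count = filtered = overruns = 0
    start = time.time()
    while (count < wanted and time.time() < start + deadline and
           filtered < max_filtered and overruns < max_overruns):
        try:
            draw_one()
            count += 1
        except Filtered:
            filtered += 1
        except Overrun:
            overruns += 1
    if overruns >= max_overruns or (not count and overruns):
        return 'data_too_large'
    if filtered >= max_filtered or (not count and filtered):
        return 'filter_too_much'
    if time.time() - start > deadline or count < wanted:
        return 'too_slow'
    return 'ok'

assert health_check(lambda: None) == 'ok'
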
Exemplo n.º 43
0
        def wrapped_test(*arguments, **kwargs):
            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)
            # Arguments in unused_kwargs haven't been injected through
            # argspec.defaults, so we need to add them here.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    test_runner(
                        lambda: test(*arguments, **example_kwargs)
                    )
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        always_print=settings.max_shrinks <= 0,
                        record_repr=record_repr,
                    ))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
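
Both the template-based flow here and the buffer-based flows above end the same way: the failing input is replayed against test_is_flaky(test, repr), a wrapper that raises Flaky instead of re-running the test body. A plausible sketch of such a wrapper (illustrative only; the real helper is a Hypothesis internal):

class Flaky(Exception):
    pass

def test_is_flaky(test, previous_example_repr):
    # Stands in for the real test during the final replay: whatever
    # arguments the runner supplies, it raises Flaky describing the
    # failure that could not be reproduced.
    def flaky_stand_in(*args, **kwargs):
        raise Flaky(
            '%s produced unreliable results: %r falsified it on the '
            'first call but not on a later one.'
            % (test.__name__, previous_example_repr))
    return flaky_stand_in
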
Exemplo n.º 44
0
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            "example has too many arguments for test. "
                            "Expected at most %d but got %d" % (len(original_argspec.args), len(example.args))
                        )
                    example_kwargs = dict(zip(original_argspec.args[-len(example.args) :], example.args))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = "Falsifying example: %s(%s)" % (
                    test.__name__,
                    arg_string(test, arguments, example_kwargs),
                )
                try:
                    with BuildContext() as b:
                        test_runner(None, lambda data: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments), sd.fixed_dictionaries(generator_kwargs).map(lambda args: dict(args, **kwargs))
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    "\nSee https://hypothesis.readthedocs.io/en/latest/health"
                    "checks.html for more information about this. "
                )
                message += (
                    "If you want to disable just this health check, add %s "
                    "to the suppress_health_check settings for this test."
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, StopTest

            if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while count < 10 and time.time() < start + 1 and filtered_draws < 50 and overruns < 20:
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if HealthCheck.exception_in_generation in settings.suppress_health_check:
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data.",
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data. Additionally, "
                                "you have a custom executor, which means "
                                "that this could be your executor failing "
                                "to handle a function which returns None. ",
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (not count and overruns > 0):
                    fail_health_check(
                        (
                            "Examples routinely exceeded the max allowable size. "
                            "(%d examples overran while generating %d valid ones)"
                            ". Generating examples this large will usually lead to"
                            " bad results. You should try setting average_size or "
                            "max_size parameters on your collections and turning "
                            "max_leaves down on recursive() calls."
                        )
                        % (overruns, count),
                        HealthCheck.data_too_large,
                    )
                if filtered_draws >= 50 or (not count and filtered_draws > 0):
                    fail_health_check(
                        (
                            "It looks like your strategy is filtering out a lot "
                            "of data. Health check found %d filtered examples but "
                            "only %d good ones. This will make your tests much "
                            "slower, and also will probably distort the data "
                            "generation quite a lot. You should adapt your "
                            "strategy to filter less. This can also be caused by "
                            "a low max_leaves parameter in recursive() calls"
                        )
                        % (filtered_draws, count),
                        HealthCheck.filter_too_much,
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        (
                            "Data generation is extremely slow: Only produced "
                            "%d valid examples in %.2f seconds (%d invalid ones "
                            "and %d exceeded maximum size). Try decreasing "
                            "size of the data you're generating (with e.g."
                            "average_size or max_leaves parameters)."
                        )
                        % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(search_strategy, test))
                    if result is not None and settings.perform_health_check:
                        fail_health_check(
                            ("Tests run under @given should return None, but " "%s returned %r instead.")
                            % (test.__name__, result),
                            HealthCheck.return_value,
                        )
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (HypothesisDeprecationWarning, FailedHealthCheck, StopTest):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(evaluate_test_data, settings=settings, random=random, database_key=database_key)
            runner.run()
            run_time = time.time() - start_time
            timed_out = settings.timeout > 0 and run_time >= settings.timeout
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(database_key, falsifying_example)
            else:
                if runner.valid_examples < min(settings.min_satisfying_examples, settings.max_examples):
                    if timed_out:
                        raise Timeout(
                            (
                                "Ran out of time before finding a satisfying "
                                "example for "
                                "%s. Only found %d examples in " + "%.2fs."
                            )
                            % (get_pretty_function_description(test), runner.valid_examples, run_time)
                        )
                    else:
                        raise Unsatisfiable(
                            (
                                "Unable to satisfy assumptions of hypothesis "
                                "%s. Only %d examples considered "
                                "satisfied assumptions"
                            )
                            % (get_pretty_function_description(test), runner.valid_examples)
                        )
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(search_strategy, test, print_example=True, is_final=True),
                    )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    "Unreliable assumption: An example which satisfied " "assumptions on the first run now fails it."
                )

            report("Failed to reproduce exception. Expected: \n" + last_exception[0])

            filter_message = (
                "Unreliable test data: Failed to reproduce a failure "
                "and then when it came to recreating the example in "
                "order to print the test data with a flaky result "
                "the example was filtered out (by e.g. a "
                "call to filter in your strategy) when we didn't "
                "expect it to be."
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True,
                        is_final=True,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
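
Common to every version above: when a falsifying example is found, its buffer is saved in the example database under a key derived from the test's fully qualified name, so future runs can replay it before generating fresh data. A toy of that round trip, with an in-memory dict standing in for settings.database:

def fully_qualified_name(fn):
    return fn.__module__ + '.' + fn.__qualname__

database = {}  # stand-in for settings.database

def save_falsifying_example(test, buffer):
    database[fully_qualified_name(test).encode()] = buffer

def replay_saved_example(test, run_buffer):
    key = fully_qualified_name(test).encode()
    if key in database:
        run_buffer(database[key])  # replay before any fresh generation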