Example #1
def test_copying_preserves_argspec(f):
    af = getargspec(f)
    t = copy_argspec(u'foo', getargspec(f))(universal_acceptor)
    at = getargspec(t)
    assert af.args == at.args
    assert af.varargs == at.varargs
    assert af.keywords == at.keywords
    assert len(af.defaults or ()) == len(at.defaults or ())
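These tests exercise Hypothesis' signature-copying helpers against the legacy inspect.getargspec ArgSpec tuple. As a hedged sketch of the fixtures they assume (the real test module defines its own versions), universal_acceptor simply reports what it was called with:

import inspect

def universal_acceptor(*args, **kwargs):
    # Accepts any call and returns what it received.
    return args, kwargs

def has_two_args(hello, world):
    pass

spec = inspect.getargspec(has_two_args)  # removed in Python 3.11; use getfullargspec there
print(spec.args)      # ['hello', 'world']
print(spec.varargs)   # None
print(spec.keywords)  # None
print(spec.defaults)  # None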
Example #2
def test_copying_preserves_argspec(f):
    af = getargspec(f)
    t = define_function_signature(
        'foo', 'docstring', getargspec(f))(universal_acceptor)
    at = getargspec(t)
    assert af.args == at.args
    assert af.varargs == at.varargs
    assert af.keywords == at.keywords
    assert len(af.defaults or ()) == len(at.defaults or ())
Example #3
def test_copying_preserves_argspec():
    for f in [has_one_arg, has_two_args, has_varargs, has_kwargs]:
        af = getargspec(f)
        t = copy_argspec('foo', getargspec(f))(universal_acceptor)
        at = getargspec(t)
        assert af.args == at.args
        assert af.varargs == at.varargs
        assert af.keywords == at.keywords
        assert len(af.defaults or ()) == len(at.defaults or ())
Example #4
def _extract_lambda_source(f):
    """Extracts a single lambda expression from the string source. Returns a
    string indicating an unknown body if it gets confused in any way.

    This is not a good function and I am sorry for it. Forgive me my
    sins, oh lord

    """
    out = BytesIO() if PY2 else StringIO()
    deparse_code(sys.version_info[0] + sys.version_info[1] * 0.1,
                 f.__code__,
                 out=out,
                 is_pypy=PYPY,
                 compile_mode='eval')
    source = out.getvalue()
    if source.startswith('return '):
        source = source[len('return '):]
    else:
        source = '<unknown>'
    args = getargspec(f)
    arg_bits = []
    for a in args.args:
        if isinstance(a, str):
            arg_bits.append(a)
        else:  # pragma: no cover
            assert isinstance(a, list)
            arg_bits.append('(%s)' % (', '.join(a)))
    if args.varargs is not None:
        arg_bits.append('*' + args.varargs)
    if args.keywords is not None:
        arg_bits.append('**' + args.keywords)
    return 'lambda %s: %s' % (', '.join(arg_bits), source)
Example #5
def __repr__(self):
    if self.__representation is None:
        _args = self.__args
        _kwargs = self.__kwargs
        argspec = getargspec(self.__function)
        defaults = {}
        if argspec.defaults is not None:
            for k in hrange(1, len(argspec.defaults) + 1):
                defaults[argspec.args[-k]] = argspec.defaults[-k]
        if len(argspec.args) > 1 or argspec.defaults:
            _args, _kwargs = convert_positional_arguments(
                self.__function, _args, _kwargs)
        else:
            _args, _kwargs = convert_keyword_arguments(
                self.__function, _args, _kwargs)
        kwargs_for_repr = dict(_kwargs)
        for k, v in defaults.items():
            if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
                del kwargs_for_repr[k]
        self.__representation = '%s(%s)' % (
            self.__function.__name__,
            arg_string(
                self.__function, _args, kwargs_for_repr, reorder=False),
        )
    return self.__representation
Example #6
def function_digest(function):
    """Returns a string that is stable across multiple invocations across
    multiple processes and is prone to changing significantly in response to
    minor changes to the function.

    No guarantee of uniqueness though it usually will be.

    """
    hasher = hashlib.md5()
    try:
        hasher.update(to_unicode(inspect.getsource(function)).encode('utf-8'))
    # Different errors on different versions of python. What fun.
    except (OSError, IOError, TypeError):
        pass
    try:
        hasher.update(str_to_bytes(function.__name__))
    except AttributeError:
        pass
    try:
        hasher.update(function.__module__.__name__.encode('utf-8'))
    except AttributeError:
        pass
    try:
        hasher.update(str_to_bytes(repr(getargspec(function))))
    except TypeError:
        pass
    return hasher.digest()
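A hedged usage sketch: function_digest lives in hypothesis.internal.reflection (an internal module, so the import path may change), and its point is that the digest stays stable as long as the function's source, name, module and signature are unchanged:

from hypothesis.internal.reflection import function_digest

def f(x):
    return x + 1

# The same unchanged function hashes to the same digest on every call.
assert function_digest(f) == function_digest(f)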
Example #7
def execute_explicit_examples(test_runner, test, wrapped_test, settings,
                              arguments, kwargs):
    original_argspec = getargspec(test)

    for example in reversed(
            getattr(wrapped_test, 'hypothesis_explicit_examples', ())):
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' %
                    (len(original_argspec.args), len(example.args)))
            example_kwargs = dict(
                zip(original_argspec.args[-len(example.args):], example.args))
        else:
            example_kwargs = example.kwargs
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than later.
        message_on_failure = 'Falsifying example: %s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs))
        try:
            with BuildContext(None) as b:
                test_runner(None,
                            lambda data: test(*arguments, **example_kwargs))
        except BaseException:
            traceback.print_exc()
            report(message_on_failure)
            for n in b.notes:
                report(n)
            raise
Example #8
def defines_strategy(strategy_definition):
    from hypothesis.internal.reflection import proxies, arg_string, \
        convert_positional_arguments
    argspec = getargspec(strategy_definition)
    defaults = {}
    if argspec.defaults is not None:
        for k in hrange(1, len(argspec.defaults) + 1):
            defaults[argspec.args[-k]] = argspec.defaults[-k]

    @proxies(strategy_definition)
    def accept(*args, **kwargs):
        result = strategy_definition(*args, **kwargs)

        def calc_repr():
            _args = args
            _kwargs = kwargs
            _args, _kwargs = convert_positional_arguments(
                strategy_definition, _args, _kwargs)
            kwargs_for_repr = dict(_kwargs)
            for k, v in defaults.items():
                if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
                    del kwargs_for_repr[k]
            return u'%s(%s)' % (strategy_definition.__name__,
                                arg_string(strategy_definition, _args,
                                           kwargs_for_repr))

        return ReprWrapperStrategy(result, calc_repr)

    return accept
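This wrapper is what gives public strategies their call-style repr. A rough illustration with the public API (the exact formatting may vary between Hypothesis versions):

import hypothesis.strategies as st

s = st.lists(st.integers(), min_size=1)
print(s)  # expected to print something close to: lists(integers(), min_size=1)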
Example #9
def defines_strategy(strategy_definition):
    from hypothesis.internal.reflection import proxies, arg_string, \
        convert_positional_arguments
    argspec = getargspec(strategy_definition)
    defaults = {}
    if argspec.defaults is not None:
        for k in hrange(1, len(argspec.defaults) + 1):
            defaults[argspec.args[-k]] = argspec.defaults[-k]

    @proxies(strategy_definition)
    def accept(*args, **kwargs):
        result = strategy_definition(*args, **kwargs)

        def calc_repr():
            _args = args
            _kwargs = kwargs
            _args, _kwargs = convert_positional_arguments(
                strategy_definition, _args, _kwargs)
            kwargs_for_repr = dict(_kwargs)
            for k, v in defaults.items():
                if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]:
                    del kwargs_for_repr[k]
            return u'%s(%s)' % (
                strategy_definition.__name__,
                arg_string(strategy_definition, _args, kwargs_for_repr)
            )
        return ReprWrapperStrategy(result, calc_repr)
    return accept
Example #10
def function_digest(function):
    """Returns a string that is stable across multiple invocations across
    multiple processes and is prone to changing significantly in response to
    minor changes to the function.

    No guarantee of uniqueness though it usually will be.

    """
    hasher = hashlib.md5()
    try:
        hasher.update(to_unicode(inspect.getsource(function)).encode(u'utf-8'))
    # Different errors on different versions of python. What fun.
    except (OSError, IOError, TypeError):
        pass
    try:
        hasher.update(function.__name__.encode(u'utf-8'))
    except AttributeError:
        pass
    try:
        hasher.update(function.__module__.__name__.encode(u'utf-8'))
    except AttributeError:
        pass
    try:
        hasher.update(repr(getargspec(function)).encode(u'utf-8'))
    except TypeError:
        pass
    return hasher.digest()
Example #11
def _extract_lambda_source(f):
    """Extracts a single lambda expression from the string source. Returns a
    string indicating an unknown body if it gets confused in any way.

    This is not a good function and I am sorry for it. Forgive me my
    sins, oh lord

    """
    out = BytesIO() if PY2 else StringIO()
    deparse_code(
        sys.version_info[0] + sys.version_info[1] * 0.1,
        f.__code__,
        out=out, is_pypy=PYPY, compile_mode='eval'
    )
    source = out.getvalue()
    if source.startswith('return '):
        source = source[len('return '):]
    else:
        source = '<unknown>'
    args = getargspec(f)
    arg_bits = []
    for a in args.args:
        if isinstance(a, str):
            arg_bits.append(a)
        else:  # pragma: no cover
            assert isinstance(a, list)
            arg_bits.append('(%s)' % (', '.join(a)))
    if args.varargs is not None:
        arg_bits.append('*' + args.varargs)
    if args.keywords is not None:
        arg_bits.append('**' + args.keywords)
    return 'lambda %s: %s' % (
        ', '.join(arg_bits), source
    )
Example #12
def test_name_does_not_clash_with_function_names():
    def f():
        pass

    @copy_argspec('f', getargspec(f))
    def g():
        pass
    g()
Example #13
def test_name_does_not_clash_with_function_names():
    def f():
        pass

    @define_function_signature('f', 'A docstring for f', getargspec(f))
    def g():
        pass
    g()
Example #14
def arg_string(f, args, kwargs):
    args, kwargs = convert_positional_arguments(f, args, kwargs)

    argspec = getargspec(f)

    bits = []

    for a in argspec.args:
        if a in kwargs:
            bits.append(u'%s=%s' % (a, nicerepr(kwargs.pop(a))))
    if kwargs:
        for a in sorted(kwargs):
            bits.append(u'%s=%s' % (a, nicerepr(kwargs[a])))

    return u', '.join([unicode_safe_repr(x) for x in args] + bits)
Example #15
def arg_string(f, args, kwargs, reorder=True):
    if reorder:
        args, kwargs = convert_positional_arguments(f, args, kwargs)

    argspec = getargspec(f)

    bits = []

    for a in argspec.args:
        if a in kwargs:
            bits.append('%s=%s' % (a, nicerepr(kwargs.pop(a))))
    if kwargs:
        for a in sorted(kwargs):
            bits.append('%s=%s' % (a, nicerepr(kwargs[a])))

    return ', '.join([nicerepr(x) for x in args] + bits)
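A small sketch of the intended behaviour (arg_string is an internal helper, so the exact output below is indicative rather than guaranteed):

def f(x, y=3):
    pass

# With reorder=True (the default), positional values are folded into keywords:
#   arg_string(f, (1,), {'y': 2})                 ->  'x=1, y=2'
# With reorder=False, positional values stay positional:
#   arg_string(f, (1,), {'y': 2}, reorder=False)  ->  '1, y=2'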
Example #16
def convert_keyword_arguments(function, args, kwargs):
    """Returns a pair of a tuple and a dictionary which would be equivalent
    passed as positional and keyword args to the function. Unless function has.

    **kwargs the dictionary will always be empty.

    """
    argspec = getargspec(function)
    new_args = []
    kwargs = dict(kwargs)

    defaults = {}

    if argspec.defaults:
        for name, value in zip(
                argspec.args[-len(argspec.defaults):],
                argspec.defaults
        ):
            defaults[name] = value

    n = max(len(args), len(argspec.args))

    for i in hrange(n):
        if i < len(args):
            new_args.append(args[i])
        else:
            arg_name = argspec.args[i]
            if arg_name in kwargs:
                new_args.append(kwargs.pop(arg_name))
            elif arg_name in defaults:
                new_args.append(defaults[arg_name])
            else:
                raise TypeError(u'No value provided for argument %r' % (
                    arg_name
                ))

    if kwargs and not argspec.keywords:
        if len(kwargs) > 1:
            raise TypeError(u'%s() got unexpected keyword arguments %s' % (
                function.__name__, u', '.join(map(repr, kwargs))
            ))
        else:
            bad_kwarg = next(iter(kwargs))
            raise TypeError(u'%s() got an unexpected keyword argument %r' % (
                function.__name__, bad_kwarg
            ))
    return tuple(new_args), kwargs
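Worked through by hand on a small function (a sketch of the logic above, not captured output):

def g(a, b, c=3):
    pass

# convert_keyword_arguments(g, (1,), {'b': 2})
#   fills positional slots from kwargs and defaults in order -> ((1, 2, 3), {})
# convert_keyword_arguments(g, (1, 2), {'oops': 1})
#   raises TypeError: g() got an unexpected keyword argument 'oops'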
Example #17
def composite(f):
    """Defines a strategy that is built out of potentially arbitrarily many
    other strategies.

    This is intended to be used as a decorator. See the full
    documentation for more details about how to use this function.

    """

    from hypothesis.internal.reflection import define_function_signature
    argspec = getargspec(f)

    if (
        argspec.defaults is not None and
        len(argspec.defaults) == len(argspec.args)
    ):
        raise InvalidArgument(
            'A default value for initial argument will never be used')
    if len(argspec.args) == 0 and not argspec.varargs:
        raise InvalidArgument(
            'Functions wrapped with composite must take at least one '
            'positional argument.'
        )

    new_argspec = ArgSpec(
        args=argspec.args[1:], varargs=argspec.varargs,
        keywords=argspec.keywords, defaults=argspec.defaults
    )

    @defines_strategy
    @define_function_signature(f.__name__, f.__doc__, new_argspec)
    def accept(*args, **kwargs):
        class CompositeStrategy(SearchStrategy):

            def do_draw(self, data):
                first_draw = [True]

                def draw(strategy):
                    if not first_draw[0]:
                        data.mark_bind()
                    first_draw[0] = False
                    return data.draw(strategy)

                return f(draw, *args, **kwargs)
        return CompositeStrategy()
    accept.__module__ = f.__module__
    return accept
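This decorator backs hypothesis.strategies.composite. Typical use of that public API looks like:

from hypothesis import given, strategies as st

@st.composite
def ordered_pairs(draw, max_value=100):
    low = draw(st.integers(min_value=0, max_value=max_value))
    high = draw(st.integers(min_value=low, max_value=max_value))
    return (low, high)

@given(ordered_pairs())
def test_pairs_are_ordered(pair):
    low, high = pair
    assert low <= high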
Example #18
def composite(f):
    """Defines a strategy that is built out of potentially arbitrarily many
    other strategies.

    This is intended to be used as a decorator. See the full
    documentation for more details about how to use this function.

    """

    from hypothesis.internal.reflection import copy_argspec
    argspec = getargspec(f)

    if (
        argspec.defaults is not None and
        len(argspec.defaults) == len(argspec.args)
    ):
        raise InvalidArgument(
            'A default value for initial argument will never be used')
    if len(argspec.args) == 0 and not argspec.varargs:
        raise InvalidArgument(
            'Functions wrapped with composite must take at least one '
            'positional argument.'
        )

    new_argspec = ArgSpec(
        args=argspec.args[1:], varargs=argspec.varargs,
        keywords=argspec.keywords, defaults=argspec.defaults
    )

    @defines_strategy
    @copy_argspec(f.__name__, new_argspec)
    def accept(*args, **kwargs):
        class CompositeStrategy(SearchStrategy):

            def do_draw(self, data):
                first_draw = [True]

                def draw(strategy):
                    if not first_draw[0]:
                        data.mark_bind()
                    first_draw[0] = False
                    return data.draw(strategy)

                return f(draw, *args, **kwargs)
        return CompositeStrategy()
    return accept
Example #19
def composite(f):
    """Defines a strategy that is built out of potentially arbitrarily many
    other strategies.

    This is intended to be used as a decorator. See the full
    documentation for more details about how to use this function.

    """

    from hypothesis.searchstrategy.morphers import MorpherStrategy
    from hypothesis.internal.reflection import copy_argspec
    argspec = getargspec(f)

    if (
        argspec.defaults is not None and
        len(argspec.defaults) == len(argspec.args)
    ):
        raise InvalidArgument(
            'A default value for initial argument will never be used')
    if len(argspec.args) == 0 and not argspec.varargs:
        raise InvalidArgument(
            'Functions wrapped with composite must take at least one '
            'positional argument.'
        )

    new_argspec = ArgSpec(
        args=argspec.args[1:], varargs=argspec.varargs,
        keywords=argspec.keywords, defaults=argspec.defaults
    )

    base_strategy = streaming(MorpherStrategy())

    @defines_strategy
    @copy_argspec(f.__name__, new_argspec)
    def accept(*args, **kwargs):
        def call_with_draw(morphers):
            index = [0]

            def draw(strategy):
                i = index[0]
                index[0] += 1
                return morphers[i].become(strategy)
            return f(*((draw,) + args), **kwargs)
        return base_strategy.map(call_with_draw)
    return accept
Example #20
def convert_positional_arguments(function, args, kwargs):
    """Return a tuple (new_args, new_kwargs) where all possible arguments have
    been moved to kwargs.

    new_args will only be non-empty if function has a
    variadic argument.

    """
    argspec = getargspec(function)
    kwargs = dict(kwargs)
    if not argspec.keywords:
        for k in kwargs.keys():
            if k not in argspec.args:
                raise TypeError(
                    u'%s() got an unexpected keyword argument %r' % (
                        function.__name__, k
                    ))
    if len(args) < len(argspec.args):
        for i in hrange(
            len(args), len(argspec.args) - len(argspec.defaults or ())
        ):
            if argspec.args[i] not in kwargs:
                raise TypeError(u'No value provided for argument %s' % (
                    argspec.args[i],
                ))

    if len(args) > len(argspec.args) and not argspec.varargs:
        raise TypeError(
            u'%s() takes at most %d positional arguments (%d given)' % (
                function.__name__, len(argspec.args), len(args)
            )
        )

    for arg, name in zip(args, argspec.args):
        if name in kwargs:
            raise TypeError(
                u'%s() got multiple values for keyword argument %r' % (
                    function.__name__, name
                ))
        else:
            kwargs[name] = arg
    return (
        tuple(args[len(argspec.args):]),
        kwargs,
    )
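A worked example of the conversion (a sketch only; results shown as expected values):

def h(a, b, *rest):
    pass

# convert_positional_arguments(h, (1, 2, 3), {})
#   moves named parameters into kwargs and keeps the overflow in args:
#   -> ((3,), {'a': 1, 'b': 2})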
Example #21
def composite(f):
    """Defines a strategy that is built out of potentially arbitrarily many
    other strategies.

    This is intended to be used as a decorator. See the full
    documentation for more details about how to use this function.

    """

    from hypothesis.searchstrategy.morphers import MorpherStrategy
    from hypothesis.internal.reflection import copy_argspec
    argspec = getargspec(f)

    if (argspec.defaults is not None
            and len(argspec.defaults) == len(argspec.args)):
        raise InvalidArgument(
            'A default value for initial argument will never be used')
    if len(argspec.args) == 0 and not argspec.varargs:
        raise InvalidArgument(
            'Functions wrapped with composite must take at least one '
            'positional argument.')

    new_argspec = ArgSpec(args=argspec.args[1:],
                          varargs=argspec.varargs,
                          keywords=argspec.keywords,
                          defaults=argspec.defaults)

    base_strategy = streaming(MorpherStrategy())

    @defines_strategy
    @copy_argspec(f.__name__, new_argspec)
    def accept(*args, **kwargs):
        def call_with_draw(morphers):
            index = [0]

            def draw(strategy):
                i = index[0]
                index[0] += 1
                return morphers[i].become(strategy)

            return f(*((draw, ) + args), **kwargs)

        return base_strategy.map(call_with_draw)

    return accept
Example #22
def arg_string(f, args, kwargs):
    args, kwargs = convert_positional_arguments(f, args, kwargs)

    argspec = getargspec(f)

    bits = []

    for a in argspec.args:
        if a in kwargs:
            bits.append(u'%s=%s' % (a, nicerepr(kwargs.pop(a))))
    if kwargs:
        for a in sorted(kwargs):
            bits.append(u'%s=%s' % (a, nicerepr(kwargs[a])))

    return u', '.join(
        [unicode_safe_repr(x) for x in args] +
        bits
    )
Example #23
def arg_string(f, args, kwargs, reorder=True):
    if reorder:
        args, kwargs = convert_positional_arguments(f, args, kwargs)

    argspec = getargspec(f)

    bits = []

    for a in argspec.args:
        if a in kwargs:
            bits.append('%s=%s' % (a, nicerepr(kwargs.pop(a))))
    if kwargs:
        for a in sorted(kwargs):
            bits.append('%s=%s' % (a, nicerepr(kwargs[a])))

    return ', '.join(
        [nicerepr(x) for x in args] +
        bits
    )
Example #24
def convert_keyword_arguments(function, args, kwargs):
    """Returns a pair of a tuple and a dictionary which would be equivalent
    passed as positional and keyword args to the function. Unless function has.

    **kwargs the dictionary will always be empty.

    """
    argspec = getargspec(function)
    new_args = []
    kwargs = dict(kwargs)

    defaults = {}

    if argspec.defaults:
        for name, value in zip(argspec.args[-len(argspec.defaults):],
                               argspec.defaults):
            defaults[name] = value

    n = max(len(args), len(argspec.args))

    for i in hrange(n):
        if i < len(args):
            new_args.append(args[i])
        else:
            arg_name = argspec.args[i]
            if arg_name in kwargs:
                new_args.append(kwargs.pop(arg_name))
            elif arg_name in defaults:
                new_args.append(defaults[arg_name])
            else:
                raise TypeError('No value provided for argument %r' %
                                (arg_name))

    if kwargs and not argspec.keywords:
        if len(kwargs) > 1:
            raise TypeError('%s() got unexpected keyword arguments %s' %
                            (function.__name__, ', '.join(map(repr, kwargs))))
        else:
            bad_kwarg = next(iter(kwargs))
            raise TypeError('%s() got an unexpected keyword argument %r' %
                            (function.__name__, bad_kwarg))
    return tuple(new_args), kwargs
Example #25
    def accept(f):
        fargspec = getargspec(f)
        must_pass_as_kwargs = []
        invocation_parts = []
        for a in argspec.args:
            if a not in fargspec.args and not fargspec.varargs:
                must_pass_as_kwargs.append(a)
            else:
                invocation_parts.append(a)
        if argspec.varargs:
            used_names.append(argspec.varargs)
            parts.append('*' + argspec.varargs)
            invocation_parts.append('*' + argspec.varargs)
        for k in must_pass_as_kwargs:
            invocation_parts.append('%(k)s=%(k)s' % {'k': k})

        if argspec.keywords:
            used_names.append(argspec.keywords)
            parts.append('**' + argspec.keywords)
            invocation_parts.append('**' + argspec.keywords)

        candidate_names = ['f'] + [
            'f_%d' % (i, ) for i in hrange(1,
                                           len(used_names) + 2)
        ]

        for funcname in candidate_names:  # pragma: no branch
            if funcname not in used_names:
                break

        base_accept = source_exec_as_module(
            COPY_ARGSPEC_SCRIPT % {
                'name': name,
                'funcname': funcname,
                'argspec': ', '.join(parts),
                'invocation': ', '.join(invocation_parts)
            }).accept

        result = base_accept(f)
        result.__doc__ = docstring
        result.__defaults__ = argspec.defaults
        return result
Example #26
    def accept(f):
        fargspec = getargspec(f)
        must_pass_as_kwargs = []
        invocation_parts = []
        for a in argspec.args:
            if a not in fargspec.args and not fargspec.varargs:
                must_pass_as_kwargs.append(a)
            else:
                invocation_parts.append(a)
        if argspec.varargs:
            used_names.append(argspec.varargs)
            parts.append(u'*' + argspec.varargs)
            invocation_parts.append(u'*' + argspec.varargs)
        for k in must_pass_as_kwargs:
            invocation_parts.append(u'%(k)s=%(k)s' % {u'k': k})

        if argspec.keywords:
            used_names.append(argspec.keywords)
            parts.append(u'**' + argspec.keywords)
            invocation_parts.append(u'**' + argspec.keywords)

        candidate_names = [u'f'] + [
            u'f_%d' % (i,) for i in hrange(1, len(used_names) + 2)
        ]

        for funcname in candidate_names:  # pragma: no branch
            if funcname not in used_names:
                break

        base_accept = source_exec_as_module(
            COPY_ARGSPEC_SCRIPT % {
                u'name': name,
                u'funcname': funcname,
                u'argspec': u', '.join(parts),
                u'invocation': u', '.join(invocation_parts)
            }).accept

        result = base_accept(f)
        result.__defaults__ = argspec.defaults
        return result
Example #27
def convert_positional_arguments(function, args, kwargs):
    """Return a tuple (new_args, new_kwargs) where all possible arguments have
    been moved to kwargs.

    new_args will only be non-empty if function has a
    variadic argument.

    """
    argspec = getargspec(function)
    kwargs = dict(kwargs)
    if not argspec.keywords:
        for k in kwargs.keys():
            if k not in argspec.args:
                raise TypeError(u'%s() got an unexpected keyword argument %r' %
                                (function.__name__, k))
    if len(args) < len(argspec.args):
        for i in hrange(len(args),
                        len(argspec.args) - len(argspec.defaults or ())):
            if argspec.args[i] not in kwargs:
                raise TypeError(u'No value provided for argument %s' %
                                (argspec.args[i], ))

    if len(args) > len(argspec.args) and not argspec.varargs:
        raise TypeError(
            u'%s() takes at most %d positional arguments (%d given)' %
            (function.__name__, len(argspec.args), len(args)))

    for arg, name in zip(args, argspec.args):
        if name in kwargs:
            raise TypeError(
                u'%s() got multiple values for keyword argument %r' %
                (function.__name__, name))
        else:
            kwargs[name] = arg
    return (
        tuple(args[len(argspec.args):]),
        kwargs,
    )
Example #28
def extract_lambda_source(f):
    """Extracts a single lambda expression from the string source. Returns a
    string indicating an unknown body if it gets confused in any way.

    This is not a good function and I am sorry for it. Forgive me my
    sins, oh lord

    """
    args = getargspec(f).args
    arg_strings = []
    # In Python 2 you can have destructuring arguments to functions. This
    # results in an argspec with non-string values. I'm not very interested in
    # handling these properly, but it's important to not crash on them.
    bad_lambda = False
    for a in args:
        if isinstance(a, (tuple, list)):  # pragma: no cover
            arg_strings.append(u'(%s)' % (u', '.join(a),))
            bad_lambda = True
        else:
            assert isinstance(a, str)
            arg_strings.append(a)

    if_confused = u'lambda %s: <unknown>' % (u', '.join(arg_strings),)
    if bad_lambda:  # pragma: no cover
        return if_confused
    try:
        source = inspect.getsource(f)
    except IOError:
        return if_confused

    if not isinstance(source, text_type):  # pragma: no branch
        source = source.decode(u'utf-8')  # pragma: no cover
    source = LINE_CONTINUATION.sub(u' ', source)
    source = WHITESPACE.sub(u' ', source)
    source = source.strip()

    try:
        tree = ast.parse(source)
    except SyntaxError:
        for i in hrange(len(source) - 1, len(u'lambda'), -1):
            prefix = source[:i]
            if u'lambda' not in prefix:
                return if_confused
            try:
                tree = ast.parse(prefix)
                source = prefix
                break
            except SyntaxError:
                continue
        else:
            return if_confused

    all_lambdas = extract_all_lambdas(tree)
    aligned_lambdas = [
        l for l in all_lambdas
        if args_for_lambda_ast(l) == args
    ]
    if len(aligned_lambdas) != 1:
        return if_confused
    lambda_ast = aligned_lambdas[0]
    assert lambda_ast.lineno == 1
    source = source[lambda_ast.col_offset:].strip()

    source = source[source.index(u'lambda'):]
    for i in hrange(len(source), len(u'lambda'), -1):  # pragma: no branch
        try:
            parsed = ast.parse(source[:i])
            assert len(parsed.body) == 1
            assert parsed.body
            if not isinstance(parsed.body[0].value, ast.Lambda):
                continue
            source = source[:i]
            break
        except SyntaxError:
            pass
    lines = source.split(u'\n')
    lines = [PROBABLY_A_COMMENT.sub(u'', l) for l in lines]
    source = u'\n'.join(lines)

    source = WHITESPACE.sub(u' ', source)
    source = SPACE_FOLLOWS_OPEN_BRACKET.sub(u'(', source)
    source = SPACE_PRECEDES_CLOSE_BRACKET.sub(u')', source)
    source = source.strip()
    return source
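A sketch of what this helper is for (internal API; the result is shown as expected output, not captured):

is_even = lambda x: x % 2 == 0
# extract_lambda_source(is_even) is expected to recover something very close
# to the literal text 'lambda x: x % 2 == 0', which Hypothesis then uses when
# printing lambdas passed to map() or filter() on strategies.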
Example #29
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)
            return wrapped_test

        if not (generator_arguments or generator_kwargs):
            return invalid(
                'given must be called with at least one argument')

        if (
            generator_arguments and (
                original_argspec.varargs or original_argspec.keywords)
        ):
            return invalid(
                'varargs or keywords are not supported with positional '
                'arguments to @given'
            )

        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            return invalid((
                'Too many positional arguments for %s() (got %d but'
                ' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))

        if generator_arguments and generator_kwargs:
            return invalid(
                'cannot mix positional and keyword arguments to @given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            return invalid(
                '%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        arguments = original_argspec.args
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid((
                    'Cannot decorate function %s() because it has '
                    'destructuring arguments') % (
                        test.__name__,
                ))
        if original_argspec.defaults:
            return invalid(
                'Cannot apply @given to a function with defaults.'
            )
        for name, strategy in zip(
            arguments[-len(generator_arguments):], generator_arguments
        ):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None
        )

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext(None) as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    traceback.print_exc()
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    '\nSee https://hypothesis.readthedocs.io/en/latest/health'
                    'checks.html for more information about this. '
                )
                message += (
                    'If you want to disable just this health check, add %s '
                    'to the suppress_health_check settings for this test.'
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import ConjectureData, \
                Status, StopTest
            if not (
                Phase.reuse in settings.phases or
                Phase.generate in settings.phases
            ):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = ConjectureData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = ConjectureData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if (
                            HealthCheck.exception_in_generation in
                            settings.suppress_health_check
                        ):
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.',
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ',
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ), HealthCheck.data_too_large)
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls') % (
                        filtered_draws, count
                    ), HealthCheck.filter_too_much)
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing the '
                        "size of the data you're generating (with e.g. "
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        fail_health_check((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), HealthCheck.return_value)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import ConjectureRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = ConjectureRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            note_engine_for_statistics(runner)
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in ' +
                            '%.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        ConjectureData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    ConjectureData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, '_hypothesis_internal_use_seed', None
        )
        wrapped_test._hypothesis_internal_use_settings = getattr(
            test, '_hypothesis_internal_use_settings', None
        ) or Settings.default
        return wrapped_test
Example #30
def accept(proxy):
    return impersonate(target)(wraps(target)(define_function_signature(
        target.__name__, target.__doc__, getargspec(target))(proxy)))
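This accept closure is the body of the proxies(target) helper in hypothesis.internal.reflection. A hedged usage sketch (internal API, subject to change):

from hypothesis.internal.reflection import proxies

def add(x, y=1):
    return x + y

@proxies(add)
def logged_add(*args, **kwargs):
    # Same advertised signature as add, with extra behaviour bolted on.
    print('add called with', args, kwargs)
    return add(*args, **kwargs)

# Introspection on logged_add now reports add's signature (x, y=1).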
Example #31
def test_uses_varargs():
    f = copy_argspec(
        'foo', getargspec(has_varargs))(universal_acceptor)
    assert f(1, 2) == ((1, 2), {})
Example #32
def test_does_not_convert_unknown_kwargs_into_args():
    @given(hello=int, world=int)
    def greet(hello, **kwargs):
        pass

    assert getargspec(greet).args == [u'hello']
Example #33
    def run_test_with_generator(test):
        generator_arguments = tuple(given_arguments)
        generator_kwargs = dict(given_kwargs)

        original_argspec = getargspec(test)

        check_invalid = is_invalid_test(test.__name__, original_argspec,
                                        generator_arguments, generator_kwargs)

        if check_invalid is not None:
            return check_invalid

        arguments = original_argspec.args

        for name, strategy in zip(arguments[-len(generator_arguments):],
                                  generator_arguments):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None)

        @impersonate(test)
        @define_function_signature(test.__name__, test.__doc__, argspec)
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings

            random = get_random_for_wrapped_test(test, wrapped_test)

            processed_args = process_arguments_to_given(
                wrapped_test, arguments, kwargs, generator_arguments,
                generator_kwargs, argspec, test, settings)
            arguments, kwargs, test_runner, search_strategy = processed_args

            execute_explicit_examples(test_runner, test, wrapped_test,
                                      settings, arguments, kwargs)

            if settings.max_examples <= 0:
                return

            if not (Phase.reuse in settings.phases
                    or Phase.generate in settings.phases):
                return

            perform_health_checks(random, settings, test_runner,
                                  search_strategy)

            state = StateForActualGivenExecution(test_runner, search_strategy,
                                                 test, settings, random)
            state.run()

        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, '_hypothesis_internal_use_seed', None)
        wrapped_test._hypothesis_internal_use_settings = getattr(
            test, '_hypothesis_internal_use_settings',
            None) or Settings.default
        return wrapped_test
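run_test_with_generator is the factory behind the public @given decorator, so the machinery above is what runs for an ordinary property-based test such as:

from hypothesis import given, strategies as st

@given(st.integers(), st.integers())
def test_addition_commutes(x, y):
    assert x + y == y + x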
Example #34
def test_copying_sets_name():
    f = copy_argspec(
        u'hello_world', getargspec(has_two_args))(universal_acceptor)
    assert f.__name__ == u'hello_world'
Example #35
def test_uses_varargs():
    f = define_function_signature(
        'foo', 'A docstring for foo',
        getargspec(has_varargs))(universal_acceptor)
    assert f(1, 2) == ((1, 2), {})
Example #36
def accept(proxy):
    return wraps(target)(
        copy_argspec(target.__name__, getargspec(target))(proxy))
Example #37
def test_copying_sets_docstring():
    f = define_function_signature(
        'foo', 'A docstring for foo',
        getargspec(has_two_args))(universal_acceptor)
    assert f.__doc__ == 'A docstring for foo'
Example #38
def test_copying_sets_name():
    f = define_function_signature(
        'hello_world', 'A docstring for hello_world',
        getargspec(has_two_args))(universal_acceptor)
    assert f.__name__ == 'hello_world'
Example #39
def test_copying_sets_name():
    f = copy_argspec(
        'hello_world', getargspec(has_two_args))(universal_acceptor)
    assert f.__name__ == 'hello_world'
Example #40
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)
            return wrapped_test

        if (provided_random is not None) and settings.derandomize:
            return invalid(
                u'Cannot both be derandomized and provide an explicit random')

        if not (generator_arguments or generator_kwargs):
            return invalid(
                u'given must be called with at least one argument')

        if generator_arguments and original_argspec.varargs:
            return invalid(
                u'varargs are not supported with positional arguments to '
                u'@given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            return invalid(
                u'%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            return invalid((
                u'Too many positional arguments for %s() (got %d but'
                u' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))
        arguments = original_argspec.args
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid((
                    u'Cannot decorate function %s() because it has '
                    u'destructuring arguments') % (
                        test.__name__,
                ))
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    return invalid((
                        u'Argument %s comes after keyword %s which has been '
                        u'specified, but does not itself have a '
                        u'specification') % (
                        a, seen_kwarg
                    ))

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers))
        )
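        # Note: the specifiers were wrapped in HypothesisProvided and attached
        # as defaults above, so wrapped_test keeps the original test's
        # argument names while Hypothesis can tell which values it owns.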

        unused_kwargs = {}
        for k in extra_kwargs:
            unused_kwargs[k] = HypothesisProvided(generator_kwargs[k])

        hypothesis_owned_arguments = [
            argspec.args[-1 - i] for i in hrange(len(argspec.defaults))
        ] + list(unused_kwargs)

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            if settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = provided_random or new_random()

            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            for arg in hypothesis_owned_arguments:
                try:
                    value = kwargs[arg]
                except KeyError:
                    continue
                if not isinstance(value, HypothesisProvided):
                    note_deprecation(
                        'Passing in explicit values to override Hypothesis '
                        'provided values is deprecated and will no longer '
                        'work in Hypothesis 2.0. If you need to do this, '
                        'extract a common function and call that from a '
                        'Hypothesis based test.', settings
                    )

            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in reversed(getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            lambda: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = strategy(given_specifier, settings)
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and bad_draws < 50
                ):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(reify_and_execute(
                                search_strategy,
                                search_strategy.draw_template(
                                    health_check_random,
                                    search_strategy.draw_parameter(
                                        health_check_random,
                                    )),
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (
                        filtered_draws, count
                    ))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.'
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds. Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        record_repr=record_repr,
                    ))
                    if result is not None:
                        note_deprecation((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead. '
                            'In Hypothesis 2.0 this will become an error.'
                        ) % (test.__name__, result), settings)
                    return False
                except HypothesisDeprecationWarning:
                    raise
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (
                        not warned_random[0] and
                        perform_health_check and
                        getglobalrandomstate() != initial_state
                    ):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.'
                        )
            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage, start_time=start,
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        return wrapped_test
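For orientation, the machinery above is what backs the public @given decorator; a typical user-facing test built on it looks like this (a minimal example, not taken from this code base):

from hypothesis import given
import hypothesis.strategies as st

@given(st.integers(), st.integers())
def test_addition_commutes(x, y):
    # Hypothesis supplies x and y from the integer strategies above.
    assert x + y == y + x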
Ejemplo n.º 45
0
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)

            return wrapped_test

        if not (generator_arguments or generator_kwargs):
            return invalid('given must be called with at least one argument')

        if (generator_arguments
                and (original_argspec.varargs or original_argspec.keywords)):
            return invalid(
                'varargs or keywords are not supported with positional '
                'arguments to @given')

        if (len(generator_arguments) > len(original_argspec.args)):
            return invalid(
                ('Too many positional arguments for %s() (got %d but'
                 ' expected at most %d)') %
                (test.__name__, len(generator_arguments),
                 len(original_argspec.args)))

        if generator_arguments and generator_kwargs:
            return invalid(
                'cannot mix positional and keyword arguments to @given')
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args
        ]
        if extra_kwargs and not original_argspec.keywords:
            return invalid('%s() got an unexpected keyword argument %r' %
                           (test.__name__, extra_kwargs[0]))
        arguments = original_argspec.args
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid(('Cannot decorate function %s() because it has '
                                'destructuring arguments') % (test.__name__, ))
        if original_argspec.defaults:
            return invalid('Cannot apply @given to a function with defaults.')
        for name, strategy in zip(arguments[-len(generator_arguments):],
                                  generator_arguments):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None)

        @impersonate(test)
        @copy_argspec(test.__name__, argspec)
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = executor(selfy)

            for example in reversed(
                    getattr(wrapped_test, 'hypothesis_explicit_examples', ())):
                if example.args:
                    example_kwargs = dict(
                        zip(original_argspec.args[-len(example.args):],
                            example.args))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs))
                try:
                    with BuildContext() as b:
                        test_runner(lambda: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)))

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.')
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = given_specifier
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (count < 10 and time.time() < start + 1
                       and filtered_draws < 50 and bad_draws < 50):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(
                                reify_and_execute(
                                    search_strategy,
                                    search_strategy.draw_template(
                                        health_check_random,
                                        search_strategy.draw_parameter(
                                            health_check_random, )),
                                    lambda *args, **kwargs: None,
                                ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.')
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. ')
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (filtered_draws, count))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.')
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        ('Data generation is extremely slow: Only produced '
                         '%d valid examples in %.2f seconds. Try decreasing '
                         "size of the data yo're generating (with e.g."
                         'average_size or max_leaves parameters).') %
                        (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.')

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(
                        reify_and_execute(
                            search_strategy,
                            xs,
                            test,
                            record_repr=record_repr,
                        ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck(
                            ('Tests run under @given should return None, but '
                             '%s returned %r instead.') %
                            (test.__name__, result), settings)
                    return False
                except (HypothesisDeprecationWarning, FailedHealthCheck,
                        UnsatisfiedAssumption):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (not warned_random[0] and perform_health_check
                            and getglobalrandomstate() != initial_state):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            with settings:
                falsifying_template = None
                try:
                    falsifying_template = best_satisfying_template(
                        search_strategy,
                        random,
                        is_template_example,
                        settings,
                        storage,
                        start_time=start,
                    )
                except NoSuchExample:
                    return

                assert last_exception[0] is not None

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test,
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    report(traceback.format_exc())
                    raise Flaky(
                        'Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.')

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0], )

                try:
                    test_runner(
                        reify_and_execute(search_strategy,
                                          falsifying_template,
                                          test_is_flaky(
                                              test,
                                              repr_for_last_exception[0]),
                                          print_example=True,
                                          is_final=True))
                except UnsatisfiedAssumption:
                    raise Flaky(
                        'Unreliable test data: Failed to reproduce a failure '
                        'and then when it came to recreating the example in '
                        'order to print the test data with a flaky result '
                        'the example was filtered out (by e.g. a '
                        'call to filter in your strategy) when we didn\'t '
                        'expect it to be.')

        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, '_hypothesis_internal_use_seed', None)
        wrapped_test._hypothesis_internal_use_settings = getattr(
            test, '_hypothesis_internal_use_settings',
            None) or Settings.default
        return wrapped_test
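Unlike the HypothesisProvided variant above, this version folds positional strategies into generator_kwargs and strips those names from the wrapper's signature; a small sketch of that mapping, with strategy objects replaced by string stand-ins:

arguments = ['x', 'y']              # original_argspec.args
generator_arguments = ('s1', 's2')  # stand-ins for the strategies passed to @given
generator_kwargs = {}
for name, strat in zip(arguments[-len(generator_arguments):],
                       generator_arguments):
    generator_kwargs[name] = strat
assert generator_kwargs == {'x': 's1', 'y': 's2'}
# The wrapper's declared arguments are whatever is left over: here, none.
assert [a for a in arguments if a not in generator_kwargs] == []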
Ejemplo n.º 46
0
def accept(proxy):
    return impersonate(target)(wraps(target)(
        copy_argspec(target.__name__, getargspec(target))(proxy)))
Ejemplo n.º 47
0
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)
            return wrapped_test

        if not (generator_arguments or generator_kwargs):
            return invalid(
                'given must be called with at least one argument')

        if (
            generator_arguments and (
                original_argspec.varargs or original_argspec.keywords)
        ):
            return invalid(
                'varargs or keywords are not supported with positional '
                'arguments to @given'
            )

        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            return invalid((
                'Too many positional arguments for %s() (got %d but'
                ' expected at most %d)') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))

        if generator_arguments and generator_kwargs:
            return invalid(
                'cannot mix positional and keyword arguments to @given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            return invalid(
                '%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        arguments = original_argspec.args
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid((
                    'Cannot decorate function %s() because it has '
                    'destructuring arguments') % (
                        test.__name__,
                ))
        if original_argspec.defaults:
            return invalid(
                'Cannot apply @given to a function with defaults.'
            )
        for name, strategy in zip(
            arguments[-len(generator_arguments):], generator_arguments
        ):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None
        )

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            'example has too many arguments for test. '
                            'Expected at most %d but got %d' % (
                                len(original_argspec.args), len(example.args)))
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            None,
                            lambda data: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, \
                StopTest

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(
                            search_strategy,
                            lambda *args, **kwargs: None,
                        ))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and overruns < 20
                ):
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution:
                            distribution(health_check_random, n)
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(
                                search_strategy,
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except Exception:
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function yo've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if overruns >= 20 or (
                    not count and overruns > 0
                ):
                    fail_health_check((
                        'Examples routinely exceeded the max allowable size. '
                        '(%d examples overran while generating %d valid ones)'
                        '. Generating examples this large will usually lead to'
                        ' bad results. You should try setting average_size or '
                        'max_size parameters on your collections and turning '
                        'max_leaves down on recursive() calls.') % (
                        overruns, count
                    ))
                if filtered_draws >= 50 or (
                    not count and filtered_draws > 0
                ):
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less. This can also be caused by '
                        'a low max_leaves parameter in recursive() calls') % (
                        filtered_draws, count
                    ))
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds (%d invalid ones '
                        'and %d exceeded maximum size). Try decreasing '
                        "size of the data you're generating (with e.g."
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime, filtered_draws, overruns))
                if getglobalrandomstate() != initial_state:
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )
            last_exception = [None]
            repr_for_last_exception = [None]
            performed_random_check = [False]

            def evaluate_test_data(data):
                if perform_health_check and not performed_random_check[0]:
                    initial_state = getglobalrandomstate()
                    performed_random_check[0] = True
                else:
                    initial_state = None
                try:
                    result = test_runner(data, reify_and_execute(
                        search_strategy, test,
                    ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), settings)
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    StopTest,
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()
                finally:
                    if (
                        initial_state is not None and
                        getglobalrandomstate() != initial_state
                    ):
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.')

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(
                evaluate_test_data,
                settings=settings, random=random,
                database_key=database_key,
            )
            runner.run()
            run_time = time.time() - start_time
            timed_out = (
                settings.timeout > 0 and
                run_time >= settings.timeout
            )
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(
                        database_key, falsifying_example
                    )
            else:
                if runner.valid_examples < min(
                    settings.min_satisfying_examples,
                    settings.max_examples,
                ):
                    if timed_out:
                        raise Timeout((
                            'Ran out of time before finding a satisfying '
                            'example for '
                            '%s. Only found %d examples in ' +
                            '%.2fs.'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples, run_time
                        ))
                    else:
                        raise Unsatisfiable((
                            'Unable to satisfy assumptions of hypothesis '
                            '%s. Only %d examples considered '
                            'satisfied assumptions'
                        ) % (
                            get_pretty_function_description(test),
                            runner.valid_examples,))
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(
                            search_strategy, test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )

            report(
                'Failed to reproduce exception. Expected: \n' +
                last_exception[0],
            )

            filter_message = (
                'Unreliable test data: Failed to reproduce a failure '
                'and then when it came to recreating the example in '
                'order to print the test data with a flaky result '
                'the example was filtered out (by e.g. a '
                'call to filter in your strategy) when we didn\'t '
                'expect it to be.'
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, '_hypothesis_internal_use_seed', None
        )
        wrapped_test._hypothesis_internal_use_settings = getattr(
            test, '_hypothesis_internal_use_settings', None
        ) or Settings.default
        return wrapped_test
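The _hypothesis_internal_use_seed attribute read above is something a seed decorator would have set on the test beforehand; a minimal sketch of such a decorator (my_seed is a hypothetical name, not the library's public API):

def my_seed(value):
    # Hypothetical sketch: stash a fixed seed on the test function so the
    # wrapper above can pick it up and build Random(value) from it.
    def accept(test):
        test._hypothesis_internal_use_seed = value
        return test
    return accept

@my_seed(1234)
def test_something():
    pass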
Ejemplo n.º 48
0
def test_argspec_lines_up(f, g):
    af = getargspec(f)
    ag = getargspec(g(f))
    assert af.args == ag.args
    assert af.keywords == ag.keywords
    assert af.varargs == ag.varargs
Ejemplo n.º 49
0
def test_uses_defaults():
    f = copy_argspec(
        'foo', getargspec(has_a_default))(universal_acceptor)
    assert f(3, 2) == ((3, 2, 1), {})
Ejemplo n.º 50
0
def extract_lambda_source(f):
    """Extracts a single lambda expression from the string source. Returns a
    string indicating an unknown body if it gets confused in any way.

    This is not a good function and I am sorry for it. Forgive me my
    sins, oh lord

    """
    args = getargspec(f).args
    arg_strings = []
    # In Python 2 you can have destructuring arguments to functions. This
    # results in an argspec with non-string values. I'm not very interested in
    # handling these properly, but it's important to not crash on them.
    bad_lambda = False
    for a in args:
        if isinstance(a, (tuple, list)):  # pragma: no cover
            arg_strings.append('(%s)' % (', '.join(a), ))
            bad_lambda = True
        else:
            assert isinstance(a, str)
            arg_strings.append(a)

    if_confused = 'lambda %s: <unknown>' % (', '.join(arg_strings), )
    if bad_lambda:  # pragma: no cover
        return if_confused
    try:
        source = inspect.getsource(f)
    except IOError:
        return if_confused

    source = LINE_CONTINUATION.sub(' ', source)
    source = WHITESPACE.sub(' ', source)
    source = source.strip()

    try:
        tree = ast.parse(source)
    except SyntaxError:
        for i in hrange(len(source) - 1, len('lambda'), -1):
            prefix = source[:i]
            if 'lambda' not in prefix:
                return if_confused
            try:
                tree = ast.parse(prefix)
                source = prefix
                break
            except SyntaxError:
                continue
        else:
            return if_confused

    all_lambdas = extract_all_lambdas(tree)
    aligned_lambdas = [
        l for l in all_lambdas if args_for_lambda_ast(l) == args
    ]
    if len(aligned_lambdas) != 1:
        return if_confused
    lambda_ast = aligned_lambdas[0]
    assert lambda_ast.lineno == 1
    source = source[lambda_ast.col_offset:].strip()

    source = source[source.index('lambda'):]
    for i in hrange(len(source), len('lambda'), -1):  # pragma: no branch
        try:
            parsed = ast.parse(source[:i])
            assert len(parsed.body) == 1
            assert parsed.body
            if not isinstance(parsed.body[0].value, ast.Lambda):
                continue
            source = source[:i]
            break
        except SyntaxError:
            pass
    lines = source.split('\n')
    lines = [PROBABLY_A_COMMENT.sub('', l) for l in lines]
    source = '\n'.join(lines)

    source = WHITESPACE.sub(' ', source)
    source = SPACE_FOLLOWS_OPEN_BRACKET.sub('(', source)
    source = SPACE_PRECEDES_CLOSE_BRACKET.sub(')', source)
    source = source.strip()
    return source
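As a rough sanity check of the function above (run from a file rather than a REPL, since it relies on inspect.getsource, and assuming the module-level regex constants are defined as in hypothesis.internal.reflection), the expected behaviour is roughly:

# Illustrative expectations, not an exhaustive test.
identity = lambda x: x
assert extract_lambda_source(identity) == 'lambda x: x'

# If the source cannot be recovered or parsed, the function falls back to a
# placeholder built from the argspec, e.g. 'lambda x: <unknown>'.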
Ejemplo n.º 51
0
def accept(proxy):
    return impersonate(target)(wraps(target)(copy_argspec(
        target.__name__, getargspec(target))(proxy)))
Ejemplo n.º 52
0
def test_uses_defaults():
    f = define_function_signature(
        'foo', 'A docstring for foo',
        getargspec(has_a_default))(universal_acceptor)
    assert f(3, 2) == ((3, 2, 1), {})
Ejemplo n.º 53
0
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)

            return wrapped_test

        if not (generator_arguments or generator_kwargs):
            return invalid("given must be called with at least one argument")

        if generator_arguments and (original_argspec.varargs or original_argspec.keywords):
            return invalid("varargs or keywords are not supported with positional " "arguments to @given")

        if len(generator_arguments) > len(original_argspec.args):
            return invalid(
                ("Too many positional arguments for %s() (got %d but" " expected at most %d")
                % (test.__name__, len(generator_arguments), len(original_argspec.args))
            )

        if generator_arguments and generator_kwargs:
            return invalid("cannot mix positional and keyword arguments to @given")
        extra_kwargs = [k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            return invalid("%s() got an unexpected keyword argument %r" % (test.__name__, extra_kwargs[0]))
        arguments = original_argspec.args
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid(
                    ("Cannot decorate function %s() because it has " "destructuring arguments") % (test.__name__,)
                )
        if original_argspec.defaults:
            return invalid("Cannot apply @given to a function with defaults.")
        for name, strategy in zip(arguments[-len(generator_arguments) :], generator_arguments):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None,
        )

        @impersonate(test)
        @copy_argspec(test.__name__, argspec)
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = new_style_executor(selfy)

            for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
                if example.args:
                    if len(example.args) > len(original_argspec.args):
                        raise InvalidArgument(
                            "example has too many arguments for test. "
                            "Expected at most %d but got %d" % (len(original_argspec.args), len(example.args))
                        )
                    example_kwargs = dict(zip(original_argspec.args[-len(example.args) :], example.args))
                else:
                    example_kwargs = example.kwargs
                if Phase.explicit not in settings.phases:
                    continue
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = "Falsifying example: %s(%s)" % (
                    test.__name__,
                    arg_string(test, arguments, example_kwargs),
                )
                try:
                    with BuildContext() as b:
                        test_runner(None, lambda data: test(*arguments, **example_kwargs))
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise
            if settings.max_examples <= 0:
                return

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments), sd.fixed_dictionaries(generator_kwargs).map(lambda args: dict(args, **kwargs))
            )

            def fail_health_check(message, label):
                if label in settings.suppress_health_check:
                    return
                message += (
                    "\nSee https://hypothesis.readthedocs.io/en/latest/health"
                    "checks.html for more information about this. "
                )
                message += (
                    "If you want to disable just this health check, add %s "
                    "to the suppress_health_check settings for this test."
                ) % (label,)
                raise FailedHealthCheck(message)

            search_strategy = given_specifier
            if selfy is not None:
                search_strategy = WithRunner(search_strategy, selfy)

            search_strategy.validate()

            perform_health_check = settings.perform_health_check
            perform_health_check &= Settings.default.perform_health_check

            from hypothesis.internal.conjecture.data import TestData, Status, StopTest

            if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
                return

            if perform_health_check:
                health_check_random = Random(random.getrandbits(128))
                # We "pre warm" the health check with one draw to give it some
                # time to calculate any cached data. This prevents the case
                # where the first draw of the health check takes ages because
                # of loading unicode data the first time.
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    try:
                        test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                    except BaseException:
                        pass
                count = 0
                overruns = 0
                filtered_draws = 0
                start = time.time()
                while count < 10 and time.time() < start + 1 and filtered_draws < 50 and overruns < 20:
                    try:
                        data = TestData(
                            max_length=settings.buffer_size,
                            draw_bytes=lambda data, n, distribution: distribution(health_check_random, n),
                        )
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(data, reify_and_execute(search_strategy, lambda *args, **kwargs: None))
                        count += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except StopTest:
                        if data.status == Status.INVALID:
                            filtered_draws += 1
                        else:
                            assert data.status == Status.OVERRUN
                            overruns += 1
                    except InvalidArgument:
                        raise
                    except Exception:
                        if HealthCheck.exception_in_generation in settings.suppress_health_check:
                            raise
                        report(traceback.format_exc())
                        if test_runner is default_new_style_executor:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data.",
                                HealthCheck.exception_in_generation,
                            )
                        else:
                            fail_health_check(
                                "An exception occurred during data "
                                "generation in initial health check. "
                                "This indicates a bug in the strategy. "
                                "This could either be a Hypothesis bug or "
                                "an error in a function you've passed to "
                                "it to construct your data. Additionally, "
                                "you have a custom executor, which means "
                                "that this could be your executor failing "
                                "to handle a function which returns None. ",
                                HealthCheck.exception_in_generation,
                            )
                if overruns >= 20 or (not count and overruns > 0):
                    fail_health_check(
                        (
                            "Examples routinely exceeded the max allowable size. "
                            "(%d examples overran while generating %d valid ones)"
                            ". Generating examples this large will usually lead to"
                            " bad results. You should try setting average_size or "
                            "max_size parameters on your collections and turning "
                            "max_leaves down on recursive() calls."
                        )
                        % (overruns, count),
                        HealthCheck.data_too_large,
                    )
                if filtered_draws >= 50 or (not count and filtered_draws > 0):
                    fail_health_check(
                        (
                            "It looks like your strategy is filtering out a lot "
                            "of data. Health check found %d filtered examples but "
                            "only %d good ones. This will make your tests much "
                            "slower, and also will probably distort the data "
                            "generation quite a lot. You should adapt your "
                            "strategy to filter less. This can also be caused by "
                            "a low max_leaves parameter in recursive() calls"
                        )
                        % (filtered_draws, count),
                        HealthCheck.filter_too_much,
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check(
                        (
                            "Data generation is extremely slow: Only produced "
                            "%d valid examples in %.2f seconds (%d invalid ones "
                            "and %d exceeded maximum size). Try decreasing "
                            "size of the data you're generating (with e.g."
                            "average_size or max_leaves parameters)."
                        )
                        % (count, runtime, filtered_draws, overruns),
                        HealthCheck.too_slow,
                    )
            last_exception = [None]
            repr_for_last_exception = [None]

            def evaluate_test_data(data):
                try:
                    result = test_runner(data, reify_and_execute(search_strategy, test))
                    if result is not None and settings.perform_health_check:
                        fail_health_check(
                            ("Tests run under @given should return None, but " "%s returned %r instead.")
                            % (test.__name__, result),
                            HealthCheck.return_value,
                        )
                    return False
                except UnsatisfiedAssumption:
                    data.mark_invalid()
                except (HypothesisDeprecationWarning, FailedHealthCheck, StopTest):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    verbose_report(last_exception[0])
                    data.mark_interesting()

            from hypothesis.internal.conjecture.engine import TestRunner

            falsifying_example = None
            database_key = str_to_bytes(fully_qualified_name(test))
            start_time = time.time()
            runner = TestRunner(evaluate_test_data, settings=settings, random=random, database_key=database_key)
            runner.run()
            run_time = time.time() - start_time
            timed_out = settings.timeout > 0 and run_time >= settings.timeout
            if runner.last_data is None:
                return
            if runner.last_data.status == Status.INTERESTING:
                falsifying_example = runner.last_data.buffer
                if settings.database is not None:
                    settings.database.save(database_key, falsifying_example)
            else:
                if runner.valid_examples < min(settings.min_satisfying_examples, settings.max_examples):
                    if timed_out:
                        raise Timeout(
                            (
                                "Ran out of time before finding a satisfying "
                                "example for "
                                "%s. Only found %d examples in " + "%.2fs."
                            )
                            % (get_pretty_function_description(test), runner.valid_examples, run_time)
                        )
                    else:
                        raise Unsatisfiable(
                            (
                                "Unable to satisfy assumptions of hypothesis "
                                "%s. Only %d examples considered "
                                "satisfied assumptions"
                            )
                            % (get_pretty_function_description(test), runner.valid_examples)
                        )
                return

            assert last_exception[0] is not None

            try:
                with settings:
                    test_runner(
                        TestData.for_buffer(falsifying_example),
                        reify_and_execute(search_strategy, test, print_example=True, is_final=True),
                    )
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                raise Flaky(
                    "Unreliable assumption: An example which satisfied " "assumptions on the first run now fails it."
                )

            report("Failed to reproduce exception. Expected: \n" + last_exception[0])

            filter_message = (
                "Unreliable test data: Failed to reproduce a failure "
                "and then when it came to recreating the example in "
                "order to print the test data with a flaky result "
                "the example was filtered out (by e.g. a "
                "call to filter in your strategy) when we didn't "
                "expect it to be."
            )

            try:
                test_runner(
                    TestData.for_buffer(falsifying_example),
                    reify_and_execute(
                        search_strategy,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True,
                        is_final=True,
                    ),
                )
            except (UnsatisfiedAssumption, StopTest):
                raise Flaky(filter_message)

        for attr in dir(test):
            if attr[0] != "_" and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(test, "_hypothesis_internal_use_seed", None)
        wrapped_test._hypothesis_internal_use_settings = (
            getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
        )
        return wrapped_test
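
For reference, a minimal usage sketch of the public decorator that the wrapped_test machinery above implements; it assumes only the public hypothesis API (given and strategies), and the test name and strategy choices are illustrative rather than taken from this listing.

# Hedged usage sketch: the public @given decorator backed by the
# wrapped_test implementation above. Assumes the public hypothesis package.
from hypothesis import given
import hypothesis.strategies as st


@given(st.lists(st.integers()))
def test_reversing_twice_is_identity(xs):
    # Hypothesis calls this body repeatedly with generated lists of ints.
    assert list(reversed(list(reversed(xs)))) == xs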
Example #54
0
def test_uses_defaults():
    f = copy_argspec(
        u'foo', getargspec(has_a_default))(universal_acceptor)
    assert f(3, 2) == ((3, 2, 1), {})
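
The fixtures this test relies on are not shown in this listing; a hedged reconstruction, inferred from the assertion above, might look as follows (treat both definitions as assumptions).

# Assumed fixtures (not part of this listing), reconstructed from the
# assertion f(3, 2) == ((3, 2, 1), {}).
def has_a_default(x, y, z=1):
    # Donor of the argspec being copied; z=1 supplies the default.
    pass


def universal_acceptor(*args, **kwargs):
    # Records exactly what it was called with, so the test can check that
    # the copied signature forwarded (3, 2) and filled in z=1.
    return args, kwargs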
Example #55
0
    def run_test_with_generator(test):
        original_argspec = getargspec(test)

        def invalid(message):
            def wrapped_test(*arguments, **kwargs):
                raise InvalidArgument(message)
            return wrapped_test

        if not (generator_arguments or generator_kwargs):
            return invalid(
                'given must be called with at least one argument')

        if (
            generator_arguments and (
                original_argspec.varargs or original_argspec.keywords)
        ):
            return invalid(
                'varargs or keywords are not supported with positional '
                'arguments to @given'
            )

        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            return invalid((
                'Too many positional arguments for %s() (got %d but'
                ' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))

        if generator_arguments and generator_kwargs:
            return invalid(
                'cannot mix positional and keyword arguments to @given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            return invalid(
                '%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        arguments = original_argspec.args
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                return invalid((
                    'Cannot decorate function %s() because it has '
                    'destructuring arguments') % (
                        test.__name__,
                ))
        if original_argspec.defaults:
            return invalid(
                'Cannot apply @given to a function with defaults.'
            )
        for name, strategy in zip(
            arguments[-len(generator_arguments):], generator_arguments
        ):
            generator_kwargs[name] = strategy

        argspec = inspect.ArgSpec(
            args=[a for a in arguments if a not in generator_kwargs],
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=None
        )

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            settings = wrapped_test._hypothesis_internal_use_settings
            if wrapped_test._hypothesis_internal_use_seed is not None:
                random = Random(
                    wrapped_test._hypothesis_internal_use_seed)
            elif settings.derandomize:
                random = Random(function_digest(test))
            else:
                random = new_random()

            import hypothesis.strategies as sd

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)

            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            test_runner = executor(selfy)

            for example in reversed(getattr(
                wrapped_test, 'hypothesis_explicit_examples', ()
            )):
                if example.args:
                    example_kwargs = dict(zip(
                        original_argspec.args[-len(example.args):],
                        example.args
                    ))
                else:
                    example_kwargs = example.kwargs
                example_kwargs.update(kwargs)
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than later.
                message_on_failure = 'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    with BuildContext() as b:
                        test_runner(
                            lambda: test(*arguments, **example_kwargs)
                        )
                except BaseException:
                    report(message_on_failure)
                    for n in b.notes:
                        report(n)
                    raise

            arguments = tuple(arguments)

            given_specifier = sd.tuples(
                sd.just(arguments),
                sd.fixed_dictionaries(generator_kwargs).map(
                    lambda args: dict(args, **kwargs)
                )
            )

            def fail_health_check(message):
                message += (
                    '\nSee http://hypothesis.readthedocs.org/en/latest/health'
                    'checks.html for more information about this.'
                )
                if settings.strict:
                    raise FailedHealthCheck(message)
                else:
                    warnings.warn(FailedHealthCheck(message))

            search_strategy = given_specifier
            search_strategy.validate()

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            start = time.time()
            warned_random = [False]
            perform_health_check = settings.perform_health_check
            if Settings.default is not None:
                perform_health_check &= Settings.default.perform_health_check

            if perform_health_check:
                initial_state = getglobalrandomstate()
                health_check_random = Random(random.getrandbits(128))
                count = 0
                bad_draws = 0
                filtered_draws = 0
                errors = 0
                while (
                    count < 10 and time.time() < start + 1 and
                    filtered_draws < 50 and bad_draws < 50
                ):
                    try:
                        with Settings(settings, verbosity=Verbosity.quiet):
                            test_runner(reify_and_execute(
                                search_strategy,
                                search_strategy.draw_template(
                                    health_check_random,
                                    search_strategy.draw_parameter(
                                        health_check_random,
                                    )),
                                lambda *args, **kwargs: None,
                            ))
                        count += 1
                    except BadTemplateDraw:
                        bad_draws += 1
                    except UnsatisfiedAssumption:
                        filtered_draws += 1
                    except Exception:
                        if errors == 0:
                            report(traceback.format_exc())
                        errors += 1
                        if test_runner is default_executor:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                "an error in a function you've passed to "
                                'it to construct your data.'
                            )
                        else:
                            fail_health_check(
                                'An exception occurred during data '
                                'generation in initial health check. '
                                'This indicates a bug in the strategy. '
                                'This could either be a Hypothesis bug or '
                                'an error in a function you\'ve passed to '
                                'it to construct your data. Additionally, '
                                'you have a custom executor, which means '
                                'that this could be your executor failing '
                                'to handle a function which returns None. '
                            )
                if filtered_draws >= 50:
                    fail_health_check((
                        'It looks like your strategy is filtering out a lot '
                        'of data. Health check found %d filtered examples but '
                        'only %d good ones. This will make your tests much '
                        'slower, and also will probably distort the data '
                        'generation quite a lot. You should adapt your '
                        'strategy to filter less.') % (
                        filtered_draws, count
                    ))
                if bad_draws >= 50:
                    fail_health_check(
                        'Hypothesis is struggling to generate examples. '
                        'This is often a sign of a recursive strategy which '
                        'fans out too broadly. If you\'re using recursive, '
                        'try to reduce the size of the recursive step or '
                        'increase the maximum permitted number of leaves.'
                    )
                runtime = time.time() - start
                if runtime > 1.0 or count < 10:
                    fail_health_check((
                        'Data generation is extremely slow: Only produced '
                        '%d valid examples in %.2f seconds. Try decreasing '
                        "size of the data you're generating (with e.g. "
                        'average_size or max_leaves parameters).'
                    ) % (count, runtime))
                if getglobalrandomstate() != initial_state:
                    warned_random[0] = True
                    fail_health_check(
                        'Data generation depends on global random module. '
                        'This makes results impossible to replay, which '
                        'prevents Hypothesis from working correctly. '
                        'If you want to use methods from random, use '
                        'randoms() from hypothesis.strategies to get an '
                        'instance of Random you can use. Alternatively, you '
                        'can use the random_module() strategy to explicitly '
                        'seed the random module.'
                    )

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                if perform_health_check and not warned_random[0]:
                    initial_state = getglobalrandomstate()
                record_repr = [None]
                try:
                    result = test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        record_repr=record_repr,
                    ))
                    if result is not None and settings.perform_health_check:
                        raise FailedHealthCheck((
                            'Tests run under @given should return None, but '
                            '%s returned %r instead.'
                        ) % (test.__name__, result), settings)
                    return False
                except (
                    HypothesisDeprecationWarning, FailedHealthCheck,
                    UnsatisfiedAssumption
                ):
                    raise
                except Exception:
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True
                finally:
                    if (
                        not warned_random[0] and
                        perform_health_check and
                        getglobalrandomstate() != initial_state
                    ):
                        warned_random[0] = True
                        fail_health_check(
                            'Your test used the global random module. '
                            'This is unlikely to work correctly. You should '
                            'consider using the randoms() strategy from '
                            'hypothesis.strategies instead. Alternatively, '
                            'you can use the random_module() strategy to '
                            'explicitly seed the random module.'
                        )
            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            with settings:
                falsifying_template = None
                try:
                    falsifying_template = best_satisfying_template(
                        search_strategy, random, is_template_example,
                        settings, storage, start_time=start,
                    )
                except NoSuchExample:
                    return

                assert last_exception[0] is not None

                try:
                    test_runner(reify_and_execute(
                        search_strategy, falsifying_template, test,
                        print_example=True, is_final=True
                    ))
                except UnsatisfiedAssumption:
                    report(traceback.format_exc())
                    raise Flaky(
                        'Unreliable assumption: An example which satisfied '
                        'assumptions on the first run now fails it.'
                    )

                report(
                    'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                try:
                    test_runner(reify_and_execute(
                        search_strategy, falsifying_template,
                        test_is_flaky(test, repr_for_last_exception[0]),
                        print_example=True, is_final=True
                    ))
                except UnsatisfiedAssumption:
                    raise Flaky(
                        'Unreliable test data: Failed to reproduce a failure '
                        'and then when it came to recreating the example in '
                        'order to print the test data with a flaky result '
                        'the example was filtered out (by e.g. a '
                        'call to filter in your strategy) when we didn\'t '
                        'expect it to be.'
                    )
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        wrapped_test._hypothesis_internal_use_seed = getattr(
            test, '_hypothesis_internal_use_seed', None
        )
        wrapped_test._hypothesis_internal_use_settings = getattr(
            test, '_hypothesis_internal_use_settings', None
        ) or Settings.default
        return wrapped_test
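
Both this example and the previous one resolve `selfy` so that a bound test class can supply its own executor. Below is a hedged sketch of that hook, assuming the documented execute_example protocol; the class and test are illustrative only.

# Hedged sketch of the custom-executor hook that the selfy/executor(selfy)
# lookup above probes for. Assumes the documented execute_example protocol.
from unittest import TestCase

from hypothesis import given
import hypothesis.strategies as st


class TestWithExecutor(TestCase):
    def execute_example(self, f):
        # Each generated example is handed to this hook instead of the test
        # body being called directly.
        return f()

    @given(st.integers())
    def test_integers_are_ints(self, x):
        assert isinstance(x, int)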
Example #56
0
    def run_test_with_generator(test):
        if settings.derandomize:
            assert provided_random is None
            random = Random(function_digest(test))
        else:
            random = provided_random or Random()

        original_argspec = getargspec(test)
        if generator_arguments and original_argspec.varargs:
            raise InvalidArgument(
                u'varargs are not supported with positional arguments to '
                u'@given'
            )
        extra_kwargs = [
            k for k in generator_kwargs if k not in original_argspec.args]
        if extra_kwargs and not original_argspec.keywords:
            raise InvalidArgument(
                u'%s() got an unexpected keyword argument %r' % (
                    test.__name__,
                    extra_kwargs[0]
                ))
        if (
            len(generator_arguments) > len(original_argspec.args)
        ):
            raise InvalidArgument((
                u'Too many positional arguments for %s() (got %d but'
                u' expected at most %d') % (
                    test.__name__, len(generator_arguments),
                    len(original_argspec.args)))
        arguments = original_argspec.args
        specifiers = list(generator_arguments)
        seen_kwarg = None
        for a in arguments:
            if isinstance(a, list):  # pragma: no cover
                raise InvalidArgument((
                    u'Cannot decorate function %s() because it has '
                    u'destructuring arguments') % (
                        test.__name__,
                ))
            if a in generator_kwargs:
                seen_kwarg = seen_kwarg or a
                specifiers.append(generator_kwargs[a])
            else:
                if seen_kwarg is not None:
                    raise InvalidArgument((
                        u'Argument %s comes after keyword %s which has been '
                        u'specified, but does not itself have a '
                        u'specification') % (
                        a, seen_kwarg
                    ))

        argspec = inspect.ArgSpec(
            args=arguments,
            keywords=original_argspec.keywords,
            varargs=original_argspec.varargs,
            defaults=tuple(map(HypothesisProvided, specifiers))
        )

        unused_kwargs = {}
        for k in extra_kwargs:
            unused_kwargs[k] = HypothesisProvided(generator_kwargs[k])

        @impersonate(test)
        @copy_argspec(
            test.__name__, argspec
        )
        def wrapped_test(*arguments, **kwargs):
            import hypothesis.strategies as sd
            from hypothesis.internal.strategymethod import strategy

            selfy = None
            arguments, kwargs = convert_positional_arguments(
                wrapped_test, arguments, kwargs)
            # Anything in unused_kwargs hasn't been injected through
            # argspec.defaults, so we need to add them.
            for k in unused_kwargs:
                if k not in kwargs:
                    kwargs[k] = unused_kwargs[k]
            # If the test function is a method of some kind, the bound object
            # will be the first named argument if there are any, otherwise the
            # first vararg (if any).
            if argspec.args:
                selfy = kwargs.get(argspec.args[0])
            elif arguments:
                selfy = arguments[0]
            if isinstance(selfy, HypothesisProvided):
                selfy = None
            test_runner = executor(selfy)

            for example in getattr(
                wrapped_test, u'hypothesis_explicit_examples', ()
            ):
                if example.args:
                    example_kwargs = dict(zip(
                        argspec.args[-len(example.args):], example.args
                    ))
                else:
                    example_kwargs = dict(example.kwargs)

                for k, v in kwargs.items():
                    if not isinstance(v, HypothesisProvided):
                        example_kwargs[k] = v
                # Note: Test may mutate arguments and we can't rerun explicit
                # examples, so we have to calculate the failure message at this
                # point rather than than later.
                message_on_failure = u'Falsifying example: %s(%s)' % (
                    test.__name__, arg_string(test, arguments, example_kwargs)
                )
                try:
                    test_runner(
                        lambda: test(*arguments, **example_kwargs)
                    )
                except BaseException:
                    report(message_on_failure)
                    raise

            if not any(
                isinstance(x, HypothesisProvided)
                for xs in (arguments, kwargs.values())
                for x in xs
            ):
                # All arguments have been satisfied without needing to invoke
                # hypothesis
                test_runner(lambda: test(*arguments, **kwargs))
                return

            def convert_to_specifier(v):
                if isinstance(v, HypothesisProvided):
                    return strategy(v.value, settings)
                else:
                    return sd.just(v)

            given_specifier = sd.tuples(
                sd.tuples(*map(convert_to_specifier, arguments)),
                sd.fixed_dictionaries(dict(
                    (k, convert_to_specifier(v)) for (k, v) in kwargs.items()))
            )

            search_strategy = strategy(given_specifier, settings)

            if settings.database:
                storage = settings.database.storage(
                    fully_qualified_name(test))
            else:
                storage = None

            last_exception = [None]
            repr_for_last_exception = [None]

            def is_template_example(xs):
                record_repr = [None]
                try:
                    test_runner(reify_and_execute(
                        search_strategy, xs, test,
                        always_print=settings.max_shrinks <= 0,
                        record_repr=record_repr,
                    ))
                    return False
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception as e:
                    if settings.max_shrinks <= 0:
                        raise e
                    last_exception[0] = traceback.format_exc()
                    repr_for_last_exception[0] = record_repr[0]
                    verbose_report(last_exception[0])
                    return True

            is_template_example.__name__ = test.__name__
            is_template_example.__qualname__ = qualname(test)

            falsifying_template = None
            try:
                falsifying_template = best_satisfying_template(
                    search_strategy, random, is_template_example,
                    settings, storage
                )
            except NoSuchExample:
                return

            assert last_exception[0] is not None

            with settings:
                test_runner(reify_and_execute(
                    search_strategy, falsifying_template, test,
                    print_example=True, is_final=True
                ))

                report(
                    u'Failed to reproduce exception. Expected: \n' +
                    last_exception[0],
                )

                test_runner(reify_and_execute(
                    search_strategy, falsifying_template,
                    test_is_flaky(test, repr_for_last_exception[0]),
                    print_example=True, is_final=True
                ))
        for attr in dir(test):
            if attr[0] != '_' and not hasattr(wrapped_test, attr):
                setattr(wrapped_test, attr, getattr(test, attr))
        wrapped_test.is_hypothesis_test = True
        return wrapped_test
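
A hedged sketch of the HypothesisProvided-era behaviour implemented above: strategies are injected through argspec defaults, so a call that supplies every argument explicitly skips example generation entirely (the `not any(isinstance(x, HypothesisProvided) ...)` branch). The names below are illustrative and assume the public API of that era.

# Illustrative only; assumes the public given/strategies API of that era.
from hypothesis import given
import hypothesis.strategies as st


@given(st.integers())
def test_double(x):
    assert x * 2 == x + x


test_double(3)  # runs once with x=3, bypassing example generation
test_double()   # x is drawn from the integers() strategy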
Example #57
0
def test_uses_varargs():
    f = copy_argspec(
        u'foo', getargspec(has_varargs))(universal_acceptor)
    assert f(1, 2) == ((1, 2), {})
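
As in Example #54, the fixture is not part of this listing; a hedged reconstruction consistent with the assertion above:

# Assumed fixture (not shown here): a donor function whose *args signature
# is copied, so f(1, 2) forwards straight through as ((1, 2), {}).
def has_varargs(*args):
    pass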