def test_does_not_add_param_empty_to_type_hints():
    def f(x):
        pass

    f.__signature__ = Signature([P("y", P.KEYWORD_ONLY)], return_annotation=None)
    assert get_type_hints(f) == {}
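# For context, a minimal sketch (standard library only) of the behaviour the test
# above relies on: typing.get_type_hints() reports only annotated names, so an
# unannotated parameter simply does not appear in the result.  The function
# `annotated` below is illustrative, not part of the code above.
from typing import get_type_hints as stdlib_get_type_hints

def annotated(a: int, b) -> str:
    return str(a)

assert stdlib_get_type_hints(annotated) == {"a": int, "return": str}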
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [name for name, value in generator_kwargs.items()
                     if value is infer]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    execute_explicit_examples(
        test_runner, test, wrapped_test, settings, arguments, kwargs
    )

    if settings.max_examples <= 0:
        return

    if not (
        Phase.reuse in settings.phases or
        Phase.generate in settings.phases
    ):
        return

    try:
        perform_health_checks(
            random, settings, test_runner, search_strategy)

        state = StateForActualGivenExecution(
            test_runner, search_strategy, test, settings, random)
        state.run()
    except:
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        if generated_seed is not None:
            if running_under_pytest:
                report((
                    'You can add @seed(%(seed)d) to this test or run '
                    'pytest with --hypothesis-seed=%(seed)d to '
                    'reproduce this failure.') % {
                        'seed': generated_seed},)
            else:
                report((
                    'You can add @seed(%d) to this test to reproduce '
                    'this failure.') % (generated_seed,))
        raise
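# A hedged usage sketch of the `infer` branch above: when @given is passed the
# `infer` marker for an argument, Hypothesis looks the argument up in
# get_type_hints() and builds a strategy with st.from_type().  The test function
# and annotation below are illustrative, assuming a Hypothesis version where
# `infer` is importable from the top-level package.
from typing import List
from hypothesis import given, infer

@given(xs=infer)
def test_sorting_is_idempotent(xs: List[int]):
    # List[int] resolves to st.lists(st.integers()) via st.from_type().
    assert sorted(sorted(xs)) == sorted(xs)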
def _get_strategies(
    *funcs: Callable, pass_result_to_next_func: bool = False
) -> Dict[str, st.SearchStrategy]:
    """Return a dict of strategies for the union of arguments to `funcs`.

    If `pass_result_to_next_func` is True, assume that the result of each function
    is passed to the next, and therefore skip the first argument of all but the
    first function.

    This dict is used to construct our call to the `@given(...)` decorator.
    """
    assert funcs, "Must pass at least one function"
    given_strategies: Dict[str, st.SearchStrategy] = {}
    for i, f in enumerate(funcs):
        params = _get_params(f)
        if pass_result_to_next_func and i >= 1:
            del params[next(iter(params))]
        hints = get_type_hints(f)
        builder_args = {
            k: infer if k in hints else _strategy_for(v) for k, v in params.items()
        }
        with _with_any_registered():
            strat = st.builds(f, **builder_args).wrapped_strategy  # type: ignore

        if strat.args:
            raise NotImplementedError("Expected to pass everything as kwargs")

        for k, v in strat.kwargs.items():
            if _valid_syntax_repr(v)[1] == "nothing()" and k in hints:
                # e.g. from_type(Hashable) is OK but the unwrapped repr is not
                v = LazyStrategy(st.from_type, (hints[k],), {})
            if k in given_strategies:
                given_strategies[k] |= v
            else:
                given_strategies[k] = v

    # If there is only one function, we pass arguments to @given in the order of
    # that function's signature.  Otherwise, we use alphabetical order.
    if len(funcs) == 1:
        return {name: given_strategies[name] for name in _get_params(f)}
    return dict(sorted(given_strategies.items()))
def _get_strategies(
    *funcs: Callable, pass_result_to_next_func: bool = False
) -> Dict[str, st.SearchStrategy]:
    """Return a dict of strategies for the union of arguments to `funcs`.

    If `pass_result_to_next_func` is True, assume that the result of each function
    is passed to the next, and therefore skip the first argument of all but the
    first function.

    This dict is used to construct our call to the `@given(...)` decorator.
    """
    given_strategies = {}  # type: Dict[str, st.SearchStrategy]
    for i, f in enumerate(funcs):
        params = _get_params(f)
        if pass_result_to_next_func and i >= 1:
            del params[next(iter(params))]
        hints = get_type_hints(f)
        builder_args = {
            k: infer if k in hints else _strategy_for(v) for k, v in params.items()
        }
        with _with_any_registered():
            strat = st.builds(f, **builder_args).wrapped_strategy  # type: ignore
        args, kwargs = strat.mapped_strategy.wrapped_strategy.element_strategies

        if args.element_strategies:
            raise NotImplementedError("Expected to pass everything as kwargs")

        for k, v in zip(kwargs.keys, kwargs.mapped_strategy.element_strategies):
            if k in given_strategies:
                given_strategies[k] |= v
            else:
                given_strategies[k] = v

    # If there is only one function, we pass arguments to @given in the order of
    # that function's signature.  Otherwise, we use alphabetical order.
    if len(funcs) == 1:
        return {name: given_strategies[name] for name in _get_params(f)}
    return dict(sorted(given_strategies.items()))
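# A small, hedged sketch of the mechanism _get_strategies() leans on: st.builds()
# can fill in required arguments from type annotations, which is what passing the
# `infer` marker for annotated parameters amounts to.  The dataclass below is
# illustrative only, not part of the code above.
from dataclasses import dataclass
from hypothesis import strategies as st

@dataclass
class Point:
    x: int
    y: int

# builds() infers integers() for both annotated, required fields.
point_strategy = st.builds(Point)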
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in generator_kwargs.items()
            if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings)
    arguments, kwargs, test_runner, search_strategy = processed_args

    execute_explicit_examples(test_runner, test, wrapped_test, settings,
                              arguments, kwargs)

    if settings.max_examples <= 0:
        return

    if not (Phase.reuse in settings.phases or
            Phase.generate in settings.phases):
        return

    perform_health_checks(random, settings, test_runner, search_strategy)

    state = StateForActualGivenExecution(test_runner, search_strategy,
                                         test, settings, random)
    state.run()
def types_to_strategy(attrib, types):
    """Find all the type metadata for this attribute, reconcile it, and infer a
    strategy from the mess."""
    # If we know types from the validator(s), that's sufficient.
    if len(types) == 1:
        typ, = types
        if isinstance(typ, tuple):
            return st.one_of(*map(st.from_type, typ))
        return st.from_type(typ)
    elif types:
        # We have a list of tuples of types, and want to find a type
        # (or tuple of types) that is a subclass of all of them.
        type_tuples = [k if isinstance(k, tuple) else (k,) for k in types]
        # Flatten the list, filter types that would fail validation, and
        # sort so that ordering is stable between runs and shrinks well.
        allowed = [
            t
            for t in set(sum(type_tuples, ()))
            if all(issubclass(t, tup) for tup in type_tuples)
        ]
        allowed.sort(key=type_sorting_key)
        return st.one_of([st.from_type(t) for t in allowed])

    # Otherwise, try the `type` attribute as a fallback, and finally try
    # the type hints on a converter (desperate!) before giving up.
    if isinstance(getattr(attrib, "type", None), type):
        # The convoluted test is because variable annotations may be stored
        # in string form; attrs doesn't evaluate them and we don't handle them.
        # See PEP 526, PEP 563, and Hypothesis issue #1004 for details.
        return st.from_type(attrib.type)

    converter = getattr(attrib, "converter", None)
    if isinstance(converter, type):
        return st.from_type(converter)
    elif callable(converter):
        hints = get_type_hints(converter)
        if "return" in hints:
            return st.from_type(hints["return"])

    return st.nothing()
def types_to_strategy(attrib, types):
    """Find all the type metadata for this attribute, reconcile it, and infer a
    strategy from the mess."""
    # If we know types from the validator(s), that's sufficient.
    if len(types) == 1:
        (typ,) = types
        if isinstance(typ, tuple):
            return st.one_of(*map(st.from_type, typ))
        return st.from_type(typ)
    elif types:
        # We have a list of tuples of types, and want to find a type
        # (or tuple of types) that is a subclass of all of them.
        type_tuples = [k if isinstance(k, tuple) else (k,) for k in types]
        # Flatten the list, filter types that would fail validation, and
        # sort so that ordering is stable between runs and shrinks well.
        allowed = [
            t
            for t in set(sum(type_tuples, ()))
            if all(issubclass(t, tup) for tup in type_tuples)
        ]
        allowed.sort(key=type_sorting_key)
        return st.one_of([st.from_type(t) for t in allowed])

    # Otherwise, try the `type` attribute as a fallback, and finally try
    # the type hints on a converter (desperate!) before giving up.
    if is_a_type(getattr(attrib, "type", None)):
        # The convoluted test is because variable annotations may be stored
        # in string form; attrs doesn't evaluate them and we don't handle them.
        # See PEP 526, PEP 563, and Hypothesis issue #1004 for details.
        return st.from_type(attrib.type)

    converter = getattr(attrib, "converter", None)
    if isinstance(converter, type):
        return st.from_type(converter)
    elif callable(converter):
        hints = get_type_hints(converter)
        if "return" in hints:
            return st.from_type(hints["return"])

    return st.nothing()
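# A hedged, self-contained sketch of the reconciliation step above: given type
# constraints from several validators, keep only the types that satisfy every
# constraint, then build a strategy for their union.  The names `constraints`
# and `reconcile` are illustrative, not part of the code above.
from hypothesis import strategies as st

def reconcile(constraints):
    type_tuples = [c if isinstance(c, tuple) else (c,) for c in constraints]
    allowed = [
        t
        for t in set(sum(type_tuples, ()))
        if all(issubclass(t, tup) for tup in type_tuples)
    ]
    return st.one_of([st.from_type(t) for t in sorted(allowed, key=repr)])

# bool is the only type here that passes both an (int, str) check and a bool check.
strategy = reconcile([(int, str), (bool,)])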
def magic(
    *modules_or_functions: Union[Callable, types.ModuleType],
    except_: Except = (),
    style: str = "pytest",
) -> str:
    """Guess which ghostwriters to use, for a module or collection of functions.

    As for all ghostwriters, the ``except_`` argument should be an
    :class:`python:Exception` or tuple of exceptions, and ``style`` may be either
    ``"pytest"`` to write test functions or ``"unittest"`` to write test methods
    and :class:`~python:unittest.TestCase`.

    After finding the public functions attached to any modules, the ``magic``
    ghostwriter looks for pairs of functions to pass to :func:`~roundtrip`,
    then checks for :func:`~binary_operation` and :func:`~ufunc` functions,
    and any others are passed to :func:`~fuzz`.

    For example, try :command:`hypothesis write gzip` on the command line!
    """
    except_ = _check_except(except_)
    _check_style(style)
    if not modules_or_functions:
        raise InvalidArgument("Must pass at least one function or module to test.")
    functions = set()
    for thing in modules_or_functions:
        if callable(thing):
            functions.add(thing)
        elif isinstance(thing, types.ModuleType):
            if hasattr(thing, "__all__"):
                funcs = [getattr(thing, name) for name in thing.__all__]  # type: ignore
            else:
                funcs = [
                    v
                    for k, v in vars(thing).items()
                    if callable(v) and not k.startswith("_")
                ]
            for f in funcs:
                try:
                    if callable(f) and inspect.signature(f).parameters:
                        functions.add(f)
                except ValueError:
                    pass
        else:
            raise InvalidArgument(f"Can't test non-module non-callable {thing!r}")

    imports = set()
    parts = []
    by_name = {_get_qualname(f): f for f in functions}
    if len(by_name) < len(functions):
        raise InvalidArgument("Functions to magic() test must have unique names")

    # Look for pairs of functions that roundtrip, based on known naming patterns.
    for writename, readname in ROUNDTRIP_PAIRS:
        for name in sorted(by_name):
            match = re.fullmatch(writename, name)
            if match:
                other = readname.format(*match.groups())
                if other in by_name:
                    imp, body = _make_roundtrip_body(
                        (by_name.pop(name), by_name.pop(other)),
                        except_=except_,
                        style=style,
                    )
                    imports |= imp
                    parts.append(body)

    # Look for binary operators - functions with two identically-typed arguments,
    # and the same return type.  The latter restriction might be lifted later.
    for name, func in sorted(by_name.items()):
        hints = get_type_hints(func)
        hints.pop("return", None)
        if len(hints) == len(_get_params(func)) == 2:
            a, b = hints.values()
            if a == b:
                imp, body = _make_binop_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
    if "numpy" in sys.modules:
        for name, func in sorted(by_name.items()):
            if _is_probably_ufunc(func):
                imp, body = _make_ufunc_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # For all remaining callables, just write a fuzz-test.  In principle we could
    # guess at equivalence or idempotence; but it doesn't seem accurate enough to
    # be worth the trouble when it's so easy for the user to specify themselves.
    for _, f in sorted(by_name.items()):
        imp, body = _make_test_body(
            f,
            test_body=_write_call(f),
            except_=except_,
            ghost="fuzz",
            style=style,
        )
        imports |= imp
        parts.append(body)
    return _make_test(imports, "\n".join(parts))
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    test = wrapped_test.hypothesis.inner_test

    if getattr(test, "is_hypothesis_test", False):
        raise InvalidArgument((
            "You have applied @given to the test %s more than once, which "
            "wraps the test several times and is extremely slow. A "
            "similar effect can be gained by combining the arguments "
            "of the two calls to given. For example, instead of "
            "@given(booleans()) @given(integers()), you could write "
            "@given(booleans(), integers())") % (test.__name__,))

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    # Use type information to convert "infer" arguments into appropriate
    # strategies.
    if infer in given_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in given_kwargs.items() if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    "passed %s=infer for %s, but %s has no type annotation"
                    % (name, test.__name__, name))
            given_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, given_kwargs, argspec, test, settings,
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, "runner", None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = ("You have applied @given to the method %s, which is "
               "used by the unittest runner but is not itself a test."
               " This is not useful in any way." % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            "You have applied @given to a method on %s, but this "
            "class does not inherit from the supported versions in "
            "`hypothesis.extra.django`. Use the Hypothesis variants "
            "to ensure that each example is run in a separate "
            "database transaction." % qualname(type(runner)))

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random, wrapped_test,
    )

    reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure

    # If there was a @reproduce_failure decorator, use it to reproduce
    # the error (or complain that we couldn't). Either way, this will
    # always raise some kind of error.
    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument(
                ("Attempting to reproduce a failure from a different "
                 "version of Hypothesis. This failure is from %s, but "
                 "you are currently running %r. Please change your "
                 "Hypothesis version to a matching one.")
                % (expected_version, __version__))
        try:
            state.execute_once(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True,
                is_final=True,
            )
            raise DidNotReproduce(
                "Expected the test to raise an error, but it "
                "completed successfully.")
        except StopTest:
            raise DidNotReproduce(
                "The shape of the test data has changed in some way "
                "from where this blob was defined. Are you sure "
                "you're running the same test?")
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                "The test data failed to satisfy an assumption in the "
                "test. Have you added it since this blob was "
                "generated?")

    # There was no @reproduce_failure, so start by running any explicit
    # examples from @example decorators.
    execute_explicit_examples(state, wrapped_test, arguments, kwargs)

    # If there were any explicit examples, they all ran successfully.
    # The next step is to use the Conjecture engine to run the test on
    # many different inputs.

    if settings.max_examples <= 0:
        return

    if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
            subTest = runner.subTest
            try:
                runner.subTest = fake_subTest
                state.run_engine()
            finally:
                runner.subTest = subTest
        else:
            state.run_engine()
    except BaseException as e:
        # The exception caught here should either be an actual test
        # failure (or MultipleFailures), or some kind of fatal error
        # that caused the engine to stop.

        generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
        with local_settings(settings):
            if not (state.failed_normally or generated_seed is None):
                if running_under_pytest:
                    report(
                        "You can add @seed(%(seed)d) to this test or "
                        "run pytest with --hypothesis-seed=%(seed)d "
                        "to reproduce this failure." % {"seed": generated_seed})
                else:
                    report("You can add @seed(%d) to this test to "
                           "reproduce this failure." % (generated_seed,))
            # The dance here is to avoid showing users long tracebacks
            # full of Hypothesis internals they don't care about.
            # We have to do this inline, to avoid adding another
            # internal stack frame just when we've removed the rest.
            if PY2:
                # Python 2 doesn't have Exception.with_traceback(...);
                # instead it has a three-argument form of the `raise`
                # statement.  Unfortunately this is a SyntaxError on
                # Python 3, and before Python 2.7.9 it was *also* a
                # SyntaxError to use it in a nested function so we
                # can't `exec` or `eval` our way out (BPO-21591).
                # So unless we break some versions of Python 2, none
                # of them get traceback elision.
                raise
            # On Python 3, we swap out the real traceback for our
            # trimmed version.  Using a variable ensures that the line
            # which will actually appear in tracebacks is as clear as
            # possible - "raise the_error_hypothesis_found".
            the_error_hypothesis_found = e.with_traceback(
                get_trimmed_traceback())
            raise the_error_hypothesis_found
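# A hedged usage sketch for the @reproduce_failure branch above: the decorator
# pins a Hypothesis version and the base64 blob Hypothesis printed for a failing
# example, and re-running the test replays exactly that input.  The version
# string and blob below are placeholders, not real values.
from hypothesis import given, reproduce_failure, strategies as st

@reproduce_failure("4.32.1", b"AAA=")  # placeholder version/blob
@given(st.integers())
def test_replays_a_known_failure(x):
    assert x < 100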
def test_can_get_type_hints(thing):
    assert isinstance(get_type_hints(thing), dict)
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    test = wrapped_test.hypothesis.inner_test
    if getattr(test, 'is_hypothesis_test', False):
        note_deprecation(
            ('You have applied @given to test: %s more than once. In '
             'future this will be an error. Applying @given twice '
             'wraps the test twice, which can be extremely slow. A '
             'similar effect can be gained by combining the arguments '
             'of the two calls to given. For example, instead of '
             '@given(booleans()) @given(integers()), you could write '
             '@given(booleans(), integers())') % (test.__name__, ))

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in generator_kwargs.items()
            if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings)
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, 'runner', None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = ('You have applied @given to the method %s, which is '
               'used by the unittest runner but is not itself a test.'
               ' This is not useful in any way.' % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            'You have applied @given to a method on %s, but this '
            'class does not inherit from the supported versions in '
            '`hypothesis.extra.django`. Use the Hypothesis variants '
            'to ensure that each example is run in a separate '
            'database transaction.' % qualname(type(runner)))

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random,
        had_seed=wrapped_test._hypothesis_internal_use_seed)

    reproduce_failure = \
        wrapped_test._hypothesis_internal_use_reproduce_failure

    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument(
                ('Attempting to reproduce a failure from a different '
                 'version of Hypothesis. This failure is from %s, but '
                 'you are currently running %r. Please change your '
                 'Hypothesis version to a matching one.')
                % (expected_version, __version__))
        try:
            state.execute(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True, is_final=True,
            )
            raise DidNotReproduce(
                'Expected the test to raise an error, but it '
                'completed successfully.')
        except StopTest:
            raise DidNotReproduce(
                'The shape of the test data has changed in some way '
                'from where this blob was defined. Are you sure '
                "you're running the same test?")
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                'The test data failed to satisfy an assumption in the '
                'test. Have you added it since this blob was '
                'generated?')

    execute_explicit_examples(test_runner, test, wrapped_test, settings,
                              arguments, kwargs)

    if settings.max_examples <= 0:
        return

    if not (Phase.reuse in settings.phases or
            Phase.generate in settings.phases):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, 'subTest'):
            subTest = runner.subTest
            try:
                setattr(runner, 'subTest', fake_subTest)
                state.run()
            finally:
                setattr(runner, 'subTest', subTest)
        else:
            state.run()
    except BaseException as e:
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        with local_settings(settings):
            if not (state.failed_normally or generated_seed is None):
                if running_under_pytest:
                    report(
                        'You can add @seed(%(seed)d) to this test or '
                        'run pytest with --hypothesis-seed=%(seed)d '
                        'to reproduce this failure.' % {
                            'seed': generated_seed})
                else:
                    report('You can add @seed(%d) to this test to '
                           'reproduce this failure.' % (generated_seed, ))
            # The dance here is to avoid showing users long tracebacks
            # full of Hypothesis internals they don't care about.
            # We have to do this inline, to avoid adding another
            # internal stack frame just when we've removed the rest.
            if PY2:
                # Python 2 doesn't have Exception.with_traceback(...);
                # instead it has a three-argument form of the `raise`
                # statement.  Unfortunately this is a SyntaxError on
                # Python 3, and before Python 2.7.9 it was *also* a
                # SyntaxError to use it in a nested function so we
                # can't `exec` or `eval` our way out (BPO-21591).
                # So unless we break some versions of Python 2, none
                # of them get traceback elision.
                raise
            # On Python 3, we swap out the real traceback for our
            # trimmed version.  Using a variable ensures that the line
            # which will actually appear in tracebacks is as clear as
            # possible - "raise the_error_hypothesis_found".
            the_error_hypothesis_found = \
                e.with_traceback(get_trimmed_traceback())
            raise the_error_hypothesis_found
def magic(
    *modules_or_functions: Union[Callable, types.ModuleType],
    except_: Except = (),
    style: str = "pytest",
) -> str:
    """Guess which ghostwriters to use, for a module or collection of functions.

    As for all ghostwriters, the ``except_`` argument should be an
    :class:`python:Exception` or tuple of exceptions, and ``style`` may be either
    ``"pytest"`` to write test functions or ``"unittest"`` to write test methods
    and :class:`~python:unittest.TestCase`.

    After finding the public functions attached to any modules, the ``magic``
    ghostwriter looks for pairs of functions to pass to :func:`~roundtrip`,
    then checks for :func:`~binary_operation` and :func:`~ufunc` functions,
    and any others are passed to :func:`~fuzz`.

    For example, try :command:`hypothesis write gzip` on the command line!
    """
    except_ = _check_except(except_)
    _check_style(style)
    if not modules_or_functions:
        raise InvalidArgument(
            "Must pass at least one function or module to test.")
    functions = set()
    for thing in modules_or_functions:
        if callable(thing):
            functions.add(thing)
        elif isinstance(thing, types.ModuleType):
            if hasattr(thing, "__all__"):
                funcs = [getattr(thing, name, None) for name in thing.__all__]  # type: ignore
            else:
                funcs = [
                    v
                    for k, v in vars(thing).items()
                    if callable(v) and not k.startswith("_")
                ]
            for f in funcs:
                try:
                    if (not is_mock(f)) and callable(f) and _get_params(f):
                        functions.add(f)
                except (TypeError, ValueError):
                    pass
        else:
            raise InvalidArgument(
                f"Can't test non-module non-callable {thing!r}")

    imports = set()
    parts = []
    by_name = {}
    for f in functions:
        try:
            by_name[_get_qualname(f, include_module=True)] = f
        except Exception:
            pass  # e.g. Pandas 'CallableDynamicDoc' object has no attribute '__name__'
    if not by_name:
        return (f"# Found no testable functions in\n"
                f"# {functions!r} from {modules_or_functions}\n")

    # Look for pairs of functions that roundtrip, based on known naming patterns.
    for writename, readname in ROUNDTRIP_PAIRS:
        for name in sorted(by_name):
            match = re.fullmatch(writename, name.split(".")[-1])
            if match:
                inverse_name = readname.format(*match.groups())
                for other in sorted(
                    n for n in by_name if n.split(".")[-1] == inverse_name
                )[:1]:
                    imp, body = _make_roundtrip_body(
                        (by_name.pop(name), by_name.pop(other)),
                        except_=except_,
                        style=style,
                    )
                    imports |= imp
                    parts.append(body)

    # Look for equivalent functions: same name, all required arguments of any can
    # be found in all signatures, and if all have return-type annotations they match.
    names = defaultdict(list)
    for _, f in sorted(by_name.items()):
        names[_get_qualname(f)].append(f)
    for group in names.values():
        if len(group) >= 2 and len({frozenset(_get_params(f)) for f in group}) == 1:
            sentinel = object()
            returns = {get_type_hints(f).get("return", sentinel) for f in group}
            if len(returns - {sentinel}) <= 1:
                imp, body = _make_equiv_body(group, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                for f in group:
                    by_name.pop(_get_qualname(f, include_module=True))

    # Look for binary operators - functions with two identically-typed arguments,
    # and the same return type.  The latter restriction might be lifted later.
    for name, func in sorted(by_name.items()):
        hints = get_type_hints(func)
        hints.pop("return", None)
        if len(hints) == len(_get_params(func)) == 2:
            a, b = hints.values()
            if a == b:
                imp, body = _make_binop_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
    if "numpy" in sys.modules:
        for name, func in sorted(by_name.items()):
            if _is_probably_ufunc(func):
                imp, body = _make_ufunc_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # For all remaining callables, just write a fuzz-test.  In principle we could
    # guess at equivalence or idempotence; but it doesn't seem accurate enough to
    # be worth the trouble when it's so easy for the user to specify themselves.
    for _, f in sorted(by_name.items()):
        imp, body = _make_test_body(
            f, test_body=_write_call(f), except_=except_, ghost="fuzz", style=style)
        imports |= imp
        parts.append(body)
    return _make_test(imports, "\n".join(parts))
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    test = wrapped_test.hypothesis.inner_test
    if getattr(test, 'is_hypothesis_test', False):
        note_deprecation((
            'You have applied @given to test: %s more than once. In '
            'future this will be an error. Applying @given twice '
            'wraps the test twice, which can be extremely slow. A '
            'similar effect can be gained by combining the arguments '
            'of the two calls to given. For example, instead of '
            '@given(booleans()) @given(integers()), you could write '
            '@given(booleans(), integers())') % (test.__name__, )
        )

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [name for name, value in generator_kwargs.items()
                     if value is infer]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, 'runner', None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = ('You have applied @given to the method %s, which is '
               'used by the unittest runner but is not itself a test.'
               ' This is not useful in any way.' % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            'You have applied @given to a method on %s, but this '
            'class does not inherit from the supported versions in '
            '`hypothesis.extra.django`. Use the Hypothesis variants '
            'to ensure that each example is run in a separate '
            'database transaction.' % qualname(type(runner))
        )

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random,
        had_seed=wrapped_test._hypothesis_internal_use_seed
    )

    reproduce_failure = \
        wrapped_test._hypothesis_internal_use_reproduce_failure

    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument((
                'Attempting to reproduce a failure from a different '
                'version of Hypothesis. This failure is from %s, but '
                'you are currently running %r. Please change your '
                'Hypothesis version to a matching one.'
            ) % (expected_version, __version__))
        try:
            state.execute(ConjectureData.for_buffer(
                decode_failure(failure)),
                print_example=True, is_final=True,
            )
            raise DidNotReproduce(
                'Expected the test to raise an error, but it '
                'completed successfully.'
            )
        except StopTest:
            raise DidNotReproduce(
                'The shape of the test data has changed in some way '
                'from where this blob was defined. Are you sure '
                "you're running the same test?"
            )
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                'The test data failed to satisfy an assumption in the '
                'test. Have you added it since this blob was '
                'generated?'
            )

    execute_explicit_examples(
        test_runner, test, wrapped_test, settings, arguments, kwargs
    )

    if settings.max_examples <= 0:
        return

    if not (
        Phase.reuse in settings.phases or
        Phase.generate in settings.phases
    ):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, 'subTest'):
            subTest = runner.subTest
            try:
                setattr(runner, 'subTest', fake_subTest)
                state.run()
            finally:
                setattr(runner, 'subTest', subTest)
        else:
            state.run()
    except BaseException:
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        if generated_seed is not None and not state.failed_normally:
            with local_settings(settings):
                if running_under_pytest:
                    report(
                        'You can add @seed(%(seed)d) to this test or '
                        'run pytest with --hypothesis-seed=%(seed)d '
                        'to reproduce this failure.' % {
                            'seed': generated_seed})
                else:
                    report(
                        'You can add @seed(%d) to this test to '
                        'reproduce this failure.' % (generated_seed,))
        raise
def test_no_type_hints():
    assert get_type_hints(WeirdSig) == {}
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    if getattr(test, 'is_hypothesis_test', False):
        note_deprecation((
            'You have applied @given to a test more than once. In '
            'future this will be an error. Applying @given twice '
            'wraps the test twice, which can be extremely slow. A '
            'similar effect can be gained by combining the arguments '
            'to the two calls to given. For example, instead of '
            '@given(booleans()) @given(integers()), you could write '
            '@given(booleans(), integers())'
        ))

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [name for name, value in generator_kwargs.items()
                     if value is infer]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    execute_explicit_examples(
        test_runner, test, wrapped_test, settings, arguments, kwargs
    )

    if settings.max_examples <= 0:
        return

    if not (
        Phase.reuse in settings.phases or
        Phase.generate in settings.phases
    ):
        return

    try:
        state = StateForActualGivenExecution(
            test_runner, search_strategy, test, settings, random,
            had_seed=wrapped_test._hypothesis_internal_use_seed
        )
        state.run()
    except BaseException:
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        if (
            generated_seed is not None and
            not state.used_examples_from_database
        ):
            if running_under_pytest:
                report((
                    'You can add @seed(%(seed)d) to this test or run '
                    'pytest with --hypothesis-seed=%(seed)d to '
                    'reproduce this failure.') % {
                        'seed': generated_seed},)
            else:
                report((
                    'You can add @seed(%d) to this test to reproduce '
                    'this failure.') % (generated_seed,))
        raise
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    if getattr(test, 'is_hypothesis_test', False):
        note_deprecation(
            'You have applied @given to a test more than once. In '
            'future this will be an error. Applying @given twice '
            'wraps the test twice, which can be extremely slow. A '
            'similar effect can be gained by combining the arguments '
            'of the two calls to given. For example, instead of '
            '@given(booleans()) @given(integers()), you could write '
            '@given(booleans(), integers())')

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in generator_kwargs.items()
            if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings)
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, 'runner', None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = ('You have applied @given to the method %s, which is '
               'used by the unittest runner but is not itself a test.'
               ' This is not useful in any way.' % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            'You have applied @given to a method on %s, but this '
            'class does not inherit from the supported versions in '
            '`hypothesis.extra.django`. Use the Hypothesis variants '
            'to ensure that each example is run in a separate '
            'database transaction.' % qualname(type(runner)))

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random,
        had_seed=wrapped_test._hypothesis_internal_use_seed)

    reproduce_failure = \
        wrapped_test._hypothesis_internal_use_reproduce_failure

    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument(
                ('Attempting to reproduce a failure from a different '
                 'version of Hypothesis. This failure is from %s, but '
                 'you are currently running %r. Please change your '
                 'Hypothesis version to a matching one.')
                % (expected_version, __version__))
        try:
            state.execute(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True, is_final=True,
            )
            raise DidNotReproduce(
                'Expected the test to raise an error, but it '
                'completed successfully.')
        except StopTest:
            raise DidNotReproduce(
                'The shape of the test data has changed in some way '
                'from where this blob was defined. Are you sure '
                "you're running the same test?")
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                'The test data failed to satisfy an assumption in the '
                'test. Have you added it since this blob was '
                'generated?')

    execute_explicit_examples(test_runner, test, wrapped_test, settings,
                              arguments, kwargs)

    if settings.max_examples <= 0:
        return

    if not (Phase.reuse in settings.phases or
            Phase.generate in settings.phases):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, 'subTest'):
            subTest = runner.subTest
            try:
                setattr(runner, 'subTest', fake_subTest)
                state.run()
            finally:
                setattr(runner, 'subTest', subTest)
        else:
            state.run()
    except BaseException:
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        if generated_seed is not None and not state.failed_normally:
            if running_under_pytest:
                report(
                    ('You can add @seed(%(seed)d) to this test or run '
                     'pytest with --hypothesis-seed=%(seed)d to '
                     'reproduce this failure.') % {'seed': generated_seed},
                )
            else:
                report(
                    ('You can add @seed(%d) to this test to reproduce '
                     'this failure.') % (generated_seed, ))
        raise
def wrapped_test(*arguments, **kwargs):
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True
    test = wrapped_test.hypothesis.inner_test
    if getattr(test, "is_hypothesis_test", False):
        raise InvalidArgument(
            (
                "You have applied @given to the test %s more than once, which "
                "wraps the test several times and is extremely slow. A "
                "similar effect can be gained by combining the arguments "
                "of the two calls to given. For example, instead of "
                "@given(booleans()) @given(integers()), you could write "
                "@given(booleans(), integers())"
            )
            % (test.__name__,)
        )

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in generator_kwargs.items() if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    "passed %s=infer for %s, but %s has no type annotation"
                    % (name, test.__name__, name)
                )
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test,
        arguments,
        kwargs,
        generator_arguments,
        generator_kwargs,
        argspec,
        test,
        settings,
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, "runner", None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = (
            "You have applied @given to the method %s, which is "
            "used by the unittest runner but is not itself a test."
            " This is not useful in any way." % test.__name__
        )
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            "You have applied @given to a method on %s, but this "
            "class does not inherit from the supported versions in "
            "`hypothesis.extra.django`. Use the Hypothesis variants "
            "to ensure that each example is run in a separate "
            "database transaction." % qualname(type(runner))
        )

    state = StateForActualGivenExecution(
        test_runner,
        search_strategy,
        test,
        settings,
        random,
        had_seed=wrapped_test._hypothesis_internal_use_seed,
    )

    reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure

    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument(
                (
                    "Attempting to reproduce a failure from a different "
                    "version of Hypothesis. This failure is from %s, but "
                    "you are currently running %r. Please change your "
                    "Hypothesis version to a matching one."
                )
                % (expected_version, __version__)
            )
        try:
            state.execute(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True,
                is_final=True,
            )
            raise DidNotReproduce(
                "Expected the test to raise an error, but it "
                "completed successfully."
            )
        except StopTest:
            raise DidNotReproduce(
                "The shape of the test data has changed in some way "
                "from where this blob was defined. Are you sure "
                "you're running the same test?"
            )
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                "The test data failed to satisfy an assumption in the "
                "test. Have you added it since this blob was "
                "generated?"
            )

    execute_explicit_examples(
        test_runner, test, wrapped_test, settings, arguments, kwargs
    )

    if settings.max_examples <= 0:
        return

    if not (
        Phase.reuse in settings.phases or Phase.generate in settings.phases
    ):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
            subTest = runner.subTest
            try:
                setattr(runner, "subTest", fake_subTest)
                state.run()
            finally:
                setattr(runner, "subTest", subTest)
        else:
            state.run()
    except BaseException as e:
        generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
        with local_settings(settings):
            if not (state.failed_normally or generated_seed is None):
                if running_under_pytest:
                    report(
                        "You can add @seed(%(seed)d) to this test or "
                        "run pytest with --hypothesis-seed=%(seed)d "
                        "to reproduce this failure." % {"seed": generated_seed}
                    )
                else:
                    report(
                        "You can add @seed(%d) to this test to "
                        "reproduce this failure." % (generated_seed,)
                    )
            # The dance here is to avoid showing users long tracebacks
            # full of Hypothesis internals they don't care about.
            # We have to do this inline, to avoid adding another
            # internal stack frame just when we've removed the rest.
            if PY2:
                # Python 2 doesn't have Exception.with_traceback(...);
                # instead it has a three-argument form of the `raise`
                # statement.  Unfortunately this is a SyntaxError on
                # Python 3, and before Python 2.7.9 it was *also* a
                # SyntaxError to use it in a nested function so we
                # can't `exec` or `eval` our way out (BPO-21591).
                # So unless we break some versions of Python 2, none
                # of them get traceback elision.
                raise
            # On Python 3, we swap out the real traceback for our
            # trimmed version.  Using a variable ensures that the line
            # which will actually appear in tracebacks is as clear as
            # possible - "raise the_error_hypothesis_found".
            the_error_hypothesis_found = e.with_traceback(
                get_trimmed_traceback()
            )
            raise the_error_hypothesis_found
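# A hedged usage sketch of the advice printed in the except-branches above:
# @seed() fixes Hypothesis's randomisation for one test, and the same value can
# be supplied to pytest via --hypothesis-seed.  The seed value is a placeholder.
from hypothesis import given, seed, strategies as st

@seed(12345)  # placeholder seed reported by a previous failing run
@given(st.integers())
def test_reproduces_with_a_fixed_seed(x):
    assert isinstance(x, int)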
def run_test_as_given(test):
    if inspect.isclass(test):
        # Provide a meaningful error to users, instead of exceptions from
        # internals that assume we're dealing with a function.
        raise InvalidArgument("@given cannot be applied to a class.")
    given_arguments = tuple(_given_arguments)
    given_kwargs = dict(_given_kwargs)

    original_argspec = getfullargspec(test)

    check_invalid = is_invalid_test(
        test.__name__, original_argspec, given_arguments, given_kwargs)

    # If the argument check found problems, return a dummy test function
    # that will raise an error if it is actually called.
    if check_invalid is not None:
        return check_invalid

    # Because the argument check succeeded, we can convert @given's
    # positional arguments into keyword arguments for simplicity.
    if given_arguments:
        assert not given_kwargs
        for name, strategy in zip(reversed(original_argspec.args),
                                  reversed(given_arguments)):
            given_kwargs[name] = strategy
    # These have been converted, so delete them to prevent accidental use.
    del given_arguments

    argspec = new_given_argspec(original_argspec, given_kwargs)

    # Use type information to convert "infer" arguments into appropriate strategies.
    if infer in given_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in given_kwargs.items() if value is infer
        ]:
            if name not in hints:
                # As usual, we want to emit this error when the test is executed,
                # not when it's decorated.

                @impersonate(test)
                @define_function_signature(test.__name__, test.__doc__, argspec)
                def wrapped_test(*arguments, **kwargs):
                    __tracebackhide__ = True
                    raise InvalidArgument(
                        "passed %s=infer for %s, but %s has no type annotation"
                        % (name, test.__name__, name))

                return wrapped_test

            given_kwargs[name] = st.from_type(hints[name])

    @impersonate(test)
    @define_function_signature(test.__name__, test.__doc__, argspec)
    def wrapped_test(*arguments, **kwargs):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True

        test = wrapped_test.hypothesis.inner_test

        if getattr(test, "is_hypothesis_test", False):
            raise InvalidArgument((
                "You have applied @given to the test %s more than once, which "
                "wraps the test several times and is extremely slow. A "
                "similar effect can be gained by combining the arguments "
                "of the two calls to given. For example, instead of "
                "@given(booleans()) @given(integers()), you could write "
                "@given(booleans(), integers())") % (test.__name__,))

        settings = wrapped_test._hypothesis_internal_use_settings

        random = get_random_for_wrapped_test(test, wrapped_test)

        processed_args = process_arguments_to_given(
            wrapped_test, arguments, kwargs, given_kwargs, argspec, settings,
        )
        arguments, kwargs, test_runner, search_strategy = processed_args

        runner = getattr(search_strategy, "runner", None)
        if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
            msg = ("You have applied @given to the method %s, which is "
                   "used by the unittest runner but is not itself a test."
                   " This is not useful in any way." % test.__name__)
            fail_health_check(settings, msg, HealthCheck.not_a_test_method)
        if bad_django_TestCase(runner):  # pragma: no cover
            # Covered by the Django tests, but not the pytest coverage task
            raise InvalidArgument(
                "You have applied @given to a method on %s, but this "
                "class does not inherit from the supported versions in "
                "`hypothesis.extra.django`. Use the Hypothesis variants "
                "to ensure that each example is run in a separate "
                "database transaction." % qualname(type(runner)))

        state = StateForActualGivenExecution(
            test_runner, search_strategy, test, settings, random, wrapped_test,
        )

        reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure

        # If there was a @reproduce_failure decorator, use it to reproduce
        # the error (or complain that we couldn't). Either way, this will
        # always raise some kind of error.
        if reproduce_failure is not None:
            expected_version, failure = reproduce_failure
            if expected_version != __version__:
                raise InvalidArgument(
                    ("Attempting to reproduce a failure from a different "
                     "version of Hypothesis. This failure is from %s, but "
                     "you are currently running %r. Please change your "
                     "Hypothesis version to a matching one.")
                    % (expected_version, __version__))
            try:
                state.execute_once(
                    ConjectureData.for_buffer(decode_failure(failure)),
                    print_example=True,
                    is_final=True,
                )
                raise DidNotReproduce(
                    "Expected the test to raise an error, but it "
                    "completed successfully.")
            except StopTest:
                raise DidNotReproduce(
                    "The shape of the test data has changed in some way "
                    "from where this blob was defined. Are you sure "
                    "you're running the same test?")
            except UnsatisfiedAssumption:
                raise DidNotReproduce(
                    "The test data failed to satisfy an assumption in the "
                    "test. Have you added it since this blob was "
                    "generated?")

        # There was no @reproduce_failure, so start by running any explicit
        # examples from @example decorators.
        execute_explicit_examples(state, wrapped_test, arguments, kwargs)

        # If there were any explicit examples, they all ran successfully.
        # The next step is to use the Conjecture engine to run the test on
        # many different inputs.

        if not (Phase.reuse in settings.phases or Phase.generate in settings.phases):
            return

        try:
            if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
                subTest = runner.subTest
                try:
                    runner.subTest = fake_subTest
                    state.run_engine()
                finally:
                    runner.subTest = subTest
            else:
                state.run_engine()
        except BaseException as e:
            # The exception caught here should either be an actual test
            # failure (or MultipleFailures), or some kind of fatal error
            # that caused the engine to stop.

            generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
            with local_settings(settings):
                if not (state.failed_normally or generated_seed is None):
                    if running_under_pytest:
                        report(
                            "You can add @seed(%(seed)d) to this test or "
                            "run pytest with --hypothesis-seed=%(seed)d "
                            "to reproduce this failure." % {"seed": generated_seed})
                    else:
                        report("You can add @seed(%d) to this test to "
                               "reproduce this failure." % (generated_seed,))
                # The dance here is to avoid showing users long tracebacks
                # full of Hypothesis internals they don't care about.
                # We have to do this inline, to avoid adding another
                # internal stack frame just when we've removed the rest.
                #
                # Using a variable for our trimmed error ensures that the line
                # which will actually appear in tracebacks is as clear as
                # possible - "raise the_error_hypothesis_found".
                the_error_hypothesis_found = e.with_traceback(
                    get_trimmed_traceback())
                raise the_error_hypothesis_found

    def fuzz_one_input(
        buffer: Union[bytes, bytearray, memoryview, BinaryIO]
    ) -> Optional[bytes]:
        """Run the test as a fuzz target, driven with the `buffer` of bytes.

        Returns None if buffer invalid for the strategy, canonical pruned
        bytes if the buffer was valid, and leaves raised exceptions alone.

        Note: this feature is experimental and may change or be removed.
        """
        if isinstance(buffer, io.IOBase):
            buffer = buffer.read()
        if isinstance(buffer, (bytearray, memoryview)):
            buffer = bytes(buffer)
        assert isinstance(buffer, bytes)
        test = wrapped_test.hypothesis.inner_test
        settings = wrapped_test._hypothesis_internal_use_settings
        random = get_random_for_wrapped_test(test, wrapped_test)
        _args, _kwargs, test_runner, search_strategy = process_arguments_to_given(
            wrapped_test, (), {}, given_kwargs, argspec, settings,
        )
        assert not _args
        assert not _kwargs
        state = StateForActualGivenExecution(
            test_runner, search_strategy, test, settings, random, wrapped_test,
        )
        data = ConjectureData.for_buffer(buffer)
        try:
            state.execute_once(data)
        except (StopTest, UnsatisfiedAssumption):
            return None
        except BaseException:
            if settings.database is not None:
                settings.database.save(function_digest(test), bytes(data.buffer))
            raise
        return bytes(data.buffer)

    # After having created the decorated test function, we need to copy
    # over some attributes to make the switch as seamless as possible.

    for attrib in dir(test):
        if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
            setattr(wrapped_test, attrib, getattr(test, attrib))
    wrapped_test.is_hypothesis_test = True
    if hasattr(test, "_hypothesis_internal_settings_applied"):
        # Used to check if @settings is applied twice.
        wrapped_test._hypothesis_internal_settings_applied = True
    wrapped_test._hypothesis_internal_use_seed = getattr(
        test, "_hypothesis_internal_use_seed", None)
    wrapped_test._hypothesis_internal_use_settings = (
        getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default)
    wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
        test, "_hypothesis_internal_use_reproduce_failure", None)
    wrapped_test.hypothesis = HypothesisHandle(test, fuzz_one_input)
    return wrapped_test