def wrapped_test(*arguments, **kwargs):
    """Run ``test`` under Hypothesis: replay explicit examples, health-check
    the strategy, generate/shrink examples, and re-run any falsifying example.

    Closes over ``test``, ``argspec``, ``original_argspec`` and
    ``generator_kwargs`` from the enclosing decorator scope (not visible
    here).  Raises the usual Hypothesis exceptions (``Timeout``,
    ``Unsatisfiable``, ``Flaky``, ``FailedHealthCheck``) on failure modes.
    """
    settings = wrapped_test._hypothesis_internal_use_settings
    # Seeding priority: explicit @seed > derandomize (digest of the test
    # function, so it is stable across runs) > fresh randomness.
    if wrapped_test._hypothesis_internal_use_seed is not None:
        random = Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        random = Random(function_digest(test))
    else:
        random = new_random()

    import hypothesis.strategies as sd

    selfy = None
    arguments, kwargs = convert_positional_arguments(
        wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    if argspec.args:
        selfy = kwargs.get(argspec.args[0])
    elif arguments:
        selfy = arguments[0]
    test_runner = new_style_executor(selfy)

    # Replay @example(...) cases, most recently registered first (reversed).
    for example in reversed(getattr(
        wrapped_test, 'hypothesis_explicit_examples', ()
    )):
        if example.args:
            if len(example.args) > len(original_argspec.args):
                raise InvalidArgument(
                    'example has too many arguments for test. '
                    'Expected at most %d but got %d' % (
                        len(original_argspec.args), len(example.args)))
            # Positional example args map onto the *last* named parameters.
            example_kwargs = dict(zip(
                original_argspec.args[-len(example.args):],
                example.args
            ))
        else:
            example_kwargs = example.kwargs
        # NOTE(review): this phase check could be hoisted above the kwargs
        # computation; left as-is since this is a documentation-only pass.
        if Phase.explicit not in settings.phases:
            continue
        example_kwargs.update(kwargs)
        # Note: Test may mutate arguments and we can't rerun explicit
        # examples, so we have to calculate the failure message at this
        # point rather than than later.
        message_on_failure = 'Falsifying example: %s(%s)' % (
            test.__name__, arg_string(test, arguments, example_kwargs))
        try:
            with BuildContext() as b:
                test_runner(
                    None,
                    lambda data: test(*arguments, **example_kwargs))
        except BaseException:
            report(message_on_failure)
            for n in b.notes:
                report(n)
            raise

    if settings.max_examples <= 0:
        return

    arguments = tuple(arguments)

    # Strategy producing (positional_args, keyword_args) pairs; fixed kwargs
    # supplied by the caller override generated ones.
    given_specifier = sd.tuples(
        sd.just(arguments),
        sd.fixed_dictionaries(generator_kwargs).map(
            lambda args: dict(args, **kwargs))
    )

    def fail_health_check(message, label):
        # Raise FailedHealthCheck unless this particular check is suppressed.
        if label in settings.suppress_health_check:
            return
        message += (
            '\nSee http://hypothesis.readthedocs.org/en/latest/health'
            'checks.html for more information about this. '
        )
        message += (
            'If you want to disable just this health check, add %s '
            'to the suppress_health_check settings for this test.'
        ) % (label,)
        raise FailedHealthCheck(message)

    search_strategy = given_specifier
    if selfy is not None:
        # Bound methods need the runner object threaded through execution.
        search_strategy = WithRunner(search_strategy, selfy)

    search_strategy.validate()

    # Health checks run only if enabled both locally and globally.
    perform_health_check = settings.perform_health_check
    perform_health_check &= Settings.default.perform_health_check

    from hypothesis.internal.conjecture.data import TestData, Status, \
        StopTest

    if not (
        Phase.reuse in settings.phases or Phase.generate in settings.phases
    ):
        return

    if perform_health_check:
        # Snapshot the global random state so we can detect strategies that
        # (incorrectly) depend on the random module.
        initial_state = getglobalrandomstate()
        health_check_random = Random(random.getrandbits(128))
        # We "pre warm" the health check with one draw to give it some
        # time to calculate any cached data. This prevents the case
        # where the first draw of the health check takes ages because
        # of loading unicode data the first time.
        data = TestData(
            max_length=settings.buffer_size,
            draw_bytes=lambda data, n, distribution:
            distribution(health_check_random, n)
        )
        with Settings(settings, verbosity=Verbosity.quiet):
            try:
                test_runner(data, reify_and_execute(
                    search_strategy,
                    lambda *args, **kwargs: None,
                ))
            except BaseException:
                # The warm-up draw is best-effort only; real failures will be
                # reported by the measured loop below.
                pass
        count = 0
        overruns = 0
        filtered_draws = 0
        start = time.time()
        # Draw until we have 10 good examples, or a second has passed, or we
        # have seen too many filtered/overrun draws to keep trying.
        while (
            count < 10 and time.time() < start + 1 and
            filtered_draws < 50 and overruns < 20
        ):
            try:
                data = TestData(
                    max_length=settings.buffer_size,
                    draw_bytes=lambda data, n, distribution:
                    distribution(health_check_random, n)
                )
                with Settings(settings, verbosity=Verbosity.quiet):
                    test_runner(data, reify_and_execute(
                        search_strategy,
                        lambda *args, **kwargs: None,
                    ))
                count += 1
            except UnsatisfiedAssumption:
                filtered_draws += 1
            except StopTest:
                if data.status == Status.INVALID:
                    filtered_draws += 1
                else:
                    assert data.status == Status.OVERRUN
                    overruns += 1
            except InvalidArgument:
                raise
            except Exception:
                report(traceback.format_exc())
                if test_runner is default_new_style_executor:
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        "an error in a function you've passed to "
                        'it to construct your data.',
                        HealthCheck.exception_in_generation,
                    )
                else:
                    # A custom executor may be mishandling the None-returning
                    # callable we pass for health checks, so mention that.
                    fail_health_check(
                        'An exception occurred during data '
                        'generation in initial health check. '
                        'This indicates a bug in the strategy. '
                        'This could either be a Hypothesis bug or '
                        'an error in a function you\'ve passed to '
                        'it to construct your data. Additionally, '
                        'you have a custom executor, which means '
                        'that this could be your executor failing '
                        'to handle a function which returns None. ',
                        HealthCheck.exception_in_generation,
                    )
        if overruns >= 20 or (not count and overruns > 0):
            fail_health_check((
                'Examples routinely exceeded the max allowable size. '
                '(%d examples overran while generating %d valid ones)'
                '. Generating examples this large will usually lead to'
                ' bad results. You should try setting average_size or '
                'max_size parameters on your collections and turning '
                'max_leaves down on recursive() calls.') % (
                overruns, count
            ), HealthCheck.data_too_large)
        if filtered_draws >= 50 or (not count and filtered_draws > 0):
            fail_health_check((
                'It looks like your strategy is filtering out a lot '
                'of data. Health check found %d filtered examples but '
                'only %d good ones. This will make your tests much '
                'slower, and also will probably distort the data '
                'generation quite a lot. You should adapt your '
                'strategy to filter less. This can also be caused by '
                'a low max_leaves parameter in recursive() calls') % (
                filtered_draws, count), HealthCheck.filter_too_much)
        runtime = time.time() - start
        if runtime > 1.0 or count < 10:
            fail_health_check((
                'Data generation is extremely slow: Only produced '
                '%d valid examples in %.2f seconds (%d invalid ones '
                'and %d exceeded maximum size). Try decreasing '
                "size of the data you're generating (with e.g."
                'average_size or max_leaves parameters).'
            ) % (count, runtime, filtered_draws, overruns),
                HealthCheck.too_slow,
            )
        if getglobalrandomstate() != initial_state:
            fail_health_check(
                'Data generation depends on global random module. '
                'This makes results impossible to replay, which '
                'prevents Hypothesis from working correctly. '
                'If you want to use methods from random, use '
                'randoms() from hypothesis.strategies to get an '
                'instance of Random you can use. Alternatively, you '
                'can use the random_module() strategy to explicitly '
                'seed the random module.',
                HealthCheck.random_module,
            )

    # Single-element lists used as mutable cells written by the closure below.
    last_exception = [None]
    repr_for_last_exception = [None]
    performed_random_check = [False]

    def evaluate_test_data(data):
        # Check the *test body* for global-random usage exactly once.
        if perform_health_check and not performed_random_check[0]:
            initial_state = getglobalrandomstate()
            performed_random_check[0] = True
        else:
            initial_state = None
        try:
            result = test_runner(data, reify_and_execute(
                search_strategy, test,
            ))
            if result is not None and settings.perform_health_check:
                fail_health_check((
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (test.__name__, result), HealthCheck.return_value)
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ):
            # These are control-flow / configuration errors, not test
            # failures, so they propagate unchanged.
            raise
        except Exception:
            # Any other exception is a candidate falsifying example.
            last_exception[0] = traceback.format_exc()
            verbose_report(last_exception[0])
            data.mark_interesting()
        finally:
            if (
                initial_state is not None and
                getglobalrandomstate() != initial_state
            ):
                fail_health_check(
                    'Your test used the global random module. '
                    'This is unlikely to work correctly. You should '
                    'consider using the randoms() strategy from '
                    'hypothesis.strategies instead. Alternatively, '
                    'you can use the random_module() strategy to '
                    'explicitly seed the random module.',
                    HealthCheck.random_module,
                )

    from hypothesis.internal.conjecture.engine import TestRunner

    falsifying_example = None
    database_key = str_to_bytes(fully_qualified_name(test))
    start_time = time.time()
    runner = TestRunner(
        evaluate_test_data,
        settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    run_time = time.time() - start_time
    timed_out = (
        settings.timeout > 0 and run_time >= settings.timeout
    )
    if runner.last_data is None:
        return
    if runner.last_data.status == Status.INTERESTING:
        falsifying_example = runner.last_data.buffer
        if settings.database is not None:
            settings.database.save(database_key, falsifying_example)
    else:
        # No failure found: decide whether we generated enough valid examples.
        if runner.valid_examples < min(
            settings.min_satisfying_examples,
            settings.max_examples,
        ):
            if timed_out:
                raise Timeout((
                    'Ran out of time before finding a satisfying '
                    'example for '
                    '%s. Only found %d examples in ' +
                    '%.2fs.'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples, run_time
                ))
            else:
                raise Unsatisfiable((
                    'Unable to satisfy assumptions of hypothesis '
                    '%s. Only %d examples considered '
                    'satisfied assumptions'
                ) % (
                    get_pretty_function_description(test),
                    runner.valid_examples,
                ))
        return

    assert last_exception[0] is not None

    # Re-run the falsifying example with full output so the user sees it.
    try:
        with settings:
            test_runner(
                TestData.for_buffer(falsifying_example),
                reify_and_execute(
                    search_strategy, test,
                    print_example=True, is_final=True
                ))
    except (UnsatisfiedAssumption, StopTest):
        report(traceback.format_exc())
        raise Flaky(
            'Unreliable assumption: An example which satisfied '
            'assumptions on the first run now fails it.')

    # If we get here the replay did NOT raise: the failure is flaky.
    report(
        'Failed to reproduce exception. Expected: \n' +
        last_exception[0],
    )

    filter_message = (
        'Unreliable test data: Failed to reproduce a failure '
        'and then when it came to recreating the example in '
        'order to print the test data with a flaky result '
        'the example was filtered out (by e.g. a '
        'call to filter in your strategy) when we didn\'t '
        'expect it to be.'
    )

    try:
        test_runner(
            TestData.for_buffer(falsifying_example),
            reify_and_execute(
                search_strategy,
                test_is_flaky(test, repr_for_last_exception[0]),
                print_example=True, is_final=True
            ))
    except (UnsatisfiedAssumption, StopTest):
        raise Flaky(filter_message)
def wrapped_test(*arguments, **kwargs):
    """Execute ``test`` under Hypothesis.

    Handles double-@given deprecation, type-inference for ``infer``
    arguments, @reproduce_failure replay, explicit @example replay, and
    finally normal generation via ``StateForActualGivenExecution``.
    Closes over ``test``, ``argspec``, ``generator_arguments`` and
    ``generator_kwargs`` from the enclosing decorator scope.
    """
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True

    if getattr(test, 'is_hypothesis_test', False):
        note_deprecation(
            'You have applied @given to a test more than once. In '
            'future this will be an error. Applying @given twice '
            'wraps the test twice, which can be extremely slow. A '
            'similar effect can be gained by combining the arguments '
            'of the two calls to given. For example, instead of '
            '@given(booleans()) @given(integers()), you could write '
            '@given(booleans(), integers())')

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    # Resolve any strategy given as `infer` from the test's type hints.
    if infer in generator_kwargs.values():
        hints = get_type_hints(test)
        for name in [
            name for name, value in generator_kwargs.items()
            if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    'passed %s=infer for %s, but %s has no type annotation'
                    % (name, test.__name__, name))
            generator_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, generator_arguments,
        generator_kwargs, argspec, test, settings
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    # Catch common unittest mistakes: decorating a non-test method, or using
    # a TestCase class that won't run each example in its own transaction.
    runner = getattr(search_strategy, 'runner', None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        msg = ('You have applied @given to the method %s, which is '
               'used by the unittest runner but is not itself a test.'
               ' This is not useful in any way.' % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            'You have applied @given to a method on %s, but this '
            'class does not inherit from the supported versions in '
            '`hypothesis.extra.django`. Use the Hypothesis variants '
            'to ensure that each example is run in a separate '
            'database transaction.' % qualname(type(runner)))

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random,
        had_seed=wrapped_test._hypothesis_internal_use_seed
    )

    reproduce_failure = \
        wrapped_test._hypothesis_internal_use_reproduce_failure
    if reproduce_failure is not None:
        # Replay a @reproduce_failure blob instead of generating examples.
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument((
                'Attempting to reproduce a failure from a different '
                'version of Hypothesis. This failure is from %s, but '
                'you are currently running %r. Please change your '
                'Hypothesis version to a matching one.'
            ) % (expected_version, __version__))
        try:
            state.execute(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True, is_final=True,
            )
            # The blob was expected to make the test fail; success means it
            # no longer reproduces.
            raise DidNotReproduce(
                'Expected the test to raise an error, but it '
                'completed successfully.')
        except StopTest:
            raise DidNotReproduce(
                'The shape of the test data has changed in some way '
                'from where this blob was defined. Are you sure '
                "you're running the same test?")
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                'The test data failed to satisfy an assumption in the '
                'test. Have you added it since this blob was '
                'generated?')

    execute_explicit_examples(
        test_runner, test, wrapped_test, settings, arguments, kwargs
    )

    if settings.max_examples <= 0:
        return

    if not (
        Phase.reuse in settings.phases or
        Phase.generate in settings.phases
    ):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, 'subTest'):
            # subTest would swallow failures per-example; temporarily replace
            # it so Hypothesis sees the real exception, then restore it.
            subTest = runner.subTest
            try:
                setattr(runner, 'subTest', fake_subTest)
                state.run()
            finally:
                setattr(runner, 'subTest', subTest)
        else:
            state.run()
    except BaseException:
        # On failure, tell the user how to replay the generated seed.
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        if generated_seed is not None and not state.failed_normally:
            if running_under_pytest:
                report((
                    'You can add @seed(%(seed)d) to this test or run '
                    'pytest with --hypothesis-seed=%(seed)d to '
                    'reproduce this failure.') % {
                        'seed': generated_seed},)
            else:
                report((
                    'You can add @seed(%d) to this test to reproduce '
                    'this failure.') % (generated_seed,))
        raise
def get_columns(draw, columns):
    """Reject an unsupported ``columns`` argument.

    Unconditionally raises InvalidArgument: only an integer count or a list
    of strategies is an acceptable columns specification.
    """
    rejection = (
        "Columns parameter must either be an integer or a list of strategies")
    raise InvalidArgument(rejection)
def magic(
    *modules_or_functions: Union[Callable, types.ModuleType],
    except_: Except = (),
    style: str = "pytest",
) -> str:
    """Guess which ghostwriters to use, for a module or collection of functions.

    As for all ghostwriters, the ``except_`` argument should be an
    :class:`python:Exception` or tuple of exceptions, and ``style`` may be either
    ``"pytest"`` to write test functions or ``"unittest"`` to write test methods
    and :class:`~python:unittest.TestCase`.

    After finding the public functions attached to any modules, the ``magic``
    ghostwriter looks for pairs of functions to pass to :func:`~roundtrip`,
    then checks for :func:`~binary_operation` and :func:`~ufunc` functions,
    and any others are passed to :func:`~fuzz`.

    For example, try :command:`hypothesis write gzip` on the command line!
    """
    except_ = _check_except(except_)
    _check_style(style)
    if not modules_or_functions:
        raise InvalidArgument("Must pass at least one function or module to test.")
    # Collect candidate callables: passed directly, or discovered on modules
    # (via __all__ when present, otherwise public callables in vars()).
    functions = set()
    for thing in modules_or_functions:
        if callable(thing):
            functions.add(thing)
        elif isinstance(thing, types.ModuleType):
            if hasattr(thing, "__all__"):
                funcs = [getattr(thing, name, None) for name in thing.__all__]  # type: ignore
            else:
                funcs = [
                    v
                    for k, v in vars(thing).items()
                    if callable(v) and not k.startswith("_")
                ]
            for f in funcs:
                try:
                    # Skip mocks and zero-argument callables; introspection
                    # itself may fail for exotic callables, which we ignore.
                    if (not is_mock(f)) and callable(f) and _get_params(f):
                        functions.add(f)
                except (TypeError, ValueError):
                    pass
        else:
            raise InvalidArgument(f"Can't test non-module non-callable {thing!r}")

    imports = set()
    parts = []

    # ``by_name`` maps fully-qualified names to functions; each pass below
    # pops the functions it has handled so later passes see the remainder.
    by_name = {}
    for f in functions:
        try:
            by_name[_get_qualname(f, include_module=True)] = f
        except Exception:
            pass  # e.g. Pandas 'CallableDynamicDoc' object has no attribute '__name__'
    if not by_name:
        return (
            f"# Found no testable functions in\n"
            f"# {functions!r} from {modules_or_functions}\n"
        )

    # Look for pairs of functions that roundtrip, based on known naming patterns.
    for writename, readname in ROUNDTRIP_PAIRS:
        for name in sorted(by_name):
            match = re.fullmatch(writename, name.split(".")[-1])
            if match:
                inverse_name = readname.format(*match.groups())
                # [:1] takes at most one inverse partner per writer.
                for other in sorted(
                    n for n in by_name if n.split(".")[-1] == inverse_name
                )[:1]:
                    imp, body = _make_roundtrip_body(
                        (by_name.pop(name), by_name.pop(other)),
                        except_=except_,
                        style=style,
                    )
                    imports |= imp
                    parts.append(body)

    # Look for equivalent functions: same name, all required arguments of any can
    # be found in all signatures, and if all have return-type annotations they match.
    names = defaultdict(list)
    for _, f in sorted(by_name.items()):
        names[_get_qualname(f)].append(f)
    for group in names.values():
        if len(group) >= 2 and len({frozenset(_get_params(f)) for f in group}) == 1:
            sentinel = object()
            returns = {get_type_hints(f).get("return", sentinel) for f in group}
            if len(returns - {sentinel}) <= 1:
                imp, body = _make_equiv_body(group, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                for f in group:
                    by_name.pop(_get_qualname(f, include_module=True))

    # Look for binary operators - functions with two identically-typed arguments,
    # and the same return type. The latter restriction might be lifted later.
    for name, func in sorted(by_name.items()):
        hints = get_type_hints(func)
        hints.pop("return", None)
        if len(hints) == len(_get_params(func)) == 2:
            a, b = hints.values()
            if a == b:
                imp, body = _make_binop_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # Look for Numpy ufuncs or gufuncs, and write array-oriented tests for them.
    if "numpy" in sys.modules:
        for name, func in sorted(by_name.items()):
            if _is_probably_ufunc(func):
                imp, body = _make_ufunc_body(func, except_=except_, style=style)
                imports |= imp
                parts.append(body)
                del by_name[name]

    # For all remaining callables, just write a fuzz-test. In principle we could
    # guess at equivalence or idempotence; but it doesn't seem accurate enough to
    # be worth the trouble when it's so easy for the user to specify themselves.
    for _, f in sorted(by_name.items()):
        imp, body = _make_test_body(
            f, test_body=_write_call(f), except_=except_, ghost="fuzz", style=style
        )
        imports |= imp
        parts.append(body)
    return _make_test(imports, "\n".join(parts))
def find(
    specifier,  # type: SearchStrategy
    condition,  # type: Callable[[Any], bool]
    settings=None,  # type: Settings
    random=None,  # type: Any
    database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``.

    Raises NoSuchExample if no satisfying example is found, Unsatisfiable if
    generation itself could not produce valid examples, and Timeout if the
    configured timeout elapsed first.
    """
    settings = settings or Settings(
        max_examples=2000,
        max_shrinks=2000,
    )
    # Health checks are tuned for @given-style tests and would misfire here.
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument(
            'Expected SearchStrategy but got %r of type %s' % (
                specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    # Single-element lists act as mutable cells written by the closure below.
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        # Draw an example and mark the data "interesting" when it satisfies
        # ``condition`` — the engine then shrinks interesting examples.
        with BuildContext(data):
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(
                    u'Tried non-satisfying example %s' % (nicerepr(result),))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u'Found satisfying example %s' % (last_repr[0],))
                    last_data[0] = data
                elif (
                    # Only report examples that are genuinely smaller (by the
                    # engine's shrink ordering) and print differently.
                    sort_key(hbytes(data.buffer)) <
                    sort_key(last_data[0].buffer)
                ) and nicerepr(result) != last_repr[0]:
                    last_repr[0] = nicerepr(result)
                    report(u'Shrunk example to %s' % (last_repr[0],))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    start = time.time()
    runner = ConjectureRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    note_engine_for_statistics(runner)
    run_time = time.time() - start
    if runner.interesting_examples:
        # Replay the (shrunk) interesting buffer to rebuild the value.
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with BuildContext(data):
            return data.draw(search)
    if runner.valid_examples == 0 and (
        runner.exit_reason != ExitReason.finished
    ):
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((  # pragma: no cover
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples, run_time))
        else:
            raise Unsatisfiable(
                'Unable to satisfy assumptions of %s.' %
                (get_pretty_function_description(condition),))

    raise NoSuchExample(get_pretty_function_description(condition))
def floats(min_value=None, max_value=None, allow_nan=None, allow_infinity=None):
    """Returns a strategy which generates floats.

    - If min_value is not None, all values will be >= min_value.
    - If max_value is not None, all values will be <= max_value.
    - If min_value or max_value is not None, it is an error to enable
      allow_nan.
    - If both min_value and max_value are not None, it is an error to enable
      allow_infinity.

    Where not explicitly ruled out by the bounds, all of infinity, -infinity
    and NaN are possible values generated by this strategy.
    """
    # NaN defaults to allowed only for the fully-unbounded strategy.
    if allow_nan is None:
        allow_nan = bool(min_value is None and max_value is None)
    elif allow_nan:
        if min_value is not None or max_value is not None:
            raise InvalidArgument(
                u'Cannot have allow_nan=%r, with min_value or max_value' % (
                    allow_nan))

    check_valid_bound(min_value, u'min_value')
    check_valid_bound(max_value, u'max_value')
    check_valid_interval(min_value, max_value, u'min_value', u'max_value')
    if min_value is not None:
        min_value = float(min_value)
    if max_value is not None:
        max_value = float(max_value)
    # Infinite bounds are equivalent to no bound at all.
    if min_value == float(u'-inf'):
        min_value = None
    if max_value == float(u'inf'):
        max_value = None

    # Infinity defaults to allowed whenever at least one side is unbounded.
    if allow_infinity is None:
        allow_infinity = bool(min_value is None or max_value is None)
    elif allow_infinity:
        if min_value is not None and max_value is not None:
            raise InvalidArgument(
                u'Cannot have allow_infinity=%r, with both min_value and '
                u'max_value' % (allow_infinity))

    from hypothesis.searchstrategy.numbers import WrapperFloatStrategy, \
        GaussianFloatStrategy, BoundedFloatStrategy, ExponentialFloatStrategy,\
        JustIntFloats, NastyFloats, FullRangeFloats, \
        FixedBoundedFloatStrategy
    if min_value is None and max_value is None:
        # Fully unbounded: mix several distributions plus "nasty" edge cases.
        return WrapperFloatStrategy(
            GaussianFloatStrategy() |
            BoundedFloatStrategy() |
            ExponentialFloatStrategy() |
            JustIntFloats() |
            NastyFloats(allow_nan, allow_infinity) |
            FullRangeFloats(allow_nan, allow_infinity))
    elif min_value is not None and max_value is not None:
        if min_value == max_value:
            return just(min_value)
        elif math.isinf(max_value - min_value):
            # The interval is too wide to handle directly; split at zero.
            assert min_value < 0 and max_value > 0
            return floats(min_value=0, max_value=max_value) | floats(
                min_value=min_value, max_value=0)
        elif count_between_floats(min_value, max_value) > 1000:
            critical_values = [
                min_value, max_value, min_value + (max_value - min_value) / 2]
            if min_value <= 0 <= max_value:
                if not is_negative(max_value):
                    critical_values.append(0.0)
                if is_negative(min_value):
                    critical_values.append(-0.0)
            return FixedBoundedFloatStrategy(
                lower_bound=min_value, upper_bound=max_value
            ) | sampled_from(critical_values)
        elif is_negative(max_value):
            assert is_negative(min_value)
            # Small all-negative range: enumerate via the bit-level integer
            # representation (ordering is reversed for negative floats).
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert ub_int <= lb_int
            return integers(min_value=ub_int, max_value=lb_int).map(
                int_to_float)
        elif is_negative(min_value):
            return floats(min_value=min_value, max_value=-0.0) | floats(
                min_value=0, max_value=max_value)
        else:
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert lb_int <= ub_int
            return integers(min_value=lb_int, max_value=ub_int).map(
                int_to_float)
    elif min_value is not None:
        critical_values = [min_value]
        if allow_infinity:
            critical_values.append(float(u'inf'))
        if is_negative(min_value):
            critical_values.append(-0.0)
        if min_value <= 0:
            critical_values.append(0.0)
        # Shift unbounded draws up above min_value; assume() discards NaN.
        return (floats(
            allow_infinity=allow_infinity, allow_nan=False).map(
            lambda x: assume(not math.isnan(x)) and min_value + abs(x)
        )) | sampled_from(critical_values)
    else:
        assert max_value is not None
        critical_values = [max_value]
        if allow_infinity:
            critical_values.append(float(u'-inf'))
        if max_value >= 0:
            critical_values.append(-0.0)
        if not is_negative(max_value):
            critical_values.append(0.0)
        # Mirror of the min_value-only case: reflect draws down below max.
        return (floats(
            allow_infinity=allow_infinity, allow_nan=False).map(
            lambda x: assume(not math.isnan(x)) and max_value - abs(x)
        )) | sampled_from(critical_values)
def __init__(self, grammar, start, explicit):
    """Build drawing tables from a Lark grammar.

    ``grammar`` is a lark.lark.Lark instance; ``start`` is a start symbol
    name, a list of names, or None (use the grammar's configured start);
    ``explicit`` maps terminal names to user-supplied strategies.
    """
    assert isinstance(grammar, lark.lark.Lark)
    if start is None:
        start = grammar.options.start
    if not isinstance(start, list):
        start = [start]
    self.grammar = grammar

    # This is a total hack, but working around the changes is a nicer user
    # experience than breaking for anyone who doesn't instantly update their
    # installation of Lark alongside Hypothesis.
    compile_args = getfullargspec(grammar.grammar.compile).args
    if "terminals_to_keep" in compile_args:
        terminals, rules, ignore_names = grammar.grammar.compile(start, ())
    elif "start" in compile_args:  # pragma: no cover
        # Support lark <= 0.10.0, without the terminals_to_keep argument.
        terminals, rules, ignore_names = grammar.grammar.compile(start)
    else:  # pragma: no cover
        # This branch is to support lark <= 0.7.1, without the start argument.
        terminals, rules, ignore_names = grammar.grammar.compile()

    # Map every symbol name (nonterminal origins and terminals) to its
    # symbol object so start/ignore names can be resolved below.
    self.names_to_symbols = {}

    for r in rules:
        t = r.origin
        self.names_to_symbols[t.name] = t

    for t in terminals:
        self.names_to_symbols[t.name] = Terminal(t.name)

    self.start = st.sampled_from([self.names_to_symbols[s] for s in start])

    self.ignored_symbols = tuple(
        self.names_to_symbols[n] for n in ignore_names)

    # Default terminal strategies generate text matching each terminal's
    # regexp; explicit user strategies override these below.
    self.terminal_strategies = {
        t.name: st.from_regex(t.pattern.to_regexp(), fullmatch=True)
        for t in terminals
    }
    unknown_explicit = set(explicit) - get_terminal_names(
        terminals, rules, ignore_names
    )
    if unknown_explicit:
        raise InvalidArgument(
            "The following arguments were passed as explicit_strategies, "
            "but there is no such terminal production in this grammar: "
            + repr(sorted(unknown_explicit))
        )
    self.terminal_strategies.update(explicit)

    nonterminals = {}

    for rule in rules:
        nonterminals.setdefault(rule.origin.name, []).append(
            tuple(rule.expansion))

    # Sort expansions by length so shorter productions shrink first.
    for v in nonterminals.values():
        v.sort(key=len)

    self.nonterminal_strategies = {
        k: st.sampled_from(v) for k, v in nonterminals.items()
    }

    self.__rule_labels = {}
def floats(min_value=None, max_value=None):
    """Returns a strategy which generates floats.

    If min_value is not None, all values will be >= min_value.
    If max_value is not None, all values will be <= max_value.

    Where not explicitly ruled out by the bounds, all of infinity, -infinity
    and NaN are possible values generated by this strategy.
    """
    # NaN is never a meaningful bound.
    for e in (min_value, max_value):
        if e is not None and math.isnan(e):
            raise InvalidArgument('nan is not a valid end point')
    if min_value is not None:
        min_value = float(min_value)
    if max_value is not None:
        max_value = float(max_value)
    # Infinite bounds are equivalent to no bound at all.
    if min_value == float('-inf'):
        min_value = None
    if max_value == float('inf'):
        max_value = None

    from hypothesis.searchstrategy.numbers import WrapperFloatStrategy, \
        GaussianFloatStrategy, BoundedFloatStrategy, ExponentialFloatStrategy,\
        JustIntFloats, NastyFloats, FullRangeFloats, \
        FixedBoundedFloatStrategy, FloatsFromBase
    if min_value is None and max_value is None:
        # Fully unbounded: mix several distributions plus "nasty" edge cases.
        return WrapperFloatStrategy(
            GaussianFloatStrategy() |
            BoundedFloatStrategy() |
            ExponentialFloatStrategy() |
            JustIntFloats() |
            NastyFloats() |
            FullRangeFloats())
    elif min_value is not None and max_value is not None:
        if max_value < min_value:
            raise InvalidArgument(
                'Cannot have max_value=%r < min_value=%r' % (
                    max_value, min_value))
        elif min_value == max_value:
            return just(min_value)
        elif math.isinf(max_value - min_value):
            # The interval is too wide to handle directly; split at zero.
            assert min_value < 0 and max_value > 0
            return floats(min_value=0, max_value=max_value) | floats(
                min_value=min_value, max_value=0)
        elif count_between_floats(min_value, max_value) > 1000:
            critical_values = [
                min_value, max_value, min_value + (max_value - min_value) / 2]
            if min_value <= 0 <= max_value:
                if not is_negative(max_value):
                    critical_values.append(0.0)
                if is_negative(min_value):
                    critical_values.append(-0.0)
            return FixedBoundedFloatStrategy(
                lower_bound=min_value, upper_bound=max_value
            ) | sampled_from(critical_values)
        elif is_negative(max_value):
            assert is_negative(min_value)
            # Small all-negative range: enumerate via the bit-level integer
            # representation (ordering is reversed for negative floats).
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert ub_int <= lb_int
            return integers(min_value=ub_int, max_value=lb_int).map(
                int_to_float)
        elif is_negative(min_value):
            return floats(min_value=min_value, max_value=-0.0) | floats(
                min_value=0, max_value=max_value)
        else:
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert lb_int <= ub_int
            return integers(min_value=lb_int, max_value=ub_int).map(
                int_to_float)
    elif min_value is not None:
        critical_values = [min_value, float('inf')]
        if is_negative(min_value):
            critical_values.append(-0.0)
        if min_value <= 0:
            critical_values.append(0.0)
        return FloatsFromBase(
            base=min_value, sign=1,
        ) | sampled_from(critical_values)
    else:
        assert max_value is not None
        critical_values = [max_value, float('-inf')]
        if max_value >= 0:
            critical_values.append(-0.0)
        if not is_negative(max_value):
            critical_values.append(0.0)
        return FloatsFromBase(
            base=max_value, sign=-1
        ) | sampled_from(critical_values)
def datetime_specced_strategy(spec, settings):
    """Build a datetimes() strategy from ``spec.naive_options``.

    Raises InvalidArgument when the spec permits neither naive nor
    timezone-aware datetimes.
    """
    naive_options = spec.naive_options
    if not naive_options:
        raise InvalidArgument('Must allow either naive or non-naive datetimes')
    allow_naive = True in naive_options
    if False in naive_options:
        timezones = None
    else:
        timezones = []
    return datetimes(allow_naive=allow_naive, timezones=timezones)
def check_argument(condition, fail_message, *f_args, **f_kwargs):
    """Raise InvalidArgument with the formatted message when ``condition``
    is falsey; otherwise do nothing.

    ``fail_message`` is a ``str.format`` template, filled in with ``f_args``
    and ``f_kwargs`` only if the check fails.
    """
    if condition:
        return
    raise InvalidArgument(fail_message.format(*f_args, **f_kwargs))
def do_draw(self, data):
    """Draw one numpy array of ``self.shape``/``self.dtype`` from ``data``.

    Draws densely when no fill strategy is available, otherwise draws a
    sparse set of elements and fills the rest with a single fill value.
    """
    if 0 in self.shape:
        # Any zero-length dimension means an empty array; nothing to draw.
        return np.zeros(dtype=self.dtype, shape=self.shape)

    # Reset this flag for each test case to emit warnings from set_element
    self._report_overflow = True

    # This could legitimately be a np.empty, but the performance gains for
    # that would be so marginal that there's really not much point risking
    # undefined behaviour shenanigans.
    result = np.zeros(shape=self.array_size, dtype=self.dtype)

    if self.fill.is_empty:
        # We have no fill value (either because the user explicitly
        # disabled it or because the default behaviour was used and our
        # elements strategy does not produce reusable values), so we must
        # generate a fully dense array with a freshly drawn value for each
        # entry.
        if self.unique:
            seen = set()
            elements = cu.many(
                data,
                min_size=self.array_size, max_size=self.array_size,
                average_size=self.array_size
            )
            i = 0
            while elements.more():
                # We assign first because this means we check for
                # uniqueness after numpy has converted it to the relevant
                # type for us. Because we don't increment the counter on
                # a duplicate we will overwrite it on the next draw.
                self.set_element(data, result, i)
                if result[i] not in seen:
                    seen.add(result[i])
                    i += 1
                else:
                    elements.reject()
        else:
            for i in hrange(len(result)):
                self.set_element(data, result, i)
    else:
        # We draw numpy arrays as "sparse with an offset". We draw a
        # collection of index assignments within the array and assign
        # fresh values from our elements strategy to those indices. If at
        # the end we have not assigned every element then we draw a single
        # value from our fill strategy and use that to populate the
        # remaining positions with that strategy.
        elements = cu.many(
            data,
            min_size=0, max_size=self.array_size,
            # sqrt isn't chosen for any particularly principled reason. It
            # just grows reasonably quickly but sublinearly, and for small
            # arrays it represents a decent fraction of the array size.
            average_size=math.sqrt(self.array_size),
        )

        # needs_fill[i] is True while index i has not been explicitly drawn.
        needs_fill = np.full(self.array_size, True)
        seen = set()

        while elements.more():
            i = cu.integer_range(data, 0, self.array_size - 1)
            if not needs_fill[i]:
                elements.reject()
                continue
            self.set_element(data, result, i)
            if self.unique:
                if result[i] in seen:
                    elements.reject()
                    continue
                else:
                    seen.add(result[i])
            needs_fill[i] = False
        if needs_fill.any():
            # We didn't fill all of the indices in the early loop, so we
            # put a fill value into the rest.

            # We have to do this hilarious little song and dance to work
            # around numpy's special handling of iterable values. If the
            # value here were e.g. a tuple then neither array creation
            # nor putmask would do the right thing. But by creating an
            # array of size one and then assigning the fill value as a
            # single element, we both get an array with the right value in
            # it and putmask will do the right thing by repeating the
            # values of the array across the mask.
            one_element = np.zeros(shape=1, dtype=self.dtype)
            self.set_element(data, one_element, 0, self.fill)
            fill_value = one_element[0]
            if self.unique:
                # Only NaN may be repeated in a unique array, because
                # NaN != NaN and so repeats never compare equal.
                try:
                    is_nan = np.isnan(fill_value)
                except TypeError:
                    is_nan = False

                if not is_nan:
                    raise InvalidArgument(
                        'Cannot fill unique array with non-NaN '
                        'value %r' % (fill_value,))

            np.putmask(result, needs_fill, one_element)

    # Drawing happened on a flat buffer; reshape to the requested shape.
    return result.reshape(self.shape)
def __from_schema(
    schema: Union[bool, Schema],
    *,
    custom_formats: Dict[str, st.SearchStrategy[str]] = None,
) -> st.SearchStrategy[JSONType]:
    """Build a strategy generating JSON values matching ``schema``.

    Resolves ``$ref``s, validates/normalises ``custom_formats``, canonicalises
    the schema, then dispatches on the combining keywords (not/anyOf/allOf/
    oneOf), the simple cases (enum/const), and finally on ``type``.
    """
    try:
        schema = resolve_all_refs(schema)
    except RecursionError:
        # Self-referential schemas cannot be inlined; report rather than crash.
        raise HypothesisRefResolutionError(
            f"Could not resolve recursive references in schema={schema!r}"
        ) from None
    # We check for _FORMATS_TOKEN to avoid re-validating known good data.
    if custom_formats is not None and _FORMATS_TOKEN not in custom_formats:
        assert isinstance(custom_formats, dict)
        for name, strat in custom_formats.items():
            if not isinstance(name, str):
                raise InvalidArgument(f"format name {name!r} must be a string")
            if name in STRING_FORMATS:
                raise InvalidArgument(
                    f"Cannot redefine standard format {name!r}")
            if not isinstance(strat, st.SearchStrategy):
                raise InvalidArgument(
                    f"custom_formats[{name!r}]={strat!r} must be a Hypothesis "
                    "strategy which generates strings matching this format.")
        format_checker = jsonschema.FormatChecker()
        # Wrap each strategy with a filter for any format the checker knows,
        # so generated values actually satisfy the format assertion.
        custom_formats = {
            name: _get_format_filter(name, format_checker, strategy)
            if name in format_checker.checkers else strategy
            for name, strategy in custom_formats.items()
        }
        # Sentinel marking this dict as already validated (see check above).
        custom_formats[_FORMATS_TOKEN] = None  # type: ignore

    schema = canonicalish(schema)
    # Boolean objects are special schemata; False rejects all and True accepts all.
    if schema == FALSEY:
        return st.nothing()
    if schema == TRUTHY:
        return JSON_STRATEGY
    # Only check if declared, lest we error on inner non-latest-draft schemata.
    if "$schema" in schema:
        jsonschema.validators.validator_for(schema).check_schema(schema)
        if schema["$schema"] == "http://json-schema.org/draft-03/schema#":
            raise InvalidArgument("Draft-03 schemas are not supported")
    assert isinstance(schema, dict)
    # Now we handle as many validation keywords as we can...
    # Applying subschemata with boolean logic
    if "not" in schema:
        not_ = schema.pop("not")
        assert isinstance(not_, dict)
        validator = make_validator(not_).is_valid
        # Generate from the rest of the schema, rejecting anything the
        # "not" subschema would accept.
        return from_schema(
            schema,
            custom_formats=custom_formats).filter(lambda v: not validator(v))
    if "anyOf" in schema:
        tmp = schema.copy()
        ao = tmp.pop("anyOf")
        assert isinstance(ao, list)
        return st.one_of(
            [merged_as_strategies([tmp, s], custom_formats) for s in ao])
    if "allOf" in schema:
        tmp = schema.copy()
        ao = tmp.pop("allOf")
        assert isinstance(ao, list)
        return merged_as_strategies([tmp] + ao, custom_formats)
    if "oneOf" in schema:
        tmp = schema.copy()
        oo = tmp.pop("oneOf")
        assert isinstance(oo, list)
        schemas = [merged([tmp, s]) for s in oo]
        # The final filter enforces the "exactly one branch" semantics that
        # merging alone cannot express.
        return st.one_of([
            from_schema(s, custom_formats=custom_formats)
            for s in schemas if s is not None
        ]).filter(make_validator(schema).is_valid)
    # Simple special cases
    if "enum" in schema:
        assert schema["enum"], "Canonicalises to non-empty list or FALSEY"
        return st.sampled_from(schema["enum"])
    if "const" in schema:
        return st.just(schema["const"])
    # Finally, resolve schema by type - defaulting to "object"
    map_: Dict[str, Callable[[Schema], st.SearchStrategy[JSONType]]] = {
        "null": lambda _: st.none(),
        "boolean": lambda _: st.booleans(),
        "number": number_schema,
        "integer": integer_schema,
        "string": partial(string_schema, custom_formats),
        "array": partial(array_schema, custom_formats),
        "object": partial(object_schema, custom_formats),
    }
    assert set(map_) == set(TYPE_STRINGS)
    return st.one_of([map_[t](schema) for t in get_type(schema)])
def canonicalish(schema: JSONType) -> Dict[str, Any]:
    """Convert a schema into a more-canonical form.

    This is obviously incomplete, but improves best-effort recognition of
    equivalent schemas and makes conversion logic simpler.
    """
    if schema is True:
        return {}
    elif schema is False:
        return {"not": {}}
    # Otherwise, we're dealing with "objects", i.e. dicts.
    if not isinstance(schema, dict):
        raise InvalidArgument(
            f"Got schema={schema} of type {type(schema).__name__}, "
            "but expected a dict.")
    # Make a copy, so we don't mutate the existing schema in place.
    schema = dict(schema)
    if "const" in schema:
        # A const that fails its own schema's other constraints matches nothing.
        if not make_validator(schema).is_valid(schema["const"]):
            return FALSEY
        return {"const": schema["const"]}
    if "enum" in schema:
        validator = make_validator(schema)
        # Drop enum members ruled out by the rest of the schema; sort for a
        # canonical ordering.
        enum_ = sorted((v for v in schema["enum"] if validator.is_valid(v)),
                       key=sort_key)
        if not enum_:
            return FALSEY
        elif len(enum_) == 1:
            return {"const": enum_[0]}
        return {"enum": enum_}
    # Recurse into the value of each keyword with a schema (or list of them) as a value
    for key in SCHEMA_KEYS:
        if isinstance(schema.get(key), list):
            schema[key] = [canonicalish(v) for v in schema[key]]
        elif isinstance(schema.get(key), (bool, dict)):
            schema[key] = canonicalish(schema[key])
        else:
            assert key not in schema
    for key in SCHEMA_OBJECT_KEYS:
        if key in schema:
            # Values may be either subschemas or (for "dependencies") lists of
            # property names; only the former are canonicalised.
            schema[key] = {
                k: v if isinstance(v, list) else canonicalish(v)
                for k, v in schema[key].items()
            }

    type_ = get_type(schema)
    if "number" in type_:
        lo, hi, exmin, exmax = get_number_bounds(schema)
        mul = schema.get("multipleOf")
        # Remove "number" if the bounds are empty or no multiple fits in them.
        if (lo is not None and hi is not None
                and (lo > hi or (lo == hi and (exmin or exmax)) or
                     (mul and not has_divisibles(lo, hi, mul, exmin, exmax)))):
            type_.remove("number")
    if "integer" in type_:
        lo, hi = get_integer_bounds(schema)
        mul = schema.get("multipleOf")
        # Tighten the integer bounds to the nearest multiples of mul.
        if lo is not None and isinstance(mul, int) and mul > 1 and (lo % mul):
            lo += mul - (lo % mul)
        if hi is not None and isinstance(mul, int) and mul > 1 and (hi % mul):
            hi -= hi % mul
        if "number" not in type_:
            # Safe to rewrite the bounds in-place only when they don't also
            # constrain a "number" type with different semantics.
            if lo is not None:
                schema["minimum"] = lo
                schema.pop("exclusiveMinimum", None)
            if hi is not None:
                schema["maximum"] = hi
                schema.pop("exclusiveMaximum", None)
        if lo is not None and hi is not None and lo > hi:
            type_.remove("integer")
    if "array" in type_ and "contains" in schema:
        if schema["contains"] == FALSEY:
            # Nothing can satisfy "contains", so no array can match.
            type_.remove("array")
        else:
            schema["minItems"] = max(schema.get("minItems", 0), 1)
        if schema["contains"] == TRUTHY:
            # Any element satisfies it, so it reduces to minItems >= 1.
            schema.pop("contains")
            schema["minItems"] = max(schema.get("minItems", 1), 1)
    if "array" in type_ and schema.get("minItems", 0) > schema.get(
            "maxItems", math.inf):
        type_.remove("array")
    if ("array" in type_ and "minItems" in schema
            and isinstance(schema.get("items", []), (bool, dict))):
        count = upper_bound_instances(schema["items"])
        # Too few distinct possible items to reach minItems (with uniqueness).
        if (count == 0 and schema["minItems"] > 0) or (schema.get(
                "uniqueItems", False) and count < schema["minItems"]):
            type_.remove("array")
    if "array" in type_ and isinstance(schema.get("items"), list):
        # Positional items beyond maxItems can never be used.
        schema["items"] = schema["items"][:schema.get("maxItems")]
        for idx, s in enumerate(schema["items"]):
            if s == FALSEY:
                if schema.get("minItems", 0) > idx:
                    # A required position rejects everything: no valid array.
                    type_.remove("array")
                    break
                # Truncate at the first all-rejecting position instead.
                schema["items"] = schema["items"][:idx]
                schema["maxItems"] = idx
                schema.pop("additionalItems", None)
                break
    if ("array" in type_ and isinstance(schema.get("items"), list)
            and schema.get("additionalItems") == FALSEY):
        # Length is already capped by the positional items list.
        schema.pop("maxItems", None)
    if "array" in type_ and (schema.get("items") == FALSEY
                             or schema.get("maxItems", 1) == 0):
        # Only the empty array is possible; drop now-redundant keywords.
        schema["maxItems"] = 0
        schema.pop("items", None)
        schema.pop("uniqueItems", None)
        schema.pop("additionalItems", None)
    if "array" in type_ and schema.get("items", TRUTHY) == TRUTHY:
        # items: true is the default; remove the no-op.
        schema.pop("items", None)
    if ("properties" in schema and not schema.get("patternProperties")
            and schema.get("additionalProperties") == FALSEY):
        # Closed object: cannot have more properties than are declared.
        schema["maxProperties"] = min(schema.get("maxProperties", math.inf),
                                      len(schema["properties"]))
    if "object" in type_ and schema.get("minProperties", 0) > schema.get(
            "maxProperties", math.inf):
        type_.remove("object")
    # Canonicalise "required" schemas to remove redundancy
    if "object" in type_ and "required" in schema:
        assert isinstance(schema["required"], list)
        reqs = set(schema["required"])
        if schema.get("dependencies"):
            # When the presence of a required property requires other properties via
            # dependencies, those properties can be moved to the base required keys.
            dep_names = {
                k: sorted(v)
                for k, v in schema["dependencies"].items()
                if isinstance(v, list)
            }
            while reqs.intersection(dep_names):
                for r in reqs.intersection(dep_names):
                    reqs.update(dep_names.pop(r))
            for k, v in list(schema["dependencies"].items()):
                if isinstance(v, list) and k not in dep_names:
                    schema["dependencies"].pop(k)
        schema["required"] = sorted(reqs)
        max_ = schema.get("maxProperties", float("inf"))
        assert isinstance(max_, (int, float))
        propnames = schema.get("propertyNames", {})
        if len(schema["required"]) > max_:
            # More required names than the property budget allows.
            type_.remove("object")
        else:
            validator = make_validator(propnames)
            if not all(
                    validator.is_valid(name) for name in schema["required"]):
                # A required name violates propertyNames: no object can match.
                type_.remove("object")

    for t, kw in TYPE_SPECIFIC_KEYS:
        numeric = {"number", "integer"}
        # Drop keywords specific to types that can no longer occur.
        if t in type_ or (t in numeric and numeric.intersection(type_)):
            continue
        for k in kw.split():
            schema.pop(k, None)
    # Remove no-op requires
    if "required" in schema and not schema["required"]:
        schema.pop("required")
    # Canonicalise "not" subschemas
    if "not" in schema:
        not_ = schema.pop("not")
        if not_ == TRUTHY or not_ == schema:
            # If everything is rejected, discard all other (irrelevant) keys
            # TODO: more sensitive detection of cases where the not-clause
            # excludes everything in the schema.
            return FALSEY
        type_keys = {k: set(v.split()) for k, v in TYPE_SPECIFIC_KEYS}
        type_constraints = {"type"}
        for v in type_keys.values():
            type_constraints |= v
        if set(not_).issubset(type_constraints):
            not_["type"] = get_type(not_)
            for t in set(type_).intersection(not_["type"]):
                # If some type is allowed and totally unconstrained by the
                # "not" schema, it cannot be allowed
                if t == "integer" and "number" in type_:
                    continue
                if not type_keys.get(t, set()).intersection(not_):
                    type_.remove(t)
                    if t not in ("integer", "number"):
                        not_["type"].remove(t)
            not_ = canonicalish(not_)
        if not_ != FALSEY:
            # A FALSEY "not" rejects nothing, so it is simply dropped;
            # otherwise keep the (possibly simplified) subschema.
            schema["not"] = not_
    assert isinstance(type_, list), type_
    if not type_:
        assert type_ == []
        return FALSEY
    if type_ == ["null"]:
        return {"const": None}
    if type_ == ["boolean"]:
        return {"enum": [False, True]}
    if type_ == ["null", "boolean"]:
        return {"enum": [None, False, True]}
    if len(type_) == 1:
        schema["type"] = type_[0]
    elif type_ == get_type({}):
        # All types allowed: "type" carries no information.
        schema.pop("type", None)
    else:
        schema["type"] = type_
    # Canonicalise "xxxOf" lists; in each case canonicalising and sorting the
    # sub-schemas then handling any key-specific logic.
    if TRUTHY in schema.get("anyOf", ()):
        # One branch accepts everything, so the whole anyOf is a no-op.
        schema.pop("anyOf", None)
    if "anyOf" in schema:
        schema["anyOf"] = sorted(schema["anyOf"], key=encode_canonical_json)
        schema["anyOf"] = [s for s in schema["anyOf"] if s != FALSEY]
        if not schema["anyOf"]:
            return FALSEY
        if len(schema) == len(schema["anyOf"]) == 1:
            return schema["anyOf"][0]  # type: ignore
    if "allOf" in schema:
        schema["allOf"] = sorted(schema["allOf"], key=encode_canonical_json)
        if any(s == FALSEY for s in schema["allOf"]):
            return FALSEY
        if all(s == TRUTHY for s in schema["allOf"]):
            schema.pop("allOf")
        elif len(schema) == len(schema["allOf"]) == 1:
            return schema["allOf"][0]  # type: ignore
        else:
            tmp = schema.copy()
            ao = tmp.pop("allOf")
            out = merged([tmp] + ao)
            if isinstance(out, dict):  # pragma: no branch
                schema = out
            # TODO: this assertion is solely because mypy 0.750 doesn't know
            # that `schema` is a dict otherwise. Needs minimal report upstream.
            assert isinstance(schema, dict)
    if "oneOf" in schema:
        one_of = schema.pop("oneOf")
        assert isinstance(one_of, list)
        one_of = sorted(one_of, key=encode_canonical_json)
        one_of = [s for s in one_of if s != FALSEY]
        if len(one_of) == 1:
            m = merged([schema, one_of[0]])
            if m is not None:  # pragma: no branch
                return m
        if (not one_of) or one_of.count(TRUTHY) > 1:
            # No branch, or two always-true branches which can never be
            # satisfied exactly once.
            return FALSEY
        schema["oneOf"] = one_of
    # if/then/else schemas are ignored unless if and another are present
    if "if" not in schema:
        schema.pop("then", None)
        schema.pop("else", None)
    if "then" not in schema and "else" not in schema:
        schema.pop("if", None)
    if schema.get("uniqueItems") is False:
        # uniqueItems: false is the default and carries no information.
        del schema["uniqueItems"]
    return schema
def find(specifier, condition, settings=None, random=None, database_key=None):
    """Return the minimal example from ``specifier`` satisfying ``condition``.

    Runs the Conjecture engine with ``condition`` as the "interesting"
    predicate, then replays the smallest interesting buffer to rebuild the
    value.  Raises Timeout/Unsatisfiable if too few valid examples were
    found, or NoSuchExample if no satisfying example exists.
    """
    settings = settings or Settings(
        max_examples=2000,
        min_satisfying_examples=0,
        max_shrinks=2000,
    )

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument('Expected SearchStrategy but got %r of type %s' %
                              (specifier, type(specifier).__name__))

    search = specifier

    random = random or new_random()
    # Single-element lists used as mutable closure cells (pre-nonlocal style).
    successful_examples = [0]
    last_data = [None]

    def template_condition(data):
        with BuildContext():
            try:
                data.is_find = True
                result = data.draw(search)
                data.note(result)
                success = condition(result)
            except UnsatisfiedAssumption:
                # mark_invalid raises, aborting this test case; `success` and
                # `result` are deliberately left unset on this path.
                data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity == Verbosity.verbose:
            if not successful_examples[0]:
                report(lambda: u'Trying example %s' % (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    report(lambda: u'Found satisfying example %s' %
                           (nicerepr(result), ))
                else:
                    report(lambda: u'Shrunk example to %s' %
                           (nicerepr(result), ))
                last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    # Imported locally to avoid import cycles at module load time.
    from hypothesis.internal.conjecture.engine import TestRunner
    from hypothesis.internal.conjecture.data import TestData, Status

    start = time.time()
    runner = TestRunner(
        template_condition, settings=settings, random=random,
        database_key=database_key,
    )
    runner.run()
    run_time = time.time() - start
    if runner.last_data.status == Status.INTERESTING:
        # Replay the winning buffer to reconstruct the (shrunk) value.
        with BuildContext():
            return TestData.for_buffer(runner.last_data.buffer).draw(search)
    if runner.valid_examples <= settings.min_satisfying_examples:
        if settings.timeout > 0 and run_time > settings.timeout:
            raise Timeout((
                'Ran out of time before finding enough valid examples for '
                '%s. Only %d valid examples found in %.2f seconds.'
            ) % (get_pretty_function_description(condition),
                 runner.valid_examples, run_time))
        else:
            raise Unsatisfiable((
                'Unable to satisfy assumptions of '
                '%s. Only %d examples considered satisfied assumptions'
            ) % (
                get_pretty_function_description(condition),
                runner.valid_examples,
            ))
    raise NoSuchExample(get_pretty_function_description(condition))
def find(
    specifier,  # type: SearchStrategy
    condition,  # type: Callable[[Any], bool]
    settings=None,  # type: Settings
    random=None,  # type: Any
    database_key=None,  # type: bytes
):
    # type: (...) -> Any
    """Returns the minimal example from the given strategy ``specifier`` that
    matches the predicate function ``condition``.

    Deprecated: emits a deprecation note on every call.  Raises Unsatisfiable
    if the engine exhausted without any valid example, or NoSuchExample if no
    satisfying example was found.
    """
    note_deprecation(
        "`find(s, f)` is deprecated, because it is rarely used but takes "
        "ongoing work to maintain as we upgrade other parts of Hypothesis.",
        since="2019-07-11",
    )
    if settings is None:
        settings = Settings(max_examples=2000)
    # Health checks are tuned for @given-style tests; disable them here.
    settings = Settings(settings, suppress_health_check=HealthCheck.all())

    if database_key is None and settings.database is not None:
        database_key = function_digest(condition)

    if not isinstance(specifier, SearchStrategy):
        raise InvalidArgument("Expected SearchStrategy but got %r of type %s" %
                              (specifier, type(specifier).__name__))
    specifier.validate()

    search = specifier

    random = random or new_random()
    # Single-element lists used as mutable closure cells (pre-nonlocal style).
    successful_examples = [0]
    last_data = [None]
    last_repr = [None]

    def template_condition(data):
        with deterministic_PRNG():
            with BuildContext(data):
                try:
                    data.is_find = True
                    result = data.draw(search)
                    data.note(result)
                    success = condition(result)
                except UnsatisfiedAssumption:
                    # mark_invalid raises, aborting this test case; `success`
                    # and `result` are deliberately left unset on this path.
                    data.mark_invalid()

        if success:
            successful_examples[0] += 1

        if settings.verbosity >= Verbosity.verbose:
            if not successful_examples[0]:
                report(u"Tried non-satisfying example %s" %
                       (nicerepr(result), ))
            elif success:
                if successful_examples[0] == 1:
                    last_repr[0] = nicerepr(result)
                    report(u"Found satisfying example %s" % (last_repr[0], ))
                    last_data[0] = data
                elif (sort_key(hbytes(data.buffer)) < sort_key(
                        last_data[0].buffer)
                      ) and nicerepr(result) != last_repr[0]:
                    # Only report when both the buffer shrank and the repr
                    # actually changed, to avoid noisy duplicate output.
                    last_repr[0] = nicerepr(result)
                    report(u"Shrunk example to %s" % (last_repr[0], ))
                    last_data[0] = data
        if success and not data.frozen:
            data.mark_interesting()

    runner = ConjectureRunner(template_condition, settings=settings,
                              random=random, database_key=database_key)
    runner.run()
    note_engine_for_statistics(runner)
    if runner.interesting_examples:
        # Replay the stored buffer to reconstruct the minimal value.
        data = ConjectureData.for_buffer(
            list(runner.interesting_examples.values())[0].buffer)
        with deterministic_PRNG():
            with BuildContext(data):
                return data.draw(search)
    if runner.valid_examples == 0 and (runner.exit_reason !=
                                       ExitReason.finished):
        raise Unsatisfiable("Unable to satisfy assumptions of %s." %
                            (get_pretty_function_description(condition), ))
    raise NoSuchExample(get_pretty_function_description(condition))
def __not_a_first_class_strategy(self, name): raise InvalidArgument( ('Cannot call %s on a DataStrategy. You should probably be ' "using @composite for whatever it is you're trying to do.") % (name, ))
def wrapped_test(*arguments, **kwargs):
    """The replacement test body installed by @given.

    Resolves settings/strategies, optionally reproduces a stored failure,
    runs any explicit @example cases, then hands over to the Conjecture
    engine.  Closes over ``given_kwargs`` and ``argspec`` from the
    decorator's enclosing scope.
    """
    # Tell pytest to omit the body of this function from tracebacks
    __tracebackhide__ = True

    test = wrapped_test.hypothesis.inner_test

    if getattr(test, "is_hypothesis_test", False):
        raise InvalidArgument((
            "You have applied @given to the test %s more than once, which "
            "wraps the test several times and is extremely slow. A "
            "similar effect can be gained by combining the arguments "
            "of the two calls to given. For example, instead of "
            "@given(booleans()) @given(integers()), you could write "
            "@given(booleans(), integers())") % (test.__name__, ))

    settings = wrapped_test._hypothesis_internal_use_settings

    random = get_random_for_wrapped_test(test, wrapped_test)

    # Use type information to convert "infer" arguments into appropriate
    # strategies.
    if infer in given_kwargs.values():
        hints = get_type_hints(test)
        for name in [
                name for name, value in given_kwargs.items() if value is infer
        ]:
            if name not in hints:
                raise InvalidArgument(
                    "passed %s=infer for %s, but %s has no type annotation" %
                    (name, test.__name__, name))
            given_kwargs[name] = st.from_type(hints[name])

    processed_args = process_arguments_to_given(
        wrapped_test, arguments, kwargs, given_kwargs, argspec, test,
        settings,
    )
    arguments, kwargs, test_runner, search_strategy = processed_args

    runner = getattr(search_strategy, "runner", None)
    if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
        # e.g. applying @given to setUp/tearDown: never run as a test.
        msg = ("You have applied @given to the method %s, which is "
               "used by the unittest runner but is not itself a test."
               " This is not useful in any way." % test.__name__)
        fail_health_check(settings, msg, HealthCheck.not_a_test_method)
    if bad_django_TestCase(runner):  # pragma: no cover
        # Covered by the Django tests, but not the pytest coverage task
        raise InvalidArgument(
            "You have applied @given to a method on %s, but this "
            "class does not inherit from the supported versions in "
            "`hypothesis.extra.django`. Use the Hypothesis variants "
            "to ensure that each example is run in a separate "
            "database transaction." % qualname(type(runner)))

    state = StateForActualGivenExecution(
        test_runner, search_strategy, test, settings, random,
        had_seed=wrapped_test._hypothesis_internal_use_seed,
    )

    reproduce_failure = \
        wrapped_test._hypothesis_internal_use_reproduce_failure

    # If there was a @reproduce_failure decorator, use it to reproduce
    # the error (or complain that we couldn't). Either way, this will
    # always raise some kind of error.
    if reproduce_failure is not None:
        expected_version, failure = reproduce_failure
        if expected_version != __version__:
            raise InvalidArgument(
                ("Attempting to reproduce a failure from a different "
                 "version of Hypothesis. This failure is from %s, but "
                 "you are currently running %r. Please change your "
                 "Hypothesis version to a matching one.") %
                (expected_version, __version__))
        try:
            state.execute_once(
                ConjectureData.for_buffer(decode_failure(failure)),
                print_example=True,
                is_final=True,
            )
            raise DidNotReproduce(
                "Expected the test to raise an error, but it "
                "completed successfully.")
        except StopTest:
            raise DidNotReproduce(
                "The shape of the test data has changed in some way "
                "from where this blob was defined. Are you sure "
                "you're running the same test?")
        except UnsatisfiedAssumption:
            raise DidNotReproduce(
                "The test data failed to satisfy an assumption in the "
                "test. Have you added it since this blob was "
                "generated?")

    # There was no @reproduce_failure, so start by running any explicit
    # examples from @example decorators.
    execute_explicit_examples(state, wrapped_test, arguments, kwargs)

    # If there were any explicit examples, they all ran successfully.
    # The next step is to use the Conjecture engine to run the test on
    # many different inputs.
    if settings.max_examples <= 0:
        return

    if not (Phase.reuse in settings.phases
            or Phase.generate in settings.phases):
        return

    try:
        if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
            # subTest would swallow per-example failures; stub it out for
            # the engine run and restore it afterwards.
            subTest = runner.subTest
            try:
                runner.subTest = fake_subTest
                state.run_engine()
            finally:
                runner.subTest = subTest
        else:
            state.run_engine()
    except BaseException as e:
        # The exception caught here should either be an actual test
        # failure (or MultipleFailures), or some kind of fatal error
        # that caused the engine to stop.
        generated_seed = \
            wrapped_test._hypothesis_internal_use_generated_seed
        with local_settings(settings):
            if not (state.failed_normally or generated_seed is None):
                if running_under_pytest:
                    report(
                        "You can add @seed(%(seed)d) to this test or "
                        "run pytest with --hypothesis-seed=%(seed)d "
                        "to reproduce this failure." %
                        {"seed": generated_seed})
                else:
                    report("You can add @seed(%d) to this test to "
                           "reproduce this failure." % (generated_seed, ))
            # The dance here is to avoid showing users long tracebacks
            # full of Hypothesis internals they don't care about.
            # We have to do this inline, to avoid adding another
            # internal stack frame just when we've removed the rest.
            if PY2:
                # Python 2 doesn't have Exception.with_traceback(...);
                # instead it has a three-argument form of the `raise`
                # statement. Unfortunately this is a SyntaxError on
                # Python 3, and before Python 2.7.9 it was *also* a
                # SyntaxError to use it in a nested function so we
                # can't `exec` or `eval` our way out (BPO-21591).
                # So unless we break some versions of Python 2, none
                # of them get traceback elision.
                raise
            # On Python 3, we swap out the real traceback for our
            # trimmed version. Using a variable ensures that the line
            # which will actually appear in trackbacks is as clear as
            # possible - "raise the_error_hypothesis_found".
            the_error_hypothesis_found = e.with_traceback(
                get_trimmed_traceback())
            raise the_error_hypothesis_found
def floats(min_value=None, max_value=None, allow_nan=None,
           allow_infinity=None):
    """Returns a strategy which generates floats.

    - If min_value is not None, all values will be >= min_value.
    - If max_value is not None, all values will be <= max_value.
    - If min_value or max_value is not None, it is an error to enable
      allow_nan.
    - If both min_value and max_value are not None, it is an error to enable
      allow_infinity.

    Where not explicitly ruled out by the bounds, all of infinity, -infinity
    and NaN are possible values generated by this strategy.
    """
    if allow_nan is None:
        # NaN is only allowed by default on the fully unbounded strategy.
        allow_nan = bool(min_value is None and max_value is None)
    elif allow_nan:
        if min_value is not None or max_value is not None:
            raise InvalidArgument(
                'Cannot have allow_nan=%r, with min_value or max_value' % (
                    allow_nan))

    check_valid_bound(min_value, 'min_value')
    check_valid_bound(max_value, 'max_value')
    check_valid_interval(min_value, max_value, 'min_value', 'max_value')
    if min_value is not None:
        min_value = float(min_value)
    if max_value is not None:
        max_value = float(max_value)
    # Infinite bounds are equivalent to no bound at all.
    if min_value == float(u'-inf'):
        min_value = None
    if max_value == float(u'inf'):
        max_value = None

    if allow_infinity is None:
        allow_infinity = bool(min_value is None or max_value is None)
    elif allow_infinity:
        if min_value is not None and max_value is not None:
            raise InvalidArgument(
                'Cannot have allow_infinity=%r, with both min_value and '
                'max_value' % (allow_infinity))

    from hypothesis.searchstrategy.numbers import FloatStrategy, \
        FixedBoundedFloatStrategy
    if min_value is None and max_value is None:
        return FloatStrategy(
            allow_infinity=allow_infinity, allow_nan=allow_nan,
        )
    elif min_value is not None and max_value is not None:
        if min_value == max_value:
            return just(min_value)
        elif math.isinf(max_value - min_value):
            # The interval is too wide to compute its width in floats; split
            # it at zero into two finite-width halves.
            assert min_value < 0 and max_value > 0
            return floats(min_value=0, max_value=max_value) | floats(
                min_value=min_value, max_value=0)
        elif count_between_floats(min_value, max_value) > 1000:
            return FixedBoundedFloatStrategy(
                lower_bound=min_value, upper_bound=max_value)
        elif is_negative(max_value):
            # Few representable values: enumerate them via their bit
            # patterns. For negatives the integer encoding is reversed, so
            # max_value encodes to the *smaller* integer.
            assert is_negative(min_value)
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert ub_int <= lb_int
            return integers(min_value=ub_int,
                            max_value=lb_int).map(int_to_float)
        elif is_negative(min_value):
            # Straddles zero: split so each half has a consistent sign
            # ordering in the bit-pattern encoding.
            return floats(min_value=min_value, max_value=-0.0) | floats(
                min_value=0, max_value=max_value)
        else:
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert lb_int <= ub_int
            return integers(min_value=lb_int,
                            max_value=ub_int).map(int_to_float)
    elif min_value is not None:
        if min_value < 0:
            result = floats(min_value=0.0) | floats(min_value=min_value,
                                                    max_value=0.0)
        else:
            # Shift unbounded non-NaN draws up by min_value via abs().
            result = (floats(allow_infinity=allow_infinity,
                             allow_nan=False).map(
                lambda x: assume(not math.isnan(x)) and min_value + abs(x)))
        if min_value == 0 and not is_negative(min_value):
            # min_value is +0.0, so -0.0 must be excluded; filter on the
            # sign bit, which ordinary comparison cannot see.
            result = result.filter(lambda x: math.copysign(1.0, x) == 1)
        return result
    else:
        assert max_value is not None
        if max_value > 0:
            result = floats(
                min_value=0.0,
                max_value=max_value,
            ) | floats(max_value=0.0)
        else:
            # Shift unbounded non-NaN draws down by max_value via abs().
            result = (floats(allow_infinity=allow_infinity,
                             allow_nan=False).map(
                lambda x: assume(not math.isnan(x)) and max_value - abs(x)))
        if max_value == 0 and is_negative(max_value):
            # max_value is -0.0, so +0.0 must be excluded.
            result = result.filter(is_negative)
        return result
def lists(
    elements=None, min_size=None, average_size=None, max_size=None,
    unique_by=None, unique=False,
):
    """Returns a list containing values drawn from elements length in the
    interval [min_size, max_size] (no bounds in that direction if these are
    None). If max_size is 0 then elements may be None and only the empty list
    will be drawn.

    average_size may be used as a size hint to roughly control the size of
    list but it may not be the actual average of sizes you get, due to a
    variety of factors.

    If unique is True (or something that evaluates to True), we compare direct
    object equality, as if unique_by was `lambda x: x`. This comparison only
    works for hashable types.

    if unique_by is not None it must be a function returning a hashable type
    when given a value drawn from elements. The resulting list will satisfy
    the condition that for i != j, unique_by(result[i]) != unique_by(result[j]).
    """
    if unique:
        if unique_by is not None:
            raise InvalidArgument((
                u'cannot specify both unique and unique_by (you probably only '
                u'want to set unique_by)'))
        else:
            # unique=True is sugar for identity-keyed uniqueness.
            unique_by = lambda x: x

    if unique_by is not None:
        from hypothesis.searchstrategy.collections import UniqueListStrategy
        if max_size == 0:
            # Only the empty list is possible, so no elements are needed.
            return builds(list)
        check_strategy(elements)
        if min_size is not None and elements.template_upper_bound < min_size:
            # Not enough distinct values to ever reach min_size uniquely.
            raise InvalidArgument((
                u'Cannot generate unique lists of size %d from %r, which '
                u'contains no more than %d distinct values') % (
                    min_size, elements, elements.template_upper_bound,
                ))
        min_size = min_size or 0
        max_size = max_size or float(u'inf')
        # The list can never exceed the number of distinct values available.
        max_size = min(max_size, elements.template_upper_bound)
        if average_size is None:
            if max_size < float(u'inf'):
                if max_size <= 5:
                    # For tiny lists, bias towards the larger sizes.
                    average_size = min_size + 0.75 * (max_size - min_size)
                else:
                    average_size = (max_size + min_size) / 2
            else:
                average_size = max(Settings.default.average_list_length,
                                   min_size * 2)
        check_valid_sizes(min_size, average_size, max_size)
        result = UniqueListStrategy(
            elements=elements, average_size=average_size, max_size=max_size,
            min_size=min_size, key=unique_by)
        return result

    check_valid_sizes(min_size, average_size, max_size)
    from hypothesis.searchstrategy.collections import ListStrategy, \
        SingleElementListStrategy
    if min_size is None:
        min_size = 0
    if average_size is None:
        if max_size is None:
            average_size = Settings.default.average_list_length
        else:
            average_size = (min_size + max_size) * 0.5

    if elements is None or (max_size is not None and max_size <= 0):
        if max_size is None or max_size > 0:
            raise InvalidArgument(
                u'Cannot create non-empty lists without an element type')
        else:
            return ListStrategy(())
    else:
        check_strategy(elements)
        if elements.template_upper_bound == 1:
            # Only one possible value: the list is fully determined by its
            # length, so just draw the length.
            from hypothesis.searchstrategy.numbers import \
                IntegersFromStrategy
            if max_size is None:
                length_strat = IntegersFromStrategy(
                    min_size, average_size=average_size - min_size)
            else:
                length_strat = integers(min_size, max_size)
            return SingleElementListStrategy(elements, length_strat)
        return ListStrategy(
            (elements, ), average_length=average_size,
            min_size=min_size, max_size=max_size,
        )
def lists(
    elements=None, min_size=None, average_size=None, max_size=None,
    unique_by=None, unique=False,
):
    """Returns a list containing values drawn from elements length in the
    interval [min_size, max_size] (no bounds in that direction if these are
    None). If max_size is 0 then elements may be None and only the empty list
    will be drawn.

    average_size may be used as a size hint to roughly control the size of
    list but it may not be the actual average of sizes you get, due to a
    variety of factors.

    If unique is True (or something that evaluates to True), we compare direct
    object equality, as if unique_by was `lambda x: x`. This comparison only
    works for hashable types.

    if unique_by is not None it must be a function returning a hashable type
    when given a value drawn from elements. The resulting list will satisfy
    the condition that for i != j, unique_by(result[i]) != unique_by(result[j]).
    """
    check_valid_sizes(min_size, average_size, max_size)
    if elements is None or (max_size is not None and max_size <= 0):
        if max_size is None or max_size > 0:
            raise InvalidArgument(
                u'Cannot create non-empty lists without an element type')
        else:
            # max_size == 0: only the empty list is possible.
            return builds(list)
    check_strategy(elements)
    if elements.is_empty:
        if (min_size or 0) > 0:
            raise InvalidArgument(
                ('Cannot create non-empty lists with elements drawn from '
                 'strategy %r because it has no values.') % (elements, ))
        else:
            # An empty elements strategy can still produce the empty list.
            return builds(list)
    if unique:
        if unique_by is not None:
            raise InvalidArgument(
                ('cannot specify both unique and unique_by (you probably only '
                 'want to set unique_by)'))
        else:
            # unique=True is sugar for identity-keyed uniqueness.
            unique_by = lambda x: x

    if unique_by is not None:
        from hypothesis.searchstrategy.collections import UniqueListStrategy
        check_strategy(elements)
        min_size = min_size or 0
        max_size = max_size or float(u'inf')
        if average_size is None:
            if max_size < float(u'inf'):
                if max_size <= 5:
                    # For tiny lists, bias towards the larger sizes.
                    average_size = min_size + 0.75 * (max_size - min_size)
                else:
                    average_size = (max_size + min_size) / 2
            else:
                average_size = max(_AVERAGE_LIST_LENGTH, min_size * 2)
        check_valid_sizes(min_size, average_size, max_size)
        result = UniqueListStrategy(
            elements=elements, average_size=average_size, max_size=max_size,
            min_size=min_size, key=unique_by)
        return result

    check_valid_sizes(min_size, average_size, max_size)
    from hypothesis.searchstrategy.collections import ListStrategy
    if min_size is None:
        min_size = 0
    if average_size is None:
        if max_size is None:
            average_size = _AVERAGE_LIST_LENGTH
        else:
            average_size = (min_size + max_size) * 0.5

    check_strategy(elements)
    return ListStrategy(
        (elements, ), average_length=average_size,
        min_size=min_size, max_size=max_size,
    )
def _check_style(style: str) -> None: if style not in ("pytest", "unittest"): raise InvalidArgument(f"Valid styles are 'pytest' or 'unittest', got {style!r}")
def _validate_phases(phases): phases = tuple(phases) for a in phases: if not isinstance(a, Phase): raise InvalidArgument("%r is not a valid phase" % (a, )) return tuple(p for p in list(Phase) if p in phases)
def by_name(cls, key): result = getattr(cls, key, None) if isinstance(result, Verbosity): return result raise InvalidArgument('No such verbosity level %r' % (key,))
def _validate_stateful_step_count(x): check_type(int, x, name="stateful_step_count") if x < 1: raise InvalidArgument("stateful_step_count=%r must be at least one." % (x, )) return x
def wrapped_test(*arguments, **kwargs): raise InvalidArgument(message)
def __call__(self, test):
    """Make the settings object (self) an attribute of the test.

    The settings are later discovered by looking them up on the test itself.

    Also, we want to issue a deprecation warning for settings used alone
    (without @given) so, note the deprecation in the new test, but also
    attach the version without the warning as an attribute, so that @given
    can unwrap it (since if @given is used, that means we don't want the
    deprecation warning).

    When it's time to turn the warning into an error, we'll raise an
    exception instead of calling note_deprecation (and can delete
    "test(*args, **kwargs)").
    """
    # @settings must decorate something callable: a test function or a
    # GenericStateMachine subclass.
    if not callable(test):
        raise InvalidArgument(
            "settings objects can be called as a decorator with @given, "
            "but test=%r" % (test, ))
    if inspect.isclass(test):
        # Local import to avoid a circular dependency with hypothesis.stateful.
        from hypothesis.stateful import GenericStateMachine
        if issubclass(test, GenericStateMachine):
            attr_name = "_hypothesis_internal_settings_applied"
            # Guard against stacking two @settings on the same class, which
            # would silently discard the first one's options.
            if getattr(test, attr_name, False):
                raise InvalidArgument(
                    "Applying the @settings decorator twice would "
                    "overwrite the first version; merge their arguments "
                    "instead.")
            setattr(test, attr_name, True)
            # State machines run through their generated TestCase, so the
            # settings are stored there.
            test.TestCase.settings = self
            return test
        else:
            raise InvalidArgument(
                "@settings(...) can only be used as a decorator on "
                "functions, or on subclasses of GenericStateMachine.")
    if hasattr(test, "_hypothesis_internal_settings_applied"):
        raise InvalidArgument(
            "%s has already been decorated with a settings object."
            "\n Previous: %r\n This: %r" % (
                get_pretty_function_description(test),
                test._hypothesis_internal_use_settings,
                self,
            ))
    test._hypothesis_internal_use_settings = self
    # For double-@settings check:
    test._hypothesis_internal_settings_applied = True

    @proxies(test)
    def new_test(*args, **kwargs):
        raise InvalidArgument(
            "Using `@settings` on a test without `@given` is completely pointless."
        )

    # @given will get the test from this attribution (rather than use the
    # version with the deprecation warning)
    new_test._hypothesis_internal_test_function_without_warning = test

    # This means @given has been applied, so we don't need to worry about
    # warning for @settings alone.
    has_given_applied = getattr(test, "is_hypothesis_test", False)
    test_to_use = test if has_given_applied else new_test
    test_to_use._hypothesis_internal_use_settings = self
    # Can't use _hypothesis_internal_use_settings as an indicator that
    # @settings was applied, because @given also assigns that attribute.
    test._hypothesis_internal_settings_applied = True
    return test_to_use
def pytest_runtest_call(item):
    """Hookwrapper around pytest's test-call phase for Hypothesis tests.

    Validates decorator usage on non-Hypothesis tests, health-checks
    function-scoped fixtures, assigns per-parametrization database keys,
    and captures Hypothesis reports/statistics onto the pytest item.
    (Yield-based: the wrapped test runs at each ``yield``.)
    """
    if not hasattr(item, "obj"):
        # Not a Python test item (no underlying object) -- nothing to check.
        yield
    elif not is_hypothesis_test(item.obj):
        # If @given was not applied, check whether other hypothesis
        # decorators were applied, and raise an error if they were.
        if getattr(item.obj, "is_hypothesis_strategy_function", False):
            raise InvalidArgument(
                "%s is a function that returns a Hypothesis strategy, but pytest "
                "has collected it as a test function. This is useless as the "
                "function body will never be executed. To define a test "
                "function, use @given instead of @composite." % (item.nodeid, ))
        message = "Using `@%s` on a test without `@given` is completely pointless."
        # Each decorator leaves a marker attribute on the test object.
        for name, attribute in [
            ("example", "hypothesis_explicit_examples"),
            ("seed", "_hypothesis_internal_use_seed"),
            ("settings", "_hypothesis_internal_settings_applied"),
            ("reproduce_example", "_hypothesis_internal_use_reproduce_failure"),
        ]:
            if hasattr(item.obj, attribute):
                raise InvalidArgument(message % (name, ))
        yield
    else:
        # Retrieve the settings for this test from the test object, which
        # is normally a Hypothesis wrapped_test wrapper. If this doesn't
        # work, the test object is probably something weird
        # (e.g a stateful test wrapper), so we skip the function-scoped
        # fixture check.
        settings = getattr(item.obj, "_hypothesis_internal_use_settings",
                           None)

        # Check for suspicious use of function-scoped fixtures, but only
        # if the corresponding health check is not suppressed.
        if (settings is not None and
                HealthCheck.function_scoped_fixture not in
                settings.suppress_health_check):
            # Warn about function-scoped fixtures, excluding autouse fixtures because
            # the advice is probably not actionable and the status quo seems OK...
            # See https://github.com/HypothesisWorks/hypothesis/issues/377 for detail.
            msg = (
                "%s uses the %r fixture, which is reset between function calls but not "
                "between test cases generated by `@given(...)`. You can change it to "
                "a module- or session-scoped fixture if it is safe to reuse; if not "
                "we recommend using a context manager inside your test function. See "
                "https://docs.pytest.org/en/latest/fixture.html#sharing-test-data "
                "for details on fixture scope.")
            argnames = None
            for fx_defs in item._request._fixturemanager.getfixtureinfo(
                    node=item, func=item.function,
                    cls=None).name2fixturedefs.values():
                if argnames is None:
                    # Lazily computed once: the fixture names the test
                    # actually requests in its signature (excludes autouse).
                    argnames = frozenset(
                        signature(item.function).parameters)
                for fx in fx_defs:
                    if fx.argname in argnames:
                        active_fx = item._request._get_active_fixturedef(
                            fx.argname)
                        if active_fx.scope == "function":
                            fail_health_check(
                                settings,
                                msg % (item.nodeid, fx.argname),
                                HealthCheck.function_scoped_fixture,
                            )

        if item.get_closest_marker("parametrize") is not None:
            # Give every parametrized test invocation a unique database key
            key = item.nodeid.encode("utf-8")
            item.obj.hypothesis.inner_test._hypothesis_internal_add_digest = key

        # Capture Hypothesis's reporter output so it can be attached to the
        # pytest item instead of being printed directly.
        store = StoringReporter(item.config)

        def note_statistics(stats):
            stats["nodeid"] = item.nodeid
            item.hypothesis_statistics = base64.b64encode(
                describe_statistics(stats).encode()).decode()

        with collector.with_value(note_statistics):
            with with_reporter(store):
                yield
        if store.results:
            item.hypothesis_report_information = list(store.results)
def new_test(*args, **kwargs):
    """Replacement test body that refuses to run, because applying
    @settings without @given has no effect."""
    complaint = (
        "Using `@settings` on a test without `@given` is completely pointless."
    )
    raise InvalidArgument(complaint)
def get_lines_num(draw, lines_param):
    # NOTE(review): only the rejection branch is visible here; presumably the
    # accepting branches (handling an int or None lines_param, and using
    # `draw`) exist in the full definition -- verify against the original file.
    raise InvalidArgument("Lines param must be an integer or None")
def current_build_context():
    """Return the currently registered build context.

    Raises InvalidArgument when no build context is active.
    """
    active = _current_build_context.value
    if active is not None:
        return active
    raise InvalidArgument(u'No build context registered')