def wrapped_test(*arguments):
    """Run ``test``, searching for a falsifying example via a Verifier.

    Closes over ``test``, ``kwargs`` and ``generator_arguments`` from the
    enclosing decorator scope.
    """
    # NOTE(review): deleting 'verifier' from the closed-over kwargs mutates
    # it on the first call, so later calls would take the default branch —
    # TODO confirm against the enclosing decorator's intent.
    if "verifier" in kwargs:
        verifier = kwargs["verifier"]
        del kwargs["verifier"]
    else:
        verifier = Verifier()

    # The only thing we accept in falsifying the test are exceptions.
    # Returning successfully is always a pass.
    def to_falsify(xs):
        testargs, testkwargs = xs
        try:
            test(*(arguments + testargs), **testkwargs)
            return True
        except UnsatisfiedAssumption:
            # Bare raise preserves the original traceback (was `raise e`).
            raise
        except Exception:  # pylint: disable=broad-except
            return False

    try:
        falsifying_example = verifier.falsify(
            to_falsify, (generator_arguments, kwargs))[0]
    except Unfalsifiable:
        return
    # We run this one final time so we get good errors.
    # Otherwise we would have swallowed all the reports of it actually
    # having gone wrong.
    test(*(arguments + falsifying_example[0]), **falsifying_example[1])
def test_minor_variations_in_code_change_the_randomization():
    """Two structurally different predicates derandomize differently."""
    strategy_table = StrategyTable()
    derandomized = hs.Settings(derandomize=True)
    derandomized.database = None
    strategy_table.define_specification_for(
        float, lambda *_: BrokenFloatStrategy())
    verifier = Verifier(strategy_table=strategy_table, settings=derandomized)
    first = verifier.falsify(lambda x: x == 42, float)
    second = verifier.falsify(lambda x: x == 1, float)
    assert first != second
def test_a_derandomized_verifier_produces_the_same_results_called_twice():
    """Derandomized falsification is deterministic across invocations."""
    strategy_table = StrategyTable()
    strategy_table.define_specification_for(
        float, lambda *_: BrokenFloatStrategy())
    verifier = Verifier(
        strategy_table=strategy_table,
        settings=hs.Settings(derandomize=True))

    def always_false(x):
        return False

    first = verifier.falsify(always_false, float)
    second = verifier.falsify(always_false, float)
    assert first == second
def test_can_falsify_types_without_default_productions():
    """Falsification works for a type only a locally built table can produce.

    The default strategy table must not know how to build a ``Bar``;
    a verifier handed an explicit table must.
    """
    strategies = SearchStrategies()
    strategies.define_specification_for(Bar, BarStrategy)
    # The global default table has no specification for Bar.
    with pytest.raises(MissingSpecification):
        SearchStrategies.default().strategy(Bar)
    # PEP 8: no spaces around keyword '=', no space before ':' in lambdas,
    # and no stray trailing comma in the single-argument call.
    verifier = Verifier(search_strategies=strategies)
    assert verifier.falsify(lambda x: False, Bar)[0] == Bar()
    assert verifier.falsify(lambda x: x.size() < 3, Bar)[0] == Bar(Bar(Bar()))
def given(*generator_arguments, **kwargs):
    """Decorator: run the decorated test against generated examples.

    Accepts an explicit ``verifier`` or ``verifier_settings`` keyword;
    otherwise a default Verifier is constructed.
    """
    if 'verifier' in kwargs:
        verifier = kwargs.pop('verifier')
        verifier.start_time = time.time()
    elif 'verifier_settings' in kwargs:
        verifier = Verifier(settings=kwargs.pop('verifier_settings'))
    else:
        verifier = Verifier()

    def run_test_with_generator(test):
        def wrapped_test(*arguments):
            # The only thing we accept in falsifying the test are
            # exceptions; returning successfully is always a pass.
            def to_falsify(xs):
                testargs, testkwargs = xs
                try:
                    test(*(arguments + testargs), **testkwargs)
                except UnsatisfiedAssumption as e:
                    raise e
                except Exception:  # pylint: disable=broad-except
                    return False
                return True

            to_falsify.__name__ = test.__name__
            to_falsify.__qualname__ = getattr(
                test, '__qualname__', test.__name__)

            try:
                falsifying_example = verifier.falsify(
                    to_falsify, (generator_arguments, kwargs))[0]
            except Unfalsifiable:
                return

            print('Falsifying example: %s' % (arg_string(
                test,
                arguments + falsifying_example[0],
                falsifying_example[1])))

            # We run this one final time so we get good errors.
            # Otherwise we would have swallowed all the reports of it
            # actually having gone wrong.
            test(*(arguments + falsifying_example[0]),
                 **falsifying_example[1])

            # If we get here then something has gone wrong: We found a
            # counter example but it didn't fail when we invoked it again.
            raise Flaky(test, falsifying_example)

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        wrapped_test.verifier = verifier
        return wrapped_test
    return run_test_with_generator
def test_raises_timeout_on_timeout():
    """A verifier whose time budget is exhausted raises Timeout."""
    # Slow enough that it won't get a full set of examples but fast
    # enough that it will get at least min_satisfying_examples.
    delay = 0.001
    budget = delay * hs.default.min_satisfying_examples * 2

    def good_but_slow(x):
        time.sleep(delay)
        return True

    verifier = Verifier(settings=hs.Settings(timeout=budget))
    with pytest.raises(Timeout):
        verifier.falsify(good_but_slow, int)
def test_can_falsify_types_without_default_productions():
    """A locally registered strategy suffices to falsify over Bar."""
    local_strategies = SearchStrategies()
    local_strategies.define_specification_for(Bar, BarStrategy)
    # The global default table must not know how to build a Bar.
    with pytest.raises(MissingSpecification):
        SearchStrategies.default().strategy(Bar)
    verifier = Verifier(search_strategies=local_strategies)
    simplest = verifier.falsify(lambda x: False, Bar)[0]
    assert simplest == Bar()
    small = verifier.falsify(lambda x: x.size() < 3, Bar)[0]
    assert small == Bar(Bar(Bar()))
def test_can_falsify_types_without_default_productions():
    """StrategyTable variant: a local table can build Bar; the default cannot."""
    table = StrategyTable()
    table.define_specification_for(
        Bar,
        lambda s, d: BarStrategy(
            s.strategy(descriptors.integers_in_range(0, 100))))
    with pytest.raises(MissingSpecification):
        StrategyTable.default().strategy(Bar)
    verifier = Verifier(strategy_table=table)
    assert verifier.falsify(lambda x: False, Bar)[0] == Bar()
    assert verifier.falsify(
        lambda x: x.size() < 3, Bar)[0] == Bar(Bar(Bar()))
def test_still_minimizes_on_non_assertion_failures():
    """Minimization applies to arbitrary exceptions, not just asserts."""
    @given(int, verifier=Verifier(starting_size=500))
    def is_not_too_large(x):
        if x >= 10:
            raise ValueError("No, %s is just too large. Sorry" % x)

    with pytest.raises(ValueError) as exinfo:
        is_not_too_large()
    # The minimal failing value (10) should appear in the message.
    assert " 10 " in exinfo.value.args[0]
def test_gravitates_towards_good_parameter_values():
    """Parameter search should steer towards values passing the assumptions."""
    hits = [0]
    attempts = [0]
    verifier = Verifier(settings=hs.Settings(database=None))

    def just_being_awkward(xs):
        assume(len(xs) >= 10)
        attempts[0] += 1
        assume(all(x >= 0 for x in xs))
        hits[0] += 1
        return True

    with pytest.raises(Unfalsifiable):
        verifier.falsify(just_being_awkward, [float])

    assert attempts[0] >= 300
    # A small tail probability at p=0.6 means the hit rate beat what a
    # purely random search would achieve.
    probability = cumulative_binomial_probability(
        n=attempts[0],
        k=hits[0],
        p=0.6,
    )
    assert probability >= 0.01
def test_two_verifiers_produce_different_results_in_normal_mode():
    """Without derandomization, independent verifiers randomize differently."""
    settings = hs.Settings()
    settings.database = None
    table = StrategyTable()
    table.define_specification_for(float, lambda *_: BrokenFloatStrategy())
    first = Verifier(strategy_table=table, settings=settings)
    second = Verifier(strategy_table=table, settings=settings)
    result_one = first.falsify(lambda x: False, float)
    result_two = second.falsify(lambda x: False, float)
    assert result_one != result_two
def given(*generator_arguments, **kwargs):
    """Decorator: run the decorated test against generated examples.

    An explicit ``verifier`` keyword overrides the default Verifier.
    The remaining keyword arguments are passed through as the descriptor
    of keyword examples for the test.
    """
    if "verifier" in kwargs:
        # dict.pop does the lookup and removal in one step (was get + del).
        verifier = kwargs.pop("verifier")
    else:
        verifier = Verifier()

    def run_test_with_generator(test):
        def wrapped_test(*arguments):
            # The only thing we accept in falsifying the test are
            # exceptions. Returning successfully is always a pass.
            def to_falsify(xs):
                testargs, testkwargs = xs
                try:
                    test(*(arguments + testargs), **testkwargs)
                    return True
                except UnsatisfiedAssumption:
                    # Bare raise preserves the original traceback
                    # (was `raise e`, which re-raises but rebinds).
                    raise
                except Exception:  # pylint: disable=broad-except
                    return False

            try:
                falsifying_example = verifier.falsify(
                    to_falsify, (generator_arguments, kwargs))[0]
            except Unfalsifiable:
                return
            # We run this one final time so we get good errors.
            # Otherwise we would have swallowed all the reports of it
            # actually having gone wrong.
            test(*(arguments + falsifying_example[0]),
                 **falsifying_example[1])

        wrapped_test.__name__ = test.__name__
        wrapped_test.__doc__ = test.__doc__
        return wrapped_test
    return run_test_with_generator
def test_can_minimize_lists_of_floats():
    """A fixed seed reproducibly minimizes to the simplest counter-example."""
    seed = 196269418687253827969443357943160693167
    verifier = Verifier(random=Random(seed))
    counter_example = verifier.falsify(
        lambda x, y: False, float, frozenset([bool]))
    assert counter_example == (0.0, frozenset())
def test_can_derandomize_on_evalled_functions():
    """Derandomization must work even for predicates built via eval()."""
    verifier = Verifier(
        strategy_table=StrategyTable(),
        settings=hs.Settings(derandomize=True))
    predicate = eval('lambda x: x > 0')
    assert verifier.falsify(predicate, int) == (0, )