def test_ceil(x): """The compat ceil function always has the Python 3 semantics. Under Python 2, math.ceil returns a float, which cannot represent large integers - for example, `float(2**53) == float(2**53 + 1)` - and this is obviously incorrect for unlimited-precision integer operations. """ assert isinstance(ceil(x), integer_types) assert x <= ceil(x) < x + 1
def test_ceil(x): """The compat ceil function always has the Python 3 semantics. Under Python 2, math.ceil returns a float, which cannot represent large integers - for example, `float(2**53) == float(2**53 + 1)` - and this is obviously incorrect for unlimited-precision integer operations. """ assert isinstance(ceil(x), int) assert x <= ceil(x) < x + 1
def test(*args, **kwargs):
    self.__test_runtime = None
    initial_draws = len(data.draw_times)
    start = benchmark_time()
    result = self.test(*args, **kwargs)
    finish = benchmark_time()
    internal_draw_time = sum(data.draw_times[initial_draws:])
    runtime = (finish - start - internal_draw_time) * 1000
    self.__test_runtime = runtime
    if self.settings.deadline is not_set:
        if (
            not self.__warned_deadline and
            runtime >= 200
        ):
            self.__warned_deadline = True
            note_deprecation((
                'Test: %s took %.2fms to run. In future the '
                'default deadline setting will be 200ms, which '
                'will make this an error. You can set deadline to '
                'an explicit value of e.g. %d to turn tests '
                'slower than this into an error, or you can set '
                'it to None to disable this check entirely.') % (
                    self.test.__name__, runtime,
                    ceil(runtime / 100) * 100,
            ))
    else:
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline *= 1.25
        if runtime >= current_deadline:
            raise DeadlineExceeded(runtime, self.settings.deadline)
    return result
def test(*args, **kwargs):
    self.__test_runtime = None
    initial_draws = len(data.draw_times)
    start = benchmark_time()
    result = self.test(*args, **kwargs)
    finish = benchmark_time()
    internal_draw_time = sum(data.draw_times[initial_draws:])
    runtime = (finish - start - internal_draw_time) * 1000
    self.__test_runtime = runtime
    if self.settings.deadline is not_set:
        if (not self.__warned_deadline and runtime >= 200):
            self.__warned_deadline = True
            note_deprecation(
                ('Test took %.2fms to run. In future the default '
                 'deadline setting will be 200ms, which will '
                 'make this an error. You can set deadline to '
                 'an explicit value of e.g. %d to turn tests '
                 'slower than this into an error, or you can set '
                 'it to None to disable this check entirely.') % (
                    runtime, ceil(runtime / 100) * 100,
                ))
    else:
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline *= 1.25
        if runtime >= current_deadline:
            raise DeadlineExceeded(runtime, self.settings.deadline)
    return result
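# A worked example of the suggested-deadline rounding used in the
# deprecation message above: the observed runtime is rounded up to the next
# multiple of 100ms, so a 237.5ms run would suggest a deadline of 300.
from math import ceil
assert ceil(237.5 / 100) * 100 == 300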
def get_integer_predicate_bounds(predicate: Predicate) -> ConstructivePredicate:
    kwargs, predicate = get_numeric_predicate_bounds(predicate)  # type: ignore

    if "min_value" in kwargs:
        if kwargs["min_value"] == -math.inf:
            del kwargs["min_value"]
        elif math.isinf(kwargs["min_value"]):
            return UNSATISFIABLE
        elif kwargs["min_value"] != int(kwargs["min_value"]):
            kwargs["min_value"] = ceil(kwargs["min_value"])
        elif kwargs.get("exclude_min", False):
            kwargs["min_value"] = int(kwargs["min_value"]) + 1

    if "max_value" in kwargs:
        if kwargs["max_value"] == math.inf:
            del kwargs["max_value"]
        elif math.isinf(kwargs["max_value"]):
            return UNSATISFIABLE
        elif kwargs["max_value"] != int(kwargs["max_value"]):
            kwargs["max_value"] = floor(kwargs["max_value"])
        elif kwargs.get("exclude_max", False):
            kwargs["max_value"] = int(kwargs["max_value"]) - 1

    kwargs = {k: v for k, v in kwargs.items() if k in {"min_value", "max_value"}}
    return ConstructivePredicate(kwargs, predicate)
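# A standalone sketch of the bound-tightening rules applied above: over the
# integers, a fractional lower bound rounds up, a fractional upper bound
# rounds down, and exclusive integer bounds bump by one. The `tighten`
# helper here is illustrative only, not part of Hypothesis.
from math import ceil, floor

def tighten(min_value=None, max_value=None, exclude_min=False, exclude_max=False):
    if min_value is not None:
        if min_value != int(min_value):
            min_value = ceil(min_value)      # e.g. x >= 2.5 becomes x >= 3
        elif exclude_min:
            min_value = int(min_value) + 1   # e.g. x > 2 becomes x >= 3
    if max_value is not None:
        if max_value != int(max_value):
            max_value = floor(max_value)     # e.g. x <= 7.9 becomes x <= 7
        elif exclude_max:
            max_value = int(max_value) - 1   # e.g. x < 7 becomes x <= 6
    return min_value, max_value

assert tighten(min_value=2.5) == (3, None)
assert tighten(min_value=2, exclude_min=True) == (3, None)
assert tighten(max_value=7.9) == (None, 7)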
def reuse_existing_examples(self):
    """If appropriate (we have a database and have been told to use it),
    try to reload existing examples from the database.

    If there are a lot we don't try all of them. We always try the
    smallest example in the database (which is guaranteed to be the
    last failure) and the largest (which is usually the seed example
    which the last failure came from but we don't enforce that). We
    then take a random sampling of the remainder and try those. Any
    examples that are no longer interesting are cleared out.
    """
    if self.has_existing_examples():
        # We have to do some careful juggling here. We have two database
        # corpora: The primary and secondary. The primary corpus is a
        # small set of minimized examples each of which has at one point
        # demonstrated a distinct bug. We want to retry all of these.

        # We also have a secondary corpus of examples that have at some
        # point demonstrated interestingness (currently only ones that
        # were previously non-minimal examples of a bug, but this will
        # likely expand in future). These are a good source of potentially
        # interesting examples, but there are a lot of them, so we down
        # sample the secondary corpus to a more manageable size.

        corpus = sorted(
            self.settings.database.fetch(self.database_key),
            key=sort_key
        )
        desired_size = max(2, ceil(0.1 * self.settings.max_examples))

        if len(corpus) < desired_size:
            secondary_corpus = list(
                self.settings.database.fetch(self.secondary_key),
            )

            shortfall = desired_size - len(corpus)

            if len(secondary_corpus) <= shortfall:
                extra = secondary_corpus
            else:
                extra = self.random.sample(secondary_corpus, shortfall)
            extra.sort(key=sort_key)
            corpus.extend(extra)

        for existing in corpus:
            if self.valid_examples >= self.settings.max_examples:
                self.exit_with(ExitReason.max_examples)
            if self.call_count >= max(
                self.settings.max_iterations, self.settings.max_examples
            ):
                self.exit_with(ExitReason.max_iterations)
            self.last_data = ConjectureData.for_buffer(existing)
            self.test_function(self.last_data)
            if self.last_data.status != Status.INTERESTING:
                self.settings.database.delete(
                    self.database_key, existing)
                self.settings.database.delete(
                    self.secondary_key, existing)
def test_shrinks_downwards_to_integers(f):
    g = minimal(
        st.floats(),
        lambda x: x >= f,
        random=Random(0),
        settings=settings(verbosity=Verbosity.quiet),
    )
    assert g == ceil(f)
def reuse_existing_examples(self):
    """If appropriate (we have a database and have been told to use it),
    try to reload existing examples from the database.

    If there are a lot we don't try all of them. We always try the
    smallest example in the database (which is guaranteed to be the
    last failure) and the largest (which is usually the seed example
    which the last failure came from but we don't enforce that). We
    then take a random sampling of the remainder and try those. Any
    examples that are no longer interesting are cleared out.
    """
    if self.has_existing_examples():
        self.debug('Reusing examples from database')
        # We have to do some careful juggling here. We have two database
        # corpora: The primary and secondary. The primary corpus is a
        # small set of minimized examples each of which has at one point
        # demonstrated a distinct bug. We want to retry all of these.

        # We also have a secondary corpus of examples that have at some
        # point demonstrated interestingness (currently only ones that
        # were previously non-minimal examples of a bug, but this will
        # likely expand in future). These are a good source of potentially
        # interesting examples, but there are a lot of them, so we down
        # sample the secondary corpus to a more manageable size.

        corpus = sorted(
            self.settings.database.fetch(self.database_key),
            key=sort_key
        )
        desired_size = max(2, ceil(0.1 * self.settings.max_examples))

        for extra_key in [self.secondary_key, self.covering_key]:
            if len(corpus) < desired_size:
                extra_corpus = list(
                    self.settings.database.fetch(extra_key),
                )

                shortfall = desired_size - len(corpus)

                if len(extra_corpus) <= shortfall:
                    extra = extra_corpus
                else:
                    extra = self.random.sample(extra_corpus, shortfall)
                extra.sort(key=sort_key)
                corpus.extend(extra)

        for existing in corpus:
            self.last_data = ConjectureData.for_buffer(existing)
            try:
                self.test_function(self.last_data)
            finally:
                if self.last_data.status != Status.INTERESTING:
                    self.settings.database.delete(
                        self.database_key, existing)
                    self.settings.database.delete(
                        self.secondary_key, existing)
def test_floats_order_worse_than_their_integral_part(f):
    assume(f != int(f))
    assume(int(f) != 0)
    i = flt.float_to_lex(f)
    if f < 0:
        g = ceil(f)
    else:
        g = floor(f)
    assert flt.float_to_lex(float(g)) < i
def test_avoids_zig_zag_trap(p):
    b, marker, lower_bound = p

    random.seed(0)

    b = hbytes(b)
    marker = hbytes(marker)

    n_bits = 8 * (len(b) + 1)

    def test_function(data):
        m = data.draw_bits(n_bits)
        if m < lower_bound:
            data.mark_invalid()
        n = data.draw_bits(n_bits)
        if data.draw_bytes(len(marker)) != marker:
            data.mark_invalid()
        if abs(m - n) == 1:
            data.mark_interesting()

    runner = ConjectureRunner(
        test_function, database_key=None, settings=settings(
            base_settings, phases=(Phase.generate, Phase.shrink)
        )
    )

    runner.test_function(ConjectureData.for_buffer(
        b + hbytes([0]) + b + hbytes([1]) + marker))

    assert runner.interesting_examples

    runner.run()

    v, = runner.interesting_examples.values()

    data = ConjectureData.for_buffer(v.buffer)

    m = data.draw_bits(n_bits)
    n = data.draw_bits(n_bits)
    assert m == lower_bound
    if m == 0:
        assert n == 1
    else:
        assert n == m - 1

    budget = 2 * n_bits * ceil(log(n_bits, 2)) + 2

    assert runner.shrinks <= budget
def test_our_floor_and_ceil_avoid_numpy_rounding(value):
    a = np.array([eval(value)])

    f = floor(a)
    c = ceil(a)

    assert type(f) == int
    assert type(c) == int

    # Using math.floor or math.ceil for these values would give an incorrect
    # result.
    assert (math.floor(a) > a) or (math.ceil(a) < a)

    assert f <= a <= c
    assert f + 1 > a > c - 1
def test_always_reduces_integers_to_smallest_suitable_sizes(problem):
    n, blob = problem
    try:
        d = ConjectureData.for_buffer(blob)
        k = d.draw(st.integers())
        stop = blob[len(d.buffer)]
    except (StopTest, IndexError):
        reject()

    assume(k > n)
    assume(stop > 0)

    def f(data):
        k = data.draw(st.integers())
        data.output = repr(k)
        if data.draw_bits(8) == stop and k >= n:
            data.mark_interesting()

    runner = ConjectureRunner(f, random=Random(0), settings=settings(
        suppress_health_check=HealthCheck.all(), timeout=unlimited,
        phases=(Phase.shrink,), database=None, verbosity=Verbosity.quiet
    ))

    runner.test_function(ConjectureData.for_buffer(blob))

    assert runner.interesting_examples

    runner.run()

    v, = runner.interesting_examples.values()

    runner.debug = note
    runner.debug_data(v)

    m = ConjectureData.for_buffer(v.buffer).draw(st.integers())
    assert m == n

    # Upper bound on the length needed is calculated as follows:
    # * We have an initial byte at the beginning to decide the length of the
    #   integer.
    # * We have a terminal byte as the stop value.
    # * The rest is the integer payload. This should be n. Including the sign
    #   bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only
    #   have power of two sizes, so it may be up to a factor of two more than
    #   that.
    assert len(v.buffer) <= 2 + 2 * max(1, ceil((1 + n.bit_length()) / 8))
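# A worked instance of the length bound derived in the comment above, for
# n = 1000: bit_length is 10, so the payload needs ceil((1 + 10) / 8) == 2
# bytes; with the size byte and the stop byte the bound is 2 + 2 * 2 == 6.
from math import ceil
n = 1000
assert 2 + 2 * max(1, ceil((1 + n.bit_length()) / 8)) == 6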
def reuse_existing_examples(self):
    """If appropriate (we have a database and have been told to use it),
    try to reload existing examples from the database.

    If there are a lot we don't try all of them. We always try the
    smallest example in the database (which is guaranteed to be the
    last failure) and the largest (which is usually the seed example
    which the last failure came from but we don't enforce that). We
    then take a random sampling of the remainder and try those. Any
    examples that are no longer interesting are cleared out.
    """
    if self.has_existing_examples():
        corpus = sorted(
            self.settings.database.fetch(self.database_key),
            key=sort_key
        )

        desired_size = max(2, ceil(0.1 * self.settings.max_examples))

        if desired_size < len(corpus):
            new_corpus = [corpus[0], corpus[-1]]
            n_boost = max(desired_size - 2, 0)
            new_corpus.extend(self.random.sample(corpus[1:-1], n_boost))
            corpus = new_corpus
            corpus.sort(key=sort_key)

        for existing in corpus:
            if self.valid_examples >= self.settings.max_examples:
                self.exit_with(ExitReason.max_examples)
            if self.call_count >= max(
                self.settings.max_iterations, self.settings.max_examples
            ):
                self.exit_with(ExitReason.max_iterations)
            data = ConjectureData.for_buffer(existing)
            self.test_function(data)
            data.freeze()
            self.last_data = data
            self.consider_new_test_data(data)
            if data.status == Status.INTERESTING:
                assert data.status == Status.INTERESTING
                self.last_data = data
                break
            else:
                self.settings.database.delete(
                    self.database_key, existing)
def timed_test(*args, **kwargs):
    start = time.time()
    result = test(*args, **kwargs)
    runtime = (time.time() - start) * 1000
    if self.settings.deadline is not_set:
        if (not self.__warned_deadline and runtime >= 200):
            self.__warned_deadline = True
            note_deprecation(
                ('Test took %.2fms to run. In future the default '
                 'deadline setting will be 200ms, which will '
                 'make this an error. You can set deadline to '
                 'an explicit value of e.g. %d to turn tests '
                 'slower than this into an error, or you can set '
                 'it to None to disable this check entirely.') % (
                    runtime, ceil(runtime / 100) * 100,
                ))
    elif runtime >= self.settings.deadline:
        raise DeadlineExceeded(
            ('Test took %.2fms, which exceeds the deadline of '
             '%.2fms') % (runtime, self.settings.deadline))
    return result
def timed_test(*args, **kwargs):
    self.__test_runtime = None
    start = time.time()
    result = test(*args, **kwargs)
    runtime = (time.time() - start) * 1000
    self.__test_runtime = runtime
    if self.settings.deadline is not_set:
        if (
            not self.__warned_deadline and
            runtime >= 200
        ):
            self.__warned_deadline = True
            note_deprecation((
                'Test took %.2fms to run. In future the default '
                'deadline setting will be 200ms, which will '
                'make this an error. You can set deadline to '
                'an explicit value of e.g. %d to turn tests '
                'slower than this into an error, or you can set '
                'it to None to disable this check entirely.') % (
                    runtime, ceil(runtime / 100) * 100,
            ))
    elif runtime >= self.current_deadline:
        raise DeadlineExceeded(runtime, self.settings.deadline)
    return result
def test_meets_budgetary_requirements(cls, example):
    # Somewhat arbitrary but not unreasonable budget.
    n = len(example)
    budget = n * ceil(math.log(n, 2)) + 5
    assert measure_baseline(cls, example) <= budget
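# A worked instance of the budget formula above: for an example of length
# n == 10, the allowance is 10 * ceil(log2(10)) + 5 == 10 * 4 + 5 == 45.
import math
n = 10
assert n * math.ceil(math.log(n, 2)) + 5 == 45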
def reuse_existing_examples(self):
    """If appropriate (we have a database and have been told to use it),
    try to reload existing examples from the database.

    If there are a lot we don't try all of them. We always try the
    smallest example in the database (which is guaranteed to be the
    last failure) and the largest (which is usually the seed example
    which the last failure came from but we don't enforce that). We
    then take a random sampling of the remainder and try those. Any
    examples that are no longer interesting are cleared out.
    """
    if self.has_existing_examples():
        self.debug("Reusing examples from database")
        # We have to do some careful juggling here. We have two database
        # corpora: The primary and secondary. The primary corpus is a
        # small set of minimized examples each of which has at one point
        # demonstrated a distinct bug. We want to retry all of these.

        # We also have a secondary corpus of examples that have at some
        # point demonstrated interestingness (currently only ones that
        # were previously non-minimal examples of a bug, but this will
        # likely expand in future). These are a good source of potentially
        # interesting examples, but there are a lot of them, so we down
        # sample the secondary corpus to a more manageable size.

        corpus = sorted(
            self.settings.database.fetch(self.database_key), key=sort_key
        )
        desired_size = max(2, ceil(0.1 * self.settings.max_examples))

        if len(corpus) < desired_size:
            extra_corpus = list(self.settings.database.fetch(self.secondary_key))

            shortfall = desired_size - len(corpus)

            if len(extra_corpus) <= shortfall:
                extra = extra_corpus
            else:
                extra = self.random.sample(extra_corpus, shortfall)
            extra.sort(key=sort_key)
            corpus.extend(extra)

        for existing in corpus:
            data = self.cached_test_function(existing)
            if data.status != Status.INTERESTING:
                self.settings.database.delete(self.database_key, existing)
                self.settings.database.delete(self.secondary_key, existing)

        # If we've not found any interesting examples so far we try some of
        # the pareto front from the last run.
        if len(corpus) < desired_size and not self.interesting_examples:
            desired_extra = desired_size - len(corpus)
            pareto_corpus = list(self.settings.database.fetch(self.pareto_key))
            if len(pareto_corpus) > desired_extra:
                pareto_corpus = self.random.sample(pareto_corpus, desired_extra)
            pareto_corpus.sort(key=sort_key)

            for existing in pareto_corpus:
                data = self.cached_test_function(existing)
                if data not in self.pareto_front:
                    self.settings.database.delete(self.pareto_key, existing)
                if data.status == Status.INTERESTING:
                    break
def float_hack(self): """Our encoding of floating point numbers does the right thing when you lexically shrink it, but there are some highly non-obvious lexical shrinks corresponding to natural floating point operations. We can't actually tell when the floating point encoding is being used (that would break the assumptions that Hypothesis doesn't inspect the generated values), but we can cheat: We just guess when it might be being used and perform shrinks that are valid regardless of our guess is correct. So that's what this method does. It's a cheat to give us good shrinking of floating at low cost in runtime and only moderate cost in elegance. """ # If the block is of the wrong size then we're certainly not using the # float encoding. if self.size != 8: return # If the high bit is zero then we're in the integer representation of # floats so we don't need these hacks because it will shrink normally. if self.current[0] >> 7 == 0: return i = int_from_bytes(self.current) f = lex_to_float(i) # This floating point number can be represented in our simple format. # So we try converting it to that (which will give the same float, but # a different encoding of it). If that doesn't work then the float # value of this doesn't unambiguously give the desired predicate, so # this approach isn't useful. If it *does* work, then we're now in a # situation where we don't need it, so either way we return here. if is_simple(f): self.incorporate_float(f) return # We check for a bunch of standard "large" floats. If we're currently # worse than them and the shrink downwards doesn't help, abort early # because there's not much useful we can do here. for g in [ float('nan'), float('inf'), sys.float_info.max, ]: j = float_to_lex(g) if j < i: if self.incorporate_int(j): f = g i = j if math.isinf(f) or math.isnan(f): return # Finally we get to the important bit: Each of these is a small change # to the floating point number that corresponds to a large change in # the lexical representation. Trying these ensures that our floating # point shrink can always move past these obstacles. In particular it # ensures we can always move to integer boundaries and shrink past a # change that would require shifting the exponent while not changing # the float value much. for g in [ floor(f), ceil(f), ]: if self.incorporate_float(g): return if f > 2: self.incorporate_float(f - 1)
def __init__(self, elements, size):
    super().__init__()
    self.__length = st.integers(0, ceil(size**0.5))
    self.__elements = elements
def test_our_ceil_agrees_with_math_ceil(value):
    assert ceil(value) == math.ceil(value)
    min_argc=None,  # int
    max_argc=None,  # int
    manual_argument_bindings=None,  # {} dict
    manual_keyword_bindings=None,  # {} dict
    body=_phony_callable,
    decorators=None,  # [] list
    kwarginit=hs.nothing(),
):
    """DOCUMENT ME!!!"""
    # Replicates check_valid_sizes logic but with correct variable names
    check_valid_size(min_argc, "min_argc")
    check_valid_size(max_argc, "max_argc")
    check_valid_interval(min_argc, max_argc, "min_argc", "max_argc")
    min_argc = None if min_argc is None else ceil(min_argc)
    max_argc = None if max_argc is None else floor(max_argc)

    check_strategy(kwarginit, name="kwarginit")

    if decorators is not None:
        check_type(list, decorators, "decorators")
        for index, d in enumerate(decorators):
            _check_callable(d, name="iteration %r in 'decorators'" % (index))

    _check_callable(body, name="body")

    # if not hasattr(binding_regex, 'pattern'):
    #     # this has to be done later anyway inside the binding generator,
    #     # might as well do it now to make things a little faster.
    #     binding_regex = re.compile(binding_regex)
def __init__(self, elements, size):
    SearchStrategy.__init__(self)
    self.__length = st.integers(0, ceil(size**0.5))
    self.__elements = elements
def test_ceil(x):
    assert isinstance(ceil(x), int)
    assert x <= ceil(x) < x + 1
    assert ceil(x) == math.ceil(x)
def float_hack(self): """Our encoding of floating point numbers does the right thing when you lexically shrink it, but there are some highly non-obvious lexical shrinks corresponding to natural floating point operations. We can't actually tell when the floating point encoding is being used (that would break the assumptions that Hypothesis doesn't inspect the generated values), but we can cheat: We just guess when it might be being used and perform shrinks that are valid regardless of our guess is correct. So that's what this method does. It's a cheat to give us good shrinking of floating at low cost in runtime and only moderate cost in elegance. """ # If the block is of the wrong size then we're certainly not using the # float encoding. if self.size != 8: return # If the high bit is zero then we're in the integer representation of # floats so we don't need these hacks because it will shrink normally. if self.current[0] >> 7 == 0: return i = self.current_int f = lex_to_float(i) # This floating point number can be represented in our simple format. # So we try converting it to that (which will give the same float, but # a different encoding of it). If that doesn't work then the float # value of this doesn't unambiguously give the desired predicate, so # this approach isn't useful. If it *does* work, then we're now in a # situation where we don't need it, so either way we return here. if is_simple(f): self.incorporate_float(f) return # We check for a bunch of standard "large" floats. If we're currently # worse than them and the shrink downwards doesn't help, abort early # because there's not much useful we can do here. for g in [ float('nan'), float('inf'), sys.float_info.max, ]: j = float_to_lex(g) if j < i: if self.incorporate_int(j): f = g i = j if math.isinf(f) or math.isnan(f): return # Finally we get to the important bit: Each of these is a small change # to the floating point number that corresponds to a large change in # the lexical representation. Trying these ensures that our floating # point shrink can always move past these obstacles. In particular it # ensures we can always move to integer boundaries and shrink past a # change that would require shifting the exponent while not changing # the float value much. for g in [floor(f), ceil(f)]: if self.incorporate_float(g): return if f > 2: self.incorporate_float(f - 1)
def __init__(self, elements, size):
    SearchStrategy.__init__(self)
    self.__length = st.integers(0, ceil(size ** 0.5))
    self.__elements = elements
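# A small numeric check of the length cap used in the constructors above:
# the drawn length is bounded by the ceiling of the square root of `size`,
# so size == 100 caps the length at 10.
from math import ceil
assert ceil(100 ** 0.5) == 10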