Example #1
    def __init__(self, weights):
        # We consider each weight expressed in terms of the average weight,
        # say t. We write the weight of i as nt + f, where n is an integer and
        # 0 <= f < 1. We then store n items for this weight which correspond
        # to drawing i unconditionally, and if f > 0 we store an additional
        # item that corresponds to drawing i with probability f. This ensures
        # that (under a uniform model) we draw i with probability proportionate
        # to its weight.

        # We then rearrange things to shrink better. The table with the whole
        # weights is kept in sorted order so that shrinking still corresponds
        # to shrinking leftwards. The fractional weights however are put in
        # a second table that is logically "to the right" of the whole weights
        # and are sorted in order of decreasing acceptance probability. This
        # ensures that shrinking lexicographically always results in drawing
        # less data.
        self.table = []
        self.extras = []
        self.acceptance = []
        total = sum(weights)
        n = len(weights)
        for i, x in enumerate(weights):
            whole_occurrences = floor(x * n / total)
            acceptance = x - whole_occurrences
            self.acceptance.append(acceptance)
            for _ in range(whole_occurrences):
                self.table.append(i)
            if acceptance > 0:
                self.extras.append(i)
        self.extras.sort(key=self.acceptance.__getitem__, reverse=True)
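A minimal standalone sketch (not the library class itself; names and the example weights are illustrative) of the decomposition the comment above describes: each weight, measured in units of the average weight, splits into a whole part that contributes unconditional copies to the table and a fractional part that becomes an "extra" entry accepted with that probability.

from math import floor

weights = [1.0, 2.0, 3.0]              # hypothetical input
total = sum(weights)
n = len(weights)
table, extras, acceptance = [], [], []
for i, x in enumerate(weights):
    scaled = x * n / total             # weight in units of the average weight
    whole = floor(scaled)              # unconditional copies of i
    frac = scaled - whole              # probability of accepting the extra copy
    table.extend([i] * whole)
    acceptance.append(frac)
    if frac > 0:
        extras.append(i)
extras.sort(key=acceptance.__getitem__, reverse=True)
assert table == [1, 2]
assert extras == [0, 2] and acceptance[0] == acceptance[2] == 0.5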
Example #2
def get_integer_predicate_bounds(predicate: Predicate) -> ConstructivePredicate:
    kwargs, predicate = get_numeric_predicate_bounds(predicate)  # type: ignore

    if "min_value" in kwargs:
        if kwargs["min_value"] == -math.inf:
            del kwargs["min_value"]
        elif math.isinf(kwargs["min_value"]):
            return UNSATISFIABLE
        elif kwargs["min_value"] != int(kwargs["min_value"]):
            kwargs["min_value"] = ceil(kwargs["min_value"])
        elif kwargs.get("exclude_min", False):
            kwargs["min_value"] = int(kwargs["min_value"]) + 1

    if "max_value" in kwargs:
        if kwargs["max_value"] == math.inf:
            del kwargs["max_value"]
        elif math.isinf(kwargs["max_value"]):
            return UNSATISFIABLE
        elif kwargs["max_value"] != int(kwargs["max_value"]):
            kwargs["max_value"] = floor(kwargs["max_value"])
        elif kwargs.get("exclude_max", False):
            kwargs["max_value"] = int(kwargs["max_value"]) - 1

    kwargs = {k: v for k, v in kwargs.items() if k in {"min_value", "max_value"}}
    return ConstructivePredicate(kwargs, predicate)
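A hedged, standalone illustration of the rounding rules above (the tighten helper below is hypothetical, not part of Hypothesis): a fractional bound is tightened inwards with ceil/floor, while an exclusive bound that is already integral is bumped by one.

from math import ceil, floor

def tighten(min_value=None, max_value=None, exclude_min=False, exclude_max=False):
    # Mirrors the branch structure above for finite bounds only.
    if min_value is not None:
        if min_value != int(min_value):
            min_value = ceil(min_value)
        elif exclude_min:
            min_value = int(min_value) + 1
    if max_value is not None:
        if max_value != int(max_value):
            max_value = floor(max_value)
        elif exclude_max:
            max_value = int(max_value) - 1
    return min_value, max_value

assert tighten(min_value=1.5) == (2, None)
assert tighten(min_value=2, exclude_min=True) == (3, None)
assert tighten(max_value=7.9, exclude_max=True) == (None, 7)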
Example #3
    def __init__(self, weights):
        # We consider each weight expressed in terms of the average weight,
        # say t. We write the weight of i as nt + f, where n is an integer and
        # 0 <= f < 1. We then store n items for this weight which correspond
        # to drawing i unconditionally, and if f > 0 we store an additional
        # item that corresponds to drawing i with probability f. This ensures
        # that (under a uniform model) we draw i with probability proportionate
        # to its weight.

        # We then rearrange things to shrink better. The table with the whole
        # weights is kept in sorted order so that shrinking still corresponds
        # to shrinking leftwards. The fractional weights however are put in
        # a second table that is logically "to the right" of the whole weights
        # and are sorted in order of decreasing acceptance probability. This
        # ensures that shrinking lexicographically always results in drawing
        # less data.
        self.table = []
        self.extras = []
        self.acceptance = []
        total = sum(weights)
        n = len(weights)
        for i, x in enumerate(weights):
            whole_occurrences = floor(x * n / total)
            acceptance = x - whole_occurrences
            self.acceptance.append(acceptance)
            for _ in range(whole_occurrences):
                self.table.append(i)
            if acceptance > 0:
                self.extras.append(i)
        self.extras.sort(key=self.acceptance.__getitem__, reverse=True)
Example #4
def test_floats_order_worse_than_their_integral_part(f):
    assume(f != int(f))
    assume(int(f) != 0)
    i = flt.float_to_lex(f)
    if f < 0:
        g = ceil(f)
    else:
        g = floor(f)

    assert flt.float_to_lex(float(g)) < i
Example #5
def test_floats_order_worse_than_their_integral_part(f):
    assume(f != int(f))
    assume(int(f) != 0)
    i = flt.float_to_lex(f)
    if f < 0:
        g = ceil(f)
    else:
        g = floor(f)

    assert flt.float_to_lex(float(g)) < i
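The sign-dependent branch in the test above is just truncation towards zero; a tiny standalone check of that equivalence (independent of Hypothesis' flt module):

from math import ceil, floor

for f in (2.5, -2.5, 7.25, -7.25):
    g = ceil(f) if f < 0 else floor(f)
    assert g == int(f)  # int() truncates towards zero in Python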
Example #6
def test_our_floor_and_ceil_avoid_numpy_rounding(value):
    a = np.array([eval(value)])

    f = floor(a)
    c = ceil(a)

    assert type(f) == int
    assert type(c) == int

    # Using math.floor or math.ceil for these values would give an incorrect
    # result.
    assert (math.floor(a) > a) or (math.ceil(a) < a)

    assert f <= a <= c
    assert f + 1 > a > c - 1
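A standalone illustration (not part of the test suite) of the rounding problem this test guards against: a 64-bit integer that is not exactly representable as a float is rounded during conversion, so math.floor of the converted value can land above the true value.

import math

big = 2**64 - 1                       # not exactly representable as a float64
assert float(big) == 2**64            # conversion rounds it up
assert math.floor(float(big)) > big   # so math.floor overshoots the true value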
Example #7
def test_our_floor_agrees_with_math_floor(value):
    assert floor(value) == math.floor(value)
Example #8
def test_floor(x):
    assert isinstance(floor(x), integer_types)
    assert x - 1 < floor(x) <= x
Example #9
def biased_coin(data, p, *, forced=None):
    """Return True with probability p (assuming a uniform generator),
    shrinking towards False. If ``forced`` is set to a non-None value, this
    will always return that value but will write choices appropriate to having
    drawn that value randomly."""

    # NB this function is vastly more complicated than it may seem reasonable
    # for it to be. This is because it is used in a lot of places and it's
    # important for it to shrink well, so it's worth the engineering effort.

    if p <= 0 or p >= 1:
        bits = 1
    else:
        # When there is a meaningful draw, in order to shrink well we will
        # set things up so that 0 and 1 always correspond to False and True
        # respectively. This means we want enough bits available that in a
        # draw we will always have at least one truthy value and one falsey
        # value.
        bits = math.ceil(-math.log(min(p, 1 - p), 2))
    # In order to avoid stupidly large draws where the probability is
    # effectively zero or one, we treat probabilities of under 2^-64 as
    # effectively zero.
    if bits > 64:
        # There isn't enough precision near one for this to occur for values
        # far from 0.
        p = 0.0
        bits = 1

    size = 2**bits

    data.start_example(BIASED_COIN_LABEL)
    while True:
        # The logic here is a bit complicated and special cased to make it
        # play better with the shrinker.

        # We imagine partitioning the real interval [0, 1] into 256 equal parts
        # and looking at each part and whether its interior is wholly <= p
        # or wholly >= p. At most one part can be neither.

        # We then pick a random part. If it's wholly on one side or the other
        # of p then we use that as the answer. If p is contained in the
        # interval then we start again with a new probability that is given
        # by the fraction of that interval that was <= our previous p.

        # We then take advantage of the fact that we have control of the
        # labelling to make this shrink better, using the following tricks:

        # If p is <= 0 or >= 1 the result of this coin is certain. We make sure
        # to write a byte to the data stream anyway so that these don't cause
        # difficulties when shrinking.
        if p <= 0:
            data.draw_bits(1, forced=0)
            result = False
        elif p >= 1:
            data.draw_bits(1, forced=1)
            result = True
        else:
            falsey = floor(size * (1 - p))
            truthy = floor(size * p)
            remainder = size * p - truthy

            if falsey + truthy == size:
                partial = False
            else:
                partial = True

            if forced is None:
                # We want to get to the point where True is represented by
                # 1 and False is represented by 0 as quickly as possible, so
                # we use the remove_discarded machinery in the shrinker to
                # achieve that by discarding any draws that are > 1 and writing
                # a suitable draw into the choice sequence at the end of the
                # loop.
                data.start_example(BIASED_COIN_INNER_LABEL)
                i = data.draw_bits(bits)
                data.stop_example(discard=i > 1)
            else:
                i = data.draw_bits(bits, forced=int(forced))

            # We always label the region that causes us to repeat the loop as
            # 255 so that shrinking this byte never causes us to need to draw
            # more data.
            if partial and i == size - 1:
                p = remainder
                continue
            if falsey == 0:
                # Every other partition is truthy, so the result is true
                result = True
            elif truthy == 0:
                # Every other partition is falsey, so the result is false
                result = False
            elif i <= 1:
                # We special case so that zero is always false and 1 is always
                # true which makes shrinking easier because we can always
                # replace a truthy block with 1. This has the slightly weird
                # property that shrinking from 2 to 1 can cause the result to
                # grow, but the shrinker always tries 0 and 1 first anyway, so
                # this will usually be fine.
                result = bool(i)
            else:
                # Originally everything in the region 0 <= i < falsey was false
                # and everything above was true. We swapped one truthy element
                # into this region, so the region becomes 0 <= i <= falsey
                # except for i = 1. We know i > 1 here, so the test for truth
                # becomes i > falsey.
                result = i > falsey

            if i > 1:
                data.draw_bits(bits, forced=int(result))
        break
    data.stop_example()
    return result
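A hedged worked example of the partition arithmetic described in the comments above (standalone numbers, not the library function): for p = 0.3 two bits suffice, giving four cells of which two are certainly False, one is certainly True, and the remaining "partial" cell (drawn as size - 1) re-runs the loop with the leftover probability.

import math
from math import floor

p = 0.3
bits = math.ceil(-math.log(min(p, 1 - p), 2))   # 2
size = 2**bits                                  # 4
falsey = floor(size * (1 - p))                  # 2 cells that are always False
truthy = floor(size * p)                        # 1 cell that is always True
remainder = size * p - truthy                   # ~0.2, the new p if the partial cell is drawn
assert (bits, size, falsey, truthy) == (2, 4, 2, 1)
assert falsey + truthy < size                   # so one cell is "partial"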
Example #10
    def float_hack(self):
        """Our encoding of floating point numbers does the right thing when you
        lexically shrink it, but there are some highly non-obvious lexical
        shrinks corresponding to natural floating point operations.

        We can't actually tell when the floating point encoding is being used
        (that would break the assumption that Hypothesis doesn't inspect
        the generated values), but we can cheat: we just guess when it might be
        being used and perform shrinks that are valid regardless of whether our
        guess is correct.

        So that's what this method does. It's a cheat to give us good shrinking
        of floats at low cost in runtime and only moderate cost in elegance.

        """

        # If the block is of the wrong size then we're certainly not using the
        # float encoding.
        if self.size != 8:
            return

        # If the high bit is zero then we're in the integer representation of
        # floats so we don't need these hacks because it will shrink normally.
        if self.current[0] >> 7 == 0:
            return

        i = int_from_bytes(self.current)
        f = lex_to_float(i)

        # This floating point number can be represented in our simple format.
        # So we try converting it to that (which will give the same float, but
        # a different encoding of it). If that doesn't work then the float
        # value of this doesn't unambiguously give the desired predicate, so
        # this approach isn't useful. If it *does* work, then we're now in a
        # situation where we don't need it, so either way we return here.
        if is_simple(f):
            self.incorporate_float(f)
            return

        # We check for a bunch of standard "large" floats. If we're currently
        # worse than them and the shrink downwards doesn't help, abort early
        # because there's not much useful we can do here.
        for g in [
            float('nan'), float('inf'), sys.float_info.max,
        ]:
            j = float_to_lex(g)
            if j < i:
                if self.incorporate_int(j):
                    f = g
                    i = j

        if math.isinf(f) or math.isnan(f):
            return

        # Finally we get to the important bit: Each of these is a small change
        # to the floating point number that corresponds to a large change in
        # the lexical representation. Trying these ensures that our floating
        # point shrink can always move past these obstacles. In particular it
        # ensures we can always move to integer boundaries and shrink past a
        # change that would require shifting the exponent while not changing
        # the float value much.
        for g in [
            floor(f), ceil(f),
        ]:
            if self.incorporate_float(g):
                return

        if f > 2:
            self.incorporate_float(f - 1)
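A hedged sketch, outside the shrinker machinery, of the value-level candidates this pass ends up trying (the helper name is illustrative): the integer boundaries of f and, for f > 2, f - 1.

from math import ceil, floor, isinf, isnan

def candidate_floats(f):
    if isnan(f) or isinf(f):
        return []
    out = [floor(f), ceil(f)]   # move to the nearest integer boundaries
    if f > 2:
        out.append(f - 1)       # small change in value, large change in encoding
    return out

assert candidate_floats(501.5) == [501, 502, 500.5]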
Example #11
def test_floor(x):
    assert isinstance(floor(x), integer_types)
    assert x - 1 < floor(x) <= x
Example #12
		max_argc=None, # int
		manual_argument_bindings=None, # {} dict
		manual_keyword_bindings=None, # {} dict
		body=_phony_callable,
		decorators=None, # [] list
		kwarginit=hs.nothing(),
	):
	"""DOCUMENT ME!!!"""

	# Replicates check_valid_sizes logic but with correct variable names
	check_valid_size(min_argc, "min_argc")
	check_valid_size(max_argc, "max_argc")
	check_valid_interval(min_argc, max_argc, "min_argc", "max_argc")

	min_argc = None if min_argc is None else ceil(min_argc)
	max_argc = None if max_argc is None else floor(max_argc)

	check_strategy(kwarginit, name="kwarginit")

	if decorators is not None:
		check_type(list, decorators, "decorators")
		for index, d in enumerate(decorators):
			_check_callable(d, name="iteration %r in 'decorators'" % (index))

	_check_callable(body, name="body")

	#if not hasattr(binding_regex, 'pattern'):
	#	# this has to be done later anyway inside the binding generator,
	#	# might as well do it now to make things a little faster.
	#	binding_regex = re.compile(binding_regex)
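A standalone check, with hypothetical values, of the bound normalisation above: a fractional lower bound rounds up and a fractional upper bound rounds down, so the interval only contains achievable integral argument counts.

from math import ceil, floor

min_argc, max_argc = 1.2, 3.8       # hypothetical fractional bounds
assert (ceil(min_argc), floor(max_argc)) == (2, 3)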
Example #13
def test_floor(x):
    assert isinstance(floor(x), int)
    assert x - 1 < floor(x) <= x
Example #14
def biased_coin(data, p):
    """Return False with probability p (assuming a uniform generator),
    shrinking towards False."""
    data.start_example(BIASED_COIN_LABEL)
    while True:
        # The logic here is a bit complicated and special cased to make it
        # play better with the shrinker.

        # We imagine partitioning the real interval [0, 1] into 256 equal parts
        # and looking at each part and whether its interior is wholly <= p
        # or wholly >= p. At most one part can be neither.

        # We then pick a random part. If it's wholly on one side or the other
        # of p then we use that as the answer. If p is contained in the
        # interval then we start again with a new probability that is given
        # by the fraction of that interval that was <= our previous p.

        # We then take advantage of the fact that we have control of the
        # labelling to make this shrink better, using the following tricks:

        # If p is <= 0 or >= 1 the result of this coin is certain. We make sure
        # to write a byte to the data stream anyway so that these don't cause
        # difficulties when shrinking.
        if p <= 0:
            data.write(hbytes([0]))
            result = False
        elif p >= 1:
            data.write(hbytes([1]))
            result = True
        else:
            falsey = floor(256 * (1 - p))
            truthy = floor(256 * p)
            remainder = 256 * p - truthy

            if falsey + truthy == 256:
                if isinstance(p, Fraction):
                    m = p.numerator
                    n = p.denominator
                else:
                    m, n = p.as_integer_ratio()
                assert n & (n - 1) == 0, n  # n is a power of 2
                assert n > m > 0
                truthy = m
                falsey = n - m
                bits = bit_length(n) - 1
                partial = False
            else:
                bits = 8
                partial = True

            i = data.draw_bits(bits)

            # We always label the region that causes us to repeat the loop as
            # 255 so that shrinking this byte never causes us to need to draw
            # more data.
            if partial and i == 255:
                p = remainder
                continue
            if falsey == 0:
                # Every other partition is truthy, so the result is true
                result = True
            elif truthy == 0:
                # Every other partition is falsey, so the result is false
                result = False
            elif i <= 1:
                # We special case so that zero is always false and 1 is always
                # true which makes shrinking easier because we can always
                # replace a truthy block with 1. This has the slightly weird
                # property that shrinking from 2 to 1 can cause the result to
                # grow, but the shrinker always tries 0 and 1 first anyway, so
                # this will usually be fine.
                result = bool(i)
            else:
                # Originally everything in the region 0 <= i < falsey was false
                # and everything above was true. We swapped one truthy element
                # into this region, so the region becomes 0 <= i <= falsey
                # except for i = 1. We know i > 1 here, so the test for truth
                # becomes i > falsey.
                result = i > falsey
        break
    data.stop_example()
    return result
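A hedged worked example of the exact-probability branch above (standalone arithmetic, not the library function): when 256 * p is an integer, here with p = 3/8, the cells split exactly and the draw needs only enough bits for the reduced fraction rather than a full byte.

from fractions import Fraction
from math import floor

p = Fraction(3, 8)
falsey = floor(256 * (1 - p))         # 160
truthy = floor(256 * p)               # 96
assert falsey + truthy == 256         # no partial cell, so take the exact branch
m, n = p.numerator, p.denominator
bits = n.bit_length() - 1             # 3 bits instead of 8
assert (m, n - m, bits) == (3, 5, 3)  # new truthy, falsey, and bit count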
Example #15
def biased_coin(data, p):
    """Return False with probability p (assuming a uniform generator),
    shrinking towards False."""
    data.start_example(BIASED_COIN_LABEL)
    while True:
        # The logic here is a bit complicated and special cased to make it
        # play better with the shrinker.

        # We imagine partitioning the real interval [0, 1] into 256 equal parts
        # and looking at each part and whether its interior is wholly <= p
        # or wholly >= p. At most one part can be neither.

        # We then pick a random part. If it's wholly on one side or the other
        # of p then we use that as the answer. If p is contained in the
        # interval then we start again with a new probability that is given
        # by the fraction of that interval that was <= our previous p.

        # We then take advantage of the fact that we have control of the
        # labelling to make this shrink better, using the following tricks:

        # If p is <= 0 or >= 1 the result of this coin is certain. We make sure
        # to write a byte to the data stream anyway so that these don't cause
        # difficulties when shrinking.
        if p <= 0:
            data.draw_bits(1, forced=0)
            result = False
        elif p >= 1:
            data.draw_bits(1, forced=1)
            result = True
        else:
            falsey = floor(256 * (1 - p))
            truthy = floor(256 * p)
            remainder = 256 * p - truthy

            if falsey + truthy == 256:
                if isinstance(p, Fraction):
                    m = p.numerator
                    n = p.denominator
                else:
                    m, n = p.as_integer_ratio()
                assert n & (n - 1) == 0, n  # n is a power of 2
                assert n > m > 0
                truthy = m
                falsey = n - m
                bits = bit_length(n) - 1
                partial = False
            else:
                bits = 8
                partial = True

            i = data.draw_bits(bits)

            # We always label the region that causes us to repeat the loop as
            # 255 so that shrinking this byte never causes us to need to draw
            # more data.
            if partial and i == 255:
                p = remainder
                continue
            if falsey == 0:
                # Every other partition is truthy, so the result is true
                result = True
            elif truthy == 0:
                # Every other partition is falsey, so the result is false
                result = False
            elif i <= 1:
                # We special case so that zero is always false and 1 is always
                # true which makes shrinking easier because we can always
                # replace a truthy block with 1. This has the slightly weird
                # property that shrinking from 2 to 1 can cause the result to
                # grow, but the shrinker always tries 0 and 1 first anyway, so
                # this will usually be fine.
                result = bool(i)
            else:
                # Originally everything in the region 0 <= i < falsey was false
                # and everything above was true. We swapped one truthy element
                # into this region, so the region becomes 0 <= i <= falsey
                # except for i = 1. We know i > 1 here, so the test for truth
                # becomes i > falsey.
                result = i > falsey
        break
    data.stop_example()
    return result
Example #16
    def float_hack(self):
        """Our encoding of floating point numbers does the right thing when you
        lexically shrink it, but there are some highly non-obvious lexical
        shrinks corresponding to natural floating point operations.

        We can't actually tell when the floating point encoding is being used
        (that would break the assumption that Hypothesis doesn't inspect
        the generated values), but we can cheat: we just guess when it might be
        being used and perform shrinks that are valid regardless of whether our
        guess is correct.

        So that's what this method does. It's a cheat to give us good shrinking
        of floats at low cost in runtime and only moderate cost in elegance.
        """
        # If the block is of the wrong size then we're certainly not using the
        # float encoding.
        if self.size != 8:
            return

        # If the high bit is zero then we're in the integer representation of
        # floats so we don't need these hacks because it will shrink normally.
        if self.current[0] >> 7 == 0:
            return

        i = self.current_int
        f = lex_to_float(i)

        # This floating point number can be represented in our simple format.
        # So we try converting it to that (which will give the same float, but
        # a different encoding of it). If that doesn't work then the float
        # value of this doesn't unambiguously give the desired predicate, so
        # this approach isn't useful. If it *does* work, then we're now in a
        # situation where we don't need it, so either way we return here.
        if is_simple(f):
            self.incorporate_float(f)
            return

        # We check for a bunch of standard "large" floats. If we're currently
        # worse than them and the shrink downwards doesn't help, abort early
        # because there's not much useful we can do here.
        for g in [
            float('nan'), float('inf'), sys.float_info.max,
        ]:
            j = float_to_lex(g)
            if j < i:
                if self.incorporate_int(j):
                    f = g
                    i = j

        if math.isinf(f) or math.isnan(f):
            return

        # Finally we get to the important bit: Each of these is a small change
        # to the floating point number that corresponds to a large change in
        # the lexical representation. Trying these ensures that our floating
        # point shrink can always move past these obstacles. In particular it
        # ensures we can always move to integer boundaries and shrink past a
        # change that would require shifting the exponent while not changing
        # the float value much.
        for g in [floor(f), ceil(f)]:
            if self.incorporate_float(g):
                return

        if f > 2:
            self.incorporate_float(f - 1)
Example #17
def test_floor(x):
    assert isinstance(floor(x), int)
    assert x - 1 < floor(x) <= x
    assert floor(x) == math.floor(x)