Example #1
def _create_hyp_nested_strategy(simple_class_strategy):
    """
    Create a recursive attrs class.

    Given a strategy for building (simpler) classes, create and return
    a strategy for building classes that have the simpler class as an
    attribute.
    """
    # Use a tuple strategy to combine simple attributes and an attr class.
    def just_class(tup):
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=attr.Factory(tup[1])))
        return _create_hyp_class(combined_attrs)

    def list_of_class(tup):
        default = attr.Factory(lambda: [tup[1]()])
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=default))
        return _create_hyp_class(combined_attrs)

    def dict_of_class(tup):
        default = attr.Factory(lambda: {"cls": tup[1]()})
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=default))
        return _create_hyp_class(combined_attrs)

    return st.one_of(st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(just_class),
                     st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(list_of_class),
                     st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(dict_of_class))
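
The helper above only adds one level of nesting per call; in practice it is meant to be fed to st.recursive so classes can nest to arbitrary depth. A minimal sketch of that wiring, assuming simple_attrs and _create_hyp_class are the helpers the example refers to (the names simple_classes and nested_classes are illustrative):

import hypothesis.strategies as st

# Base case: flat attrs classes built from simple attributes only.
simple_classes = st.lists(simple_attrs).map(_create_hyp_class)

# Recursive case: classes whose attributes may themselves be attrs classes.
nested_classes = st.recursive(simple_classes, _create_hyp_nested_strategy)
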
def test_mutual_recursion():
    t = st.deferred(lambda: a | b)
    a = st.deferred(lambda: st.none() | st.tuples(st.just("a"), b))
    b = st.deferred(lambda: st.none() | st.tuples(st.just("b"), a))

    for c in ("a", "b"):
        assert minimal(t, lambda x: x is not None and x[0] == c) == (c, None)
Example #3
def question_type_and_weight() -> SearchStrategy:
    return one_of(
        tuples(sampled_from(Question.CHOICE_TYPES),
               fixed_decimals()),
        tuples(sampled_from(sorted(set(Question.available_types()) - set(Question.CHOICE_TYPES))),
               just(0))
    )
Example #4
def builds_ignoring_invalid(target, *args, **kwargs):
    def splat(value):
        try:
            return target(*value[0], **value[1])
        except InvalidArgument:
            assume(False)
    return st.tuples(
        st.tuples(*args), st.fixed_dictionaries(kwargs)).map(splat)
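
A usage sketch for the wrapper above (SomeModel and its arguments are hypothetical, not taken from the example): it behaves like st.builds(), except that draws whose arguments make the target raise InvalidArgument are discarded via assume(False).

import hypothesis.strategies as st
from hypothesis import given

models = builds_ignoring_invalid(SomeModel, st.integers(), name=st.text())

@given(models)
def test_only_constructible_models_are_drawn(model):
    assert isinstance(model, SomeModel)
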
def test_tuple_strategy_too_large_to_fit():
    x = frozensets(integers(0, 5))
    assert not math.isinf(x.template_upper_bound)
    x = tuples(x, x)
    assert not math.isinf(x.template_upper_bound)
    assert math.isinf(tuples(x, x).template_upper_bound)
    assert math.isinf(
        tuples(integers(), x).template_upper_bound)
Example #6
def test_tuples_do_not_block_cloning():
    assert (
        minimal(
            lists(tuples(booleans() | tuples(integers()))),
            lambda x: len(x) >= 50 and any(isinstance(t[0], bool) for t in x),
        )
        == [(False,)] * 50
    )
Example #7
def arrays_BOP_BO(min_B=1, max_B=10, min_O=1, max_O=100, min_P=1, max_P=5):
    shapes = tuples(
        lengths(lo=min_B, hi=max_B),
        lengths(lo=min_O, hi=max_O),
        lengths(lo=min_P, hi=max_P),
    )
    return shapes.flatmap(
        lambda BOP: tuples(ndarrays_of_shape(BOP), ndarrays_of_shape(BOP[:-1]))
    )
Example #8
def builds_ignoring_invalid(target, *args, **kwargs):
    def splat(value):
        try:
            result = target(*value[0], **value[1])
            result.validate()
            return result
        except InvalidArgument:
            reject()
    return st.tuples(
        st.tuples(*args), st.fixed_dictionaries(kwargs)).map(splat)
Example #9
def arrays_BI_BO(min_B=1, max_B=10, min_I=1, max_I=100, min_O=1, max_O=100):
    shapes = tuples(
        lengths(lo=min_B, hi=max_B),
        lengths(lo=min_I, hi=max_I),
        lengths(lo=min_O, hi=max_O),
    )
    return shapes.flatmap(
        lambda BIO: tuples(
            ndarrays_of_shape((BIO[0], BIO[1])), ndarrays_of_shape((BIO[0], BIO[2]))
        )
    )
Example #10
def from_attrs(target, args, kwargs, to_infer):
    """An internal version of builds(), specialised for Attrs classes."""
    fields = attr.fields(target)
    kwargs = {k: v for k, v in kwargs.items() if v is not infer}
    for name in to_infer:
        kwargs[name] = from_attrs_attribute(getattr(fields, name), target)
    # We might make this strategy more efficient if we added a layer here that
    # retries drawing if validation fails, for improved composition.
    # The treatment of timezones in datetimes() provides a precedent.
    return st.tuples(st.tuples(*args), st.fixed_dictionaries(kwargs)).map(
        lambda value: target(*value[0], **value[1])
    )
Example #11
def array_dtypes(subtype_strategy=scalar_dtypes(),
                 min_size=1, max_size=5, allow_subarrays=False):
    """Return a strategy for generating array (compound) dtypes, with members
    drawn from the given subtype strategy."""
    order_check('size', 0, min_size, max_size)
    native_strings = st.text if text_type is str else st.binary
    elements = st.tuples(native_strings(), subtype_strategy)
    if allow_subarrays:
        elements |= st.tuples(native_strings(), subtype_strategy,
                              array_shapes(max_dims=2, max_side=2))
    return st.lists(elements=elements, min_size=min_size, max_size=max_size,
                    unique_by=lambda d: d[0])
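
One way this strategy might be consumed (a sketch; nps is assumed to be hypothesis.extra.numpy and array_dtypes the function above): draw a compound dtype, then an array of that dtype, and check the result really is a structured array.

import hypothesis.strategies as st
import hypothesis.extra.numpy as nps
from hypothesis import given

@given(st.data())
def test_generates_structured_arrays(data):
    dtype = data.draw(array_dtypes())
    arr = data.draw(nps.arrays(dtype=dtype, shape=nps.array_shapes(max_dims=2)))
    assert arr.dtype.names is not None  # structured dtypes expose field names
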
Example #12
def test_broad_recursive_data_will_fail_a_health_check():
    r = st.recursive(
        st.integers(), lambda s: st.tuples(*((s,) * 10)),
        max_leaves=10,
    )

    @given(st.tuples(r, r, r, r, r, r, r))
    def test(x):
        pass

    with raises(FailedHealthCheck):
        test()
Example #13
def _one_hots():
    index_size = st.integers(min_value=1, max_value=5)
    lengths = st.lists(
        elements=st.integers(min_value=0, max_value=5))
    return st.tuples(index_size, lengths).flatmap(
        lambda x: st.tuples(
            st.just(x[0]),
            st.just(x[1]),
            st.lists(
                elements=st.integers(min_value=0, max_value=x[0] - 1),
                min_size=sum(x[1]),
                max_size=sum(x[1]))))
Example #14
 def gen_with_size(args):
     lengths, inner_shape = args
     data_dim = [sum(lengths)] + inner_shape
     lengths = np.array(lengths, dtype=np.int32)
     if with_pad_data:
         return st.tuples(
             st.just(lengths),
             hu.arrays(data_dim),
             hu.arrays(inner_shape),
             hu.arrays(inner_shape))
     else:
         return st.tuples(st.just(lengths), hu.arrays(data_dim))
Example #15
def sparse_segmented_tensor(min_dim=1, max_dim=4, dtype=np.float32,
                            is_sorted=True, elements=None, **kwargs):
    data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
    all_dims_ = data_dims_.flatmap(lambda data_dims: st.tuples(
        st.just(data_dims),
        st.integers(min_value=1, max_value=data_dims[0]),
    ))
    return all_dims_.flatmap(lambda dims: st.tuples(
        arrays(dims[0], dtype, elements),
        arrays(dims[1], dtype=np.int64, elements=st.integers(
            min_value=0, max_value=dims[0][0] - 1)),
        segment_ids(dims[1], is_sorted=is_sorted),
    ))
Example #16
    def __init__(self):
        self.reinit()
        self.acquired = set()
        self.size = 0

        self.init_step = tuples(
            just("init"),
            integers(
                min_value=1,
                max_value=32
            )
        )
        self.acquire_step = tuples(just("acquire"), just(0))
Example #17
def add_bools(list_of_lists):
    """
    Given recursive list that can contain other lists, return tuple of that plus
    a booleans strategy for each list.
    """
    l = []
    def count(recursive):
        l.append(1)
        for child in recursive:
            if isinstance(child, list):
                count(child)
    count(list_of_lists)
    return st.tuples(st.just(list_of_lists), st.tuples(*[st.sampled_from([True, False]) for i in l]))
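
One way the helper above can be used (a sketch; the recursive-lists strategy here is an assumption, not part of the example): generate a nested list, then attach an independently drawn boolean for every list it contains.

import hypothesis.strategies as st

nested_lists = st.recursive(st.lists(st.integers(), max_size=3),
                            lambda children: st.lists(children, max_size=3))
lists_with_flags = nested_lists.flatmap(add_bools)
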
Example #18
 def steps(self):
     if not self.initialized:
         self.initialized = True
         return self.init_step
     else:
         if self.acquired:
             release_step = tuples(just(
                 "release"
             ), sampled_from(sorted(self.acquired)))
         else:
             release_step = tuples(just(
                 "release"
             ), integers(min_value=0, max_value=self.size - 1))
         return release_step | self.acquire_step
Example #19
def distribution_structures(draw, size=(2, 4), alphabet=(2, 4), uniform=False, min_events=1):
    """
    A hypothesis strategy for generating distributions.

    Parameters
    ----------
    draw : function
        A sampling function passed in by hypothesis.
    size : int or tuple of int
        The length of each outcome (the number of variables), or an inclusive
        (min, max) range to draw it from. Defaults to 2, 3, or 4, randomly.
    alphabet : int or tuple of int
        The alphabet size for each variable, or an inclusive (min, max) range
        to draw it from. Defaults to 2, 3, or 4, randomly.
    uniform : bool
        Whether the probabilities should be uniform or random. Defaults to random.

    Returns
    -------
    dist : Distribution
        A random distribution.
    """
    try:
        len(size)
    except TypeError:
        size = (size, size)
    try:
        len(alphabet)
    except TypeError:
        alphabet = (alphabet, alphabet)

    size_ = draw(integers(*size))
    alphabet_ = int(draw(integers(*alphabet)))

    events = draw(lists(tuples(*[integers(0, alphabet_ - 1)] * size_), min_size=min_events, unique=True))

    # make sure no marginal is a constant
    for var in zip(*events):
        assume(len(set(var)) > 1)

    if uniform:
        probs = [1 / len(events)] * len(events)
    else:
        probs = draw(tuples(*[floats(0, 1)] * len(events)))
        for prob in probs:
            assume(prob > 0)
        total = sum(probs)
        probs = [p / total for p in probs]

    dist =  Distribution(events, probs)
    dist.normalize()
    return dist
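
Given the draw parameter, this function is presumably wrapped with hypothesis.strategies.composite; under that assumption a test might use it like the sketch below (the property checked is illustrative and mirrors the assume in the example).

from hypothesis import given

@given(dist=distribution_structures(size=3, alphabet=2))
def test_every_marginal_varies(dist):
    # By construction no marginal is constant, so each variable takes at
    # least two distinct values across the drawn outcomes.
    for var in zip(*dist.outcomes):
        assert len(set(var)) > 1
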
Example #20
def type_to_strat(x, opts): # type: (type) -> SearchStrategy
   '''
   Given a type, return a strategy which yields a value of that type. Types may be complex: Union, NamedTuple, etc.
   For more information, see https://docs.python.org/3/library/typing.html
   Usage:
   >>> type_to_strat(Union[int, str], {}).example()
   . . . 3
   '''
   recur = lambda y: type_to_strat(y, opts)
   if x in primitives:
       prim =  primitives[x].filter(opts.get(x, lambda x: x))
       return prim
   elif hasattr(x, '_fields'):# NamedTuple isn't a type, it's a function
   #elif isinstance(x, Callable): #this catches List[T] for some reason
       name = x.__name__
       fts = OrderedDict(x._field_types)
       vals = map(recur, fts.values())
       # `NamedTuple` is actually a ... `namedtuple` itself
       toArgDict = lambda xs: dict(zip(fts.keys(), xs))
       return st.tuples(*vals).map(lambda ys: x(**toArgDict(ys)))
   elif issubclass(x, Dict):
       return st.dictionaries(*map(recur, x.__parameters__))
   elif issubclass(x, Tuple):
       if x.__tuple_use_ellipsis__: # variable length tuple
           element_type = x.__tuple_params__[0]
           return recur(List[element_type]).map(tuple)
       return st.tuples(*map(recur, x.__tuple_params__))
   elif issubclass(x, Union):
       return reduce(operator.ior, map(recur, x.__union_params__))
   elif issubclass(x, Optional):
       # Optional[X] is equivalent to Union[X, type(None)]. second param is always Nonetype.
       value = x.__union_params__[0]
       return (recur(value) | st.none()) # type: SearchStrategy
   else:
       element_type = recur(x.__parameters__[0])
       if issubclass(x, list):
           return st.lists(element_type)
       elif issubclass(x, set):
           return st.sets(element_type)
       elif issubclass(x, Sequence):
           anySizeTuple = recur(Tuple[element_type,...])
           return st.sets(element_type) | st.lists(element_type) | anySizeTuple
       elif issubclass(x, Generator):
           toGen = lambda xs: (x for x in xs) # type: Callable[[Iterable[T]], Generator[T]]
           return recur(List[element_type]).map(toGen)
       # not sure how to create an Iterable (it doesn't have an `__next__` method)
       elif issubclass(x, Iterator) or issubclass(x, Iterable):
           return recur(List[element_type]).map(iter)
       else:
           raise ValueError("Could not find strategy for type %s" % x)
Example #21
def arrays_OPFI_BI_lengths(max_B=5, max_P=3, max_F=5, max_I=8):
    shapes = tuples(
                lengths(hi=max_B),
                lengths(hi=max_P),
                lengths(hi=max_F),
                lengths(hi=max_I),
                arrays('int32', shape=(5,),
                    elements=integers(min_value=1, max_value=10)))

    strat = shapes.flatmap(
        lambda opfi_lengths: tuples(
            ndarrays_of_shape(opfi_lengths[:-1]),
            ndarrays_of_shape((sum(opfi_lengths[-1]), opfi_lengths[-2])),
            just(opfi_lengths[-1])))
    return strat
def test_containment(n):
    iv = minimal(
        tuples(lists(integers()), integers()),
        lambda x: x[1] in x[0] and x[1] >= n,
        timeout_after=60
    )
    assert iv == ([n], n)
Example #23
def url(schemes=[], userpass=False, port=False, url=False, query=False,
        fragment=False):

    if schemes:
        scheme = st.just(random.choice(schemes))
    else:
        scheme = st.text(alphabet=ascii_lowercase+digits, min_size=2)

    d = {'scheme': scheme,
         'domain': st.lists(
             st.text(
                 alphabet=ascii_lowercase + digits,
                 min_size=1,
                 max_size=63),
             min_size=1,
             max_size=3),
         'tld': st.text(alphabet=ascii_lowercase, min_size=2, max_size=63)}

    if userpass:
        d['user'] = st.text(alphabet=ascii_lowercase + digits)
        d['passwd'] = st.text(alphabet=ascii_lowercase + digits)
    if port:
        d['port'] = st.integers(min_value=0, max_value=65535)
    if url:
        d['url'] = st.lists(st.text())
    if query:
        d['query'] = st.lists(st.tuples(
            st.text(alphabet=ascii_lowercase, min_size=1),
            st.text(alphabet=ascii_lowercase + digits, min_size=1)))
    if fragment:
        d['fragment'] = st.text()

    urlst = strategy(st.fixed_dictionaries(d))

    return urlst.map(to_url).filter(max_len)
Example #24
def segmented_tensor(min_dim=1, max_dim=4, dtype=np.float32, is_sorted=True,
                     elements=None, **kwargs):
    data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
    return data_dims_.flatmap(lambda data_dims: st.tuples(
        arrays(data_dims, dtype, elements),
        segment_ids(data_dims[0], is_sorted=is_sorted),
    ))
Example #25
def test_keys_and_default_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers())),
        lambda x: x[0] != x[1]
    )
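
The complementary property (not in the source, but it follows from how st.shared works): when the key is the same, both positions of the tuple see the same drawn value.

import hypothesis.strategies as st
from hypothesis import given

@given(st.tuples(st.shared(st.integers(), key="k"),
                 st.shared(st.integers(), key="k")))
def test_same_key_is_shared(pair):
    assert pair[0] == pair[1]
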
Example #26
def arrays_BOP(min_B=1, max_B=10, min_O=1, max_O=100, min_P=1, max_P=5):
    shapes = tuples(
        lengths(lo=min_B, hi=max_B),
        lengths(lo=min_O, hi=max_O),
        lengths(lo=min_P, hi=max_P),
    )
    return shapes.flatmap(ndarrays_of_shape)
Example #27
def test_different_keys_are_not_shared():
    find_any(
        st.tuples(
            st.shared(st.integers(), key=1),
            st.shared(st.integers(), key=2)),
        lambda x: x[0] != x[1]
    )
Example #28
def test_can_find_nested():
    x = find(
        st.recursive(st.booleans(), lambda x: st.tuples(x, x)),
        lambda x: isinstance(x, tuple) and isinstance(x[0], tuple)
    )

    assert x == ((False, False), False)
def bucket_name():
    return strategies.tuples(
        subscription_id(),
        customer_id(),
    ).map(
        lambda (s, c): signup.get_bucket_name(s, c)
    )
Example #30
def _gen_test_add_padding(with_pad_data=True,
                          is_remove=False):
    def gen_with_size(args):
        lengths, inner_shape = args
        data_dim = [sum(lengths)] + inner_shape
        lengths = np.array(lengths, dtype=np.int32)
        if with_pad_data:
            return st.tuples(
                st.just(lengths),
                hu.arrays(data_dim),
                hu.arrays(inner_shape),
                hu.arrays(inner_shape))
        else:
            return st.tuples(st.just(lengths), hu.arrays(data_dim))

    min_len = 4 if is_remove else 0
    lengths = st.lists(
        st.integers(min_value=min_len, max_value=10),
        min_size=0,
        max_size=5)
    inner_shape = st.lists(
        st.integers(min_value=1, max_value=3),
        min_size=0,
        max_size=2)
    return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
Example #31
    sets(sampled_from((1, 2))), lambda t: len(t) == 2)

test_subset_of_ten_is_sometimes_empty = define_test(sets(integers(1, 10)),
                                                    lambda t: len(t) == 0)

test_mostly_sensible_floats = define_test(floats(), lambda t: t + 1 > t)

test_mostly_largish_floats = define_test(floats(),
                                         lambda t: t + 1 > 1,
                                         condition=lambda x: x > 0)

test_ints_can_occasionally_be_really_large = define_test(
    integers(), lambda t: t >= 2**63)

test_mixing_is_sometimes_distorted = define_test(
    lists(booleans() | tuples()),
    distorted,
    condition=lambda x: len(set(map(type, x))) == 2,
)

test_mixes_2_reasonably_often = define_test(
    lists(booleans() | tuples()),
    lambda x: len(set(map(type, x))) > 1,
    condition=bool)

test_partial_mixes_3_reasonably_often = define_test(
    lists(booleans() | tuples() | just(u"hi")),
    lambda x: 1 < len(set(map(type, x))) < 3,
    condition=bool,
)
Example #32
def coordinates_to_edges_with_points(
        coordinates: Strategy[Coordinate]) -> Strategy[Tuple[Edge, Point]]:
    return strategies.tuples(coordinates_to_ported_edges(coordinates),
                             coordinates_to_ported_points(coordinates))
Example #33
 def extend(base):
     return base.flatmap(lambda strat: st.tuples(
         strat, st.sampled_from([
             st.tuples,
             st.frozensets,
         ]))).map(lambda strat__extend: strat__extend[1](strat__extend[0]))
Example #34
def _strategy(codes, context):
    """
    Convert an SRE regex parse tree to a strategy that generates strings
    matching the regex represented by that parse tree.

    `codes` is either a list of SRE regex element representations or a single
    element representation. Each element is a tuple of an element code (as a
    string) and parameters. E.g. the regex 'ab[0-9]+' compiles to the
    following elements:

        [
            ('literal', 97),
            ('literal', 98),
            ('max_repeat', (1, 4294967295, [
                ('in', [
                    ('range', (48, 57))
                ])
            ]))
        ]

    The function recursively traverses the regex element tree and converts
    each element to a strategy that generates strings matching that element.

    Context stores
    1. List of groups (for backreferences)
    2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior
       of various inner strategies)
    """
    if not isinstance(codes, tuple):
        # List of codes
        strategies = []

        i = 0
        while i < len(codes):
            if codes[i][0] == sre.LITERAL and not (context.flags
                                                   & re.IGNORECASE):
                # Merge subsequent "literals" into one `just()` strategy
                # that generates corresponding text if no IGNORECASE
                j = i + 1
                while j < len(codes) and codes[j][0] == sre.LITERAL:
                    j += 1

                strategies.append(
                    hs.just(u''.join([
                        six.unichr(charcode) for (_, charcode) in codes[i:j]
                    ])))

                i = j
            else:
                strategies.append(_strategy(codes[i], context))
                i += 1

        return hs.tuples(*strategies).map(u''.join)
    else:
        # Single code
        code, value = codes
        if code == sre.LITERAL:
            # Regex 'a' (single char)
            c = six.unichr(value)
            if context.flags & re.IGNORECASE:
                return hs.sampled_from([c.lower(), c.upper()])
            else:
                return hs.just(c)

        elif code == sre.NOT_LITERAL:
            # Regex '[^a]' (negation of a single char)
            c = six.unichr(value)
            blacklist = set([c.lower(), c.upper()]) \
                if context.flags & re.IGNORECASE else [c]
            return hs.characters(blacklist_characters=blacklist)

        elif code == sre.IN:
            # Regex '[abc0-9]' (set of characters)
            charsets = value

            builder = CharactersBuilder(negate=charsets[0][0] == sre.NEGATE,
                                        flags=context.flags)

            for charset_code, charset_value in charsets:
                if charset_code == sre.NEGATE:
                    # Regex '[^...]' (negation)
                    pass
                elif charset_code == sre.LITERAL:
                    # Regex '[a]' (single char)
                    builder.add_chars(six.unichr(charset_value))
                elif charset_code == sre.RANGE:
                    # Regex '[a-z]' (char range)
                    low, high = charset_value
                    for x in six.moves.range(low, high + 1):
                        builder.add_chars(six.unichr(x))
                elif charset_code == sre.CATEGORY:
                    # Regex '[\w]' (char category)
                    builder.add_category(charset_value)
                else:
                    raise he.InvalidArgument('Unknown charset code: %s' %
                                             charset_code)

            return builder.strategy

        elif code == sre.ANY:
            # Regex '.' (any char)
            if context.flags & re.DOTALL:
                return hs.characters()
            else:
                return hs.characters(blacklist_characters="\n")

        elif code == sre.AT:
            # Regexes like '^...', '...$', '\bfoo', '\Bfoo'
            if value == sre.AT_END:
                return hs.one_of(hs.just(u''), hs.just(u'\n'))
            return hs.just('')

        elif code == sre.SUBPATTERN:
            # Various groups: '(...)', '(:...)' or '(?P<name>...)'
            old_flags = context.flags
            if HAS_SUBPATTERN_FLAGS:
                context.flags = (context.flags | value[1]) & ~value[2]

            strat = _strategy(value[-1], context)

            context.flags = old_flags

            if value[0]:
                context.groups[value[0]] = strat
                strat = hs.shared(strat, key=value[0])

            return strat

        elif code == sre.GROUPREF:
            # Regex '\\1' or '(?P=name)' (group reference)
            return hs.shared(context.groups[value], key=value)

        elif code == sre.ASSERT:
            # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)
            return _strategy(value[1], context)

        elif code == sre.ASSERT_NOT:
            # Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)
            return hs.just('')

        elif code == sre.BRANCH:
            # Regex 'a|b|c' (branch)
            return hs.one_of(
                [_strategy(branch, context) for branch in value[1]])

        elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]:
            # Regexes 'a?', 'a*', 'a+' and their non-greedy variants (repeaters)
            at_least, at_most, regex = value
            if at_most == 4294967295:
                at_most = None
            return hs.lists(_strategy(regex, context),
                            min_size=at_least,
                            max_size=at_most).map(''.join)

        elif code == sre.GROUPREF_EXISTS:
            # Regex '(?(id/name)yes-pattern|no-pattern)' (if group exists selection)
            return hs.one_of(
                _strategy(value[1], context),
                _strategy(value[2], context) if value[2] else hs.just(u''),
            )

        else:
            raise he.InvalidArgument('Unknown code point: %s' % repr(code))
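
This internal helper is the machinery behind Hypothesis's public regex strategy; from user code the same behaviour is reachable through st.from_regex (a sketch, independent of the internals above):

import re
import hypothesis.strategies as st
from hypothesis import given

@given(st.from_regex(r"ab[0-9]+"))
def test_generated_strings_contain_a_match(s):
    # Without fullmatch=True, from_regex guarantees the string *contains*
    # a match for the pattern, not that the whole string matches.
    assert re.search(r"ab[0-9]+", s) is not None
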
import pytest
import numpy as np
from hypothesis.core import given
import hypothesis.strategies as st
from hypothesis.extra.numpy import floating_dtypes

from crowddynamics.core.vector2D import length
from crowddynamics.simulation.agents import Circular, ThreeCircle
import crowddynamics.testing as testing


@pytest.mark.parametrize('exclude_zero', (None, 'near', 'exact'))
@given(dtype=st.none() | floating_dtypes(),
       shape=st.none() | st.integers(0, 2)
       | st.tuples(st.integers(0, 2), st.integers(0, 2)))
def test_reals(exclude_zero, shape, dtype):
    strategy = testing.reals(min_value=None,
                             max_value=None,
                             exclude_zero=exclude_zero,
                             shape=shape,
                             dtype=dtype)
    for _ in range(10):
        strategy.example()
    assert True


@given(v=testing.unit_vectors())
def test_unit_vectors(v):
    assert np.isclose(length(v), 1.0)

Example #36
def broadcastable_shape(draw,
                        shape,
                        min_dim=0,
                        max_dim=5,
                        min_side=1,
                        max_side=5,
                        allow_singleton=True):
    """ Hypothesis search strategy: given an array shape, generate a
    broadcast-compatible shape, specifying the minimum/maximum permissible
    number of dimensions in the resulting shape (both values are inclusive).

    Examples from this strategy shrink towards the input shape.

    Parameters
    ----------
    shape : Tuple[int, ...]
        The shape with which the generated shape must be broadcast-compatible.

    min_dim : int, optional (default=0)
        The smallest number of dimensions that the broadcast-compatible
        shape can possess.

    max_dim : int, optional (default=5)
        The largest number of dimensions that the broadcast-compatible
        shape can possess.

    min_side : int, optional (default=1)
        The smallest size that a new, leading dimension can
        possess.

    max_side : int, optional (default=5)
        The largest size that a new, leading dimension can
        possess.

    allow_singleton : bool, optional (default=True)
        If `False` the aligned dimensions of the broadcastable
        shape cannot contain singleton dimensions (i.e. size-1
        dimensions aligned with larger dimensions)

    Returns
    -------
    hypothesis.searchstrategy.SearchStrategy[Tuple[int, ...]]

    Notes
    -----
    `draw` is a parameter reserved by hypothesis, and should not be specified
    by the user.

    Examples
    --------
    >>> for i in range(5):
    ...    print(broadcastable_shape(shape=(2, 3)).example())
    (1, 3)
    ()
    (2, 3)
    (5, 2, 3)
    (8, 5, 1, 3)
    (3, )
    """
    _check_min_max(0, min_dim, max_dim, "dim")
    _check_min_max(1, min_side, max_side, "side")

    if not isinstance(shape, Sequence) or any(
            i < 0 or not isinstance(i, Integral) for i in shape):
        raise ValueError(
            "`shape` must be a sequence of non-negative integers. Got: {}".
            format(shape))

    ndim = draw(st.integers(min_dim - len(shape),
                            max_dim - len(shape))) + len(shape)
    n_aligned = min(len(shape), ndim)
    n_leading = ndim - n_aligned
    if n_aligned > 0:
        if allow_singleton:
            aligned_dims = draw(
                st.tuples(*(st.sampled_from((size, 1))
                            for size in shape[-n_aligned:])))
        else:
            aligned_dims = shape[-n_aligned:]
    else:
        aligned_dims = tuple()

    leading_dims = draw(
        st.tuples(*(st.integers(min_side, max_side)
                    for i in range(n_leading))))
    return leading_dims + aligned_dims
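
Since the function takes draw, it is presumably decorated with st.composite; under that assumption a property test for it could look like this sketch (the concrete shape (2, 3) is arbitrary):

import numpy as np
import hypothesis.strategies as st
from hypothesis import given

@given(st.data())
def test_drawn_shapes_really_broadcast(data):
    shape = (2, 3)
    compatible = data.draw(broadcastable_shape(shape))
    # np.broadcast raises ValueError for incompatible shapes, so simply
    # constructing it is the assertion.
    np.broadcast(np.empty(shape), np.empty(compatible))
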
Example #37
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio


def test_flags():
    assert MplexFlag.NEW_STREAM == 0
    assert MplexFlag.MESSAGE == 1
    assert MplexFlag.CLOSE == 2


def test_create_mplex_protocol():
    mplex_protocol = MplexProtocol(*get_connection_mock("127.0.0.1", 7777))
    assert isinstance(mplex_protocol, MplexProtocol)


@given(fragmented_message=tuples(integers(min_value=0),
                                 sampled_from(MplexFlag), binary()))
async def test_read_message(fragmented_message: Tuple[StreamID, MplexFlag,
                                                      StreamData]):
    reader_mock, writer_mock = get_connection_mock("127.0.0.1", 7777)
    stream_id, flag, data = fragmented_message

    mplex_protocol = MplexProtocol(reader_mock, writer_mock)
    encoded_message = (uvarint.encode(stream_id << 3 | flag) +
                       uvarint.encode(len(data)) + data)
    reader_mock.feed_data(encoded_message)
    message = await mplex_protocol.read_message()
    assert isinstance(message, MplexMessage)
    assert message.stream_id == stream_id
    assert message.flag == flag
    assert message.data == data
Example #38
def to_pairs(strategy: Strategy[Domain]) -> Strategy[Tuple[Domain, Domain]]:
    return strategies.tuples(strategy, strategy)
Example #39
def _strategy(codes, context, pattern):
    """Convert SRE regex parse tree to strategy that generates strings matching
    that regex represented by that parse tree.

    `codes` is either a list of SRE regex elements representations or a
    particular element representation. Each element is a tuple of element code
    (as string) and parameters. E.g. regex 'ab[0-9]+' compiles to following
    elements:

        [
            (LITERAL, 97),
            (LITERAL, 98),
            (MAX_REPEAT, (1, 4294967295, [
                (IN, [
                    (RANGE, (48, 57))
                ])
            ]))
        ]

    The function recursively traverses the regex element tree and converts each
    element to a strategy that generates strings matching that element.

    Context stores
    1. List of groups (for backreferences)
    2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect
       behavior of various inner strategies)

    """
    def recurse(codes):
        return _strategy(codes, context, pattern)

    if isinstance(pattern, text_type):
        empty = u''
        to_char = hunichr
    else:
        empty = b''
        to_char = int_to_byte
        binary_char = st.binary(min_size=1, max_size=1)

    if not isinstance(codes, tuple):
        # List of codes
        strategies = []

        i = 0
        while i < len(codes):
            if codes[i][0] == sre.LITERAL and \
                    not context.flags & re.IGNORECASE:
                # Merge subsequent "literals" into one `just()` strategy
                # that generates corresponding text if no IGNORECASE
                j = i + 1
                while j < len(codes) and codes[j][0] == sre.LITERAL:
                    j += 1

                if i + 1 < j:
                    strategies.append(st.just(
                        empty.join([to_char(charcode)
                                    for (_, charcode) in codes[i:j]])
                    ))

                    i = j
                    continue

            strategies.append(recurse(codes[i]))
            i += 1

        # We handle this separately at the top level, but some regex can
        # contain empty lists internally, so we need to handle this here too.
        if not strategies:
            return st.just(empty)

        if len(strategies) == 1:
            return strategies[0]
        return st.tuples(*strategies).map(empty.join)
    else:
        # Single code
        code, value = codes
        if code == sre.LITERAL:
            # Regex 'a' (single char)
            c = to_char(value)
            if context.flags & re.IGNORECASE and \
                    re.match(c, c.swapcase(), re.IGNORECASE) is not None:
                # We do the explicit check for swapped-case matching because
                # eg 'ß'.upper() == 'SS' and ignorecase doesn't match it.
                return st.sampled_from([c, c.swapcase()])
            return st.just(c)

        elif code == sre.NOT_LITERAL:
            # Regex '[^a]' (negation of a single char)
            c = to_char(value)
            blacklist = set(c)
            if context.flags & re.IGNORECASE and \
                    re.match(c, c.swapcase(), re.IGNORECASE) is not None:
                blacklist |= set(c.swapcase())
            if isinstance(pattern, text_type):
                return st.characters(blacklist_characters=blacklist)
            else:
                return binary_char.filter(lambda c: c not in blacklist)

        elif code == sre.IN:
            # Regex '[abc0-9]' (set of characters)
            negate = value[0][0] == sre.NEGATE
            if isinstance(pattern, text_type):
                builder = CharactersBuilder(negate, context.flags)
            else:
                builder = BytesBuilder(negate, context.flags)

            for charset_code, charset_value in value:
                if charset_code == sre.NEGATE:
                    # Regex '[^...]' (negation)
                    # handled by builder = CharactersBuilder(...) above
                    pass
                elif charset_code == sre.LITERAL:
                    # Regex '[a]' (single char)
                    builder.add_char(charset_value)
                elif charset_code == sre.RANGE:
                    # Regex '[a-z]' (char range)
                    low, high = charset_value
                    for char_code in hrange(low, high + 1):
                        builder.add_char(char_code)
                elif charset_code == sre.CATEGORY:
                    # Regex '[\w]' (char category)
                    builder.add_category(charset_value)
                else:  # pragma: no cover
                    # Currently there are no known code points other than
                    # handled here. This code is just future proofing
                    raise AssertionError('Unknown charset code: %s'
                                         % charset_code)
            return builder.strategy

        elif code == sre.ANY:
            # Regex '.' (any char)
            if isinstance(pattern, text_type):
                if context.flags & re.DOTALL:
                    return st.characters()
                return st.characters(blacklist_characters=u'\n')
            else:
                if context.flags & re.DOTALL:
                    return binary_char
                return binary_char.filter(lambda c: c != b'\n')

        elif code == sre.AT:
            # Regexes like '^...', '...$', '\bfoo', '\Bfoo'
            # An empty string (or newline) will match the token itself, but
            # we don't and can't check the position (eg '%' at the end)
            return st.just(empty)

        elif code == sre.SUBPATTERN:
            # Various groups: '(...)', '(:...)' or '(?P<name>...)'
            old_flags = context.flags
            if HAS_SUBPATTERN_FLAGS:  # pragma: no cover
                # This feature is available only in specific Python versions
                context.flags = (context.flags | value[1]) & ~value[2]

            strat = _strategy(value[-1], context, pattern)

            context.flags = old_flags

            if value[0]:
                strat = update_group(value[0], strat)

            return strat

        elif code == sre.GROUPREF:
            # Regex '\\1' or '(?P=name)' (group reference)
            return reuse_group(value)

        elif code == sre.ASSERT:
            # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)
            return recurse(value[1])

        elif code == sre.ASSERT_NOT:
            # Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)
            return st.just(empty)

        elif code == sre.BRANCH:
            # Regex 'a|b|c' (branch)
            return st.one_of([recurse(branch) for branch in value[1]])

        elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]:
            # Regexes 'a?', 'a*', 'a+' and their non-greedy variants
            # (repeaters)
            at_least, at_most, subregex = value
            if at_most == sre.MAXREPEAT:
                at_most = None
            if at_least == 0 and at_most == 1:
                return st.just(empty) | recurse(subregex)
            return st.lists(recurse(subregex),
                            min_size=at_least,
                            max_size=at_most).map(empty.join)

        elif code == sre.GROUPREF_EXISTS:
            # Regex '(?(id/name)yes-pattern|no-pattern)'
            # (if group exists choice)
            return group_conditional(
                value[0],
                recurse(value[1]),
                recurse(value[2]) if value[2] else st.just(empty),
            )

        else:  # pragma: no cover
            # Currently there are no known code points other than handled here.
            # This code is just future proofing
            raise AssertionError('Unknown code point: %s' % repr(code))
def valid_slices(draw, batch_shape):
    """Samples a legal (possibly empty) slice for shape batch_shape."""
    # We build up a list of slices in several stages:
    # 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
    # 2. Decide whether or not to add an Ellipsis; if using, updating the indexing
    #    used (e.g. batch_shape[i]) to identify safe bounds.
    # 3. Choose 0 to [remaining_dims] slices to come last.
    # 4. Decide where to insert between 0 and 4 newaxis slices.
    batch_shape = tf.TensorShape(batch_shape).as_list()
    slices = []
    batch_rank = len(batch_shape)
    arbitrary_slices = hps.tuples(
        hps.one_of(hps.just(None), hps.integers(min_value=-100,
                                                max_value=100)),
        hps.one_of(hps.just(None), hps.integers(min_value=-100,
                                                max_value=100)),
        hps.one_of(
            hps.just(None),
            hps.integers(min_value=-100, max_value=100).filter(
                lambda x: x != 0))).map(lambda tup: slice(*tup))

    # 1. Choose 0 to batch_rank slices to come before an Ellipsis (...).
    nslc_before_ellipsis = draw(hps.integers(min_value=0,
                                             max_value=batch_rank))
    for i in range(nslc_before_ellipsis):
        slc = draw(
            hps.one_of(hps.integers(min_value=0, max_value=batch_shape[i] - 1),
                       arbitrary_slices))
        slices.append(slc)
    # 2. Decide whether or not to add an Ellipsis; if using, updating the indexing
    #    used (e.g. batch_shape[i]) to identify safe bounds.
    has_ellipsis = draw(hps.booleans().map(lambda x: (Ellipsis, x)))[1]
    nslc_after_ellipsis = draw(
        hps.integers(min_value=0, max_value=batch_rank - nslc_before_ellipsis))
    if has_ellipsis:
        slices.append(Ellipsis)
        remain_start, remain_end = (batch_rank - nslc_after_ellipsis,
                                    batch_rank)
    else:
        remain_start = nslc_before_ellipsis
        remain_end = nslc_before_ellipsis + nslc_after_ellipsis
    # 3. Choose 0 to [remaining_dims] slices to come last.
    for i in range(remain_start, remain_end):
        slc = draw(
            hps.one_of(hps.integers(min_value=0, max_value=batch_shape[i] - 1),
                       arbitrary_slices))
        slices.append(slc)
    # 4. Decide where to insert between 0 and 4 newaxis slices.
    newaxis_positions = draw(
        hps.lists(hps.integers(min_value=0, max_value=len(slices)),
                  max_size=4))
    for i in sorted(newaxis_positions, reverse=True):
        slices.insert(i, tf.newaxis)
    slices = tuple(slices)
    # Since `d[0]` ==> `d.__getitem__(0)` instead of `d.__getitem__((0,))`;
    # and similarly `d[:3]` ==> `d.__getitem__(slice(None, 3))` instead of
    # `d.__getitem__((slice(None, 3),))`; it is useful to test such scenarios.
    if len(slices) == 1 and draw(hps.booleans()):
        # Sometimes only a single item non-tuple.
        return slices[0]
    return slices
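
Because valid_slices also takes draw, it is presumably an hps.composite strategy; a sketch of how the produced slices might be exercised (the batch_shape below is arbitrary):

import tensorflow as tf
from hypothesis import given, strategies as hps

@given(hps.data())
def test_valid_slices_are_accepted_by_tensorflow(data):
    batch_shape = [3, 4, 5]
    slc = data.draw(valid_slices(batch_shape))
    # A "legal" slice should index a tensor of that shape without raising.
    tf.zeros(batch_shape)[slc]
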
Example #41
def test_groupref_not_shared_between_regex():
    # If group references were (incorrectly!) shared between regexes, this
    # would fail, as there would only be one reference.
    st.tuples(st.from_regex("(a)\\1"), st.from_regex("(b)\\1")).example()
Example #42
    )
    assert np.logical_or(x == 0, x == 1).all()
    assert np.count_nonzero(x) in (1, len(x) - 1)


@flaky(max_runs=50, min_passes=1)
def test_can_minimize_float_arrays():
    x = minimal(nps.arrays(float, 50), lambda t: t.sum() >= 1.0)
    assert x.sum() in (1, 50)


class Foo(object):
    pass


foos = st.tuples().map(lambda _: Foo())


def test_can_create_arrays_of_composite_types():
    arr = minimal(nps.arrays(object, 100, foos))
    for x in arr:
        assert isinstance(x, Foo)


@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
    arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
    assert arr.shape == ()
    assert arr.dtype == np.dtype(object)
    assert arr.item() == x
Example #43
 def make_option(strategy, funcs):
     return strat.tuples(strategy, wrapper(strategy), funcs)
def test_or_errors_when_given_non_strategy():
    bools = tuples(booleans())
    with pytest.raises(ValueError):
        bools | u"foo"
def test_bad_branching_tree():
    tree = st.deferred(
        lambda: st.tuples(tree, tree, tree, tree, tree) | st.integers())
    assert minimal(tree) == 0
    assert minimal(tree, lambda x: isinstance(x, tuple)) == (0,) * 5
def test_binary_tree():
    tree = st.deferred(lambda: st.integers() | st.tuples(tree, tree))

    assert minimal(tree) == 0
    assert minimal(tree, lambda x: isinstance(x, tuple)) == (0, 0)
Example #47
NUMPY_TEST_CASES = [

    # ArgSpec(args=['a', 'b', 'transpose_a', 'transpose_b', 'adjoint_a',
    #               'adjoint_b', 'a_is_sparse', 'b_is_sparse', 'name'],
    #         varargs=None,
    #         keywords=None,
    #         defaults=(False, False, False, False, False, False, None))
    TestCase('linalg.matmul', [matmul_compatible_pair()]),

    # ArgSpec(args=['a', 'name', 'conjugate'], varargs=None, keywords=None)
    TestCase('linalg.matrix_transpose', [single_array(min_dims=2)]),

    # ArgSpec(args=['a', 'x', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('math.polygamma', [
        hps.tuples(hps.integers(0, 10).map(float), positive_floats()),
    ]),

    # ArgSpec(args=['arr', 'weights', 'minlength',
    #               'maxlength', 'dtype', 'name'],
    #         varargs=None,
    #         keywords=None,
    #         defaults=(None, None, None, tf.int32, None))
    TestCase('math.bincount', []),

    # ArgSpec(args=['chol', 'rhs', 'name'], varargs=None, keywords=None,
    #         defaults=(None,))
    TestCase('linalg.cholesky_solve', [
        matmul_compatible_pair(x_strategy=psd_matrix().map(np.linalg.cholesky))
    ]),
Example #48
    pruned = list(iteritems(to_inv_odict(iteritems(to_inv_odict(items)))))
    assume(len(pruned) >= len(items) // 2)
    return pruned


ondupbehaviors = (IGNORE, OVERWRITE, RAISE)
mutable_bidict_types = (bidict, loosebidict, looseorderedbidict, orderedbidict)
bidict_types = mutable_bidict_types + (frozenbidict, frozenorderedbidict)
mutating_methods_by_arity = {
    0: ('clear', 'popitem'),
    1: ('__delitem__', 'pop', 'setdefault', 'move_to_end'),
    2: ('__setitem__', 'pop', 'put', 'forceput', 'setdefault'),
    -1: ('update', 'forceupdate'),
}
immutable = integers()
itemlists = lists(tuples(immutable, immutable))
inititems = itemlists.map(prune_dup_vals)


@pytest.mark.parametrize('B', bidict_types)
@given(init=inititems)
def test_equality(B, init):
    b = B(init)
    d = dict(init)
    o = OrderedDict(init)
    oi = to_inv_odict(iteritems(o))
    di = OrderedDict(oi)
    assert b == d
    assert b == o
    assert not b != d
    assert not b != o
def test_mutually_recursive_tuples_draw_nothing():
    x = st.deferred(lambda: st.tuples(y))
    y = st.tuples(x)

    assert_actually_empty(x)
    assert_actually_empty(y)
Example #50
        try:
            veripy.int(*arg_list)
        except AssertionError:
            pass

    @given(arg_list=st.lists(min_size=1,
                             elements=st.lists(min_size=1,
                                               elements=st.floats())))
    def test_list(arg_list):
        try:
            veripy.list(*arg_list)
        except AssertionError:
            pass

    @given(arg_list=st.lists(min_size=1,
                             elements=st.lists(min_size=1,
                                               elements=st.tuples())))
    def test_tuple(arg_list):
        try:
            veripy.tuple(*arg_list)
        except AssertionError:
            pass

    if __name__ == "__main__":
        tests = [test_str, test_float, test_int, test_list, test_tuple]
        for t in tests:
            print("running ({} times) - {}".format(test_samples, t.__name__))
            t()
            print("success")
        print("done")
def test_self_tuple_draws_nothing():
    x = st.deferred(lambda: st.tuples(x))
    assert_actually_empty(x)
Example #52
import astroid
import nose
from python_ta.typecheck.base import TypeFail
from hypothesis import given, settings, assume, HealthCheck
import tests.custom_hypothesis_support as cs
import hypothesis.strategies as hs
settings.load_profile("pyta")


@given(hs.integers(), hs.lists(hs.tuples(cs.comparator_operator_equality, hs.integers()), min_size=1))
def test_compare_equality(left_value, operator_value_tuples):
    """Test type setting of Compare node representing comparators: ''==', '!=', '>=', '<=', 'is'. """
    program = f'{repr(left_value)}'
    for operator, value in operator_value_tuples:
        program += ' ' + ' '.join([operator, repr(value)])
    module, _ = cs._parse_text(program)
    compare_node = list(module.nodes_of_class(astroid.Compare))[0]
    assert compare_node.inf_type.getValue() == bool


@given(hs.lists(cs.comparator_operator, min_size=3), cs.numeric_list(min_size=4))
def test_compare_inequality(operators, values):
    """Test type setting of Compare node representing comparators: '<', '>'. """
    a = list(zip(operators, values))
    pre = []
    for operator, value in a:
        pre.append(str(operator))
        pre.append(str(value))
    # pre_input_program = [str(elt) for tuple in zip(operator, values) for elt in tuple]
    program = f'{str(values[0])} ' + ' '.join(pre)
    module, _ = cs._parse_text(program)
class TestExtension(TestCase):
    def setUp(self):
        self.thisP = RTPPayload_TTML()

    @given(
        st.tuples(st.text(), st.sampled_from(SUPPORTED_ENCODINGS),
                  st.booleans()).filter(
                      lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_init(self, data):
        doc, encoding, bom = data
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, doc, encoding, bom)

        self.assertEqual(newP.reserved, reservedBits)
        self.assertEqual(newP.userDataWords, doc)
        self.assertEqual(newP._encoding, encoding)
        self.assertEqual(newP._bom, bom)

    @given(st.text(),
           st.text().filter(lambda x: x not in SUPPORTED_ENCODINGS),
           st.booleans())
    def test_init_invalidEnc(self, doc, enc, bom):
        reservedBits = bytearray(b'\x00\x00')

        with self.assertRaises(AttributeError):
            RTPPayload_TTML(reservedBits, doc, enc, bom)

    def test_reserved_default(self):
        self.assertEqual(self.thisP.reserved, bytearray(b'\x00\x00'))

    def test_reserved_notBytes(self):
        with self.assertRaises(AttributeError):
            self.thisP.reserved = ""

    @given(st.binary().filter(lambda x: x != bytearray(b'\x00\x00')))
    def test_reserved_invalid(self, value):
        with self.assertRaises(ValueError):
            self.thisP.reserved = bytearray(value)

    def test_userDataWords_default(self):
        self.assertEqual(self.thisP.userDataWords, "")

    @given(st.text().filter(lambda x: len(utfEncode(x, "UTF-8")) < 2**16))
    def test_userDataWords(self, doc):
        self.thisP.userDataWords = doc
        self.assertEqual(self.thisP.userDataWords, doc)

    def test_userDataWords_invalidType(self):
        with self.assertRaises(AttributeError):
            self.thisP.userDataWords = 0

    def test_userDataWords_tooLong(self):
        doc = ""
        for x in range(2**16):
            doc += "a"
        with self.assertRaises(LengthError):
            self.thisP.userDataWords = doc

    @given(
        st.tuples(st.text(), st.sampled_from(SUPPORTED_ENCODINGS),
                  st.booleans()).filter(
                      lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_userDataWords_encodings(self, data):
        doc, encoding, bom = data
        payload = RTPPayload_TTML(userDataWords=doc,
                                  encoding=encoding,
                                  bom=bom)
        self.assertEqual(payload.userDataWords, doc)
        self.assertEqual(payload._userDataWords, utfEncode(doc, encoding, bom))

    def test_eq(self):
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, "")

        self.assertEqual(newP, self.thisP)

    def test_bytearray_default(self):
        expected = bytearray(4)
        self.assertEqual(bytes(self.thisP), expected)

        newP = RTPPayload_TTML().fromBytearray(expected)
        self.assertEqual(newP, self.thisP)

    @given(
        st.binary(min_size=2, max_size=2).filter(lambda x: x != b'\x00\x00'))
    def test_fromBytearray_invalidLen(self, length):
        bArray = bytearray(4)
        bArray[2:4] = length

        with self.assertRaises(LengthError):
            RTPPayload_TTML().fromBytearray(bArray)

    @given(st.text())
    def test_toBytearray(self, doc):
        self.thisP.userDataWords = doc

        bDoc = utfEncode(doc)
        expected = bytearray(2)
        expected += int(len(bDoc)).to_bytes(2, byteorder='big')
        expected += bDoc

        self.assertEqual(expected, self.thisP.toBytearray())

    @given(st.text())
    def test_fromBytearray(self, doc):
        expected = RTPPayload_TTML(userDataWords=doc)

        bDoc = utfEncode(doc)
        bArray = bytearray(2)
        bArray += int(len(bDoc)).to_bytes(2, byteorder='big')
        bArray += bDoc

        self.thisP.fromBytearray(bArray)

        self.assertEqual(expected, self.thisP)
Example #54
def coordinates_to_bounding_boxes_with_points(
        coordinates: Strategy[Coordinate]
) -> Strategy[Tuple[BoundingBox, Point]]:
    return strategies.tuples(coordinates_to_bounding_boxes(coordinates),
                             coordinates_to_ported_points(coordinates))
def test_tuples_raise_error_on_bad_kwargs():
    with pytest.raises(TypeError):
        ds.tuples(stuff=u'things')
    (ds.text, {
        u'alphabet': ds.sampled_from(u'abc')
    }),
)
def test_produces_valid_examples_from_keyword(fn, kwargs):
    fn(**kwargs).example()


@fn_test((ds.one_of, (1, )))
def test_validates_args(fn, args):
    with pytest.raises(InvalidArgument):
        fn(*args)


@fn_test(
    (ds.one_of, (ds.booleans(), ds.tuples(ds.booleans()))),
    (ds.one_of, (ds.booleans(), )),
    (ds.text, ()),
    (ds.binary, ()),
    (ds.builds, (lambda x, y: x + y, ds.integers(), ds.integers())),
)
def test_produces_valid_examples_from_args(fn, args):
    fn(*args).example()


def test_tuples_raise_error_on_bad_kwargs():
    with pytest.raises(TypeError):
        ds.tuples(stuff=u'things')


def test_streaming_streams():
Example #57

@settings(deadline=None)
@given(
    pos_dframe=data_frames(
        index=range_indexes(min_size=5),
        columns=columns(["X1", "y1", "X2", "y2"], dtype=float),
        rows=st.tuples(
            st.floats(min_value=1,
                      max_value=10,
                      allow_nan=False,
                      allow_infinity=False),
            st.floats(min_value=1,
                      max_value=10,
                      allow_nan=False,
                      allow_infinity=False),
            st.floats(min_value=1,
                      max_value=10,
                      allow_nan=False,
                      allow_infinity=False),
            st.floats(min_value=1,
                      max_value=10,
                      allow_nan=False,
                      allow_infinity=False),
        ),
    ),
    tol=st.floats(min_value=0.01, max_value=4.98),
)
def test_close_single_contact(pos_dframe, tol):

    idx = pd.MultiIndex.from_product(
        [["bpart1", "bpart2"], ["X", "y"]],
Example #58
def test_can_create_arrays_of_tuples():
    arr = minimal(
        nps.arrays(object, 10, st.tuples(st.integers(), st.integers())),
        lambda x: all(t0 != t1 for t0, t1 in x),
    )
    assert all(a in ((1, 0), (0, 1)) for a in arr)
Example #59
def graph_builder(draw,
                  node_data=st.fixed_dictionaries({}),
                  edge_data=st.fixed_dictionaries({}),
                  node_keys=None,
                  min_nodes=0,
                  max_nodes=25,
                  min_edges=0,
                  max_edges=None,
                  graph_type=nx.Graph,
                  self_loops=False,
                  connected=True):
    """
    A :mod:`hypothesis` strategy for building networkx graphs.

    Parameters
    ----------
    draw
        For internal hypothesis use.
    node_data: `hypothesis.SearchStrategy[dict]`
        The strategy to use to generate node attributes. Must generate a
        mapping.
    edge_data: `hypothesis.SearchStrategy[dict]`
        The strategy to use to generate edge attributes. Must generate a
        mapping.
    node_keys: `hypothesis.SearchStrategy[collections.abc.Hashable]` or None
        The strategy to use to generate node keys. Must generate a Hashable. If
        `None`, node keys will be taken from range(0, number_of_nodes).
    min_nodes: int
        The minimum number of nodes that should be in the generated graph. Must
        not be negative.
    max_nodes: int or None
        The maximum number of nodes that should be in the generated graph. Must
        be larger than `min_nodes`. `None` means no upper limit.
    min_edges: int
        The minimum number of edges that should be in the generated graph.
        Fewer edges may be added if the produced graph contains too few nodes.
    max_edges: int or None
        The maximum number of edges that should be in the generated graph.
        `None` means no upper limit. Note that if `connected` is True more edges
        may be added.
    graph_type: class
        The type of graph that should be created.
    self_loops: bool
        Whether self loops (edges between a node and itself) are allowed.
    connected: bool
        If `True`, the generated graph is guaranteed to be a single (weakly)
        connected component.

    Raises
    ------
    ValueError
        - If `min_nodes` < 0.
        - If `max_nodes` < `min_nodes`
        - If the graph has to be connected, but `max_edges` is too small
          relative to `max_nodes`.

    Returns
    -------
    networkx.Graph
        The created graph. The actual type is determined by the argument
        `graph_type`.
    """
    if min_nodes < 0:
        raise ValueError('min_nodes can not be negative')
    if max_nodes is not None and min_nodes > max_nodes:
        raise ValueError('min_nodes must be less than or equal to max_nodes')
    if max_nodes is not None and max_edges is not None and connected and max_edges < max_nodes - 1:
        raise ValueError("It's impossible to create a connected graph of {}"
                         "nodes with less than {} edges".format(
                             max_nodes, max_nodes - 1))

    graph = graph_type()
    is_multigraph = graph.is_multigraph()
    is_directed = graph.is_directed()

    # Draw node indices and their associated data
    node_datas = draw(
        st.lists(node_data, min_size=min_nodes, max_size=max_nodes))

    if not node_datas:
        return graph

    graph.add_nodes_from(enumerate(node_datas))

    # Draw a set of initial edges that guarantee that graph will be connected.
    # We use the invariant that all nodes < n_idx are connected. We create an
    # edge between n_idx and one of those before so that all nodes < n_idx + 1
    # are now connected.
    if connected:
        # Shrink towards high index, so shrink to the path graph. Otherwise
        # it'll shrink to the star graph.
        initial_edges = [
            draw(
                st.tuples(
                    st.integers(-(n_idx - 1), 0).map(lambda x: -x),
                    st.just(n_idx))) for n_idx in range(1, len(graph))
        ]
        graph.add_edges_from(initial_edges)

    # Now for the mess. The maximum number of edges possible depends on the
    # graph type.
    if not is_multigraph:
        # Multi(Di)Graphs can make an infinite number of edges. For everything
        # else we clamp the range to (0, max_possible_edges)
        max_possible_edges = len(graph) * (len(graph) - 1)
        if is_directed:
            max_possible_edges *= 2
        if self_loops:
            max_possible_edges += len(graph)
        if max_edges is None or max_edges > max_possible_edges:
            max_edges = max_possible_edges
    if max_edges is not None:
        # Correct for number of edges already made if graph is connected.
        # This may mean we added more edges than originally allowed.
        max_edges -= len(graph.edges)
        if max_edges < 0:
            max_edges = 0

    # Likewise for min_edges
    # We already added some edges, so subtract those.
    min_edges -= len(graph.edges)
    if min_edges < 0:
        min_edges = 0
    elif max_edges is not None and min_edges > max_edges:
        min_edges = max_edges

    def edge_filter(edge):
        """
        Helper function to decide whether the edge between idx and jdx can still
        be added to graph.
        """
        # <= because self loops
        idx, jdx = edge
        return ((not graph.has_edge(idx, jdx) or is_multigraph)
                and (idx <= jdx or is_directed) and (idx != jdx or self_loops))

    # We need to sample a number of items from options, these items are
    # possibly not unique. In addition, we need to draw the same number of
    # items from edge_data and associate the two. To top it off, uniqueness
    # is defined by the content of the first element of the tuple.
    edges = st.lists(st.tuples(
        st.integers(min_value=0, max_value=len(graph) - 1),
        st.integers(min_value=0, max_value=len(graph) - 1),
    ).filter(edge_filter),
                     unique=not is_multigraph,
                     min_size=min_edges,
                     max_size=max_edges)
    graph.add_edges_from(draw(edges))

    edge_datas = draw(
        st.lists(edge_data,
                 min_size=len(graph.edges),
                 max_size=len(graph.edges)))
    for edge, data in zip(graph.edges, edge_datas):
        graph.edges[edge].update(data)

    if node_keys is not None:
        new_idxs = draw(
            st.sets(node_keys, min_size=len(graph), max_size=len(graph)))
        graph = nx.relabel_nodes(graph, dict(zip(list(graph), list(new_idxs))))

    return graph
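
Assuming graph_builder is wrapped with st.composite (implied by the draw parameter), a test using it might look like this sketch:

import networkx as nx
from hypothesis import given

@given(graph_builder(graph_type=nx.Graph, max_nodes=8, connected=True))
def test_generated_graphs_are_connected(graph):
    # The empty graph is allowed (min_nodes=0) and has no components to check.
    if len(graph) > 0:
        assert nx.is_connected(graph)
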
Example #60
    rbox1 = InputArgRef()
    rbox2 = InputArgRef()
    rbox3 = InputArgRef()
    after_optimizer = FakeOptimizer(cpu=FakeCPU({rbox1: cls}))
    deserialize_optimizer_knowledge(after_optimizer,
                                    FakeStorage(numb_state.create_numbering()),
                                    [InputArgInt(), rbox2, rbox1, rbox3],
                                    liveboxes)
    assert box1 in after_optimizer.constant_classes
    assert box2 not in after_optimizer.constant_classes
    assert box3 not in after_optimizer.constant_classes


box_strategy = strategies.builds(InputArgInt) | strategies.builds(InputArgRef)
tuples = strategies.tuples(box_strategy, strategies.booleans()).filter(
    lambda (box, known_class): isinstance(box, InputArgRef) or not known_class)
boxes_known_classes = strategies.lists(tuples, min_size=1)


@given(boxes_known_classes)
def test_random_class_knowledge(boxes_known_classes):
    cls = FakeClass()
    dct1 = {
        box: InstancePtrInfo(known_class=cls)
        for box, known_class in boxes_known_classes if known_class
    }
    optimizer = FakeOptimizer(dct1)

    refboxes = [
        box for (box, _) in boxes_known_classes