def applicatives(substrat):
    return one_of(
        strat.lists(substrat),
        strat.lists(substrat).map(tuple),
        substrat.map(lenses.identity.Identity),
        apply_strat(maybes(), substrat),
    )
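A minimal, self-contained sketch of the same composition pattern using only hypothesis.strategies (the name `containers` and the test are illustrative, not from the original source):

import hypothesis.strategies as st
from hypothesis import given

def containers(substrat):
    # Wrap a base strategy in several container shapes and let
    # one_of choose among them on each draw.
    return st.one_of(
        st.lists(substrat),
        st.lists(substrat).map(tuple),
        st.frozensets(substrat),
    )

@given(containers(st.integers()))
def test_containers_are_sized(value):
    assert len(value) >= 0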
Example #2
def _tensor_splits(draw):
    lengths = draw(st.lists(st.integers(1, 5), min_size=1, max_size=10))
    batch_size = draw(st.integers(1, 5))
    element_pairs = [
        (batch, r) for batch in range(batch_size) for r in range(len(lengths))
    ]
    perm = draw(st.permutations(element_pairs))
    perm = perm[:-1]  # skip one range
    ranges = [[(0, 0)] * len(lengths) for _ in range(batch_size)]
    offset = 0
    for pair in perm:
        ranges[pair[0]][pair[1]] = (offset, lengths[pair[1]])
        offset += lengths[pair[1]]

    data = draw(st.lists(
        st.floats(min_value=-1.0, max_value=1.0),
        min_size=offset,
        max_size=offset
    ))

    key = draw(st.permutations(range(offset)))

    return (
        np.array(data).astype(np.float32), np.array(ranges),
        np.array(lengths), np.array(key).astype(np.int64)
    )
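For context, a stripped-down sketch of the @st.composite pattern this example depends on (`length_prefixed_data` is a made-up name): `draw` pulls values from other strategies inside one function, so later draws can depend on earlier ones.

import hypothesis.strategies as st
from hypothesis import given

@st.composite
def length_prefixed_data(draw):
    # Draw the lengths first, then draw exactly enough data to cover them.
    lengths = draw(st.lists(st.integers(1, 5), min_size=1, max_size=10))
    total = sum(lengths)
    data = draw(st.lists(st.floats(-1.0, 1.0), min_size=total, max_size=total))
    return lengths, data

@given(length_prefixed_data())
def test_data_covers_lengths(sample):
    lengths, data = sample
    assert len(data) == sum(lengths)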
Example #3
def extended_textual_header(draw, count=-1, end_text_stanza_probability=None):
    if count == -1:
        if end_text_stanza_probability is not None:
            raise ValueError("end_text_stanza_probability {} does not make sense when count is unspecified ({})"
                             .format(end_text_stanza_probability, count))
        count = draw(integers(min_value=0, max_value=10))
        headers = draw(lists(stanza(),
                             min_size=count,
                             max_size=count))
        headers.append(END_TEXT_STANZA)
        return headers

    if count == 0:
        return []

    # For counted headers, the end-text stanza is optional. We generate it
    # with the specified probability.
    if end_text_stanza_probability is None:
        end_text_stanza_probability = 0.5

    random = draw(randoms())
    x = random.uniform(0.0, 1.0)
    num_data_stanzas = count - 1 if x <= end_text_stanza_probability else count

    headers = draw(lists(stanza(),
                   min_size=num_data_stanzas,
                   max_size=num_data_stanzas))

    if num_data_stanzas == count - 1:
        headers.append(END_TEXT_STANZA)

    assert len(headers) == count

    return headers
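The "optional trailing element" logic above can be isolated into a small sketch (all names here are stand-ins; it assumes count >= 1):

import hypothesis.strategies as st
from hypothesis import given

@st.composite
def counted_with_optional_end(draw, count=5, end_probability=0.5):
    # Decide whether to reserve the last slot for an end marker.
    include_end = draw(st.floats(0.0, 1.0)) <= end_probability
    n = count - 1 if include_end else count
    items = draw(st.lists(st.integers(), min_size=n, max_size=n))
    if include_end:
        items.append("END")
    return items

@given(counted_with_optional_end(count=5))
def test_count_is_exact(items):
    assert len(items) == 5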
Example #4
def array_values(draw):
    matching = draw(st.lists(elements=st.binary(min_size=2, max_size=2), min_size=1))
    non_matching = draw(
        st.lists(
            elements=st.binary(min_size=2, max_size=2),
            min_size=1).filter(lambda x: x != matching))
    return (matching, non_matching)
Example #5
def _gen_test_add_padding(with_pad_data=True,
                          is_remove=False):
    def gen_with_size(args):
        lengths, inner_shape = args
        data_dim = [sum(lengths)] + inner_shape
        lengths = np.array(lengths, dtype=np.int32)
        if with_pad_data:
            return st.tuples(
                st.just(lengths),
                hu.arrays(data_dim),
                hu.arrays(inner_shape),
                hu.arrays(inner_shape))
        else:
            return st.tuples(st.just(lengths), hu.arrays(data_dim))

    min_len = 4 if is_remove else 0
    lengths = st.lists(
        st.integers(min_value=min_len, max_value=10),
        min_size=0,
        max_size=5)
    inner_shape = st.lists(
        st.integers(min_value=1, max_value=3),
        min_size=0,
        max_size=2)
    return st.tuples(lengths, inner_shape).flatmap(gen_with_size)
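A compact sketch of the flatmap idiom this example is built on (names invented here): draw the sizes first, then build a second strategy whose shape depends on them.

import hypothesis.strategies as st
from hypothesis import given

lengths_strat = st.lists(st.integers(0, 10), min_size=0, max_size=5)
dependent = lengths_strat.flatmap(
    lambda lengths: st.tuples(
        st.just(lengths),
        st.lists(st.floats(-1, 1), min_size=sum(lengths), max_size=sum(lengths)),
    )
)

@given(dependent)
def test_data_matches_lengths(args):
    lengths, data = args
    assert len(data) == sum(lengths)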
Example #6
def dataset(draw, num_dims, trace_headers=None):
    dims = draw(lists(integers(min_value=0, max_value=10),
                      min_size=num_dims,
                      max_size=num_dims))
    text_header = draw(textual_reel_header())
    binary_header = draw(header(BinaryReelHeader))
    ext_text_headers = draw(extended_textual_header(binary_header.num_extended_textual_headers))

    # TODO: We need to look at making sure `sample_interval` is set
    # appropriately/consistently. See the docstring for
    # TraceHeader.sample_interval for more information.
    #
    # TODO: We need to generate datasets with both trace header revision types.
    if trace_headers is None:
        if num_dims == 1:
            # one-dimensional datasets have only one trace
            trace_headers = [draw(header(TraceHeaderRev1))]
        else:
            trace_headers = draw(lists(header(TraceHeaderRev1),
                                       min_size=2,
                                       max_size=100))

    return InMemoryDataset(dims,
                           text_header,
                           binary_header,
                           ext_text_headers,
                           trace_headers)
Example #7
def _dense_features_map(draw, num_records, **kwargs):
    float_lengths = draw(
        st.lists(
            st.integers(min_value=1, max_value=10),
            min_size=num_records,
            max_size=num_records
        )
    )

    total_length = sum(float_lengths)

    float_keys = draw(
        st.lists(
            st.integers(min_value=1, max_value=100),
            min_size=total_length,
            max_size=total_length,
            unique=True
        )
    )

    float_values = draw(
        st.lists(st.floats(),
                 min_size=total_length,
                 max_size=total_length)
    )

    return [float_lengths, float_keys, float_values]
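The unique=True flag used above guarantees pairwise-distinct elements; a one-line property sketch (illustrative test name):

import hypothesis.strategies as st
from hypothesis import given

@given(st.lists(st.integers(), unique=True))
def test_unique_lists_have_no_repeats(keys):
    assert len(keys) == len(set(keys))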
Example #8
def define_list_strategy(specifier, settings):
    # Note: `self` below refers to the enclosing dispatcher object;
    # this snippet is a method fragment lifted from its class.
    if not specifier:
        return st.lists(max_size=0)
    else:
        with settings:
            return st.lists(
                st.one_of(*[self(s, settings) for s in specifier]))
Example #9
    def _vector_simple_float(self, func, type, data):
        func = always_inline(func)

        size = rffi.sizeof(rffi.DOUBLE)
        myjitdriver = JitDriver(greens=[], reds='auto', vectorize=True)
        def f(bytecount, va, vb, vc):
            i = 0
            while i < bytecount:
                myjitdriver.jit_merge_point()
                a = raw_storage_getitem(type, va, i)
                b = raw_storage_getitem(type, vb, i)
                c = func(a, b)
                raw_storage_setitem(vc, i, rffi.cast(type, c))
                i += size

        la = data.draw(st.lists(st.floats(), min_size=10, max_size=150))
        l = len(la)
        lb = data.draw(st.lists(st.floats(), min_size=l, max_size=l))

        rawstorage = RawStorage()
        va = rawstorage.new(la, type)
        vb = rawstorage.new(lb, type)
        vc = rawstorage.new(None, type, size=l)
        self.meta_interp(f, [l*size, va, vb, vc], vec=True)

        for i in range(l):
            c = raw_storage_getitem(type, vc, i*size)
            r = rffi.cast(type, func(la[i], lb[i]))
            assert isclose(r, c)

        rawstorage.clear()
Example #10
def tensors(n, min_dim=1, max_dim=4, dtype=np.float32, elements=None, **kwargs):
    dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
    return dims_.flatmap(
        lambda dims: st.lists(
            arrays(dims, dtype, elements),
            min_size=n,
            max_size=n))
Example #11
def _inputs(draw):
    N = draw(st.integers(min_value=0, max_value=5))
    D = draw(st.integers(min_value=1, max_value=5))
    # N, D, data, lambda1, lambda2
    return (
        N,
        D,
        draw(st.lists(
            min_size=N * D,
            max_size=N * D,
            elements=st.one_of(
                st.floats(min_value=-10, max_value=1 - TOLERANCE),
                st.floats(min_value=1 + TOLERANCE, max_value=10))
        )),
        draw(st.lists(
            elements=st.one_of(
                st.floats(min_value=-2, max_value=-TOLERANCE),
                st.floats(min_value=TOLERANCE, max_value=2)),
            min_size=D,
            max_size=D,
        )),
        draw(st.lists(
            elements=st.floats(min_value=-2, max_value=2),
            min_size=D,
            max_size=D,
        )),
    )
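The union-of-ranges trick above excludes a band around 1.0. A self-contained sketch of the same idea (TOL stands in for the example's TOLERANCE constant):

import hypothesis.strategies as st
from hypothesis import given

TOL = 1e-3  # stand-in for the example's TOLERANCE constant

away_from_one = st.one_of(
    st.floats(min_value=-10, max_value=1 - TOL),
    st.floats(min_value=1 + TOL, max_value=10),
)

@given(away_from_one)
def test_gap_is_respected(x):
    assert not (1 - TOL < x < 1 + TOL)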
Example #12
def _lists_pairs_with_duplication(draw):
    # pylint: disable=too-many-locals
    n = draw(st.integers(min_value=3, max_value=LISTS_MAX_SIZE))
    indexes = st.integers(min_value=0, max_value=n - 1)
    keys = draw(st.lists(IMMUTABLES, min_size=n, max_size=n))
    vals = draw(st.lists(IMMUTABLES, min_size=n, max_size=n))
    fwd = OrderedDict(izip(keys, vals))
    inv = OrderedDict(izip(vals, keys))
    which_to_dup = draw(RAND).choice((1, 2, 3))
    should_dup_key = which_to_dup in (1, 3)
    should_dup_val = which_to_dup in (2, 3)
    should_add_dup_key = should_dup_key and len(fwd) < n
    should_add_dup_val = should_dup_val and len(inv) < n
    if not should_add_dup_key and not should_add_dup_val:
        return list(izip(keys, vals))
    if should_add_dup_key:
        dup_key_idx = draw(indexes)
        added_key = keys[dup_key_idx]
    else:
        added_key = draw(IMMUTABLES)
        assume(added_key not in fwd)
    if should_add_dup_val:
        dup_val_idx = draw(indexes)
        if should_add_dup_key:
            assume(dup_val_idx != dup_key_idx)
        added_val = vals[dup_val_idx]
    else:
        added_val = draw(IMMUTABLES)
        assume(added_val not in inv)
    insert_idx = draw(indexes)
    keys.insert(insert_idx, added_key)
    vals.insert(insert_idx, added_val)
    return list(izip(keys, vals))
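For readers new to assume(), a minimal sketch of how it discards draws that violate a precondition (as the example does for accidental duplicates):

from hypothesis import assume, given
import hypothesis.strategies as st

@given(st.lists(st.integers()))
def test_first_element_is_a_member(xs):
    assume(xs)  # reject the empty list instead of encoding min_size=1
    assert xs[0] in xs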
Example #13
def _create_hyp_nested_strategy(simple_class_strategy):
    """
    Create a recursive attrs class.

    Given a strategy for building (simpler) classes, create and return
    a strategy for building classes that have the simpler class as an
    attribute.
    """
    # Use a tuple strategy to combine simple attributes and an attr class.
    def just_class(tup):
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=attr.Factory(tup[1])))
        return _create_hyp_class(combined_attrs)

    def list_of_class(tup):
        default = attr.Factory(lambda: [tup[1]()])
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=default))
        return _create_hyp_class(combined_attrs)

    def dict_of_class(tup):
        default = attr.Factory(lambda: {"cls": tup[1]()})
        combined_attrs = list(tup[0])
        combined_attrs.append(attr.ib(default=default))
        return _create_hyp_class(combined_attrs)

    return st.one_of(st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(just_class),
                     st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(list_of_class),
                     st.tuples(st.lists(simple_attrs), simple_class_strategy)
                     .map(dict_of_class))
Example #14
def url(schemes=[], userpass=False, port=False, url=False, query=False,
        fragment=False):

    if schemes:
        scheme = st.just(random.choice(schemes))
    else:
        scheme = st.text(alphabet=ascii_lowercase+digits, min_size=2)

    d = {'scheme': scheme,
         'domain': st.lists(
             st.text(
                 alphabet=ascii_lowercase + digits,
                 min_size=1,
                 max_size=63),
             min_size=1,
             max_size=3),
         'tld': st.text(alphabet=ascii_lowercase, min_size=2, max_size=63)}

    if userpass:
        d['user'] = st.text(alphabet=ascii_lowercase + digits)
        d['passwd'] = st.text(alphabet=ascii_lowercase + digits)
    if port:
        d['port'] = st.integers(min_value=0, max_value=65535)
    if url:
        d['url'] = st.lists(st.text())
    if query:
        d['query'] = st.lists(st.tuples(
            st.text(alphabet=ascii_lowercase, min_size=1),
            st.text(alphabet=ascii_lowercase + digits, min_size=1)))
    if fragment:
        d['fragment'] = st.text()

    urlst = strategy(st.fixed_dictionaries(d))

    return urlst.map(to_url).filter(max_len)
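A reduced sketch of the fixed_dictionaries pattern used above, with one strategy per key (keys here are illustrative):

import hypothesis.strategies as st
from hypothesis import given

host_parts = st.fixed_dictionaries({
    'scheme': st.sampled_from(['http', 'https']),
    'port': st.integers(min_value=0, max_value=65535),
})

@given(host_parts)
def test_host_parts(d):
    assert d['scheme'] in ('http', 'https')
    assert 0 <= d['port'] <= 65535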
Example #15
def samples_strat(draw):
  Sample = namedtuple('Sample', ['subject', 'id'])
  sample_strat = st.tuples(st.text(min_size=1), st.integers()).map(lambda x: Sample(*x))
  old_samples = draw(st.lists(sample_strat, min_size=1, max_size=10, unique_by=lambda x: x.id))
  new_samples = draw(st.lists(sample_strat, min_size=1, max_size=10, unique_by=lambda x: x.id))
  assume(len(set([s.id for s in (old_samples + new_samples)])) == len(old_samples + new_samples))
  assume(len(set([s.subject for s in (old_samples + new_samples)])) == len(old_samples + new_samples))
  return old_samples, new_samples
Example #16
def test_minimize_list_of_longish_lists():
    xs = find(
        lists(lists(booleans())),
        lambda x: len([t for t in x if any(t) and len(t) >= 3]) >= 10)
    assert len(xs) == 10
    for x in xs:
        assert len(x) == 3
        assert len([t for t in x if t]) == 1
Example #17
def test_drawing_many_near_boundary():
    ls = find(
        st.lists(st.recursive(
            st.booleans(),
            lambda x: st.lists(x, min_size=8, max_size=10).map(tuple),
            max_leaves=9)),
        lambda x: len(set(x)) >= 5)
    assert len(ls) == 5
Example #18
def write_pattern(draw, min_size=0):
    keys = draw(st.lists(st.integers(0, 1000), unique=True, min_size=1))
    values = draw(st.lists(st.integers(), unique=True, min_size=1))
    return draw(
        st.lists(
            st.tuples(st.sampled_from(keys), st.sampled_from(values)), min_size=min_size
        )
    )
Example #19
def test_resampling():
    x = minimal(
        st.lists(st.integers(), min_size=1).flatmap(
            lambda x: st.lists(st.sampled_from(x))
        ),
        lambda x: len(x) >= 10 and len(set(x)) == 1,
    )
    assert x == [0] * 10
Example #20
def test_can_find_sets_unique_by_incomplete_data():
    ls = find(
        lists(lists(integers(min_value=0), min_size=2), unique_by=max),
        lambda x: len(x) >= 10
    )
    assert len(ls) == 10
    assert sorted(list(map(max, ls))) == list(range(10))
    for v in ls:
        assert 0 in v
Example #21
def test_lists_of_incompatible_sizes_are_checked():
    s10 = lists(booleans(), min_size=10)
    s2 = lists(booleans(), max_size=9)

    x10 = s10.to_basic(some_template(s10))
    x2 = s2.to_basic(some_template(s2))
    with pytest.raises(BadData):
        s2.from_basic(x10)
    with pytest.raises(BadData):
        s10.from_basic(x2)
Example #22
def test_drawing_many_near_boundary():
    ls = find(
        st.lists(st.recursive(
            st.booleans(),
            lambda x: st.lists(x, min_size=8, max_size=10).map(tuple),
            max_leaves=9)),
        lambda x: len(set(x)) >= 5,
        settings=settings(max_examples=10000, database=None, max_shrinks=2000)
    )
    assert len(ls) == 5
Example #23
def test_diff_values_array(data):
    a = data.draw(st.lists(elements=st.integers(min_value=0), min_size=1))
    b = data.draw(st.lists(elements=st.integers(max_value=-1), min_size=1))
    c = data.draw(st.lists(elements=st.floats(min_value=1e8), min_size=1))
    d = data.draw(st.lists(elements=st.floats(max_value=-1e8), min_size=1))
    # TODO: Figure out a way to include 0 in lists (arrays)

    assert are_values_different(a, b)
    assert are_values_different(c, d)
    assert not are_values_different(a, a)
Example #24
def test_large_data_will_fail_a_health_check():
    @given(st.lists(
           st.lists(st.text(average_size=100), average_size=100),
           average_size=100))
    @settings(database=None, buffer_size=1000)
    def test(x):
        pass

    with raises(FailedHealthCheck) as e:
        test()
    assert 'allowable size' in e.value.args[0]
Example #25
def create_dummy_rate_file(rate_file):
    rates = lists(floats(min_value=0.00001, allow_nan=False, allow_infinity=False), min_size=0, max_size=100).example()
    max_year = datetime.datetime.now().year
    date_times = lists(datetimes(min_year=2016, max_year=max_year), min_size=len(rates),
                       max_size=len(rates)).map(sorted).example()
    with open(rate_file, 'a') as f:
        for date_time, rate in zip(date_times, rates):
            writer = csv.writer(f, lineterminator='\n')
            market_data = [date_time.strftime("%Y-%m-%d %H:%M:%S"), rate]
            writer.writerow(market_data)
    return rates, date_times
Example #26
def valid_phase_type_generator(draw, k):
    ptg = []
    for i in range(k):
        ptg.append(draw(lists(floats(min_value=0, max_value=1),
                              min_size=k, max_size=k)))
    ptg_array = np.array(ptg)
    ptg_totals = np.array(draw(lists(floats(min_value=0, max_value=1),
                                     min_size=k, max_size=k))).reshape(-1, 1)
    with np.errstate(invalid='ignore'):
        ptg_norm = (np.nan_to_num(ptg_array / ptg_array.sum(axis=1).reshape(-1, 1))) \
            * ptg_totals
    return ptg_norm
Example #27
def _one_hots():
    index_size = st.integers(min_value=1, max_value=5)
    lengths = st.lists(
        elements=st.integers(min_value=0, max_value=5))
    return st.tuples(index_size, lengths).flatmap(
        lambda x: st.tuples(
            st.just(x[0]),
            st.just(x[1]),
            st.lists(
                elements=st.integers(min_value=0, max_value=x[0] - 1),
                min_size=sum(x[1]),
                max_size=sum(x[1]))))
Example #28
def test_can_track_morphers():
    t = Tracker()
    assert t.track(Morpher(0, 0)) == 1
    assert t.track(Morpher(0, 0)) == 2

    m1 = Morpher(0, 1)
    m2 = Morpher(0, 1)

    m1.become(s.lists(s.integers()))
    m2.become(s.lists(s.integers()))

    assert t.track(m1) == 1
    assert t.track(m2) == 2
Example #29
def monoids():
    base = many_one_of(
        strat.integers(),
        strat.lists(strat.integers()),
        strat.lists(strat.integers()).map(tuple),
        strat.text(),
        strat.integers().map(MonoidProduct),
        strat.dictionaries(strat.integers(), strat.integers()),
    )

    def recurse(substrat):
        return stream_apply_strat(maybes(), substrat)

    return strat.recursive(base, recurse)
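The st.recursive call above bottoms out at the base strategy and wraps with the recurse rule; a minimal standalone sketch of the same mechanism:

import hypothesis.strategies as st
from hypothesis import given

nested = st.recursive(
    st.integers(),
    lambda children: st.lists(children, max_size=3),
    max_leaves=10,
)

@given(nested)
def test_nested_is_int_or_list(value):
    assert isinstance(value, (int, list))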
Example #30
def test_drawing_many_near_boundary():
    target = 4

    ls = minimal(
        st.lists(st.recursive(
            st.booleans(),
            lambda x: st.lists(
                x, min_size=2 * (target - 1), max_size=2 * target
            ).map(tuple),
            max_leaves=2 * target - 1)),
        lambda x: len(set(x)) >= target,
        timeout_after=None
    )
    assert len(ls) == target
Example #31
import warnings

import numpy as np

import hypothesis.extra.numpy as nps
import hypothesis.strategies as st
from hypothesis import given

int16s = nps.from_dtype(np.dtype("int16"))


bounded_lists = st.lists(int16s, max_size=1).filter(lambda x: sum(x) < 256)


problems = st.tuples(
    bounded_lists, bounded_lists, bounded_lists, bounded_lists, bounded_lists,
)


@given(problems)
def test(p):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert sum([x for sub in p for x in sub], np.int16(0)) < 5 * 256
Example #32
    with BinPickleFile(file, direct=True) as bpf:
        assert len(bpf.entries) == 1
        e = bpf.entries[0]
        assert e.dec_length == a.nbytes
        assert e.enc_length == a.nbytes
        b2 = bpf._read_buffer(e)
        assert b2.nbytes == e.dec_length
        a2 = np.frombuffer(b2, dtype='i4')
        assert len(a2) == len(a)
        assert all(a2 == a)
        del a2
        del b2


@settings(deadline=None)
@given(st.lists(st.binary()),
       st.one_of(RW_CODECS))
def test_write_encoded_arrays(arrays, codec):
    with TemporaryDirectory('.test', 'binpickle-') as path:
        file = Path(path) / 'data.bpk'

        with BinPickler.compressed(file, codec) as w:
            for a in arrays:
                w._write_buffer(a)
            w._finish_file()

        with BinPickleFile(file) as bpf:
            assert not bpf.find_errors()
            assert len(bpf.entries) == len(arrays)
            for e, a in zip(bpf.entries, arrays):
                try:
Example #33
class TestUnit(TestCase):


    @given(st.lists(sfst.get_shape_2d(), min_size=1), sfst.get_labels(min_size=1)) # type: ignore
    def test_from_element_items(self,
            shapes: tp.List[tp.Tuple[int, int]],
            labels: tp.Sequence[tp.Hashable]
            ) -> None:

        # use shapes to get coordinates, where the max shape + 1 is the final shape
        shape = tuple(np.array(shapes).max(axis=0) + 1)

        def values() -> tp.Iterator[tp.Tuple[tp.Tuple[int, int], tp.Hashable]]:
            for idx, coord in enumerate(shapes):
                yield coord, labels[idx % len(labels)]

        post = TypeBlocks.from_element_items(values(), shape=shape, dtype=object)
        self.assertEqual(post.shape, shape)


    @given(st.integers(max_value=sfst.MAX_COLUMNS)) # type: ignore
    def test_from_zero_size_shape(self, value: int) -> None:

        for shape in ((0, value), (value, 0)):
            post = TypeBlocks.from_zero_size_shape(shape=shape)
            self.assertEqual(post.shape, shape)


    @given(sfst.get_type_blocks())  # type: ignore
    def test_basic_attributes(self, tb: TypeBlocks) -> None:
        self.assertEqual(len(tb.dtypes), tb.shape[1])
        self.assertEqual(len(tb.shapes), len(tb.mloc))
        self.assertEqual(tb.copy().shape, tb.shape)
        self.assertEqual(tb.ndim, 2)
        self.assertEqual(tb.unified, len(tb.mloc) <= 1)

        if tb.shape[0] > 0 and tb.shape[1] > 0:
            self.assertTrue(tb.size > 0)
            self.assertTrue(tb.nbytes > 0)
        else:
            self.assertTrue(tb.size == 0)
            self.assertTrue(tb.nbytes == 0)



    @given(sfst.get_type_blocks())  # type: ignore
    def test_values(self, tb: TypeBlocks) -> None:
        values = tb.values
        self.assertEqual(values.shape, tb.shape)
        self.assertEqual(values.dtype, tb._row_dtype)


    @given(sfst.get_type_blocks())  # type: ignore
    def test_axis_values(self, tb: TypeBlocks) -> None:
        # this test found a flaw in axis_values when dealing with axis 1 and unified, 1D type blocks
        for axis in (0, 1):
            for reverse in (True, False):
                post = tuple(tb.axis_values(axis=axis, reverse=reverse))
                for idx, array in enumerate(post):
                    self.assertTrue(len(array) == tb.shape[axis])
                    if axis == 0 and not reverse: # columns
                        self.assertTrue(array.dtype == tb.dtypes[idx])
                    elif axis == 0 and reverse: # columns
                        self.assertTrue(array.dtype == tb.dtypes[tb.shape[1] - 1 - idx])
                    else:
                        # NOTE: only checking kind because found cases where byte-order deviates
                        self.assertTrue(array.dtype.kind == tb._row_dtype.kind)


    @given(sfst.get_type_blocks())  # type: ignore
    def test_element_items(self, tb: TypeBlocks) -> None:
        # NOTE: this found a flaw in _extract_iloc where we tried to optimize selection with a unified array
        count = 0
        for k, v in tb.element_items():
            count += 1
            v_extract = tb.iloc[k]
            self.assertEqualWithNaN(v, v_extract)
        self.assertEqual(count, tb.size)

    @given(sfst.get_type_blocks())  # type: ignore
    def test_reblock_signature(self, tb: TypeBlocks) -> None:
        post = tuple(tb._reblock_signature())
        unique_dtypes = np.unique(tb.dtypes)
        # the reblock signature must have at least as many entries as types
        self.assertTrue(len(post) >= len(unique_dtypes))
        # sum of column widths is equal to columns in shape
        self.assertEqual(sum(p[1] for p in post), tb.shape[1])


    @given(sfst.get_type_blocks(), sfst.get_type_blocks())  # type: ignore
    def test_block_compatible(self, tb1: TypeBlocks, tb2: TypeBlocks) -> None:

        for axis in (None, 0, 1):
            post1 = tb1.block_compatible(tb2, axis)
            post2 = tb2.block_compatible(tb1, axis)
            # either direction gets the same result
            self.assertTrue(post1 == post2)
            # if the shapes are different, they cannot be block compatible
            if axis is None and tb1.shape != tb2.shape:
                self.assertFalse(post1)


    @given(sfst.get_type_blocks(), sfst.get_type_blocks())  # type: ignore
    def test_reblock_compatible(self, tb1: TypeBlocks, tb2: TypeBlocks) -> None:

        post1 = tb1.reblock_compatible(tb2)
        post2 = tb2.reblock_compatible(tb1)
        # either direction gets the same result
        self.assertTrue(post1 == post2)
        # if the shapes are different, they cannot be block compatible
        if tb1.shape[1] != tb2.shape[1]:
            self.assertFalse(post1)

    @unittest.skip('pending')
    def test_concatenate_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_consolidate_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_reblock(self) -> None:
        pass

    @unittest.skip('pending')
    def test_consolidate(self) -> None:
        pass

    @unittest.skip('pending')
    def test_resize_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_group(self) -> None:
        pass

    @unittest.skip('pending')
    def test_ufunc_axis_skipna(self) -> None:
        pass

    @unittest.skip('pending')
    def test_display(self) -> None:
        pass

    @unittest.skip('pending')
    def test_cols_to_slice(self) -> None:
        pass

    @unittest.skip('pending')
    def test_indices_to_contiguous_pairs(self) -> None:
        pass

    @unittest.skip('pending')
    def test_all_block_slices(self) -> None:
        pass

    @unittest.skip('pending')
    def test_key_to_block_slices(self) -> None:
        pass

    @unittest.skip('pending')
    def test_mask_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_astype_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_shift_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_assign_blocks_from_keys(self) -> None:
        pass

    @unittest.skip('pending')
    def test_assign_blocks_from_boolean_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_slice_blocks(self) -> None:
        pass

    @unittest.skip('pending')
    def test_extract_array(self) -> None:
        pass

    @unittest.skip('pending')
    def test_extract(self) -> None:
        pass

    @unittest.skip('pending')
    def test_extract_iloc(self) -> None:
        pass

    @unittest.skip('pending')
    def test_extract_iloc_mask(self) -> None:
        pass

    @unittest.skip('pending')
    def test_extract_iloc_assign(self) -> None:
        pass

    @given(sfst.get_type_blocks(min_rows=1, min_columns=1))  # type: ignore
    def test_drop(self, tb: TypeBlocks) -> None:

        for row in range(tb.shape[0]):
            tb_post1 = tb.drop(row)
            self.assertTrue(tb_post1.shape[0] == tb.shape[0] - 1)

        if tb.shape[0] > 2:
            for start in range(1, tb.shape[0]):
                tb_post2 = tb.drop(slice(start, None))
                self.assertTrue(tb_post2.shape[0] == start)

        for col in range(tb.shape[1]):
            tb_post3 = tb.drop((None, col))
            self.assertTrue(tb_post3.shape[1] == tb.shape[1] - 1)

        if tb.shape[1] > 2:
            for start in range(1, tb.shape[1]):
                tb_post4 = tb.drop((None, slice(start, None)))
                self.assertTrue(tb_post4.shape[1] == start)



    @unittest.skip('pending')
    def test_getitem(self) -> None:
        pass

    @unittest.skip('pending')
    def test_ufunc_unary_operator(self) -> None:
        pass

    @unittest.skip('pending')
    def test_block_shape_slices(self) -> None:
        pass

    @unittest.skip('pending')
    def test_ufunc_binary_operator(self) -> None:
        pass

    @unittest.skip('pending')
    def test_transpose(self) -> None:
        pass

    @unittest.skip('pending')
    def test_isna(self) -> None:
        pass

    @unittest.skip('pending')
    def test_notna(self) -> None:
        pass

    @unittest.skip('pending')
    def test_fillna_leading(self) -> None:
        pass

    @unittest.skip('pending')
    def test_fillna_trailing(self) -> None:
        pass

    @unittest.skip('pending')
    def test_fillna_forward(self) -> None:
        pass

    @unittest.skip('pending')
    def test_fillna_backward(self) -> None:
        pass

    @unittest.skip('pending')
    def test_dropna_to_keep_locations(self) -> None:
        pass

    @unittest.skip('pending')
    def test_fillna(self) -> None:
        pass


    @given(sfst.get_type_blocks_aligned_array())  # type: ignore
    def test_append(self, tb_aligned_array: tp.Tuple[TypeBlocks, np.ndarray]) -> None:
        tb, aa = tb_aligned_array
        shape_original = tb.shape
        tb.append(aa)
        if aa.ndim == 1:
            self.assertEqual(tb.shape[1], shape_original[1] + 1)
        else:
            self.assertEqual(tb.shape[1], shape_original[1] + aa.shape[1])

    @given(sfst.get_type_blocks_aligned_type_blocks(min_size=2, max_size=2))  # type: ignore
    def test_extend(self, tbs: tp.Sequence[TypeBlocks]) -> None:
        front = tbs[0]
        back = tbs[1]
        shape_original = front.shape
        # extend with type blocks
        front.extend(back)
        self.assertEqual(front.shape,
                (shape_original[0], shape_original[1] + back.shape[1]))

        # extend with iterable of arrays
        front.extend(back._blocks)
        self.assertEqual(front.shape,
                (shape_original[0], shape_original[1] + back.shape[1] * 2))
Example #34
from hypothesis import given
from hypothesis.strategies import integers, lists


from les_iterables.selecting import reject_truthy


@given(items=lists(integers()))
def test_reject_truthy(items):
    result = list(reject_truthy(items))
    assert not any(result)
Example #35
    def sample_program_config(self, draw):
        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME",
                                                  "VALID"]))
        groups = draw(st.integers(min_value=1, max_value=3))
        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
        axis = draw(st.sampled_from([1]))
        filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
        filter_size = draw(st.integers(min_value=1, max_value=4))
        in_channel = groups * filter_channel
        out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4
        out_channel = groups * out_channel_factor
        batch_size = draw(st.integers(min_value=1, max_value=4))
        dilations = draw(
            st.lists(st.integers(min_value=1, max_value=2),
                     min_size=2,
                     max_size=2))
        paddings = draw(
            st.lists(st.integers(min_value=0, max_value=2),
                     min_size=2,
                     max_size=2))
        strides = draw(
            st.lists(st.integers(min_value=1, max_value=2),
                     min_size=2,
                     max_size=2))
        has_bias = draw(st.booleans())
        use_mkldnn = draw(st.booleans())
        epsilon = draw(st.floats(min_value=0.0, max_value=0.001))

        x_shape = [
            batch_size, in_channel, 64, 64
        ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel]
        w_shape = [out_channel, filter_channel, filter_size, filter_size]
        scale_shape = [out_channel]
        bias_shape = [out_channel]
        var_shape = [out_channel]
        mean_shape = [out_channel]

        def generate_conv2d_Input():
            return np.random.random(x_shape).astype(np.float32)

        def generate_conv2d_Filter():
            return np.random.random(w_shape).astype(np.float32)

        def generate_conv2d_Bias():
            return np.random.random(bias_shape).astype(np.float32)

        def generate_bn_Scale():
            return np.random.random(scale_shape).astype(np.float32)

        def generate_bn_Bias():
            return np.random.random(bias_shape).astype(np.float32)

        def generate_bn_Mean():
            return np.random.random(mean_shape).astype(np.float32)

        def generate_bn_Var():
            return np.random.random(var_shape).astype(np.float32)

        conv2d_op = OpConfig("conv2d",
                             inputs={
                                 "Input": ["conv2d_input"],
                                 "Filter": ["conv2d_weight"],
                             },
                             outputs={"Output": ["conv2d_out"]},
                             data_format=data_format,
                             dilations=dilations,
                             padding_algorithm=padding_algorithm,
                             groups=groups,
                             paddings=paddings,
                             strides=strides,
                             use_mkldnn=use_mkldnn,
                             has_bias=has_bias,
                             is_test=True)
        bn_op = OpConfig("batch_norm",
                         inputs={
                             "X": ["conv2d_out"],
                             "Scale": ["batch_norm_Scale"],
                             "Bias": ["batch_norm_Bias"],
                             "Mean": ["batch_norm_Mean"],
                             "Variance": ["batch_norm_Variance"],
                         },
                         outputs={
                             "Y": ["batch_norm_Y"],
                             "MeanOut": ["batch_norm_Mean"],
                             "VarianceOut": ["batch_norm_Variance"],
                             "SavedMean": ["batch_norm_SavedMean"],
                             "SavedVariance": ["batch_norm_SavedVariance"],
                             "ReserveSpace": ["batch_norm_ReserveSpace"],
                         },
                         epsilon=epsilon,
                         trainable_statistics=False,
                         data_layout=data_format,
                         is_test=True)
        if has_bias:
            conv2d_op.inputs["Bias"] = ["conv2d_bias"]
        ops = [conv2d_op, bn_op]

        program_config = ProgramConfig(
            ops=ops,
            inputs={
                "conv2d_input":
                TensorConfig(data_gen=partial(generate_conv2d_Input)),
            },
            weights={
                "conv2d_weight":
                TensorConfig(data_gen=partial(generate_conv2d_Filter)),
                "batch_norm_Scale":
                TensorConfig(data_gen=generate_bn_Scale),
                "batch_norm_Bias":
                TensorConfig(data_gen=generate_bn_Bias),
                "batch_norm_Mean":
                TensorConfig(data_gen=generate_bn_Mean),
                "batch_norm_Variance":
                TensorConfig(data_gen=generate_bn_Var),
            },
            outputs=["batch_norm_Y"])
        if has_bias:
            program_config.weights["conv2d_bias"] = TensorConfig(
                data_gen=partial(generate_conv2d_Bias))
        return program_config
Example #36
    pg_simple.PgSimple._connect = lambda x: 0
    return pg_simple.PgSimple(pool=None)


@given(where=s.sampled_from([
    ('', []),
    ('condition = %s', ["77"]),
    ('condition = %s and condition2 = %s', ["77", "88"]),
    ('condition = %s and condition2 like %s', ["77", "88"]),
]),
       order=s.one_of(
           s.tuples(s.sampled_from(IDENTIFIER_CHARS),
                    s.sampled_from(pg_simple.Order)), s.none()),
       limit=s.one_of(s.integers(), s.none()),
       offset=s.one_of(s.integers(), s.none()),
       fields=s.lists(s.sampled_from(IDENTIFIER_CHARS), min_size=1))
def test_select(db, fields, where, order, limit, offset):
    sql = db._select_sql('my_table', fields, where[0], order, limit,
                         offset).lower().strip()

    tokens = sqlparse.parse(sql)[0].flatten()
    valid_tokens = [t for t in tokens if t.ttype not in INVALID_TOKENS]

    assert valid_tokens.pop(0).value == 'select'
    for f in fields:
        assert valid_tokens.pop(0).value == f.lower()

    assert valid_tokens.pop(0).value == 'my_table'

    if where:
        condition, values = where
Example #37
def paths(draw):  # pragma: no cover
    # type: (DrawCallable) -> Sequence[Text]
    return draw(
        lists(text(min_size=1, alphabet=path_characters()), max_size=10))
Example #38
def _handle_state(self, state):
    opcode, value = state
    if opcode == re.sre_parse.LITERAL:
        return strats.just(hunichr(value))
    elif opcode == re.sre_parse.NOT_LITERAL:
        return strats.characters(blacklist_characters=hunichr(value))
    elif opcode == re.sre_parse.AT:
        return strats.just('')
    elif opcode == re.sre_parse.IN:
        if value[0][0] == re.sre_parse.NEGATE:
            candidates = []
            for v in value[1:]:
                candidates.extend(chain(*(self._handle_character_sets(v))))
            return strats.characters(blacklist_characters=candidates)
        else:
            candidates = []
            for v in value:
                candidates.extend(chain(*(self._handle_character_sets(v))))
            return strats.sampled_from(candidates)
    elif opcode == re.sre_parse.ANY:
        return strats.characters()
    elif opcode == re.sre_parse.BRANCH:
        branches = []
        for val in value[1]:
            branch = [self._handle_state(v) for v in val]
            branches.append(strategy_concat(branch))
        return strats.one_of(branches)
    elif opcode == re.sre_parse.SUBPATTERN:
        parts = []
        for part in value[1]:
            parts.append(self._handle_state(part))
        result = strategy_concat(parts)
        if value[0]:
            self.cache[value[0]] = result
            result = strats.shared(result, key=value[0])
        return result
    elif opcode == re.sre_parse.ASSERT:
        result = []
        for part in value[1]:
            result.append(self._handle_state(part))
        return strategy_concat(result)
    elif opcode == re.sre_parse.ASSERT_NOT:
        return strats.just('')
    elif opcode == re.sre_parse.GROUPREF:
        return strats.shared(self.cache[value], key=value)
    elif opcode == re.sre_parse.MIN_REPEAT:
        start_range, end_range, val = value
        result = []
        for v in val:
            part = strats.lists(
                self._handle_state(v),
                min_size=start_range,
                max_size=end_range).map(lambda x: u"".join(x))
            result.append(part)
        return strategy_concat(result)
    elif opcode == re.sre_parse.MAX_REPEAT:
        start_range, end_range, val = value
        result = []
        for v in val:
            part = strats.lists(
                self._handle_state(v),
                min_size=start_range,
                max_size=end_range).map(lambda x: u"".join(x))
            result.append(part)
        return strats.tuples(*result).map(lambda x: u"".join(x))
    else:
        raise NotImplementedError(opcode)
Example #39
def json_pointers() -> st.SearchStrategy[str]:
    """Return a strategy for strings in json-pointer format."""
    return st.lists(
        st.text(st.characters()).map(
            lambda p: "/" + p.replace("~", "~0").replace("/", "~1"))).map(
                "".join)
Example #40
test_can_produce_negative_infinity = define_test(floats(),
                                                 lambda x: x == float(u"-inf"))

test_can_produce_nan = define_test(floats(), math.isnan)

test_can_produce_floats_near_left = define_test(floats(0, 1),
                                                lambda t: t < 0.2)

test_can_produce_floats_near_right = define_test(floats(0, 1),
                                                 lambda t: t > 0.8)

test_can_produce_floats_in_middle = define_test(floats(0, 1),
                                                lambda t: 0.2 <= t <= 0.8)

test_can_produce_long_lists = define_test(lists(integers()), long_list, p=0.3)

test_can_produce_short_lists = define_test(lists(integers()),
                                           lambda x: len(x) <= 10)

test_can_produce_the_same_int_twice = define_test(
    lists(integers()), lambda t: len(set(t)) < len(t))


def distorted_value(x):
    c = collections.Counter(x)
    return min(c.values()) * 3 <= max(c.values())


def distorted(x):
    return distorted_value(map(type, x))
Example #41
def il_input_files(min_size=0):
    return lists(
        sampled_from([target['name'] for target in IL_INPUT_FILES.values()]),
        min_size=min_size,
        unique=True,
    )
Example #42
if sys.version_info < (2, 7):  # pragma: no cover
    import mock
    given = mock.MagicMock()  # noqa
    example = mock.MagicMock()  # noqa
    st = mock.MagicMock()  # noqa

_test_base = os.path.dirname(
    os.path.abspath(__file__)
).encode("UTF-8")

yml_st = st.recursive(
    st.floats(-1, 1) | st.booleans() |
    st.text() | st.none() | st.binary(),
    lambda children: st.lists(
        children, average_size=5, max_size=10
    ) | st.dictionaries(
        st.text(),
        children,
        average_size=5,
        max_size=10
    ),
    max_leaves=30
)


class TestTest(unittest.TestCase):
    def test_read_yml(self):
        """Test if reading yml files works without errors."""
        path = os.path.join(
            _test_base,
Example #43

class Foo(object):
    pass


foos = st.tuples().map(lambda _: Foo())


def test_can_create_arrays_of_composite_types():
    arr = minimal(nps.arrays(object, 100, foos))
    for x in arr:
        assert isinstance(x, Foo)


@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
    arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
    assert arr.shape == ()
    assert arr.dtype == np.dtype(object)
    assert arr.item() == x


def test_can_create_arrays_of_tuples():
    arr = minimal(
        nps.arrays(object, 10, st.tuples(st.integers(), st.integers())),
        lambda x: all(t0 != t1 for t0, t1 in x),
    )
    assert all(a in ((1, 0), (0, 1)) for a in arr)

Example #44
    def sample_program_config(self, draw):
        # 1. Generate shape of input:X of conv2d
        x_shape = draw(
            st.lists(
                st.integers(
                    min_value=5, max_value=100), min_size=4, max_size=4))
        x_shape[1] = draw(st.integers(min_value=5, max_value=10))

        # 2. Generate legal attr:data_format of conv2d
        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))

        # 3. Generate legal shape of input:Y of conv2d
        f_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=4), min_size=4, max_size=4))
        if data_format == "NCHW":
            f_shape[1] = x_shape[1]
        else:
            f_shape[1] = x_shape[3]

        # 4. Generate legal attr:strides of conv2d
        strides = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=4), min_size=2, max_size=2))

        # 5. Generate legal attr:padding_algorithm of conv2d
        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))

        # 6. Generate legal attr:padding of conv2d
        padding = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=4), min_size=4, max_size=4))

        # 7. Generate legal attr:groups of conv2d
        groups = draw(st.integers(min_value=1, max_value=3))

        # 8. Generate legal attr:dilations of conv2d
        dilations = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=4), min_size=2, max_size=2))

        # 9. Generate legal shape of input:bias of elementwise_add
        bias_shape = [f_shape[0]]

        # 10. Generate legal value of attr:axis of elementwise_add
        axis = 1 if data_format == "NCHW" else 3

        # 11. Generate legal shape of input:bias of conv2d
        conv_bias_shape = []
        inputs = dict()
        weights = dict()
        use_mkldnn = None
        if draw(st.booleans()):
            conv_bias_shape = [f_shape[0]]
            inputs = {
                "Input": ["input_x"],
                "Filter": ["filter"],
                "Bias": ["conv_bias"],
            }
            weights = {
                "filter": TensorConfig(shape=f_shape),
                "bias": TensorConfig(shape=bias_shape),
                "conv_bias": TensorConfig(shape=conv_bias_shape)
            }
            use_mkldnn = True
        else:
            inputs = {
                "Input": ["input_x"],
                "Filter": ["filter"],
            }
            weights = {
                "filter": TensorConfig(shape=f_shape),
                "bias": TensorConfig(shape=bias_shape)
            }
            use_mkldnn = False

        conv2d_op = OpConfig(
            "conv2d",
            inputs=inputs,
            outputs={"Output": ["conv2d_out"]},
            strides=strides,
            padding_algorithm=padding_algorithm,
            paddings=padding,
            groups=groups,
            dilations=dilations,
            data_format=data_format,
            use_mkldnn=use_mkldnn)

        add_op = OpConfig(
            "elementwise_add",
            inputs={"X": ["conv2d_out"],
                    "Y": ["bias"]},
            outputs={"Out": ["add_out"]},
            axis=axis)

        ops = [conv2d_op, add_op]

        program_config = ProgramConfig(
            ops=ops,
            weights=weights,
            inputs={"input_x": TensorConfig(shape=x_shape)},
            outputs=ops[-1].outputs["Out"])
        return program_config
Example #45
def test(data):
    x = data.draw(st.lists(st.integers(), min_size=1))
    y = data.draw(st.sampled_from(x))
    x.remove(y)
    if y in x:
        raise ValueError()
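This fragment uses the st.data() style, drawing interactively inside the test body; a minimal passing sketch of the same mechanics:

import hypothesis.strategies as st
from hypothesis import given

@given(st.data())
def test_later_draw_depends_on_earlier(data):
    xs = data.draw(st.lists(st.integers(), min_size=1))
    y = data.draw(st.sampled_from(xs))
    assert y in xs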
Example #46
def test_flatmap_breaks_reusability(s):
    cond = True

    def nontrivial_flatmap(x):
        """Non-trivial flat-mapping function, intended to remain opaque even
        if some strategies introspect their flat-mappings."""
        if cond:
            return st.just(x)
        else:
            return st.none()

    assert s.has_reusable_values
    assert not s.flatmap(nontrivial_flatmap).has_reusable_values


@pytest.mark.parametrize(
    "strat",
    [
        st.lists(st.booleans()),
        st.sets(st.booleans()),
        st.dictionaries(st.booleans(), st.booleans()),
    ],
)
def test_mutable_collections_do_not_have_reusable_values(strat):
    assert not strat.has_reusable_values


def test_recursion_does_not_break_reusability():
    x = st.deferred(lambda: st.none() | st.tuples(x))
    assert x.has_reusable_values
Example #47
from __future__ import print_function

import string

import pytest
from hashids import Hashids
from hashids_cffi import Hashids as HashidsCFFI
from hypothesis import assume, given
from hypothesis.strategies import text, integers, tuples, lists, sampled_from, characters

valid_characters = list(string.ascii_letters + string.digits + string.punctuation)
MAX_ULONGLONG = 2 ** 64 - 1


@given(lists(characters(min_codepoint=128), average_size=64).map(''.join).filter(lambda s: bool(s) and not s.isspace()),
       lists(sampled_from(valid_characters), min_size=16, average_size=16, unique=True).map(''.join),
       integers(min_value=0, max_value=32))
def test_unicode_salt(salt, alphabet, min_length):
    with pytest.raises(UnicodeEncodeError):
        HashidsCFFI(salt=salt, alphabet=alphabet, min_length=min_length)


@given(text(alphabet=valid_characters, average_size=64),
       lists(characters(min_codepoint=128), min_size=16, average_size=16, unique=True).map(''.join),
       integers(min_value=0, max_value=32))
def test_unicode_alphabet(salt, alphabet, min_length):

    with pytest.raises(UnicodeEncodeError):
        HashidsCFFI(salt=salt, alphabet=alphabet, min_length=min_length)
Example #48
                                  7: 'Williams'}})
df_ex_1 = pd.DataFrame({'mascot': {0: 'Lion', 1: 'Smeagol', 2: 'Tribble', 3: 'Womp Rat', 4: 'Molly'},
                        'player_count': {0: 20, 1: 12, 2: 1014, 3: 2, 4: 8},
                        'team': {0: 'Gryffindor',
                                 1: 'The Shire',
                                 2: 'Enterprise',
                                 3: 'Coruscant',
                                 4: 'Gridpoint'}})
df_ex_2 = pd.DataFrame({'mascot': {0: 'Hammer', 1: 'Womp Rat', 2: 'Rose', 3: 'lion', 4: 'smeagol'},
                        'player_count': {0: 3, 1: 4, 2: 80, 3: 20, 4: 12},
                        'team': {0: 'Asgard',
                                 1: 'Coruscant',
                                 2: 'Gridpoint',
                                 3: 'Gryffindor',
                                 4: 'The Shire'}})
lst_text = lists(elements=text(alphabet=list(string.printable), min_size=4, max_size=10), min_size=1).example()


@given(text(min_size=3))
@example(' BAD label ')
@example(' BAD label !@#$!%!%')
def test_make_good_label(s):
    print(type(make_good_label(s)))
    assert isinstance(make_good_label(s), str)
    assert make_good_label(' BAD label ') == 'bad_label'


@given(series(dtype=np.unicode_))
@example(series(dtype=np.int_).example())
@example(series(dtype=float).example())
@example(series(dtype=bool).example())
Example #49
@given(floats(-sys.float_info.max, sys.float_info.max))
def test_largest_range(x):
    assert not math.isinf(x)


@given(floats())
@TRY_HARDER
def test_negation_is_self_inverse(x):
    assume(not math.isnan(x))
    y = -x
    assert -y == x


@fails
@given(lists(floats()))
def test_is_not_nan(xs):
    assert not any(math.isnan(x) for x in xs)


@fails
@given(floats())
@TRY_HARDER
def test_is_not_positive_infinite(x):
    assume(x > 0)
    assert not math.isinf(x)


@fails
@given(floats())
@TRY_HARDER
Example #50
class TorchIntegration(hu.HypothesisTestCase):
    @given(
        roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
        num_classes=st.integers(1, 10),
        rotated=st.booleans(),
        angle_bound_on=st.booleans(),
        clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
        **hu.gcs_cpu_only
    )
    def test_bbox_transform(
        self,
        roi_counts,
        num_classes,
        rotated,
        angle_bound_on,
        clip_angle_thresh,
        gc,
        dc,
    ):
        """
        Test with rois for multiple images in a batch
        """
        rois, deltas, im_info = create_bbox_transform_inputs(
            roi_counts, num_classes, rotated
        )

        def bbox_transform_ref():
            ref_op = core.CreateOperator(
                "BBoxTransform",
                ["rois", "deltas", "im_info"],
                ["box_out"],
                apply_scale=False,
                rotated=rotated,
                angle_bound_on=angle_bound_on,
                clip_angle_thresh=clip_angle_thresh,
            )
            workspace.FeedBlob("rois", rois)
            workspace.FeedBlob("deltas", deltas)
            workspace.FeedBlob("im_info", im_info)
            workspace.RunOperatorOnce(ref_op)
            return workspace.FetchBlob("box_out")

        box_out = torch.tensor(bbox_transform_ref())
        a, b = torch.ops._caffe2.BBoxTransform(
            torch.tensor(rois),
            torch.tensor(deltas),
            torch.tensor(im_info),
            [1.0, 1.0, 1.0, 1.0],
            False,
            rotated,
            angle_bound_on,
            -90,
            90,
            clip_angle_thresh,
        )

        torch.testing.assert_allclose(box_out, a)

    @given(
        roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
        num_classes=st.integers(1, 10),
        rotated=st.booleans(),
        angle_bound_on=st.booleans(),
        clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
        **hu.gcs_cpu_only
    )
    def test_box_with_nms_limits(
        self,
        roi_counts,
        num_classes,
        rotated,
        angle_bound_on,
        clip_angle_thresh,
        gc,
        dc,
    ):
        rotated = False  # FIXME remove this after rotation is supported
        rois, deltas, im_info = create_bbox_transform_inputs(
            roi_counts, num_classes, rotated
        )
        pred_bbox, batch_splits = [
            t.detach().numpy()
            for t in torch.ops._caffe2.BBoxTransform(
                torch.tensor(rois),
                torch.tensor(deltas),
                torch.tensor(im_info),
                [1.0, 1.0, 1.0, 1.0],
                False,
                rotated,
                angle_bound_on,
                -90,
                90,
                clip_angle_thresh,
            )
        ]
        class_prob = np.random.randn(sum(roi_counts), num_classes).astype(np.float32)
        score_thresh = 0.5
        nms_thresh = 0.5
        topk_per_image = sum(roi_counts) / 2

        def box_with_nms_limit_ref():
            input_blobs = ["class_prob", "pred_bbox", "batch_splits"]
            output_blobs = ["score_nms", "bbox_nms", "class_nms", "batch_splits_nms"]
            ref_op = core.CreateOperator(
                "BoxWithNMSLimit",
                input_blobs,
                output_blobs,
                score_thresh=float(score_thresh),
                nms=float(nms_thresh),
                detections_per_im=int(topk_per_image),
                soft_nms_enabled=False,
                soft_nms_method="linear",
                soft_nms_sigma=0.5,
                soft_nms_min_score_thres=0.001,
                rotated=rotated,
            )
            workspace.FeedBlob("class_prob", class_prob)
            workspace.FeedBlob("pred_bbox", pred_bbox)
            workspace.FeedBlob("batch_splits", batch_splits)
            workspace.RunOperatorOnce(ref_op)
            return (workspace.FetchBlob(b) for b in output_blobs)

        output_refs = box_with_nms_limit_ref()
        outputs = torch.ops._caffe2.BoxWithNMSLimit(
            torch.tensor(class_prob),
            torch.tensor(pred_bbox),
            torch.tensor(batch_splits),
            score_thresh=float(score_thresh),
            nms=float(nms_thresh),
            detections_per_im=int(topk_per_image),
            soft_nms_enabled=False,
            soft_nms_method="linear",
            soft_nms_sigma=0.5,
            soft_nms_min_score_thres=0.001,
            rotated=rotated,
        )

        for o, o_ref in zip(outputs, output_refs):
            torch.testing.assert_allclose(o, o_ref)

    @given(
        A=st.integers(min_value=4, max_value=4),
        H=st.integers(min_value=10, max_value=10),
        W=st.integers(min_value=8, max_value=8),
        img_count=st.integers(min_value=3, max_value=3),
    )
    def test_generate_proposals(self, A, H, W, img_count):
        scores = np.ones((img_count, A, H, W)).astype(np.float32)
        bbox_deltas = (
            np.linspace(0, 10, num=img_count * 4 * A * H * W)
            .reshape((img_count, 4 * A, H, W))
            .astype(np.float32)
        )
        im_info = np.ones((img_count, 3)).astype(np.float32) / 10
        anchors = np.ones((A, 4)).astype(np.float32)

        def generate_proposals_ref():
            ref_op = core.CreateOperator(
                "GenerateProposals",
                ["scores", "bbox_deltas", "im_info", "anchors"],
                ["rois", "rois_probs"],
                spatial_scale=2.0,
            )
            workspace.FeedBlob("scores", scores)
            workspace.FeedBlob("bbox_deltas", bbox_deltas)
            workspace.FeedBlob("im_info", im_info)
            workspace.FeedBlob("anchors", anchors)
            workspace.RunOperatorOnce(ref_op)
            return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")

        rois, rois_probs = generate_proposals_ref()
        rois = torch.tensor(rois)
        rois_probs = torch.tensor(rois_probs)
        a, b = torch.ops._caffe2.GenerateProposals(
            torch.tensor(scores),
            torch.tensor(bbox_deltas),
            torch.tensor(im_info),
            torch.tensor(anchors),
            2.0,
            6000,
            300,
            0.7,
            16,
            True,
            -90,
            90,
            1.0,
        )
        torch.testing.assert_allclose(rois, a)
        torch.testing.assert_allclose(rois_probs, b)

    @given(
        bsz=st.integers(1, 5),
        seq_lens=st.integers(1, 6),
        emb_lens=st.integers(5, 10),
        hidden_size=st.integers(3, 7),
        num_layers=st.integers(1, 4),
        has_biases=st.booleans(),
        is_bidirectional=st.booleans(),
        batch_first=st.booleans(),
    )
    def test_inference_lstm(
        self,
        bsz,
        seq_lens,
        emb_lens,
        hidden_size,
        num_layers,
        has_biases,
        is_bidirectional,
        batch_first,
    ):
        num_directions = 2 if is_bidirectional else 1
        hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)

        if batch_first:
            inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
        else:
            inputs = np.random.randn(seq_lens, bsz, emb_lens).astype(np.float32)

        torch_lstm = torch.nn.LSTM(
            emb_lens,
            hidden_size,
            batch_first=batch_first,
            bidirectional=is_bidirectional,
            bias=has_biases,
            num_layers=num_layers,
        )

        def inference_lstm_ref():
            input_names = ["inputs", "hidden_0", "hidden_1"]
            workspace.FeedBlob("inputs", inputs)
            workspace.FeedBlob("hidden_0", hx)
            workspace.FeedBlob("hidden_1", hx)
            for i, param in enumerate(torch_lstm._flat_weights):
                input_names.append("param_{}".format(i))
                workspace.FeedBlob("param_{}".format(i), param.detach().numpy())

            ref_op = core.CreateOperator(
                "InferenceLSTM",
                input_names,
                ["output", "hidden", "cell"],
                num_layers=num_layers,
                has_biases=has_biases,
                batch_first=batch_first,
                bidirectional=is_bidirectional,
            )
            workspace.RunOperatorOnce(ref_op)
            return (
                workspace.FetchBlob("output"),
                workspace.FetchBlob("hidden"),
                workspace.FetchBlob("cell")
            )

        output, hidden, cell = inference_lstm_ref()
        output = torch.tensor(output)
        hidden = torch.tensor(hidden)
        cell = torch.tensor(cell)
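        # Pack the op's inputs in the same order as the reference above:
        # input data, the two (identical) initial hidden states, then the
        # flattened LSTM weights.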
        lstm_in = [
            torch.from_numpy(inputs),
            torch.from_numpy(hx),
            torch.from_numpy(hx),
        ] + [param.detach() for param in torch_lstm._flat_weights]

        a, b, c = torch.ops._caffe2.InferenceLSTM(
            lstm_in, num_layers, has_biases, batch_first, is_bidirectional
        )
        torch.testing.assert_allclose(output, a)
        torch.testing.assert_allclose(hidden, b)
        torch.testing.assert_allclose(cell, c)

    # This test uses workspace.has_cuda_support rather than
    # workspace.has_gpu_support so that it is excluded on HIP, where tensor
    # interop does not work yet.
    @unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
    @given(
        A=st.integers(min_value=4, max_value=4),
        H=st.integers(min_value=10, max_value=10),
        W=st.integers(min_value=8, max_value=8),
        img_count=st.integers(min_value=3, max_value=3),
    )
    def test_generate_proposals_cuda(self, A, H, W, img_count):
        scores = np.ones((img_count, A, H, W)).astype(np.float32)
        bbox_deltas = (
            np.linspace(0, 10, num=img_count * 4 * A * H * W)
            .reshape((img_count, 4 * A, H, W))
            .astype(np.float32)
        )
        im_info = np.ones((img_count, 3)).astype(np.float32) / 10
        anchors = np.ones((A, 4)).astype(np.float32)

        def generate_proposals_ref():
            ref_op = core.CreateOperator(
                "GenerateProposals",
                ["scores", "bbox_deltas", "im_info", "anchors"],
                ["rois", "rois_probs"],
                spatial_scale=2.0,
            )
            workspace.FeedBlob("scores", scores)
            workspace.FeedBlob("bbox_deltas", bbox_deltas)
            workspace.FeedBlob("im_info", im_info)
            workspace.FeedBlob("anchors", anchors)
            workspace.RunOperatorOnce(ref_op)
            return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")

        rois, rois_probs = generate_proposals_ref()
        rois = torch.tensor(rois)
        rois_probs = torch.tensor(rois_probs)
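        # Same positional arguments as in test_generate_proposals above.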
        a, b = torch.ops._caffe2.GenerateProposals(
            torch.tensor(scores).cuda(),
            torch.tensor(bbox_deltas).cuda(),
            torch.tensor(im_info).cuda(),
            torch.tensor(anchors).cuda(),
            2.0,
            6000,
            300,
            0.7,
            16,
            True,
            -90,
            90,
            1.0,
        )
        torch.testing.assert_allclose(rois, a.cpu())
        torch.testing.assert_allclose(rois_probs, b.cpu())

    @given(
        N=st.integers(min_value=1, max_value=2),
        C=st.integers(min_value=4, max_value=4),
        H=st.integers(min_value=10, max_value=10),
        W=st.integers(min_value=8, max_value=8),
    )
    def _test_roi_align(self, N, C, H, W, device):
        def rand_roi():
            return np.array(
                [
                    float(int(N * np.random.rand())),
                    0.5 * np.random.rand() * W,
                    0.5 * np.random.rand() * H,
                    (0.5 + 0.5 * np.random.rand()) * W,
                    (0.5 + 0.5 * np.random.rand()) * H,
                ]
            ).astype(np.float32)

        feature = np.random.randn(N, C, H, W).astype(np.float32)
        rois = np.array([rand_roi() for _ in range(10)])

        def roi_align_ref(_feature, _rois):
            ref_op = core.CreateOperator(
                "RoIAlign",
                ["feature", "rois"],
                ["roi_feature"],
                spatial_scale=1.0,
                pooled_h=3,
                pooled_w=3,
                sampling_ratio=0,
            )
            workspace.FeedBlob("feature", _feature)
            workspace.FeedBlob("rois", _rois)
            workspace.RunOperatorOnce(ref_op)
            return workspace.FetchBlob("roi_feature")

        roi_feature_ref = roi_align_ref(feature, rois)
        roi_feature = torch.ops._caffe2.RoIAlign(
            torch.Tensor(feature).to(device),
            torch.Tensor(rois).to(device),
            order="NCHW",
            spatial_scale=1.0,
            pooled_h=3,
            pooled_w=3,
            sampling_ratio=0,
        )
        torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())

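    # _test_roi_align (and _test_gelu_op below) are wrapped by @given, so
    # Hypothesis supplies the strategy-drawn arguments; the caller only needs
    # to pass the remaining `device` argument.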
    def test_roi_align_cpu(self):
        self._test_roi_align(device="cpu")

    @unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
    def test_roi_align_cuda(self):
        self._test_roi_align(device="cuda")

    @given(X=hu.tensor(),
           fast_gelu=st.booleans())
    def _test_gelu_op(self, X, fast_gelu, device):
        def _gelu_ref(_X):
            return (_X * norm.cdf(_X).astype(np.float32), )
        expected_output, = _gelu_ref(X)
        actual_output = torch.ops._caffe2.Gelu(torch.tensor(X), fast_gelu)

        rtol = 1e-3 if fast_gelu else 1e-4
        atol = 1e-5
        torch.testing.assert_allclose(
            expected_output, actual_output.cpu(), rtol=rtol, atol=atol)

    def test_gelu_op(self):
        self._test_gelu_op(device="cpu")

    @unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
    def test_gelu_op_cuda(self):
        self._test_gelu_op(device="cuda")
Example #51
0
# (decorator restored from the pattern of the tests below; an assumption)
@given(tuples(c_doubles, c_doubles))
def test_doubleDouble(l):
    assert hst.doubleDouble(l) == l


@given(tuples(c_ints, c_chars, c_doubles))
def test_intCharDouble(l):
    assert hst.intCharDouble(l) == l


@given(tuples(c_doubles, c_floats, c_chars, c_doubles))
def test_doubleFloatCharDouble(l):
    assert hst.doubleFloatCharDouble(l) == l


# Lists and Nested


@given(tuples(c_ints, lists(c_strings)))
def test_intListStrings(l):
    assert hst.intListStrings(l) == l


@given(tuples(c_strings, lists(tuples(c_ints, c_chars))))
def test_stringListTupleIntChar(l):
    assert hst.stringListTupleIntChar(l) == l


@given(tuples(tuples(c_ints, c_doubles), tuples(c_ints, c_doubles)))
def test_nestedIntDouble(l):
    assert hst.nestedIntDouble(l) == l
Example #52
0
        method="GET",
        url=FULL_URL,
        status_code=200,
    )
    assert span.meta[http.URL] == FULL_URL


# This generates a list of (key, value) tuples, with values drawn from
# arbitrarily nested lists and dictionaries of scalar values
@given(
    lists(
        tuples(
            text(),
            recursive(
                none() | booleans() | floats() | text(),
                lambda children: lists(children, min_size=1) | dictionaries(
                    text(), children, min_size=1),
                max_leaves=10,
            ),
        ),
        max_size=4,
    ))
def test_set_flattened_tags_is_flat(items):
    """Ensure that flattening of a nested dict results in a normalized, 1-level dict"""
    span = Span(None, "test")
    trace_utils.set_flattened_tags(span, items)
    assert isinstance(span.meta, dict)
    assert not any(isinstance(v, dict) for v in span.meta.values())
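

# A minimal sketch of the flattening idea the test exercises (hypothetical --
# not the actual trace_utils implementation): nested dict values are unrolled
# into dotted keys so that no dict value survives in the result.
def _flatten_sketch(items):
    flat = {}

    def _walk(prefix, value):
        if isinstance(value, dict):
            for k, v in value.items():
                _walk("{}.{}".format(prefix, k), v)
        else:
            flat[prefix] = value

    for key, value in items:
        _walk(key, value)
    return flat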


def test_set_flattened_tags_keys():
    """Ensure expected keys in flattened dictionary"""
Example #53
0
    assert not pa.types.is_boolean_value(1)
    assert pa.types.is_boolean_value(True)
    assert pa.types.is_boolean_value(False)
    assert pa.types.is_boolean_value(np.bool_(True))
    assert pa.types.is_boolean_value(np.bool_(False))


@h.given(past.all_types | past.all_fields | past.all_schemas)
@h.example(pa.field(name='', type=pa.null(), metadata={'0': '', '': ''}))
def test_pickling(field):
    data = pickle.dumps(field)
    assert pickle.loads(data) == field


@h.given(
    st.lists(past.all_types) | st.lists(past.all_fields)
    | st.lists(past.all_schemas))
def test_hashing(items):
    h.assume(
        # well, this is still O(n^2), but makes the input unique
        all(not a.equals(b) for i, a in enumerate(items) for b in items[:i]))

    container = {}
    for i, item in enumerate(items):
        assert hash(item) == hash(item)
        container[item] = i

    assert len(container) == len(items)

    for i, item in enumerate(items):
        assert container[item] == i
Example #54
0
#
# You should have received a copy of the GNU Affero General Public License
# along with hashtable. If not, see <http://www.gnu.org/licenses/>.

import hypothesis
import hypothesis.strategies as hs

import hashtable

# TODO(mastensg): parameterize variant
ht_lib = hashtable.Library("one")

keys = hs.integers(min_value=0, max_value=((1 << 64) - 1))
values = keys
key_value_pairs = hs.tuples(keys, values)
key_value_pair_lists = hs.lists(elements=key_value_pairs, min_size=0)


@hypothesis.settings(max_examples=10000,
                     verbosity=hypothesis.Verbosity.verbose)
@hypothesis.given(key_value_pair_lists)
def test_insert_then_find(l):
    h = hashtable.Hashtable(ht_lib)
    d = dict()

    for kv in l:
        k, v = kv
        d[k] = v
        h[k] = v

    for k in d:
        assert h[k] == d[k]  # assumed completion: the lookup matches the dict
Example #55
0

@given(variable_inds=st.integers())
def test_check_variables_scalar(variable_inds):
    result = BasePrior.check_variables(variable_inds, 1)
    assert result == [variable_inds]


@given(variable_inds=st.integers(), length=st.integers())
def test_check_variables_scalar_bad(variable_inds, length):
    assume(length != 1)
    with pytest.raises(ValueError):
        BasePrior.check_variables(variable_inds, length)


@given(variable_inds=st.lists(st.integers(), unique=True))
def test_check_variables_list(variable_inds):
    result = BasePrior.check_variables(variable_inds, len(variable_inds))
    assert result == variable_inds


@given(variable_inds=st.lists(st.integers(), unique=True), length=st.integers())
def test_check_variables_list_bad_length(variable_inds, length):
    assume(length != len(variable_inds))
    with pytest.raises(ValueError):
        BasePrior.check_variables(variable_inds, length)


def test_check_variables_list_not_int():
    with pytest.raises(TypeError):
        BasePrior.check_variables([1, 3, 2.0, 4], 4)
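

# The properties above pin down check_variables' contract.  A minimal sketch
# of an implementation that satisfies them (hypothetical -- not BasePrior's
# actual code):
def _check_variables_sketch(variable_inds, length):
    if isinstance(variable_inds, int):
        if length != 1:
            raise ValueError("a scalar index implies length == 1")
        return [variable_inds]
    if any(not isinstance(i, int) for i in variable_inds):
        raise TypeError("variable indices must all be ints")
    if length != len(variable_inds):
        raise ValueError("length must equal the number of indices")
    return list(variable_inds)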
Example #56
0
H_BIDICT_TYPES = strat.sampled_from(BIDICT_TYPES)
H_MUTABLE_BIDICT_TYPES = strat.sampled_from(MUTABLE_BIDICT_TYPES)
H_IMMUTABLE_BIDICT_TYPES = strat.sampled_from(IMMUTABLE_BIDICT_TYPES)
H_ORDERED_BIDICT_TYPES = strat.sampled_from(ORDERED_BIDICT_TYPES)
H_MAPPING_TYPES = strat.sampled_from(MAPPING_TYPES)
H_NAMES = strat.sampled_from(('valid1', 'valid2', 'valid3', 'in-valid'))

H_DUP_POLICIES = strat.sampled_from((IGNORE, OVERWRITE, RAISE))
H_BOOLEANS = strat.booleans()
H_TEXT = strat.text()
H_NONE = strat.none()
H_IMMUTABLES = H_BOOLEANS | H_TEXT | H_NONE | strat.integers() | strat.floats(
    allow_nan=False)
H_NON_MAPPINGS = H_NONE
H_PAIRS = strat.tuples(H_IMMUTABLES, H_IMMUTABLES)
H_LISTS_PAIRS = strat.lists(H_PAIRS)
H_LISTS_PAIRS_NODUP = H_LISTS_PAIRS.map(prune_dup)
H_LISTS_PAIRS_DUP = (H_LISTS_PAIRS.map(ensure_dup(key=True))
                     | H_LISTS_PAIRS.map(ensure_dup(val=True))
                     | H_LISTS_PAIRS.map(ensure_dup(key=True, val=True)))
H_TEXT_PAIRS = strat.tuples(H_TEXT, H_TEXT)
H_LISTS_TEXT_PAIRS_NODUP = strat.lists(H_TEXT_PAIRS).map(prune_dup)
H_METHOD_ARGS = strat.sampled_from((
    # 0-arity
    ('clear', ()),
    ('popitem', ()),
    # 1-arity
    ('__delitem__', (H_IMMUTABLES, )),
    ('pop', (H_IMMUTABLES, )),
    ('setdefault', (H_IMMUTABLES, )),
    ('move_to_end', (H_IMMUTABLES, )),
Example #57
0
def test_validates_min_size():
    with pytest.raises(InvalidArgument):
        st.lists(st.nothing(), min_size=1).validate()
Example #58
0
    encode_canonical_json,
    get_integer_bounds,
    get_number_bounds,
    get_type,
    make_validator,
    merged,
    resolve_all_refs,
)

JSON_STRATEGY: st.SearchStrategy[JSONType] = st.recursive(
    st.none()
    | st.booleans()
    | st.integers()
    | st.floats(allow_nan=False, allow_infinity=False).map(lambda x: x or 0.0)
    | st.text(),
    lambda strategy: st.lists(strategy, max_size=3)
    | st.dictionaries(st.text(), strategy, max_size=3),
)
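
# For illustration, JSON_STRATEGY.example() draws arbitrarily nested
# JSON-compatible data, e.g. {'a': [None, True, 0.0]} (output varies per call).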
_FORMATS_TOKEN = object()


def merged_as_strategies(
    schemas: List[Schema],
    custom_formats: Optional[Dict[str, st.SearchStrategy[str]]]
) -> st.SearchStrategy[JSONType]:
    assert schemas, "internal error: must pass at least one schema to merge"
    if len(schemas) == 1:
        return from_schema(schemas[0], custom_formats=custom_formats)
    # Try to merge combinations of strategies.
    strats = []
    combined: Set[str] = set()
Example #59
0

class Foo:
    pass


foos = st.tuples().map(lambda _: Foo())
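# st.tuples() with no arguments always generates the empty tuple, so mapping
# over it yields a strategy that builds a fresh Foo instance per example.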


def test_can_create_arrays_of_composite_types():
    arr = minimal(nps.arrays(object, 100, elements=foos))
    for x in arr:
        assert isinstance(x, Foo)


@given(st.lists(st.integers()), st.data())
def test_can_create_zero_dim_arrays_of_lists(x, data):
    arr = data.draw(nps.arrays(object, (), elements=st.just(x)))
    assert arr.shape == ()
    assert arr.dtype == np.dtype(object)
    assert arr.item() == x


def test_can_create_arrays_of_tuples():
    arr = minimal(
        nps.arrays(object,
                   10,
                   elements=st.tuples(st.integers(), st.integers())),
        lambda x: all(t0 != t1 for t0, t1 in x),
    )
    assert all(a in ((1, 0), (0, 1)) for a in arr)
Example #60
0
def array_schema(custom_formats: Dict[str, st.SearchStrategy[str]],
                 schema: dict) -> st.SearchStrategy[List[JSONType]]:
    """Handle schemata for arrays."""
    _from_schema_ = partial(from_schema, custom_formats=custom_formats)
    items = schema.get("items", {})
    additional_items = schema.get("additionalItems", {})
    min_size = schema.get("minItems", 0)
    max_size = schema.get("maxItems")
    unique = schema.get("uniqueItems")
    if isinstance(items, list):
        min_size = max(0, min_size - len(items))
        if max_size is not None:
            max_size -= len(items)

        items_strats = [_from_schema_(s) for s in items]
        additional_items_strat = _from_schema_(additional_items)

        # If we have a contains schema to satisfy, we try generating from it when
        # allowed to do so.  We'll skip the None (unmergable / no contains) cases
        # below, and let Hypothesis ignore the FALSEY cases for us.
        if "contains" in schema:
            for i, mrgd in enumerate(
                    merged([schema["contains"], s]) for s in items):
                if mrgd is not None:
                    items_strats[i] |= _from_schema_(mrgd)
            contains_additional = merged(
                [schema["contains"], additional_items])
            if contains_additional is not None:
                additional_items_strat |= _from_schema_(contains_additional)

        if unique:

            @st.composite  # type: ignore
            def compose_lists_with_filter(draw: Any) -> List[JSONType]:
                elems = []
                seen: Set[str] = set()

                def not_seen(elem: JSONType) -> bool:
                    return encode_canonical_json(elem) not in seen

                for strat in items_strats:
                    elems.append(draw(strat.filter(not_seen)))
                    seen.add(encode_canonical_json(elems[-1]))
                extra_items = st.lists(
                    additional_items_strat.filter(not_seen),
                    min_size=min_size,
                    max_size=max_size,
                    unique_by=encode_canonical_json,
                )
                more_elems: List[JSONType] = draw(extra_items)
                return elems + more_elems

            strat = compose_lists_with_filter()
        else:
            strat = st.builds(
                operator.add,
                st.tuples(*items_strats).map(list),
                st.lists(additional_items_strat,
                         min_size=min_size,
                         max_size=max_size),
            )
    else:
        items_strat = _from_schema_(items)
        if "contains" in schema:
            contains_strat = _from_schema_(schema["contains"])
            if merged([items, schema["contains"]]) != schema["contains"]:
                # We only need this filter if we couldn't merge items in when
                # canonicalising.  Note that for list-items, above, we just skip
                # the mixed generation in this case (because they tend to be
                # heterogeneous) and hope it works out anyway.
                contains_strat = contains_strat.filter(
                    make_validator(items).is_valid)
            items_strat |= contains_strat

        strat = st.lists(
            items_strat,
            min_size=min_size,
            max_size=max_size,
            unique_by=encode_canonical_json if unique else None,
        )
    if "contains" not in schema:
        return strat
    contains = make_validator(schema["contains"]).is_valid
    return strat.filter(lambda val: any(contains(x) for x in val))
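

# Usage sketch (hypothetical schema): with this module's from_schema in scope,
# the call below builds a strategy drawing lists of one to five unique
# integers.
#
#     ints = array_schema({}, {"items": {"type": "integer"},
#                              "minItems": 1, "maxItems": 5,
#                              "uniqueItems": True})
#     ints.example()  # e.g. [3, -1, 7]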