def test_provided_kwargs_are_defaults():
    @given(hello=booleans(), world=booleans())
    def greet(hello, **kwargs):
        assert hello == u"salve"
        assert kwargs == {u"world": u"mundi"}

    greet(u"salve", world=u"mundi")
def test_given_warns_when_mixing_positional_with_keyword():
    @given(booleans(), y=booleans())
    @settings(strict=False)
    def test(x, y):
        pass
    with pytest.raises(InvalidArgument):
        test()
@st.composite  # assumed: the function takes `draw`, composite-style
def fstrings(draw):
    """
    Generate a valid f-string.
    See https://www.python.org/dev/peps/pep-0498/#specification

    :param draw: Let hypothesis draw from other strategies.

    :return: A valid f-string.
    """
    character_strategy = st.characters(
        blacklist_characters='\r\n\'\\s{}',
        min_codepoint=1,
        max_codepoint=1000,
    )
    is_raw = draw(st.booleans())
    integer_strategy = st.integers(min_value=0, max_value=3)
    expression_count = draw(integer_strategy)
    content = []
    for _ in range(expression_count):
        expression = draw(expressions())
        conversion = draw(st.sampled_from(('', '!s', '!r', '!a',)))
        has_specifier = draw(st.booleans())
        specifier = ':' + draw(format_specifiers()) if has_specifier else ''
        content.append('{{{}{}{}}}'.format(expression, conversion, specifier))
        content.append(draw(st.text(character_strategy)))
    content = ''.join(content)
    return "f{}'{}'".format('r' if is_raw else '', content)
def test_can_simplify_hard_recursive_data_into_boolean_alternative(rnd):
    """This test forces us to exercise the simplification through redrawing
    functionality, thus testing that we can deal with bad templates."""
    def leaves(ls):
        if isinstance(ls, (bool,) + integer_types):
            return [ls]
        else:
            return sum(map(leaves, ls), [])

    def hard(base):
        return recursive(
            base, lambda x: lists(x, max_size=5), max_leaves=20)
    r = find(
        hard(booleans()) |
        hard(booleans()) |
        hard(booleans()) |
        hard(integers()) |
        hard(booleans()),
        lambda x:
            len(leaves(x)) >= 3 and
            any(isinstance(t, bool) for t in leaves(x)),
        random=rnd, settings=settings(
            database=None, max_examples=5000, max_shrinks=1000))
    lvs = leaves(r)
    assert lvs == [False] * 3
    assert all(isinstance(v, bool) for v in lvs), repr(lvs)
def test_given_does_not_warn_when_using_strategies_directly(recwarn):
    @given(booleans(), booleans())
    def foo(x, y):
        pass

    foo()
    with pytest.raises(AssertionError):
        recwarn.pop(DeprecationWarning)
def make_ontologyword_details(draw, unit_id, ontologyword_id):
    result = {
        'unit_id': unit_id,
        'ontologyword_id': ontologyword_id
    }
    if draw(booleans()):
        result['schoolyear'] = '2017-2018'  # TODO
    if draw(booleans()):
        result.update(translated_field(draw, 'clarification', allow_missing=False))
    return result
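# Hypothetical wrapper (not in the original source) showing how this
# draw-taking helper would be invoked from a composite strategy; the id
# strategies here are illustrative guesses.
@st.composite
def ontologyword_details(draw):
    return make_ontologyword_details(
        draw,
        unit_id=draw(integers(min_value=1)),
        ontologyword_id=draw(integers(min_value=1)),
    )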
Example #8
def test_can_apply_simplifiers_to_other_types():
    r = Random(0)
    s = one_of(booleans(), lists(booleans()))
    template1 = s.draw_and_produce(r)
    while True:
        template2 = s.draw_and_produce(r)
        if template2[0] != template1[0]:
            break
    for simplify in s.simplifiers(r, template1):
        assert list(simplify(r, template2)) == []
Example #9
def test_mixed_list_flatmap():
    s = lists(booleans().flatmap(lambda b: booleans() if b else text()))

    def criterion(ls):
        c = Counter(type(l) for l in ls)
        return len(c) >= 2 and min(c.values()) >= 3

    result = find(s, criterion)
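    # criterion requires at least three values of each of the two element
    # types, so the shrunk minimum is three Falses plus three empty strings: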
    assert len(result) == 6
    assert set(result) == set([False, u""])
def requirements_list(draw, length, answers=False):
    if answers:
        elements = booleans() if length is not None else one_of(booleans(), none())
    else:
        elements = text(min_size=1, alphabet='abcdefgh', average_size=10)
    return draw(lists(
        elements=elements,
        min_size=length,
        max_size=length if length is not None else 10
    ))
def test_lists_of_incompatible_sizes_are_checked():
    s10 = lists(booleans(), min_size=10)
    s2 = lists(booleans(), max_size=9)

    x10 = s10.to_basic(some_template(s10))
    x2 = s2.to_basic(some_template(s2))
    with pytest.raises(BadData):
        s2.from_basic(x10)
    with pytest.raises(BadData):
        s10.from_basic(x2)
Example #12
def simple_attrs_with_metadata(draw):
    """
    Create a simple attribute with arbitrary metadata.
    """
    c_attr = draw(simple_attrs)
    keys = st.booleans() | st.binary() | st.integers() | st.text()
    vals = st.booleans() | st.binary() | st.integers() | st.text()
    metadata = draw(st.dictionaries(keys=keys, values=vals))

    return attr.ib(c_attr._default, c_attr._validator, c_attr.repr,
                   c_attr.cmp, c_attr.hash, c_attr.init, c_attr.convert,
                   metadata)
def requirements_list(draw, length, answers=False):
    if answers:
        elements = booleans() if length is not None else one_of(booleans(), none())
    else:
        elements = text(min_size=1, max_size=300, alphabet=_descriptive_alphabet).filter(
            partial(_word_count_filter, max_words=30)
        )
    return draw(lists(
        elements=elements,
        min_size=length,
        max_size=length if length is not None else 10
    ))
def test_generate_arbitrary_indices(data):
    min_size = data.draw(st.integers(0, 10), "min_size")
    max_size = data.draw(st.none() | st.integers(min_size, min_size + 10), "max_size")
    unique = data.draw(st.booleans(), "unique")
    dtype = data.draw(npst.scalar_dtypes(), "dtype")
    assume(supported_by_pandas(dtype))

    # Pandas bug: https://github.com/pandas-dev/pandas/pull/14916 until 0.20;
    # then int64 indexes are inferred from uint64 values.
    assume(dtype.kind != "u")

    pass_elements = data.draw(st.booleans(), "pass_elements")

    converted_dtype = pandas.Index([], dtype=dtype).dtype

    try:
        inferred_dtype = pandas.Index([data.draw(npst.from_dtype(dtype))]).dtype

        if pass_elements:
            elements = npst.from_dtype(dtype)
            dtype = None
        else:
            elements = None

        index = data.draw(
            pdst.indexes(
                elements=elements,
                dtype=dtype,
                min_size=min_size,
                max_size=max_size,
                unique=unique,
            )
        )

    except Exception as e:
        if type(e).__name__ == "OutOfBoundsDatetime":
            # See https://github.com/HypothesisWorks/hypothesis-python/pull/826
            reject()
        else:
            raise
    if dtype is None:
        if pandas.__version__ >= "0.19":
            assert index.dtype == inferred_dtype
    else:
        assert index.dtype == converted_dtype

    if unique:
        assert len(set(index.values)) == len(index)
Example #15
def to_structure(tree_or_message):
    if isinstance(tree_or_message, list):
        return ActionStructure(
            type=draw(labels),
            failed=draw(st.booleans()),
            children=[to_structure(o) for o in tree_or_message],
        )
    else:
        return tree_or_message
Example #16
    def test_resolveObjects(self, jsonObject, data):
        """
        A JSON serializable object that may contain L{Deferred}s or a
        L{Deferred} that resolves to a JSON serializable object
        resolves to an object that contains no L{Deferred}s.
        """
        deferredValues = []
        choose = st.booleans()

        def maybeWrapInDeferred(value):
            if data.draw(choose):
                deferredValues.append(DeferredValue(value))
                return deferredValues[-1].deferred
            else:
                return value

        deferredJSONObject = transformJSONObject(
            jsonObject,
            maybeWrapInDeferred,
        )

        resolved = resolveDeferredObjects(deferredJSONObject)

        for value in deferredValues:
            value.resolve()

        self.assertEqual(self.successResultOf(resolved), jsonObject)
def reusable():
    return st.one_of(
        st.sampled_from(base_reusable_strategies),

        st.builds(
            st.floats, min_value=st.none() | st.floats(),
            max_value=st.none() | st.floats(), allow_infinity=st.booleans(),
            allow_nan=st.booleans()
        ),

        st.builds(st.just, st.lists(max_size=0)),
        st.builds(st.sampled_from, st.lists(st.lists(max_size=0))),

        st.lists(reusable).map(st.one_of),
        st.lists(reusable).map(lambda ls: st.tuples(*ls)),
    )
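# Note (assumed from the Hypothesis test suite this snippet appears to come
# from): reusable refers to itself in its own body, so the recursion is tied
# off after the definition, e.g.:
#
#     reusable = st.deferred(reusable)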
def test_raises_unsatisfiable_if_all_false_in_finite_set():
    @given(booleans())
    def test_assume_false(x):
        assume(False)

    with pytest.raises(Unsatisfiable):
        test_assume_false()
def test_ordered_dictionaries_preserve_keys():
    r = Random()
    keys = list(range(100))
    r.shuffle(keys)
    x = fixed_dictionaries(
        OrderedDict([(k, booleans()) for k in keys])).example()
    assert list(x.keys()) == keys
def test_can_use_recursive_data_in_sets(rnd):
    nested_sets = st.recursive(
        st.booleans(),
        lambda js: st.frozensets(js, average_size=2.0),
        max_leaves=10
    )
    nested_sets.example(rnd)

    def flatten(x):
        if isinstance(x, bool):
            return frozenset((x,))
        else:
            result = frozenset()
            for t in x:
                result |= flatten(t)
                if len(result) == 2:
                    break
            return result
    assert rnd is not None
    x = find(
        nested_sets, lambda x: len(flatten(x)) == 2, random=rnd,
        settings=settings(database=None, max_shrinks=1000, max_examples=1000))
    assert x in (
        frozenset((False, True)),
        frozenset((False, frozenset((True,)))),
        frozenset((frozenset((False, True)),))
    )
Example #21
def test_finite_space_errors_if_all_unsatisfiable():
    @given(booleans())
    def test_no(x):
        assume(False)

    with pytest.raises(Unsatisfiable):
        test_no()
Example #22
def test_can_form_sets_of_recursive_data():
    trees = st.sets(st.recursive(
        st.booleans(), lambda x: st.lists(x).map(tuple), max_leaves=4))
    xs = find(trees, lambda x: len(x) >= 10)
    assert len(xs) == 10
    assert False in xs
    assert True in xs
def test_can_find_nested():
    x = find(
        st.recursive(st.booleans(), lambda x: st.tuples(x, x)),
        lambda x: isinstance(x, tuple) and isinstance(x[0], tuple)
    )

    assert x == ((False, False), False)
def test_given_twice_deprecated():
    @given(booleans())
    @given(integers())
    def inner(a, b):
        pass
    with validate_deprecation():
        inner()
def test_prints_intermediate_in_success():
    with capture_verbosity(Verbosity.verbose) as o:
        @given(booleans())
        def test_works(x):
            pass
        test_works()
    assert 'Trying example' in o.getvalue()
Example #26
def test_can_shrink_through_a_binding(n):
    bool_lists = integers(0, 100).flatmap(
        lambda k: lists(booleans(), min_size=k, max_size=k))
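    # the list length is bound to the drawn k, so shrinking a long list also
    # has to shrink k back through the flatmap binding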

    assert find(
        bool_lists, lambda x: len(list(filter(bool, x))) >= n
    ) == [True] * n
Example #27
def jenkins_build_results(inQueue=None, builds=None):
    """Create a strategy for generating Jenkins API information for a job.

    :param strategy inQueue: strategy for the inQueue key, or None to use
        the default.
    :param strategy builds: strategy for populating the builds key, or None
        for the default. The special value `NO_BUILDS` will mean that the
        builds key is not in the resulting dict at all.
    :return strategy: a strategy.
    """
    strats = []
    if inQueue is None:
        inQueue = booleans()
        strats.append(just(pmap()))
    without_builds = fixed_dictionaries(dict(
        inQueue=inQueue))
    if builds is None or builds is NO_BUILDS:
        strats.append(without_builds)
    if builds is None:
        builds = lists(jenkins_builds, average_size=1)
    if builds is not NO_BUILDS:
        with_builds = fixed_dictionaries(dict(
            inQueue=inQueue,
            builds=builds,
            property=dictionaries(
                text(max_size=2), text(max_size=2),
                average_size=1, max_size=2)))
        strats.append(with_builds)
    return one_of(*strats)
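# Hedged usage sketch (an addition, not in the original source); NO_BUILDS is
# assumed to be the module's sentinel mentioned in the docstring above.
@given(jenkins_build_results(builds=NO_BUILDS))
def test_job_without_builds(job):
    assert 'builds' not in job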
def test_can_delete_in_middle_of_a_binding(n):
    bool_lists = integers(1, 100).flatmap(
        lambda k: lists(booleans(), min_size=k, max_size=k))

    assert find(
        bool_lists, lambda x: x[0] and x[-1] and x.count(False) >= n
    ) == [True] + [False] * n + [True]
Example #29
def host_json():
    return st.fixed_dictionaries(
        {
            "metadata":
                st.fixed_dictionaries({
                    "update_time": st.floats(),
                    "update_user": st.one_of(st.none(), st.text()),
                    "update_action": st.integers(),
                    "creator": st.text(),
                    "create_time": st.integers(),
                    "update_controller_action": st.text(),
                    "owner": st.one_of(st.none(), st.text()),
                    "command_id": st.one_of(st.none(), st.text(), st.integers()),}),
            "name": st.one_of(st.none(), st.text()),
            "ip": st.one_of(st.none(), st.text()),
            "_rev": st.one_of(st.none(), st.text()),
            "description": st.one_of(st.none(), st.text()),
            "default_gateway": st.one_of(st.none(), st.text()),
            "owned": st.booleans(),
            "services": st.one_of(st.none(), st.integers()),
            "hostnames": st.lists(st.text()),
            "vulns": st.one_of(st.none(), st.integers()),
            "owner": st.one_of(st.none(), st.text()),
            "credentials": st.one_of(st.none(), st.integers()),
            "_id": st.one_of(st.none(), st.integers()),
            "os": st.one_of(st.none(), st.text()),
            "id": st.one_of(st.none(), st.integers()),
            "icon": st.one_of(st.none(), st.text())}
    )
Example #30
@st.composite  # assumed: the function takes `draw`, composite-style
def charset(draw):
    negated = draw(st.booleans())
    chars = draw(st.text(string.ascii_letters + string.digits, min_size=1))
    if negated:
        return u"[^%s]" % (chars,)
    else:
        return u"[%s]" % (chars,)
Example #31
class DNNLowPBatchMatMulOpTest(hu.HypothesisTestCase):
    # correctness test with no quantization error in inputs
    @given(m=st.integers(0, 32),
           n=st.integers(4, 32),
           k=st.integers(4, 32),
           batch_size=st.integers(0, 4),
           **hu.gcs_cpu_only)
    def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
        # A and B have scale 1, so exactly represented after quantization
        A_min = -77
        A_max = A_min + 255
        A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)
        A = A.astype(np.float32)
        # input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw
        # when multiplied with B_min and B_max
        if batch_size > 0 and m > 0:
            A[0, :, 0] = A_min
            A[0, 0, 1] = A_max

        B_min = -100
        B_max = B_min + 255
        B = np.round(np.random.rand(batch_size, n, k) * 255 + B_min)
        B = B.astype(np.float32)
        if batch_size > 0:
            B[0, 0, 0] = B_min
            B[0, 1, 0] = B_max

        for i in range(batch_size):
            avoid_vpmaddubsw_overflow_fc(m, k, n, A[i, ], A_min, A_max, B[i, ],
                                         B_min, B_max)

        for trans_a, trans_b in product([0, 1], [0, 1]):
            Output = collections.namedtuple("Output",
                                            ["Y", "op_type", "engine"])
            outputs = []

            op_engine_list = [
                ("BatchMatMul", ""),
                ("BatchMatMul", "DNNLOWP"),
                ("BatchMatMul", "DNNLOWP_16"),
                ("Int8BatchMatMul", "DNNLOWP"),
            ]

            for op_type, engine in op_engine_list:
                net = core.Net("test_net")

                if "DNNLOWP" in engine:
                    quantize_A = core.CreateOperator("Quantize", ["A"],
                                                     ["A_q"],
                                                     engine=engine,
                                                     device_option=gc)
                    net.Proto().op.extend([quantize_A])

                    quantize_B = core.CreateOperator("Quantize", ["B"],
                                                     ["B_q"],
                                                     engine=engine,
                                                     device_option=gc)
                    net.Proto().op.extend([quantize_B])

                batch_matmul = core.CreateOperator(
                    op_type,
                    [
                        "A_q" if "DNNLOWP" in engine else "A",
                        "B_q" if "DNNLOWP" in engine else "B",
                    ],
                    ["Y_q" if "DNNLOWP" in engine else "Y"],
                    trans_a=trans_a,
                    trans_b=trans_b,
                    engine=engine,
                    device_option=gc,
                )
                net.Proto().op.extend([batch_matmul])

                if "DNNLOWP" in engine:
                    dequantize = core.CreateOperator("Dequantize", ["Y_q"],
                                                     ["Y"],
                                                     engine=engine,
                                                     device_option=gc)
                    net.Proto().op.extend([dequantize])

                self.ws.create_blob("A").feed(
                    np.transpose(A, (0, 2, 1)) if trans_a else A,
                    device_option=gc)
                self.ws.create_blob("B").feed(
                    B if trans_b else np.transpose(B, (0, 2, 1)),
                    device_option=gc)
                self.ws.run(net)
                outputs.append(
                    Output(Y=self.ws.blobs["Y"].fetch(),
                           op_type=op_type,
                           engine=engine))

            check_quantized_results_close(outputs)

    # correctness test with no quantization error in inputs
    @given(
        m=st.integers(0, 32),
        n=st.integers(4, 32),
        k=st.integers(4, 32),
        C_1=st.integers(0, 3),  # number of batch dims
        C_2=st.integers(0, 3),
        A_quantized=st.booleans(),
        B_quantized=st.booleans(),
        out_quantized=st.booleans(),
        **hu.gcs_cpu_only)
    def test_dnnlowp_batch_matmul_int_constant_B(self, m, n, k, C_1, C_2,
                                                 A_quantized, B_quantized,
                                                 out_quantized, gc, dc):
        batch_dims = tuple(np.random.randint(3, size=max(C_1, C_2)))
        batch_dims_A = batch_dims[-C_1:]
        batch_dims_B = batch_dims[-C_2:]
        A = np.zeros(batch_dims_A + (m, k)).astype(np.float32)
        B = np.zeros(batch_dims_B + (n, k)).astype(np.float32)

        if np.prod(batch_dims) > 0:
            for index in np.ndindex(batch_dims_A):
                # When both input and output are float, each input of the batch has
                # scale 1 but with different offset, so input-wise quantization
                # shouldn't have any input quantization error
                # A_min = -77 if (A_quantized or out_quantized) else -77 + i
                A_min = -77
                A_max = A_min + 255
                A[index] = np.round(np.random.rand(m, k) * 255 + A_min)
                # input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw
                # when multiplied with B_min and B_max
                A[index][:, 0] = A_min
                if m != 0:
                    A[index][0, 1] = A_max

            i = 0
            for index in np.ndindex(batch_dims_B):
                # When weight is quantized in a lazy manner, each input of the batch has
                # scale 1 but with different offset, so input-wise quantization
                # shouldn't have any input quantization error when weight is quantized
                # in a lazy manner.
                B_min = -100 if B_quantized else -100 + i
                # B_min = -100
                B_max = B_min + 255
                B[index] = np.round(np.random.rand(n, k) * 255 + B_min)
                B[index][0, 0] = B_min
                B[index][1, 0] = B_max

                if C_1 > C_2:
                    # A has more dims
                    for outer_index in np.ndindex(batch_dims_A[:C_1 - C_2]):
                        avoid_vpmaddubsw_overflow_fc(
                            m,
                            k,
                            n,
                            A[outer_index] if C_2 == 0 else A[outer_index +
                                                              index],
                            A_min,
                            A_max,
                            B[index],
                            B_min,
                            B_max,
                        )
                else:
                    avoid_vpmaddubsw_overflow_fc(m, k, n, A[index[-C_1:]],
                                                 A_min, A_max, B[index], B_min,
                                                 B_max)
                i += 1

        for trans_a, trans_b in product([0, 1], [0, 1]):
            Output = collections.namedtuple("Output",
                                            ["Y", "op_type", "engine"])
            outputs = []

            op_engine_list = [
                ("BatchMatMul", ""),
                ("BatchMatMul", "DNNLOWP"),
                ("Int8BatchMatMul", "DNNLOWP"),
            ]

            for op_type, engine in op_engine_list:
                net = core.Net("test_net")

                do_quantize_A = "DNNLOWP" in engine and A_quantized
                do_quantize_B = "DNNLOWP" in engine and B_quantized
                do_dequantize = "DNNLOWP" in engine and out_quantized

                if do_quantize_A:
                    quantize_A = core.CreateOperator("Quantize", ["A"],
                                                     ["A_q"],
                                                     engine=engine,
                                                     device_option=gc)
                    net.Proto().op.extend([quantize_A])

                if do_quantize_B:
                    int8_given_tensor_fill, B_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
                        B if trans_b else B.swapaxes(-1, -2), "B_q")
                    net.Proto().op.extend([int8_given_tensor_fill])

                batch_matmul = core.CreateOperator(
                    op_type,
                    [
                        "A_q" if do_quantize_A else "A",
                        "B_q" if do_quantize_B else "B"
                    ],
                    ["Y_q" if do_dequantize else "Y"],
                    trans_a=trans_a,
                    trans_b=trans_b,
                    broadcast=True,
                    constant_B=True,
                    dequantize_output=not do_dequantize,
                    engine=engine,
                    device_option=gc,
                )
                if do_quantize_B:
                    # When quantized weight is provided, we can't rescale the
                    # output dynamically by looking at the range of output of each
                    # batch, so here we provide the range of output observed from
                    # fp32 reference implementation
                    dnnlowp_utils.add_quantization_param_args(
                        batch_matmul, outputs[0][0])
                net.Proto().op.extend([batch_matmul])

                if do_dequantize:
                    dequantize = core.CreateOperator("Dequantize", ["Y_q"],
                                                     ["Y"],
                                                     engine=engine,
                                                     device_option=gc)
                    net.Proto().op.extend([dequantize])

                self.ws.create_blob("A").feed(
                    A.swapaxes(-1, -2) if trans_a else A, device_option=gc)
                self.ws.create_blob("B").feed(
                    B if trans_b else B.swapaxes(-1, -2), device_option=gc)
                self.ws.run(net)
                outputs.append(
                    Output(Y=self.ws.blobs["Y"].fetch(),
                           op_type=op_type,
                           engine=engine))

            if np.prod(batch_dims) > 0:
                check_quantized_results_close(outputs)
Example #32
        assert rbigint.fromint(-8388608).tobytes(3, 'little',
                                                 signed=True) == '\x00\x00\x80'
        i = rbigint.fromint(-8388608)
        py.test.raises(InvalidEndiannessError,
                       i.tobytes,
                       3,
                       'foo',
                       signed=True)
        py.test.raises(InvalidSignednessError,
                       i.tobytes,
                       3,
                       'little',
                       signed=False)
        py.test.raises(OverflowError, i.tobytes, 2, 'little', signed=True)

    @given(strategies.binary(), strategies.booleans(), strategies.booleans())
    def test_frombytes_tobytes_hypothesis(self, s, big, signed):
        # check the roundtrip from binary strings to bigints and back
        byteorder = 'big' if big else 'little'
        bigint = rbigint.frombytes(s, byteorder=byteorder, signed=signed)
        t = bigint.tobytes(len(s), byteorder=byteorder, signed=signed)
        assert s == t


class TestInternalFunctions(object):
    def test__inplace_divrem1(self):
        # signs are not handled in the helpers!
        for x, y in [(1238585838347L, 3), (1234123412311231L, 1231231),
                     (99, 100)]:
            if y > MASK:
                continue
Example #33
def get_language(data):
    return data.draw(sampled_from(list(supported_languages.values())))


@given(lists(text()), text())
def test_shift(fragments, default):
    if fragments == []:
        assert p.shift(fragments, default) == default
    else:
        fragments2 = copy.copy(fragments)
        head = p.shift(fragments, default)
        assert [head] + fragments == fragments2


@given(text(), booleans(), text(min_size=1))
@example("/foo", True, "0")
def test_destination(filepath, preserve_paths, outdir):
    dest = p.destination(
        filepath, preserve_paths=preserve_paths, outdir=outdir)
    assert dest.startswith(outdir)
    assert dest.endswith(".html")


@given(data(), text())
def test_parse(data, source):
    lang = get_language(data)
    parsed = p.parse(source, lang)
    for s in parsed:
        assert {"code_text", "docs_text"} == set(s.keys())
Example #34
def tick_classes(request):
    """
    Fixture for Tick based datetime offsets available for a time series.
    """
    return request.param


# ----------------------------------------------------------------
# Global setup for tests using Hypothesis

# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans()))

for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
    cls = getattr(pd.tseries.offsets, name)
    st.register_type_strategy(
        cls,
        st.builds(
            cls,
            n=st.integers(-5, 5),
            normalize=st.booleans(),
            month=st.integers(min_value=1, max_value=12),
        ),
    )
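# Illustrative check (an addition, not part of the original conftest): once
# registered, these classes resolve through st.from_type, e.g.:
@given(st.from_type(pd.tseries.offsets.MonthBegin))
def test_monthbegin_from_type(offset):
    assert -99 <= offset.n <= 99  # bounds follow the registration above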

for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
    cls = getattr(pd.tseries.offsets, name)
class TestQuantizedTensor(TestCase):
    def test_qtensor(self):
        num_elements = 10
        scale = 1.0
        zero_point = 2
        for device in get_supported_device_types():
            for dtype in [torch.qint8, torch.quint8, torch.qint32]:
                r = torch.ones(num_elements, dtype=torch.float, device=device)
                qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
                self.assertEqual(qr.q_scale(), scale)
                self.assertEqual(qr.q_zero_point(), zero_point)
                self.assertTrue(qr.is_quantized)
                self.assertFalse(r.is_quantized)
                self.assertEqual(qr.qscheme(), torch.per_tensor_affine)
                self.assertTrue(isinstance(qr.qscheme(), torch.qscheme))
                # slicing and int_repr
                int_repr = qr.int_repr()
                for num in int_repr:
                    self.assertEqual(num, 3)
                for num in qr[2:].int_repr():
                    self.assertEqual(num, 3)
                # dequantize
                rqr = qr.dequantize()
                for i in range(num_elements):
                    self.assertEqual(r[i], rqr[i])
                # we can also print a qtensor
                empty_r = torch.ones((0, 1), dtype=torch.float, device=device)
                empty_qr = torch.quantize_per_tensor(empty_r, scale,
                                                     zero_point, dtype)

                device_msg = "" if device == 'cpu' else "device='" + device + ":0', "
                dtype_msg = str(dtype) + ", "
                self.assertEqual(
                    ' '.join(str(empty_qr).split()), "tensor([], " +
                    device_msg + "size=(0, 1), dtype=" + dtype_msg +
                    "quantization_scheme=torch.per_tensor_affine, " +
                    "scale=1.0, zero_point=2)")

    def test_qtensor_float_assignment(self):
        # Scalar Tensor
        # item
        scale = 1.0
        zero_point = 2
        r = torch.ones(1, dtype=torch.float)
        for dtype in [torch.qint8, torch.quint8, torch.qint32]:
            qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
            self.assertEqual(qr.item(), 1)
            self.assertEqual(qr[0].item(), 1)
            # assignment
            self.assertTrue(qr[0].is_quantized)
            qr[0] = 11.3  # float assignment
            self.assertEqual(qr.item(), 11)
            x = torch.ones(1, dtype=torch.float) * 15.3
            # Copying from a float Tensor
            qr[:] = x
            self.assertEqual(qr.item(), 15)

            dtype_msg = str(dtype) + ", "
            self.assertEqual(
                ' '.join(str(qr).split()), "tensor([15.], size=(1,), dtype=" +
                dtype_msg + "quantization_scheme=torch.per_tensor_affine, " +
                "scale=1.0, zero_point=2)")

    def test_qtensor_quant_dequant(self):
        scale = 0.02
        zero_point = 2
        for device in get_supported_device_types():
            r = torch.rand(3, 2, 4, 5, dtype=torch.float,
                           device=device) * 4 - 2
            for memory_format in [
                    torch.contiguous_format, torch.channels_last
            ]:
                r = r.contiguous(memory_format=memory_format)
                for dtype in [torch.qint8, torch.quint8, torch.qint32]:
                    qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
                    rqr = qr.dequantize()
                    self.assertTrue(
                        np.allclose(r.cpu().numpy(),
                                    rqr.cpu().numpy(),
                                    atol=2 / scale))
        # Also check 5D tensors work.
        for device in get_supported_device_types():
            r = torch.rand(3, 2, 4, 5, 6, dtype=torch.float,
                           device=device) * 4 - 2
            for dtype in [torch.qint8, torch.quint8, torch.qint32]:
                qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)
                rqr = qr.dequantize()
                self.assertTrue(
                    np.allclose(r.cpu().numpy(),
                                rqr.cpu().numpy(),
                                atol=2 / scale))

    # legacy constructor/new doesn't support qtensors
    def test_qtensor_legacy_new_failure(self):
        r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
        scale = 0.02
        zero_point = 2
        qr = torch.quantize_per_tensor(r, scale, zero_point, torch.quint8)
        self.assertRaises(RuntimeError, lambda: qr.new(device='cpu'))
        self.assertRaises(RuntimeError, lambda: qr.new(r.storage()))
        self.assertRaises(RuntimeError, lambda: qr.new(r))
        self.assertRaises(RuntimeError, lambda: qr.new(torch.Size([2, 3])))
        self.assertRaises(RuntimeError, lambda: qr.new([6]))

    def test_per_channel_qtensor_creation(self):
        numel = 10
        ch_axis = 0
        scales = torch.rand(numel)
        zero_points_int = torch.randint(0, 10, size=(numel, ))
        zero_points_float = torch.randn(numel)
        for dtype, zero_points in itertools.product(
            [torch.qint8, torch.quint8], [zero_points_float, zero_points_int]):
            q = torch._empty_per_channel_affine_quantized(
                [numel],
                scales=scales,
                zero_points=zero_points,
                axis=ch_axis,
                dtype=dtype)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(scales, q.q_per_channel_scales())
            self.assertEqual(zero_points, q.q_per_channel_zero_points())
            self.assertEqual(ch_axis, q.q_per_channel_axis())

        # create Tensor from uint8_t Tensor, scales and zero_points
        for zero_points in [zero_points_float, zero_points_int]:
            int_tensor = torch.randint(0,
                                       100,
                                       size=(numel, ),
                                       dtype=torch.uint8)
            q = torch._make_per_channel_quantized_tensor(
                int_tensor, scales, zero_points, ch_axis)
            self.assertEqual(int_tensor, q.int_repr())
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(scales, q.q_per_channel_scales())
            self.assertEqual(zero_points, q.q_per_channel_zero_points())
            self.assertEqual(ch_axis, q.q_per_channel_axis())

    def test_qtensor_creation(self):
        scale = 0.5
        zero_point = 10
        numel = 10
        for device in get_supported_device_types():
            q = torch._empty_affine_quantized([numel],
                                              scale=scale,
                                              zero_point=zero_point,
                                              device=device,
                                              dtype=torch.quint8)
            self.assertEqual(scale, q.q_scale())
            self.assertEqual(zero_point, q.q_zero_point())

            # create Tensor from uint8_t Tensor, scale and zero_point
            int_tensor = torch.randint(0,
                                       100,
                                       size=(10, ),
                                       device=device,
                                       dtype=torch.uint8)
            q = torch._make_per_tensor_quantized_tensor(
                int_tensor, scale, zero_point)
            self.assertEqual(int_tensor, q.int_repr())
            self.assertEqual(scale, q.q_scale())
            self.assertEqual(zero_point, q.q_zero_point())

            # create via empty_like
            q = torch._empty_affine_quantized([numel],
                                              scale=scale,
                                              zero_point=zero_point,
                                              device=device,
                                              dtype=torch.quint8)
            q_el = torch.empty_like(q)
            self.assertEqual(q.q_scale(), q_el.q_scale())
            self.assertEqual(q.q_zero_point(), q_el.q_zero_point())
            self.assertEqual(q.dtype, q_el.dtype)

            # create via empty_like but change the dtype (currently not supported)
            with self.assertRaises(RuntimeError):
                torch.empty_like(q, dtype=torch.qint8)

    def test_qtensor_dtypes(self):
        r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
        scale = 0.2
        zero_point = 2
        qr = torch.quantize_per_tensor(r, scale, zero_point, torch.qint8)
        rqr = qr.dequantize()
        self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
        qr = torch.quantize_per_tensor(r, scale, zero_point, torch.quint8)
        rqr = qr.dequantize()
        self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))
        qr = torch.quantize_per_tensor(r, scale, zero_point, torch.qint32)
        rqr = qr.dequantize()
        self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))

    def _test_quantize_per_channel(self, r, scales, zero_points, axis,
                                   float_params):
        def _quantize_per_channel_ref_nd(data, scales, zero_points,
                                         float_params):
            dims = data.size()
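            # collapse all dims before the channel axis into one, keep the
            # channel axis itself, and flatten everything after it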
            data = data.view(-1, dims[axis], np.prod(dims[axis + 1:]))
            res = torch.empty_like(data)
            quant_min, quant_max = 0, 255
            for i in range(res.size()[0]):
                for j in range(res.size()[1]):
                    for k in range(res.size()[2]):
                        if float_params:
                            inv_scale = 1.0 / scales[j]
                            res[i][j][k] = np.clip(
                                np.round(data[i][j][k] * inv_scale +
                                         zero_points[j]), quant_min, quant_max)
                        else:
                            res[i][j][k] = np.clip(
                                np.round(data[i][j][k] / scales[j]) +
                                zero_points[j], quant_min, quant_max)
            res = res.view(*dims)
            return res

        contig_format = torch.channels_last if r.ndim == 4 else torch.channels_last_3d
        for memory_format in [torch.contiguous_format, contig_format]:
            ref_res = _quantize_per_channel_ref_nd(r, scales, zero_points,
                                                   float_params)
            r_contig = r.contiguous(memory_format=memory_format)
            qr = torch.quantize_per_channel(r_contig, scales, zero_points,
                                            axis, torch.quint8)
            rqr = qr.dequantize()
            self.assertTrue(np.allclose(qr.int_repr(), ref_res))
            self.assertTrue(
                np.allclose(r.numpy(),
                            rqr.numpy(),
                            atol=2 / np.min(scales.numpy())))

    def test_qtensor_quantize_per_channel(self):
        r = torch.rand(3, 2, dtype=torch.float) * 4 - 2
        scales = torch.tensor([0.2, 0.03], dtype=torch.double)
        zero_points = torch.tensor([5, 10], dtype=torch.long)
        axis = 1

        def quantize_c(data, scales, zero_points):
            res = torch.empty((3, 2))
            quant_min, quant_max = 0, 255
            for i in range(3):
                for j in range(2):
                    res[i][j] = np.clip(
                        np.round(data[i][j] / scales[j]) + zero_points[j],
                        quant_min, quant_max)
            return res

        qr = torch.quantize_per_channel(r, scales, zero_points, axis,
                                        torch.quint8)
        rqr = qr.dequantize()
        self.assertTrue(
            np.allclose(qr.int_repr(), quantize_c(r, scales, zero_points)))
        self.assertTrue(
            np.allclose(r.numpy(),
                        rqr.numpy(),
                        atol=2 / np.min(scales.numpy())))

        # Check 4D tensor with 2 different memory formats.
        r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4 - 2
        scales = torch.tensor([0.2, 0.03], dtype=torch.double)
        zero_points = torch.tensor([5, 10], dtype=torch.long)
        self._test_quantize_per_channel(r, scales, zero_points, 1, False)

        scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)
        zero_points = torch.tensor([5, 10, 7], dtype=torch.long)
        self._test_quantize_per_channel(r, scales, zero_points, 0, False)

        # Check 5D tensor.
        r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2
        scales = torch.tensor([0.2, 0.03], dtype=torch.double)
        zero_points = torch.tensor([5, 10], dtype=torch.long)
        self._test_quantize_per_channel(r, scales, zero_points, 1, False)

        scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)
        zero_points = torch.tensor([5, 10, 7], dtype=torch.long)
        self._test_quantize_per_channel(r, scales, zero_points, 0, False)

    def test_quantize_per_channel_float_qparams(self):
        r = torch.rand(3, 2, dtype=torch.float) * 4
        scales = torch.tensor([0.2, 0.03], dtype=torch.float)
        zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
        axis = 1

        # Reference quantize function with FP zero_point.
        def quantize_ref(data, scales, zero_points):
            res = torch.empty((3, 2))
            quant_min, quant_max = 0, 255
            for i in range(3):
                for j in range(2):
                    inv_scale = 1.0 / scales[j]
                    res[i][j] = np.clip(
                        np.round(data[i][j] * inv_scale + zero_points[j]),
                        quant_min, quant_max)
            return res

        qr = torch.quantize_per_channel(r, scales, zero_points, axis,
                                        torch.quint8)
        dequant_tensor = qr.dequantize()
        ref = quantize_ref(r, scales, zero_points)
        self.assertTrue(np.allclose(qr.int_repr(), ref))
        self.assertTrue(np.allclose(r.numpy(), dequant_tensor.numpy(), atol=1))

        # Check 4D tensor with 2 different memory formats.
        r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4
        scales = torch.tensor([0.2, 0.03], dtype=torch.float)
        zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
        self._test_quantize_per_channel(r, scales, zero_points, 1, True)

        scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)
        zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)
        self._test_quantize_per_channel(r, scales, zero_points, 0, True)

        # Check 5D tensor.
        r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2
        scales = torch.tensor([0.2, 0.03], dtype=torch.float)
        zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)
        self._test_quantize_per_channel(r, scales, zero_points, 1, True)

        scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)
        zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)
        self._test_quantize_per_channel(r, scales, zero_points, 0, True)

    def test_qtensor_permute(self):
        scale = 0.02
        zero_point = 1
        for device in get_supported_device_types():
            r = torch.rand(10, 30, 2, 2, device=device,
                           dtype=torch.float) * 4 - 2
            for dtype in [torch.qint8, torch.quint8, torch.qint32]:
                qr = torch.quantize_per_tensor(r,
                                               scale,
                                               zero_point,
                                               dtype=dtype)
                qr = qr.transpose(0, 1)
                rqr = qr.dequantize()
                # compare transpose + dequantized result with original transposed result
                self.assertTrue(
                    np.allclose(r.cpu().numpy().transpose([1, 0, 2, 3]),
                                rqr.cpu().numpy(),
                                atol=2 / scale))

                qr = torch.quantize_per_tensor(r,
                                               scale,
                                               zero_point,
                                               dtype=dtype)
                qr1 = qr.permute([1, 0, 2, 3])
                qr2 = qr.transpose(0, 1)
                # compare int representation after transformations
                self.assertEqual(qr1.int_repr(), qr2.int_repr())
                self.assertEqual(qr1.q_scale(), qr2.q_scale())
                self.assertEqual(qr1.q_zero_point(), qr2.q_zero_point())
                # compare dequantized result
                self.assertEqual(qr1.dequantize(), qr2.dequantize())
                # compare permuted + dequantized result with original transposed result
                self.assertTrue(
                    np.allclose(qr2.dequantize().cpu().numpy(),
                                r.cpu().numpy().transpose([1, 0, 2, 3]),
                                atol=2 / scale))
                # make permuted result contiguous
                self.assertEqual(qr2.contiguous().int_repr(), qr2.int_repr())

                # change memory format
                qlast = qr.contiguous(memory_format=torch.channels_last)
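                # contiguous layout has non-increasing strides; channels_last does not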
                self.assertEqual(qr.stride(),
                                 list(reversed(sorted(qr.stride()))))
                self.assertNotEqual(qlast.stride(),
                                    list(reversed(sorted(qlast.stride()))))
                self.assertEqual(qr.int_repr(), qlast.int_repr())
                self.assertEqual(qr.q_scale(), qlast.q_scale())
                self.assertEqual(qr.q_zero_point(), qlast.q_zero_point())
                self.assertEqual(qlast.dequantize(), qr.dequantize())

                # permuting larger tensors
                x = torch.randn(64, 64, device=device)
                qx = torch.quantize_per_tensor(x, 1.0, 0, dtype)
                # should work
                qx.permute([1, 0])

    def test_qtensor_per_channel_permute(self):
        r = torch.rand(20, 10, 2, 2, dtype=torch.float) * 4 - 2
        dtype = torch.qint8
        scales = torch.rand(10) * 0.02 + 0.01
        zero_points = torch.round(torch.rand(10) * 2 - 1).to(torch.long)
        qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)

        # we can't reorder the axis
        with self.assertRaises(RuntimeError):
            qr.transpose(0, 1)

        # but we can change memory format
        qlast = qr.contiguous(memory_format=torch.channels_last)
        self.assertEqual(qr.stride(), list(reversed(sorted(qr.stride()))))
        self.assertNotEqual(qlast.stride(),
                            list(reversed(sorted(qlast.stride()))))
        self.assertEqual(qr.int_repr(), qlast.int_repr())
        # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
        self.assertEqualIgnoreType(scales, qlast.q_per_channel_scales())
        self.assertEqual(zero_points, qlast.q_per_channel_zero_points())
        self.assertEqual(1, qlast.q_per_channel_axis())
        self.assertEqual(qlast.dequantize(), qr.dequantize())

    def test_qtensor_load_save(self):
        scale = 0.2
        zero_point = 10
        # storage is not accessible on CUDA right now
        device = "cpu"
        r = torch.rand(15, 2, dtype=torch.float32, device=device) * 2
        for dtype in [torch.qint8, torch.quint8, torch.qint32]:
            qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)
            qrv = qr[:, 1]
            with tempfile.NamedTemporaryFile() as f:
                # Serializing and Deserializing Tensor
                torch.save((qr, qrv), f)
                f.seek(0)
                qr2, qrv2 = torch.load(f)
                self.assertEqual(qr, qr2)
                self.assertEqual(qrv, qrv2)
                self.assertEqual(qr2.storage().data_ptr(),
                                 qrv2.storage().data_ptr())

    def test_qtensor_per_channel_load_save(self):
        r = torch.rand(20, 10, dtype=torch.float) * 4 - 2
        scales = torch.rand(10, dtype=torch.double) * 0.02 + 0.01
        zero_points = torch.round(torch.rand(10) * 20 + 1).to(torch.long)
        # qint32 and CUDA are not supported yet
        for dtype in [torch.quint8, torch.qint8]:
            qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)
            with tempfile.NamedTemporaryFile() as f:
                # Serializing and Deserializing Tensor
                torch.save(qr, f)
                f.seek(0)
                qr2 = torch.load(f)
                self.assertEqual(qr, qr2)

    def test_qtensor_copy(self):
        scale = 0.5
        zero_point = 10
        numel = 10
        for dtype in [torch.qint8, torch.quint8, torch.qint32]:
            for device in get_supported_device_types():
                # copy from same scale and zero_point
                q = torch._empty_affine_quantized([numel],
                                                  scale=scale,
                                                  zero_point=zero_point,
                                                  device=device,
                                                  dtype=dtype)
                q2 = torch._empty_affine_quantized([numel],
                                                   scale=scale,
                                                   zero_point=zero_point,
                                                   device=device,
                                                   dtype=dtype)
                q.copy_(q2)
                self.assertEqual(q.int_repr(), q2.int_repr())
                self.assertEqual(q.q_scale(), q2.q_scale())
                self.assertEqual(q.q_zero_point(), q2.q_zero_point())
                # copying from different scale and zero_point
                new_scale = 3.2
                new_zero_point = 5
                q = torch._empty_affine_quantized([numel],
                                                  scale=new_scale,
                                                  zero_point=new_zero_point,
                                                  device=device,
                                                  dtype=dtype)
                # check original scale and zero_points are set correctly
                self.assertEqual(q.q_scale(), new_scale)
                self.assertEqual(q.q_zero_point(), new_zero_point)
                q.copy_(q2)
                # check that scale and zero_point have been copied
                self.assertEqual(q, q2)
                # can't copy from quantized tensor to non-quantized tensor
                r = torch.empty([numel], dtype=torch.float)
                q = torch._empty_affine_quantized([numel],
                                                  scale=scale,
                                                  zero_point=zero_point,
                                                  dtype=dtype)
                with self.assertRaisesRegex(RuntimeError,
                                            "please use dequantize"):
                    r.copy_(q)
            # copy from float doesn't support cuda
            device = 'cpu'
            # check copy from non-quantized to quantized
            r = torch.randn([numel], dtype=torch.float).to(device)
            q = torch._empty_affine_quantized([numel],
                                              scale=scale,
                                              zero_point=zero_point,
                                              dtype=dtype,
                                              device=device)
            q.copy_(r)
            qr = torch.quantize_per_tensor(r,
                                           scale=scale,
                                           zero_point=zero_point,
                                           dtype=dtype)
            self.assertEqual(q, qr)

    def test_torch_qtensor_deepcopy(self):
        # cuda is not supported yet
        device = "cpu"
        q_int = torch.randint(0, 100, [3, 5], device=device, dtype=torch.uint8)
        scale, zero_point = 2.0, 3
        q = torch._make_per_tensor_quantized_tensor(q_int,
                                                    scale=scale,
                                                    zero_point=zero_point)
        qc = deepcopy(q)
        self.assertEqual(qc, q)

    def test_clone(self):
        numel = 10
        scale = 0.5
        zero_point = 10

        options = itertools.product(get_supported_device_types(),
                                    [torch.qint8, torch.quint8, torch.qint32])

        for device, dtype in options:
            per_tensor_quantized = torch._empty_affine_quantized(
                [numel],
                scale=scale,
                zero_point=zero_point,
                device=device,
                dtype=dtype)
            per_channel_quantized = torch._empty_per_channel_affine_quantized(
                [numel],
                scales=torch.tensor([scale]),
                zero_points=torch.tensor([zero_point]),
                axis=0,
                device=device,
                dtype=dtype)
            qtensors = [per_tensor_quantized, per_channel_quantized]

            for q in qtensors:
                q2 = q.clone()
                # Check to make sure the scale and zero_point have been copied.
                self.assertEqual(q, q2)

    def test_qtensor_fill(self):
        numel = 10
        scale = 0.5
        zero_point = 10

        ones = torch.ones(numel).to(torch.float)

        types = [torch.qint8, torch.quint8, torch.qint32]
        fills = [-1, 1, 2**32]  # negative, positive, overflow

        # `fill_` uses `copy_(float)`, which doesn't support CUDA
        device = 'cpu'
        ones = ones.to(device)
        for qtype, fill_with in itertools.product(types, fills):
            q_filled = torch._empty_affine_quantized([numel],
                                                     scale=scale,
                                                     zero_point=zero_point,
                                                     device=device,
                                                     dtype=qtype)
            q_filled.fill_(fill_with)
            int_repr = torch.quantize_per_tensor(ones * fill_with, scale,
                                                 zero_point, qtype)
            fill_with = int_repr.dequantize()
            int_repr = int_repr.int_repr()

            self.assertEqual(q_filled.int_repr(), int_repr)
            self.assertEqual(q_filled.dequantize(), fill_with)
            # Make sure the scale and zero_point don't change
            self.assertEqual(q_filled.q_scale(), scale)
            self.assertEqual(q_filled.q_zero_point(), zero_point)

    def test_qtensor_view(self):
        scale, zero_point, dtype = 1.0, 2, torch.uint8
        for device in get_supported_device_types():
            q_int = torch.randint(0,
                                  100, [1, 2, 3],
                                  device=device,
                                  dtype=dtype)
            q = torch._make_per_tensor_quantized_tensor(q_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            q2 = q.view(1, 3, 2)
            self.assertEqual(q.numel(), q2.numel())
            # testing -1
            self.assertEqual(q, q2.view(1, -1, 3))

            a_int = torch.randint(0,
                                  100, [1, 2, 3, 4],
                                  device=device,
                                  dtype=dtype)
            a = torch._make_per_tensor_quantized_tensor(a_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            b = a.transpose(1, 2)  # swaps 2nd and 3rd dimension
            c = a.view(1, 3, 2, 4)  # does not change tensor layout in memory
            self.assertEqual(b.size(), c.size())
            self.assertEqual(b.q_scale(), c.q_scale())
            self.assertEqual(b.q_zero_point(), c.q_zero_point())
            self.assertNotEqual(b.stride(), c.stride())
            # size is the same but the underlying data is different
            self.assertNotEqual(b.int_repr(), c.int_repr())
            # torch.equal is not supported for the cuda backend
            if device == 'cpu':
                self.assertFalse(torch.equal(b, c))
            else:
                self.assertRaises(RuntimeError, lambda: torch.equal(b, c))

            # view() is not allowed on a non-contiguous tensor
            a_int = torch.randint(0,
                                  100, [1, 2, 3, 4],
                                  device=device,
                                  dtype=dtype)
            a = torch._make_per_tensor_quantized_tensor(a_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            b = a.transpose(1, 2)  # swaps 2nd and 3rd dimension
            err_str = "view size is not compatible with input tensor's size and stride*"
            with self.assertRaisesRegex(RuntimeError, err_str):
                b.view(1, 4, 2, 3)
            # view on contiguous tensor is fine
            b.contiguous().view(1, 4, 2, 3)
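
    # A minimal float-tensor sketch of the same contiguity rules, assuming
    # only core torch semantics: transpose() yields a non-contiguous view,
    # view() requires compatible strides, and reshape() copies when it must.
    def _view_vs_reshape_sketch(self):
        t = torch.arange(24).reshape(1, 2, 3, 4)
        u = t.transpose(1, 2)        # non-contiguous after the swap
        assert not u.is_contiguous()
        _ = u.reshape(1, 4, 2, 3)    # fine: reshape falls back to a copy
        with self.assertRaises(RuntimeError):
            u.view(1, 4, 2, 3)       # view cannot re-stride this layout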

    def test_qtensor_resize(self):
        scale, zero_point, dtype = 1.0, 2, torch.uint8
        sizes1 = [1, 2, 3, 4]
        sizes2 = [1 * 2, 3 * 4]
        sizes3 = [1, 2 * 3, 4]
        sizes4 = [1 * 2 * 3 * 4]
        sizes5 = [1, 2, 1, 3, 1, 4]

        q1_int = torch.randint(0, 100, sizes1, dtype=dtype)
        q1 = torch._make_per_tensor_quantized_tensor(q1_int,
                                                     scale=scale,
                                                     zero_point=zero_point)
        q2 = q1.resize(*sizes2)
        q3 = q2.resize(*sizes3)
        q4 = q3.resize(*sizes4)
        q5 = q4.resize(*sizes5)

        self.assertEqual(q1.numel(), q2.numel())
        self.assertEqual(q1.numel(), q3.numel())
        self.assertEqual(q1.numel(), q4.numel())
        self.assertEqual(q1.numel(), q5.numel())

        # Compare original and post-transpose
        a_int = torch.randint(0, 100, sizes1, dtype=dtype)
        a = torch._make_per_tensor_quantized_tensor(a_int,
                                                    scale=scale,
                                                    zero_point=zero_point)
        b = a.transpose(1, 2)  # swaps 2nd and 3rd dimension
        c = b.resize(*sizes1)  # Change the sizes back to the original

        self.assertEqual(a.size(), c.size())
        self.assertEqual(b.q_scale(), c.q_scale())
        self.assertEqual(b.q_zero_point(), c.q_zero_point())
        self.assertNotEqual(b.stride(), c.stride())
        # size is the same but the underlying data is different
        self.assertNotEqual(b.int_repr(), c.int_repr())
        self.assertFalse(torch.equal(b, c))

        # Throws an error if numel is wrong
        q1_int = torch.randint(0, 100, sizes1, dtype=dtype)
        q1 = torch._make_per_tensor_quantized_tensor(q1_int,
                                                     scale=scale,
                                                     zero_point=zero_point)
        err_str = "requested resize to*"
        with self.assertRaisesRegex(RuntimeError, err_str):
            q2 = q1.resize(*sizes1[:-1])
        # resize on both contiguous and non-contiguous tensors should be fine
        q3 = q1.resize(*sizes2)
        q4 = q1.contiguous().resize(*sizes2)
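
    # Hedged note on the transposed case above: unlike view, resize succeeds
    # on a non-contiguous tensor, but only numel and the qparams are checked
    # to survive; the element order after resizing a non-contiguous tensor
    # is not meaningful, which is why b and c compare unequal.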

    def test_qtensor_reshape(self):
        scale, zero_point, dtype = 1.0, 2, torch.uint8
        for device in get_supported_device_types():
            q_int = torch.randint(0, 100, [3, 5], dtype=dtype, device=device)
            q = torch._make_per_tensor_quantized_tensor(q_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            q2 = q.reshape([15])
            self.assertEqual(q.numel(), q2.numel())
            self.assertEqual(q2.size(), [15])
            # test -1 as an inferred dimension
            self.assertEqual(q, q2.reshape([3, -1]))

            a_int = torch.randint(0,
                                  100, [1, 2, 3, 4],
                                  dtype=dtype,
                                  device=device)
            a = torch._make_per_tensor_quantized_tensor(a_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            b = a.transpose(1, 2)  # swaps 2nd and 3rd dimension
            c = a.reshape(1, 3, 2, 4)  # does not change tensor layout
            self.assertEqual(b.size(), c.size())
            self.assertEqual(b.q_scale(), c.q_scale())
            self.assertEqual(b.q_zero_point(), c.q_zero_point())
            self.assertNotEqual(b.stride(), c.stride())
            self.assertNotEqual(b.int_repr(), c.int_repr())
            # torch.equal is not supported for the cuda backend
            if device == 'cpu':
                self.assertFalse(torch.equal(b, c))
            else:
                self.assertRaises(RuntimeError, lambda: torch.equal(b, c))

            # reshape can be used on a non-contiguous tensor
            a_int = torch.randint(0,
                                  100, [1, 2, 3, 4],
                                  dtype=dtype,
                                  device=device)
            a = torch._make_per_tensor_quantized_tensor(a_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            b = a.transpose(1, 2)  # swaps 2nd and 3rd dimension
            c = b.reshape(1, 4, 2, 3)

    def test_qtensor_unsqueeze(self):
        x = torch.randn((1, 3, 4))
        qx = torch.quantize_per_tensor(x,
                                       scale=1.0,
                                       zero_point=0,
                                       dtype=torch.quint8)
        qy = qx.unsqueeze(2)
        self.assertEqual(qy.size(), (1, 3, 1, 4))
        qy = qy.squeeze(2)
        self.assertEqual(qy.size(), qx.size())

        # Per channel qtensor
        scales = torch.tensor([1.0])
        zero_points = torch.tensor([0])
        qx = torch.quantize_per_channel(x,
                                        scales=scales,
                                        zero_points=zero_points,
                                        dtype=torch.quint8,
                                        axis=0)
        qy = qx.unsqueeze(0)
        self.assertEqual(qy.size(), (1, 1, 3, 4))
        self.assertEqual(qy.q_per_channel_axis(), 1)

        qz = qy.squeeze(0)
        self.assertEqual(qz.size(), x.size())
        self.assertEqual(qz.q_per_channel_axis(), 0)
        with self.assertRaisesRegex(
                RuntimeError,
                "Squeeze is only possible on non-axis dimension for Per-Channel"
        ):
            qz = qy.squeeze(1)

        # squeeze without dim specified
        x = torch.randn((3, 1, 2, 1, 4))
        scales = torch.tensor([1.0, 1.0])
        zero_points = torch.tensor([0, 0])
        qx = torch.quantize_per_channel(x,
                                        scales=scales,
                                        zero_points=zero_points,
                                        dtype=torch.quint8,
                                        axis=2)
        qz = qx.squeeze()
        self.assertEqual(qz.size(), (3, 2, 4))
        self.assertEqual(qz.q_per_channel_axis(), 1)
        with self.assertRaisesRegex(
                RuntimeError,
                "Squeeze is only possible on non-axis dimension for Per-Channel"
        ):
            qz = qy.squeeze()
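
    # Hedged note on the axis bookkeeping above: unsqueeze(d) inserts a new
    # dimension at position d, so a per-channel axis at or after d shifts up
    # by one, while squeeze may only drop size-1 dimensions that are not the
    # channel axis. Illustrative traces from this test:
    #   axis=0, unsqueeze(0)             -> axis becomes 1
    #   axis=2, squeeze() on (3,1,2,1,4) -> axis becomes 1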

    def test_repeat(self):
        scale, zero_point, dtype = 1.0, 2, torch.uint8
        for device in get_supported_device_types():
            q_int = torch.randint(0, 100, [3], dtype=dtype, device=device)
            q_int_repeat = q_int.repeat(4, 2)
            q_ref = torch._make_per_tensor_quantized_tensor(
                q_int_repeat, scale=scale, zero_point=zero_point)

            q = torch._make_per_tensor_quantized_tensor(q_int,
                                                        scale=scale,
                                                        zero_point=zero_point)
            q_repeat = q.repeat(4, 2)
            self.assertEqual(q_ref, q_repeat)

    def test_qscheme_pickle(self):
        f = Foo()
        buf = io.BytesIO()
        torch.save(f, buf)

        buf.seek(0)
        f2 = torch.load(buf)

        self.assertEqual(f2.qscheme, torch.per_tensor_symmetric)

    @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2,
                                              max_dims=4,
                                              min_side=1,
                                              max_side=10),
                       qparams=hu.qparams()),
           reduce_range=st.booleans())
    def test_choose_qparams(self, X, reduce_range):
        X, (scale, zero_point, torch_type) = X
        X = torch.from_numpy(X)
        X_scale, X_zp = _calculate_dynamic_qparams(X,
                                                   torch.quint8,
                                                   reduce_range=reduce_range)
        qparams = torch._choose_qparams_per_tensor(X, reduce_range)
        np.testing.assert_array_almost_equal(X_scale, qparams[0], decimal=3)
        self.assertEqual(X_zp, qparams[1])
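
    # A rough sketch, for orientation, of the asymmetric min/max scheme that
    # _calculate_dynamic_qparams is assumed to follow for quint8. This is an
    # assumption about the helper, not its actual implementation; clamping
    # and reduce_range handling may differ.
    def _naive_quint8_qparams(self, x, qmin=0, qmax=255):
        xmin = min(float(x.min()), 0.0)  # the range must contain zero
        xmax = max(float(x.max()), 0.0)
        scale = (xmax - xmin) / (qmax - qmin)
        if scale == 0.0:
            scale = 1.0  # degenerate all-zero input
        zero_point = int(round(qmin - xmin / scale))
        return scale, max(qmin, min(qmax, zero_point))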

    @unittest.skipIf(not torch.cuda.is_available() or TEST_WITH_ROCM,
                     'CUDA is not available or running under ROCm')
    def test_cuda_cpu_implementation_consistency(self):
        numel, zero_point, scale = 100, 2, 0.02
        r = torch.rand(numel, dtype=torch.float32, device='cpu') * 25 - 4
        for dtype in [torch.qint8, torch.quint8, torch.qint32]:
            qr_cpu = torch.quantize_per_tensor(r,
                                               scale,
                                               zero_point,
                                               dtype=dtype)
            qr_cuda = torch.quantize_per_tensor(r.cuda(),
                                                scale,
                                                zero_point,
                                                dtype=dtype)
            # int repr must be the same
            np.testing.assert_equal(qr_cpu.int_repr().numpy(),
                                    qr_cuda.int_repr().cpu().numpy())
            # dequantized values must be the same
            r_cpu = qr_cpu.dequantize().numpy()
            r_cuda = qr_cuda.dequantize().cpu().numpy()
            np.testing.assert_almost_equal(r_cuda, r_cpu, decimal=5)

    @unittest.skipIf(not torch.cuda.is_available() or TEST_WITH_ROCM,
                     'CUDA is not available or running under ROCm')
    def test_cuda_quantization_does_not_pin_memory(self):
        # Context - https://github.com/pytorch/pytorch/issues/41115
        x = torch.randn(3)
        self.assertEqual(x.is_pinned(), False)

        q_int = torch.randint(0,
                              100, [1, 2, 3],
                              device="cuda",
                              dtype=torch.uint8)
        q = torch._make_per_tensor_quantized_tensor(q_int,
                                                    scale=0.1,
                                                    zero_point=0)

        x = torch.randn(3)
        self.assertEqual(x.is_pinned(), False)

    def test_fp16_saturate_op(self):
        x = torch.ones(5, 5, dtype=torch.float32) * 65532
        x[0] = torch.ones(5) * -65532
        # the representable fp16 range is [-65504, +65504]
        ref = torch.ones(5, 5) * 65504
        ref[0] = torch.ones(5) * -65504
        y = torch._saturate_weight_to_fp16(x)
        self.assertEqual(y, ref)
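
    # Assuming _saturate_weight_to_fp16 clamps to the representable fp16
    # range rather than casting, the behaviour checked above can be sketched
    # as torch.clamp(x, -65504.0, 65504.0); a bare x.half() cast would
    # instead round 65532 up to inf.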

    def test_choose_qparams_optimized(self):
        for bit_width in [4, 2]:
            x = torch.randn(64, dtype=torch.float)
            y = torch.choose_qparams_optimized(x,
                                               numel=64,
                                               n_bins=200,
                                               ratio=0.16,
                                               bit_width=bit_width)
            ref = param_search_greedy(x.numpy(), bit_rate=bit_width)
            self.assertEqual(y[0].numpy(), ref[0])
            self.assertEqual(y[1].numpy(), ref[1])
Example #36
    def slow_always_true(x):
        time.sleep(0.1)
        return True

    start = time.time()
    find(s.lists(s.booleans()),
         slow_always_true,
         settings=settings(timeout=0.1, database=None))
    finish = time.time()
    run_time = finish - start
    assert run_time <= 0.3


some_normal_settings = settings()


def test_is_not_normally_default():
    assert settings.default is not some_normal_settings


@given(s.booleans())
@some_normal_settings
def test_settings_are_default_in_given(x):
    assert settings.default is some_normal_settings


def test_settings_are_default_in_find():
    find(s.booleans(),
         lambda x: settings.default is some_normal_settings,
         settings=some_normal_settings)
Example #37
from pymor.operators.constructions import induced_norm
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.tools.floatcmp import float_cmp
from pymor.vectorarrays.numpy import NumpyVectorSpace
from pymortests.fixtures.operator import operator_with_arrays_and_products
from pymortests.strategies import valid_inds, valid_inds_of_same_length
from pymortests.vectorarray import indexed
import pymortests.strategies as pyst


@pyst.given_vector_arrays(count=2,
                          tolerances=hyst.sampled_from([(1e-5, 1e-8),
                                                        (1e-10, 1e-12),
                                                        (0., 1e-8),
                                                        (1e-5, 1e-8)]),
                          sup_norm=hyst.booleans())
def test_almost_equal(vector_arrays, tolerances, sup_norm):
    v1, v2 = vector_arrays
    rtol, atol = tolerances
    try:
        dv1 = v1.to_numpy()
        dv2 = v2.to_numpy()
    except NotImplementedError:
        dv1 = dv2 = None
    for ind1, ind2 in valid_inds_of_same_length(v1, v2):
        r = almost_equal(v1[ind1],
                         v2[ind2],
                         sup_norm=sup_norm,
                         rtol=rtol,
                         atol=atol)
        assert isinstance(r, np.ndarray)
class TestSpecializedSegmentOps(hu.HypothesisTestCase):
    @given(batchsize=st.integers(1, 20),
           fptype=st.sampled_from([np.float16, np.float32]),
           fp16asint=st.booleans(),
           blocksize=st.sampled_from([8, 17, 32, 64, 85, 96, 128, 163]),
           normalize_by_lengths=st.booleans(),
           **hu.gcs)
    def test_sparse_lengths_sum_cpu(self, batchsize, fptype, fp16asint,
                                    blocksize, normalize_by_lengths, gc, dc):

        if not normalize_by_lengths:
            print("<test_sparse_lengths_sum_cpu>")
        else:
            print("<test_sparse_lengths_sum_mean_cpu>")

        tblsize = 300
        if fptype == np.float32:
            Tbl = np.random.rand(tblsize, blocksize).astype(np.float32)
            atol = 1e-5
        else:
            if fp16asint:
                Tbl = (10.0 *
                       np.random.rand(tblsize, blocksize)).round().astype(
                           np.float16)
                atol = 1e-3
            else:
                Tbl = np.random.rand(tblsize, blocksize).astype(np.float16)
                atol = 1e-1

        # number of elements in each segment (row)
        Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)
        # flat indices
        Indices = np.random.randint(0, tblsize,
                                    size=sum(Lengths)).astype(np.int64)

        if not normalize_by_lengths:
            op = core.CreateOperator("SparseLengthsSum",
                                     ["Tbl", "Indices", "Lengths"], "out")
        else:
            op = core.CreateOperator("SparseLengthsMean",
                                     ["Tbl", "Indices", "Lengths"], "out")

        self.ws.create_blob("Tbl").feed(Tbl)
        self.ws.create_blob("Indices").feed(Indices)
        self.ws.create_blob("Lengths").feed(Lengths)
        self.ws.run(op)

        def sparse_lengths_sum_ref(Tbl, Indices, Lengths):
            rptr = np.cumsum(np.insert(Lengths, [0], [0]))
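            # e.g. Lengths [2, 3] -> rptr [0, 2, 5]; segment i spans
            # Indices[rptr[i]:rptr[i + 1]] (CSR-style row offsets)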
            out = np.zeros((len(Lengths), blocksize))
            if not normalize_by_lengths:
                for i in range(0, len(rptr[0:-1])):
                    out[i] = Tbl[Indices[rptr[i]:rptr[i + 1]]].sum(axis=0)
            else:
                for i in range(0, len(rptr[0:-1])):
                    out[i] = Tbl[Indices[rptr[i]:rptr[i + 1]]].sum(
                        axis=0) * 1.0 / float(Lengths[i])

            return out

        np.testing.assert_allclose(self.ws.blobs[("out")].fetch(),
                                   sparse_lengths_sum_ref(
                                       Tbl, Indices, Lengths),
                                   rtol=1e-3,
                                   atol=atol)

    @given(batchsize=st.integers(1, 20),
           fptype=st.sampled_from([np.float16, np.float32]),
           fp16asint=st.booleans(),
           blocksize=st.sampled_from([8, 17, 32, 64, 85, 96, 128, 163]),
           **hu.gcs)
    def test_sparse_lengths_weightedsum_cpu(self, batchsize, fptype, fp16asint,
                                            blocksize, gc, dc):

        print("<test_sparse_lengths_weightedsum_cpu>")

        tblsize = 300
        if fptype == np.float32:
            Tbl = np.random.rand(tblsize, blocksize).astype(np.float32)
            atol = 1e-5
        else:
            if fp16asint:
                Tbl = (10.0 *
                       np.random.rand(tblsize, blocksize)).round().astype(
                           np.float16)
                atol = 1e-3
            else:
                Tbl = np.random.rand(tblsize, blocksize).astype(np.float16)
                atol = 1e-1

        # number of elements in each segment (row)
        Lengths = np.random.randint(1, 30, size=batchsize).astype(np.int32)
        # flat indices
        Indices = np.random.randint(0, tblsize,
                                    size=sum(Lengths)).astype(np.int64)
        Weights = np.random.rand(sum(Lengths)).astype(np.float32)

        op = core.CreateOperator("SparseLengthsWeightedSum",
                                 ["Tbl", "Weights", "Indices", "Lengths"],
                                 "out")

        self.ws.create_blob("Tbl").feed(Tbl)
        self.ws.create_blob("Indices").feed(Indices)
        self.ws.create_blob("Lengths").feed(Lengths)
        self.ws.create_blob("Weights").feed(Weights)
        self.ws.run(op)

        def sparse_lengths_weightedsum_ref(Tbl, Weights, Indices, Lengths):
            rptr = np.cumsum(np.insert(Lengths, [0], [0]))
            out = np.zeros((len(Lengths), blocksize))
            for i in range(0, len(rptr[0:-1])):
                w = Weights[rptr[i]:rptr[i + 1]]
                out[i] = (Tbl[Indices[rptr[i]:rptr[i + 1]]] *
                          w[:, np.newaxis]).sum(axis=0)
            return out

        #print("Weights: " + str(Weights))
        #print("computed_out: " + str(self.ws.blobs[("out")].fetch()))
        #print("referenc_out: " + str(sparse_lengths_weightedsum_ref(Tbl, Weights, Indices, Lengths)))
        np.testing.assert_allclose(self.ws.blobs[("out")].fetch(),
                                   sparse_lengths_weightedsum_ref(
                                       Tbl, Weights, Indices, Lengths),
                                   rtol=1e-3,
                                   atol=atol)
Example #39
    to_bound_with_ported_diagrams_pair,
    to_bound_with_ported_edges_pair,
    to_bound_with_ported_points_pair,
    to_bound_with_ported_site_events_pair,
    to_bound_with_ported_vertices_pair)
from tests.port_tests.hints import (PortedPoint,
                                    PortedSegment)
from tests.port_tests.utils import ported_source_categories
from tests.strategies import (integers_32,
                              sizes)
from tests.utils import (RawSegment,
                         to_maybe_pairs,
                         to_pairs,
                         transpose_pairs)

booleans = strategies.booleans()
coordinates = integers_32
points_pairs = strategies.builds(to_bound_with_ported_points_pair,
                                 coordinates, coordinates)


def raw_segment_to_segments_pair(raw: RawSegment) -> BoundPortedSegmentsPair:
    return (BoundSegment(BoundPoint(raw.start.x, raw.start.y),
                         BoundPoint(raw.end.x, raw.end.y)),
            PortedSegment(PortedPoint(raw.start.x, raw.start.y),
                          PortedPoint(raw.end.x, raw.end.y)))


segments_pairs = planar.segments(coordinates).map(raw_segment_to_segments_pair)

from hydra.experimental import compose, initialize_config_dir
from hypothesis import given, settings
from hypothesis.strategies import booleans

from scripts.efficientnets.run import main as efficientnets_main


@pytest.mark.parametrize(
    ["name", "dm", "num_classes"],
    [
        pytest.param("efficientnet-b5", "cifar10", 10),
        pytest.param("efficientnet-b5", "cifar100", 100),
    ],
)
@settings(deadline=None)
@given(pretrained=booleans())
def test_efficientnets(name, dm, num_classes, pretrained):
    with initialize_config_dir(os.getcwd() + "/conf"):
        cfg = compose(
            config_name="efficientnets",
            overrides=[
                f"name={name}",
                f"dm={dm}",
                f"pretrained={pretrained}",
                f"num_classes={num_classes}",
                "logger=false",
                "pl.max_epochs=1",
                "pl.gpus=0",
                "pl.limit_train_batches=5",
                "pl.limit_val_batches=5",
                "pl.limit_test_batches=5",
Example #41
    res = Resources()
    res.c_sources = c_sources
    res.s_sources = s_sources
    res.cpp_sources = cpp_sources
    assert res.detect_duplicates(toolchain) == 1,\
        "Not Enough duplicates found"

    notification = notify.messages[0]
    assert "dupe.o" in notification["message"]
    assert "dupe.s" in notification["message"]
    assert "dupe.c" in notification["message"]
    assert "dupe.cpp" in notification["message"]


@given(text(alphabet=ALPHABET + ["/"], min_size=1))
@given(booleans())
@given(booleans())
@settings(max_examples=20)
def test_path_specified_gcc(gcc_loc, exists_at_loc, exists_in_path):
    with patch('tools.toolchains.gcc.exists') as _exists:
        with patch('tools.toolchains.gcc.find_executable') as _find:
            _exists.return_value = exists_at_loc
            _find.return_value = exists_in_path
            TOOLCHAIN_PATHS['GCC_ARM'] = gcc_loc
            toolchain_class = TOOLCHAIN_CLASSES["GCC_ARM"]
            found_p = toolchain_class.check_executable()
            assert found_p == (exists_at_loc or exists_in_path)
            if exists_at_loc:
                assert TOOLCHAIN_PATHS['GCC_ARM'] == gcc_loc
            elif exists_in_path:
                assert TOOLCHAIN_PATHS['GCC_ARM'] == ''
Example #42
# def test_init_genius_bridge():
#     if not genius_bridge_is_running():
#         init_genius_bridge()
#     assert genius_bridge_is_running()


@pytest.mark.skipif(
    condition=not genius_bridge_is_running(),
    reason="No Genius Bridge, skipping genius-agent tests",
)
@settings(max_examples=10)
@given(
    agent_name1=st.sampled_from(GeniusNegotiator.robust_negotiators()),
    agent_name2=st.sampled_from(GeniusNegotiator.robust_negotiators()),
    single_issue=st.booleans(),
    keep_issue_names=st.booleans(),
    keep_value_names=st.booleans(),
)
def test_genius_agents_run_using_hypothesis(
    agent_name1,
    agent_name2,
    single_issue,
    keep_issue_names,
    keep_value_names,
):
    from negmas import convert_genius_domain_from_folder

    utils = (1, 2)
    src = pkg_resources.resource_filename("negmas",
                                          resource_name="tests/data/Laptop")
Example #43
class DPMultiheadAttention_test(DPModules_test):
    @given(
        batch_size=st.integers(1, 5),
        src_seq_len=st.integers(1, 6),
        tgt_seq_len=st.integers(1, 6),
        num_heads=st.integers(1, 3),
        bias=st.booleans(),
        add_bias_kv=st.booleans(),
        add_zero_attn=st.booleans(),
        kdim=st.integers(2, 8) | st.none(),
        vdim=st.integers(2, 8) | st.none(),
    )
    @settings(deadline=10000)
    def test_attn(
        self,
        batch_size: int,
        src_seq_len: int,
        tgt_seq_len: int,
        num_heads: int,
        bias: bool,
        add_bias_kv: bool,
        add_zero_attn: bool,
        kdim: Optional[int],
        vdim: Optional[int],
    ):
        embed_dim = 4 * num_heads  # embed_dim must be divisible by num_heads

        attn = nn.MultiheadAttention(
            embed_dim,
            num_heads,
            dropout=0.0,  # dropout is random, so the two implementations can't be compared
            bias=bias,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            kdim=kdim,
            vdim=vdim,
        )
        dp_attn = DPMultiheadAttention(
            embed_dim,
            num_heads,
            dropout=0.0,  # dropout is random, so the two implementations can't be compared
            bias=bias,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            kdim=kdim,
            vdim=vdim,
        )

        dp_attn.load_state_dict(attn.state_dict())

        q = torch.randn(tgt_seq_len, batch_size, embed_dim)
        k = torch.randn(src_seq_len, batch_size,
                        kdim if kdim is not None else embed_dim)
        v = torch.randn(src_seq_len, batch_size,
                        vdim if vdim is not None else embed_dim)

        self.compare_forward_outputs(
            attn,
            dp_attn,
            q,
            k,
            v,
            output_names=("attn_out", "attn_out_weights"),
            atol=1e-5,
            rtol=1e-3,
            key_padding_mask=None,
            need_weights=True,
            attn_mask=None,
        )

        self.compare_gradients(
            attn,
            dp_attn,
            attn_train_fn,
            q,
            k,
            v,
            atol=1e-5,
            rtol=1e-3,
            key_padding_mask=None,
            need_weights=True,
            attn_mask=None,
        )
Example #45
    assert p.stages == list()
    assert p.state == states.INITIAL
    assert p.state_history == [states.INITIAL]
    assert p._stage_count == 0
    assert p.current_stage == 0
    assert isinstance(p.lock, type(threading.Lock()))
    assert isinstance(p._completed_flag, type(threading.Event()))
    assert p.completed is False


# ------------------------------------------------------------------------------
#
@given(t=st.text(),
       l=st.lists(st.text()),
       i=st.integers().filter(lambda x: type(x) == int),
       b=st.booleans(),
       se=st.sets(st.text()))
def test_pipeline_assignment_exceptions(t, l, i, b, se):

    p = Pipeline()

    data_type = [t, l, i, b, se]

    for data in data_type:

        if not isinstance(data, str):
            with pytest.raises(TypeError):
                p.name = data

        with pytest.raises(TypeError):
            p.stages = data
Example #46
class TestLayers(LayersTestCase):
    def testAddLoss(self):
        input_record_LR = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float64, (1, )))),
                          ('prediction', schema.Scalar((np.float32, (2, )))),
                          ('weight', schema.Scalar((np.float64, (1, ))))))
        loss_LR = self.model.BatchLRLoss(input_record_LR)

        self.model.add_loss(loss_LR)
        assert 'unnamed' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.unnamed)
        self.assertEqual(loss_LR, self.model.loss.unnamed)

        self.model.add_loss(loss_LR, 'addLoss')
        assert 'addLoss' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.addLoss)
        self.assertEqual(loss_LR, self.model.loss.addLoss)

        self.model.add_loss(
            schema.Scalar(dtype=np.float32,
                          blob=core.BlobReference('loss_blob_1')), 'addLoss')
        assert 'addLoss_auto_0' in self.model.loss
        self.assertEqual(schema.Scalar((np.float32, tuple())),
                         self.model.loss.addLoss_auto_0)
        assert core.BlobReference(
            'loss_blob_1') in self.model.loss.field_blobs()

        self.model.add_loss(
            schema.Struct(
                ('structName',
                 schema.Scalar(dtype=np.float32,
                               blob=core.BlobReference('loss_blob_2')))),
            'addLoss')
        assert 'addLoss_auto_1' in self.model.loss
        self.assertEqual(
            schema.Struct(('structName', schema.Scalar(
                (np.float32, tuple())))), self.model.loss.addLoss_auto_1)
        assert core.BlobReference(
            'loss_blob_2') in self.model.loss.field_blobs()

        loss_in_tuple_0 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_0'))

        loss_in_tuple_1 = schema.Scalar(
            dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_1'))

        loss_tuple = schema.NamedTuple('loss_in_tuple',
                                       *[loss_in_tuple_0, loss_in_tuple_1])
        self.model.add_loss(loss_tuple, 'addLoss')
        assert 'addLoss_auto_2' in self.model.loss
        self.assertEqual(
            schema.Struct(
                ('loss_in_tuple_0', schema.Scalar((np.float32, tuple()))),
                ('loss_in_tuple_1', schema.Scalar((np.float32, tuple())))),
            self.model.loss.addLoss_auto_2)
        assert core.BlobReference('loss_blob_in_tuple_0')\
         in self.model.loss.field_blobs()
        assert core.BlobReference('loss_blob_in_tuple_1')\
         in self.model.loss.field_blobs()

    def _test_net(self, net, ops_list):
        """
        Helper function to assert the net contains some set of operations and
        then to run the net.

        Inputs:
            net -- the network to test and run
            ops_list -- the list of operation specifications to check for
                        in the net
        """
        ops_output = self.assertNetContainOps(net, ops_list)
        workspace.RunNetOnce(net)
        return ops_output

    def testFCWithoutBias(self):
        output_dims = 2
        fc_without_bias = self.model.FCWithoutBias(
            self.model.input_feature_schema.float_features, output_dims)
        self.model.output_schema = fc_without_bias

        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         fc_without_bias)

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(train_init_net, [
            OpSpec("UniformFill", None, None),
        ])

        mat_mul_spec = OpSpec("MatMul", [
            self.model.input_feature_schema.float_features(),
            init_ops[0].output[0],
        ], fc_without_bias.field_blobs())

        self.assertNetContainOps(train_net, [mat_mul_spec])

        predict_net = self.get_predict_net()
        self.assertNetContainOps(predict_net, [mat_mul_spec])

    def testSamplingTrain(self):
        output_dims = 1000

        indices = self.new_record(schema.Scalar((np.int32, (10, ))))
        sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))

        sampled_fc = self.model.SamplingTrain(
            schema.Struct(
                ('input', self.model.input_feature_schema.float_features),
                ('indices', indices),
                ('sampling_prob', sampling_prob),
            ),
            "FC",
            output_dims,
        )
        self.model.output_schema = sampled_fc

        # Check that we don't add a prediction layer to the model
        self.assertEqual(1, len(self.model.layers))

        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         sampled_fc)

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(train_init_net, [
            OpSpec("UniformFill", None, None),
            OpSpec("UniformFill", None, None),
        ])

        sampled_fc_layer = self.model.layers[0]

        gather_w_spec = OpSpec("Gather", [
            init_ops[0].output[0],
            indices(),
        ], [sampled_fc_layer._prediction_layer.train_param_blobs[0]])
        gather_b_spec = OpSpec("Gather", [
            init_ops[1].output[0],
            indices(),
        ], [sampled_fc_layer._prediction_layer.train_param_blobs[1]])
        train_fc_spec = OpSpec("FC", [
            self.model.input_feature_schema.float_features(),
        ] + sampled_fc_layer._prediction_layer.train_param_blobs,
                               sampled_fc.field_blobs())
        log_spec = OpSpec("Log", [sampling_prob()], [None])
        sub_spec = OpSpec("Sub", [sampled_fc.field_blobs()[0], None],
                          sampled_fc.field_blobs())

        train_ops = self.assertNetContainOps(
            train_net,
            [gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])

        self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])

        predict_net = self.get_predict_net()
        self.assertNetContainOps(predict_net, [
            OpSpec("FC", [
                self.model.input_feature_schema.float_features(),
                init_ops[0].output[0],
                init_ops[1].output[0],
            ], sampled_fc.field_blobs())
        ])

    def testBatchLRLoss(self):
        input_record = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float64, (1, )))),
                          ('prediction', schema.Scalar((np.float32, (2, )))),
                          ('weight', schema.Scalar((np.float64, (1, ))))))
        loss = self.model.BatchLRLoss(input_record)
        self.assertEqual(schema.Scalar((np.float32, tuple())), loss)

    def testMarginRankLoss(self):
        input_record = self.new_record(
            schema.Struct(
                ('pos_prediction', schema.Scalar((np.float32, (1, )))),
                ('neg_prediction', schema.List(np.float32)),
            ))
        pos_items = np.array([0.1, 0.2, 0.3], dtype=np.float32)
        neg_lengths = np.array([1, 2, 3], dtype=np.int32)
        neg_items = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=np.float32)
        schema.FeedRecord(input_record, [pos_items, neg_lengths, neg_items])
        loss = self.model.MarginRankLoss(input_record)
        self.run_train_net_forward_only()
        self.assertEqual(schema.Scalar((np.float32, tuple())), loss)

    def testBatchMSELoss(self):
        input_record = self.new_record(
            schema.Struct(
                ('label', schema.Scalar((np.float64, (1, )))),
                ('prediction', schema.Scalar((np.float32, (2, )))),
            ))
        loss = self.model.BatchMSELoss(input_record)
        self.assertEqual(schema.Scalar((np.float32, tuple())), loss)

    def testBatchSigmoidCrossEntropyLoss(self):
        input_record = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float32, (32, )))),
                          ('prediction', schema.Scalar((np.float32, (32, ))))))
        loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
        self.assertEqual(schema.Scalar((np.float32, tuple())), loss)

    def testBatchSoftmaxLoss(self):
        input_record = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float32, tuple()))),
                          ('prediction', schema.Scalar((np.float32, (32, ))))))
        loss = self.model.BatchSoftmaxLoss(input_record)
        self.assertEqual(
            schema.Struct(
                ('softmax', schema.Scalar((np.float32, (32, )))),
                ('loss', schema.Scalar(np.float32)),
            ), loss)

    def testBatchSoftmaxLossWeight(self):
        input_record = self.new_record(
            schema.Struct(('label', schema.Scalar((np.float32, tuple()))),
                          ('prediction', schema.Scalar((np.float32, (32, )))),
                          ('weight', schema.Scalar((np.float64, (1, ))))))
        loss = self.model.BatchSoftmaxLoss(input_record)
        self.assertEqual(
            schema.Struct(
                ('softmax', schema.Scalar((np.float32, (32, )))),
                ('loss', schema.Scalar(np.float32)),
            ), loss)

    @given(
        X=hu.arrays(dims=[2, 5]), )
    def testBatchNormalization(self, X):
        input_record = self.new_record(schema.Scalar((np.float32, (5, ))))
        schema.FeedRecord(input_record, [X])
        bn_output = self.model.BatchNormalization(input_record)
        self.assertEqual(schema.Scalar((np.float32, (5, ))), bn_output)
        self.model.output_schema = schema.Struct()

        train_init_net, train_net = self.get_training_nets()

        init_ops = self.assertNetContainOps(train_init_net, [
            OpSpec("ConstantFill", None, None),
            OpSpec("ConstantFill", None, None),
            OpSpec("ConstantFill", None, None),
            OpSpec("ConstantFill", None, None),
        ])

        input_blob = input_record.field_blobs()[0]
        output_blob = bn_output.field_blobs()[0]

        expand_dims_spec = OpSpec(
            "ExpandDims",
            [input_blob],
            None,
        )

        train_bn_spec = OpSpec(
            "SpatialBN",
            [
                None, init_ops[0].output[0], init_ops[1].output[0],
                init_ops[2].output[0], init_ops[3].output[0]
            ],
            [
                output_blob, init_ops[2].output[0], init_ops[3].output[0],
                None, None
            ],
            {
                'is_test': 0,
                'order': 'NCHW',
                'momentum': 0.9
            },
        )

        test_bn_spec = OpSpec(
            "SpatialBN",
            [
                None, init_ops[0].output[0], init_ops[1].output[0],
                init_ops[2].output[0], init_ops[3].output[0]
            ],
            [output_blob],
            {
                'is_test': 1,
                'order': 'NCHW',
                'momentum': 0.9
            },
        )

        squeeze_spec = OpSpec(
            "Squeeze",
            [output_blob],
            [output_blob],
        )

        self.assertNetContainOps(
            train_net, [expand_dims_spec, train_bn_spec, squeeze_spec])

        eval_net = self.get_eval_net()

        self.assertNetContainOps(
            eval_net, [expand_dims_spec, test_bn_spec, squeeze_spec])

        predict_net = self.get_predict_net()

        self.assertNetContainOps(
            predict_net, [expand_dims_spec, test_bn_spec, squeeze_spec])

        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(eval_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(predict_net)

    @given(
        X=hu.arrays(dims=[5, 2]),
        num_to_collect=st.integers(min_value=1, max_value=10),
    )
    def testLastNWindowCollector(self, X, num_to_collect):
        input_record = self.new_record(schema.Scalar(np.float32))
        schema.FeedRecord(input_record, [X])
        last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
        self.run_train_net_forward_only()
        output_record = schema.FetchRecord(last_n.last_n)
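        # the collector keeps only the last num_to_collect of the 5 rows fed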
        start = max(0, 5 - num_to_collect)
        npt.assert_array_equal(X[start:], output_record())
        num_visited = schema.FetchRecord(last_n.num_visited)
        npt.assert_array_equal([5], num_visited())

    def testUniformSampling(self):
        input_record = self.new_record(schema.Scalar(np.int32))
        input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
        schema.FeedRecord(input_record, [input_array])
        num_samples = 20
        num_elements = 100
        uniform_sampling_output = self.model.UniformSampling(
            input_record, num_samples, num_elements)
        self.model.loss = uniform_sampling_output
        self.run_train_net()
        samples = workspace.FetchBlob(uniform_sampling_output.samples())
        sampling_prob = workspace.FetchBlob(
            uniform_sampling_output.sampling_prob())
        self.assertEqual(num_samples, len(samples))
        np.testing.assert_array_equal(input_array, samples[:len(input_array)])
        np.testing.assert_almost_equal(
            np.array([float(num_samples) / num_elements] * num_samples,
                     dtype=np.float32), sampling_prob)

    def testUniformSamplingWithIncorrectSampleSize(self):
        input_record = self.new_record(schema.Scalar(np.int32))
        num_samples = 200
        num_elements = 100
        with self.assertRaises(AssertionError):
            self.model.UniformSampling(input_record, num_samples, num_elements)

    def testGatherRecord(self):
        indices = np.array([1, 3, 4], dtype=np.int32)
        dense = np.array(list(range(20)), dtype=np.float32).reshape(10, 2)
        lengths = np.array(list(range(10)), dtype=np.int32)
        items = np.array(list(range(lengths.sum())), dtype=np.int64)
        items_lengths = np.array(list(range(lengths.sum())), dtype=np.int32)
        items_items = np.array(list(range(items_lengths.sum())),
                               dtype=np.int64)
        record = self.new_record(
            schema.Struct(
                ('dense', schema.Scalar(np.float32)),
                ('sparse',
                 schema.Struct(
                     ('list', schema.List(np.int64)),
                     ('list_of_list', schema.List(schema.List(np.int64))),
                 )), ('empty_struct', schema.Struct())))
        indices_record = self.new_record(schema.Scalar(np.int32))
        input_record = schema.Struct(
            ('indices', indices_record),
            ('record', record),
        )
        schema.FeedRecord(input_record, [
            indices, dense, lengths, items, lengths, items_lengths, items_items
        ])
        gathered_record = self.model.GatherRecord(input_record)
        self.assertTrue(schema.equal_schemas(gathered_record, record))

        self.run_train_net_forward_only()
        gathered_dense = workspace.FetchBlob(gathered_record.dense())
        np.testing.assert_array_equal(
            np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
        gathered_lengths = workspace.FetchBlob(
            gathered_record.sparse.list.lengths())
        np.testing.assert_array_equal(
            np.concatenate([lengths[i:i + 1] for i in indices]),
            gathered_lengths)
        gathered_items = workspace.FetchBlob(
            gathered_record.sparse.list.items())
        offsets = lengths.cumsum() - lengths
        np.testing.assert_array_equal(
            np.concatenate(
                [items[offsets[i]:offsets[i] + lengths[i]] for i in indices]),
            gathered_items)

        gathered_items_lengths = workspace.FetchBlob(
            gathered_record.sparse.list_of_list.items.lengths())
        np.testing.assert_array_equal(
            np.concatenate([
                items_lengths[offsets[i]:offsets[i] + lengths[i]]
                for i in indices
            ]), gathered_items_lengths)

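        # Rebuild second-level offsets: top-level element i owns lengths[i]
        # inner lists, so its nested span covers the sum of those inner
        # lists' item counts from items_lengths.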
        nested_offsets = []
        nested_lengths = []
        nested_offset = 0
        j = 0
        for l in lengths:
            nested_offsets.append(nested_offset)
            nested_length = 0
            for _i in range(l):
                nested_offset += items_lengths[j]
                nested_length += items_lengths[j]
                j += 1
            nested_lengths.append(nested_length)

        gathered_items_items = workspace.FetchBlob(
            gathered_record.sparse.list_of_list.items.items())
        np.testing.assert_array_equal(
            np.concatenate([
                items_items[nested_offsets[i]:nested_offsets[i] +
                            nested_lengths[i]] for i in indices
            ]), gathered_items_items)

    def testMapToRange(self):
        input_record = self.new_record(schema.Scalar(np.int32))
        indices_blob = self.model.MapToRange(input_record,
                                             max_index=100).indices
        self.model.output_schema = schema.Struct()

        train_init_net, train_net = self.get_training_nets()

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)])
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32), indices)

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)])
        workspace.RunNetOnce(train_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32), indices)

        eval_net = self.get_eval_net()

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)])
        workspace.RunNetOnce(eval_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32), indices)

        schema.FeedRecord(
            input_record,
            [np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)])
        workspace.RunNetOnce(eval_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([1, 2, 7, 5, 0, 0], dtype=np.int32), indices)

        predict_net = self.get_predict_net()

        schema.FeedRecord(
            input_record,
            [np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)])
        workspace.RunNetOnce(predict_net)
        indices = workspace.FetchBlob(indices_blob())
        np.testing.assert_array_equal(
            np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32), indices)

    def testSelectRecordByContext(self):
        float_features = self.model.input_feature_schema.float_features

        float_array = np.array([1.0, 2.0], dtype=np.float32)

        schema.FeedRecord(float_features, [float_array])

        with Tags(Tags.EXCLUDE_FROM_PREDICTION):
            log_float_features = self.model.Log(float_features, 1)
        joined = self.model.SelectRecordByContext(
            schema.Struct(
                (InstantiationContext.PREDICTION, float_features),
                (InstantiationContext.TRAINING, log_float_features),
                # TODO: TRAIN_ONLY layers are also generated in eval
                (InstantiationContext.EVAL, log_float_features),
            ))

        # model.output_schema has to be a struct
        self.model.output_schema = schema.Struct(('joined', joined))
        predict_net = layer_model_instantiator.generate_predict_net(self.model)
        workspace.RunNetOnce(predict_net)
        predict_output = schema.FetchRecord(predict_net.output_record())
        npt.assert_array_equal(float_array, predict_output['joined']())
        eval_net = layer_model_instantiator.generate_eval_net(self.model)
        workspace.RunNetOnce(eval_net)
        eval_output = schema.FetchRecord(eval_net.output_record())
        npt.assert_array_equal(np.log(float_array), eval_output['joined']())
        _, train_net = (
            layer_model_instantiator.generate_training_nets_forward_only(
                self.model))
        workspace.RunNetOnce(train_net)
        train_output = schema.FetchRecord(train_net.output_record())
        npt.assert_array_equal(np.log(float_array), train_output['joined']())

    def testFunctionalLayer(self):
        def normalize(net, in_record, out_record):
            mean = net.ReduceFrontMean(in_record(), 1)
            net.Sub([in_record(), mean], out_record(), broadcast=1)

        normalized = self.model.Functional(
            self.model.input_feature_schema.float_features,
            1,
            normalize,
            name="normalizer")

        # Attach metadata to one of the outputs and use it in FC
        normalized.set_type((np.float32, 32))
        self.model.output_schema = self.model.FC(normalized, 2)

        predict_net = layer_model_instantiator.generate_predict_net(self.model)
        ops = predict_net.Proto().op
        assert len(ops) == 3
        assert ops[0].type == "ReduceFrontMean"
        assert ops[1].type == "Sub"
        assert ops[2].type == "FC"
        assert len(ops[0].input) == 1
        assert ops[0].input[0] ==\
            self.model.input_feature_schema.float_features()
        assert len(ops[1].output) == 1
        assert ops[1].output[0] in ops[2].input

    def testFunctionalLayerHelper(self):
        mean = self.model.ReduceFrontMean(
            self.model.input_feature_schema.float_features, 1)
        normalized = self.model.Sub(schema.Tuple(
            self.model.input_feature_schema.float_features, mean),
                                    1,
                                    broadcast=1)
        # Attach metadata to one of the outputs and use it in FC
        normalized.set_type((np.float32, (32, )))
        self.model.output_schema = self.model.FC(normalized, 2)

        predict_net = layer_model_instantiator.generate_predict_net(self.model)
        ops = predict_net.Proto().op
        assert len(ops) == 3
        assert ops[0].type == "ReduceFrontMean"
        assert ops[1].type == "Sub"
        assert ops[2].type == "FC"
        assert len(ops[0].input) == 1
        assert ops[0].input[0] ==\
            self.model.input_feature_schema.float_features()
        assert len(ops[1].output) == 1
        assert ops[1].output[0] in ops[2].input

    def testFunctionalLayerHelperAutoInference(self):
        softsign = self.model.Softsign(
            schema.Tuple(self.model.input_feature_schema.float_features), 1)
        assert softsign.field_type().base == np.float32
        assert softsign.field_type().shape == (32, )
        self.model.output_schema = self.model.FC(softsign, 2)

        predict_net = layer_model_instantiator.generate_predict_net(self.model)
        ops = predict_net.Proto().op
        assert len(ops) == 2
        assert ops[0].type == "Softsign"
        assert ops[1].type == "FC"
        assert len(ops[0].input) == 1
        assert ops[0].input[0] ==\
            self.model.input_feature_schema.float_features()
        assert len(ops[0].output) == 1
        assert ops[0].output[0] in ops[1].input

    def testFunctionalLayerHelperAutoInferenceScalar(self):
        loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
        self.assertEqual(1, len(loss.field_types()))
        self.assertEqual(np.float32, loss.field_types()[0].base)
        self.assertEqual(tuple(), loss.field_types()[0].shape)

    def testFunctionalLayerInputCoercion(self):
        one = self.model.global_constants['ONE']
        two = self.model.Add([one, one], 1)
        self.model.loss = two
        self.run_train_net()
        data = workspace.FetchBlob(two.field_blobs()[0])
        np.testing.assert_array_equal([2.0], data)

    def testFunctionalLayerWithOutputNames(self):
        k = 3
        topk = self.model.TopK(
            self.model.input_feature_schema,
            output_names_or_num=['values', 'indices'],
            k=k,
        )
        self.assertEqual(2, len(topk.field_types()))
        self.assertEqual(np.float32, topk.field_types()[0].base)
        self.assertEqual((k, ), topk.field_types()[0].shape)
        self.assertEqual(np.int32, topk.field_types()[1].base)
        self.assertEqual((k, ), topk.field_types()[1].shape)
        self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())

    def testFunctionalLayerSameOperatorOutputNames(self):
        Con1 = self.model.ConstantFill([], 1, value=1)
        Con2 = self.model.ConstantFill([], 1, value=2)
        self.assertNotEqual(str(Con1), str(Con2))

    def testFunctionalLayerWithOutputDtypes(self):
        loss = self.model.AveragedLoss(
            self.model.input_feature_schema,
            1,
            output_dtypes=(np.float32, (1, )),
        )
        self.assertEqual(1, len(loss.field_types()))
        self.assertEqual(np.float32, loss.field_types()[0].base)
        self.assertEqual((1, ), loss.field_types()[0].shape)

    def testPropagateRequestOnly(self):
        # test case when output is request only
        input_record = self.new_record(
            schema.Struct(
                ('input1', schema.Scalar((np.float32, (32, )))),
                ('input2', schema.Scalar((np.float32, (64, )))),
                ('input3', schema.Scalar((np.float32, (16, )))),
            ))

        set_request_only(input_record)
        concat_output = self.model.Concat(input_record)
        self.assertEqual(is_request_only_scalar(concat_output), True)

        # test case when output is not request only
        input_record2 = self.new_record(
            schema.Struct(('input4', schema.Scalar(
                (np.float32, (100, )))))) + input_record

        concat_output2 = self.model.Concat(input_record2)
        self.assertEqual(is_request_only_scalar(concat_output2), False)

    def testSetRequestOnly(self):
        input_record = schema.Scalar(np.int64)
        schema.attach_metadata_to_scalars(
            input_record,
            schema.Metadata(
                categorical_limit=100000000,
                expected_value=99,
                feature_specs=schema.FeatureSpec(feature_ids=[1, 100, 1001])))

        set_request_only(input_record)
        self.assertEqual(input_record.metadata.categorical_limit, 100000000)
        self.assertEqual(input_record.metadata.expected_value, 99)
        self.assertEqual(input_record.metadata.feature_specs.feature_ids,
                         [1, 100, 1001])

    @given(
        X=hu.arrays(dims=[5, 5]),  # Shape of X is irrelevant
    )
    def testDropout(self, X):
        input_record = self.new_record(schema.Scalar((np.float32, (1, ))))
        schema.FeedRecord(input_record, [X])
        d_output = self.model.Dropout(input_record)
        self.assertEqual(schema.Scalar((np.float32, (1, ))), d_output)
        self.model.output_schema = schema.Struct()

        train_init_net, train_net = self.get_training_nets()

        input_blob = input_record.field_blobs()[0]
        output_blob = d_output.field_blobs()[0]

        train_d_spec = OpSpec("Dropout", [input_blob], [output_blob, None], {
            'is_test': 0,
            'ratio': 0.5
        })

        test_d_spec = OpSpec("Dropout", [input_blob], [output_blob, None], {
            'is_test': 1,
            'ratio': 0.5
        })

        self.assertNetContainOps(train_net, [train_d_spec])

        eval_net = self.get_eval_net()

        self.assertNetContainOps(eval_net, [test_d_spec])

        predict_net = self.get_predict_net()

        self.assertNetContainOps(predict_net, [test_d_spec])

        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(train_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(eval_net)

        schema.FeedRecord(input_record, [X])
        workspace.RunNetOnce(predict_net)

    @given(num_inputs=st.integers(1, 3), batch_size=st.integers(5, 10))
    def testMergeIdListsLayer(self, num_inputs, batch_size):
        inputs = []
        for _ in range(num_inputs):
            lengths = np.random.randint(5, size=batch_size).astype(np.int32)
            size = lengths.sum()
            values = np.random.randint(1, 10, size=size).astype(np.int64)
            inputs.append(lengths)
            inputs.append(values)
        input_schema = schema.Tuple(*[
            schema.List(
                schema.Scalar(dtype=np.int64,
                              metadata=schema.Metadata(categorical_limit=20)))
            for _ in range(num_inputs)
        ])

        input_record = schema.NewRecord(self.model.net, input_schema)
        schema.FeedRecord(input_record, inputs)
        output_schema = self.model.MergeIdLists(input_record)
        assert schema.equal_schemas(output_schema,
                                    IdList,
                                    check_field_names=False)

    @given(
        batch_size=st.integers(min_value=2, max_value=10),
        input_dims=st.integers(min_value=5, max_value=10),
        output_dims=st.integers(min_value=5, max_value=10),
        bandwidth=st.floats(min_value=0.1, max_value=5),
    )
    def testRandomFourierFeatures(self, batch_size, input_dims, output_dims,
                                  bandwidth):
        def _rff_hypothesis_test(rff_output, X, W, b, scale):
            """
            Runs the hypothesis test for the Random Fourier Features layer.

            Inputs:
                rff_output -- output of net after running random fourier features layer
                X -- input data
                W -- weight parameter from train_init_net
                b -- bias parameter from train_init_net
                scale -- value by which to scale the output vector
            """
            output = workspace.FetchBlob(rff_output)
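            # Random Fourier features approximate a shift-invariant kernel:
            # z(x) = sqrt(2 / D) * cos(Wx + b), so the reference output is the
            # scaled cosine of the affine projection (Rahimi & Recht, 2007).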
            output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
            npt.assert_allclose(output, output_ref, rtol=1e-3, atol=1e-3)

        X = np.random.random((batch_size, input_dims)).astype(np.float32)
        scale = np.sqrt(2.0 / output_dims)
        input_record = self.new_record(
            schema.Scalar((np.float32, (input_dims, ))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]
        rff_output = self.model.RandomFourierFeatures(input_record,
                                                      output_dims, bandwidth)
        self.model.output_schema = schema.Struct()

        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         rff_output)

        train_init_net, train_net = self.get_training_nets()

        # Init net assertions
        init_ops_list = [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ]
        init_ops = self._test_net(train_init_net, init_ops_list)
        W = workspace.FetchBlob(self.model.layers[0].w)
        b = workspace.FetchBlob(self.model.layers[0].b)

        # Operation specifications
        fc_spec = OpSpec(
            "FC", [input_blob, init_ops[0].output[0], init_ops[1].output[0]],
            None)
        cosine_spec = OpSpec("Cos", None, None)
        scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
                            {'scale': scale})
        ops_list = [fc_spec, cosine_spec, scale_spec]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _rff_hypothesis_test(rff_output(), X, W, b, scale)

    @given(batch_size=st.integers(min_value=2, max_value=10),
           input_dims=st.integers(min_value=5, max_value=10),
           output_dims=st.integers(min_value=5, max_value=10),
           s=st.integers(min_value=0, max_value=3),
           scale=st.floats(min_value=0.1, max_value=5),
           set_weight_as_global_constant=st.booleans())
    def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s,
                                scale, set_weight_as_global_constant):
        def _arc_cosine_hypothesis_test(ac_output, X, W, b, s):
            """
            Runs hypothesis test for Arc Cosine layer.

            Inputs:
                ac_output -- output of net after running arc cosine layer
                X -- input data
                W -- weight parameter from train_init_net
                b -- bias parameter from train_init_net
                s -- degree parameter
            """
            # Get output from net
            net_output = workspace.FetchBlob(ac_output)

            # Computing output directly
            x_rand = np.matmul(X, np.transpose(W)) + b
            x_pow = np.power(x_rand, s)
            if s > 0:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, 1])
            else:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, lambda x: x / (1 + x)])
            output_ref = np.multiply(x_pow, h_rand_features)
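            # For s > 0 the gate is the Heaviside step, giving the degree-s
            # arc-cosine feature x^s * step(x); for s == 0 the step is replaced
            # by the softsign x / (1 + x), matching the Softsign op checked in
            # the nets below.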

            # Comparing net output and computed output
            npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)

        X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
        input_record = self.new_record(
            schema.Scalar((np.float32, (input_dims, ))))
        schema.FeedRecord(input_record, [X])
        input_blob = input_record.field_blobs()[0]

        ac_output = self.model.ArcCosineFeatureMap(
            input_record,
            output_dims,
            s=s,
            scale=scale,
            set_weight_as_global_constant=set_weight_as_global_constant)
        self.model.output_schema = schema.Struct()
        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))),
                         ac_output)

        train_init_net, train_net = self.get_training_nets()

        # Run create_init_net to initialize the global constants, and W and b
        workspace.RunNetOnce(train_init_net)
        workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

        if set_weight_as_global_constant:
            W = workspace.FetchBlob(
                self.model.
                global_constants['arc_cosine_feature_map_fixed_rand_W'])
            b = workspace.FetchBlob(
                self.model.
                global_constants['arc_cosine_feature_map_fixed_rand_b'])
        else:
            W = workspace.FetchBlob(self.model.layers[0].random_w)
            b = workspace.FetchBlob(self.model.layers[0].random_b)

        # Operation specifications
        fc_spec = OpSpec("FC", [input_blob, None, None], None)
        softsign_spec = OpSpec("Softsign", None, None)
        relu_spec = OpSpec("Relu", None, None)
        relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
        pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
        mul_spec = OpSpec("Mul", None, ac_output.field_blobs())

        if s == 0:
            ops_list = [
                fc_spec,
                softsign_spec,
                relu_spec_output,
            ]
        elif s == 1:
            ops_list = [
                fc_spec,
                relu_spec_output,
            ]
        else:
            ops_list = [
                fc_spec,
                relu_spec,
                pow_spec,
                mul_spec,
            ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _arc_cosine_hypothesis_test(ac_output(), X, W, b, s)

    @given(
        batch_size=st.integers(min_value=2, max_value=10),
        input_dims=st.integers(min_value=5, max_value=10),
        output_dims=st.integers(min_value=5, max_value=10),
        s=st.integers(min_value=0, max_value=3),
        scale=st.floats(min_value=0.1, max_value=5),
        set_weight_as_global_constant=st.booleans(),
        use_struct_input=st.booleans(),
    )
    def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s,
                               scale, set_weight_as_global_constant,
                               use_struct_input):
        def _semi_random_hypothesis_test(srf_output, X_full, X_random, rand_w,
                                         rand_b, s):
            """
            Runs hypothesis test for Semi Random Features layer.

            Inputs:
                srf_output -- output of net after running semi random features layer
                X_full -- full input data
                X_random -- random-output input data
                rand_w -- random-initialized weight parameter from train_init_net
                rand_b -- random-initialized bias parameter from train_init_net
                s -- degree parameter
            """
            # Get output from net
            net_output = workspace.FetchBlob(srf_output)

            # Fetch learned parameter blobs
            learned_w = workspace.FetchBlob(self.model.layers[0].learned_w)
            learned_b = workspace.FetchBlob(self.model.layers[0].learned_b)

            # Computing output directly
            x_rand = np.matmul(X_random, np.transpose(rand_w)) + rand_b
            x_learn = np.matmul(X_full, np.transpose(learned_w)) + learned_b
            x_pow = np.power(x_rand, s)
            if s > 0:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, 1])
            else:
                h_rand_features = np.piecewise(x_rand,
                                               [x_rand <= 0, x_rand > 0],
                                               [0, lambda x: x / (1 + x)])
            output_ref = np.multiply(np.multiply(x_pow, h_rand_features),
                                     x_learn)
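            # semi-random features gate the learned projection x_learn with
            # randomized arc-cosine features of x_rand; only the learned_w /
            # learned_b path receives gradient updates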

            # Comparing net output and computed output
            npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)

        X_full = np.random.normal(size=(batch_size,
                                        input_dims)).astype(np.float32)
        if use_struct_input:
            X_random = np.random.normal(size=(batch_size, input_dims)).\
                astype(np.float32)
            input_data = [X_full, X_random]
            input_record = self.new_record(
                schema.Struct(
                    ('full', schema.Scalar((np.float32, (input_dims, )))),
                    ('random', schema.Scalar((np.float32, (input_dims, ))))))
        else:
            X_random = X_full
            input_data = [X_full]
            input_record = self.new_record(
                schema.Scalar((np.float32, (input_dims, ))))

        schema.FeedRecord(input_record, input_data)
        srf_output = self.model.SemiRandomFeatures(
            input_record,
            output_dims,
            s=s,
            scale_random=scale,
            scale_learned=scale,
            set_weight_as_global_constant=set_weight_as_global_constant)

        self.model.output_schema = schema.Struct()

        self.assertEqual(
            schema.Struct(
                ('full', schema.Scalar((np.float32, (output_dims, )))),
                ('random', schema.Scalar((np.float32, (output_dims, ))))),
            srf_output)

        init_ops_list = [
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
            OpSpec("GaussianFill", None, None),
            OpSpec("UniformFill", None, None),
        ]
        train_init_net, train_net = self.get_training_nets()

        # Need to run to initialize the global constants for layer
        workspace.RunNetOnce(self.model.create_init_net(name='init_net'))

        if set_weight_as_global_constant:
            # If weight params are global constants, they won't be in train_init_net
            init_ops = self._test_net(train_init_net, init_ops_list[:2])
            rand_w = workspace.FetchBlob(
                self.model.
                global_constants['semi_random_features_fixed_rand_W'])
            rand_b = workspace.FetchBlob(
                self.model.
                global_constants['semi_random_features_fixed_rand_b'])

            # Operation specifications
            fc_random_spec = OpSpec("FC", [None, None, None], None)
            fc_learned_spec = OpSpec(
                "FC", [None, init_ops[0].output[0], init_ops[1].output[0]],
                None)
        else:
            init_ops = self._test_net(train_init_net, init_ops_list)
            rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
            rand_b = workspace.FetchBlob(self.model.layers[0].random_b)

            # Operation specifications
            fc_random_spec = OpSpec(
                "FC", [None, init_ops[0].output[0], init_ops[1].output[0]],
                None)
            fc_learned_spec = OpSpec(
                "FC", [None, init_ops[2].output[0], init_ops[3].output[0]],
                None)

        softsign_spec = OpSpec("Softsign", None, None)
        relu_spec = OpSpec("Relu", None, None)
        relu_output_spec = OpSpec("Relu", None,
                                  srf_output.random.field_blobs())
        pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
        mul_interim_spec = OpSpec("Mul", None, srf_output.random.field_blobs())
        mul_spec = OpSpec("Mul", None, srf_output.full.field_blobs())

        if s == 0:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                softsign_spec,
                relu_output_spec,
                mul_spec,
            ]
        elif s == 1:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                relu_output_spec,
                mul_spec,
            ]
        else:
            ops_list = [
                fc_learned_spec,
                fc_random_spec,
                relu_spec,
                pow_spec,
                mul_interim_spec,
                mul_spec,
            ]

        # Train net assertions
        self._test_net(train_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)

        # Eval net assertions
        eval_net = self.get_eval_net()
        self._test_net(eval_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)

        # Predict net assertions
        predict_net = self.get_predict_net()
        self._test_net(predict_net, ops_list)
        _semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
                                     rand_w, rand_b, s)

    def testConv(self):
        batch_size = 50
        H = 1
        W = 10
        C = 50
        output_dims = 32
        kernel_h = 1
        kernel_w = 3
        stride_h = 1
        stride_w = 1
        pad_t = 0
        pad_b = 0
        pad_r = None
        pad_l = None

        input_record = self.new_record(schema.Scalar((np.float32, (H, W, C))))
        X = np.random.random((batch_size, H, W, C)).astype(np.float32)
        schema.FeedRecord(input_record, [X])
        conv = self.model.Conv(input_record,
                               output_dims,
                               kernel_h=kernel_h,
                               kernel_w=kernel_w,
                               stride_h=stride_h,
                               stride_w=stride_w,
                               pad_t=pad_t,
                               pad_b=pad_b,
                               pad_r=pad_r,
                               pad_l=pad_l,
                               order='NHWC')

        self.assertEqual(schema.Scalar((np.float32, (output_dims, ))), conv)

        self.run_train_net_forward_only()
        output_record = schema.FetchRecord(conv)
        # check that the spatial dims are preserved and that the number of
        # output channels equals output_dims
        assert output_record.field_types()[0].shape == (H, W, output_dims)
        assert output_record().shape == (batch_size, H, W, output_dims)

        train_init_net, train_net = self.get_training_nets()
        # Init net assertions
        init_ops = self.assertNetContainOps(train_init_net, [
            OpSpec("XavierFill", None, None),
            OpSpec("ConstantFill", None, None),
        ])
        conv_spec = OpSpec("Conv", [
            input_record.field_blobs()[0],
            init_ops[0].output[0],
            init_ops[1].output[0],
        ], conv.field_blobs())

        # Train net assertions
        self.assertNetContainOps(train_net, [conv_spec])

        # Predict net assertions
        predict_net = self.get_predict_net()
        self.assertNetContainOps(predict_net, [conv_spec])

        # Eval net assertions
        eval_net = self.get_eval_net()
        self.assertNetContainOps(eval_net, [conv_spec])


@composite
def single_bool_lists(draw):
    n = draw(integers(0, 20))
    result = [False] * (n + 1)
    result[n] = True
    return result
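# single_bool_lists() draws lists that are all False except for one True at a
# random index, e.g. [False, False, True]; mixed with lists(booleans()) below,
# it drives the failure-inducing build/test sequences.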


@example([True, False, False, False], [3], None)
@example([False, True, False, False], [3], None)
@example([False, False, True, False], [3], None)
@example([False, False, False, True], [3], None)
@settings(deadline=None)
@given(
    lists(booleans()) | single_bool_lists(), lists(integers(1, 3)),
    random_module())
def test_failure_sequence_inducing(building, testing, rnd):
    buildit = iter(building)
    testit = iter(testing)

    def build(x):
        try:
            assume(not next(buildit))
        except StopIteration:
            pass
        return x

    @given(integers().map(build))
    @settings(
        verbosity=Verbosity.quiet,
Example #48
class RNNCellTest(hu.HypothesisTestCase):

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        num_layers=st.integers(1, 4),
        outputs_with_grad=st.sampled_from(
            [[0], [1], [0, 1]]
        ),
    )
    @ht_settings(max_examples=10)
    def test_unroll_mul(self, input_tensor, num_layers, outputs_with_grad):
        outputs = []
        nets = []
        input_blob = None
        for T in [input_tensor.shape[0], None]:
            model = ModelHelper("rnn_mul_{}".format(
                "unroll" if T else "dynamic"))
            input_blob = model.net.AddExternalInputs("input_blob")
            outputs.append(
                prepare_mul_rnn(model, input_blob, input_tensor.shape, T,
                                outputs_with_grad, num_layers))
            workspace.RunNetOnce(model.param_init_net)
            nets.append(model.net)

            workspace.blobs[input_blob] = input_tensor

        # compare the unrolled and the dynamic nets once both have been built
        gradient_checker.NetGradientChecker.CompareNets(
            nets, outputs, outputs_with_grad_ids=outputs_with_grad,
            inputs_with_grads=[input_blob],
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        forget_bias=st.floats(-10.0, 10.0),
        drop_states=st.booleans(),
        dim_out=st.lists(
            elements=st.integers(min_value=1, max_value=3),
            min_size=1, max_size=3,
        ),
        outputs_with_grads=st.sampled_from(
            [[0], [1], [0, 1], [0, 2], [0, 1, 2, 3]]
        )
    )
    @ht_settings(max_examples=10)
    @utils.debug
    def test_unroll_lstm(self, input_tensor, dim_out, outputs_with_grads,
                         **kwargs):
        lstms = [
            _prepare_rnn(
                *input_tensor.shape,
                create_rnn=rnn_cell.LSTM,
                outputs_with_grads=outputs_with_grads,
                T=T,
                two_d_initial_states=False,
                dim_out=dim_out,
                **kwargs
            ) for T in [input_tensor.shape[0], None]
        ]
        outputs, nets, inputs = zip(*lstms)
        workspace.FeedBlob(inputs[0][-1], input_tensor)

        assert inputs[0] == inputs[1]
        gradient_checker.NetGradientChecker.CompareNets(
            nets, outputs, outputs_with_grads,
            inputs_with_grads=inputs[0],
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        encoder_length=st.integers(min_value=1, max_value=3),
        encoder_dim=st.integers(min_value=1, max_value=3),
        hidden_units=st.integers(min_value=1, max_value=3),
        num_layers=st.integers(min_value=1, max_value=3),
        residual=st.booleans(),
        final_dropout=st.booleans(),
    )
    @ht_settings(max_examples=10)
    @utils.debug
    def test_unroll_attention(self, input_tensor, encoder_length,
                              encoder_dim, hidden_units, num_layers,
                              residual, final_dropout):

        dim_out = [hidden_units] * num_layers
        encoder_tensor = np.random.random(
            (encoder_length, input_tensor.shape[1], encoder_dim),
        ).astype('float32')

        print('Decoder input shape: {}'.format(input_tensor.shape))
        print('Encoder output shape: {}'.format(encoder_tensor.shape))

        # Necessary because otherwise test fails for networks with fewer
        # layers than previous test. TODO: investigate why.
        workspace.ResetWorkspace()

        net, unrolled = [
            _prepare_attention(
                t=input_tensor.shape[0],
                n=input_tensor.shape[1],
                dim_in=input_tensor.shape[2],
                encoder_dim=encoder_dim,
                T=T,
                dim_out=dim_out,
                residual=residual,
                final_dropout=final_dropout,
            ) for T in [input_tensor.shape[0], None]
        ]

        workspace.FeedBlob(net['input_blob'], input_tensor)
        workspace.FeedBlob(net['encoder_outputs'], encoder_tensor)
        workspace.FeedBlob(
            net['weighted_encoder_outputs'],
            np.random.random(encoder_tensor.shape).astype('float32'),
        )

        for input_name in [
            'input_blob',
            'encoder_outputs',
            'weighted_encoder_outputs',
        ]:
            assert net[input_name] == unrolled[input_name]
        for state_name, unrolled_state_name in zip(
            net['initial_states'],
            unrolled['initial_states'],
        ):
            assert state_name == unrolled_state_name

        inputs_with_grads = net['initial_states'] + [
            net['input_blob'],
            net['encoder_outputs'],
            net['weighted_encoder_outputs'],
        ]

        gradient_checker.NetGradientChecker.CompareNets(
            [net['net'], unrolled['net']],
            [[net['final_output']], [unrolled['final_output']]],
            [0],
            inputs_with_grads=inputs_with_grads,
            threshold=0.000001,
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3),
        forget_bias=st.floats(-10.0, 10.0),
        forward_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=10)
    def test_layered_lstm(self, input_tensor, **kwargs):
        for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
            for memory_optim in [False, True]:
                _, net, inputs = _prepare_rnn(
                    *input_tensor.shape,
                    create_rnn=rnn_cell.LSTM,
                    outputs_with_grads=outputs_with_grads,
                    memory_optim=memory_optim,
                    **kwargs
                )
                workspace.FeedBlob(inputs[-1], input_tensor)
                workspace.RunNetOnce(net)
                workspace.ResetWorkspace()

    @given(
        input_tensor=lstm_input(),
        forget_bias=st.floats(-10.0, 10.0),
        fwd_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=3, timeout=100)
    @utils.debug
    def test_lstm_main(self, **kwargs):
        for lstm_type in [(rnn_cell.LSTM, lstm_reference),
                          (rnn_cell.MILSTM, milstm_reference)]:
            for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
                for memory_optim in [False, True]:
                    self.lstm_base(lstm_type,
                                   outputs_with_grads=outputs_with_grads,
                                   memory_optim=memory_optim,
                                   **kwargs)

    def lstm_base(self, lstm_type, outputs_with_grads, memory_optim,
                  input_tensor, forget_bias, fwd_only, drop_states):
        print("LSTM test parameters: ", locals())
        create_lstm, ref = lstm_type

        t, n, d = input_tensor.shape
        assert d % 4 == 0
        d = d // 4
        ref = partial(ref, forget_bias=forget_bias, drop_states=drop_states)

        net = _prepare_rnn(t, n, d, create_lstm,
                           outputs_with_grads=outputs_with_grads,
                           memory_optim=memory_optim,
                           forget_bias=forget_bias,
                           forward_only=fwd_only,
                           drop_states=drop_states)[1]
        # we don't feed a real input for the whole net, only for one of its
        # ops (RecurrentNetworkOp), so this blob name has to be hardcoded
        workspace.FeedBlob("test_name_scope/external/recurrent/i2h",
                           input_tensor)
        op = net._net.op[-1]
        inputs = [workspace.FetchBlob(name) for name in op.input]

        # Validate forward only mode is in effect
        if fwd_only:
            for arg in op.arg:
                self.assertFalse(arg.name == 'backward_step_net')

        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            inputs,
            ref,
            outputs_to_check=list(range(4)),
        )

        # Checking for input, gates_t_w and gates_t_b gradients
        if not fwd_only:
            for param in range(5):
                self.assertGradientChecks(
                    device_option=hu.cpu_do,
                    op=op,
                    inputs=inputs,
                    outputs_to_check=param,
                    outputs_with_grads=outputs_with_grads,
                    threshold=0.01,
                    stepsize=0.005,
                )

    def test_lstm_extract_predictor_net(self):
        model = ModelHelper(name="lstm_extract_test")

        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            output, _, _, _ = rnn_cell.LSTM(
                model=model,
                input_blob="input",
                seq_lengths="seqlengths",
                initial_states=("hidden_init", "cell_init"),
                dim_in=20,
                dim_out=40,
                scope="test",
                drop_states=True,
                return_last_layer_only=True,
            )
        # Run param init net to get the shapes for all inputs
        shapes = {}
        workspace.RunNetOnce(model.param_init_net)
        for b in workspace.Blobs():
            shapes[b] = workspace.FetchBlob(b).shape

        # Export the predictor net on CPU (device id 1) so that the device
        # options rewritten by ExtractPredictorNet can be validated below
        (predict_net, export_blobs) = ExtractPredictorNet(
            net_proto=model.net.Proto(),
            input_blobs=["input"],
            output_blobs=[output],
            device=core.DeviceOption(caffe2_pb2.CPU, 1),
        )

        # Create the net and run once to see it is valid
        # Populate external inputs with correctly shaped random input
        # and also ensure that the export_blobs was constructed correctly.
        workspace.ResetWorkspace()
        shapes['input'] = [10, 4, 20]
        shapes['cell_init'] = [1, 4, 40]
        shapes['hidden_init'] = [1, 4, 40]

        print(predict_net.Proto().external_input)
        self.assertTrue('seqlengths' in predict_net.Proto().external_input)
        for einp in predict_net.Proto().external_input:
            if einp == 'seqlengths':
                workspace.FeedBlob(
                    "seqlengths",
                    np.array([10] * 4, dtype=np.int32)
                )
            else:
                workspace.FeedBlob(
                    einp,
                    np.zeros(shapes[einp]).astype(np.float32),
                )
                if einp != 'input':
                    self.assertTrue(einp in export_blobs)

        print(str(predict_net.Proto()))
        self.assertTrue(workspace.CreateNet(predict_net.Proto()))
        self.assertTrue(workspace.RunNet(predict_net.Proto().name))

        # Validate device options set correctly for the RNNs
        import google.protobuf.text_format as protobuftx
        for op in predict_net.Proto().op:
            if op.type == 'RecurrentNetwork':
                for arg in op.arg:
                    if arg.name == "step_net":
                        step_proto = caffe2_pb2.NetDef()
                        protobuftx.Merge(arg.s.decode("ascii"), step_proto)
                        for step_op in step_proto.op:
                            self.assertEqual(0, step_op.device_option.device_type)
                            self.assertEqual(1, step_op.device_option.cuda_gpu_id)
                    elif arg.name == 'backward_step_net':
                        self.assertEqual(b"", arg.s)

    def test_lstm_params(self):
        model = ModelHelper(name="lstm_params_test")

        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            output, _, _, _ = rnn_cell.LSTM(
                model=model,
                input_blob="input",
                seq_lengths="seqlengths",
                initial_states=None,
                dim_in=20,
                dim_out=40,
                scope="test",
                drop_states=True,
                return_last_layer_only=True,
            )
        for param in model.GetParams():
            self.assertNotEqual(model.get_param_info(param), None)

    def test_milstm_params(self):
        model = ModelHelper(name="milstm_params_test")

        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            output, _, _, _ = rnn_cell.MILSTM(
                model=model,
                input_blob="input",
                seq_lengths="seqlengths",
                initial_states=None,
                dim_in=20,
                dim_out=[40, 20],
                scope="test",
                drop_states=True,
                return_last_layer_only=True,
            )
        for param in model.GetParams():
            self.assertNotEqual(model.get_param_info(param), None)

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_regular_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Regular,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_regular_attention_reference,
            gc,
        )

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_recurrent_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Recurrent,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_recurrent_attention_reference,
            gc,
        )

    @given(encoder_output_length=st.integers(2, 2),
           encoder_output_dim=st.integers(4, 4),
           decoder_input_length=st.integers(3, 3),
           decoder_state_dim=st.integers(4, 4),
           batch_size=st.integers(5, 5),
           **hu.gcs)
    def test_lstm_with_dot_attention_same_dim(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Dot,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_dot_attention_reference_same_dim,
            gc,
        )

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(4, 4),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(5, 5),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_dot_attention_different_dim(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Dot,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_dot_attention_reference_different_dim,
            gc,
        )

    def lstm_with_attention(
        self,
        create_lstm_with_attention,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        ref,
        gc,
    ):
        model = ModelHelper(name='external')
        with core.DeviceScope(gc):
            (
                encoder_outputs,
                decoder_inputs,
                decoder_input_lengths,
                initial_decoder_hidden_state,
                initial_decoder_cell_state,
                initial_attention_weighted_encoder_context,
            ) = model.net.AddExternalInputs(
                'encoder_outputs',
                'decoder_inputs',
                'decoder_input_lengths',
                'initial_decoder_hidden_state',
                'initial_decoder_cell_state',
                'initial_attention_weighted_encoder_context',
            )
            create_lstm_with_attention(
                model=model,
                decoder_inputs=decoder_inputs,
                decoder_input_lengths=decoder_input_lengths,
                initial_decoder_hidden_state=initial_decoder_hidden_state,
                initial_decoder_cell_state=initial_decoder_cell_state,
                initial_attention_weighted_encoder_context=(
                    initial_attention_weighted_encoder_context
                ),
                encoder_output_dim=encoder_output_dim,
                encoder_outputs=encoder_outputs,
                encoder_lengths=None,
                decoder_input_dim=decoder_state_dim,
                decoder_state_dim=decoder_state_dim,
                scope='external/LSTMWithAttention',
            )
            op = model.net._net.op[-2]
        workspace.RunNetOnce(model.param_init_net)

        # This is original decoder_inputs after linear layer
        decoder_input_blob = op.input[0]

        workspace.FeedBlob(
            decoder_input_blob,
            np.random.randn(
                decoder_input_length,
                batch_size,
                decoder_state_dim * 4,
            ).astype(np.float32))
        workspace.FeedBlob(
            'external/LSTMWithAttention/encoder_outputs_transposed',
            np.random.randn(
                batch_size,
                encoder_output_dim,
                encoder_output_length,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            'external/LSTMWithAttention/weighted_encoder_outputs',
            np.random.randn(
                encoder_output_length,
                batch_size,
                encoder_output_dim,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            decoder_input_lengths,
            np.random.randint(
                0,
                decoder_input_length + 1,
                size=(batch_size,)
            ).astype(np.int32))
        workspace.FeedBlob(
            initial_decoder_hidden_state,
            np.random.randn(1, batch_size, decoder_state_dim).astype(np.float32)
        )
        workspace.FeedBlob(
            initial_decoder_cell_state,
            np.random.randn(1, batch_size, decoder_state_dim).astype(np.float32)
        )
        workspace.FeedBlob(
            initial_attention_weighted_encoder_context,
            np.random.randn(
                1, batch_size, encoder_output_dim).astype(np.float32)
        )
        inputs = [workspace.FetchBlob(name) for name in op.input]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref,
            grad_reference=None,
            output_to_grad=None,
            outputs_to_check=list(range(6)),
        )
        gradients_to_check = [
            index for (index, input_name) in enumerate(op.input)
            if input_name != 'decoder_input_lengths'
        ]
        for param in gradients_to_check:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                outputs_to_check=param,
                outputs_with_grads=[0, 4],
                threshold=0.01,
                stepsize=0.001,
            )

    @given(n=st.integers(1, 10),
           d=st.integers(1, 10),
           t=st.integers(1, 10),
           **hu.gcs)
    def test_lstm_unit_recurrent_network(self, n, d, t, dc, gc):
        op = core.CreateOperator(
            'LSTMUnit',
            [
                'hidden_t_prev',
                'cell_t_prev',
                'gates_t',
                'seq_lengths',
                'timestep',
            ],
            ['hidden_t', 'cell_t'])
        cell_t_prev = np.random.randn(1, n, d).astype(np.float32)
        hidden_t_prev = np.random.randn(1, n, d).astype(np.float32)
        gates = np.random.randn(1, n, 4 * d).astype(np.float32)
        seq_lengths = np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
        timestep = np.random.randint(0, t, size=(1,)).astype(np.int32)
        inputs = [hidden_t_prev, cell_t_prev, gates, seq_lengths, timestep]
        input_device_options = {'timestep': hu.cpu_do}
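        # the timestep input is always consumed on CPU by the recurrent ops,
        # so pin it there regardless of the device under test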
        self.assertDeviceChecks(
            dc, op, inputs, [0],
            input_device_options=input_device_options)
        self.assertReferenceChecks(
            gc, op, inputs, lstm_unit,
            input_device_options=input_device_options)
        for i in range(2):
            self.assertGradientChecks(
                gc, op, inputs, i, [0, 1],
                input_device_options=input_device_options)

    @given(input_length=st.integers(2, 5),
           dim_in=st.integers(1, 3),
           max_num_units=st.integers(1, 3),
           num_layers=st.integers(2, 3),
           batch_size=st.integers(1, 3))
    def test_multi_lstm(
        self,
        input_length,
        dim_in,
        max_num_units,
        num_layers,
        batch_size,
    ):
        model = ModelHelper(name='external')
        (
            input_sequence,
            seq_lengths,
        ) = model.net.AddExternalInputs(
            'input_sequence',
            'seq_lengths',
        )
        dim_out = [
            np.random.randint(1, max_num_units + 1)
            for _ in range(num_layers)
        ]
        h_all, h_last, c_all, c_last = rnn_cell.LSTM(
            model=model,
            input_blob=input_sequence,
            seq_lengths=seq_lengths,
            initial_states=None,
            dim_in=dim_in,
            dim_out=dim_out,
            scope='test',
            outputs_with_grads=(0,),
            return_params=False,
            memory_optimization=False,
            forget_bias=0.0,
            forward_only=False,
            return_last_layer_only=True,
        )

        workspace.RunNetOnce(model.param_init_net)

        seq_lengths_val = np.random.randint(
            1,
            input_length + 1,
            size=(batch_size,),
        ).astype(np.int32)
        input_sequence_val = np.random.randn(
            input_length,
            batch_size,
            dim_in,
        ).astype(np.float32)
        workspace.FeedBlob(seq_lengths, seq_lengths_val)
        workspace.FeedBlob(input_sequence, input_sequence_val)

        hidden_input_list = []
        cell_input_list = []
        i2h_w_list = []
        i2h_b_list = []
        gates_w_list = []
        gates_b_list = []

        for i in range(num_layers):
            hidden_input_list.append(
                workspace.FetchBlob('test/initial_hidden_state_{}'.format(i)),
            )
            cell_input_list.append(
                workspace.FetchBlob('test/initial_cell_state_{}'.format(i)),
            )
            i2h_w_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_w'.format(i)),
            )
            i2h_b_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_b'.format(i)),
            )
            gates_w_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_w'.format(i)),
            )
            gates_b_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_b'.format(i)),
            )

        workspace.RunNetOnce(model.net)
        h_all_calc = workspace.FetchBlob(h_all)
        h_last_calc = workspace.FetchBlob(h_last)
        c_all_calc = workspace.FetchBlob(c_all)
        c_last_calc = workspace.FetchBlob(c_last)

        h_all_ref, h_last_ref, c_all_ref, c_last_ref = multi_lstm_reference(
            input_sequence_val,
            hidden_input_list,
            cell_input_list,
            i2h_w_list,
            i2h_b_list,
            gates_w_list,
            gates_b_list,
            seq_lengths_val,
            forget_bias=0.0,
        )

        h_all_delta = np.abs(h_all_ref - h_all_calc).sum()
        h_last_delta = np.abs(h_last_ref - h_last_calc).sum()
        c_all_delta = np.abs(c_all_ref - c_all_calc).sum()
        c_last_delta = np.abs(c_last_ref - c_last_calc).sum()

        self.assertAlmostEqual(h_all_delta, 0.0, places=5)
        self.assertAlmostEqual(h_last_delta, 0.0, places=5)
        self.assertAlmostEqual(c_all_delta, 0.0, places=5)
        self.assertAlmostEqual(c_last_delta, 0.0, places=5)

        input_values = {
            'input_sequence': input_sequence_val,
            'seq_lengths': seq_lengths_val,
        }
        for param in model.GetParams():
            value = workspace.FetchBlob(param)
            input_values[str(param)] = value

        output_sum = model.net.SumElements(
            [h_all],
            'output_sum',
            average=True,
        )
        fake_loss = model.net.Tanh(
            output_sum,
        )
        for param in model.GetParams():
            gradient_checker.NetGradientChecker.Check(
                model.net,
                outputs_with_grad=[fake_loss],
                input_values=input_values,
                input_to_check=str(param),
                print_net=False,
                step_size=0.0001,
                threshold=0.05,
            )
Example #49
    assert expand_compound_token('E-Mobility-Strategy',
                                 split_on_len=2,
                                 split_on_casechange=True) == [
                                     'EMobility', 'Strategy'
                                 ]

    assert expand_compound_token('E-Mobility-Strategy', split_on_len=1) == [
        'E', 'Mobility', 'Strategy'
    ]


@given(s=st.text(),
       split_chars=st.lists(st.characters()),
       split_on_len=st.integers(0),
       split_on_casechange=st.booleans())
def test_expand_compound_token_hypothesis(s, split_chars, split_on_len,
                                          split_on_casechange):
    if not split_on_len and not split_on_casechange:
        with pytest.raises(ValueError):
            expand_compound_token(s,
                                  split_chars,
                                  split_on_len=split_on_len,
                                  split_on_casechange=split_on_casechange)
    else:
        res = expand_compound_token(s,
                                    split_chars,
                                    split_on_len=split_on_len,
                                    split_on_casechange=split_on_casechange)

        assert type(res) is list
Example #50
class TestSpatialBN(serial.SerializedTestCase):
    @serial.given(size=st.integers(7, 10),
                  input_channels=st.integers(1, 10),
                  batch_size=st.integers(0, 3),
                  seed=st.integers(0, 65535),
                  order=st.sampled_from(["NCHW", "NHWC"]),
                  epsilon=st.floats(min_value=1e-5, max_value=1e-2),
                  inplace=st.booleans(),
                  engine=st.sampled_from(["", "CUDNN"]),
                  **hu.gcs)
    def test_spatialbn_test_mode_3d(self, size, input_channels, batch_size,
                                    seed, order, epsilon, inplace, engine, gc,
                                    dc):
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine=engine,
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis, np.newaxis,
                              np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                var = var[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
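                # reshaping to (1, C, 1, 1, 1) lets the per-channel params
                # broadcast along axis 1 of the 5D NCHW input; NHWC needs no
                # reshape since channels are already the last axis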

            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias, )

        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size, size, size)\
            .astype(np.float32) - 0.5

        if order == "NHWC":
            X = utils.NCHW2NHWC(X)
        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           inplace=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatialbn_test_mode_1d(self, size, input_channels, batch_size,
                                    seed, order, epsilon, inplace, engine, gc,
                                    dc):
        # Currently MIOPEN SpatialBN only supports 2D
        if hiputl.run_in_hip(gc, dc):
            assume(engine != "CUDNN")
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine=engine,
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis]
                var = var[np.newaxis, :, np.newaxis]
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias, )

        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size).astype(
            np.float32) - 0.5

        if order == "NHWC":
            X = X.swapaxes(1, 2)
        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           inplace=st.booleans(),
           **hu.gcs)
    def test_spatialbn_test_mode(self, size, input_channels, batch_size, seed,
                                 order, epsilon, inplace, engine, gc, dc):
        # Currently HIP SpatialBN only supports NCHW
        if hiputl.run_in_hip(gc, dc):
            assume(order == "NCHW")

        op = core.CreateOperator("SpatialBN",
                                 ["X", "scale", "bias", "mean", "var"],
                                 ["X" if inplace else "Y"],
                                 order=order,
                                 is_test=True,
                                 epsilon=epsilon,
                                 engine=engine)

        def reference_spatialbn_test(X, scale, bias, mean, var):
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
                var = var[np.newaxis, :, np.newaxis, np.newaxis]
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias, )

        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size, size).astype(
            np.float32) - 0.5

        if order == "NHWC":
            X = X.swapaxes(1, 2).swapaxes(2, 3)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @given(size=st.integers(1, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(1e-5, 1e-2),
           momentum=st.floats(0.5, 0.9),
           engine=st.sampled_from(["", "CUDNN"]),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_train_mode(self, size, input_channels, batch_size, seed,
                                  order, epsilon, momentum, inplace, engine,
                                  gc, dc):
        # Currently HIP SpatialBN only supports NCHW
        if hiputl.run_in_hip(gc, dc):
            assume(order == "NCHW")

        assume(batch_size == 0 or batch_size * size * size > 1)

        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "running_mean", "running_var"],
            [
                "X" if inplace else "Y", "running_mean", "running_var",
                "saved_mean", "saved_var"
            ],
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            engine=engine,
        )
        np.random.seed(1701)
        scale = np.random.randn(input_channels).astype(np.float32)
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.randn(batch_size, input_channels, size,
                            size).astype(np.float32)

        if order == "NHWC":
            X = np.transpose(X, (0, 2, 3, 1))

        def batch_norm_ref(X, scale, bias, running_mean, running_var):
            if batch_size == 0:
                Y = np.zeros(X.shape)
                saved_mean = np.zeros(running_mean.shape)
                saved_var = np.zeros(running_var.shape)
                return (Y, running_mean, running_var, saved_mean, saved_var)

            if order == "NHWC":
                X = np.transpose(X, (0, 3, 1, 2))

            C = X.shape[1]
            reduce_size = batch_size * size * size
            saved_mean = np.mean(X, (0, 2, 3))
            saved_var = np.var(X, (0, 2, 3))
            if reduce_size == 1:
                unbias_scale = float('inf')
            else:
                unbias_scale = reduce_size / (reduce_size - 1)
            running_mean = momentum * running_mean + (1.0 -
                                                      momentum) * saved_mean
            running_var = momentum * running_var + (
                1.0 - momentum) * unbias_scale * saved_var
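            # the running variance uses the unbiased estimate (Bessel's
            # correction, n / (n - 1)), while the normalization below uses the
            # biased saved_var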
            std = np.sqrt(saved_var + epsilon)
            broadcast_shape = (1, C, 1, 1)
            Y = (X - np.reshape(saved_mean, broadcast_shape)) / np.reshape(
                std, broadcast_shape) * np.reshape(
                    scale, broadcast_shape) + np.reshape(
                        bias, broadcast_shape)

            if order == "NHWC":
                Y = np.transpose(Y, (0, 2, 3, 1))

            return (Y, running_mean, running_var, saved_mean, 1.0 / std)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   batch_norm_ref)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
                                [0, 1, 2, 3, 4])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           momentum=st.floats(0.5, 0.9),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_spatialbn_train_mode_gradient_check(self, size, input_channels,
                                                 batch_size, seed, order,
                                                 epsilon, momentum, engine, gc,
                                                 dc):
        # Currently HIP SpatialBN only supports NCHW
        if hiputl.run_in_hip(gc, dc):
            assume(order == "NCHW")

        op = core.CreateOperator(
            "SpatialBN", ["X", "scale", "bias", "mean", "var"],
            ["Y", "mean", "var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            engine=engine)
        np.random.seed(seed)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size, size).astype(
            np.float32) - 0.5
        if order == "NHWC":
            X = X.swapaxes(1, 2).swapaxes(2, 3)

        for input_to_check in [0, 1, 2]:  # dX, dScale, dBias
            self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
                                      input_to_check, [0])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           momentum=st.floats(min_value=0.5, max_value=0.9),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    @settings(deadline=10000)
    def test_spatialbn_train_mode_gradient_check_1d(self, size, input_channels,
                                                    batch_size, seed, order,
                                                    epsilon, momentum, engine,
                                                    gc, dc):
        # Currently MIOPEN SpatialBN only supports 2D
        if hiputl.run_in_hip(gc, dc):
            assume(engine != "CUDNN")
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["Y", "mean", "var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            engine=engine,
        )
        np.random.seed(seed)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size).astype(
            np.float32) - 0.5
        if order == "NHWC":
            X = X.swapaxes(1, 2)

        for input_to_check in [0, 1, 2]:  # dX, dScale, dBias
            self.assertGradientChecks(gc,
                                      op, [X, scale, bias, mean, var],
                                      input_to_check, [0],
                                      stepsize=0.01)

    @given(N=st.integers(0, 5),
           C=st.integers(1, 10),
           H=st.integers(1, 5),
           W=st.integers(1, 5),
           epsilon=st.floats(1e-5, 1e-2),
           momentum=st.floats(0.5, 0.9),
           order=st.sampled_from(["NCHW", "NHWC"]),
           num_batches=st.integers(2, 5),
           in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatial_bn_multi_batch(self, N, C, H, W, epsilon, momentum, order,
                                    num_batches, in_place, engine, gc, dc):
        if in_place:
            outputs = ["Y", "mean", "var", "batch_mean", "batch_var"]
        else:
            outputs = ["Y", "mean", "var", "saved_mean", "saved_var"]
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var", "batch_mean", "batch_var"],
            outputs,
            order=order,
            is_test=False,
            epsilon=epsilon,
            momentum=momentum,
            num_batches=num_batches,
            engine=engine,
        )
        if order == "NCHW":
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32)
        scale = np.random.randn(C).astype(np.float32)
        bias = np.random.randn(C).astype(np.float32)
        mean = np.random.randn(C).astype(np.float32)
        var = np.random.rand(C).astype(np.float32)
        batch_mean = np.random.rand(C).astype(np.float32) - 0.5
        batch_var = np.random.rand(C).astype(np.float32) + 1.0
        inputs = [X, scale, bias, mean, var, batch_mean, batch_var]

        def spatial_bn_multi_batch_ref(X, scale, bias, mean, var, batch_mean,
                                       batch_var):
            if N == 0:
                batch_mean = np.zeros(C).astype(np.float32)
                batch_var = np.zeros(C).astype(np.float32)
            else:
                size = num_batches * N * H * W
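                # batch_mean/batch_var come in as sums accumulated across
                # num_batches mini-batches; reduce them to mean and variance.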
                batch_mean /= size
                batch_var = batch_var / size - np.square(batch_mean)
                mean = momentum * mean + (1.0 - momentum) * batch_mean
                var = momentum * var + (1.0 -
                                        momentum) * (size /
                                                     (size - 1)) * batch_var
                batch_var = 1.0 / np.sqrt(batch_var + epsilon)
            if order == "NCHW":
                scale = np.reshape(scale, (C, 1, 1))
                bias = np.reshape(bias, (C, 1, 1))
                batch_mean = np.reshape(batch_mean, (C, 1, 1))
                batch_var = np.reshape(batch_var, (C, 1, 1))
            Y = (X - batch_mean) * batch_var * scale + bias
            if order == "NCHW":
                batch_mean = np.reshape(batch_mean, (C))
                batch_var = np.reshape(batch_var, (C))
            return (Y, mean, var, batch_mean, batch_var)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=spatial_bn_multi_batch_ref,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2, 3, 4])

    @given(N=st.integers(0, 5),
           C=st.integers(1, 10),
           H=st.integers(1, 5),
           W=st.integers(1, 5),
           epsilon=st.floats(1e-5, 1e-2),
           order=st.sampled_from(["NCHW", "NHWC"]),
           num_batches=st.integers(2, 5),
           in_place=st.booleans(),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    @settings(deadline=None)
    def test_spatial_bn_multi_batch_grad(self, N, C, H, W, epsilon, order,
                                         num_batches, in_place, engine, gc,
                                         dc):
        if in_place:
            outputs = ["dX", "dscale_sum", "dbias_sum"]
        else:
            outputs = ["dX", "dscale", "dbias"]
        op = core.CreateOperator(
            "SpatialBNGradient",
            ["X", "scale", "dY", "mean", "rstd", "dscale_sum", "dbias_sum"],
            outputs,
            order=order,
            epsilon=epsilon,
            num_batches=num_batches,
            engine=engine,
        )
        if order == "NCHW":
            dY = np.random.randn(N, C, H, W).astype(np.float32)
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            dY = np.random.randn(N, H, W, C).astype(np.float32)
            X = np.random.randn(N, H, W, C).astype(np.float32)
        scale = np.random.randn(C).astype(np.float32)
        mean = np.random.randn(C).astype(np.float32)
        rstd = np.random.rand(C).astype(np.float32)
        dscale_sum = np.random.randn(C).astype(np.float32)
        dbias_sum = np.random.randn(C).astype(np.float32)
        inputs = [X, scale, dY, mean, rstd, dscale_sum, dbias_sum]

        def spatial_bn_multi_batch_grad_ref(X, scale, dY, mean, rstd,
                                            dscale_sum, dbias_sum):
            if N == 0:
                dscale = np.zeros(C).astype(np.float32)
                dbias = np.zeros(C).astype(np.float32)
                alpha = np.zeros(C).astype(np.float32)
                beta = np.zeros(C).astype(np.float32)
                gamma = np.zeros(C).astype(np.float32)
            else:
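                # dscale_sum/dbias_sum are accumulated across mini-batches.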
                dscale = dscale_sum / num_batches
                dbias = dbias_sum / num_batches
                alpha = scale * rstd
                beta = -alpha * dscale * rstd / (N * H * W)
                gamma = alpha * (mean * dscale * rstd - dbias) / (N * H * W)
            if order == "NCHW":
                alpha = np.reshape(alpha, (C, 1, 1))
                beta = np.reshape(beta, (C, 1, 1))
                gamma = np.reshape(gamma, (C, 1, 1))
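            # dX is affine in dY and X, with per-channel coefficients.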
            dX = alpha * dY + beta * X + gamma
            return (dX, dscale, dbias)

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=spatial_bn_multi_batch_grad_ref,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(0, 3),
           seed=st.integers(0, 65535),
           epsilon=st.floats(1e-5, 1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatialbn_brew_wrapper(self, size, input_channels, batch_size,
                                    seed, epsilon, engine, gc, dc):
        np.random.seed(seed)
        X = np.random.rand(batch_size, input_channels, size,
                           size).astype(np.float32)

        workspace.FeedBlob('X', X)

        model = ModelHelper(name='test_spatialbn_brew_wrapper')

        brew.spatial_bn(
            model,
            'X',
            'Y',
            input_channels,
            epsilon=epsilon,
            is_test=False,
        )

        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
Example #51
@given(
    st.dictionaries(
        keys=st.sampled_from([
            "/Author",
            "/Subject",
            "/Title",
            "/Keywords",
            "/Producer",
            "/CreationDate",
            "/Creator",
            "/ModDate",
            "/Dummy",
        ]),
        values=st.recursive(
            st.none() | st.binary(max_size=16) | st.booleans(),
            lambda children: st.lists(children, min_size=0, max_size=4),
            max_leaves=2,
        ),
    ))
def test_random_docinfo(docinfo):
    p = pikepdf.new()
    with p.open_metadata() as m:
        pdf_docinfo = pikepdf.Dictionary(docinfo)

        try:
            m.load_from_docinfo(pdf_docinfo, raise_failure=True)
        except ValueError as e:
            assert 'could not be copied to XMP' in str(e) or '/Dummy' in str(e)
        else:
            ET.fromstring(str(m))  # ensure we can parse it
Example #52
    def sample_program_configs(self, draw):
        alpha = draw(st.floats(min_value=1, max_value=1))  # the fuse pass requires alpha == 1
        x_num_col_dims = draw(st.floats(min_value=0, max_value=1))
        y_num_col_dims = draw(st.floats(min_value=0, max_value=1))
        int32_values_1 = draw(st.integers(min_value=1, max_value=40))
        int32_values_2 = draw(st.integers(min_value=1, max_value=40))
        int32_values_3 = draw(st.integers(min_value=1, max_value=40))

        squeeze2_input_shape = [int32_values_1, int32_values_2, 1, 1]
        matmul_input_shape = [squeeze2_input_shape[1], int32_values_3]
        scale_x = draw(st.sampled_from([0.1, 1.1]))
        scale_y = draw(st.sampled_from([0.1, 1.1]))
        scale_out = draw(st.sampled_from([0.1, 1.1]))
        force_fp32_output = draw(st.booleans())
        squeeze2_op = OpConfig(
            type="squeeze2",
            inputs={"X": ["squeeze2_input_x"]},
            outputs={
                "Out": ["squeeze2_output"],
                "XShape": ["squeeze2_output_XShape"]
            },
            attrs={
                "axes": [2, 3]  #required in pass
            })

        matmul_op = OpConfig(
            type="matmul",
            inputs={
                "X": ["squeeze2_output"],
                "Y": ["matmul_input"]
            },
            outputs={"Out": ["output_data"]},
            attrs={
                "transpose_X": False,  #required in pass
                "transpose_Y": False,  #required in pass
                "x_num_col_dims": x_num_col_dims,
                "y_num_col_dims": y_num_col_dims,
                "Scale_x": scale_x,
                "Scale_y": scale_y,
                "Scale_out": scale_out,
                "force_fp32_output": force_fp32_output,
                "alpha": alpha,
                "fused_reshape_X": [],
                "fused_transpose_X": [],
                "fused_reshape_Y": [],
                "fused_transpose_Y": [],
                "fused_reshape_Out": [],
                "fused_transpose_Out": [],
                "head_number": int(1)
            })

        ops = [squeeze2_op, matmul_op]
        program_config = ProgramConfig(
            ops=ops,
            weights={},
            inputs={
                "squeeze2_input_x": TensorConfig(shape=squeeze2_input_shape),
                "matmul_input": TensorConfig(shape=matmul_input_shape)
            },
            outputs=["output_data"])
        return program_config
Example #53
class TestBoxWithNMSLimitOp(serial.SerializedTestCase):
    @serial.given(**HU_CONFIG)
    def test_simple(self, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.9, 0.8, 0.6]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10, 2)

        gt_boxes, gt_scores = gen_multiple_boxes(in_centers, in_scores, 1, 1)
        gt_classes = np.ones(gt_boxes.shape[0], dtype=np.float32)

        op = get_op(2, 3, {"score_thresh": 0.5, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(**HU_CONFIG)
    def test_score_thresh(self, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.7, 0.85, 0.6]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10, 2)

        gt_centers = [(20, 20)]
        gt_scores = [0.85]
        gt_boxes, gt_scores = gen_multiple_boxes(gt_centers, gt_scores, 1, 1)
        gt_classes = np.ones(gt_boxes.shape[0], dtype=np.float32)

        op = get_op(2, 3, {"score_thresh": 0.8, "nms": 0.9})

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(det_per_im=st.integers(1, 3), **HU_CONFIG)
    def test_detections_per_im(self, det_per_im, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.7, 0.85, 0.6]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10, 2)

        gt_centers = [(20, 20), (0, 0), (50, 50)][:det_per_im]
        gt_scores = [0.85, 0.7, 0.6][:det_per_im]
        gt_boxes, gt_scores = gen_multiple_boxes(gt_centers, gt_scores, 1, 1)
        gt_classes = np.ones(gt_boxes.shape[0], dtype=np.float32)

        op = get_op(2, 3, {
            "score_thresh": 0.5,
            "nms": 0.9,
            "detections_per_im": det_per_im
        })

        def ref(*args, **kwargs):
            return (gt_scores.flatten(), gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(num_classes=st.integers(2, 10),
           cls_agnostic_bbox_reg=st.booleans(),
           input_boxes_include_bg_cls=st.booleans(),
           output_classes_include_bg_cls=st.booleans(),
           **HU_CONFIG)
    def test_multiclass(self, num_classes, cls_agnostic_bbox_reg,
                        input_boxes_include_bg_cls,
                        output_classes_include_bg_cls, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.7, 0.85, 0.6]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10,
                                           num_classes)

        if not input_boxes_include_bg_cls:
            # remove background class
            boxes = boxes[:, 4:]
        if cls_agnostic_bbox_reg:
            # only leave one class
            boxes = boxes[:, :4]

        gt_centers = [(20, 20), (0, 0), (50, 50)]
        gt_scores = [0.85, 0.7, 0.6]
        gt_boxes, gt_scores = gen_multiple_boxes(gt_centers, gt_scores, 1, 1)
        # [1, 1, 1, 2, 2, 2, 3, 3, 3, ...]
        gt_classes = np.tile(np.array(range(1, num_classes), dtype=np.float32),
                             (gt_boxes.shape[0], 1)).T.flatten()
        if not output_classes_include_bg_cls:
            # remove background class
            gt_classes -= 1
        gt_boxes = np.tile(gt_boxes, (num_classes - 1, 1))
        gt_scores = np.tile(gt_scores, (num_classes - 1, 1)).flatten()

        op = get_op(
            2, 3, {
                "score_thresh": 0.5,
                "nms": 0.9,
                "detections_per_im": 100,
                "cls_agnostic_bbox_reg": cls_agnostic_bbox_reg,
                "input_boxes_include_bg_cls": input_boxes_include_bg_cls,
                "output_classes_include_bg_cls": output_classes_include_bg_cls
            })

        def ref(*args, **kwargs):
            return (gt_scores, gt_boxes, gt_classes)

        self.assertReferenceChecks(gc, op, [scores, boxes], ref)

    @given(det_per_im=st.integers(1, 3), **HU_CONFIG)
    def test_detections_per_im_same_thresh(self, det_per_im, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.7, 0.7, 0.7]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10, 2)

        gt_centers = [(20, 20), (0, 0), (50, 50)][:det_per_im]
        gt_scores = [0.7, 0.7, 0.7][:det_per_im]
        gt_boxes, gt_scores = gen_multiple_boxes(gt_centers, gt_scores, 1, 1)
        gt_classes = np.ones(gt_boxes.shape[0], dtype=np.float32)

        op = get_op(2, 3, {
            "score_thresh": 0.5,
            "nms": 0.9,
            "detections_per_im": det_per_im
        })

        # boxes output could be in any order
        def verify(inputs, outputs):
            # check scores
            np.testing.assert_allclose(
                outputs[0],
                gt_scores.flatten(),
                atol=1e-4,
                rtol=1e-4,
            )
            # check classes
            np.testing.assert_allclose(
                outputs[2],
                gt_classes,
                atol=1e-4,
                rtol=1e-4,
            )
            self.assertEqual(outputs[1].shape, gt_boxes.shape)

        self.assertValidationChecks(gc,
                                    op, [scores, boxes],
                                    verify,
                                    as_kwargs=False)

    @given(num_classes=st.integers(2, 10), **HU_CONFIG)
    def test_detections_per_im_same_thresh_multiclass(self, num_classes, gc):
        in_centers = [(0, 0), (20, 20), (50, 50)]
        in_scores = [0.6, 0.7, 0.7]
        boxes, scores = gen_multiple_boxes(in_centers, in_scores, 10,
                                           num_classes)

        det_per_im = 1
        gt_centers = [(20, 20), (50, 50)]
        gt_scores = [0.7, 0.7]
        gt_boxes, gt_scores = gen_multiple_boxes(gt_centers, gt_scores, 1, 1)

        op = get_op(2, 3, {
            "score_thresh": 0.5,
            "nms": 0.9,
            "detections_per_im": det_per_im
        })

        # boxes output could be in any order
        def verify(inputs, outputs):
            # check scores
            self.assertEqual(outputs[0].shape, (1, ))
            self.assertEqual(outputs[0][0], gt_scores[0])

            # check boxes
            self.assertTrue(
                np.allclose(outputs[1], gt_boxes[0, :], atol=1e-4, rtol=1e-4)
                or np.allclose(
                    outputs[1], gt_boxes[1, :], atol=1e-4, rtol=1e-4))

            # check class
            self.assertNotEqual(outputs[2][0], 0)

        self.assertValidationChecks(gc,
                                    op, [scores, boxes],
                                    verify,
                                    as_kwargs=False)
Example #54
@fails
@timeout_settings
@given(integers())
def test_slow_failing_test_3(x):
    time.sleep(0.05)
    assert not calls[2]
    calls[2] = 1


@fails
@timeout_settings
@given(integers())
def test_slow_failing_test_4(x):
    time.sleep(0.05)
    assert not calls[3]
    calls[3] = 1


@fails
@given(one_of(floats(), booleans()), one_of(floats(), booleans()))
def test_one_of_produces_different_values(x, y):
    assert type(x) == type(y)


@given(just(42))
def test_is_the_answer(x):
    assert x == 42


@fails
@given(text(), text())
def test_text_addition_is_not_commutative(x, y):
    assert x + y == y + x

Example #55
class TestLeakyRelu(hu.HypothesisTestCase):
    def _get_inputs(self, N, C, H, W, order):
        input_data = np.random.rand(N, C, H, W).astype(np.float32)

        # Push values within 0.051 of zero away from it so the gradient
        # checker (default step size 0.05) never steps across the kink at 0.
        input_data[np.logical_and(input_data >= 0,
                                  input_data <= 0.051)] = 0.051
        input_data[np.logical_and(input_data <= 0,
                                  input_data >= -0.051)] = -0.051

        if order == 'NHWC':
            input_data = np.transpose(input_data, axes=(0, 2, 3, 1))

        return input_data,

    def _get_op(self, device_option, alpha, order, inplace=False):
        outputs = ['input' if inplace else 'output']
        op = core.CreateOperator('LeakyRelu', ['input'],
                                 outputs,
                                 alpha=alpha,
                                 device_option=device_option)
        return op

    def _feed_inputs(self, input_blobs, device_option):
        names = ['input', 'scale', 'bias']
        for name, blob in zip(names, input_blobs):
            self.ws.create_blob(name).feed(blob, device_option=device_option)

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 3),
           C=st.integers(2, 3),
           H=st.integers(2, 3),
           W=st.integers(2, 3),
           alpha=st.floats(0, 1),
           order=st.sampled_from(['NCHW', 'NHWC']),
           seed=st.integers(0, 1000))
    def test_leaky_relu_gradients(self, gc, dc, N, C, H, W, order, alpha,
                                  seed):
        np.random.seed(seed)

        op = self._get_op(device_option=gc, alpha=alpha, order=order)
        input_blobs = self._get_inputs(N, C, H, W, order)

        self.assertDeviceChecks(dc, op, input_blobs, [0])
        self.assertGradientChecks(gc, op, input_blobs, 0, [0])

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           alpha=st.floats(0, 1),
           seed=st.integers(0, 1000))
    def test_leaky_relu_layout(self, gc, dc, N, C, H, W, alpha, seed):
        outputs = {}
        for order in ('NCHW', 'NHWC'):
            np.random.seed(seed)
            input_blobs = self._get_inputs(N, C, H, W, order)
            self._feed_inputs(input_blobs, device_option=gc)
            op = self._get_op(device_option=gc, alpha=alpha, order=order)
            self.ws.run(op)
            outputs[order] = self.ws.blobs['output'].fetch()
        np.testing.assert_allclose(outputs['NCHW'],
                                   outputs['NHWC'].transpose((0, 3, 1, 2)),
                                   atol=1e-4,
                                   rtol=1e-4)

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           alpha=st.floats(0, 1),
           seed=st.integers(0, 1000),
           inplace=st.booleans())
    def test_leaky_relu_reference_check(self, gc, dc, N, C, H, W, order, alpha,
                                        seed, inplace):
        np.random.seed(seed)

        if order != "NCHW":
            assume(not inplace)

        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc,
                          alpha=alpha,
                          order=order,
                          inplace=inplace)

        def ref(input_blob):
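            # LeakyRelu: y = x for x >= 0, y = alpha * x for x < 0.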
            result = input_blob.copy()
            result[result < 0] *= alpha
            return result,

        self.assertReferenceChecks(gc, op, inputs, ref)

    @given(gc=hu.gcs['gc'],
           dc=hu.gcs['dc'],
           N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           alpha=st.floats(0, 1),
           seed=st.integers(0, 1000))
    def test_leaky_relu_device_check(self, gc, dc, N, C, H, W, order, alpha,
                                     seed):
        np.random.seed(seed)

        inputs = self._get_inputs(N, C, H, W, order)
        op = self._get_op(device_option=gc, alpha=alpha, order=order)

        self.assertDeviceChecks(dc, op, inputs, [0])

    @given(N=st.integers(2, 10),
           C=st.integers(3, 10),
           H=st.integers(5, 10),
           W=st.integers(7, 10),
           order=st.sampled_from(['NCHW', 'NHWC']),
           alpha=st.floats(0, 1),
           seed=st.integers(0, 1000))
    def test_leaky_relu_model_helper_helper(self, N, C, H, W, order, alpha,
                                            seed):
        np.random.seed(seed)
        arg_scope = {'order': order}
        model = model_helper.ModelHelper(name="test_model",
                                         arg_scope=arg_scope)
        model.LeakyRelu('input', 'output', alpha=alpha)

        input_blob = np.random.rand(N, C, H, W).astype(np.float32)
        if order == 'NHWC':
            input_blob = np.transpose(input_blob, axes=(0, 2, 3, 1))

        self.ws.create_blob('input').feed(input_blob)

        self.ws.create_net(model.param_init_net).run()
        self.ws.create_net(model.net).run()

        output_blob = self.ws.blobs['output'].fetch()
        if order == 'NHWC':
            output_blob = np.transpose(output_blob, axes=(0, 3, 1, 2))

        assert output_blob.shape == (N, C, H, W)
Example #56
class DNNLowPGatherOpTest(hu.HypothesisTestCase):
    @given(dim1=st.integers(256, 512),
           dim2=st.integers(32, 256),
           in_quantized=st.booleans(),
           out_quantized=st.booleans(),
           **hu.gcs_cpu_only)
    def test_dnnlowp_gather(self, dim1, dim2, in_quantized, out_quantized, gc,
                            dc):
        # FIXME: DNNLOWP Gather doesn't support quantized input with
        # dequantized output
        if in_quantized:
            out_quantized = True

        data = (np.random.rand(dim1) * 2 - 1).astype(np.float32)
        index = np.floor(np.random.rand(dim2) * dim1).astype(np.int32)

        Output = collections.namedtuple("Output", ["out", "op_type", "engine"])
        outputs = []

        op_engine_list = [
            ("Gather", ""),
            ("Gather", "DNNLOWP"),
            ("Int8Gather", "DNNLOWP"),
        ]

        for op_type, engine in op_engine_list:
            net = core.Net("test_net")

            do_quantize = "DNNLOWP" in engine and in_quantized
            do_dequantize = "DNNLOWP" in engine and out_quantized

            if do_quantize:
                quantize_data = core.CreateOperator("Quantize", ["data"],
                                                    ["data_q"],
                                                    engine=engine,
                                                    device_option=gc)
                net.Proto().op.extend([quantize_data])

            gather = core.CreateOperator(
                op_type,
                ["data_q" if do_quantize else "data", "index"],
                ["out_q" if do_dequantize else "out"],
                dequantize_output=not do_dequantize,
                engine=engine,
                device_option=gc,
            )
            net.Proto().op.extend([gather])

            if do_dequantize:
                dequantize = core.CreateOperator("Dequantize", ["out_q"],
                                                 ["out"],
                                                 engine=engine,
                                                 device_option=gc)
                net.Proto().op.extend([dequantize])

            self.ws.create_blob("data").feed(data, device_option=gc)
            self.ws.create_blob("index").feed(index, device_option=gc)
            self.ws.run(net)
            outputs.append(
                Output(out=self.ws.blobs["out"].fetch(),
                       op_type=op_type,
                       engine=engine))

        check_quantized_results_close(outputs, ref=data)
Example #57
def test_can_override_label():
    d = ConjectureData.for_buffer(hbytes(2))
    d.draw(st.booleans(), label=7)
    d.freeze()
    assert any(ex.label == 7 for ex in d.examples)
Example #58
    | commands(st.just('zincrby'), keys, scores, fields)
    | commands(st.sampled_from(['zrange', 'zrevrange']), keys, counts, counts,
               st.none() | st.just('withscores'))
    | commands(st.sampled_from(['zrangebyscore', 'zrevrangebyscore']),
               keys, score_tests, score_tests,
               limits,
               st.none() | st.just('withscores'))
    | commands(st.sampled_from(['zrank', 'zrevrank']), keys, fields)
    | commands(st.just('zrem'), keys, st.lists(fields))
    | commands(st.just('zremrangebyrank'), keys, counts, counts)
    | commands(st.just('zremrangebyscore'), keys, score_tests, score_tests)
    | commands(st.just('zscore'), keys, fields)
    | st.builds(build_zstore,
                command=st.sampled_from(['zunionstore', 'zinterstore']),
                dest=keys, sources=st.lists(st.tuples(keys, float_as_bytes)),
                weights=st.booleans(),
                aggregate=st.sampled_from([None, 'sum', 'min', 'max']))
)

zset_no_score_create_commands = (
    commands(st.just('zadd'), keys, st.lists(st.tuples(st.just(0), fields), min_size=1))
)
zset_no_score_commands = (
    # TODO: test incr
    commands(st.just('zadd'), keys, st.none() | st.just('nx'),
             st.none() | st.just('xx'), st.none() | st.just('ch'),
             st.lists(st.tuples(st.just(0), fields)))
    | commands(st.just('zlexcount'), keys, string_tests, string_tests)
    | commands(st.sampled_from(['zrangebylex', 'zrevrangebylex']),
               keys, string_tests, string_tests,
               limits)
Example #59
# These builtin and standard-library types have Hypothesis strategies,
# seem likely to appear in type annotations, or are otherwise notable.
#
# The strategies below must cover all possible values from the type, because
# many users treat them as comprehensive and one of Hypothesis' design goals
# is to avoid testing less than expected.
#
# As a general rule, we try to limit this to scalars because from_type()
# would have to decide on arbitrary collection elements, and we'd rather
# not (with typing module generic types and some builtins as exceptions).
_global_type_lookup: typing.Dict[type, typing.Union[
    st.SearchStrategy, typing.Callable[[type], st.SearchStrategy]]] = {
    type(None): st.none(),
    bool: st.booleans(),
    int: st.integers(),
    float: st.floats(),
    complex: st.complex_numbers(),
    fractions.Fraction: st.fractions(),
    decimal.Decimal: st.decimals(),
    str: st.text(),
    bytes: st.binary(),
    datetime.datetime: st.datetimes(),
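
A minimal usage sketch (the test name is hypothetical; st.from_type is the public entry point that resolves registered scalar types such as these):

from hypothesis import given, strategies as st

@given(st.from_type(bool), st.from_type(int))
def test_from_type_resolves_scalars(b, i):
    # Registered scalar types resolve to their strategies, so these draws
    # behave like st.booleans() and st.integers().
    assert isinstance(b, bool)
    assert isinstance(i, int)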
Example #60
    def bool_strategy(cls, settings):
        return st.booleans()