Python examples of Hypothesis's assume(), collected from open-source projects.

Example #1
def test_can_not_falsify_true_things(desc, random):
    assume(size(desc) <= MAX_SIZE)

    @given(desc, settings=settings, random=random)
    def test(x):
        pass
    test()
Example #2
File: test_banque.py Project: Bafou/SVL
    def test_le_solde_est_augmente(self, somme):
        compte = Compte()

        assume(somme > 0)
        
        compte.crediter(somme)
        self.assertEqual(compte.montant, somme)
Example #3
def test__natsort_key_with_tuple_input_returns_nested_tuples(x):
    # Iterables are parsed recursively so you can sort lists of lists.
    assume(len(x) <= 10)
    assume(not any(type(y) == float and isnan(y) for y in x))
    s = ''.join(repr(y) if type(y) in (float, long, int) else y for y in x)
    t = tuple(_number_extracter(s, _int_nosign_re, *int_nosafe_nolocale_nogroup))
    assert _natsort_key((s, s), None, ns.I) == (t, t)
Example #4
 def test_convolution_correctness(self, stride, pad, kernel, size,
                                  input_channels, output_channels,
                                  batch_size):
     assume(stride <= kernel)
     X = np.random.rand(
         batch_size, input_channels, size, size).astype(np.float32) - 0.5
     w = np.random.rand(
         output_channels, input_channels, kernel, kernel).astype(np.float32)\
         - 0.5
     b = np.random.rand(output_channels).astype(np.float32) - 0.5
     order = "NCHW"
     outputs = {}
     for engine in ["", "NNPACK"]:
         op = core.CreateOperator(
             "Conv",
             ["X", "w", "b"],
             ["Y"],
             stride=stride,
             kernel=kernel,
             pad=pad,
             order=order,
             kts="TUPLE",
             engine=engine,
         )
         self.ws.create_blob("X").feed(X)
         self.ws.create_blob("w").feed(w)
         self.ws.create_blob("b").feed(b)
         self.ws.run(op)
         outputs[engine] = self.ws.blobs["Y"].fetch()
     np.testing.assert_allclose(
         outputs[""],
         outputs["NNPACK"],
         atol=1e-4,
         rtol=1e-4)
Example #5
def test__natsort_key_with_tuple_input_but_itemgetter_key_returns_split_second_element(x):
    # A key is applied before recursion, but not in the recursive calls.
    assume(len(x) <= 10)
    assume(not any(type(y) == float and isnan(y) for y in x))
    s = ''.join(repr(y) if type(y) in (float, long, int) else y for y in x)
    t = tuple(_number_extracter(s, _int_nosign_re, *int_nosafe_nolocale_nogroup))
    assert _natsort_key((s, s), itemgetter(1), ns.I) == t
Example #6
 def float_range(self, left, right):
     for f in (math.isnan, math.isinf):
         for x in (left, right):
             assume(not f(x))
     left, right = sorted((left, right))
     assert left <= right
     return strategy(floats(left, right))
Example #7
def test__natsort_key_with_float_and_signed_splits_input_into_string_and_signed_float_with_exponent(x):
    assume(len(x) <= 10)
    assume(not any(type(y) == float and isnan(y) for y in x))
    s = ''.join(repr(y) if type(y) in (float, long, int) else y for y in x)
    assert ns.F == ns.FLOAT
    assert ns.S == ns.SIGNED
    assert _natsort_key(s, None, ns.F | ns.S) == tuple(_number_extracter(s, _float_sign_exp_re, *float_nosafe_nolocale_nogroup))
Example #8
    def test_pooling_separate_stride_pad(self, stride_h, stride_w,
                                         pad_t, pad_l, pad_b,
                                         pad_r, kernel, size,
                                         input_channels,
                                         batch_size, order,
                                         op_type,
                                         gc, dc):
        assume(np.max([pad_t, pad_l, pad_b, pad_r]) < kernel)

        op = core.CreateOperator(
            op_type,
            ["X"],
            ["Y"],
            stride_h=stride_h,
            stride_w=stride_w,
            pad_t=pad_t,
            pad_l=pad_l,
            pad_b=pad_b,
            pad_r=pad_r,
            kernel=kernel,
            order=order,
        )
        X = np.random.rand(
            batch_size, size, size, input_channels).astype(np.float32)

        if order == "NCHW":
            X = X.transpose((0, 3, 1, 2))
        self.assertDeviceChecks(dc, op, [X], [0])
        if 'MaxPool' not in op_type:
            self.assertGradientChecks(gc, op, [X], 0, [0])
Example #9
 def test_can_get_multiple_models_with_unique_field(self, companies):
     assume(len(companies) > 1)
     for c in companies:
         self.assertIsNotNone(c.pk)
     self.assertEqual(
         len({c.pk for c in companies}), len({c.name for c in companies})
     )
Example #10
File: conv_test.py Project: caffe2/caffe2
    def test_convolution_gradients(self, stride, pad, kernel, dilation, size,
                                   input_channels, output_channels, batch_size,
                                   order, engine, gc, dc):
        assume(size >= dilation * (kernel - 1) + 1)
        assume("" == engine or 1 == dilation)

        op = core.CreateOperator(
            "Conv",
            ["X", "w", "b"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            order=order,
            engine=engine,
        )
        X = np.random.rand(
            batch_size, size, size, input_channels).astype(np.float32) - 0.5
        w = np.random.rand(
            output_channels, kernel, kernel, input_channels).astype(np.float32)\
            - 0.5
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = X.transpose((0, 3, 1, 2))
            w = w.transpose((0, 3, 1, 2))

        self.assertDeviceChecks(dc, op, [X, w, b], [0])
        for i in range(3):
            self.assertGradientChecks(gc, op, [X, w, b], i, [0])
Example #11
    def test_pooling_3d(self, stride, pad, kernel, size, input_channels,
                        batch_size, order, op_type, engine, gc, dc):
        assume(pad < kernel)
        assume(size + pad + pad >= kernel)
        # Some cases here could be computed with global pooling but are
        # instead computed with the general implementation, which is slower
        # but should still be correct.
        op = core.CreateOperator(
            op_type,
            ["X"],
            ["Y"],
            strides=[stride] * 3,
            kernels=[kernel] * 3,
            pads=[pad] * 6,
            order=order,
            engine=engine,
        )
        X = np.random.rand(
            batch_size, size, size, size, input_channels).astype(np.float32)
        if order == "NCHW":
            X = X.transpose((0, 4, 1, 2, 3))

        self.assertDeviceChecks(dc, op, [X], [0], threshold=0.001)
        if 'MaxPool' not in op_type:
            self.assertGradientChecks(gc, op, [X], 0, [0], threshold=0.001)
Example #12
    def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
        if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):
            # cudnn 5.1 does not support int.
            assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)

        dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
        X = (np.random.rand(*dims) * 16).astype(dtype)

        if null_axes:
            axes = None
            op = core.CreateOperator(
                "Transpose",
                ["input"], ["output"],
                engine=engine)
        else:
            np.random.seed(int(seed))
            axes = [int(v) for v in list(np.random.permutation(X.ndim))]
            op = core.CreateOperator(
                "Transpose",
                ["input"], ["output"],
                axes=axes,
                engine=engine)

        def transpose_ref(x, axes):
            return (np.transpose(x, axes),)

        self.assertReferenceChecks(gc, op, [X, axes],
                                   transpose_ref)
Example #13
    def test_dropout_ratio0(self, X, in_place, output_mask, engine, gc, dc):
        """Test with ratio=0 for a deterministic reference impl."""
        # TODO(lukeyeager): enable this path when the op is fixed
        if in_place:
            # Skip if trying in-place on GPU
            assume(gc.device_type != caffe2_pb2.CUDA)
            # If in-place on CPU, don't compare with GPU
            dc = dc[:1]
        is_test = not output_mask
        op = core.CreateOperator("Dropout", ["X"],
                                 ["X" if in_place else "Y"] +
                                 (["mask"] if output_mask else []),
                                 ratio=0.0, engine=engine,
                                 is_test=is_test)

        self.assertDeviceChecks(dc, op, [X], [0])
        if not is_test:
            self.assertGradientChecks(gc, op, [X], 0, [0])

        def reference_dropout_ratio0(x):
            return (x,) if is_test else (x, np.ones(x.shape, dtype=bool))
        self.assertReferenceChecks(
            gc, op, [X], reference_dropout_ratio0,
            # Don't check the mask with cuDNN because it's packed data
            outputs_to_check=None if engine != 'CUDNN' else [0])
Example #14
def test_functiondef_annotated_simple_return(functiondef_node):
    """Test whether type annotations are set properly for a FunctionDef node representing a function definition
    with type annotations."""
    arg_names = [arg.name for arg in functiondef_node.args.args]
    assume(functiondef_node.name not in arg_names)
    for arg in functiondef_node.args.args:
        assume(arg_names.count(arg.name) == 1)
    module, inferer = cs._parse_text(functiondef_node)
    functiondef_node = next(module.nodes_of_class(astroid.FunctionDef))
    # arguments and annotations are not changing, so test this once.
    for i in range(len(functiondef_node.args.annotations)):
        arg_name = functiondef_node.args.args[i].name
        expected_type = inferer.type_constraints.resolve(functiondef_node.type_environment.lookup_in_env(arg_name)).getValue()
        # need to do by name because annotations must be name nodes.
        if isinstance(expected_type, _GenericAlias):
            assert _gorg(expected_type).__name__ == functiondef_node.args.annotations[i].name
        else:
            assert expected_type.__name__ == functiondef_node.args.annotations[i].name
    # test return type
    return_node = functiondef_node.body[0].value
    expected_rtype = inferer.type_constraints.resolve(functiondef_node.type_environment.lookup_in_env(return_node.name)).getValue()
    if isinstance(expected_rtype, _GenericAlias):
        assert _gorg(expected_rtype).__name__ == functiondef_node.returns.name
    else:
        assert expected_rtype.__name__ == functiondef_node.returns.name
Example #15
        def test_id_wwn_with_extension(self, a_device):
            """
            Test that the ID_WWN_WITH_EXTENSION has a corresponding link.

            Assert that the device is a block device if it has an
            ID_WWN_WITH_EXTENSION property.

            Skip any multipathed paths, see:
            https://bugzilla.redhat.com/show_bug.cgi?id=1263441.
            """
            assume('DM_MULTIPATH_DEVICE_PATH' not in a_device)
            id_wwn = a_device['ID_WWN_WITH_EXTENSION']
            assert a_device.subsystem == u'block'

            id_path = '/dev/disk/by-id'
            link_name = "wwn-%s" % id_wwn
            match = next(
                (d for d in os.listdir(id_path) if d == link_name),
                None
            )
            assert match is not None

            link_path = os.path.join(id_path, match)
            link_target = os.readlink(link_path)
            target_path = os.path.normpath(os.path.join(id_path, link_target))
            assert target_path == a_device.device_node
Example #16
def problem(draw):
    b = hbytes(draw(st.binary(min_size=1, max_size=8)))
    m = int_from_bytes(b) * 256
    assume(m > 0)
    marker = draw(st.binary(max_size=8))
    bound = draw(st.integers(0, m - 1))
    return (b, marker, bound)
Example #17
 def test_timings(self, stride, pad, kernel, size,
                  input_channels, output_channels, batch_size):
     assume(stride <= kernel)
     X = np.random.rand(
         batch_size, input_channels, size, size).astype(np.float32) - 0.5
     w = np.random.rand(output_channels, input_channels,
                        kernel, kernel).astype(np.float32) - 0.5
     b = np.random.rand(output_channels).astype(np.float32) - 0.5
     order = "NCHW"
     times = {}
     for engine in ["", "NNPACK"]:
         net = core.Net(engine + "_test")
         net.Conv(
             ["X", "W", "b"], "Y",
             order=order,
             kernel=kernel,
             stride=stride,
             pad=pad,
             kts="TUPLE",
             engine=engine,
         )
         self.ws.create_blob("X").feed(X)
         self.ws.create_blob("W").feed(w)
         self.ws.create_blob("b").feed(b)
         self.ws.run(net)
         times[engine] = benchmark(self.ws, net)
     print("Speedup for NNPACK: {:.2f}".format(
         times[""] / times["NNPACK"]))
Example #18
    def test_elementwise_sqrsum(self, n, dtype, gc, dc):
        if dtype == np.float16:
            # fp16 is only supported with CUDA
            assume(gc.device_type == caffe2_pb2.CUDA)
            dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]

        X = np.random.rand(n).astype(dtype)

        def sumsqr_op(X):
            return [np.sum(X * X)]

        op = core.CreateOperator(
            "SumSqrElements",
            ["X"],
            ["y"]
        )

        threshold = 0.01 if dtype == np.float16 else 0.005

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=[X],
            reference=sumsqr_op,
            threshold=threshold,
        )
Example #19
def test_write_count(data, length):
    """write throws errors when invalid count is given"""
    assume(not (0 <= length <= len(data)))

    with raises(WriteError):
        stream = BitStream()
        stream.write(data, uint8, length)
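
These excerpts omit the @given decorators that drive them, so none run as-is. A minimal self-contained analogue of Example #19 (the strategies and test name here are assumptions for illustration, not the original project's code):

from hypothesis import assume, given, strategies as st

@given(data=st.binary(), length=st.integers(min_value=-100, max_value=100))
def test_invalid_count_is_rejected(data, length):
    # assume() discards any draw where length would be a valid count,
    # so the body only ever sees invalid counts.
    assume(not (0 <= length <= len(data)))
    assert length < 0 or length > len(data)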
Example #20
    def test_parse_stream(
        self, structure_and_messages1, structure_and_messages2, structure_and_messages3
    ):
        """
        L{Parser.parse_stream} returns an iterable of completed and then
        incompleted tasks.
        """
        _, messages1 = structure_and_messages1
        _, messages2 = structure_and_messages2
        _, messages3 = structure_and_messages3
        # Need at least one non-dropped message in partial tree:
        assume(len(messages3) > 1)
        # Need unique UUIDs per task:
        assume(
            len(set(m[0][TASK_UUID_FIELD] for m in (messages1, messages2, messages3)))
            == 3
        )

        # Two complete tasks, one incomplete task:
        all_messages = (messages1, messages2, messages3[:-1])

        all_tasks = list(
            Parser.parse_stream(
                [m for m in chain(*zip_longest(*all_messages)) if m is not None]
            )
        )
        assertCountEqual(
            self, all_tasks, [parse_to_task(msgs) for msgs in all_messages]
        )
Example #21
File: tokens_test.py Project: hypothesis/h
    def test_returns_none_for_malformed_header_fuzz(self, header, pyramid_request):
        assume(not header.startswith("Bearer "))
        pyramid_request.headers["Authorization"] = header

        result = tokens.auth_token(pyramid_request)

        assert result is None
Example #22
 def test_returns_none_on_missing_creds(self, args):
     _, key, secret, _, _ = args
     assume(key is None or secret is None)
     with self.mock_auth(succeed=True):
         client = api.EPOClient(*args)
         token = client.authenticate()
         self.assertIsNone(token)
Example #23
def test_group_ref_is_not_shared_between_identical_regex(data):
    pattern = re.compile(u"^(.+)\\1\\Z", re.UNICODE)
    x = data.draw(base_regex_strategy(pattern))
    y = data.draw(base_regex_strategy(pattern))
    assume(x != y)
    assert pattern.match(x).end() == len(x)
    assert pattern.match(y).end() == len(y)
Example #24
 def inner(ex):
     # See https://github.com/python/typing/issues/177
     if sys.version_info[:2] >= (3, 6):
         assume(ex)
     assert isinstance(ex, type({}.items()))
     assert all(isinstance(elem, tuple) and len(elem) == 2 for elem in ex)
     assert all(all(isinstance(e, int) for e in elem) for elem in ex)
Example #25
def test_floats_in_tiny_interval_within_bounds(data, center):
    assume(not (math.isinf(next_down(center)) or math.isinf(next_up(center))))
    lo = Decimal.from_float(next_down(center)).next_plus()
    hi = Decimal.from_float(next_up(center)).next_minus()
    assert float(lo) < lo < center < hi < float(hi)
    val = data.draw(st.floats(lo, hi))
    assert lo < val < hi
Example #26
 def _dtype_conversion(x, dtype, gc, dc):
     """SequenceMask only supports fp16 with CUDA."""
     if dtype == np.float16:
         assume(gc.device_type == caffe2_pb2.CUDA)
         dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
         x = x.astype(dtype)
     return x, dc
Example #27
def test_on_offset_implementations(dt, offset):
    assume(not offset.normalize)
    # check that the class-specific implementations of onOffset match
    # the general case definition:
    #   (dt + offset) - offset == dt
    compare = (dt + offset) - offset
    assert offset.onOffset(dt) == (compare == dt)
Example #28
def test_fido_indexing(queries):
    query1, query2 = queries

    # This is a work around for an aberration where the filter was not catching
    # this.
    assume(query1.attrs[1].start != query2.attrs[1].start)

    res = Fido.search(query1 | query2)

    assert len(res) == 2
    assert len(res[0]) == 1
    assert len(res[1]) == 1

    aa = res[0, 0]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 1
    assert len(aa.get_response(0)) == 1

    aa = res[:, 0]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 2
    assert len(aa.get_response(0)) == 1

    aa = res[0, :]
    assert isinstance(aa, UnifiedResponse)
    assert len(aa) == 1

    with pytest.raises(IndexError):
        res[0, 0, 0]

    with pytest.raises(IndexError):
        res["saldkal"]

    with pytest.raises(IndexError):
        res[1.0132]
Example #29
    def test_jitter(self, step, jitter):
        """
        When ``jitter`` is specified, the values will all be within +/-
        ``jitter``.
        """
        jitter_values = backoff(
            step=step,
            jitter=jitter,
            maximum_step=None,
            timeout=None,
        )

        non_jitter_values = backoff(
            step=step,
            jitter=None,
            maximum_step=None,
            timeout=None,
        )

        some_values = islice(
            izip(jitter_values, non_jitter_values),
            0, 10
        )
        for x, y in some_values:
            assume(float('inf') not in (x, y))
            difference = abs(x - y)
            self.assertLessEqual(
                difference,
                jitter * 2,
                "x: {!r}, y: {!r}".format(x, y)
            )
Example #30
    def test_sparse_momentum_sgd(
        self, inputs, momentum, nesterov, lr, data_strategy, gc, dc
    ):
        w, grad, m = inputs

        # Create an indexing array containing values which index into grad
        indices = data_strategy.draw(
            hu.tensor(
                max_dim=1,
                min_value=1,
                max_value=grad.shape[0],
                dtype=np.int64,
                elements=st.sampled_from(np.arange(grad.shape[0])),
            ),
        )

        # Verify that the generated indices are unique
        hypothesis.assume(
            np.array_equal(
                np.unique(indices.flatten()),
                np.sort(indices.flatten())))

        # Sparsify grad
        grad = grad[indices]

        # Make momentum >= 0
        m = np.abs(m)

        # Convert lr to a numpy array
        lr = np.asarray([lr], dtype=np.float32)

        op = core.CreateOperator(
            "SparseMomentumSGDUpdate", ["grad", "m", "lr", "param", "indices"],
            ["adjusted_grad", "m", "param"],
            momentum=momentum,
            nesterov=int(nesterov),
            device_option=gc
        )

        # Reference
        def momentum_sgd(grad, m, lr):
            lr = lr[0]
            if not nesterov:
                adjusted_gradient = lr * grad + momentum * m
                return (adjusted_gradient, adjusted_gradient)
            else:
                m_new = momentum * m + lr * grad
                return ((1 + momentum) * m_new - momentum * m, m_new)

        def sparse(grad, m, lr, param, i):
            grad_new, m_new = momentum_sgd(grad, m[i], lr)
            m[i] = m_new
            param[i] -= grad_new
            return (grad_new, m, param)

        self.assertReferenceChecks(
            gc,
            op,
            [grad, m, lr, w, indices],
            sparse)
Example #31
def test_can_generate_non_utc():
    times(timezones=timezones()).filter(
        lambda d: assume(d.tzinfo) and d.tzinfo.zone != "UTC"
    ).validate()
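
A note on Example #31: assume(cond) returns True when it does not reject the current example, which is what allows chaining it with `and` inside the filter lambda.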
Example #32
def test_surface_set_device_offset(x_offset, y_offset):
    assume(not any(math.isnan(v) for v in [x_offset, y_offset]))

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
    surface.set_device_offset(x_offset, y_offset)
Example #33
def assume_not_overflowing(tensor, qparams):
    min_value, max_value = _get_valid_min_max(qparams)
    assume(tensor.min() >= min_value)
    assume(tensor.max() <= max_value)
    return True
Example #34
    def _test_conv_api_impl(
        self, module_name, qconv_module, conv_module, batch_size,
        in_channels_per_group, input_feature_map_size, out_channels_per_group,
        groups, kernel_size, stride, padding, dilation, X_scale, X_zero_point,
        W_scale, W_zero_point, Y_scale, Y_zero_point, use_bias, use_fused,
        use_channelwise,
    ):
        for i in range(len(kernel_size)):
            assume(input_feature_map_size[i] + 2 * padding[i]
                   >= dilation[i] * (kernel_size[i] - 1) + 1)

        in_channels = in_channels_per_group * groups
        out_channels = out_channels_per_group * groups
        (X, X_q, W, W_q, b) = _make_conv_test_input(
            batch_size, in_channels_per_group, input_feature_map_size,
            out_channels_per_group, groups, kernel_size, X_scale, X_zero_point,
            W_scale, W_zero_point, use_bias, use_channelwise)

        qconv_module.set_weight_bias(W_q, b)
        qconv_module.scale = Y_scale
        qconv_module.zero_point = Y_zero_point

        if use_fused:
            conv_module[0].weight.data = W
            if use_bias:
                conv_module[0].bias.data = b
        else:
            conv_module.weight.data = W
            if use_bias:
                conv_module.bias.data = b

        # Test members
        self.assertTrue(module_name in str(qconv_module))
        self.assertTrue(hasattr(qconv_module, '_packed_params'))
        self.assertTrue(hasattr(qconv_module, 'scale'))
        self.assertTrue(hasattr(qconv_module, 'zero_point'))

        # Test properties
        self.assertEqual(W_q, qconv_module.weight())
        if use_bias:
            self.assertEqual(b, qconv_module.bias())
        self.assertEqual(Y_scale, qconv_module.scale)
        self.assertEqual(Y_zero_point, qconv_module.zero_point)

        # Test forward
        Y_exp = conv_module(X)
        Y_exp = torch.quantize_per_tensor(
            Y_exp, scale=Y_scale, zero_point=Y_zero_point, dtype=torch.quint8)
        Y_act = qconv_module(X_q)

        # Make sure the results match
        # assert_array_almost_equal compares using the following formula:
        #     abs(desired-actual) < 1.5 * 10**(-decimal)
        # (https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html)
        # We use decimal = 0 to ignore off-by-1 differences between reference
        # and test. Off-by-1 differences arise due to the order of round and
        # zero_point addition operation, i.e., if addition followed by round is
        # used by reference and round followed by addition is used by test, the
        # results may differ by 1.
        # For example, the result of round(2.5) + 1 is 3 while round(2.5 + 1) is
        # 4 assuming the rounding mode is round-to-nearest, ties-to-even.
        np.testing.assert_array_almost_equal(
            Y_exp.int_repr().numpy(), Y_act.int_repr().numpy(), decimal=0)

        # Test serialization of quantized Conv Module using state_dict
        model_dict = qconv_module.state_dict()
        self.assertEqual(model_dict['weight'], W_q)
        if use_bias:
            self.assertEqual(model_dict['bias'], b)
        bytes_io = io.BytesIO()
        torch.save(model_dict, bytes_io)
        bytes_io.seek(0)
        loaded_dict = torch.load(bytes_io)
        for key in loaded_dict:
            self.assertEqual(model_dict[key], loaded_dict[key])
        loaded_qconv_module = type(qconv_module)(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, use_bias, padding_mode="zeros")
        loaded_qconv_module.load_state_dict(loaded_dict)

        self.assertTrue(dir(loaded_qconv_module) == dir(qconv_module))
        self.assertTrue(module_name in str(loaded_qconv_module))
        self.assertTrue(hasattr(loaded_qconv_module, '_packed_params'))
        self.assertTrue(hasattr(loaded_qconv_module, '_weight_bias'))

        self.assertEqual(qconv_module.weight(), loaded_qconv_module.weight())
        if use_bias:
            self.assertEqual(qconv_module.bias(), loaded_qconv_module.bias())
        self.assertEqual(qconv_module.scale, loaded_qconv_module.scale)
        self.assertEqual(qconv_module.zero_point,
                         loaded_qconv_module.zero_point)
        Y_loaded = loaded_qconv_module(X_q)
        np.testing.assert_array_almost_equal(
            Y_exp.int_repr().numpy(), Y_loaded.int_repr().numpy(), decimal=0)

        # The below check is meant to ensure that `torch.save` and `torch.load`
        # serialization works, however it is currently broken by the following:
        # https://github.com/pytorch/pytorch/issues/24045
        #
        # Instead, we currently check that the proper exception is thrown on
        # save.
        # <start code>
        # b = io.BytesIO()
        # torch.save(conv_under_test, b)
        # b.seek(0)
        # loaded_conv = torch.load(b)
        #
        # self.assertEqual(loaded_qconv_module.bias(), qconv_module.bias())
        # self.assertEqual(loaded_qconv_module.scale, qconv_module.scale)
        # self.assertEqual(loaded_qconv_module.zero_point,
        #                  qconv_module.zero_point)
        # <end code>
        with self.assertRaisesRegex(
            RuntimeError, r'torch.save\(\) is not currently supported'
        ):
            bytes_io = io.BytesIO()
            torch.save(qconv_module, bytes_io)

        # JIT testing
        self.checkScriptable(
            qconv_module, list(zip([X_q], [Y_exp])),
            check_save_load=True)

        # Test from_float
        conv_module.qconfig = torch.quantization.default_qconfig
        torch.quantization.prepare(conv_module, inplace=True)
        conv_module(X.float())
        converted_qconv_module = torch.nn.Sequential(conv_module)
        torch.quantization.convert(converted_qconv_module, inplace=True)

        # Smoke test to make sure the module actually runs
        if use_bias:
            if use_fused:
                self.assertEqual(conv_module[0].bias,
                                 converted_qconv_module[0].bias())
            else:
                self.assertEqual(conv_module.bias,
                                 converted_qconv_module[0].bias())
        # Smoke test extra_repr
        self.assertTrue(module_name in str(converted_qconv_module))
Example #35
def broadcasting_params(draw,
                        batch_shape,
                        params_event_ndims,
                        event_dim=None,
                        enable_vars=False,
                        constraint_fn_for=lambda param: identity_fn,
                        mutex_params=(),
                        dtype=np.float32):
    """Streategy for drawing parameters which jointly have the given batch shape.

  Specifically, the batch shapes of the returned parameters will broadcast to
  the requested batch shape.

  The dtypes of the returned parameters are determined by their respective
  constraint functions.

  Args:
    draw: Hypothesis strategy sampler supplied by `@hps.composite`.
    batch_shape: A `TensorShape`.  The returned parameters' batch shapes will
      broadcast to this.
    params_event_ndims: Python `dict` mapping the name of each parameter to a
      Python `int` giving the event ndims for that parameter.
    event_dim: Optional Python int giving the size of each parameter's event
      dimensions (except where overridden by any applicable constraint
      functions).  This is shared across all parameters, permitting square event
      matrices, compatible location and scale Tensors, etc. If omitted,
      Hypothesis will choose one.
    enable_vars: TODO(bjp): Make this `True` all the time and put variable
      initialization in slicing_test. If `False`, the returned parameters are
      all `tf.Tensor`s and not {`tf.Variable`, `tfp.util.DeferredTensor`
      `tfp.util.TransformedVariable`}.
    constraint_fn_for: Python callable mapping parameter name to constraint
      function.  The latter is itself a Python callable which converts an
      unconstrained Tensor (currently with float32 values from -200 to +200)
      into one that meets the parameter's validity constraints.
    mutex_params: Python iterable of Python sets.  Each set gives a clique of
      mutually exclusive parameters (e.g., the 'probs' and 'logits' of a
      Categorical).  At most one parameter from each set will appear in the
      result.
    dtype: Dtype for generated parameters.

  Returns:
    params: A Hypothesis strategy for drawing Python `dict`s mapping parameter
      name to a `tf.Tensor`, `tf.Variable`, `tfp.util.DeferredTensor`, or
      `tfp.util.TransformedVariable`.  The batch shapes of the returned
      parameters broadcast together to the supplied `batch_shape`.  Only
      parameters whose names appear as keys in `params_event_ndims` will appear
      (but possibly not all of them, depending on `mutex_params`).
  """
    if event_dim is None:
        event_dim = draw(hps.integers(min_value=2, max_value=6))

    params_event_ndims = params_event_ndims or {}
    remaining_params = set(params_event_ndims.keys())
    params_to_use = []
    while remaining_params:
        param = draw(hps.sampled_from(sorted(remaining_params)))
        params_to_use.append(param)
        remaining_params.remove(param)
        for mutex_set in mutex_params:
            if param in mutex_set:
                remaining_params -= mutex_set

    param_batch_shapes = draw(
        broadcasting_named_shapes(batch_shape, params_to_use))
    params_kwargs = dict()
    for param in params_to_use:
        param_batch_shape = param_batch_shapes[param]
        param_event_rank = params_event_ndims[param]
        param_shape = (tensorshape_util.as_list(param_batch_shape) +
                       [event_dim] * param_event_rank)

        # Reduce our risk of exceeding TF kernel broadcast limits.
        hp.assume(len(param_shape) < 6)

        # TODO(axch): Can I replace `params_event_ndims` and `constraint_fn_for`
        # with a map from params to `Suppport`s, and use `tensors_in_support` here
        # instead of this explicit `constrained_tensors` function?
        param_strategy = constrained_tensors(constraint_fn_for(param),
                                             param_shape,
                                             dtype=dtype)
        params_kwargs[param] = draw(
            maybe_variable(param_strategy,
                           enable_vars=enable_vars,
                           dtype=dtype,
                           name=param))
    return params_kwargs
Example #36
def test_unique_array_without_fill(arr):
    # This test covers the collision-related branches for fully dense unique arrays.
    # Choosing 25 of 256 possible elements means we're almost certain to see collisions
    # thanks to the 'birthday paradox', but finding unique elements is still easy.
    assume(len(set(arr)) == arr.size)
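
Example #36's assume only makes sense under a decorator that draws unique arrays. A hedged sketch of such a setup (dtype, shape, and strategy are assumptions chosen to match the comment's "25 of 256 possible elements"):

import numpy as np
from hypothesis import assume, given
from hypothesis.extra import numpy as nps

@given(nps.arrays(dtype=np.uint8, shape=25, unique=True))
def test_unique_array_without_fill(arr):
    # With unique=True the strategy already guarantees distinct elements,
    # so this assume restates the invariant rather than filtering.
    assume(len(set(arr)) == arr.size)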
Example #37
 def build_complex(draw):
     value = draw(strategy)
     hypothesis.assume(min_value <= value)
     if max_value is not None:
         hypothesis.assume(max_value >= value)
     return value
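
An editorial note on Example #37: rejecting out-of-range draws with assume works, but when the underlying strategy supports bounds directly (e.g. st.floats(min_value=..., max_value=...) or st.integers(min_value=..., max_value=...)), constraining the draw is generally more efficient than rejection via assume.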
Example #38
    def test_1x1_conv(
        self,
        op_type,
        N,
        G,
        DX,
        DY,
        H,
        W,
        use_bias,
        order,
        force_algo_fwd,
        force_algo_dgrad,
        force_algo_wgrad,
        gc,
        dc,
    ):
        if hiputl.run_in_hip(gc, dc):
            assume(order == "NCHW")
        if order == "NHWC":
            G = 1

        C = G * DX
        M = G * DY

        op = core.CreateOperator(
            op_type,
            ["X", "filter", "bias"] if use_bias else ["X", "filter"],
            ["Y"],
            stride_h=1,
            stride_w=1,
            pad_t=0,
            pad_l=0,
            pad_b=0,
            pad_r=0,
            kernel=1,
            order=order,
            group=G,
            force_algo_fwd=force_algo_fwd,
            force_algo_dgrad=force_algo_dgrad,
            force_algo_wgrad=force_algo_wgrad,
        )

        if order == "NCHW":
            X = np.random.randn(N, C, H, W).astype(np.float32)
            filter = np.random.randn(M, DX, 1, 1).astype(np.float32)
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32)
            filter = np.random.randn(M, 1, 1, DX).astype(np.float32)
        bias = np.random.randn(M).astype(np.float32)
        inputs = [X, filter, bias] if use_bias else [X, filter]

        def conv_1x1_nchw_ref(X, filter, bias=None):
            if N == 0:
                Y = np.zeros(shape=(N, M, H, W), dtype=np.float32)
                return [Y]

            X = X.reshape(N, G, DX, -1)
            filter = filter.reshape(G, DY, DX)
            Y = np.zeros(shape=(N, G, DY, H * W), dtype=np.float32)
            for i in range(N):
                for j in range(G):
                    Y[i, j, :, :] = np.dot(filter[j, :, :], X[i, j, :, :])
            Y = Y.reshape(N, M, H, W)
            if bias is not None:
                bias = bias.reshape(1, M, 1, 1)
                Y = np.add(Y, bias)
            return [Y]

        def conv_1x1_nhwc_ref(X, filter, bias=None):
            if N == 0:
                Y = np.zeros(shape=(N, H, W, M), dtype=np.float32)
                return [Y]

            X = X.reshape(N, -1, G, DX)
            filter = filter.reshape(G, DY, DX)
            Y = np.zeros(shape=(N, H * W, G, DY), dtype=np.float32)
            for i in range(N):
                for j in range(G):
                    Y[i, :, j, :] = np.dot(X[i, :, j, :],
                                           filter[j, :, :].transpose())
            Y = Y.reshape(N, H, W, M)
            if bias is not None:
                bias = bias.reshape(1, 1, 1, M)
                Y = np.add(Y, bias)
            return [Y]

        if order == "NCHW":
            conv_1x1_ref = conv_1x1_nchw_ref
        else:
            conv_1x1_ref = conv_1x1_nhwc_ref
        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=inputs,
                                   reference=conv_1x1_ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
Example #39
def draw_ordered_with_assume(draw):
    x = draw(st.floats())
    y = draw(st.floats())
    assume(x < y)
    return (x, y)
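
Functions like Example #39's draw_ordered_with_assume are written as bodies for Hypothesis's @st.composite decorator. A minimal self-contained sketch of the same pattern (the decorator and driving test are assumptions added for context):

from hypothesis import assume, given, strategies as st

@st.composite
def ordered_pair(draw):
    x = draw(st.floats(allow_nan=False))
    y = draw(st.floats(allow_nan=False))
    # Reject draws that are not strictly ordered; Hypothesis retries.
    assume(x < y)
    return (x, y)

@given(ordered_pair())
def test_pair_is_ordered(pair):
    assert pair[0] < pair[1]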
Example #40
    def test_convolution_layout(
        self,
        op_type,
        stride,
        pad,
        kernel,
        dilation,
        size,
        input_channels,
        output_channels,
        batch_size,
        use_bias,
        gc,
        dc,
    ):
        assume(size >= dilation * (kernel - 1) + 1)

        X = (np.random.rand(batch_size, size, size, input_channels).astype(
            np.float32) - 0.5)
        w = (np.random.rand(output_channels, kernel, kernel,
                            input_channels).astype(np.float32) - 0.5)
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        Output = collections.namedtuple("Output", ["Y", "engine", "order"])
        outputs = []

        for order in ["NCHW", "NHWC"]:
            engine_list = [""]
            if hiputl.run_in_hip(gc, dc):
                if order == "NCHW":
                    engine_list.append("MIOPEN")
            else:
                if _cudnn_supports(dilation=(dilation > 1),
                                   nhwc=(order == "NHWC")):
                    engine_list.append("CUDNN")

            for engine in engine_list:
                op = core.CreateOperator(
                    op_type,
                    ["X", "w", "b"] if use_bias else ["X", "w"],
                    ["Y"],
                    stride=stride,
                    kernel=kernel,
                    dilation=dilation,
                    pad=pad,
                    order=order,
                    engine=engine,
                    device_option=gc,
                    exhaustive_search=True,
                )
                if order == "NCHW":
                    X_f = utils.NHWC2NCHW(X)
                    w_f = utils.NHWC2NCHW(w)
                else:
                    X_f = X
                    w_f = w
                self.assertDeviceChecks(
                    dc, op, [X_f, w_f, b] if use_bias else [X_f, w_f], [0])
                self.ws.create_blob("X").feed(X_f, device_option=gc)
                self.ws.create_blob("w").feed(w_f, device_option=gc)
                self.ws.create_blob("b").feed(b, device_option=gc)
                self.ws.run(op)
                outputs.append(
                    Output(Y=self.ws.blobs["Y"].fetch(),
                           engine=engine,
                           order=order))

        def canonical(o):
            if o.order == "NHWC":
                return utils.NHWC2NCHW(o.Y)
            else:
                return o.Y

        for o in outputs:
            np.testing.assert_allclose(canonical(outputs[0]),
                                       canonical(o),
                                       atol=1e-4,
                                       rtol=1e-4)
Example #41
def test_unique_array_with_fill_can_use_all_elements(arr):
    assume(len(set(arr)) == arr.size)
Example #42
    def test_convolution_gradients(
        self,
        op_type,
        stride,
        pad,
        kernel,
        dilation,
        size,
        input_channels,
        output_channels,
        batch_size,
        group,
        order,
        engine,
        use_bias,
        force_algo_fwd,
        force_algo_dgrad,
        force_algo_wgrad,
        gc,
        dc,
    ):
        # TODO: Group conv in NHWC not implemented for GPU yet.
        assume(group == 1
               or (order == "NCHW" or gc.device_type == caffe2_pb2.CPU)
               and engine != "MKLDNN")
        if group != 1 and order == "NHWC":
            dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]

        input_channels *= group
        output_channels *= group
        dkernel = dilation * (kernel - 1) + 1

        if engine == "CUDNN":
            if hiputl.run_in_hip(gc, dc):
                assume((order == "NCHW") and not (dilation > 1 and group > 1))
            else:
                assume(
                    _cudnn_supports(dilation=(dilation > 1),
                                    nhwc=(order == "NHWC"),
                                    backward=True))

        assume(engine != "MKLDNN" or use_bias is True)

        op = core.CreateOperator(
            op_type,
            ["X", "w", "b"] if use_bias else ["X", "w"],
            ["Y"],
            stride=stride,
            kernel=kernel,
            dilation=dilation,
            pad=pad,
            group=group,
            order=order,
            engine=engine,
            force_algo_fwd=force_algo_fwd,
            force_algo_dgrad=force_algo_dgrad,
            force_algo_wgrad=force_algo_wgrad,
        )
        X = (np.random.rand(batch_size, size, size, input_channels).astype(
            np.float32) - 0.5)
        w = (np.random.rand(output_channels, kernel, kernel,
                            int(input_channels / group)).astype(np.float32) -
             0.5)
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)

        inputs = [X, w, b] if use_bias else [X, w]
        # Error handling path.
        if size + pad + pad < dkernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return

        try:
            self.assertDeviceChecks(dc, op, inputs, [0])
        except RuntimeError as e:
            es = str(e)
            # CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM should always have
            # implementation
            if ("status == CUDNN_STATUS_SUCCESS" not in es
                    or "CUDNN_STATUS_NOT_SUPPORTED" not in es
                    or force_algo_fwd == 0):
                raise e

        for i in range(len(inputs)):
            try:
                self.assertGradientChecks(gc, op, inputs, i, [0])
            except RuntimeError as e:
                es = str(e)
                if ("status == CUDNN_STATUS_SUCCESS" not in es
                        or "CUDNN_STATUS_NOT_SUPPORTED" not in es):
                    raise e
Example #43
    def test_convolution_separate_stride_pad_gradients(
        self,
        op_type,
        stride_h,
        stride_w,
        pad_t,
        pad_l,
        pad_b,
        pad_r,
        kernel,
        size,
        input_channels,
        output_channels,
        batch_size,
        group,
        order,
        engine,
        shared_buffer,
        use_bias,
        gc,
        dc,
    ):
        # TODO: Group conv in NHWC not implemented for GPU yet.
        assume(group == 1 or order == "NCHW"
               or gc.device_type == caffe2_pb2.CPU)
        if group != 1 and order == "NHWC":
            dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]
        # Group conv not implemented with EIGEN engine.
        assume(group == 1 or engine != "EIGEN")

        input_channels *= group
        output_channels *= group

        op = core.CreateOperator(
            op_type,
            ["X", "w", "b"] if use_bias else ["X", "w"],
            ["Y"],
            stride_h=stride_h,
            stride_w=stride_w,
            pad_t=pad_t,
            pad_l=pad_l,
            pad_b=pad_b,
            pad_r=pad_r,
            kernel=kernel,
            group=group,
            order=order,
            engine=engine,
            shared_buffer=int(shared_buffer),
        )
        X = (np.random.rand(batch_size, size, size, input_channels).astype(
            np.float32) - 0.5)
        w = (np.random.rand(output_channels, kernel, kernel,
                            int(input_channels / group)).astype(np.float32) -
             0.5)
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        if order == "NCHW":
            X = utils.NHWC2NCHW(X)
            w = utils.NHWC2NCHW(w)

        inputs = [X, w, b] if use_bias else [X, w]

        # Error handling path.
        if size + pad_r + pad_l < kernel or size + pad_t + pad_b < kernel:
            with self.assertRaises(RuntimeError):
                self.assertDeviceChecks(dc, op, inputs, [0])
            return

        self.assertDeviceChecks(dc, op, inputs, [0])
        for i in range(len(inputs)):
            self.assertGradientChecks(gc, op, inputs, i, [0])
Example #44
 def test_close_with_text(self, text):
     assume(len(text) > 0)
     response = WebSocketConnection().send({'close': True, 'text': text})
     self.assert_websocket_upgrade(response,
                                   text.encode('ascii'),
                                   expect_close=True)
Example #45
    def _nd_convolution(
        self,
        n,
        input_channels_per_group,
        output_channels_per_group,
        batch_size,
        stride,
        size,
        kernel,
        dilation,
        pad,
        group,
        order,
        use_bias,
        engine,
        force_algo_fwd,
        force_algo_dgrad,
        force_algo_wgrad,
        gc,
        dc,
    ):
        # TODO: Group conv in NHWC not implemented for GPU yet.
        # TODO: Group 1D conv in NCHW not implemented for GPU yet.
        assume(group == 1 or (n != 1 and order == "NCHW")
               or gc.device_type == caffe2_pb2.CPU)
        if group != 1 and (n == 1 or order == "NHWC"):
            dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]

        input_channels = group * input_channels_per_group
        output_channels = group * output_channels_per_group

        dkernel = dilation * (kernel - 1) + 1
        for op_type in ["Conv", "Conv" + str(n) + "D"]:
            op = core.CreateOperator(
                op_type,
                ["X", "w", "b"] if use_bias else ["X", "w"],
                ["Y"],
                strides=[stride] * n,
                kernels=[kernel] * n,
                dilations=[dilation] * n,
                pads=[pad] * n * 2,
                group=group,
                order=order,
                engine=engine,
                force_algo_fwd=force_algo_fwd,
                force_algo_dgrad=force_algo_dgrad,
                force_algo_wgrad=force_algo_wgrad,
            )

            input_dims = [batch_size, input_channels]
            input_dims.extend([size] * n)
            filter_dims = [output_channels, input_channels // group]
            filter_dims.extend([kernel] * n)

            X = np.random.rand(*input_dims).astype(np.float32) - 0.5
            w = np.random.rand(*filter_dims).astype(np.float32) - 0.5
            b = np.random.rand(output_channels).astype(np.float32) - 0.5
            if order == "NHWC":
                X = utils.NCHW2NHWC(X)
                w = utils.NCHW2NHWC(w)

            inputs = [X, w, b] if use_bias else [X, w]

            if size + pad + pad < dkernel:
                with self.assertRaises(RuntimeError):
                    self.assertDeviceChecks(dc, op, inputs, [0])
                return

            self.assertDeviceChecks(dc, op, inputs, [0])
            for i in range(len(inputs)):
                self.assertGradientChecks(gc, op, inputs, i, [0])
Example #46
 def test_just_text(self, text):
     assume(len(text) > 0)
     # If content is sent, accept=True is implied.
     response = WebSocketConnection().send({'text': text})
     self.assert_websocket_upgrade(response, text.encode('ascii'))
Example #47
 def test_close_with_data(self, data):
     assume(len(data) > 0)
     response = WebSocketConnection().send({'close': True, 'bytes': data})
     self.assert_websocket_upgrade(response, data, expect_close=True)
Example #48
    def _test_slicing(self, data, dist_name, dist):
        strm = test_util.test_seed_stream()
        batch_shape = dist.batch_shape
        slices = data.draw(dhps.valid_slices(batch_shape))
        slice_str = 'dist[{}]'.format(', '.join(dhps.stringify_slices(slices)))
        # Make sure the slice string appears in Hypothesis' attempted example log
        hp.note('Using slice ' + slice_str)
        if not slices:  # Nothing further to check.
            return
        sliced_zeros = np.zeros(batch_shape)[slices]
        sliced_dist = dist[slices]
        hp.note('Using sliced distribution {}.'.format(sliced_dist))

        # Check that slicing modifies batch shape as expected.
        self.assertAllEqual(sliced_zeros.shape, sliced_dist.batch_shape)

        if not sliced_zeros.size:
            # TODO(b/128924708): Fix distributions that fail on degenerate empty
            #     shapes, e.g. Multinomial, DirichletMultinomial, ...
            return

        # Check that sampling of sliced distributions executes.
        with tfp_hps.no_tf_rank_errors():
            samples = self.evaluate(dist.sample(seed=strm()))
            sliced_dist_samples = self.evaluate(
                sliced_dist.sample(seed=strm()))

        # Come up with the slices for samples (which must also include event dims).
        sample_slices = (tuple(slices) if isinstance(
            slices, collections.Sequence) else (slices, ))
        if Ellipsis not in sample_slices:
            sample_slices += (Ellipsis, )
        sample_slices += tuple([slice(None)] *
                               tensorshape_util.rank(dist.event_shape))

        sliced_samples = samples[sample_slices]

        # Report sub-sliced samples (on which we compare log_prob) to hypothesis.
        hp.note('Sample(s) for testing log_prob ' + str(sliced_samples))

        # Check that sampling a sliced distribution produces the same shape as
        # slicing the samples from the original.
        self.assertAllEqual(sliced_samples.shape, sliced_dist_samples.shape)

        # Check that a sliced distribution can compute the log_prob of its own
        # samples (up to numerical validation errors).
        with tfp_hps.no_tf_rank_errors():
            try:
                lp = self.evaluate(dist.log_prob(samples))
            except tf.errors.InvalidArgumentError:
                # TODO(b/129271256): d.log_prob(d.sample()) should not fail
                #     validate_args checks.
                # We only tolerate this case for the non-sliced dist.
                return
            sliced_lp = self.evaluate(sliced_dist.log_prob(sliced_samples))

        # Check that the sliced dist's log_prob agrees with slicing the original's
        # log_prob.

        # This `hp.assume` is suppressing array sizes that cause the sliced and
        # non-sliced distribution to follow different Eigen code paths.  Those
        # different code paths lead to arbitrarily large variations in the results
        # at parameter settings that Hypothesis is all too good at finding.  Since
        # the purpose of this test is just to check that we got slicing right, those
        # discrepancies are a distraction.
        # TODO(b/140229057): Remove this `hp.assume`, if and when Eigen's numerics
        # become index-independent.
        all_packetized = (_all_packetized(dist)
                          and _all_packetized(sliced_dist)
                          and _all_packetized(samples)
                          and _all_packetized(sliced_samples))
        hp.note('Packetization check {}'.format(all_packetized))
        all_non_packetized = (_all_non_packetized(dist)
                              and _all_non_packetized(sliced_dist)
                              and _all_non_packetized(samples)
                              and _all_non_packetized(sliced_samples))
        hp.note('Non-packetization check {}'.format(all_non_packetized))
        hp.assume(all_packetized or all_non_packetized)

        self.assertAllClose(lp[slices],
                            sliced_lp,
                            atol=SLICING_LOGPROB_ATOL[dist_name],
                            rtol=SLICING_LOGPROB_RTOL[dist_name])
Example #49
 def test_just_bytes(self, data):
     assume(len(data) > 0)
     # If content is sent, accept=True is implied.
     response = WebSocketConnection().send({'bytes': data})
     self.assert_websocket_upgrade(response, data)