Code Example #1
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_input_validation(
    shapes: st.SearchStrategy[Tuple[Tuple[int, ...], Tuple[int, ...]]],
    data: st.DataObject,
):
    x_shape, k_shape = data.draw(shapes, label="x_shape, k_shape")
    x = mg.zeros(x_shape, dtype="float")
    k = mg.zeros(k_shape, dtype="float")

    with raises(ValueError):
        conv_nd(x, k, stride=1)
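The excerpt above omits the test's decorators; presumably `shapes` is supplied by pytest parametrization while `data` comes from Hypothesis. A minimal sketch of one plausible wiring, under that assumption (the strategy `_invalid_shape_pairs` is hypothetical and not taken from MyGrad's test suite):

import hypothesis.strategies as st
from hypothesis import given
import pytest

# Hypothetical stand-in: a strategy yielding an (x_shape, kernel_shape) pair
# whose channel dimensions disagree, so conv_nd should raise ValueError.
_invalid_shape_pairs = st.just(((1, 2, 4), (1, 3, 2)))

@pytest.mark.parametrize("shapes", [_invalid_shape_pairs])
@given(data=st.data())
def test_input_validation(shapes, data):
    ...  # body as in the example above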
Code Example #2
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_padding(ndim: int, data: st.DataObject):
    """Ensure that convolving a padding-only image with a commensurate kernel yields the single entry: 0"""
    padding = data.draw(st.integers(1, 3)
                        | st.tuples(*[st.integers(1, 3)] * ndim),
                        label="padding")
    x = Tensor(
        data.draw(
            hnp.arrays(shape=(1, 1) + (0, ) * ndim,
                       dtype=float,
                       elements=st.floats()),
            label="x",
        ))
    pad_tuple = padding if isinstance(padding, tuple) else (padding, ) * ndim
    kernel = data.draw(
        hnp.arrays(
            shape=(1, 1) + tuple(2 * p for p in pad_tuple),
            dtype=float,
            elements=st.floats(allow_nan=False, allow_infinity=False),
        ))
    out = conv_nd(x, kernel, padding=padding, stride=1)
    assert out.shape == (1, ) * x.ndim
    assert out.item() == 0.0

    out.sum().backward()
    assert x.grad.shape == x.shape
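The property exercised above can be seen with plain NumPy: an image whose spatial axes have length 0, padded by p zeros on each side, becomes an all-zero block of extent 2p, and a kernel spanning exactly 2p fits in exactly one position, so the lone output entry is 0. A 1D illustration (not using mygrad):

import numpy as np

p = 2
x = np.empty((1, 1, 0))                          # "padding-only" image: zero-length spatial axis
x_padded = np.pad(x, [(0, 0), (0, 0), (p, p)])   # shape (1, 1, 2p), all zeros
kernel = np.random.rand(1, 1, 2 * p)             # spans the padded axis exactly once
assert float(np.sum(x_padded * kernel)) == 0.0   # the single conv output is 0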
Code Example #3
def _conv_nd(x, w, stride, dilation=1):
    """ use mygrad-conv_nd forward pass for numerical derivative

        Returns
        -------
        numpy.ndarray"""
    return conv_nd(x, w, stride=stride, dilation=dilation, constant=True).data
Code Example #4
def test_conv_ND_fwd(data, x, num_filters):
    """ Test convs 1D-4D with various strides and dilations."""
    x = x[0:min(x.shape[0], 3), 0:min(x.shape[1], 3)]
    win_dim = x.ndim - 2
    win_shape = data.draw(st.tuples(*(st.integers(1, s)
                                      for s in x.shape[-win_dim:])),
                          label="win_shape")
    kernels = data.draw(hnp.arrays(dtype=float,
                                   shape=(num_filters, x.shape[1], *win_shape),
                                   elements=st.floats(-10, 10)),
                        label="kernels")

    stride = data.draw(st.tuples(*(st.integers(1, s)
                                   for s in x.shape[-win_dim:])),
                       label="stride")

    max_dilation = np.array(x.shape[-win_dim:]) // win_shape
    dilation = data.draw(st.tuples(*(st.integers(1, s) for s in max_dilation)),
                         label="dilation")
    conf = dict(stride=stride, dilation=dilation)

    # skip invalid data/kernel/stride/dilation combinations
    assume(
        get_outshape(x.shape[2:], kernels.shape[2:], stride, dilation)
        is not None)
    numpy_conv = conv_bank(x, kernels, **conf)
    mygrad_conv = conv_nd(x, kernels, **conf).data
    assert_allclose(actual=mygrad_conv,
                    desired=numpy_conv,
                    atol=1e-6,
                    rtol=1e-6)
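`conv_bank` serves as the pure-NumPy reference that the mygrad result is compared against. For orientation, a strided, dilated sliding-window sum of this kind (a cross-correlation, as is conventional for ML "convolutions") can be written naively; the sketch below is illustrative only, with the hypothetical name `naive_conv_nd`, and is not the actual `conv_bank` implementation:

import numpy as np

def naive_conv_nd(x, kernels, stride, dilation):
    """Illustrative strided/dilated cross-correlation over x: (N, C, *spatial)
    with kernels: (F, C, *win); returns shape (N, F, *out). Assumes a valid config."""
    spatial, win = x.shape[2:], kernels.shape[2:]
    # spatial extent of a kernel once dilation is applied
    extent = tuple(d * (w - 1) + 1 for w, d in zip(win, dilation))
    out_shape = tuple((s - e) // st + 1 for s, e, st in zip(spatial, extent, stride))
    out = np.zeros((x.shape[0], kernels.shape[0]) + out_shape)
    for n in range(x.shape[0]):
        for f in range(kernels.shape[0]):
            for pos in np.ndindex(*out_shape):
                # strided, dilated window of x aligned with this output position
                window = tuple(slice(i * st, i * st + e, d)
                               for i, st, e, d in zip(pos, stride, extent, dilation))
                out[(n, f) + pos] = np.sum(x[n][(slice(None),) + window] * kernels[f])
    return out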
Code Example #5
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_convnd_fwd_trivial():

    # trivial by-hand test: 1-dimensional conv
    # x:
    # [ 1,  2,  3,  4]

    # k:
    # [-1, -2]

    # stride = (2,)
    x = Tensor(np.arange(1, 5).reshape(1, 1, 4).astype(float))
    k = Tensor(-1 * np.arange(1, 3).reshape(1, 1, 2).astype(float))

    o = conv_nd(x, k, stride=(2, ), constant=True)

    out = np.array([[[-5.0, -11.0]]])
    assert isinstance(o, Tensor)
    assert o.constant is True
    assert o.scalar_only is False
    assert_allclose(actual=o.data,
                    desired=out,
                    err_msg="1d trivial test failed")

    # trivial by-hand test: 2-dimensional conv
    # x:
    # [[ 1,  2,  3,  4],
    #  [ 5,  6,  7,  8],
    #  [ 9, 10, 11, 12]]

    # k:
    # [-1, -2],
    # [-3, -4]

    # stride = (1, 2)
    x = Tensor(np.arange(1, 13).reshape(1, 1, 3, 4).astype(float))
    k = Tensor(-1 * np.arange(1, 5).reshape(1, 1, 2, 2).astype(float))

    o = conv_nd(x, k, stride=(1, 2), constant=True)

    out = np.array([[[[-44.0, -64.0], [-84.0, -104.0]]]])
    assert isinstance(o, Tensor)
    assert o.constant is True
    assert o.scalar_only is False
    assert_allclose(actual=o.data,
                    desired=out,
                    err_msg="2d trivial test failed")
Code Example #6
def test_conv_ND_bkwd(data, x, num_filters):
    """ Test conv-backprop 1D-3D with various strides and dilations."""
    x = x[0:min(x.shape[0], 1), 0:min(x.shape[1], 1)]
    win_dim = x.ndim - 2
    win_shape = data.draw(st.tuples(*(st.integers(1, s)
                                      for s in x.shape[-win_dim:])),
                          label="win_shape")
    kernels = data.draw(hnp.arrays(dtype=float,
                                   shape=(num_filters, x.shape[1], *win_shape),
                                   elements=st.floats(-10, 10)),
                        label="kernels")

    stride = data.draw(st.tuples(*(st.integers(1, s)
                                   for s in x.shape[-win_dim:])),
                       label="stride")

    max_dilation = np.array(x.shape[-win_dim:]) // win_shape
    dilation = data.draw(st.tuples(*(st.integers(1, s) for s in max_dilation)),
                         label="dilation")
    conf = dict(stride=stride, dilation=dilation)

    # skip invalid data/kernel/stride/dilation combinations
    assume(
        get_outshape(x.shape[2:], kernels.shape[2:], stride, dilation)
        is not None)

    x = Tensor(x)
    kernels = Tensor(kernels)

    out = conv_nd(x, kernels, **conf)
    grad = data.draw(hnp.arrays(shape=out.shape,
                                dtype=float,
                                elements=st.floats(-10, 10),
                                unique=True),
                     label="grad")

    out.backward(grad)
    grads_numerical = numerical_gradient_full(_conv_nd,
                                              *(i.data for i in (x, kernels)),
                                              back_grad=grad,
                                              kwargs=conf,
                                              as_decimal=False)

    for n, (arr, d_num) in enumerate(zip((x, kernels), grads_numerical)):
        assert_allclose(
            arr.grad,
            d_num,
            atol=1e-4,
            rtol=1e-4,
            err_msg="arr-{}: numerical derivative and mygrad derivative do not match".format(n),
        )
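The numerical reference here amounts to central differences on the scalar loss sum(grad * conv(x, kernels)), perturbing one input entry at a time. A minimal sketch of that idea, independent of the `numerical_gradient_full` helper used above (the name `central_diff_grad` is hypothetical):

import numpy as np

def central_diff_grad(f, x, h=1e-6):
    """Element-wise dL/dx via central differences; f maps an array to a scalar loss."""
    g = np.zeros_like(x)
    for idx in np.ndindex(*x.shape):
        orig = x[idx]
        x[idx] = orig + h
        f_plus = f(x)
        x[idx] = orig - h
        f_minus = f(x)
        x[idx] = orig                     # restore the perturbed entry
        g[idx] = (f_plus - f_minus) / (2 * h)
    return g

# e.g. the gradient w.r.t. x, holding the kernels fixed:
# central_diff_grad(lambda a: float(np.sum(grad * _conv_nd(a, kernels.data, **conf))), x.data.copy())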
Code Example #7
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_conv_ND_fwd(data, shape, num_filters, num_batch, num_channel):
    img_shape = (num_batch, num_channel) + shape

    padding = data.draw(
        st.integers(0, 2) | st.tuples(*[st.integers(0, 2)] * len(shape)),
        label="padding",
    )

    if isinstance(padding, tuple):
        shape = tuple(s + 2 * p for s, p in zip(shape, padding))
    else:
        shape = tuple(s + 2 * padding for s in shape)

    win_dim = len(shape)
    shape = (num_batch, num_channel) + shape
    win_shape = data.draw(st.tuples(*(st.integers(1, s)
                                      for s in shape[-win_dim:])),
                          label="win_shape")
    kernel_shape = (num_filters, shape[1], *win_shape)
    stride = data.draw(st.tuples(*(st.integers(1, s)
                                   for s in shape[-win_dim:])),
                       label="stride")
    max_dilation = np.array(shape[-win_dim:]) // win_shape
    dilation = data.draw(st.tuples(*(st.integers(1, s) for s in max_dilation)),
                         label="dilation")
    conf = dict(stride=stride, dilation=dilation, padding=padding)

    # skip invalid data/kernel/stride/dilation combinations
    assume(
        get_outshape(shape[2:], kernel_shape[2:], stride, dilation)
        is not None)

    kernels = data.draw(
        hnp.arrays(dtype=float,
                   shape=kernel_shape,
                   elements=st.floats(-10, 10)),
        label="kernels",
    )
    x = data.draw(hnp.arrays(dtype=float,
                             shape=img_shape,
                             elements=st.floats(-10, 10)),
                  label="x")

    mygrad_conv = conv_nd(x, kernels, **conf).data
    numpy_conv = conv_bank(x, kernels, **conf)
    assert_allclose(actual=mygrad_conv,
                    desired=numpy_conv,
                    atol=1e-6,
                    rtol=1e-6)
Code Example #8
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_conv_ND_bkwd(data, shape, num_filters, num_batch, num_channel):
    """ Test conv-backprop 1D-3D with various strides and dilations."""
    img_shape = (num_batch, num_channel) + shape

    padding = data.draw(
        st.integers(0, 2) | st.tuples(*[st.integers(0, 2)] * len(shape)),
        label="padding",
    )

    if isinstance(padding, tuple):
        shape = tuple(s + 2 * p for s, p in zip(shape, padding))
    else:
        shape = tuple(s + 2 * padding for s in shape)

    win_dim = len(shape)
    shape = (num_batch, num_channel) + shape
    win_shape = data.draw(st.tuples(*(st.integers(1, s)
                                      for s in shape[-win_dim:])),
                          label="win_shape")
    kernel_shape = (num_filters, shape[1], *win_shape)

    stride = data.draw(st.tuples(*(st.integers(1, s)
                                   for s in shape[-win_dim:])),
                       label="stride")

    max_dilation = np.array(shape[-win_dim:]) // win_shape
    dilation = data.draw(st.tuples(*(st.integers(1, s) for s in max_dilation)),
                         label="dilation")
    conf = dict(stride=stride, dilation=dilation, padding=padding)

    # skip invalid data/kernel/stride/dilation combinations
    assume(
        get_outshape(shape[2:], kernel_shape[2:], stride, dilation)
        is not None)

    kernels = data.draw(
        hnp.arrays(dtype=float,
                   shape=kernel_shape,
                   elements=st.floats(-10, 10)),
        label="kernels",
    )
    x = data.draw(hnp.arrays(dtype=float,
                             shape=img_shape,
                             elements=st.floats(-10, 10)),
                  label="x")

    x = Tensor(x)
    kernels = Tensor(kernels)

    out = conv_nd(x, kernels, **conf)
    grad = data.draw(
        hnp.arrays(shape=out.shape,
                   dtype=float,
                   elements=st.floats(-10, 10),
                   unique=True),
        label="grad",
    )

    out.backward(grad)
    grads_numerical = numerical_gradient_full(_conv_nd,
                                              *(i.data for i in (x, kernels)),
                                              back_grad=grad,
                                              kwargs=conf)

    for n, (arr, d_num) in enumerate(zip((x, kernels), grads_numerical)):
        assert_allclose(
            arr.grad,
            d_num,
            atol=1e-4,
            rtol=1e-4,
            err_msg="arr-{}: numerical derivative and mygrad derivative do not match".format(n),
        )
Code Example #9
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_bad_conv_shapes():
    x = np.zeros((1, 2, 2, 2))
    w = np.zeros((1, 3, 2, 2))
    with raises(ValueError):
        conv_nd(x, w, stride=1, padding=0)  # mismatched channels

    w = np.zeros((1, 2, 3, 2))
    with raises(ValueError):
        conv_nd(x, w, stride=1, padding=0)  # large filter

    w = np.zeros((1, 2, 2, 2))
    with raises(AssertionError):
        conv_nd(x, w, stride=0, padding=0)  # bad stride

    with raises(AssertionError):
        conv_nd(x, w, stride=[1, 2, 3])  # bad stride

    with raises(AssertionError):
        conv_nd(x, w, stride=1, padding=-1)  # bad pad

    with raises(AssertionError):
        conv_nd(x, w, stride=1, padding=[1, 2, 3])  # bad pad

    with raises(ValueError):
        conv_nd(x, w, stride=3, padding=1)  # shape mismatch
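The final case fails on output-shape arithmetic rather than on any single argument: by the standard formula, each spatial axis needs (padded - dilation*(kernel - 1) - 1) to be an exact multiple of the stride, and the ValueError above indicates MyGrad rejects configurations where it is not. The bookkeeping for the 2x2 input padded by 1, a 2x2 kernel, and stride 3 (pure arithmetic, no mygrad):

padded = 2 + 2 * 1              # spatial extent 2, padding 1 per side -> 4
span = 1 * (2 - 1) + 1          # kernel extent 2, dilation 1 -> 2
print((padded - span) % 3)      # 2: the stride-3 windows do not tile the padded axis
print((padded - span) / 3 + 1)  # ~1.67: no integer output size, hence the ValueError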