Example #1
def check_conv(precision):
    cgt.reset_config()
    cgt.set_precision(precision)
    f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[precision])
Example #2
def test_conv():
    try:
        import scipy.signal
    except ImportError:
        raise SkipTest("skipping because we don't have scipy.signal")

    np.random.seed(0)
    x = np.random.randn(2,2,5,17)
    filt = np.random.randn(3,2,4,7)

    filtrows = filt.shape[2]
    filtcols = filt.shape[3]

    batchsize = x.shape[0]
    outchans = filt.shape[0]

    out = np.zeros((batchsize,outchans,x.shape[2]+filtrows-1,x.shape[3]+filtcols-1))
    for b in xrange(x.shape[0]):
        for inchan in xrange(x.shape[1]):
            for outchan in xrange(outchans):
                out[b,outchan] += scipy.signal.convolve2d(x[b,inchan],filt[outchan,inchan][::-1,::-1],mode='full')

    f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
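For reference, the padding pad=(filtrows-1, filtcols-1) used above makes nn.conv2d produce a "full"-size output, which is why the test preallocates out with shape (rows + filtrows - 1, cols + filtcols - 1). A minimal check of that shape relationship, assuming only numpy and scipy are available:

import numpy as np
import scipy.signal

a = np.random.randn(5, 17)   # one input-channel plane, as in the test above
k = np.random.randn(4, 7)    # one filter plane
full = scipy.signal.convolve2d(a, k, mode='full')
assert full.shape == (5 + 4 - 1, 17 + 7 - 1)   # (8, 23)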
Example #3
def check_conv(precision):
    cgt.reset_config()
    cgt.set_precision(precision)
    f = cgt.function([],
                     nn.conv2d(cgt.constant(x),
                               cgt.constant(filt),
                               kernelshape=(filtrows, filtcols),
                               pad=(filtrows - 1, filtcols - 1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out,
                               out1,
                               atol={
                                   "single": 1e-3,
                                   "double": 1e-6
                               }[precision])
Example #4
def test_conv():
    try:
        import scipy.signal
    except ImportError:
        raise SkipTest("skipping because we don't have scipy.signal")

    np.random.seed(0)
    x = np.random.randn(2, 2, 5, 17)
    filt = np.random.randn(3, 2, 4, 7)

    filtrows = filt.shape[2]
    filtcols = filt.shape[3]

    batchsize = x.shape[0]
    outchans = filt.shape[0]

    out = np.zeros((batchsize, outchans, x.shape[2] + filtrows - 1,
                    x.shape[3] + filtcols - 1))
    for b in xrange(x.shape[0]):
        for inchan in xrange(x.shape[1]):
            for outchan in xrange(outchans):
                out[b, outchan] += scipy.signal.convolve2d(
                    x[b, inchan],
                    filt[outchan, inchan][::-1, ::-1],
                    mode='full')

    f = cgt.function([],
                     nn.conv2d(cgt.constant(x),
                               cgt.constant(filt),
                               kernelshape=(filtrows, filtcols),
                               pad=(filtrows - 1, filtcols - 1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out,
                               out1,
                               atol={
                                   "single": 1e-3,
                                   "double": 1e-6
                               }[cgt.get_precision()])
Example #5
def test_im2col():
    for settings in [ ((4,4),(0,0),(1,1)), ((3,3),(1,1),(2,2)), ((3,3),(1,1),(3,3)) ]:
        xval = np.arange(2*1*28*28).reshape(2,1,28,28).astype(cgt.floatX)
        x = cgt.tensor4("x", fixed_shape=xval.shape)
        y = im2col(x, *settings)
        h = cgt.constant(np.random.randn(*cgt.infer_shape(y)))
        cost = (y*h).sum()

        fcost = cgt.function([x],cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval,eps=1e-5)
        gana = fgrad(xval)
        assert np.allclose(gnum, gana)
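The assertion above compares CGT's analytic gradient with the numerical estimate from cgt.numeric_diff.numeric_grad. As a rough illustration of the underlying finite-difference idea (a hypothetical standalone helper, not CGT's implementation), a central-difference gradient check looks roughly like this:

import numpy as np

def central_diff_grad(f, x, eps=1e-5):
    # Perturb each entry of x in turn and approximate df/dx
    # with a central difference.
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        orig = x[i]
        x[i] = orig + eps
        hi = f(x)
        x[i] = orig - eps
        lo = f(x)
        x[i] = orig              # restore the original value
        g[i] = (hi - lo) / (2.0 * eps)
        it.iternext()
    return g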
Example #6
def test_pool(**kwargs):
    np.random.seed(0)
    x = cgt.tensor4("x", fixed_shape=(2,3,5,7))
    y = max_pool_2d(x, (4,4),(0,0),(1,1))
    xval = np.random.randn(2,3,5,7)
    hval = np.random.randn(*cgt.infer_shape(y))
    h = cgt.constant(hval)

    cost = (y*h).sum()

    fcost = cgt.function([x], cost)
    fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

    from cgt.numeric_diff import numeric_grad
    gnum = numeric_grad(fcost, xval)
    gana = fgrad(xval)
    assert np.allclose(gnum,gana)
Example #7
def test_im2col():
    for settings in [((4, 4), (0, 0), (1, 1)), ((3, 3), (1, 1), (2, 2)),
                     ((3, 3), (1, 1), (3, 3))]:
        xval = np.arange(2 * 1 * 28 * 28).reshape(2, 1, 28,
                                                  28).astype(cgt.floatX)
        x = cgt.tensor4("x", fixed_shape=xval.shape)
        y = im2col(x, *settings)
        h = cgt.constant(np.random.randn(*cgt.infer_shape(y)))
        cost = (y * h).sum()

        fcost = cgt.function([x], cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval, eps=1e-5)
        gana = fgrad(xval)
        assert np.allclose(gnum, gana)
Example #8
def test_cpu_pool(**kwargs):
    np.random.seed(0)
    x = cgt.tensor4("x", fixed_shape=(2, 3, 5, 7))
    y = max_pool_2d(x, (4, 4), (0, 0), (1, 1))
    xval = np.random.randn(2, 3, 5, 7)
    hval = np.random.randn(*cgt.infer_shape(y))
    h = cgt.constant(hval)

    cost = (y * h).sum()

    fcost = cgt.function([x], cost)
    fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

    from cgt.numeric_diff import numeric_grad
    gnum = numeric_grad(fcost, xval)
    gana = fgrad(xval)
    assert np.allclose(gnum, gana)
Example #9
def test_cpu_pool():
    with cgt.scoped_update_config(precision="quad", backend="native"):
        print cgt.get_precision()
        ci = get_compile_info()

        np.random.seed(0)
        x = cgt.tensor4("x", fixed_shape=(2, 3, 5, 7))
        y = max_pool_2d(x, (4, 4), (0, 0), (1, 1))
        xval = np.random.randn(2, 3, 5, 7)
        hval = np.random.randn(*cgt.infer_shape(y))
        h = cgt.constant(hval)

        cost = (y * h).sum()

        fcost = cgt.function([x], cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval)
        gana = fgrad(xval)

        assert np.allclose(gnum, gana)
Example #10
def test_cpu_pool():
    with cgt.scoped_update_config(precision="quad",backend="native"):
        print cgt.get_precision()
        ci = get_compile_info()

        np.random.seed(0)
        x = cgt.tensor4("x", fixed_shape=(2,3,5,7))
        y = max_pool_2d(x, (4,4),(0,0),(1,1))
        xval = np.random.randn(2,3,5,7)
        hval = np.random.randn(*cgt.infer_shape(y))
        h = cgt.constant(hval)

        cost = (y*h).sum()

        fcost = cgt.function([x], cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval)
        gana = fgrad(xval)

        assert np.allclose(gnum,gana)
Example #11
def constant(x):
    return cgt.constant(x)
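All of the snippets above ultimately exercise cgt.constant, which wraps a fixed NumPy array as a constant node in the computation graph. A minimal end-to-end sketch, assuming cgt and numpy are installed:

import numpy as np
import cgt

arr = np.arange(6.0).reshape(2, 3)
c = cgt.constant(arr)                # embed the array as a graph constant
f = cgt.function([], (c * 2).sum())  # compile a zero-argument function
print(f())                           # expected: 30.0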