Example #1
def test_matmuls():
    with cgt.scoped_update_config(parallel=True):

        m = 8
        d = 1000

        # build graph
        X = cgt.matrix("X")
        Y = cgt.matrix("Y")
        loss = 0
        for k in xrange(m):
            # loss = loss + cgt.sin(X*Y + k).sum()
            loss = loss + (X.dot(Y + k)).sum()
        f = cgt.function([X, Y], loss)

        # test things out!
        seed(0)
        X_val = randn(d, d)
        Y_val = randn(d, d)
        vals = [X_val, Y_val]

        tic = time.time()
        out = f(*vals)
        toc = time.time()

        print toc - tic
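
All of these examples lean on the same contract: `cgt.scoped_update_config` is a context manager that applies the given configuration overrides (e.g. `parallel`, `backend`, `precision`) for the duration of the `with` block and, as the name suggests, restores the previous settings on exit. A minimal sketch of that pattern in isolation — the `_CONFIG` dict and the helper below are hypothetical stand-ins, not CGT's actual internals:

import contextlib

_CONFIG = {"parallel": False, "backend": "python", "precision": "single"}

@contextlib.contextmanager
def scoped_update_config(**kwargs):
    # Save only the values we are about to override.
    saved = dict((k, _CONFIG[k]) for k in kwargs)
    _CONFIG.update(kwargs)
    try:
        yield
    finally:
        # Restore the previous values even if the block raised.
        _CONFIG.update(saved)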
Example #2
def test_shape_err():
    with CaptureStderr():
        with cgt.scoped_update_config(debug=True, backend="python"):
            x = cgt.vector()
            y = cgt.vector()
            f = cgt.function([x, y], x + y)
            f(np.zeros(3), np.zeros(4))
Example #3
def test_matmuls():
    with cgt.scoped_update_config(parallel=True, backend="native"):

        m = 8
        d = 1000

        # build graph
        X = cgt.matrix("X")
        Y = cgt.matrix("Y")
        loss = 0
        for k in xrange(m):
            # loss = loss + cgt.sin(X*Y + k).sum()
            loss = loss + (X.dot(Y + k)).sum()
        f = cgt.function([X, Y], loss)

        # test things out!
        seed(0)
        X_val = randn(d, d)
        Y_val = randn(d, d)
        vals = [X_val, Y_val]

        tic = time.time()
        out = f(*vals)
        toc = time.time()

        print toc - tic
Example #4
def test_optimizers():
    tests = [run_sgd, run_momentum, run_nesterov_momenutm, run_adagrad, run_rmsprop, run_adagrad]

    for backend in ["python","native"]:
        with cgt.scoped_update_config(backend=backend):
            for test in tests:
                yield test
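
Example #4 is a nose-style generator test: each `yield test` is collected as a separate test case, and because the generator is suspended inside the `with` block, the backend override is still in effect when each yielded test runs. A sketch of how a runner consumes it (this loop stands in for nose, it is not CGT code):

for test in test_optimizers():
    test()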
Example #5
def test_cudnn():
    with cgt.scoped_update_config(precision="double", backend="native"):
        if not get_compile_info()["CGT_ENABLE_CUDNN"]:
            raise SkipTest("CUDNN not enabled. Skipping this test")

        Xval = nr.randn(2, 3, 19, 18)
        Wval = nr.randn(5, 3, 3, 3)
        bval = nr.randn(1, 5, 1, 1)

        X = cgt.tensor4("X", fixed_shape=Xval.shape)
        W = cgt.tensor4("W", fixed_shape=Wval.shape)
        b = cgt.tensor4("b", fixed_shape=bval.shape)

        Y = cgt.core.Result(cudnn_ops.CudnnConvForward(1, 1, 1, 1), [X, W, b])

        Y2 = nr.randn(*cgt.core.infer_shape(Y))

        fY = cgt.function([X, W, b], Y)
        Yval = fY(Xval, Wval, bval)
        cost = (Y * Y2).sum()
        fcost = cgt.function([X, W, b], cost)
        fgrad = cgt.function([X, W, b], cgt.grad(cost, [X, W, b]))
        angrads = fgrad(Xval, Wval, bval)
        nugrads = numeric_grad_multi(fcost, [Xval, Wval, bval], eps=1e-3)
        for (nugrad, angrad) in zip(nugrads, angrads):
            assert np.allclose(nugrad, angrad)
Example #6
def test_devices():
    N = 10
    K = 3

    compile_info = cgt.compilation.get_compile_info()
    cuda_enabled = compile_info["CGT_ENABLE_CUDA"]
    if not cuda_enabled:
        raise SkipTest("cuda disabled")

    Xval = np.random.randn(N, K).astype(cgt.floatX)
    wval = np.random.randn(K).astype(cgt.floatX)
    bval = np.asarray(np.random.randn()).astype(cgt.floatX)
    yval = np.random.randn(N).astype(cgt.floatX)

    with cgt.scoped_update_config(default_device=cgt.Device(devtype="gpu")):

        X_nk = cgt.shared(Xval, "X", device=cgt.Device(devtype='gpu'))
        y_n = cgt.shared(yval, "y")
        w_k = cgt.shared(wval, "w")
        b = cgt.shared(bval, name="b")

        print "bval", bval

        ypred = cgt.dot(cgt.square(X_nk), w_k) + b

        err = cgt.sum(cgt.sin(ypred - y_n))
        g = cgt.grad(err, [w_k, b])
        outputs = [err] + g
        f = cgt.function([], [err] + g)
        results = f()
        print results
        assert np.allclose(
            results[0],
            np.sin(np.square(Xval).dot(wval) + bval - yval).sum())
Example #7
def check_func_with_config(backend, precision):
    with cgt.scoped_update_config(backend=backend, precision=precision):
        if pass_settings:
            check_func(backend=backend, precision=precision)
        else:
            check_func()
Example #8
def test_update():
    with cgt.scoped_update_config(parallel=True):
        xval = np.array(1.5)
        x = cgt.shared(xval)
        f = cgt.function([], x.sum(), updates=[(x, x + 1)])
        before = x.op.get_value().copy()
        f()
        after = x.op.get_value()
        assert np.allclose(after, before + 1)
Example #9
def test_update():
    with cgt.scoped_update_config(parallel=True, backend="native"):
        xval = np.array(1.5)
        x = cgt.shared(xval)
        f = cgt.function([], x.sum(), updates=[(x, x + 1)])
        before = x.op.get_value().copy()
        f()
        after = x.op.get_value()
        assert np.allclose(after, before + 1)
Example #10
File: __init__.py Project: zxie/cgt
def check_func_with_config(backend, precision, devtype):
    with cgt.scoped_update_config(
            backend=backend,
            precision=precision,
            default_device=cgt.core.Device(devtype=devtype)):
        if pass_settings:
            check_func(backend=backend, precision=precision)
        else:
            check_func()
Example #11
def test_shape_err():
    try:
        with CaptureStderr() as s:
            with cgt.scoped_update_config(debug=True):
                x = cgt.vector()
                y = cgt.vector()
                f = cgt.function([x, y], x + y)
                f(np.zeros(3), np.zeros(4))
    except Exception as e:
        assert "f = cgt.function([x, y], x + y)" in s.getvalue()
Example #12
def test_sleeps():
    with cgt.scoped_update_config(parallel=True, backend="native"):
        x = cgt.scalar('x')
        y1 = sleepfor(x, .1)
        y2 = sleepfor(x, .1)

        z = y1 + y2
        fpar = cgt.function([x], z)

        tstart = time.time()
        fpar(0)
        elapsed = time.time() - tstart
        assert elapsed < .11
Example #13
def test_sleeps():
    with cgt.scoped_update_config(parallel=True):
        x = cgt.scalar('x')
        y1 = sleepfor(x, .1)
        y2 = sleepfor(x, .1)

        z = y1 + y2
        fpar = cgt.function([x], z)

        tstart = time.time()
        fpar(0)
        elapsed = time.time() - tstart
        assert elapsed < .11
Example #14
def runtest(backend, precision):
    with cgt.scoped_update_config(backend='native', precision=precision):
        xval = np.zeros(10)
        x = cgt.shared(xval)
        f = cgt.function([], [], updates=[(x, x + 1)])
        f()
        g = cgt.function([], x.sum())
        assert np.allclose(x.op.get_value(), xval + 1)
        xval2 = np.arange(10)
        x.op.set_value(xval2)
        print x.op.get_value()
        assert np.allclose(x.op.get_value(), xval2)
        assert g() == xval2.sum()
        f()
        assert np.allclose(x.op.get_value(), xval2 + 1)
        assert g() == (xval2 + 1).sum()
Example #15
def test_im2col():
    with cgt.scoped_update_config(precision="quad", backend="native"):
        for settings in [((4, 4), (0, 0), (1, 1)),
                         ((3, 3), (1, 1), (2, 2)),
                         ((3, 3), (1, 1), (3, 3))]:
            xval = np.arange(2 * 1 * 28 * 28).reshape(2, 1, 28, 28).astype(cgt.floatX)
            x = cgt.tensor4("x", fixed_shape=xval.shape)
            y = im2col(x, *settings)
            h = cgt.constant(np.random.randn(*cgt.infer_shape(y)))
            cost = (y * h).sum()

            fcost = cgt.function([x], cost)
            fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

            from cgt.numeric_diff import numeric_grad
            gnum = numeric_grad(fcost, xval, eps=1e-5)
            gana = fgrad(xval)
            assert np.allclose(gnum, gana)
Example #16
def test_lrn():
    if not get_compile_info()["CGT_ENABLE_CUDA"]:
        raise SkipTest("Skipping because CUDA disabled")

    with cgt.scoped_update_config(precision="double", backend="native"):
        from cgt.tests import gradcheck_model
        cgt.set_precision('double')
        nr.seed(0)
        Xval = nr.randn(4, 8, 16, 16)
        X = cgt.shared(Xval, name="X", fixed_shape_mask="all")
        # X = cgt.tensor4(name='X')
        y = cross_channel_lrn(X, localsize=4, alpha=.1, beta=.5)
        f = cgt.function([], y)
        print f().sum()
        print f().sum()
        print f().sum()
        assert np.isfinite(f().sum())
        # print f(Xval).sum()
        a = nr.rand(*cgt.infer_shape(y))
        loss = (y * a).sum()
        gradcheck_model(loss, [X], eps=1e-5)
Example #17
def test_cpu_pool():
    with cgt.scoped_update_config(precision="quad", backend="native"):
        print cgt.get_precision()
        ci = get_compile_info()

        np.random.seed(0)
        x = cgt.tensor4("x", fixed_shape=(2, 3, 5, 7))
        y = max_pool_2d(x, (4, 4), (0, 0), (1, 1))
        xval = np.random.randn(2, 3, 5, 7)
        hval = np.random.randn(*cgt.infer_shape(y))
        h = cgt.constant(hval)

        cost = (y * h).sum()

        fcost = cgt.function([x], cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval)
        gana = fgrad(xval)

        assert np.allclose(gnum, gana)
Example #18
        y = cgt.vector("y", dtype="i8")
        stepsize = cgt.scalar("stepsize")

        loss = build_fc_return_loss(X, y)
        params = nn.get_parameters(loss)
        m = nn.Module([X, y], [loss])
        split_loss = 0
        for start in xrange(0, batch_size, batch_size // 4):
            sli = slice(start, start + batch_size // 4)
            split_loss += m([X[sli], y[sli]])[0]
        split_loss /= 4
        gparams = cgt.grad(split_loss, params)
        updates2 = [(p, p - stepsize * gp) for (p, gp) in zip(params, gparams)]
        return cgt.function([X, y, stepsize], split_loss, updates=updates2)

    with cgt.scoped_update_config(parallel=True, num_threads=4):
        updater_fc_par = make_updater_fc_parallel()

    print "Fully-connected Network with Split Input for Data Parallelism"
    run_sgd_epochs(Xtrain, ytrain, updater_fc_par)

    # Convnet on CPU
    # -----------------------

    Xtrainimg = Xtrain.reshape(-1, 1, 28, 28)

    def build_convnet_return_loss(X, y):
        np.random.seed(0)
        conv1 = nn.rectify(
            nn.SpatialConvolution(1, 32, kernelshape=(3, 3), pad=(0, 0), weight_init=nn.IIDGaussian(std=0.1))(X)
        )
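
Taken together, the examples show the typical workflow: open a `scoped_update_config` block, build the graph inside it, compile with `cgt.function`, then run. As a closing illustration, a hedged driver for the `test_matmuls` pattern from Examples #1 and #3, usable as a quick parallel-vs-serial benchmark (a sketch that assumes the same imports the tests use; `time_matmuls` is a hypothetical helper, not part of CGT):

import time
import numpy as np
import cgt

def time_matmuls(parallel, m=8, d=1000):
    # Build and time the same chained-matmul loss as test_matmuls,
    # with parallel execution toggled by the caller.
    with cgt.scoped_update_config(parallel=parallel, backend="native"):
        X = cgt.matrix("X")
        Y = cgt.matrix("Y")
        loss = 0
        for k in xrange(m):
            loss = loss + (X.dot(Y + k)).sum()
        f = cgt.function([X, Y], loss)
        X_val = np.random.randn(d, d)
        Y_val = np.random.randn(d, d)
        tic = time.time()
        f(X_val, Y_val)
        return time.time() - tic

print time_matmuls(False), time_matmuls(True)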