Example 1
def test_conv():
    try:
        import scipy.signal
    except ImportError:
        raise SkipTest("skipping because we don't have scipy.signal")

    np.random.seed(0)
    x = np.random.randn(2,2,5,17)
    filt = np.random.randn(3,2,4,7)

    filtrows = filt.shape[2]
    filtcols = filt.shape[3]

    batchsize = x.shape[0]
    outchans = filt.shape[0]

    out = np.zeros((batchsize,outchans,x.shape[2]+filtrows-1,x.shape[3]+filtcols-1))
    for b in xrange(x.shape[0]):
        for inchan in xrange(x.shape[1]):
            for outchan in xrange(outchans):
                out[b,outchan] += scipy.signal.convolve2d(x[b,inchan],filt[outchan,inchan][::-1,::-1],mode='full')

    f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
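
The reference loop above builds a full convolution by flipping the filter along both spatial axes before calling scipy.signal.convolve2d. A minimal standalone sketch of the identity it relies on (plain NumPy/SciPy, independent of cgt): the cross-correlation that conv layers compute equals true convolution with a doubly flipped kernel.

import numpy as np
import scipy.signal

img = np.random.randn(5, 17)
ker = np.random.randn(4, 7)

# correlate2d slides the kernel as-is; convolve2d flips it first,
# so flipping it ourselves makes the two agree.
assert np.allclose(
    scipy.signal.correlate2d(img, ker, mode='full'),
    scipy.signal.convolve2d(img, ker[::-1, ::-1], mode='full'))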
Example 2
File: __init__.py Project: zxie/cgt
def gradcheck_model(cost,
                    params,
                    extravars=(),
                    extravals=(),
                    atol=1e-8,
                    eps=1e-9):
    precision = cgt.get_precision()
    if precision == "single":
        cgt.utils.warn(
            "You're doing a gradient check with %s precision. "
            "Use double, or better yet quad, for best results" % precision)
    assert all(param.is_input() for param in params)
    assert len(extravars) == len(extravals)

    # Convert to Argument nodes
    param_args = [
        cgt.core.Argument(typ=s.typ, name=s.name) if s.is_data() else s
        for s in params
    ]

    # Get new cost in terms of the Argument nodes
    cost = cgt.core.clone(cost, replace=dict(zip(params, param_args)))

    grads = cgt.grad(cost, param_args)
    paramvals = [param.op.get_value() for param in params]
    fcost = cgt.function(param_args, cost, givens=zip(extravars, extravals))
    fgrad = cgt.function(param_args, grads, givens=zip(extravars, extravals))

    angrads = fgrad(*paramvals)
    nugrads = numeric_grad_multi(fcost, paramvals, eps=eps)

    for (angrad, nugrad) in zip(angrads, nugrads):
        assert np.allclose(angrad, nugrad, atol=atol)
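
numeric_grad_multi itself is not shown on this page. A minimal sketch of what such a checker plausibly does, as a hypothetical plain-NumPy stand-in rather than cgt's actual implementation: perturb each entry of each parameter by eps and take a central difference of the scalar cost.

import numpy as np

def numeric_grad_multi_sketch(fcost, paramvals, eps=1e-9):
    # Central-difference gradient of scalar-valued fcost w.r.t. each array in
    # paramvals. Mutates entries in place and restores them afterwards
    # (assumes contiguous arrays, so reshape(-1) returns a writable view).
    grads = []
    for p in paramvals:
        g = np.zeros_like(p)
        flat_p, flat_g = p.reshape(-1), g.reshape(-1)
        for j in range(flat_p.size):
            orig = flat_p[j]
            flat_p[j] = orig + eps
            cplus = fcost(*paramvals)
            flat_p[j] = orig - eps
            cminus = fcost(*paramvals)
            flat_p[j] = orig
            flat_g[j] = (cplus - cminus) / (2 * eps)
        grads.append(g)
    return grads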
Example 3
def test_einsum():
    x = cgt.tensor3()
    y = cgt.tensor3()

    sizes = {"i": 2, "j": 3, "k": 5, "l": 7}
    xaxes = "ijk"
    yaxes = "ikl"
    zaxes = "ijl"
    for _ in xrange(10):
        xperm = xaxes
        (yperm, zperm) = permaxes = [[chars[i] for i in np.random.permutation(3)] for chars in [yaxes, zaxes]]
        desc = "%s,%s->%s" % tuple("".join(chars) for chars in [xperm] + permaxes)
        z = cgt.einsum(desc, x, y)
        xval = nr.randn(*(sizes[c] for c in xperm))
        yval = nr.randn(*(sizes[c] for c in yperm))
        np.testing.assert_allclose(
            cgt.numeric_eval(z, {x: xval, y: yval}),
            np.einsum(desc, xval, yval),
            atol={"single": 1e-3, "double": 1e-6}[cgt.get_precision()],
        )
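
Before the random axis permutations, the base spec here is "ijk,ikl->ijl", i.e. a contraction over k for each shared batch index i, which is just a batched matrix product. A quick plain-NumPy sanity check of that reading:

import numpy as np

x = np.random.randn(2, 3, 5)   # axes i, j, k
y = np.random.randn(2, 5, 7)   # axes i, k, l
# Contract over k for each batch index i: same as a batched matmul.
assert np.allclose(np.einsum("ijk,ikl->ijl", x, y), np.matmul(x, y))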
Example 4
def test_scalars():
    np.random.seed(0)
    x = cgt.scalar('x')
    y = cgt.scalar('y')
    z = cgt.scalar('z')
    vars = [x,y,z] #pylint: disable=W0622
    vals = nr.rand(len(vars))+1

    PROB2RESULT = {}

    for ((key,_), cls) in it.chain(
            it.izip(core.UNARY_INFO.items(),it.repeat(core.ElwiseUnary)),
            it.izip(core.BINARY_INFO.items(),it.repeat(core.ElwiseBinary))
            ):
        if key == "conj":
            print "skipping conj"
            continue
        utils.colorprint(utils.Color.YELLOW, "Testing %s\n"%key)
        if cls == core.ElwiseUnary:
            n_in = 1
            op = cls(key)
        else:
            n_in = 2
            op = cls(key, (True,True))
        inputvars = vars[0:n_in]
        inputvals = vals[0:n_in]
        out = core.Result(op, inputvars)
        f = cgt.function(inputvars, out)
        try:
            grads = cgt.grad(out, inputvars)
        except core.NonDifferentiable:
            print "nondiff"
            continue
        if DISPLAY:
            print "Function:"
            cgt.print_tree(out)
            print "Gradient original:"
            cgt.print_tree(grads)
            print "Gradient simplified:"
        grads_simple = core.simplify(grads)
        if DISPLAY: cgt.print_tree(grads_simple)
        gradf = cgt.function(inputvars, grads)
        eps = {"single":1e-4,"double":1e-9}[cgt.get_precision()]
        nugrad = numeric_grad(lambda li: f(*li), inputvals,eps=eps) #pylint: disable=W0640
        cgtgrad = gradf(*inputvals)
        np.testing.assert_almost_equal(nugrad,cgtgrad,decimal={"single":3,"double":6}[cgt.get_precision()])

        grad_count = core.count_nodes(grads_simple)
        PROB2RESULT[key] = {}
        PROB2RESULT[key]["grad"] = grad_count

    if DISPLAY:
        from thirdparty.tabulate import tabulate
        print tabulate([[key,val["grad"]] for (key,val) in PROB2RESULT.iteritems()],headers=["funcname","gradcount"])    
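
The precision-dependent eps at the end reflects the usual finite-difference trade-off: truncation error shrinks with eps, but roundoff error grows as eps shrinks, so single precision needs a much larger step than double. A small illustration of this, in plain NumPy and independent of cgt:

import numpy as np

def fd_error(dtype, eps):
    # One-sided finite difference of sin at 0.7, computed entirely in dtype,
    # compared against the true derivative cos(0.7).
    x, eps = dtype(0.7), dtype(eps)
    approx = (np.sin(x + eps) - np.sin(x)) / eps
    return abs(float(approx) - np.cos(0.7))

for eps in (1e-4, 1e-9):
    print("eps=%g  float32 err=%.1e  float64 err=%.1e"
          % (eps, fd_error(np.float32, eps), fd_error(np.float64, eps)))
# float32 with eps=1e-9 fails completely: x + eps rounds back to x,
# so the difference quotient is exactly zero.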
Example 5
def check_affine(f, *nu_inputs):
    types = ",".join(["{%s,%s}" % (x.dtype, x.ndim) for x in nu_inputs])
    cgt.utils.colorprint(cgt.utils.Color.YELLOW,
                         "Testing %s(%s)\n" % (f.__name__, types))
    sy_inputs = map(tensor_like, nu_inputs)
    for (i, sy) in enumerate(sy_inputs):
        sy.name = "x%i" % i

    sy_result = f(*sy_inputs)

    def maybeprint(msg):
        if DISPLAY: print msg

    maybeprint("Function:")
    if DISPLAY: cgt.print_tree([sy_result])

    f_cgt = cgt.function(sy_inputs, sy_result)
    sy_grads = cgt.grad(sy_result, sy_inputs)
    gradf_cgt = cgt.function(sy_inputs, sy_grads)

    sy_result_simple = core.simplify([sy_result])
    sy_grads_simple = core.simplify(sy_grads)

    maybeprint("Gradient:")
    if DISPLAY: cgt.print_tree(sy_grads)

    maybeprint("Gradient after simplification:")
    if DISPLAY: cgt.print_tree(sy_grads_simple)

    out_true = f(*nu_inputs)
    out_cgt = f_cgt(*nu_inputs)

    grads_true = gradients_affine(f_cgt, nu_inputs, h=1e-4 if "max" in f.__name__ else 1e-1)
    grads_cgt = gradf_cgt(*nu_inputs)

    rtol = {"single": 1e-3, "double": 1e-5}[cgt.get_precision()]
    np.testing.assert_allclose(out_cgt, out_true, rtol=rtol)

    for (g_cgt, g_true) in zip(grads_cgt, grads_true):
        np.testing.assert_allclose(g_cgt, g_true, rtol=rtol)

    result_count = cgt.count_nodes(sy_result_simple)
    grad_count = cgt.count_nodes(sy_grads_simple)
    maybeprint("Result before: %i. after: %i" %
               (cgt.count_nodes([sy_result]), result_count))
    maybeprint("Grad before: %i. after: %i" %
               (cgt.count_nodes(sy_grads), grad_count))

    PROB2RESULT[f.__name__] = {}
    PROB2RESULT[f.__name__]["fn"] = result_count
    PROB2RESULT[f.__name__]["grad"] = grad_count
Example 6
def test_linreg():
    N = 10
    K = 3

    Xval = np.random.randn(N,K)
    wval = np.random.randn(K)
    bval = np.random.randn()
    yval = np.random.randn(N)

    X_nk = cgt.matrix("X")
    y_n = cgt.vector("y")
    w_k = cgt.vector("w")
    b = cgt.scalar(name="b")

    ypred = cgt.dot(X_nk, w_k) + b

    err = cgt.sum(cgt.square(ypred - y_n))
    g = cgt.grad(err, [w_k, b])

    g_simple,an,_ = cgt.core.simplify_and_analyze(g)


    print "Loss function:"
    cgt.print_tree([err])
    print "Gradient:"
    cgt.print_tree(g)

    print "Gradient simplified"
    cgt.print_tree(g_simple, nodefn=lambda node,o: o.write(" " + an["node2hash"][node][:5]))

    print "-------"

    d = {X_nk : Xval, w_k : wval, b : bval, y_n : yval}

    np.testing.assert_allclose(cgt.numeric_eval(err,d), np.linalg.norm(Xval.dot(wval) + bval - yval)**2,
        atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
    np.testing.assert_allclose(cgt.numeric_eval(g[0],d), 2 * Xval.T.dot(Xval.dot(wval) + bval - yval),
        atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
    np.testing.assert_allclose(cgt.numeric_eval(g[1],d), 2 *  np.sum(Xval.dot(wval) + bval - yval, 0),
        atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
Example 7
def test_cpu_pool():
    with cgt.scoped_update_config(precision="quad", backend="native"):
        print cgt.get_precision()
        ci = get_compile_info()

        np.random.seed(0)
        x = cgt.tensor4("x", fixed_shape=(2, 3, 5, 7))
        y = max_pool_2d(x, (4, 4), (0, 0), (1, 1))
        xval = np.random.randn(2, 3, 5, 7)
        hval = np.random.randn(*cgt.infer_shape(y))
        h = cgt.constant(hval)

        cost = (y * h).sum()

        fcost = cgt.function([x], cost)
        fgrad = cgt.function([x], cgt.grad(cost, [x])[0])

        from cgt.numeric_diff import numeric_grad
        gnum = numeric_grad(fcost, xval)
        gana = fgrad(xval)

        assert np.allclose(gnum, gana)
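
For reference, a plain-NumPy sketch of the pooling being differentiated here, assuming the call above passes (kernel, pad, stride) in that order; this is a hypothetical stand-in, not cgt's native kernel. It slides a 4x4 window with stride 1 and no padding over each (batch, channel) plane.

import numpy as np

def max_pool_2d_sketch(x, kernel=(4, 4), stride=(1, 1)):
    b, c, h, w = x.shape
    kh, kw = kernel
    sh, sw = stride
    oh, ow = (h - kh) // sh + 1, (w - kw) // sw + 1
    out = np.empty((b, c, oh, ow), x.dtype)
    for i in range(oh):
        for j in range(ow):
            # Max over each kh x kw window; the gradient of this op routes
            # the incoming gradient to the argmax element of each window.
            out[:, :, i, j] = x[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw].max(axis=(2, 3))
    return out

# On the test's shape: (2, 3, 5, 7) -> (2, 3, 2, 4).
assert max_pool_2d_sketch(np.random.randn(2, 3, 5, 7)).shape == (2, 3, 2, 4)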