Example #1
File: __init__.py Project: zxie/cgt
# Presumed imports for these snippets; exact module paths may vary by CGT version.
import cgt
import numpy as np
import numpy.random as nr
from cgt.numeric_diff import numeric_grad_multi


def gradcheck_model(cost,
                    params,
                    extravars=(),
                    extravals=(),
                    atol=1e-8,
                    eps=1e-9):
    precision = cgt.get_precision()
    if precision == "single":
        cgt.utils.warn(
            "You're doing a gradient check with %s precision. "
            "Use double or, better yet, quad precision for best results." % precision)
    assert all(param.is_input() for param in params)
    assert len(extravars) == len(extravals)

    # Convert to Argument nodes
    param_args = [
        cgt.core.Argument(typ=s.typ, name=s.name) if s.is_data() else s
        for s in params
    ]

    # Get the new cost in terms of the Argument nodes
    cost = cgt.core.clone(cost, replace=dict(zip(params, param_args)))

    grads = cgt.grad(cost, param_args)
    paramvals = [param.op.get_value() for param in params]
    # list() so the givens pairs survive under Python 3's lazy zip
    fcost = cgt.function(param_args, cost, givens=list(zip(extravars, extravals)))
    fgrad = cgt.function(param_args, grads, givens=list(zip(extravars, extravals)))

    angrads = fgrad(*paramvals)
    nugrads = numeric_grad_multi(fcost, paramvals, eps=eps)

    for (angrad, nugrad) in zip(angrads, nugrads):
        assert np.allclose(angrad, nugrad, atol=atol)
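
A minimal usage sketch, assuming cgt.shared creates the parameter (Data) nodes that gradcheck_model expects; the two-parameter model here is illustrative, not from the project:

w = cgt.shared(nr.randn(3), name="w")  # parameter (Data) nodes
x = cgt.shared(nr.randn(3), name="x")
cost = cgt.sum((w - x) * (w - x))      # scalar cost in the two parameters
gradcheck_model(cost, [w, x])          # raises AssertionError if gradients disagree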
Example #2
# Additional presumed imports for the cuDNN tests below; the originals likely use
# nose's SkipTest, and module paths vary across CGT versions and forks.
from unittest import SkipTest
from cgt.compilation import get_compile_info
from cgt import cudnn_ops  # available only in a cuDNN-enabled build


def test_cudnn():
    compile_info = get_compile_info()
    if not (compile_info["CGT_ENABLE_CUDNN"] and compile_info["CGT_ENABLE_CUDA"]):
        raise SkipTest("CUDNN or CUDA not enabled. Skipping this test")

    Xval = nr.randn(2, 3, 19, 18)
    Wval = nr.randn(5, 3, 3, 3)
    bval = nr.randn(1, 5, 1, 1)

    X = cgt.tensor4("X", fixed_shape=Xval.shape)
    W = cgt.tensor4("W", fixed_shape=Wval.shape)
    b = cgt.tensor4("b", fixed_shape=bval.shape)

    Y = cgt.core.Result(cudnn_ops.CudnnConvForward(1, 1, 1, 1), [X, W, b])

    Y2 = nr.randn(*cgt.core.infer_shape(Y))

    fY = cgt.function([X, W, b], Y)
    Yval = fY(Xval, Wval, bval)  # smoke-test the forward pass
    cost = (Y * Y2).sum()
    fcost = cgt.function([X, W, b], cost)
    fgrad = cgt.function([X, W, b], cgt.grad(cost, [X, W, b]))
    angrads = fgrad(Xval, Wval, bval)
    nugrads = numeric_grad_multi(fcost, [Xval, Wval, bval], eps=1e-3)
    for (nugrad, angrad) in zip(nugrads, angrads):
        assert np.allclose(nugrad, angrad, rtol=9e-3, atol=1e-7)
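
Both gradcheck_model and the cuDNN tests compare analytic gradients against numeric_grad_multi. A minimal sketch of the central-difference scheme it presumably implements (the real routine lives in CGT's numeric_diff module and may differ in detail; this version temporarily mutates the input arrays in place):

import numpy as np

def numeric_grad_multi_sketch(f, xs, eps=1e-9):
    # For each input array, perturb one element at a time and take
    # a central difference of the scalar function f.
    grads = []
    for x in xs:
        g = np.zeros_like(x)
        for idx in np.ndindex(*x.shape):
            orig = x[idx]
            x[idx] = orig + eps
            fplus = f(*xs)
            x[idx] = orig - eps
            fminus = f(*xs)
            x[idx] = orig  # restore before moving on
            g[idx] = (fplus - fminus) / (2 * eps)
        grads.append(g)
    return grads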
Example #3
def test_cudnn():
    if not get_compile_info()["CGT_ENABLE_CUDNN"]:
        raise SkipTest("CUDNN not enabled. Skipping this test")

    Xval = nr.randn(2, 3, 19, 18)
    Wval = nr.randn(5, 3, 3, 3)
    bval = nr.randn(1, 5, 1, 1)

    X = cgt.tensor4("X", fixed_shape=Xval.shape)
    W = cgt.tensor4("W", fixed_shape=Wval.shape)
    b = cgt.tensor4("b", fixed_shape=bval.shape)

    Y = cgt.core.Result(cudnn_ops.CudnnConvForward(1, 1, 1, 1), [X, W, b])

    Y2 = nr.randn(*cgt.core.infer_shape(Y))

    fY = cgt.function([X, W, b], Y)
    Yval = fY(Xval, Wval, bval)
    cost = (Y * Y2).sum()
    fcost = cgt.function([X, W, b], cost)
    fgrad = cgt.function([X, W, b], cgt.grad(cost, [X, W, b]))
    angrads = fgrad(Xval, Wval, bval)
    nugrads = numeric_grad_multi(fcost, [Xval, Wval, bval], eps=1e-3)
    for (nugrad, angrad) in zip(nugrads, angrads):
        assert np.allclose(nugrad, angrad)
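
Unlike Example #2, this version relies on np.allclose's default tolerances (rtol=1e-5, atol=1e-8), which are fairly tight for central differences taken at eps=1e-3. As a reminder of what those defaults mean:

import numpy as np

# np.allclose(a, b) checks |a - b| <= atol + rtol * |b| elementwise,
# with rtol=1e-5 and atol=1e-8 by default.
assert np.allclose(1.0, 1.0 + 5e-6)      # inside the default tolerance
assert not np.allclose(1.0, 1.0 + 5e-5)  # outside it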
Example #4
def test_cudnn():
    with cgt.scoped_update_config(precision="double", backend="native"):
        if not get_compile_info()["CGT_ENABLE_CUDNN"]:
            raise SkipTest("CUDNN not enabled. Skipping this test")

        Xval = nr.randn(2, 3, 19, 18)
        Wval = nr.randn(5, 3, 3, 3)
        bval = nr.randn(1, 5, 1, 1)

        X = cgt.tensor4("X", fixed_shape=Xval.shape)
        W = cgt.tensor4("W", fixed_shape=Wval.shape)
        b = cgt.tensor4("b", fixed_shape=bval.shape)

        Y = cgt.core.Result(cudnn_ops.CudnnConvForward(1, 1, 1, 1), [X, W, b])

        Y2 = nr.randn(*cgt.core.infer_shape(Y))

        fY = cgt.function([X, W, b], Y)
        Yval = fY(Xval, Wval, bval)
        cost = (Y * Y2).sum()
        fcost = cgt.function([X, W, b], cost)
        fgrad = cgt.function([X, W, b], cgt.grad(cost, [X, W, b]))
        angrads = fgrad(Xval, Wval, bval)
        nugrads = numeric_grad_multi(fcost, [Xval, Wval, bval], eps=1e-3)
        for (nugrad, angrad) in zip(nugrads, angrads):
            assert np.allclose(nugrad, angrad)
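
Pinning precision="double" matters for the unqualified np.allclose comparison above: in single precision, a central difference at eps=1e-3 retains only a handful of significant digits. A standalone illustration (plain NumPy, not CGT):

import numpy as np

x, eps = np.float32(1.2345678), np.float32(1e-3)
fd = (np.square(x + eps) - np.square(x - eps)) / (2 * eps)  # estimates d/dx x^2 = 2x
print(fd, 2 * x)  # in float32 these agree to only ~3-4 significant digits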