Example #1
File: nn.py Project: mesnilgr/cgt
def max_pool_2d(x, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    kernel_h, kernel_w = kernelshape
    pad_h, pad_w = pad
    stride_h, stride_w = stride
    info = PoolInfo(kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w)
    if devtype == "gpu":
        return core.Result(cudnn_ops.CudnnPoolForward(info), [x])
    else:
        return core.Result(MaxPool(info), [x])[0]
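For reference, the pooling arithmetic behind MaxPool can be written directly in NumPy. A minimal loop-based sketch, assuming a float NCHW input and the usual floor((size + 2*pad - kernel) / stride) + 1 output shape (max_pool_2d_np is an illustrative name, not cgt API):

import numpy as np

def max_pool_2d_np(x, kernelshape, pad=(0, 0), stride=(1, 1)):
    # x is float NCHW; padded cells are -inf so they never win the max.
    kh, kw = kernelshape
    ph, pw = pad
    sh, sw = stride
    b, c, h, w = x.shape
    xp = np.full((b, c, h + 2 * ph, w + 2 * pw), -np.inf, dtype=x.dtype)
    xp[:, :, ph:ph + h, pw:pw + w] = x
    # floor-mode output shape: floor((size + 2*pad - kernel) / stride) + 1
    oh = (h + 2 * ph - kh) // sh + 1
    ow = (w + 2 * pw - kw) // sw + 1
    out = np.empty((b, c, oh, ow), dtype=x.dtype)
    for i in range(oh):
        for j in range(ow):
            out[:, :, i, j] = xp[:, :, i * sh:i * sh + kh,
                                 j * sw:j * sw + kw].max(axis=(2, 3))
    return out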
Example #2
def pullback(self, inputs, _output, gout):
    X, W, b = inputs
    # pass in an extra first argument to make output shape computation simpler
    return [
        core.Result(
            CudnnConvBackwardData(self.ph, self.pw, self.sv, self.sh),
            [X, gout, W]),
        core.Result(
            CudnnConvBackwardFilter(self.ph, self.pw, self.sv, self.sh),
            [W, gout, X]),
        core.Result(
            CudnnConvBackwardBias(self.ph, self.pw, self.sv, self.sh),
            [b, gout])
    ]
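A pullback returns one gradient per forward input, in order, so the list above reads as [gX, gW, gb] for the forward inputs [X, W, b]. The bias term is simple enough to sketch in NumPy (hypothetical helper, assuming the bias broadcast as (1, L, 1, 1) over batch and spatial dimensions):

import numpy as np

def conv_backward_bias_np(gout):
    # gout has shape (B, L, m, n); a bias broadcast as (1, L, 1, 1) in the
    # forward pass gets its gradient by summing over batch and spatial axes.
    return gout.sum(axis=(0, 2, 3)).reshape(1, -1, 1, 1)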
Example #3
def max_pool_2d(x, kernelshape, pad=(0, 0), stride=(1, 1)):
    kernel_h, kernel_w = kernelshape
    pad_h, pad_w = pad
    stride_h, stride_w = stride
    return core.Result(
        MaxPool(PoolInfo(kernel_h, kernel_w, pad_h, pad_w, stride_h,
                         stride_w)), [x])[0]
Example #4
File: nn.py Project: x724/cgt
def softplus(x):
    op = core.ElwiseUnary(
        "softplus",
        core.UnaryInfo("SoftPlus", _nu_softplus, True, 'f',
                       lambda x, g, gy: gy / (cgt.exp(-x) + 1.0),
                       "(x > 0) ? (x + log(exp(-x) + 1)) : log(1+exp(x))"))
    return core.Result(op, [x])
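The C string above is the numerically stable evaluation of log(1 + exp(x)), and the lambda is its derivative: gy times the logistic sigmoid. A NumPy sketch of the same two formulas (illustrative names, not cgt API):

import numpy as np

def softplus_np(x):
    # Equivalent to the branchy C expression above: max(x, 0) + log1p(exp(-|x|))
    # never exponentiates a large positive value, so it cannot overflow.
    x = np.asarray(x, dtype=float)
    return np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))

def softplus_grad_np(x, gy):
    # Matches the pullback lambda: gy / (exp(-x) + 1) is gy * sigmoid(x).
    return gy / (np.exp(-x) + 1.0)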
Example #5
File: nn.py Project: mesnilgr/cgt
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    L, K, r, c = f_LKrc.shape
    if devtype == "gpu":
        b_1K11 = cgt.zeros((1, L, 1, 1), cgt.floatX)
        return core.Result(
            cudnn_ops.CudnnConvForward(pad[0], pad[1], stride[0], stride[1]),
            [x_BKRC, f_LKrc, b_1K11])
    else:
        assert devtype == "cpu"
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        f_LZ = f_LKrc.reshape([L, K * r * c])
        B, m, n, Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B * m * n, Z])
        col_Bmn_L = core.Result(core.Mul22(False, True), [col_Bmn_Z, f_LZ])
        return col_Bmn_L.reshape([B, m, n, L]).transpose([0, 3, 1, 2])
Example #6
File: nn.py Project: x724/cgt
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
    L, K, r, c = f_LKrc.shape
    f_LZ = f_LKrc.reshape([L, K * r * c])
    B, m, n, Z = col_BmnZ.shape
    col_Bmn_Z = col_BmnZ.reshape([B * m * n, Z])
    col_Bmn_L = core.Result(core.Mul22(False, True), [col_Bmn_Z, f_LZ])
    return col_Bmn_L.reshape([B, m, n, L]).transpose([0, 3, 1, 2])
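This is the classic im2col reduction: every output pixel's receptive field becomes one row, so the convolution collapses to a single (B*m*n, Z) x (Z, L) matrix multiply, which is what core.Mul22(False, True) expresses. A self-contained NumPy sketch under the same NCHW / L-K-r-c layout (illustrative names, not cgt API):

import numpy as np

def im2col_np(x, kernelshape, pad, stride):
    # Gather every kernel-sized patch into the last axis: (B, m, n, K*r*c).
    kh, kw = kernelshape
    ph, pw = pad
    sh, sw = stride
    b, k, h, w = x.shape
    xp = np.zeros((b, k, h + 2 * ph, w + 2 * pw), dtype=x.dtype)
    xp[:, :, ph:ph + h, pw:pw + w] = x
    m = (h + 2 * ph - kh) // sh + 1
    n = (w + 2 * pw - kw) // sw + 1
    col = np.empty((b, m, n, k * kh * kw), dtype=x.dtype)
    for i in range(m):
        for j in range(n):
            patch = xp[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            col[:, i, j, :] = patch.reshape(b, -1)
    return col

def conv2d_np(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    # Same algebra as the example above: one GEMM, then reshape to BLmn.
    L = f_LKrc.shape[0]
    col = im2col_np(x_BKRC, kernelshape, pad, stride)
    B, m, n, Z = col.shape
    out = col.reshape(B * m * n, Z).dot(f_LKrc.reshape(L, Z).T)
    return out.reshape(B, m, n, L).transpose(0, 3, 1, 2)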
Example #7
def pullback(self, inputs, output, goutput):
    if self.pullback_impl is None:
        raise core.MethodNotDefined
    pb_input_types = self.input_types + [self.output_type] * 2
    pb_output_type = core.TupleType(*self.input_types)
    pbop = EasyCustomOp(
        pb_input_types, pb_output_type,
        forward_impl=self.pullback_impl, pullback_impl=None,
        shapefun=lambda *args: tuple(cgt.shape(x) for x in inputs))
    return cgt.core.unpack(core.Result(pbop, inputs + [output, goutput]))
Example #8
def test_multi_output():
    for x in (cgt.scalar('x'), cgt.vector('x'), cgt.matrix('x')):
        for cls in (SinCos, SinCos2):
            y, z = core.unpack(core.Result(cls(), [x]))
            xnum = np.ones((3, ) * x.ndim, cgt.floatX)
            correct = (np.sin(xnum), np.cos(xnum))
            yznum = cgt.numeric_eval([y, z], {x: xnum})
            np.testing.assert_allclose(yznum, correct)
            f = cgt.function([x], [y, z])
            np.testing.assert_allclose(f(xnum), correct)
Example #9
def test_scalars():
    np.random.seed(0)
    x = cgt.scalar('x')
    y = cgt.scalar('y')
    z = cgt.scalar('z')
    vars = [x, y, z]  # pylint: disable=W0622
    vals = nr.rand(len(vars)) + 1

    PROB2RESULT = {}

    for (key, _), cls in it.chain(
            it.izip(core.UNARY_INFO.items(), it.repeat(core.ElwiseUnary)),
            it.izip(core.BINARY_INFO.items(), it.repeat(core.ElwiseBinary))):
        if key == "conj":
            print "skipping conj"
            continue
        utils.colorprint(utils.Color.YELLOW, "Testing %s\n" % key)
        if cls == core.ElwiseUnary:
            n_in = 1
            op = cls(key)
        else:
            n_in = 2
            op = cls(key, (True, True))
        inputvars = vars[0:n_in]
        inputvals = vals[0:n_in]
        out = core.Result(op, inputvars)
        f = cgt.function(inputvars, out)
        try:
            grads = cgt.grad(out, inputvars)
        except core.NonDifferentiable:
            print "nondiff"
            continue
        if DISPLAY:
            print "Function:"
            cgt.print_tree(out)
            print "Gradient original:"
            cgt.print_tree(grads)
            print "Gradient simplified:"
        grads_simple = core.simplify(grads)
        if DISPLAY:
            cgt.print_tree(grads_simple)
        gradf = cgt.function(inputvars, grads)
        eps = {"single": 1e-4, "double": 1e-9}[cgt.get_precision()]
        nugrad = numeric_grad(lambda li: f(*li), inputvals, eps=eps)  # pylint: disable=W0640
        cgtgrad = gradf(*inputvals)
        np.testing.assert_almost_equal(
            nugrad, cgtgrad,
            decimal={"single": 3, "double": 6}[cgt.get_precision()])

        grad_count = core.count_nodes(grads_simple)
        PROB2RESULT[key] = {}
        PROB2RESULT[key]["grad"] = grad_count

    if DISPLAY:
        from thirdparty.tabulate import tabulate
        print tabulate([[key, val["grad"]] for (key, val) in PROB2RESULT.iteritems()],
                       headers=["funcname", "gradcount"])
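The test validates cgt.grad against finite differences. A minimal central-difference sketch of what a numeric_grad helper typically computes (the test's actual helper may use a different scheme; numeric_grad_np is an illustrative name):

import numpy as np

def numeric_grad_np(f, xs, eps=1e-6):
    # Central differences: perturb each input by +/- eps and difference
    # the scalar output f(xs).
    xs = np.asarray(xs, dtype=float)
    g = np.zeros_like(xs)
    for i in range(len(xs)):
        hi = xs.copy()
        lo = xs.copy()
        hi[i] += eps
        lo[i] -= eps
        g[i] = (f(hi) - f(lo)) / (2 * eps)
    return g

# e.g. numeric_grad_np(lambda v: np.sin(v[0]) * v[1], [1.0, 2.0])
# is close to [2*cos(1), sin(1)].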
Example #10
class Im2Col(core.Op):
    available_impls = ("native_cpu", )

    def __init__(self, info):
        assert info.stride_h > 0 and info.stride_w > 0
        self.info = info

    def get_diff(self, _):
        return [True]

    def get_py_impl(self):
        raise core.MethodNotDefined

    def pullback(self, (x, ), _y, gy):
        return [core.Result(Col2Im(self.info), [gy] + cgt.shape(x))]
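Col2Im here is the adjoint of Im2Col: where the forward op gathers overlapping patches, the pullback scatter-adds the patch gradients back into the image. A NumPy sketch matching im2col_np above (illustrative name, not cgt API):

import numpy as np

def col2im_np(col, kernelshape, pad, stride, xshape):
    # Scatter-add each patch gradient back, accumulating where windows overlap,
    # then crop away the padding.
    kh, kw = kernelshape
    ph, pw = pad
    sh, sw = stride
    b, k, h, w = xshape
    gxp = np.zeros((b, k, h + 2 * ph, w + 2 * pw), dtype=col.dtype)
    _, m, n, _ = col.shape
    for i in range(m):
        for j in range(n):
            gxp[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw] += \
                col[:, i, j, :].reshape(b, k, kh, kw)
    return gxp[:, :, ph:ph + h, pw:pw + w]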
Example #11
class MaxPool(core.Op):
    available_impls = ("native_cpu", )

    def __init__(self, info):
        assert info.stride_h > 0 and info.stride_w > 0
        self.info = info

    def get_diff(self, _):
        return [True]

    def get_py_impl(self):
        raise core.MethodNotDefined

    def pullback(self, (x, ), y, gy):
        pool, mask = core.unpack(y)
        gpool, _gmask = gy
        return [
            core.Result(MaxPoolPullback(self.info), [x, pool, mask, gpool])
        ]
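The forward op returns both the pooled values and an argmax mask, so MaxPoolPullback can route each output gradient to the winning input cell without recomputing the max. A NumPy sketch of that routing, recomputing the argmax for clarity (no padding assumed; illustrative name):

import numpy as np

def max_pool_backward_np(x, gpool, kernelshape, stride):
    # Route each output-cell gradient to the argmax position of its window.
    kh, kw = kernelshape
    sh, sw = stride
    b, c, oh, ow = gpool.shape
    gx = np.zeros_like(x)
    bi, ci = np.indices((b, c))
    for i in range(oh):
        for j in range(ow):
            win = x[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            idx = win.reshape(b, c, -1).argmax(axis=2)
            gx[bi, ci, i * sh + idx // kw, j * sw + idx % kw] += \
                gpool[:, :, i, j]
    return gx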
Example #12
def im2col(x, kernelshape, pad, stride):
    assert stride[0] > 0 and stride[1] > 0
    assert kernelshape[0] > 0 and kernelshape[1] > 0
    kernelshape, pad, stride = map(tuple, (kernelshape, pad, stride))
    return core.Result(Im2Col(Im2ColInfo(*(kernelshape + pad + stride))), [x])
Example #13
def pullback(self, inputs, output, gout):
    top, scaling = cgt.core.unpack(output)
    gtop, _ = gout
    return [core.Result(CrossChannelLRNBackward(self.info),
                        [inputs[0], top, scaling, gtop])]
Example #14
def cross_channel_lrn(X, localsize, alpha, beta):
    assert X.ndim == 4
    return core.Result(
        CrossChannelLRNForward(LRNInfo(localsize, alpha, beta)), [X])[0]
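Cross-channel LRN normalizes each activation by a local sum of squares taken across neighboring channels. A NumPy sketch of the common AlexNet/Caffe-style formula; the exact constants in cgt's kernel (the 1 offset and the alpha/localsize scaling) are an assumption here:

import numpy as np

def cross_channel_lrn_np(X, localsize, alpha, beta):
    # X is NCHW; each channel is divided by
    # (1 + alpha/localsize * sum of squares over nearby channels) ** beta.
    b, c, h, w = X.shape
    sq = X ** 2
    scale = np.ones_like(X)
    half = localsize // 2
    for i in range(c):
        lo, hi = max(0, i - half), min(c, i + half + 1)
        scale[:, i] += (alpha / localsize) * sq[:, lo:hi].sum(axis=1)
    return X / scale ** beta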
Example #15
def matmat00a(X, Y):
    if isinstance(X, np.ndarray):
        return np.dot(X, Y).sum()
    else:
        return sum(core.Result(core.Mul22(False, False), [X, Y]))
Example #16
def matmat11a(X, Y):
    if isinstance(X, np.ndarray):
        return np.dot(X.T, Y.T).sum()
    else:
        return sum(core.Result(core.Mul22(True, True), [X, Y]))
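Both helpers suggest that core.Mul22's two boolean flags select transposes on the operands, BLAS GEMM style. A one-line NumPy reading of that (assumed semantics; illustrative name):

import numpy as np

def mul22_np(X, Y, transpose_x=False, transpose_y=False):
    # Matrix-matrix product with an optional transpose on either operand.
    A = X.T if transpose_x else X
    B = Y.T if transpose_y else Y
    return A.dot(B)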
Example #17
def pullback(self, inputs, output, gout):
    return [
        core.Result(CudnnPoolBackward(self.info),
                    [inputs[0], output, gout])
    ]