Code example #1
def tinyconv_model(X, w, w2, p_drop):
    # one 3x3 convolution (stride 3) with dropout; the flattened features feed a softmax layer
    l1 = nn.conv2d(X, w, kernelshape=(3, 3), pad=(1, 1), stride=(3, 3))
    l1a = nn.dropout(l1, p_drop)
    batchsize, channels, rows, cols = l1a.shape
    l1flat = cgt.reshape(l1a, [batchsize, channels * rows * cols])
    pyx = nn.softmax(l1flat.dot(w2))
    return l1, pyx
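The listing above only shows the model definition, so a short usage sketch follows. It is not taken from demo_mnist.py: the shapes, the random data, and the assumption that nn refers to cgt.nn and that nn.dropout accepts a plain float probability are all illustrative. With a 9x9 input, the 3x3 convolution with pad (1, 1) and stride (3, 3) yields a 3x3 feature map, so the softmax weight matrix needs 4 * 3 * 3 = 36 rows.

# Hypothetical usage sketch (not from the project). Wrapping data and weights in
# cgt.constant lets the compiled function take no arguments, as in the tests below.
import numpy as np
import cgt
from cgt import nn

X  = cgt.constant(np.random.randn(8, 1, 9, 9).astype(cgt.floatX))     # batch of 8, one channel
w  = cgt.constant(np.random.randn(4, 1, 3, 3).astype(cgt.floatX))     # 4 conv filters
w2 = cgt.constant(np.random.randn(4 * 3 * 3, 10).astype(cgt.floatX))  # 36 features -> 10 classes
l1, pyx = tinyconv_model(X, w, w2, 0.2)
f = cgt.function([], pyx)
print(f().shape)   # expected: (8, 10)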
Code example #2
File: demo_mnist.py Project: EdsterG/cgt
def tinyconv_model(X, w, w2, p_drop):
    l1 = nn.conv2d(X, w, kernelshape=(3, 3), pad=(1, 1), stride=(3, 3))
    l1a = nn.dropout(l1, p_drop)
    batchsize, channels, rows, cols = l1a.shape
    l1flat = cgt.reshape(l1a, [batchsize, channels * rows * cols])
    pyx = nn.softmax(l1flat.dot(w2))
    return l1, pyx
Code example #3
File: test_conv.py Project: EdsterG/cgt
def test_conv():
    try:
        import scipy.signal
    except ImportError:
        raise SkipTest("skipping because we don't have scipy.signal")

    np.random.seed(0)
    x = np.random.randn(2,2,5,17)
    filt = np.random.randn(3,2,4,7)

    filtrows = filt.shape[2]
    filtcols = filt.shape[3]

    batchsize = x.shape[0]
    outchans = filt.shape[0]

    out = np.zeros((batchsize,outchans,x.shape[2]+filtrows-1,x.shape[3]+filtcols-1))
    for b in xrange(x.shape[0]):
        for inchan in xrange(x.shape[1]):
            for outchan in xrange(outchans):
                out[b,outchan] += scipy.signal.convolve2d(x[b,inchan],filt[outchan,inchan][::-1,::-1],mode='full')

    f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[cgt.get_precision()])
Code example #4
File: test_conv.py Project: ketranm/cgt
 def check_conv(precision):
     cgt.reset_config()
     cgt.set_precision(precision)
     f = cgt.function([], nn.conv2d(cgt.constant(x), cgt.constant(filt), kernelshape=(filtrows,filtcols), pad=(filtrows-1, filtcols-1)))
     out1 = f()
     # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
     np.testing.assert_allclose(out, out1, atol={"single":1e-3,"double":1e-6}[precision])
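The leading indentation suggests check_conv is a nested helper inside a larger test in test_conv.py; the enclosing code is not part of the excerpt. Presumably the test calls it once per supported precision, along the lines of the assumed driver below.

# Assumed driver for the nested helper above; not shown in the excerpt.
for precision in ("single", "double"):
    check_conv(precision)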
Code example #5
def convnet_model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = nn.rectify(nn.conv2d(X, w, kernelshape=(3, 3), pad=(1, 1)))
    l1 = nn.max_pool_2d(l1a, kernelshape=(2, 2), stride=(2, 2))
    l1 = nn.dropout(l1, p_drop_conv)

    l2a = nn.rectify(nn.conv2d(l1, w2, kernelshape=(3, 3), pad=(1, 1)))
    l2 = nn.max_pool_2d(l2a, kernelshape=(2, 2), stride=(2, 2))
    l2 = nn.dropout(l2, p_drop_conv)

    l3a = nn.rectify(nn.conv2d(l2, w3, kernelshape=(3, 3), pad=(1, 1)))
    l3b = nn.max_pool_2d(l3a, kernelshape=(2, 2), stride=(2, 2))
    batchsize, channels, rows, cols = l3b.shape
    l3 = cgt.reshape(l3b, [batchsize, channels * rows * cols])
    l3 = nn.dropout(l3, p_drop_conv)

    l4 = nn.rectify(cgt.dot(l3, w4))
    l4 = nn.dropout(l4, p_drop_hidden)

    pyx = nn.softmax(cgt.dot(l4, w_o))
    return pyx
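As with the tiny model above, only the definition is listed, so here is a hypothetical instantiation. The shapes are assumptions chosen so the arithmetic stays exact: a 32x32 single-channel input is halved by each of the three 2x2 poolings (32 -> 16 -> 8 -> 4), so the first dense weight needs 128 * 4 * 4 rows. The filter counts (32, 64, 128), the 625-unit hidden layer, and the use of cgt.shared with fixed_shape_mask="all" for the parameters are illustrative choices, not quoted from demo_mnist.py.

# Hypothetical instantiation of convnet_model; every shape here is an assumption.
import numpy as np
import cgt
from cgt import nn

rng = np.random.RandomState(0)

def init_weight(shape):
    # shared (trainable) parameter with small random initial values; fixing the shape
    # lets cgt infer the layer sizes when the graph is built
    return cgt.shared((rng.randn(*shape) * 0.01).astype(cgt.floatX), fixed_shape_mask="all")

X   = cgt.constant(rng.randn(4, 1, 32, 32).astype(cgt.floatX))
w   = init_weight((32, 1, 3, 3))
w2  = init_weight((64, 32, 3, 3))
w3  = init_weight((128, 64, 3, 3))
w4  = init_weight((128 * 4 * 4, 625))
w_o = init_weight((625, 10))

pyx = convnet_model(X, w, w2, w3, w4, w_o, 0.2, 0.5)
f = cgt.function([], pyx)
print(f().shape)   # expected: (4, 10)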
Code example #6
File: demo_mnist.py Project: EdsterG/cgt
def convnet_model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = nn.rectify(nn.conv2d(X, w, kernelshape=(3,3), pad=(1,1)))
    l1 = nn.max_pool_2d(l1a, kernelshape=(2, 2), stride=(2,2))
    l1 = nn.dropout(l1, p_drop_conv)

    l2a = nn.rectify(nn.conv2d(l1, w2, kernelshape=(3,3), pad=(1,1)))
    l2 = nn.max_pool_2d(l2a, kernelshape=(2, 2), stride=(2,2))
    l2 = nn.dropout(l2, p_drop_conv)

    l3a = nn.rectify(nn.conv2d(l2, w3, kernelshape=(3,3), pad=(1,1)))
    l3b = nn.max_pool_2d(l3a, kernelshape=(2, 2), stride=(2,2))
    batchsize,channels,rows,cols = l3b.shape
    l3 = cgt.reshape(l3b, [batchsize, channels*rows*cols])
    l3 = nn.dropout(l3, p_drop_conv)

    l4 = nn.rectify(cgt.dot(l3, w4))
    l4 = nn.dropout(l4, p_drop_hidden)
    
    pyx = nn.softmax(cgt.dot(l4, w_o))
    return pyx
Code example #7
 def check_conv(precision):
     cgt.reset_config()
     cgt.set_precision(precision)
     f = cgt.function([],
                      nn.conv2d(cgt.constant(x),
                                cgt.constant(filt),
                                kernelshape=(filtrows, filtcols),
                                pad=(filtrows - 1, filtcols - 1)))
     out1 = f()
     # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
     np.testing.assert_allclose(out,
                                out1,
                                atol={
                                    "single": 1e-3,
                                    "double": 1e-6
                                }[precision])
Code example #8
def test_conv():
    try:
        import scipy.signal
    except ImportError:
        raise SkipTest("skipping because we don't have scipy.signal")

    np.random.seed(0)
    x = np.random.randn(2, 2, 5, 17)
    filt = np.random.randn(3, 2, 4, 7)

    filtrows = filt.shape[2]
    filtcols = filt.shape[3]

    batchsize = x.shape[0]
    outchans = filt.shape[0]

    out = np.zeros((batchsize, outchans, x.shape[2] + filtrows - 1,
                    x.shape[3] + filtcols - 1))
    for b in xrange(x.shape[0]):
        for inchan in xrange(x.shape[1]):
            for outchan in xrange(outchans):
                out[b, outchan] += scipy.signal.convolve2d(
                    x[b, inchan],
                    filt[outchan, inchan][::-1, ::-1],
                    mode='full')

    f = cgt.function([],
                     nn.conv2d(cgt.constant(x),
                               cgt.constant(filt),
                               kernelshape=(filtrows, filtcols),
                               pad=(filtrows - 1, filtcols - 1)))
    out1 = f()
    # out1 = cgt.numeric_eval1(nn.conv2d(cgt.constant(x), cgt.constant(f), kersize=(filtrows,filtcols)), {})
    np.testing.assert_allclose(out,
                               out1,
                               atol={
                                   "single": 1e-3,
                                   "double": 1e-6
                               }[cgt.get_precision()])
Code example #9
File: caffe2cgt.py Project: zxie/cgt
     Wshape = (param.num_output, nchanin, kh, kw)
     Wname = layer.param[0].name or layer.name + ":W"
     Wval = np.empty(Wshape, dtype=cgt.floatX)
     W = name2node[Wname] = cgt.shared(Wval,
                                       name=Wname,
                                       fixed_shape_mask="all")
     bshape = (1, param.num_output, 1, 1)
     bname = layer.param[1].name or layer.name + ":b"
     bval = np.empty(bshape, dtype=cgt.floatX)
     b = name2node[bname] = cgt.shared(bval,
                                       name=bname,
                                       fixed_shape_mask="all")
     sh,sw = (param.stride, param.stride) if param.HasField("stride")\
         else (param.stride_h, param.stride_w)
     output = [
         cgt.broadcast("+", nn.conv2d(X, W, subsample=(sh, sw)), b,
                       "xxxx,1x11")
     ]
 elif layer.type == "Pooling":
     param = layer.pooling_param
     X = inputs[0]
     pool_type = {param.MAX: "max", param.AVE: "mean"}[param.pool]
     height_in, width_in = infer_shape(X)[2:4]
     kernel = (param.kernel_size, param.kernel_size) if param.HasField("kernel_size")\
         else (param.kernel_h, param.kernel_w)
     stride = (param.stride, param.stride) if param.HasField("stride")\
         else (param.stride_h, param.stride_w)
     pad = (param.pad, param.pad) if param.HasField("pad")\
         else (param.pad_h, param.pad_w)
     output = [nn.pool(pool_type, X, stride, kernel, pad)]
 elif layer.type == "InnerProduct":
Code example #10
File: caffe2cgt.py Project: EdsterG/cgt
     X = inputs[0]
     param = layer.convolution_param
     kh,kw = (param.kernel_size, param.kernel_size) if param.HasField("kernel_size")\
         else (param.kernel_h, param.kernel_w)
     nchanin = infer_shape(X)[1]  # channel dim of the NCHW input blob
     Wshape = (param.num_output, nchanin, kh, kw)
     Wname = layer.param[0].name or layer.name+":W"
     Wval = np.empty(Wshape, dtype=cgt.floatX)
     W = name2node[Wname] = cgt.shared(Wval, name=Wname, fixed_shape_mask="all")
     bshape = (1, param.num_output, 1, 1)
     bname = layer.param[1].name or layer.name+":b"
     bval = np.empty(bshape, dtype=cgt.floatX)
     b = name2node[bname] = cgt.shared(bval, name=bname, fixed_shape_mask="all")
     sh,sw = (param.stride, param.stride) if param.HasField("stride")\
         else (param.stride_h, param.stride_w)
     output = [cgt.broadcast("+",nn.conv2d(X, W, subsample=(sh,sw)), b, "xxxx,1x11")]
 elif layer.type == "Pooling":
     param = layer.pooling_param
     X = inputs[0]
     pool_type = {param.MAX : "max", param.AVE : "mean"}[param.pool]
     height_in,width_in = infer_shape(X)[2:4]
     kernel = (param.kernel_size, param.kernel_size) if param.HasField("kernel_size")\
         else (param.kernel_h, param.kernel_w)
     stride = (param.stride, param.stride) if param.HasField("stride")\
         else (param.stride_h, param.stride_w)
     pad = (param.pad, param.pad) if param.HasField("pad")\
         else (param.pad_h, param.pad_w)
     output = [nn.pool(pool_type, X, stride, kernel, pad)]
 elif layer.type == "InnerProduct":
     X = inputs[0]
     if X.ndim == 4:
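The conversion code above adds the convolution bias with cgt.broadcast("+", ..., "xxxx,1x11"), which broadcasts the (1, num_output, 1, 1) bias across the (batch, channels, height, width) convolution output. A small standalone illustration of that pattern, with made-up shapes, is sketched below; it is not part of caffe2cgt.py.

# Illustration of the "xxxx,1x11" broadcast used above; the shapes are made up.
import numpy as np
import cgt

feat = cgt.constant(np.zeros((2, 3, 4, 4), dtype=cgt.floatX))            # stand-in for a conv output
bias = cgt.constant(np.arange(3, dtype=cgt.floatX).reshape(1, 3, 1, 1))  # one bias per output channel
y = cgt.function([], cgt.broadcast("+", feat, bias, "xxxx,1x11"))()
print(y[:, :, 0, 0])   # each channel is offset by its own bias: 0, 1, 2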