Example #1
File: test_name.py Project: zxzang/mobula
import numpy as np
import mobula.layers as L


def test_name():
    a = np.arange(10)
    b = np.zeros((3, 5))
    c = np.arange(1, 11)
    test1 = [str(L.Data(a)), str(L.Data([a, b])), str(L.Data(None))]
    test1_truth = [
        "<Data '/Data' input: (10,) num_output: (1)>",
        "<Data '/Data' input: [(10,), (3, 5)] num_output: (2)>",
        "<Data '/Data' input: None num_output: (0)>"
    ]
    print(test1)
    assert test1 == test1_truth

    test2 = [str(L.ReLU(L.Data(a))), str(L.ReLU(L.FC(L.Data(b), dim_out=10)))]
    test2_truth = [
        "<ReLU '/ReLU' input: /Data:0 num_output: (1)>",
        "<ReLU '/ReLU' input: /FC:0 num_output: (1)>"
    ]
    print(test2)
    assert test2 == test2_truth

    la, lc = L.Data([a, c])
    concat = L.Concat([la, lc], axis=0)
    test3 = [str(concat)]
    test3_truth = [
        "<Concat '/Concat' input: [/Data:0,/Data:1] num_output: (1)>"
    ]
    print(test3)
    assert test3 == test3_truth

    l = L.ReLU(a)
    test4 = [str(l)]
    test4_truth = ["<ReLU '/ReLU' input: (10,) num_output: (1)>"]
    print(test4)
    assert test4 == test4_truth
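The truth strings above encode mobula's naming scheme: each layer gets a path-like name such as '/Data' or '/ReLU', and name:i refers to that layer's i-th output blob. A quick interactive check of the same convention (reusing the test's imports, in a fresh session so the default names match):

a = np.arange(10)
relu = L.ReLU(L.Data(a))
print(relu)  # <ReLU '/ReLU' input: /Data:0 num_output: (1)>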
Example #2
    def __init__(self, X, labels):
        # Build LeNet-5: two conv/pool/ReLU stages, two FC layers, softmax loss.
        data, label = L.Data([X, labels], "data", batch_size=100)
        conv1 = L.Conv(data, dim_out=20, kernel=5)
        pool1 = L.Pool(conv1, pool=L.Pool.MAX, kernel=2, stride=2)
        relu1 = L.ReLU(pool1)
        conv2 = L.Conv(relu1, dim_out=50, kernel=5)
        pool2 = L.Pool(conv2, pool=L.Pool.MAX, kernel=2, stride=2)
        relu2 = L.ReLU(pool2)
        fc3 = L.FC(relu2, dim_out=500)
        relu3 = L.ReLU(fc3)
        pred = L.FC(relu3, "pred", dim_out=10)
        loss = L.SoftmaxWithLoss(pred, "loss", label=label)

        # Net Instance
        self.net = mobula.Net()

        # Set Loss Layer
        self.net.set_loss(loss)
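The __init__ above is shown without its enclosing class. A minimal sketch of how it might be wrapped, with the class name and usage invented here for illustration (they are not part of the original excerpt):

import numpy as np
import mobula
import mobula.layers as L

class LeNet5:  # hypothetical wrapper name
    def __init__(self, X, labels):
        # body as in the excerpt above: build the layer graph, create
        # self.net = mobula.Net(), and register the loss with set_loss()
        ...

# model = LeNet5(X, labels)  # X: (n, 1, 28, 28) images, labels: (n,) ids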
Example #3
import numpy as np
import mobula.layers as L


def test_relu():
    # Input and data layer set up as in Example #4; the original excerpt
    # left X and data undefined.
    X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
    data = L.Data(X, "data")
    data.reshape()
    l = L.ReLU(data, "ReLU")
    l.reshape()
    assert l.Y.shape == X.shape
    l.forward()
    l.dY = np.random.random(l.Y.shape) * 10
    l.backward()
    Y = np.zeros(X.shape)
    b = (X > 0)
    Y[b] = X[b]
    dX = np.zeros(X.shape)
    dX[b] = l.dY[b]
    '''
    d = (l.dX != dX)
    print (l.dX[d], dX[d])
    '''
    assert np.allclose(l.Y.ravel(), Y.ravel())
    assert np.allclose(l.dX.ravel(), dX.ravel())
Example #4
import numpy as np
import mobula.layers as L


def test_relu():
    X = ((np.arange(10000) - 5000) / 1000.0).reshape((-1, 1, 1, 1))
    data = L.Data(X, "data")
    data.reshape()
    l = L.ReLU(data)
    l.reshape()
    assert l.Y.shape == X.shape
    l.forward()
    l.dY = np.random.random(l.Y.shape) * 10
    l.backward()
    Y = np.zeros(X.shape)
    b = (X > 0)
    Y[b] = X[b]
    dX = np.zeros(X.shape)
    dX[b] = l.dY[b]
    '''
    d = (l.dX != dX)
    print (l.dX[d], dX[d])
    '''
    assert np.allclose(l.Y.ravel(), Y.ravel())
    assert np.allclose(l.dX.ravel(), dX.ravel())
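Both ReLU tests re-derive the layer by hand: the forward pass is Y = max(X, 0), and the backward pass routes the incoming gradient only through positive inputs, dX = dY * (X > 0). The mask-based construction in the tests is equivalent to this vectorized NumPy reference:

import numpy as np

def relu_reference(X, dY):
    # Plain-NumPy ReLU forward/backward, matching the tests' expectations.
    Y = np.maximum(X, 0)   # forward: clamp negatives to zero
    dX = dY * (X > 0)      # backward: gradient passes only where X > 0
    return Y, dX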
Example #5
import numpy as np
import mobula
import mobula.layers as L
import mobula.solvers as S

# X (flattened MNIST images, n rows) and labels are assumed to be loaded
# earlier in the script.
Xmean = np.mean(X, axis=0)
np.save("xmean.npy", Xmean)

# Subtract mean and normalize
X = (X - Xmean) / 255.0

# Reshape X to NCHW: N, C, H, W = n, 1, 28, 28
X.resize((n, 1, 28, 28))

# LeNet-5
data, label = L.Data([X, labels], "data", batch_size=100)
conv1 = L.Conv(data, "conv1", dim_out=20, kernel=5)
pool1 = L.Pool(conv1, "pool1", pool=L.Pool.MAX, kernel=2, stride=2)
relu1 = L.ReLU(pool1, "relu1")
conv2 = L.Conv(relu1, "conv2", dim_out=50, kernel=5)
pool2 = L.Pool(conv2, "pool2", pool=L.Pool.MAX, kernel=2, stride=2)
relu2 = L.ReLU(pool2, "relu2")
fc3 = L.FC(relu2, "fc3", dim_out=500)
relu3 = L.ReLU(fc3, "relu3")
pred = L.FC(relu3, "pred", dim_out=10)
loss = L.SoftmaxWithLoss(pred, "loss", label=label)

# Net Instance
net = mobula.Net()

# Set Loss Layer
net.set_loss(loss)

# Set Solver
net.set_solver(S.Momentum())
Example #6
File: logo.py Project: zxzang/mobula
import numpy as np
import mobula
import mobula.layers as L
import mobula.solvers as S
# imread/imresize came from scipy.misc in older SciPy releases; the exact
# import used by the original script is assumed here.
from scipy.misc import imread, imresize

im = imread("./mobula.png")

# Resize to target_size (assumed to be defined earlier in the script)
im = imresize(im, target_size)

# Convert to grayscale using BT.601 luma weights
im = im[:, :, 0] * 0.299 + im[:, :, 1] * 0.587 + im[:, :, 2] * 0.114
h, w = im.shape

t = 1
# Target image in NCHW: reshape the gray image to NHWC, then transpose.
Y = im.reshape((1, h, w, t)).transpose((0, 3, 1, 2))
# Input: random noise with the same NCHW shape, centered at zero.
X = np.random.random((1, t, h, w)) - 0.5

data, label = L.Data([X, Y])
conv = L.Conv(data, dim_out=42, kernel=3, pad=1)
relu = L.ReLU(conv)
convt = L.ConvT(relu, dim_out=t, kernel=3, pad=1)
relu2 = L.ReLU(convt)
loss = L.MSE(relu2, label=label)

# Net Instance
net = mobula.Net()
# Set Loss Layer
net.set_loss(loss)
# Set Solver
net.set_solver(S.Momentum())

# Learning Rate
net.lr = 2e-6

start_iter = 0
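The excerpt ends just before the optimization loop. A minimal sketch of what could follow; that Net exposes forward() and backward() steps is an assumption here, as are the iteration budget and logging interval:

max_iter = 10000  # illustrative value, not from the original script
for i in range(start_iter, max_iter):
    net.forward()   # run the graph, producing the current loss
    net.backward()  # backpropagate; the solver updates the weights
    if i % 100 == 0:
        print("iter %d done" % i)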
Example #7
import numpy as np
import mobula
import mobula.layers as L
import mobula.solvers as S

# X, labels, n and Xmean are assumed to be prepared as in Example #5.
# Subtract mean and normalize
X = (X - Xmean) / 255.0

# Reshape X to NCHW: N, C, H, W = n, 1, 28, 28
X.resize((n, 1, 28, 28))

# LeNet-5
data, label = L.Data([X, labels], "data", batch_size=100)
conv1 = L.Conv(data, "conv1", dim_out=20, kernel=5)
pool1 = L.Pool(conv1, "pool1", pool=L.Pool.MAX, kernel=2, stride=2)
conv2 = L.Conv(pool1, "conv2", dim_out=50, kernel=5)
pool2 = L.Pool(conv2, "pool2", pool=L.Pool.MAX, kernel=2, stride=2)
fc3 = L.FC(pool2, "fc3", dim_out=500)
relu3 = L.ReLU(fc3, "relu3")
pred = L.FC(relu3, "pred", dim_out=10)
loss = L.SoftmaxWithLoss(pred, "loss", label=label)

# Net Instance
net = mobula.Net()

# Set Loss Layer
net.set_loss(loss)

# Set Solver
net.set_solver(S.Momentum())

# Learning Rate
net.lr = 0.2