Example #1
import numpy as np

import mobula
import mobula.layers as L

def test_fc():
    X = np.zeros((4, 2, 1, 1))
    X[0, :, 0, 0] = [0., 0.]
    X[1, :, 0, 0] = [0., 1.]
    X[2, :, 0, 0] = [1., 0.]
    X[3, :, 0, 0] = [1., 1.]

    Y = np.array([8., 10., 12., 14.]).reshape((-1, 1))

    data, label = L.Data([X, Y], "Data")()
    fc1 = L.FC(data, "fc1", dim_out=1)
    loss = L.MSE(fc1, "MSE", label=label)

    fc1.reshape()

    fc1.W = np.array([1.0, 3.0]).reshape(fc1.W.shape)
    fc1.b = np.array([0.0]).reshape(fc1.b.shape)

    net = mobula.Net()
    net.set_loss(loss)

    net.lr = 0.5
    for i in range(30):
        net.forward()
        net.backward()
        print("Iter: %d, Cost: %f" % (i, loss.Y))

    # forward once more: the last backward pass updated the weights
    net.forward()
    target = np.dot(X.reshape((4, 2)), fc1.W.T) + fc1.b
    print(target, fc1.Y)
    assert np.allclose(fc1.Y, target)
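
The targets satisfy y = 4*x1 + 2*x2 + 8 for the four rows of X, so the MSE optimum is W = [4, 2], b = 8, and training should drive fc1's parameters toward those values. A quick standalone check of that target map in plain NumPy:

import numpy as np

X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
print(X.dot(np.array([4., 2.])) + 8.)  # -> [ 8. 10. 12. 14.]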
Example #2
import numpy as np

import mobula
import mobula.layers as L

def test_fc2():
    X = np.random.random((4, 2, 1, 1))
    Y1 = np.random.random((4, 10))
    Y2 = np.random.random((4, 10))

    x, y1, y2 = L.Data([X, Y1, Y2])
    fc1 = L.FC(x, dim_out=10)
    fc2 = L.FC(x, dim_out=10)
    loss1 = L.MSE(fc1, label=y1)
    loss2 = L.MSE(fc2, label=y2)

    net = mobula.Net()
    loss = L.L1Loss(loss1 + loss2)
    net.set_loss(loss)
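    # 'loss1 + loss2' builds an intermediate Add layer; layers can be
    # retrieved afterwards by their registered names: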
    L1Loss = mobula.get_layer("L1Loss")
    Add = mobula.get_layer(L1Loss.model.name)

    net.lr = 0.5
    for i in range(30):
        net.forward()
        net.backward()
        print("Iter: %d, Cost: %f" % (i, loss.Y))
    # check forward
    t1 = np.dot(X.reshape((4, 2)), fc1.W.T) + fc1.b.T
    t2 = np.dot(X.reshape((4, 2)), fc2.W.T) + fc2.b.T
    # forward once more: the last backward pass updated the weights
    net.forward()
    assert np.allclose(fc1.Y, t1)
    assert np.allclose(fc2.Y, t2)
Example #3
    def __init__(self, X, labels):

        data, label = L.Data([X, labels], "data", batch_size=100)()
        conv1 = L.Conv(data, "conv1", dim_out=20, kernel=5)
        pool1 = L.Pool(conv1, "pool1", pool=L.Pool.MAX, kernel=2, stride=2)
        conv2 = L.Conv(pool1, "conv2", dim_out=50, kernel=5)
        pool2 = L.Pool(conv2, "pool2", pool=L.Pool.MAX, kernel=2, stride=2)
        fc3 = L.FC(pool2, "fc3", dim_out=500)
        relu3 = L.ReLU(fc3, "relu3")
        pred = L.FC(relu3, "pred", dim_out=10)
        loss = L.SoftmaxWithLoss(pred, "loss", label=label)

        # Net Instance
        self.net = mobula.Net()

        # Set Loss Layer
        self.net.set_loss(loss)
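
This constructor wires up the same LeNet-5 pipeline as Example #7 below, minus the ReLU activations after the two pooling stages.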
Example #4
import os

import numpy as np

import mobula as M
import mobula.layers as L

def test_saver():
    filename = "tmp.net"

    X = np.random.random((4, 2, 1, 1))
    Y = np.random.random((4, 10))
    x, y = L.Data([X, Y])
    fc = L.FC(x, dim_out=10)
    with M.name_scope("mobula"):
        prelu = L.PReLU(fc)
    loss = L.MSE(prelu, label=y)

    net = M.Net()
    net.set_loss(loss)

    init_params(fc)
    init_params(prelu)
    # save only the "mobula" scope (just the PReLU parameters)
    M.save_scope(filename, "mobula")

    params_f = clear_params(fc)
    params_p = clear_params(prelu)
    for p in fc.params + prelu.params:
        assert np.isnan(p).all()
    M.load_scope(filename)
    for p in fc.params:
        assert np.isnan(p).all()
    for i, p in enumerate(prelu.params):
        assert np.allclose(p, params_p[i])

    init_params(fc)
    init_params(prelu)
    # save all
    M.save_scope(filename)

    params_f = clear_params(fc)
    params_p = clear_params(prelu)

    for p in fc.params + prelu.params:
        assert np.isnan(p).all()
    M.load_scope(filename)
    for i, p in enumerate(fc.params):
        assert np.allclose(p, params_f[i])
    for i, p in enumerate(prelu.params):
        assert np.allclose(p, params_p[i])
    os.remove(filename)
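
test_saver relies on two helpers, init_params and clear_params, that are not shown above. A minimal sketch consistent with how they are used here (an assumption about the test harness, not mobula's actual code):

import numpy as np

def init_params(layer):
    # fill every parameter blob with random values, in-place
    for p in layer.params:
        p[...] = np.random.random(p.shape)

def clear_params(layer):
    # keep copies of the parameters, then overwrite them with NaN in-place
    saved = [p.copy() for p in layer.params]
    for p in layer.params:
        p[...] = np.nan
    return saved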
Example #5
import os

import numpy as np

import mobula as M
import mobula.layers as L

def test_net_saver():
    filename = "tmp.net"

    X = np.random.random((4, 2, 1, 1))
    Y = np.random.random((4, 10))
    x, y = L.Data([X, Y])
    x = L.FC(x, dim_out=10)
    with M.name_scope("mobula"):
        x = L.PReLU(x)
    loss = L.MSE(x, label=y)

    net = M.Net()
    net.set_loss(loss)
    net.lr = 0.01

    for i in range(10):
        net.forward()
        net.backward()

    net.save(filename)
    # record and wipe the parameters of every layer
    lst = M.get_layers("/")
    assert len(lst) == 4  # Data, FC, PReLU, MSE

    k = 0
    rec = []
    for l in lst:
        for i in range(len(l.params)):
            rec.append(l.params[i])
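            # assigning None into a float ndarray stores NaN in every element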
            l.params[i][...] = None
            k += 1
    assert k == 3 # FC.W, FC.b, PReLU.a 

    for l in lst:
        for i in range(len(l.params)):
            assert np.isnan(l.params[i]).all()

    net.load(filename)
    h = 0
    for l in lst:
        for i in range(len(l.params)):
            assert np.allclose(rec[h], l.params[i])
            h += 1
    os.remove(filename)
Example #6
import numpy as np

import mobula
import mobula.layers as L

def test_net():
    X = np.random.random((4, 2, 1, 1))
    Y1 = np.random.random((4, 5))
    Y2 = np.random.random((4, 5))

    x, y1, y2 = L.Data([X, Y1, Y2])
    fc0 = L.FC(x, dim_out=10)
    fc1 = L.FC(fc0, dim_out=5)
    fc2 = L.FC(fc0, dim_out=5)

    loss1 = L.MSE(fc1, label=y1)
    loss2 = L.MSE(fc2, label=y2)

    net = mobula.Net()
    net.set_loss(loss1 + loss2)

    net.lr = 0.01
    for i in range(10):
        net.forward()
        net.backward()
        net.time()
        print("Iter: %d, Cost: %f" % (i, loss1.Y + loss2.Y))

    assert np.allclose(fc0.dY, fc1.dX + fc2.dX)
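
Note that fc0 feeds both fc1 and fc2, so the final assert verifies that the gradient arriving at fc0's output is the sum of the two branches' input gradients.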
Example #7
# LeNet-5
import mobula
import mobula.layers as L
import mobula.solvers as S

# X, labels: training images and labels, loaded elsewhere (e.g. MNIST)
data, label = L.Data([X, labels], "data", batch_size=100)
conv1 = L.Conv(data, "conv1", dim_out=20, kernel=5)
pool1 = L.Pool(conv1, "pool1", pool=L.Pool.MAX, kernel=2, stride=2)
relu1 = L.ReLU(pool1, "relu1")
conv2 = L.Conv(relu1, "conv2", dim_out=50, kernel=5)
pool2 = L.Pool(conv2, "pool2", pool=L.Pool.MAX, kernel=2, stride=2)
relu2 = L.ReLU(pool2, "relu2")
fc3   = L.FC(relu2, "fc3", dim_out=500)
relu3 = L.ReLU(fc3, "relu3")
pred  = L.FC(relu3, "pred", dim_out=10)
loss = L.SoftmaxWithLoss(pred, "loss", label=label)

# Net Instance
net = mobula.Net()

# Set Loss Layer
net.set_loss(loss)

# Set Solver
solver = S.Momentum(gamma=0.1, stepsize=1000)
solver.lr_policy = S.LR_POLICY.STEP
net.set_solver(solver)

# Learning Rate
net.lr = 0.005

'''
If start_iter > 0, load the existing model and continue training.
Otherwise, initialize the weights and start training from scratch.
'''
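
A minimal sketch of that resume logic; start_iter, the checkpoint path, and the iteration count are illustrative assumptions, while net.save and net.load are the calls shown in Example #5:

start_iter = 0  # set > 0 to resume from a checkpoint (illustrative)
model_path = "lenet.net"  # illustrative checkpoint path

if start_iter > 0:
    net.load(model_path)  # continue training from the saved weights

for i in range(start_iter, 10000):  # iteration count chosen arbitrarily
    net.forward()
    net.backward()
    if (i + 1) % 1000 == 0:
        net.save(model_path)  # checkpoint periodically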