# Beispiel #1 (Example 1): LeNet-style CNN classifier trained with mobula
# LeNet-style classifier head. Continues from layers defined earlier in
# the full script (data / conv1 / relu1 and `label` are not visible in
# this excerpt).
conv2 = L.Conv(relu1, "conv2", dim_out = 50, kernel = 5)  # 50 feature maps, 5x5 kernel
pool2 = L.Pool(conv2, "pool2", pool = L.Pool.MAX, kernel = 2, stride = 2)  # 2x2 max-pool, stride 2
relu2 = L.ReLU(pool2, "relu2")
fc3   = L.FC(relu2, "fc3", dim_out = 500)  # fully-connected, 500 units
relu3 = L.ReLU(fc3, "relu3")
pred  = L.FC(relu3, "pred", dim_out = 10)  # 10-way output (presumably class scores — confirm)
loss = L.SoftmaxWithLoss(pred, "loss", label = label)  # softmax + cross-entropy vs `label`

# Net Instance
# Net Instance
net = mobula.Net()

# Set Loss Layer: register the softmax loss as the training objective.
net.set_loss(loss)

# Set Solver: momentum SGD with a STEP learning-rate policy —
# presumably lr is scaled by gamma (0.1) every `stepsize` (1000)
# iterations; confirm against the mobula solver docs.
solver = S.Momentum(gamma = 0.1, stepsize = 1000)
solver.lr_policy = S.LR_POLICY.STEP
# BUG FIX: pass the configured `solver`. The original called
# net.set_solver(S.Momentum()), creating a fresh default solver and
# silently discarding the gamma/stepsize/lr_policy settings above.
net.set_solver(solver)

# Learning Rate (base lr before the STEP policy decay kicks in)
net.lr = 0.005

'''
If start_iter > 0, load the existing model and continue training.
Otherwise, initialize the weights and train from scratch.
'''

start_iter = 0 
max_iter = 100000
# Checkpoint path template; %d is filled with the iteration number.
filename = RESULT_PATH + "/kaggle%d.net"
# NOTE(review): the body of this `if` (presumably loading the checkpoint
# at `filename % start_iter`) is truncated in this excerpt.
if start_iter > 0:
# Beispiel #2 (Example 2): conv/conv-transpose image regression with mobula
# Regression target: image `im` (defined earlier in the full script;
# presumably shape (h, w, t) — confirm) reshaped and transposed to NCHW.
Y = im.reshape((1, h, w, t)).transpose((0, 3, 1, 2))  # -> (1, t, h, w)
# Input: uniform noise in [-0.5, 0.5), same NCHW shape as the target.
X = np.random.random((1, t, h, w)) - 0.5

data, label = L.Data([X, Y])  # data layer yields the input and its target
conv = L.Conv(data, dim_out=42, kernel=3, pad=1)   # 3x3 conv, 42 maps; pad=1 preserves H/W
relu = L.ReLU(conv)
convt = L.ConvT(relu, dim_out=t, kernel=3, pad=1)  # transposed conv back to t channels
relu2 = L.ReLU(convt)
loss = L.MSE(relu2, label=label)  # mean-squared error against Y

# Net Instance
net = mobula.Net()
# Set Loss Layer: register the MSE layer as the training objective.
net.set_loss(loss)
# Set Solver: momentum SGD with default hyper-parameters.
net.set_solver(S.Momentum())

# Learning Rate (base lr; very small — presumably tuned for this task)
net.lr = 2e-6

start_iter = 0
max_iter = 10000
plt.ion()  # interactive matplotlib mode; the plotting code follows in the full script
# Training loop: one forward/backward pass per iteration, progress every 100.
for i in range(start_iter, max_iter + 1):
    net.forward()
    net.backward()

    if i % 100 == 0:
        print("Iter: %d, Cost: %f" % (i, loss.loss))
        net.time()  # presumably prints per-layer timing — confirm in mobula docs
        # NOTE(review): this nested test is always true inside the outer
        # branch (same modulus); its body is truncated in this excerpt.
        if i % 100 == 0: