Example #1
def make_test_graph():
    # Small test graph: y = w @ x + b, MSE loss against y_label, plus an L2 penalty on w.
    x_tsr = np.arange(4).reshape([2, 2, 1]).astype(np.float64)
    w_tsr = np.arange(6).reshape([3, 2]).astype(np.float64)
    b_tsr = np.arange(3).reshape([3, 1]).astype(np.float64)
    ylabel_tsr = np.arange(6).reshape([2, 3, 1]).astype(np.float64)

    x = dg.identity(x_tsr, name='x')
    w = dg.identity(w_tsr, name='w')
    b = dg.identity(b_tsr, name='b')
    y = dg.mat_add(dg.mat_mul(w, x, 'mat_mul'), b, name='y')
    y_label = dg.identity(ylabel_tsr, name='ylabel')
    w_l2 = dg.l2(w, name='w_l2')

    loss = dg.mse(y_label, y, name='MSEloss')
    loss_reg = dg.mat_add(w_l2, loss, name='loss_reg')

    print('Constructed the following graph:')

    for n in dg.forward_iter(loss_reg):
        print(n.name)

    return [x, w, b, y_label, loss, loss_reg]
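A minimal driver sketch for the graph returned above, assuming the dg.optim.SGD(loss, params, lr) interface and the .data() accessor that appear in the later examples; the learning rate and number of steps are arbitrary:

x, w, b, y_label, loss, loss_reg = make_test_graph()
sgd = dg.optim.SGD(loss_reg, [w, b], 0.01)   # hypothetical usage; SGD signature assumed from the other examples
for step in range(10):
    sgd.step()
    print('regularized loss:', loss_reg.data())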
Example #2
print('type(x_data):', type(x_data))
print('x_data.shape:', x_data.shape)
print('type(y_data):', type(y_data))
print('y_data.shape:', y_data.shape)

print()

img_in = dg.identity(x_data)
y_label = dg.identity(y_data)
conv1 = dg.cnn.conv(img_in, 3, 6)            # convolution layer producing 6 feature maps
relu1 = dg.relu(conv1)
pool1 = dg.cnn.max_pool(relu1, 3, stride=1)
print('pool1.shape:', pool1.shape())
fl = dg.reshape(pool1, (15, 3456, 1))        # flatten each pooled feature map to a (3456, 1) column
print('fl.shape:', fl.shape())
w = dg.identity(np.random.randn(10, 3456))   # fully connected layer: 3456 features -> 10 outputs
print('w.shape:', w.shape())
fc = dg.mat_mul(w, fl)
print('fc.shape:', fc.shape())
b = dg.identity(np.random.randn(10, 1))
out = dg.mat_add(fc, b)
print('out.shape:', out.shape())
loss = dg.softmax_cross(out, y_label)

lr = 0.001
sgd_optim = dg.optim.SGD(loss, [w, b] + conv1.op.params(), lr)   # also updates the conv layer's own parameters
epoch = 50
for i in range(epoch):
    sgd_optim.step()   # one SGD update on the fixed batch
    print(loss.data())
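After the loop, the trained network's predictions could be inspected; a rough sketch, assuming out.data() exists by analogy with loss.data(), returns the values from the most recent forward pass with shape (15, 10, 1), and that y_data is one-hot encoded with the same shape:

logits = out.data()                         # assumed accessor, by analogy with loss.data()
pred = np.argmax(logits, axis=1).ravel()    # predicted class per image
true = np.argmax(y_data, axis=1).ravel()    # assumes one-hot labels of shape (15, 10, 1)
print('training accuracy:', np.mean(pred == true))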
Example #3
                                         shuffle=False,
                                         num_workers=2)   # end of the (presumably torch) DataLoader call that builds trainloader; its opening lines are not shown

w1_stdev = 0.1
w2_stdev = 0.01

# Two-layer fully connected network: 784 -> 500 -> 10.
# x and y_label start as random placeholders; real batches are fed in via sgd_optim.step().
x = dg.identity(np.random.randn(batch_size, 784, 1), name='x')
w_1 = dg.identity(np.random.randn(500, 784) * w1_stdev, name='w_1')
b_1 = dg.identity(np.random.randn(500, 1), name='b_1')
w_2 = dg.identity(np.random.randn(10, 500) * w2_stdev, name='w_2')
b_2 = dg.identity(np.random.randn(10, 1), name='b_2')
y_label = dg.identity(np.random.randn(batch_size, 10, 1), name='ylabel')

y_1 = dg.mat_add(dg.mat_mul(w_1, x, 'mat_mul_1'), b_1, name='y_1')
y_2 = dg.mat_add(dg.mat_mul(w_2, y_1, 'mat_mul_2'), b_2, name='y_2')

loss = dg.mse(y_label, y_2, name='MSEloss')

sgd_optim = dg.optim.SGD(loss, [w_1, w_2, b_1, b_2], 0.0001)
train_iter = iter(trainloader)
for i in range(1000):
    inputs, labels = next(train_iter)
    inputs = inputs.reshape([batch_size, 784, 1]).numpy()
    labels = label2onehot(labels)   # not defined in this snippet; a possible version is sketched below
    sgd_optim.step({x: inputs, y_label: labels})
    if i % 100 == 0:
        print('Loss after {} training steps: {}'.format(i + 1, loss.data()))
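label2onehot is called in the loop above but not defined in this snippet; a plausible version, assuming labels is a torch tensor of class indices and the targets must match y_label's (batch_size, 10, 1) shape:

def label2onehot(labels, num_classes=10):
    # Hypothetical helper; the actual implementation used by this example is not shown.
    idx = labels.numpy()                                   # torch tensor of class indices -> numpy
    onehot = np.zeros((idx.shape[0], num_classes, 1))
    onehot[np.arange(idx.shape[0]), idx, 0] = 1.0
    return onehot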