Example No. 1
import copy
import unittest

import numpy as np

from gnn import GraphNeuralNetwork
from sgd import SGD


class TestSGD(unittest.TestCase):
    def test_update(self):
        sgd = SGD()
        gnn = GraphNeuralNetwork(vector_size=2)
        # Snapshot the params as text before updating: gnn.params is
        # mutated in place, so comparing the dict object with itself
        # after the update would always pass.
        expected = repr(gnn.params)
        sgd.update(gnn)  # no gradients set yet, so params must not move
        actual = repr(gnn.params)
        self.assertEqual(expected, actual)
        params = copy.deepcopy(gnn.params)
        for _ in range(100):
            gnn.grads["W"] = np.random.rand()
            gnn.grads["A"] = np.random.rand()
            gnn.grads["b"] = np.random.rand()
            sgd.update(gnn)
            # Replay the SGD rule by hand and check the optimiser
            # produced exactly the same values.
            for key, param in params.items():
                params[key] = param - gnn.grads[key] * sgd.lr
                expected = repr(params[key])
                actual = repr(gnn.params[key])
                self.assertEqual(expected, actual)
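The test treats SGD as a black box. For context, here is a minimal sketch of an optimiser that would satisfy it, assuming params and grads are plain dicts and lr is an ordinary attribute (the default learning rate below is a guess, not the module's actual value):

class SGD:
    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, model):
        # Plain gradient descent: param <- param - lr * grad. Iterating
        # over grads makes an update with no gradients a no-op, which is
        # what the first assertion in the test expects.
        for key, grad in model.grads.items():
            model.params[key] = model.params[key] - self.lr * grad

The suite can then be run with python -m unittest from the directory containing gnn.py and sgd.py.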
Example No. 2
import matplotlib.pyplot as plt
import numpy as np

from gnn import GraphNeuralNetwork
from sgd import SGD

if __name__ == "__main__":
    gnn = GraphNeuralNetwork(8, 0)
    sgd = SGD()
    graph = [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0]]
    vertex_size = 11
    label = 1
    loss = []
    # Train for 50 SGD steps on this single labelled graph, recording the
    # loss before each update and once more after the final one.
    for _ in range(50):
        loss.append(gnn.loss(graph, vertex_size, label))
        gnn.backward(graph, vertex_size, label)
        sgd.update(gnn)
    loss.append(gnn.loss(graph, vertex_size, label))

    print(f"initial loss: {loss[0]}, final loss: {loss[-1]}")
    plt.plot(np.arange(len(loss)), loss)
    plt.show()
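The script uses gnn.loss and gnn.backward without showing them. As a rough idea of the kind of computation loss could perform on such an adjacency matrix, here is a self-contained toy sketch (aggregate neighbour features for a few steps, pool the vertices, score with a sigmoid classifier, take binary cross-entropy); every name and shape in it (D, T, W, A, b, the one-hot initial features) is an illustrative assumption, not the module's real internals:

import numpy as np

rng = np.random.default_rng(0)
D, T = 8, 2                          # feature size and aggregation steps (assumed)
W = rng.normal(0.0, 0.4, (D, D))     # aggregation weights
A = rng.normal(0.0, 0.4, D)          # classifier weights
b = 0.0                              # classifier bias
label = 1

adj = np.array([[0, 1, 0],           # toy 3-vertex path graph
                [1, 0, 1],
                [0, 1, 0]])
x = np.zeros((adj.shape[0], D))
x[:, 0] = 1.0                        # one-hot initial vertex features

for _ in range(T):                   # aggregation: sum neighbour features, then ReLU
    x = np.maximum(0.0, (adj @ x) @ W.T)

h = x.sum(axis=0)                    # readout: pool all vertices into one vector
s = A @ h + b                        # classification logit
p = 1.0 / (1.0 + np.exp(-s))         # predicted probability of label 1
# binary cross-entropy, written in its numerically stable logit form
loss = max(s, 0.0) - s * label + np.log1p(np.exp(-abs(s)))
print(p, loss)

Under this reading, gnn.backward would derive or estimate the gradients of this loss with respect to W, A and b, which sgd.update then applies.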
Example No. 3
import matplotlib.pyplot as plt
import numpy as np

# This is an excerpt: it assumes the earlier part of the script defined the
# corpus arrays xs/ts, data_size, jump (the stride between samples),
# time_index = 0, total_loss = 0, loss_count = 0, ppl_list = [], plus the
# model, the optimiser and the hyperparameters batch_size, time_size,
# max_epoch and max_iters.

# 1. Compute the start offset of each sample in the mini-batch
offsets = [i * jump for i in range(batch_size)]
for epoch in range(max_epoch):
    for iteration in range(max_iters):
        # 2. Get the mini-batches
        batch_x = np.empty((batch_size, time_size), dtype='i')
        batch_t = np.empty((batch_size, time_size), dtype='i')
        for t in range(time_size):
            for i, offset in enumerate(offsets):
                batch_x[i, t] = xs[(offset + time_index) % data_size]
                batch_t[i, t] = ts[(offset + time_index) % data_size]
            time_index += 1

        # Calculate gradients and update parameters
        loss = model.forward(batch_x, batch_t)
        model.backward()
        optimiser.update(model.params, model.grads)
        total_loss += loss
        loss_count += 1

    # 3. Evaluate perplexity at the end of each epoch
    ppl = np.exp(total_loss / loss_count)
    print('Epoch %d | Perplexity %.2f' % (epoch + 1, ppl))
    ppl_list.append(float(ppl))
    total_loss = 0
    loss_count = 0

x = np.arange(len(ppl_list))
plt.figure()
plt.plot(x, ppl_list, label='train')
plt.xlabel('Epochs')
plt.ylabel('Perplexity')
plt.legend()
plt.show()
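The nested loops in step 2 are the core of the snippet: each batch row walks its own contiguous slice of the corpus, starting jump tokens after the previous row and wrapping at the end. A self-contained toy replay of the same indexing makes the pattern visible (the corpus and the jump formula are illustrative assumptions):

import numpy as np

xs = np.arange(20)                     # toy "corpus" of 20 token ids
batch_size, time_size = 4, 3
data_size = len(xs)
jump = data_size // batch_size         # stride between rows (assumed formula)
offsets = [i * jump for i in range(batch_size)]

time_index = 0
batch_x = np.empty((batch_size, time_size), dtype='i')
for t in range(time_size):
    for i, offset in enumerate(offsets):
        batch_x[i, t] = xs[(offset + time_index) % data_size]
    time_index += 1

print(batch_x)
# [[ 0  1  2]
#  [ 5  6  7]
#  [10 11 12]
#  [15 15 17] becomes [[0 1 2], [5 6 7], [10 11 12], [15 16 17]]

Because time_index is shared across iterations, each subsequent batch picks up exactly where the previous one stopped, which is what lets truncated BPTT run over sequences longer than time_size.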