Exemple #1
0
 def test_backward(self):
     """Check that analytic gradients from backward() match numerical ones.

     A deep copy of the freshly initialised network serves as the
     reference: its gradient() (numerical differentiation) is compared,
     via repr() string equality, against the grads produced by the
     trained network's backward() pass for every parameter.
     """
     network = GraphNeuralNetwork(vector_size=2)
     reference = copy.deepcopy(network)
     # Ten identical 4-vertex graphs, all labelled 1.
     adjacency = [[[0, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1], [0, 1, 1, 0]]
                  ] * 10
     sizes = [4] * 10
     targets = [1] * 10
     steps = 4
     eps = 1.0e-4
     parameters = network.params
     network.backward(adjacency, sizes, targets, steps, eps)
     for name in parameters:
         # repr() comparison pins exact floating-point values.
         expected = repr(
             reference.gradient(name, adjacency, sizes, targets, steps,
                                eps))
         actual = repr(network.grads[name])
         self.assertEqual(expected, actual)
Exemple #2
0
import matplotlib.pyplot as plt
import numpy as np

from gnn import GraphNeuralNetwork
from sgd import SGD

if __name__ == "__main__":
    # Train a GNN on one fixed 11-vertex graph with plain SGD and plot
    # how the loss evolves over 50 update steps.
    gnn = GraphNeuralNetwork(8, 0)
    sgd = SGD()
    # Symmetric adjacency matrix of the training graph.
    graph = [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0]]
    vertex_size = 11
    label = 1

    loss = []
    for _ in range(50):
        # Record the loss before each update, then take one SGD step.
        loss.append(gnn.loss(graph, vertex_size, label))
        gnn.backward(graph, vertex_size, label)
        sgd.update(gnn)
    # Loss after the final update, so the curve has 51 points.
    loss.append(gnn.loss(graph, vertex_size, label))

    print(f"最初の損失:{loss[0]}, 最後の損失:{loss[-1]}")
    plt.plot(np.arange(len(loss)), loss)
    plt.show()