def test_loss(self):
    """Batched loss: ten identical graphs with saturating params give loss 100 each-wise mean."""
    model = GraphNeuralNetwork(vector_size=3)
    # Large negative weights/bias drive the classifier into a saturated regime
    # so the loss value is exact and repr-stable.
    model.params["W"] = -np.arange(1, 10).reshape(3, 3)
    model.params["b"] = np.array([-100])

    adjacency = [
        [0, 0, 1, 0],
        [0, 0, 1, 1],
        [1, 1, 0, 1],
        [0, 1, 1, 0],
    ]
    batch = 10
    graphs = [adjacency] * batch
    vertex_sizes = [4] * batch
    labels = [1] * batch

    # Compare on repr so the array dtype/shape is pinned as well as the value.
    result = repr(model.loss(graphs, vertex_sizes, labels))
    self.assertEqual("array([100.])", result)
def test_loss(self):
    """Single-graph loss for both labels across three bias settings.

    b = 0 gives the symmetric cross-entropy log(2) for either label;
    b = -100 / +100 saturate the sigmoid so one label's loss is 0 and
    the other's is exactly 100.
    """
    model = GraphNeuralNetwork(vector_size=2)
    model.params["W"] = -np.arange(1, 5).reshape(2, 2)
    model.params["b"] = np.array([0])

    vertex_size = 4
    graph = [
        [0, 0, 1, 0],
        [0, 0, 1, 1],
        [1, 1, 0, 1],
        [0, 1, 1, 0],
    ]
    label = [0, 1]

    cases = zip(
        [[0], [-100], [100]],
        [[0.6931471805599453] * 2, [0., 100.], [100., 0.]],
    )
    for bias, expected in cases:
        model.params["b"] = np.array(bias)
        # Check the loss for each of the two labels under this bias.
        for want, lab in zip(expected, label):
            self.assertEqual(want, model.loss(graph, vertex_size, lab))
import matplotlib.pyplot as plt
import numpy as np

from gnn import GraphNeuralNetwork
from sgd import SGD

if __name__ == "__main__":
    # Train a GNN on a single fixed 11-vertex graph and plot the loss curve.
    # FIX: the original script used np.* and plt.* without importing numpy or
    # matplotlib, so it crashed with NameError at the plotting step.
    gnn = GraphNeuralNetwork(8, 0)
    sgd = SGD()
    graph = [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
             [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
             [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0]]
    vertex_size = 11
    label = 1

    loss = []
    print("A")  # NOTE(review): looks like a leftover debug marker — confirm and remove
    for _ in range(50):
        # Record loss before each step, then take one SGD step.
        loss.append(gnn.loss(graph, vertex_size, label))
        gnn.backward(graph, vertex_size, label)
        sgd.update(gnn)
    # Final loss after the last update.
    loss.append(gnn.loss(graph, vertex_size, label))

    print(f"最初の損失:{loss[0]}, 最後の損失:{loss[-1]}")
    plt.plot(np.arange(len(loss)), loss)
    plt.show()
import sys

sys.path.append("src/")
from gnn import GraphNeuralNetwork
from optimizer import SGD
from train import read_graph, read_label

if __name__ == "__main__":
    # Load sample 0 and run 1000 gradient-descent steps on it, logging
    # the loss and predicted probability at every iteration.
    graph = read_graph(0)
    label = read_label(0)
    print("label: {}".format(label))

    gnn = GraphNeuralNetwork(optimizer=SGD())
    for step in range(1000):
        current_loss = gnn.loss(graph, label)[0]
        prob = gnn._get_p(graph)[0]
        print("[{}th iteration] loss: {}, p: {}".format(step, current_loss, prob))
        gnn.gradient_descent([(graph, label)])