Code example #1
Each snippet below assumes import numpy as np and that the project-local helpers (GraphNeuralNetwork, SGD, Adam, calc_grads, bce_with_logit, sigmoid, util) are in scope.
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    dataset = util.get_train_data('../../datasets')
    train_data, valid_data = util.random_split(dataset, train_ratio=0.5)
    print('train_size: %d, valid_size: %d' %
          (len(train_data), len(valid_data)))

    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0  # accumulates the batch-averaged gradient vector
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1  # initial vertex features: first component set to 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        valid_loss, valid_acc = test(model, valid_data, dim_feature)
        print(
            'epoch: %d, train_loss: %f, train_acc: %f, valid_loss: %f, valid_acc: %f'
            % (epoch, train_loss.avg, train_acc.avg, valid_loss, valid_acc))
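
GraphNeuralNetwork itself is not shown in these excerpts. Judging from the constructor (an aggregation matrix W, readout weights A and bias b, and an aggregation count), the forward pass is presumably a few rounds of neighbor aggregation followed by a linear readout. A minimal sketch under that assumption, not the project's actual code:

import numpy as np

class GraphNeuralNetwork:
    # Sketch only: the exact update rule is an assumption.
    def __init__(self, W, A, b, n_aggregation):
        self.W = W                        # (dim_feature, dim_feature) aggregation weights
        self.A = A                        # (dim_feature,) readout weights
        self.b = b                        # (1,) readout bias
        self.n_aggregation = n_aggregation

    def __call__(self, graph, x):
        for _ in range(self.n_aggregation):
            a = graph @ x                        # sum features over adjacent vertices
            x = np.maximum(0, a @ self.W.T)      # linear transform + ReLU
        h = x.sum(axis=0)                        # readout: sum over all vertices
        return self.A @ h + self.b               # logit with shape (1,)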
Code example #2
def main():
    n = np.random.randint(2, 20)

    # Generate a random undirected graph: keep the strict lower triangle
    # and mirror it, giving a symmetric adjacency matrix with no self-loops
    graph = np.random.randint(2, size=[n, n])
    graph = np.tril(graph, -1) + np.tril(graph, -1).T

    label = np.random.randint(2)
    print(graph)
    print(graph.shape)
    print('label', label)

    dim_feature = 10
    x = np.zeros([n, dim_feature])
    x[:, 0] = 1
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, 2)
    optimizer = SGD(model, lr=0.001)
    for i in range(500):
        grads_flat = calc_grads(model, graph, x, label, lossfunc=bce_with_logit, eps=1e-4)

        outputs = model(graph, x)
        train_loss = bce_with_logit(outputs, label)
        optimizer.update(grads_flat)
        print('step: %d, train_loss: %.15f' % (i, train_loss))
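
Neither calc_grads nor SGD appears in these excerpts. From the call sites (a loss function plus a step size eps, and an update driven by a flat gradient vector), calc_grads is presumably a numerical gradient. A sketch under that assumption; the params_flat attribute is a hypothetical stand-in for however the model actually exposes its flattened W, A, b:

import numpy as np

def calc_grads(model, graph, x, label, lossfunc, eps):
    # Forward-difference numerical gradient of the loss with respect to
    # every parameter, returned as one flat vector.
    params = model.params_flat                   # hypothetical flat view of W, A, b
    base_loss = lossfunc(model(graph, x), label)
    grads = np.zeros_like(params)
    for i in range(len(params)):
        orig = params[i]
        params[i] = orig + eps                   # perturb one parameter
        grads[i] = (lossfunc(model(graph, x), label) - base_loss) / eps
        params[i] = orig                         # restore it
    return grads

class SGD:
    # Plain gradient descent over the same flat parameter vector.
    def __init__(self, model, lr=0.001):
        self.model = model
        self.lr = lr

    def update(self, grads_flat):
        self.model.params_flat -= self.lr * grads_flat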
Code example #3
def test_sample(filename):
    # Excerpted from inside a unittest.TestCase method, so `self` below
    # refers to the enclosing test case. Each sample file holds a logit,
    # a label, and the expected loss on three consecutive lines.
    with open(filename, 'r') as o:
        logit = float(o.readline().strip())
        label = int(o.readline().strip())
        ans = float(o.readline().strip())

    self.assertTrue(np.isclose(function.bce_with_logit(logit, label), ans).all(), filename)
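
The test above checks function.bce_with_logit against expected values. The standard numerically stable form of binary cross-entropy on a raw logit, plus the sigmoid used for predictions elsewhere, is sketched below (a standard formulation, not necessarily the project's exact code):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def bce_with_logit(logit, label):
    # Stable binary cross-entropy on a raw logit z with target y:
    #   loss = max(z, 0) - z * y + log(1 + exp(-|z|))
    # which equals -[y * log(sigmoid(z)) + (1 - y) * log(1 - sigmoid(z))]
    # without ever exponentiating a large positive number.
    z = np.asarray(logit, dtype=float)
    y = np.asarray(label, dtype=float)
    return np.maximum(z, 0) - z * y + np.log1p(np.exp(-np.abs(z)))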
Code example #4
def test(model, dataset, dim_feature):
    """Function for model evaluation"""
    acc = util.AverageMeter()
    loss = util.AverageMeter()
    for graph, label in dataset:
        x = np.zeros([len(graph), dim_feature])
        x[:, 0] = 1
        outputs = model(graph, x)
        loss.update(bce_with_logit(outputs, label), 1)
        acc.update((sigmoid(outputs) > 0.5) == label, 1)
    return loss.avg, acc.avg
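
util.AverageMeter accumulates running averages across these loops. A minimal implementation consistent with the update(value, n) calls and the .avg attribute, assumed here since the project's util module is not shown:

class AverageMeter:
    # Count-weighted running average.
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        self.sum += float(value) * n
        self.count += n
        self.avg = self.sum / self.count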
Code example #5
File: main.py  Project: yoshnary/pfn-intern-task-2019
def main(n_aggregation, dim_feature, n_epochs, batch_size, eps, outputfile):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    # Training
    train_data = util.get_train_data('../../datasets')
    print('train_size: %d' % len(train_data))
    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        print('epoch: %d, train_loss: %f, train_acc: %f' %
              (epoch, train_loss.avg, train_acc.avg))

    # Prediction
    test_data = util.get_test_data('../../datasets')
    with open(outputfile, 'w') as o:
        for graph in test_data:
            x = np.zeros([len(graph), dim_feature])
            x[:, 0] = 1
            logit = model(graph, x)
            pred = sigmoid(logit) > 0.5
            o.write(str(int(pred[0])) + '\n')
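
Adam is the one remaining undefined helper. A textbook Adam update, written against the same hypothetical params_flat accessor as the calc_grads sketch above (so the in-place -= is also an assumption):

import numpy as np

class Adam:
    # Standard Adam over the model's flat parameter vector.
    def __init__(self, model, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
        self.model = model
        self.lr, self.beta1, self.beta2, self.eps = lr, beta1, beta2, eps
        self.m = np.zeros_like(model.params_flat)
        self.v = np.zeros_like(model.params_flat)
        self.t = 0

    def update(self, grads_flat):
        self.t += 1
        self.m = self.beta1 * self.m + (1 - self.beta1) * grads_flat
        self.v = self.beta2 * self.v + (1 - self.beta2) * grads_flat ** 2
        m_hat = self.m / (1 - self.beta1 ** self.t)   # bias-corrected first moment
        v_hat = self.v / (1 - self.beta2 ** self.t)   # bias-corrected second moment
        self.model.params_flat -= self.lr * m_hat / (np.sqrt(v_hat) + self.eps)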