Example #1
import numpy as np
# Assumed in scope from the surrounding project: util, GraphNeuralNetwork,
# Adam, calc_grads, bce_with_logit, sigmoid, test.


def main(n_aggregation, dim_feature, n_epochs, batch_size, eps):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    dataset = util.get_train_data('../../datasets')
    train_data, valid_data = util.random_split(dataset, train_ratio=0.5)
    print('train_size: %d, valid_size: %d' %
          (len(train_data), len(valid_data)))

    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                # Initial node features: one-hot on dimension 0.
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                # Accumulate numerical gradients, averaged over the batch.
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        valid_loss, valid_acc = test(model, valid_data, dim_feature)
        print(
            'epoch: %d, train_loss: %f, train_acc: %f, valid_loss: %f, valid_acc: %f'
            % (epoch, train_loss.avg, train_acc.avg, valid_loss, valid_acc))
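
The helpers this example calls are defined elsewhere in the project. As a rough, self-contained sketch of what they could look like: the behavior of bce_with_logit, sigmoid, and calc_grads below is inferred from the call sites, and the flat-parameter accessors get_flat_params/set_flat_params are hypothetical names, not confirmed by the source.

import numpy as np

def sigmoid(x):
    # Logistic function; the branching form avoids overflow for large |x|.
    return np.where(x >= 0, 1.0 / (1.0 + np.exp(-x)),
                    np.exp(x) / (1.0 + np.exp(x)))

def bce_with_logit(logit, label):
    # Binary cross-entropy expressed directly in terms of the logit.
    return float(np.mean(label * np.log1p(np.exp(-logit))
                         + (1 - label) * np.log1p(np.exp(logit))))

def calc_grads(model, graph, x, label, loss_fn, eps):
    # Forward-difference numerical gradient over a flat parameter vector.
    params = model.get_flat_params()        # hypothetical accessor
    base = loss_fn(model(graph, x), label)
    grads = np.zeros_like(params)
    for i in range(params.size):
        params[i] += eps
        model.set_flat_params(params)       # hypothetical accessor
        grads[i] = (loss_fn(model(graph, x), label) - base) / eps
        params[i] -= eps
    model.set_flat_params(params)           # restore original parameters
    return grads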
Example #2
import numpy as np
# Assumed in scope from the surrounding project: utils, GNNModel,
# BinaryCrossEntropy, Adam, load_data, accuracy.


def train():
    graphs, labels = load_data("datasets/train")
    train_inputs, train_targets, val_inputs, val_targets = utils.split_train_val(
        graphs, labels, val_rate=0.3)

    model = GNNModel(8)
    loss_func = BinaryCrossEntropy()
    optimizer = Adam()
    batch_generator = utils.BatchGenerator(batch_size=32)

    min_loss = float("inf")
    for epoch in range(50):
        print(f"Epoch {epoch + 1}")

        train_losses = []
        for inputs, targets in batch_generator.generator(
                train_inputs, train_targets):
            train_loss, loss_grad = loss_func(model,
                                              inputs,
                                              targets,
                                              is_grad=True)
            optimizer.update(model, loss_grad)

            train_losses.append(train_loss)

        train_mean_loss = np.mean(train_losses)
        pred = np.array([model.predict(input_)
                         for input_ in train_inputs]).squeeze()
        train_accuracy = accuracy(pred, train_targets)

        val_losses = []
        for inputs, targets in batch_generator.generator(
                val_inputs, val_targets):
            val_loss, _ = loss_func(model, inputs, targets, is_grad=False)
            val_losses.append(val_loss)

        val_mean_loss = np.mean(val_losses)
        pred = np.array([model.predict(input_)
                         for input_ in val_inputs]).squeeze()
        val_accuracy = accuracy(pred, val_targets)

        if val_mean_loss < min_loss:
            min_loss = val_mean_loss
            print(
                f"Train loss: {train_mean_loss}\tTrain accuracy: {train_accuracy}"
            )
            print(
                f"Validation loss: {val_mean_loss}\tValidation accuracy: {val_accuracy}"
            )
            print("")
Example #3
import numpy as np
# Assumed in scope from the surrounding project: util, GraphNeuralNetwork,
# Adam, calc_grads, bce_with_logit, sigmoid.


def main(n_aggregation, dim_feature, n_epochs, batch_size, eps, outputfile):
    W = np.random.normal(0, 0.4, [dim_feature, dim_feature])
    A = np.random.normal(0, 0.4, dim_feature)
    b = np.array([0.])
    model = GraphNeuralNetwork(W, A, b, n_aggregation=n_aggregation)
    optimizer = Adam(model)

    # Training
    train_data = util.get_train_data('../../datasets')
    print('train_size: %d' % len(train_data))
    for epoch in range(n_epochs):
        train_loss = util.AverageMeter()
        train_acc = util.AverageMeter()
        for graphs, labels in util.get_shuffled_batches(
                train_data, batch_size):
            grads_flat = 0
            for graph, label in zip(graphs, labels):
                x = np.zeros([len(graph), dim_feature])
                x[:, 0] = 1
                grads_flat += calc_grads(model, graph, x, label,
                                         bce_with_logit, eps) / batch_size

                outputs = model(graph, x)
                train_loss.update(bce_with_logit(outputs, label), 1)
                train_acc.update((sigmoid(outputs) > 0.5) == label, 1)

            optimizer.update(grads_flat)

        print('epoch: %d, train_loss: %f, train_acc: %f' %
              (epoch, train_loss.avg, train_acc.avg))

    # Prediction
    test_data = util.get_test_data('../../datasets')
    with open(outputfile, 'w') as o:
        for graph in test_data:
            x = np.zeros([len(graph), dim_feature])
            x[:, 0] = 1
            logit = model(graph, x)
            pred = sigmoid(logit) > 0.5
            # Write one 0/1 prediction per line.
            o.write(str(int(pred[0])) + '\n')
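
All three examples call the model as model(graph, x), where len(graph) gives the node count and x holds per-node features, and they read the result as a one-element logit array. A minimal GraphNeuralNetwork consistent with those shapes might look like the following sketch; the dense adjacency matrix, ReLU nonlinearity, and sum readout are assumptions rather than the source's confirmed design.

import numpy as np

class GraphNeuralNetwork:
    def __init__(self, W, A, b, n_aggregation):
        self.W = W                      # feature-mixing weights
        self.A = A                      # readout weights
        self.b = b                      # readout bias
        self.n_aggregation = n_aggregation

    def __call__(self, graph, x):
        adj = np.asarray(graph)         # dense adjacency matrix, assumed
        for _ in range(self.n_aggregation):
            # Sum neighbor features, mix with W, apply ReLU.
            x = np.maximum(0.0, adj @ x @ self.W.T)
        h = x.sum(axis=0)               # graph-level readout by summation
        return h @ self.A + self.b      # one-element array, as pred[0] expects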