import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam

from spektral.datasets.citation import Cora
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import tic, toc  # timing helpers shipped with Spektral

# The opening line of this excerpt was truncated; the dataset is an assumption
# (Cora is the usual choice). LayerPreprocess applies GCN's adjacency
# normalization once up front, AdjToSpTensor converts it to a tf.SparseTensor.
dataset = Cora(transforms=[LayerPreprocess(GCNConv),
                           AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te

model = GCN(n_labels=dataset.n_labels)
optimizer = Adam(learning_rate=1e-2)  # 'lr' is a deprecated alias
loss_fn = CategoricalCrossentropy()


# Training step
@tf.function
def train():
    with tf.GradientTape() as tape:
        predictions = model([x, a], training=True)
        loss = loss_fn(y[mask_tr], predictions[mask_tr])
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss


# Time the execution of 200 epochs of training
train()  # Warm up to ignore tracing times when timing
tic()
for epoch in range(1, 201):
    loss = train()
toc("Spektral - GCN (200 epochs)")
print(f"Final loss = {loss}")
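The loop above only reports the final training loss. A minimal sketch of a test-set check, assuming the same model, x, a, y and masks as above (categorical_accuracy is the stock Keras metric):

from tensorflow.keras.metrics import categorical_accuracy

# Forward pass in inference mode, then score only the test nodes
predictions = model([x, a], training=False)
test_acc = tf.reduce_mean(categorical_accuracy(y[mask_te], predictions[mask_te]))
print(f"Test accuracy = {float(test_acc):.4f}")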
Example 2
import tensorflow as tf
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from spektral.layers import GraphConv  # legacy (pre-1.0) name of Spektral's GCNConv

# This excerpt starts mid-script: X, fltr, y and the boolean masks are loaded
# earlier (see the sketch after this example). The node-features Input was
# truncated from the excerpt and is reconstructed here.
X_in = Input((X.shape[1],))
fltr_in = Input((X.shape[0],), sparse=True)
X_1 = GraphConv(16, 'relu', True, kernel_regularizer=l2(5e-4))([X_in, fltr_in])
X_1 = Dropout(0.5)(X_1)
X_2 = GraphConv(y.shape[1], 'softmax', True)([X_1, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=X_2)
optimizer = Adam(learning_rate=1e-2)  # 'lr' is a deprecated alias
model.compile(optimizer=optimizer, loss='categorical_crossentropy')
# model.loss_functions was removed in newer versions of tf.keras;
# instantiate the loss directly instead
loss_fn = CategoricalCrossentropy()


# Training step
@tf.function
def train():
    with tf.GradientTape() as tape:
        predictions = model([X, fltr], training=True)
        loss = loss_fn(y[train_mask], predictions[train_mask])
        loss += sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss


# Time the execution of 200 epochs of training
train()  # Warm up to ignore tracing times when timing
tic()
for epoch in range(1, 201):
    train()
toc('Spektral - GCN (200 epochs)')
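Example 2 assumes X, fltr, y and the masks already exist. A minimal sketch of that missing preamble, assuming the legacy (pre-1.0) Spektral citation loader and filter preprocessing:

from spektral.datasets import citation
from spektral.layers import GraphConv

# Load Cora and precompute the normalized GCN filter (pre-1.0 API assumed)
A, X, y, train_mask, val_mask, test_mask = citation.load_data('cora')
fltr = GraphConv.preprocess(A).astype('f4')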
Example 3
# Evaluation step: loss and accuracy on the train/val/test splits. The head of
# this function was truncated from the excerpt and is reconstructed by analogy
# with Example 4; the names (model, x, a, y, mask_*) are assumptions.
from tensorflow.keras.metrics import categorical_accuracy


@tf.function
def evaluate():
    predictions = model([x, a], training=False)
    losses = []
    accuracies = []
    for mask in [mask_tr, mask_va, mask_te]:
        loss = loss_fn(y[mask], predictions[mask])
        loss += sum(model.losses)
        losses.append(loss)
        acc = tf.reduce_mean(categorical_accuracy(y[mask], predictions[mask]))
        accuracies.append(acc)
    return losses, accuracies


best_val_loss = 99999
best_test_acc = 0
current_patience = patience = 100
epochs = 999999
tic()
for epoch in range(1, epochs + 1):
    train()
    l, a = evaluate()
    print("Loss tr: {:.4f}, Acc tr: {:.4f}, "
          "Loss va: {:.4f}, Acc va: {:.4f}, "
          "Loss te: {:.4f}, Acc te: {:.4f}".format(l[0], a[0], l[1], a[1],
                                                   l[2], a[2]))
    if l[1] < best_val_loss:
        best_val_loss = l[1]
        best_test_acc = a[2]
        current_patience = patience
        print("Improved")
    else:
        current_patience -= 1
        if current_patience == 0:
            print("Test accuracy: {}".format(best_test_acc))
            break
toc("GAT ({} epochs)".format(epoch))
Example 4
# Evaluation step, called test() by the loop below. The head of this function
# was truncated from the excerpt and is reconstructed from the remaining
# lines; the names (model, X, fltr, y, *_mask) are assumptions.
from tensorflow.keras.metrics import categorical_accuracy


@tf.function
def test():
    predictions = model([X, fltr], training=False)
    losses = []
    accuracies = []
    for mask in [train_mask, val_mask, test_mask]:
        loss = loss_fn(y[mask], predictions[mask])
        loss += sum(model.losses)
        losses.append(loss)
        acc = tf.reduce_mean(categorical_accuracy(y[mask], predictions[mask]))
        accuracies.append(acc)
    return losses, accuracies


best_val_loss = 99999
best_test_acc = 0
current_patience = patience = 100
tic()
for epoch in range(1, 99999):
    train()
    l, a = test()
    print('Loss tr: {:.4f}, Acc tr: {:.4f}, '
          'Loss va: {:.4f}, Acc va: {:.4f}, '
          'Loss te: {:.4f}, Acc te: {:.4f}'.format(l[0], a[0], l[1], a[1],
                                                   l[2], a[2]))
    if l[1] < best_val_loss:
        best_val_loss = l[1]
        best_test_acc = a[2]
        current_patience = patience
        print('Improved')
    else:
        current_patience -= 1
        if current_patience == 0:
            print('Best test acc: {}'.format(best_test_acc))
            break
toc('GAT ({} epochs)'.format(epoch))
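The patience loops in Examples 3 and 4 track the best test accuracy but leave the model holding its final (post-patience) weights. A common variant, sketched here under the same assumptions as Example 4, also restores the weights from the best validation epoch using the standard Keras get_weights/set_weights API:

best_val_loss = 99999
current_patience = patience = 100
best_weights = model.get_weights()  # snapshot of the initial weights

for epoch in range(1, 99999):
    train()
    l, a = test()
    if l[1] < best_val_loss:
        best_val_loss = l[1]
        best_weights = model.get_weights()  # keep the best-validation weights
        current_patience = patience
    else:
        current_patience -= 1
        if current_patience == 0:
            break

model.set_weights(best_weights)  # roll back before any final evaluation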