import time

import networkx as nx
import numpy as np
import tensorflow as tf
from dgl import DGLGraph
from dgl.data import load_data

from gat import GAT              # the example's GAT model definition
from utils import EarlyStopping  # the example's early-stopping helper


def main(args):
    # load and preprocess dataset
    data = load_data(args)

    if args.gpu < 0:
        device = "/cpu:0"
    else:
        device = "/gpu:{}".format(args.gpu)

    with tf.device(device):
        features = tf.convert_to_tensor(data.features, dtype=tf.float32)
        labels = tf.convert_to_tensor(data.labels, dtype=tf.int64)
        train_mask = tf.convert_to_tensor(data.train_mask, dtype=tf.bool)
        val_mask = tf.convert_to_tensor(data.val_mask, dtype=tf.bool)
        test_mask = tf.convert_to_tensor(data.test_mask, dtype=tf.bool)
        num_feats = features.shape[1]
        n_classes = data.num_labels
        n_edges = data.graph.number_of_edges()
        print("""----Data statistics------
          #Edges %d
          #Classes %d
          #Train samples %d
          #Val samples %d
          #Test samples %d""" %
              (n_edges, n_classes,
               train_mask.numpy().sum(),
               val_mask.numpy().sum(),
               test_mask.numpy().sum()))

        # add self loops: drop any existing ones first so none are duplicated
        g = data.graph
        g.remove_edges_from(nx.selfloop_edges(g))
        g = DGLGraph(g)
        g.add_edges(g.nodes(), g.nodes())
        n_edges = g.number_of_edges()

        # create model
        heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
        model = GAT(g,
                    args.num_layers,
                    num_feats,
                    args.num_hidden,
                    n_classes,
                    heads,
                    tf.nn.elu,
                    args.in_drop,
                    args.attn_drop,
                    args.negative_slope,
                    args.residual)
        print(model)

        if args.early_stop:
            stopper = EarlyStopping(patience=100)
        # loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(
        #     from_logits=False)
        loss_fcn = tf.nn.sparse_softmax_cross_entropy_with_logits

        optimizer = tf.keras.optimizers.Adam(
            learning_rate=args.lr, epsilon=1e-8)

        # training loop
        dur = []
        for epoch in range(args.epochs):
            if epoch >= 3:
                t0 = time.time()
            # forward
            with tf.GradientTape() as tape:
                tape.watch(model.trainable_weights)
                logits = model(features, training=True)
                loss_value = tf.reduce_mean(loss_fcn(
                    labels=labels[train_mask], logits=logits[train_mask]))
                # Manual weight decay: we found that TensorFlow's Adam(W)
                # implements weight decay differently from PyTorch's, which
                # degrades accuracy here. Adding an explicit L2 penalty on the
                # weights to the loss reproduces PyTorch-style weight decay.
                for weight in model.trainable_weights:
                    loss_value += args.weight_decay * tf.nn.l2_loss(weight)

            grads = tape.gradient(loss_value, model.trainable_weights)
            optimizer.apply_gradients(zip(grads, model.trainable_weights))

            if epoch >= 3:
                dur.append(time.time() - t0)

            train_acc = accuracy(logits[train_mask], labels[train_mask])

            if args.fastmode:
                val_acc = accuracy(logits[val_mask], labels[val_mask])
            else:
                val_acc = evaluate(model, features, labels, val_mask)
                if args.early_stop and stopper.step(val_acc, model):
                    break

            print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
                  " ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".format(
                      epoch, np.mean(dur), loss_value.numpy().item(), train_acc,
                      val_acc, n_edges / np.mean(dur) / 1000))

        print()
        if args.early_stop:
            model.load_weights('es_checkpoint.pb')
        acc = evaluate(model, features, labels, test_mask)
        print("Test Accuracy {:.4f}".format(acc))
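
# The loop above calls accuracy() and evaluate(), which this snippet does not
# define. Below is a minimal sketch of what they are assumed to do (the exact
# names and signatures are assumptions, not confirmed by this file):
# accuracy() compares argmax predictions against the labels, and evaluate()
# runs an inference pass and scores only the masked nodes.

def accuracy(logits, labels):
    # fraction of nodes whose highest-scoring class matches the label
    indices = tf.math.argmax(logits, axis=1)
    return tf.reduce_mean(tf.cast(indices == labels, dtype=tf.float32)).numpy().item()


def evaluate(model, features, labels, mask):
    # inference pass (training=False disables dropout), scored on `mask` only
    logits = model(features, training=False)
    return accuracy(logits[mask], labels[mask])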
              # (closes the model.compile(...) call begun in the previous section)
              weighted_metrics=['categorical_crossentropy', 'acc'])

model_input = [features, A.toarray()]
val_data = (model_input, y_val, val_mask)

mc_callback = ModelCheckpoint('./best_model.h5',
                              monitor='val_weighted_categorical_crossentropy',
                              save_best_only=True,
                              save_weights_only=True)

# full-batch training: the entire graph is one batch, so shuffling is disabled
# and the train/val/test splits are selected via sample weights (the masks)
print("start training")
model.fit(model_input, y_train, sample_weight=train_mask,
          validation_data=val_data, batch_size=A.shape[0],
          epochs=200, shuffle=False, verbose=2, callbacks=[mc_callback])

# test with the best checkpoint
model.load_weights('./best_model.h5')
eval_results = model.evaluate(model_input, y_test,
                              sample_weight=test_mask,
                              batch_size=A.shape[0])
print('Done.\n'
      'Test loss: {}\n'
      'Test weighted_loss: {}\n'
      'Test accuracy: {}'.format(*eval_results))

# extract the node embeddings produced by the last GCN layer
gcn_embedding = model.layers[-1]
embedding_model = Model(model.input,
                        outputs=Lambda(lambda x: gcn_embedding.output)(model.input))
embedding_weights = embedding_model.predict(model_input, batch_size=A.shape[0])

# read each Cora node's class label (last column) to color the plot
y = np.genfromtxt("{}{}.content".format('../data/cora/', 'cora'),
                  dtype=np.dtype(str))[:, -1]
plot_embeddings(embedding_weights, np.arange(A.shape[0]), y)
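
# plot_embeddings() is not defined in this snippet. Below is a minimal sketch
# of the kind of helper assumed here (name, signature, and plotting choices
# are assumptions); it would need to appear earlier in the script than the
# call above. It projects the embeddings to 2-D with t-SNE and scatter-plots
# them colored by class label.

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE


def plot_embeddings(embeddings, node_ids, node_labels):
    # 2-D t-SNE projection of the selected nodes' embeddings
    pos = TSNE(n_components=2).fit_transform(embeddings[node_ids])

    # group node indices by class so each class gets one color and legend entry
    by_label = {}
    for i, label in enumerate(node_labels):
        by_label.setdefault(label, []).append(i)

    for label, idx in by_label.items():
        plt.scatter(pos[idx, 0], pos[idx, 1], s=8, label=label)
    plt.legend()
    plt.show()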