def build_simple_gcn(num_filters, graph_conv_filters):
    # Single graph-convolution layer followed by a softmax; X and Y are the
    # global feature matrix and one-hot label matrix from the surrounding cells.
    model = Sequential()
    model.add(GraphCNN(Y.shape[1], num_filters, graph_conv_filters,
                       input_shape=(X.shape[1],),
                       activation='elu',
                       kernel_regularizer=l2(5e-4)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.01),
                  metrics=['acc'])
    model.summary()
    return model
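# A minimal usage sketch for build_simple_gcn, assuming the feature matrix X,
# the one-hot label matrix Y, and the adjacency matrix A from the surrounding
# cells are already defined (the raw adjacency is used as the single filter here):
simple_filters = K.constant(A)
simple_model = build_simple_gcn(1, simple_filters)
simple_model.fit(X, Y, batch_size=A.shape[0], epochs=200, shuffle=False, verbose=0)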
Y = to_categorical(Y_o_dim)  # one-hot encode the labels we want to classify into
train_on_weight = np.array([1, 1, 0])  # per-node sample weights: the zero masks the third node out of the loss
print("Now we won't do any fancy preprocessing, just basic training.")

NUM_FILTERS = 1
graph_conv_filters = A  # you may try np.eye(3)
graph_conv_filters = K.constant(graph_conv_filters)

model = Sequential()
model.add(GraphCNN(Y.shape[1], NUM_FILTERS, graph_conv_filters,
                   input_shape=(X.shape[1],),
                   activation='elu',
                   kernel_regularizer=l2(5e-4)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['acc'])
model.summary()

model.fit(X, Y, batch_size=A.shape[0], sample_weight=train_on_weight,
          epochs=200, shuffle=False, verbose=0)
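# Quick sanity check after training (a sketch, assuming the small toy graph
# above): predict on all nodes and compare against the true labels, including
# the node that was excluded from the loss by its zero sample weight.
Y_pred = model.predict(X, batch_size=A.shape[0])
print(np.argmax(Y_pred, axis=1), np.argmax(Y, axis=1))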
# print(X.shape, Y_train.shape)

# Build Graph Convolution filters
SYM_NORM = True
A_norm = preprocess_adj_numpy(A, SYM_NORM)
num_filters = 2
graph_conv_filters = np.concatenate([A_norm, np.matmul(A_norm, A_norm)], axis=0)
graph_conv_filters = K.constant(graph_conv_filters)

# Build Model
input_tensor = Input(shape=(X.shape[1],))
x = GraphCNN(16, num_filters, graph_conv_filters,
             input_shape=(X.shape[1],),
             activation='elu',
             kernel_regularizer=l2(5e-4))(input_tensor)
x = Dropout(0.2)(x)
x = GraphCNN(16, num_filters, graph_conv_filters,
             activation='elu',
             kernel_regularizer=l2(5e-4))(x)
print(x.shape)
output_tensor = Dense(Y.shape[1], activation='softmax')(x)
model = Model(input_tensor, output_tensor)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['acc'])
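# A hedged training sketch for the two-layer functional model above, assuming
# the masked label matrix Y_train, the per-node train_mask, and the adjacency
# matrix A from earlier cells are available in scope:
model.summary()
model.fit(X, Y_train, sample_weight=train_mask,
          batch_size=A.shape[0], epochs=200, shuffle=False, verbose=0)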
def load_gnn_model():
    X, Y = get_data()
    A = get_adj_matrix(X)
    _, Y_val, _, train_idx, val_idx, test_idx, train_mask = get_splits(Y)
    train_idx = np.array(train_idx)
    val_idx = np.array(val_idx)
    test_idx = np.array(test_idx)
    labels = np.argmax(Y, axis=1) + 1

    # Masked copies of the labels for the train/test splits
    Y_train = np.zeros(Y.shape)
    labels_train = np.zeros(labels.shape)
    Y_train[train_idx] = Y[train_idx]
    labels_train[train_idx] = labels[train_idx]

    Y_test = np.zeros(Y.shape)
    labels_test = np.zeros(labels.shape)
    Y_test[test_idx] = Y[test_idx]
    labels_test[test_idx] = labels[test_idx]

    # Build Graph Convolution filters
    SYM_NORM = True
    # A_norm = preprocess_adj_numpy(A, SYM_NORM)
    A_norm = A
    num_filters = 2
    graph_conv_filters = np.concatenate([A_norm, np.matmul(A_norm, A_norm)], axis=0)
    graph_conv_filters = K.constant(graph_conv_filters)

    # Alternative GAT variant, kept for reference only; it is not used below
    # and is superseded by the GCN model whose weights are loaded from gnn.h5.
    # model = Sequential()
    # model.add(Dropout(0.6, input_shape=(X.shape[1],)))
    # model.add(GraphAttentionCNN(8, A, num_filters, graph_conv_filters,
    #                             num_attention_heads=8, attention_combine='concat',
    #                             attention_dropout=0.6, activation='elu',
    #                             kernel_regularizer=l2(5e-4)))
    # model.add(Dropout(0.6))
    # model.add(GraphAttentionCNN(Y.shape[1], A, num_filters, graph_conv_filters,
    #                             num_attention_heads=1, attention_combine='average',
    #                             attention_dropout=0.6, activation='elu',
    #                             kernel_regularizer=l2(5e-4)))
    # model.add(Activation('softmax'))
    # model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.005),
    #               metrics=['accuracy'])

    # Build Model
    input_tensor = Input(shape=(X.shape[1],))
    x = GraphCNN(16, num_filters, graph_conv_filters,
                 input_shape=(X.shape[1],),
                 activation='elu',
                 kernel_regularizer=l2(5e-4))(input_tensor)
    x = Dropout(0.2)(x)
    x = GraphCNN(16, num_filters, graph_conv_filters,
                 activation='elu',
                 kernel_regularizer=l2(5e-4),
                 name='g_out')(x)
    output_tensor = Dense(Y.shape[1], activation='softmax', name='sub_out')(x)
    model = Model(input_tensor, output_tensor)
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.01),
                  metrics=['acc'])
    model.summary()

    # Load the pretrained weights and expose the penultimate GraphCNN layer
    # ('g_out') as a feature extractor.
    model.load_weights('gnn.h5')
    # Y_pred = model.predict(X, batch_size=A.shape[0])
    sub_model = Model(inputs=model.input, outputs=model.get_layer('g_out').output)
    # result = sub_model.predict(X, batch_size=A.shape[0])
    # print(result)
    return sub_model
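# A usage sketch for load_gnn_model, assuming the pretrained weights in
# 'gnn.h5' and the same data pipeline used when they were saved: the returned
# sub-model maps node features to the 16-dimensional 'g_out' embeddings
# instead of class probabilities. The variable names below are illustrative.
embedder = load_gnn_model()
X_feat, _ = get_data()
node_embeddings = embedder.predict(X_feat, batch_size=X_feat.shape[0])
print(node_embeddings.shape)  # (num_nodes, 16)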
# ## The model itself

# + {"outputId": "a3142dd2-4ff0-4bb6-a833-a7046f4e0596", "colab_type": "code", "id": "3lOBizVa4rVt", "colab": {"base_uri": "https://localhost:8080/", "height": 1295}}
A_norm = normalize_adj_numpy(A, True)
num_filters = 2
graph_conv_filters = np.concatenate([A_norm, np.matmul(A_norm, A_norm)], axis=0)
print(graph_conv_filters.shape)
graph_conv_filters = K.constant(graph_conv_filters)

# Build Model
inp = Input(shape=(X.shape[1],))
x = GraphCNN(16, num_filters, graph_conv_filters,
             activation='elu',
             kernel_regularizer=l2(5e-4))(inp)
x = Dropout(0.2)(x)
x = GraphCNN(Y_train.shape[1], num_filters, graph_conv_filters,
             activation='elu',
             kernel_regularizer=l2(5e-4))(x)
x = Activation('softmax')(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['acc'])
model.summary()
Y_test = np.zeros(Y.shape)
labels_test = np.zeros(labels.shape)
Y_test[test_idx] = Y[test_idx]
labels_test[test_idx] = labels[test_idx]

# Build Graph Convolution filters
SYM_NORM = True
A_norm = preprocess_adj_numpy(A, SYM_NORM)
num_filters = 2
graph_conv_filters = np.concatenate([A_norm, np.matmul(A_norm, A_norm)], axis=0)
graph_conv_filters = K.constant(graph_conv_filters)

# Build Model
model = Sequential()
model.add(GraphCNN(16, num_filters, graph_conv_filters,
                   input_shape=(X.shape[1],),
                   activation='elu',
                   kernel_regularizer=l2(5e-4)))
model.add(Dropout(0.2))
model.add(GraphCNN(Y.shape[1], num_filters, graph_conv_filters,
                   activation='elu',
                   kernel_regularizer=l2(5e-4)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              metrics=['acc'])

nb_epochs = 500
start_time = time.time()  # START: Training Time Tracker
for epoch in range(nb_epochs):
    # Full-batch training: one gradient step per epoch, with train_mask
    # zeroing out the loss on non-training nodes.
    model.fit(X, Y_train, sample_weight=train_mask,
              batch_size=A.shape[0], epochs=1, shuffle=False, verbose=0)
    Y_pred = model.predict(X, batch_size=A.shape[0])
    # Y_pred_proba = model.predict_proba(X, batch_size=A.shape[0])  # unused; predict() already returns softmax probabilities
    _, train_acc = evaluate_preds(Y_pred, [Y_train], [train_idx])
    _, test_acc = evaluate_preds(Y_pred, [Y_test], [test_idx])
    print("Epoch: {:04d}".format(epoch),
          "train_acc= {:.4f}".format(train_acc[0]),
          "test_acc= {:.4f}".format(test_acc[0]))
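# start_time is recorded above but never reported; a small follow-up sketch
# that prints the elapsed wall-clock time and persists the trained weights
# for later reuse (the file name 'gnn_sequential.h5' is just an example):
print("Training took {:.2f} seconds".format(time.time() - start_time))
model.save_weights('gnn_sequential.h5')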