def test_predict_proba(self):
    pickle_fname = 'abp_CV_fold_1_tlXlY_trn.pkl'
    gcn_graph = GCNDataset.load_transkribus_pickle(pickle_fname)
    gcn_graph_train = [gcn_graph[8], gcn_graph[18], gcn_graph[29]]

    node_dim = gcn_graph[0].X.shape[1]
    edge_dim = gcn_graph[0].E.shape[1] - 2
    nb_class = gcn_graph[0].Y.shape[1]

    gcn_model = GraphAttNet(node_dim, nb_class, num_layers=1, learning_rate=0.01,
                            node_indim=-1, nb_attention=3)
    gcn_model.dropout_rate_node = 0.2
    gcn_model.dropout_rate_attention = 0.2
    gcn_model.create_model()

    with tf.Session() as session:
        session.run([gcn_model.init])
        # Train on the full graph list, then get class probabilities for one graph
        gcn_model.train_lG(session, gcn_graph)
        g_proba = gcn_model.prediction_prob(session, gcn_graph_train[1])
        print(g_proba.shape)
        print(type(g_proba))
        print(gcn_graph_train[1].X.shape)
        self.assertTrue(g_proba.shape == (gcn_graph_train[1].X.shape[0], 5))
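
# A minimal extra check one could add to the test above (not part of the original
# suite): assuming prediction_prob returns per-node softmax outputs, each row should
# be a probability distribution. The helper below is a sketch;
# `assert_rows_are_distributions` is a hypothetical name and only numpy is assumed.
def assert_rows_are_distributions(proba, tol=1e-4):
    """Sanity-check that every row of `proba` is non-negative and sums to ~1."""
    import numpy as np
    proba = np.asarray(proba)
    assert np.all(proba >= -tol), "negative probability found"
    assert np.allclose(proba.sum(axis=1), 1.0, atol=tol), "rows do not sum to 1"
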
def test_graphattnet_train_dropout(self):
    pickle_fname = 'abp_CV_fold_1_tlXlY_trn.pkl'
    gcn_graph = GCNDataset.load_transkribus_pickle(pickle_fname)
    gcn_graph_train = [gcn_graph[8], gcn_graph[18], gcn_graph[29]]

    node_dim = gcn_graph[0].X.shape[1]
    edge_dim = gcn_graph[0].E.shape[1] - 2
    nb_class = gcn_graph[0].Y.shape[1]

    gcn_model = GraphAttNet(node_dim, nb_class, num_layers=1, learning_rate=0.01,
                            node_indim=-1, nb_attention=3)
    gcn_model.dropout_rate_node = 0.2
    gcn_model.dropout_rate_attention = 0.2
    gcn_model.create_model()

    with tf.Session() as session:
        session.run([gcn_model.init])
        # Accuracy before training
        g_acc, node_acc = gcn_model.test_lG(session, gcn_graph_train)
        print('Mean Accuracy', g_acc, node_acc)
        # Train on all graphs, then re-evaluate
        gcn_model.train_lG(session, gcn_graph)
        g_acc, node_acc = gcn_model.test_lG(session, gcn_graph_train)
        print('Mean Accuracy', g_acc, node_acc)
def test_train_ensemble_NN_model(self):
    # TODO Make a proper synthetic dataset for test purposes
    pickle_fname = 'abp_CV_fold_1_tlXlY_trn.pkl'
    gcn_graph = GCNDataset.load_transkribus_pickle(pickle_fname)
    gcn_graph_train = [gcn_graph[8], gcn_graph[18], gcn_graph[29]]

    node_dim = gcn_graph[0].X.shape[1]
    edge_dim = gcn_graph[0].E.shape[1] - 2
    nb_class = gcn_graph[0].Y.shape[1]

    gat_model = GraphAttNet(node_dim, nb_class, num_layers=1, learning_rate=0.01,
                            node_indim=-1, nb_attention=3)
    gat_model.dropout_rate_node = 0.2
    gat_model.dropout_rate_attention = 0.2
    gat_model.create_model()

    nb_layers = 3
    lr = 0.001
    nb_conv = 2
    ecn_model = EdgeConvNet(node_dim, edge_dim, nb_class,
                            num_layers=nb_layers, learning_rate=lr, mu=0.0,
                            node_indim=-1, nconv_edge=nb_conv)
    ecn_model.create_model()

    # Check graphs: are we recopying the models and graph definitions implicitly?
    ensemble = EnsembleGraphNN([ecn_model, gat_model])

    with tf.Session() as session:
        session.run([ensemble.models[0].init])
        for _ in range(500):
            ensemble.train_lG(session, gcn_graph_train)

        prediction = ensemble.predict_lG(session, gcn_graph_train)
        print(prediction)
        self.assertTrue(len(prediction) == len(gcn_graph_train))

        print('Ensemble Prediction')
        accs = ensemble.test_lG(session, gcn_graph_train)
        print(accs)

        print('Base Predictions')
        for m in ensemble.models:
            m_accs = m.test_lG(session, gcn_graph_train)
            print(m_accs)
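
# The TODO above asks for a proper synthetic dataset so this test no longer depends on
# the Transkribus pickle. Below is a minimal sketch of such a generator, built only from
# the attributes the tests actually read (X, E, F, Y, Sind, Tind) and the incidence
# layout implied by the feed_dict construction in the layer tests (Sind rows are
# (source_node, edge) pairs, Tind rows are (edge, target_node) pairs, and the first two
# columns of E are assumed to hold the edge endpoints, hence `E.shape[1] - 2`).
# `SyntheticGraph` and `make_synthetic_graphs` are hypothetical names, not existing API.
import numpy as np

class SyntheticGraph:
    def __init__(self, nb_node, node_dim, edge_dim, nb_class, rng):
        nb_edge = 2 * nb_node
        src = rng.randint(0, nb_node, nb_edge)
        tgt = rng.randint(0, nb_node, nb_edge)
        self.X = rng.rand(nb_node, node_dim).astype('float32')      # node features
        # Edge matrix: [source, target, edge features]; F keeps the features only
        self.E = np.hstack([src[:, None], tgt[:, None],
                            rng.rand(nb_edge, edge_dim)]).astype('float32')
        self.F = self.E[:, 2:]
        # One-hot node labels
        self.Y = np.eye(nb_class, dtype='float32')[rng.randint(0, nb_class, nb_node)]
        # Sparse incidence indices: S is (node, edge), T is (edge, node)
        self.Sind = np.stack([src, np.arange(nb_edge)], axis=-1)
        self.Tind = np.stack([np.arange(nb_edge), tgt], axis=-1)

def make_synthetic_graphs(nb_graphs=3, nb_node=10, node_dim=5, edge_dim=3, nb_class=5, seed=0):
    rng = np.random.RandomState(seed)
    return [SyntheticGraph(nb_node, node_dim, edge_dim, nb_class, rng) for _ in range(nb_graphs)]
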
def test_dense_attn_layer(self):
    gcn_graph = get_graph_test()
    node_dim = gcn_graph.X.shape[1]
    edge_dim = gcn_graph.E.shape[1] - 2
    nb_class = gcn_graph.Y.shape[1]

    gcn_model = GraphAttNet(node_dim, nb_class, num_layers=1, learning_rate=0.01,
                            node_indim=8, nb_attention=1)
    gcn_model.create_model()

    Wa = tf.eye(node_dim)
    va = tf.ones([2, node_dim])

    alphas, nH = gcn_model.dense_graph_attention_layer(
        gcn_model.node_input, Wa, va, gcn_model.nb_node,
        gcn_model.dropout_p_attn, gcn_model.dropout_p_node)
    alphas_shape = tf.shape(alphas)

    init = tf.global_variables_initializer()

    graph = gcn_graph
    with tf.Session() as session:
        session.run([init])
        print('### Graph', graph.X.shape, graph.F.shape[0])
        # print(graph.Sind)
        # print(graph.Tind)
        nb_node = graph.X.shape[0]
        Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
        print("Adjacency Indices:", Aind.shape, Aind)

        feed_batch = {
            gcn_model.nb_node: graph.X.shape[0],
            gcn_model.nb_edge: graph.F.shape[0],
            gcn_model.node_input: graph.X,
            gcn_model.Ssparse: np.array(graph.Sind, dtype='int64'),
            gcn_model.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
            gcn_model.Tsparse: np.array(graph.Tind, dtype='int64'),
            gcn_model.Aind: Aind,
            # self.F: graph.F,
            gcn_model.y_input: graph.Y,
            # self.dropout_p_H: self.dropout_rate_H,
            gcn_model.dropout_p_node: 0.0,
            gcn_model.dropout_p_attn: 0.0,
        }
        [c_alphas, c_nH, c_alphas_shape] = session.run([alphas, nH, alphas_shape],
                                                       feed_dict=feed_batch)
        print('alphas', c_alphas, c_alphas_shape)
        # The dense layer already returns a dense attention matrix, so no sparse
        # conversion is needed here:
        # sp_mat = sp.coo_matrix((c_alphas.data, (c_alphas.indices[:, 0], c_alphas.indices[:, 1])),
        #                        shape=(nb_node, nb_node))
        Att_dense = c_alphas
        print(Att_dense)
        # TODO Update True Value
        self.assertTrue(c_alphas_shape[0] == 3)
        self.assertTrue(c_alphas_shape[1] == 3)
def test_graphattnet_attnlayer_selfloop(self):
    gcn_graph = get_graph_test()
    node_dim = gcn_graph.X.shape[1]
    edge_dim = gcn_graph.E.shape[1] - 2
    nb_class = gcn_graph.Y.shape[1]

    gcn_model = GraphAttNet(node_dim, nb_class, num_layers=1, learning_rate=0.01,
                            node_indim=8, nb_attention=1)
    gcn_model.create_model()

    Wa = tf.eye(node_dim)
    va = tf.ones([2, node_dim])

    alphas, nH = gcn_model.simple_graph_attention_layer(
        gcn_model.node_input, Wa, va,
        gcn_model.Ssparse, gcn_model.Tsparse, gcn_model.Aind,
        gcn_model.Sshape, gcn_model.nb_edge,
        gcn_model.dropout_p_attn, gcn_model.dropout_p_node,
        add_self_loop=True)
    alphas_shape = tf.shape(alphas)

    # Sparse identity pattern, built for debugging the self-loop values
    node_indices = tf.range(gcn_model.Sshape[0])
    id_indices = tf.stack([node_indices, node_indices], axis=1)
    val = tf.squeeze(tf.matmul(gcn_model.node_input, va, transpose_b=True))
    spI = tf.SparseTensor(indices=id_indices, values=val,
                          dense_shape=[gcn_model.Sshape[0], gcn_model.Sshape[0]])

    init = tf.global_variables_initializer()
    # AI = tf.sparse_add(alphas, spI)

    graph = gcn_graph
    with tf.Session() as session:
        session.run([init])
        print('### Graph', graph.X.shape, graph.F.shape[0])
        # print(graph.Sind)
        # print(graph.Tind)
        nb_node = graph.X.shape[0]
        Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
        print("Adjacency Indices:", Aind.shape, Aind)

        feed_batch = {
            gcn_model.nb_node: graph.X.shape[0],
            gcn_model.nb_edge: graph.F.shape[0],
            gcn_model.node_input: graph.X,
            gcn_model.Ssparse: np.array(graph.Sind, dtype='int64'),
            gcn_model.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
            gcn_model.Tsparse: np.array(graph.Tind, dtype='int64'),
            gcn_model.Aind: Aind,
            # self.F: graph.F,
            gcn_model.y_input: graph.Y,
            # self.dropout_p_H: self.dropout_rate_H,
            gcn_model.dropout_p_node: 0.0,
            gcn_model.dropout_p_attn: 0.0,
        }
        [c_alphas, c_nH, c_alphas_shape, c_spI] = session.run(
            [alphas, nH, alphas_shape, spI], feed_dict=feed_batch)
        print('alphas', c_alphas, c_alphas_shape)
        print('spI', c_spI)
        # print('AI', AI)

        # Convert the sparse attention coefficients to a dense matrix for inspection
        sp_mat = sp.coo_matrix(
            (c_alphas.values, (c_alphas.indices[:, 0], c_alphas.indices[:, 1])),
            shape=(nb_node, nb_node))
        Att_dense = sp_mat.todense()
        print(Att_dense)

        self.assertTrue(c_alphas_shape[0] == 3)
        self.assertTrue(c_alphas_shape[1] == 3)
        self.assertTrue(Att_dense[0, 2] == 0)
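
# A possible follow-up assertion for the test above (not in the original): graph
# attention coefficients are softmax-normalised over each node's neighbourhood, so with
# add_self_loop=True every row of Att_dense would be expected to sum to ~1. This
# assumes simple_graph_attention_layer returns the normalised alphas; treat the helper
# below as a sketch under that assumption, not an established invariant of the code.
def attention_rows_normalised(att_dense, tol=1e-4):
    """Return True if every row of the dense attention matrix sums to ~1."""
    import numpy as np
    row_sums = np.asarray(att_dense).sum(axis=1)
    return np.allclose(row_sums, 1.0, atol=tol)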