import numpy as np
from scipy.spatial import cKDTree
from spektral.layers import GCNConv


def get_adj(arr, k_lst, no_agents):
    """
    Take the new obs as input. In positions 4 to k there are the x and y coordinates of each agent.
    Build an adjacency matrix in which each agent communicates with its k closest neighbours.
    """
    points = [i[2:4] for i in arr]
    adj = np.zeros((no_agents, no_agents), dtype=float)
    # Construct a kd-tree over the agent positions
    tree = cKDTree(points)
    for cnt, row in enumerate(points):
        # Find the k nearest neighbours of each agent (the first nearest
        # neighbour is always the agent itself)
        dd, ii = tree.query(row, k=k_lst)
        # Mark the nearest neighbours in the adjacency matrix
        adj[cnt][ii] = 1
    # Add self-loops and apply the symmetric GCN normalization
    adj = GCNConv.preprocess(adj).astype('f4')
    return adj
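
# A minimal usage sketch, not from the original source: the observation array,
# the number of agents and the value of k below are made up purely for illustration.
no_agents = 5
k = 3  # each agent connects to itself plus its 2 nearest neighbours
obs = np.random.rand(no_agents, 6)  # columns 2 and 3 play the role of x/y positions
adj = get_adj(obs, k, no_agents)
print(adj.shape)  # (5, 5): normalized adjacency with self-loops
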
def getdata(self):
    # Load data
    adj = self.data.a  # The adjacency matrix is stored as an attribute of the dataset.
    # Create filter for GCN and convert to sparse tensor.
    self.data.a = GCNConv.preprocess(self.data.a)
    self.data.a = sp_matrix_to_sp_tensor(self.data.a)
    # Train/valid/test split
    data_tr, data_te = self.data[:-10000], self.data[-10000:]
    np.random.shuffle(data_tr)
    data_tr, data_va = data_tr[:-10000], data_tr[-10000:]
    # We use a MixedLoader since the dataset is in mixed mode
    loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
    loader_va = MixedLoader(data_va, batch_size=batch_size)
    loader_te = MixedLoader(data_te, batch_size=batch_size)
    return adj, loader_tr, loader_va, loader_te
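
# Hedged usage sketch (the `obj` instance name is hypothetical, not from the
# snippet above): in mixed mode each MixedLoader batch is a pair ((x, a), y),
# where x is the node-feature batch, a the shared adjacency and y the labels.
adj, loader_tr, loader_va, loader_te = obj.getdata()
for (x, a), y in loader_tr:
    print(x.shape, y.shape)
    break
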
import networkx as nx
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from spektral.layers import GCNConv

# We then build a graph with NetworkX (adjacency matrix) from the obtained node and edge lists
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edge_list)
A = nx.adjacency_matrix(G)
print('Graph info: ', nx.info(G))

# We now build and train the Graph Convolutional Network
# Initialize the hyperparameters
channels = 16          # number of channels in the first layer
dropout = 0.5
l2_reg = 5e-4
learning_rate = 1e-2
epochs = 200
es_patience = 10

# Add self-loops and apply the symmetric GCN normalization to the adjacency matrix
A = GCNConv.preprocess(A).astype('f4')

# Define the model
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GCNConv(channels,
                       activation='relu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GCNConv(num_classes,
                       activation='softmax',
                       use_bias=False)([dropout_2, fltr_in])

# We then build and compile the model as follows:
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
optimizer = Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
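
# A hedged training sketch in Spektral's single mode (the whole graph is one batch);
# X, y, train_mask and val_mask are hypothetical names for the node features,
# one-hot labels and boolean split masks, which are not defined in the snippet above.
from tensorflow.keras.callbacks import EarlyStopping

validation_data = ([X, A], y, val_mask)
model.fit([X, A], y,
          sample_weight=train_mask,
          epochs=epochs,
          batch_size=N,
          validation_data=validation_data,
          shuffle=False,  # shuffling would break the node ordering of the graph
          callbacks=[EarlyStopping(patience=es_patience, restore_best_weights=True)])
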
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from spektral.data import MixedLoader
from spektral.datasets.mnist import MNIST
from spektral.layers import GCNConv
from spektral.layers.ops import sp_matrix_to_sp_tensor

tf.config.experimental_run_functions_eagerly(True)

# Parameters
batch_size = 32  # Batch size
epochs = 1000    # Number of training epochs
patience = 10    # Patience for early stopping
l2_reg = 5e-4    # Regularization rate for l2

# Load data
data = MNIST()

# The adjacency matrix is stored as an attribute of the dataset.
# Create filter for GCN and convert to sparse tensor.
data.a = GCNConv.preprocess(data.a)
data.a = sp_matrix_to_sp_tensor(data.a)

# Train/valid/test split
data_tr, data_te = data[:-10000], data[-10000:]
np.random.shuffle(data_tr)
data_tr, data_va = data_tr[:-10000], data_tr[-10000:]

# We use a MixedLoader since the dataset is in mixed mode
loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_va = MixedLoader(data_va, batch_size=batch_size)
loader_te = MixedLoader(data_te, batch_size=batch_size)

# Build model
class Net(Model):
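    # One plausible completion of the truncated class, modelled on the official
    # Spektral MNIST example; the layer sizes and the 10-class output are assumptions.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = GCNConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
        self.conv2 = GCNConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
        self.flatten = Flatten()
        self.fc1 = Dense(512, activation='relu')
        self.fc2 = Dense(10, activation='softmax')  # MNIST has 10 classes

    def call(self, inputs):
        x, a = inputs
        x = self.conv1([x, a])
        x = self.conv2([x, a])
        output = self.flatten(x)
        output = self.fc1(output)
        return self.fc2(output)
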
import numpy as np
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from spektral.datasets.mnist import MNIST
from spektral.layers import GCNConv
from spektral.layers.ops import sp_matrix_to_sp_tensor

# Parameters
batch_size = 32  # Batch size
epochs = 1000    # Number of training epochs
patience = 10    # Patience for early stopping
l2_reg = 5e-4    # Regularization rate for l2

# Load data
data = MNIST()

# The adjacency matrix is stored as an attribute of the dataset.
# Create filter for GCN and convert to sparse tensor.
adj = data.a
adj = GCNConv.preprocess(adj)
adj = sp_matrix_to_sp_tensor(adj)

# Train/valid/test split
data_tr, data_te = data[:-10000], data[-10000:]
np.random.shuffle(data_tr)
data_tr, data_va = data_tr[:-10000], data_tr[-10000:]

# Build model
class Net(Model):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.conv1 = GCNConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
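        # One plausible continuation, following the official Spektral MNIST example;
        # the remaining layers and the call() method are assumptions.
        self.conv2 = GCNConv(32, activation='elu', kernel_regularizer=l2(l2_reg))
        self.flatten = Flatten()
        self.fc1 = Dense(512, activation='relu')
        self.fc2 = Dense(10, activation='softmax')  # MNIST has 10 classes

    def call(self, inputs):
        x, a = inputs
        x = self.conv1([x, a])
        x = self.conv2([x, a])
        output = self.flatten(x)
        output = self.fc1(output)
        return self.fc2(output)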