Example 1
 def _preprocess_features(self, features):
     """ The propagation step is made here"""
     if self.normalize_features:
         features = row_normalize(features)
     for i in range(self.num_layers):
         features = self.graph_adj @ features
     return to_sparse_tensor(features)
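
The loop above precomputes A^K @ X: after num_layers sparse multiplications, every row of the feature matrix mixes information from its K-hop neighbourhood (graph_adj is presumably already normalized at this point). A minimal standalone sketch with scipy.sparse; the function name and toy data are illustrative, not from the original codebase:

 import numpy as np
 import scipy.sparse as sp

 def propagate_features(graph_adj, features, num_layers):
     """Precompute A^K @ X by repeated sparse-dense multiplication."""
     for _ in range(num_layers):
         features = graph_adj @ features
     return features

 # Toy 3-node path graph; after two hops the endpoints see each other.
 adj = sp.csr_matrix(np.array([[0, 1, 0],
                               [1, 0, 1],
                               [0, 1, 0]], dtype=np.float32))
 x = np.eye(3, dtype=np.float32)
 print(propagate_features(adj, x, num_layers=2))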
Example 2
 def _preprocess_adj(self, graph_adj):
     """Add self-loops and build the index structures the aggregator needs."""
     adj_with_self_loops = add_self_loops(graph_adj)
     self.adj_dense_shape = adj_with_self_loops.shape
     adj_with_self_loops_tensor = to_sparse_tensor(adj_with_self_loops)
     adj_with_self_loops_coo = adj_with_self_loops.tocoo()
     self.degrees = adj_with_self_loops.sum(axis=1).astype(np.float32)
     if self.aggregator == 'maxpool':
         # maxpool aggregator requires special preprocessing:
         # Fill a matrix with up to max_degree neighbours per node, where max_degree is the
         # maximum degree appearing in the graph. If a node has fewer than max_degree
         # neighbours, the remaining entries in the matrix are set to 0 and therefore refer to
         # node 0. For the maxpool aggregator, however, such duplicates do not matter, since
         # the max operation is invariant to duplicates in the input.
         neighbours_matrix = np.zeros(
             (self.num_nodes, int(np.max(self.degrees))), dtype=np.int32)
         insert_index = 0
         self_node_old = 0
         for i, self_node in enumerate(adj_with_self_loops_coo.row):
             if self_node != self_node_old:
                 insert_index = 0
             neighbours_matrix[
                 self_node, insert_index] = adj_with_self_loops_coo.col[i]
             insert_index += 1
             self_node_old = self_node
         self.adj_with_self_loops_indices = neighbours_matrix
     else:
         # extract the coordinates of all the edges
         # since both row and column coordinates are ordered, row[0] corresponds to col[0] etc.
         self.adj_with_self_loops_indices = np.array(
             [adj_with_self_loops_coo.row, adj_with_self_loops_coo.col])
     return adj_with_self_loops_tensor
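
To see why the duplicates introduced by the 0-padding do not change the result, here is a toy NumPy sketch of how such a padded neighbour-index matrix would be consumed; the aggregation itself lives elsewhere in the model, so this function is hypothetical:

 import numpy as np

 def maxpool_aggregate(features, neighbours_matrix):
     """Gather each node's (padded) neighbour features and take the elementwise max."""
     gathered = features[neighbours_matrix]  # (num_nodes, max_degree, num_features)
     return gathered.max(axis=1)

 # Star graph centred at node 0, with self-loops: node 0 is a genuine
 # neighbour of every node, so the 0-padding merely duplicates it.
 feats = np.array([[1., 0.], [0., 2.], [3., 1.]], dtype=np.float32)
 neigh = np.array([[0, 1, 2],   # node 0: neighbours 0, 1, 2
                   [0, 1, 0],   # node 1: neighbours 0 and 1, padded with 0
                   [0, 2, 0]],  # node 2: neighbours 0 and 2, padded with 0
                  dtype=np.int32)
 print(maxpool_aggregate(feats, neigh))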
Example 3
 def _preprocess_adj(self, graph_adj):
     """Add self-loops, cache the edge indices, and convert to a sparse tensor."""
     adj_with_self_loops = add_self_loops(graph_adj)
     self.adj_dense_shape = adj_with_self_loops.shape
     adj_with_self_loops_tensor = to_sparse_tensor(adj_with_self_loops)
     adj_with_self_loops_coo = adj_with_self_loops.tocoo()
     # extract the coordinates of all the edges
     # since both row and column coordinates are ordered, row[0] corresponds to col[0] etc.
     self.adj_with_self_loops_indices = np.array(
         [adj_with_self_loops_coo.row, adj_with_self_loops_coo.col])
     return adj_with_self_loops_tensor
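
add_self_loops is a helper from the surrounding codebase; a plausible scipy.sparse implementation (an assumption about its behaviour, not the repository's actual code) is:

 import scipy.sparse as sp

 def add_self_loops(graph_adj):
     """Return A + I in CSR format, i.e. the adjacency matrix with a self-loop at every node.

     Assumes the input has no pre-existing self-loops; otherwise their weights
     would be incremented rather than left unchanged.
     """
     return (graph_adj + sp.eye(graph_adj.shape[0], dtype=graph_adj.dtype)).tocsr()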
Example 4
 def _preprocess_features(self, features):
     """ The propagation step is made here"""
     if self.normalize_features:
         features = row_normalize(features)
     initial_features = features.copy()
     alpha = np.float32(self.teleport_prob)
     for i in range(self.num_layers):
         features = ((1 - alpha) * (self.graph_adj @ features)
                     + alpha * initial_features)
     return to_sparse_tensor(features)
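
The loop implements the power iteration X_{k+1} = (1 - alpha) * A @ X_k + alpha * X_0, so the precomputed features approximate propagation with personalized-PageRank weights rather than plain A^K. A standalone sketch under the same assumptions (names are illustrative; graph_adj is assumed to be normalized):

 import numpy as np
 import scipy.sparse as sp

 def ppr_propagate(graph_adj, features, num_layers, teleport_prob):
     """Approximate personalized-PageRank propagation by power iteration."""
     alpha = np.float32(teleport_prob)
     initial_features = features.copy()
     for _ in range(num_layers):
         features = ((1 - alpha) * (graph_adj @ features)
                     + alpha * initial_features)
     return features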
Example 5
 def _preprocess_features(self, features):
     """Optionally row-normalize the features, then convert them to a sparse tensor."""
     if self.normalize_features:
         features = row_normalize(features)
     return to_sparse_tensor(features)
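
row_normalize appears in several of these examples; one plausible implementation (an assumption, not the repository's code) scales every feature row to unit L1 norm and leaves all-zero rows untouched:

 import numpy as np
 import scipy.sparse as sp

 def row_normalize(features):
     """Scale each row of a sparse matrix to sum to 1; all-zero rows stay zero."""
     features = sp.csr_matrix(features, dtype=np.float32)
     row_sums = np.asarray(features.sum(axis=1)).ravel()
     inv = np.divide(1.0, row_sums, out=np.zeros_like(row_sums), where=row_sums != 0)
     return sp.diags(inv) @ features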
Example 6
 def _preprocess_adj(self, graph_adj):
     """Convert the adjacency matrix to a sparse tensor without further preprocessing."""
     return to_sparse_tensor(graph_adj)
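
to_sparse_tensor is used throughout these examples; assuming a TensorFlow backend, a plausible implementation (hypothetical, not the repository's code) converts a scipy sparse matrix into a tf.SparseTensor:

 import numpy as np
 import tensorflow as tf

 def to_sparse_tensor(matrix):
     """Convert a scipy sparse matrix to a canonically ordered tf.SparseTensor."""
     coo = matrix.tocoo().astype(np.float32)
     indices = np.vstack([coo.row, coo.col]).T.astype(np.int64)
     return tf.sparse.reorder(tf.SparseTensor(indices, coo.data, coo.shape))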
Example 7
 def _preprocess_adj(self, graph_adj):
     """Renormalize the adjacency matrix, then convert it to a sparse tensor."""
     return to_sparse_tensor(renormalize_adj(graph_adj))
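
The name renormalize_adj suggests the renormalization trick from Kipf & Welling (2017), D^{-1/2} (A + I) D^{-1/2}. A plausible sketch of such a helper (an assumption; the repository's version may differ):

 import numpy as np
 import scipy.sparse as sp

 def renormalize_adj(graph_adj):
     """Symmetrically normalize A + I: D^{-1/2} (A + I) D^{-1/2} (assumed behaviour)."""
     adj = sp.csr_matrix(graph_adj, dtype=np.float32) + sp.eye(graph_adj.shape[0], dtype=np.float32)
     degrees = np.asarray(adj.sum(axis=1)).ravel()
     with np.errstate(divide='ignore'):
         d_inv_sqrt = np.power(degrees, -0.5)
     d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
     d_mat = sp.diags(d_inv_sqrt)
     return (d_mat @ adj @ d_mat).tocsr()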
Example 8
 def _preprocess_adj(self, graph_adj):
     """Store node degrees and, for smoothed propagation, normalize the adjacency matrix."""
     self.degrees = graph_adj.sum(axis=1).astype(np.float32)
     if self.prop_type == 'smoothed':
         graph_adj = normalize_adj(graph_adj)
     return to_sparse_tensor(graph_adj)
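
normalize_adj presumably performs the same symmetric normalization as the renormalize_adj sketch above but without adding self-loops first (again an assumption about the helper):

 import numpy as np
 import scipy.sparse as sp

 def normalize_adj(graph_adj):
     """Symmetrically normalize A: D^{-1/2} A D^{-1/2} (assumed behaviour)."""
     adj = sp.csr_matrix(graph_adj, dtype=np.float32)
     degrees = np.asarray(adj.sum(axis=1)).ravel()
     with np.errstate(divide='ignore'):
         d_inv_sqrt = np.power(degrees, -0.5)
     d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
     d_mat = sp.diags(d_inv_sqrt)
     return (d_mat @ adj @ d_mat).tocsr()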
Example 9
 def _preprocess_features(self, features):
     """Convert the features to a sparse tensor without further preprocessing."""
     return to_sparse_tensor(features)