def sparse_edge_to_sparse_tensor(edge_index: np.ndarray,
                                 edge_weight: np.ndarray = None,
                                 shape: tuple = None) -> tf.SparseTensor:
    """
    edge_index: shape [M, 2] or [2, M]
    edge_weight: shape [M,]
    """
    edge_index = gf.asedge(edge_index, shape="row_wise")
    if edge_weight is None:
        edge_weight = tf.ones(edge_index.shape[0], dtype=gg.floatx())
    if shape is None:
        shape = gf.maybe_shape(edge_index)
    return tf.SparseTensor(edge_index, edge_weight, shape)
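# Hedged usage sketch (not part of the original module): the demo below assumes the
# module-level imports used above -- numpy as np, tensorflow as tf, graphgallery as gg
# and graphgallery.functional as gf -- are available; the tiny 3-node edge list is
# made-up illustration data and `_demo_tf_sparse_tensor` is a hypothetical name.
def _demo_tf_sparse_tensor():
    import numpy as np
    import tensorflow as tf
    # Row-wise edge list of shape [M, 2] for a 3-node cycle, with weights of shape [M,].
    edge_index = np.array([[0, 1], [1, 2], [2, 0]])
    edge_weight = np.array([1.0, 2.0, 0.5], dtype="float32")
    adj = sparse_edge_to_sparse_tensor(edge_index, edge_weight, shape=(3, 3))
    # `adj` is a tf.SparseTensor; densify it to inspect the weighted adjacency matrix.
    print(tf.sparse.to_dense(adj))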
def sparse_edge_to_sparse_tensor(edge_index: np.ndarray,
                                 edge_weight: np.ndarray = None,
                                 shape: tuple = None) -> torch.Tensor:
    """
    edge_index: shape [2, M]
    edge_weight: shape [M,]
    """
    edge_index = gf.asedge(edge_index, shape="col_wise")
    edge_index = torch.LongTensor(edge_index)
    if edge_weight is None:
        edge_weight = torch.ones(edge_index.shape[1], dtype=getattr(torch, "float32"))
    else:
        edge_weight = torch.tensor(edge_weight)
    if shape is None:
        shape = gf.maybe_shape(edge_index)
    shape = torch.Size(shape)
    dtype = str(edge_weight.dtype)
    return getattr(torch.sparse, dtype_to_tensor_class(dtype))(edge_index, edge_weight, shape)
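# Hedged sketch (an assumption, not GraphGallery's actual helper): `dtype_to_tensor_class`
# above is assumed to map a dtype string such as "torch.float32" to the name of the matching
# legacy `torch.sparse` constructor, e.g. "FloatTensor". A compatible stand-in could look
# like this; the function name and the covered dtypes are illustrative only.
def _dtype_to_tensor_class_sketch(dtype: str) -> str:
    # Map a stringified torch dtype to a legacy torch.sparse constructor name.
    mapping = {
        "torch.float32": "FloatTensor",
        "torch.float64": "DoubleTensor",
        "torch.int64": "LongTensor",
        "torch.int32": "IntTensor",
        "torch.uint8": "ByteTensor",
    }
    return mapping[dtype]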
def split_edges(self, *,
                val: float = 0.05,
                test: float = 0.1,
                train: Optional[float] = None,
                random_state: Optional[int] = None) -> dict:
    graph = self.graph
    assert not graph.is_multiple(), "Not supported for multiple graphs"
    if train is not None:
        assert train + val + test <= 1
    else:
        assert val + test < 1

    np.random.seed(random_state)

    is_directed = graph.is_directed()
    graph = graph.to_directed()
    num_nodes = graph.num_nodes
    row, col = graph.edge_index
    splits = gf.BunchDict()

    # Keep only the upper-triangular portion so each undirected edge appears once.
    mask = row < col
    row, col = row[mask], col[mask]

    # TODO: `edge_attr` processing
    edge_attr = getattr(graph, "edge_attr", None)
    if edge_attr is not None:
        edge_attr = edge_attr[mask]

    n_val = int(math.floor(val * row.shape[0]))
    n_test = int(math.floor(test * row.shape[0]))

    # Positive edges.
    perm = np.random.permutation(row.shape[0])
    row, col = row[perm], col[perm]

    r, c = row[n_val + n_test:], col[n_val + n_test:]
    if train is not None:
        # Take only the first `n_train` of the remaining edges so the train split
        # stays disjoint from the validation and test splits.
        n_train = int(math.floor(train * row.shape[0]))
        r, c = r[:n_train], c[:n_train]
    splits.train_pos_edge_index = np.stack([r, c], axis=0)
    if not is_directed:
        splits.train_pos_edge_index = gf.asedge(splits.train_pos_edge_index,
                                                shape='col_wise',
                                                symmetric=True)

    r, c = row[:n_val], col[:n_val]
    splits.val_pos_edge_index = np.stack([r, c], axis=0)

    r, c = row[n_val:n_val + n_test], col[n_val:n_val + n_test]
    splits.test_pos_edge_index = np.stack([r, c], axis=0)

    # Negative edges: sample from the upper-triangular non-edges.
    neg_adj_mask = np.ones((num_nodes, num_nodes), dtype=bool)
    neg_adj_mask = np.triu(neg_adj_mask, k=1)
    neg_adj_mask[row, col] = False

    neg_row, neg_col = neg_adj_mask.nonzero()
    perm = np.random.permutation(neg_row.shape[0])[:n_val + n_test]
    neg_row, neg_col = neg_row[perm], neg_col[perm]

    row, col = neg_row[:n_val], neg_col[:n_val]
    splits.val_neg_edge_index = np.stack([row, col], axis=0)

    row, col = neg_row[n_val:n_val + n_test], neg_col[n_val:n_val + n_test]
    splits.test_neg_edge_index = np.stack([row, col], axis=0)

    self.splits.update(**splits)
    return self.splits
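# Hedged standalone sketch (illustration only, independent of the class above): the core idea
# of `split_edges` on a tiny 4-node undirected graph -- keep the upper-triangular edges as
# positive candidates, slice them into train/val/test, and sample negatives from the
# complementary upper-triangular entries. `_demo_edge_split` is a hypothetical name and the
# edge list is made-up data; only numpy is required.
def _demo_edge_split(num_nodes: int = 4, seed: int = 42):
    import numpy as np
    rng = np.random.RandomState(seed)
    # Undirected edges stored once (row < col), shape [2, M].
    edge_index = np.array([[0, 0, 1, 2],
                           [1, 2, 3, 3]])
    row, col = edge_index
    perm = rng.permutation(row.shape[0])
    row, col = row[perm], col[perm]
    n_val, n_test = 1, 1
    # Positive splits: first n_val edges for validation, next n_test for test, rest for train.
    val_pos = np.stack([row[:n_val], col[:n_val]], axis=0)
    test_pos = np.stack([row[n_val:n_val + n_test], col[n_val:n_val + n_test]], axis=0)
    train_pos = np.stack([row[n_val + n_test:], col[n_val + n_test:]], axis=0)
    # Negatives: sample from the upper-triangular non-edges.
    neg_mask = np.triu(np.ones((num_nodes, num_nodes), dtype=bool), k=1)
    neg_mask[row, col] = False
    neg_row, neg_col = neg_mask.nonzero()
    neg_perm = rng.permutation(neg_row.shape[0])[:n_val + n_test]
    neg_row, neg_col = neg_row[neg_perm], neg_col[neg_perm]
    neg_edges = np.stack([neg_row, neg_col], axis=0)
    return train_pos, val_pos, test_pos, neg_edges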
def _check_edge_index(edge_index, copy=False):
    edge_index = np.array(edge_index, dtype=np.int64, copy=copy)
    return gf.asedge(edge_index, shape="col_wise")
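# Hedged sketch (not GraphGallery's actual implementation): `gf.asedge` is used throughout
# these functions to normalize an edge list to either a [M, 2] ("row_wise") or [2, M]
# ("col_wise") layout. A minimal standalone version of that layout normalization, covering
# only the calls seen above, could look like this; the name `_asedge_sketch` is hypothetical.
def _asedge_sketch(edge_index, shape: str = "col_wise"):
    import numpy as np
    edge_index = np.asarray(edge_index)
    assert edge_index.ndim == 2 and 2 in edge_index.shape, "expected an edge list"
    if shape == "col_wise" and edge_index.shape[0] != 2:
        edge_index = edge_index.T  # [M, 2] -> [2, M]
    elif shape == "row_wise" and edge_index.shape[1] != 2:
        edge_index = edge_index.T  # [2, M] -> [M, 2]
    return edge_index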