def dropout_adj(edge_index, edge_attr=None, force_undirected=False,
                num_nodes=None, degrees=None, cutoff=10, alpha=1.):
    N = num_nodes
    row, col = edge_index

    if force_undirected:
        row, col, edge_attr = filter_adj(row, col, edge_attr, row < col)

    # Edges whose source node has degree > `cutoff` are kept with a
    # probability that decays as a power law in the degree; all other
    # edges are always kept.
    high_degree = (degrees > cutoff)[row].float()
    keep_probability = high_degree * torch.pow(
        (degrees[row] + 1 - cutoff).float(),
        -alpha / math.log(cutoff + 1, 2))
    # Overwrite low-degree entries with 1 (this also clears any NaNs the
    # power above produces for negative bases).
    keep_probability[(1 - high_degree).bool()] = 1.
    mask = torch.bernoulli(keep_probability).bool()
    row, col, edge_attr = filter_adj(row, col, edge_attr, mask)

    if force_undirected:
        edge_index = torch.stack(
            [torch.cat([row, col], dim=0),
             torch.cat([col, row], dim=0)], dim=0)
        if edge_attr is not None:
            edge_attr = torch.cat([edge_attr, edge_attr], dim=0)
        edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)
    else:
        edge_index = torch.stack([row, col], dim=0)

    return edge_index, edge_attr

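# A minimal sketch of the keep-probability computation above on a toy graph,
# assuming only `torch` and `math` (the degrees, cutoff and alpha values here
# are made up for illustration). Low-degree sources are always kept; the keep
# probability for high-degree sources decays as a power law in the degree.
import math
import torch

degrees = torch.tensor([3, 15, 40, 8])  # per-node degrees
row = torch.tensor([0, 1, 2, 2, 3])     # edge sources
cutoff, alpha = 10, 1.

high_degree = (degrees > cutoff)[row]
keep_probability = torch.pow(
    (degrees[row] + 1 - cutoff).clamp(min=1).float(),
    -alpha / math.log2(cutoff + 1))
keep_probability[~high_degree] = 1.
mask = torch.bernoulli(keep_probability).bool()
print(mask)  # e.g. tensor([ True,  True, False,  True,  True]) -- random
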
def __init__(self, root, data, split_edge, num_hops, percent=100,
             split='train', use_coalesce=False, node_label='drnl',
             ratio_per_hop=1.0, max_nodes_per_hop=None, directed=False,
             **kwargs):
    self.data = data
    self.split_edge = split_edge
    self.num_hops = num_hops
    self.percent = percent
    self.use_coalesce = use_coalesce
    self.node_label = node_label
    self.ratio_per_hop = ratio_per_hop
    self.max_nodes_per_hop = max_nodes_per_hop
    self.directed = directed
    super(SEALDynamicDataset, self).__init__(root)

    pos_edge, neg_edge = get_pos_neg_edges(split, self.split_edge,
                                           self.data.edge_index,
                                           self.data.num_nodes,
                                           self.percent)
    self.links = torch.cat([pos_edge, neg_edge], 1).t().tolist()
    self.labels = [1] * pos_edge.size(1) + [0] * neg_edge.size(1)

    if self.use_coalesce:  # compress multi-edges into single weighted edges
        self.data.edge_index, self.data.edge_weight = coalesce(
            self.data.edge_index, self.data.edge_weight,
            self.data.num_nodes, self.data.num_nodes)

    if 'edge_weight' in self.data:
        edge_weight = self.data.edge_weight.view(-1)
    else:
        edge_weight = torch.ones(self.data.edge_index.size(1),
                                 dtype=torch.long)
    self.A = ssp.csr_matrix(
        (edge_weight, (self.data.edge_index[0], self.data.edge_index[1])),
        shape=(self.data.num_nodes, self.data.num_nodes))

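# Sketch of the CSR adjacency construction above on a toy graph (assumes
# torch and scipy.sparse as ssp only; no SEAL code needed).
import scipy.sparse as ssp
import torch

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
edge_weight = torch.ones(edge_index.size(1), dtype=torch.long)
A = ssp.csr_matrix(
    (edge_weight.numpy(), (edge_index[0].numpy(), edge_index[1].numpy())),
    shape=(3, 3))
print(A.toarray())
# [[0 1 0]
#  [1 0 1]
#  [0 1 0]]
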
def process(self):
    with open(self.raw_paths[0], 'r') as f:
        graph_data = json.load(f)

    # 0 = train, 1 = val, 2 = test
    mask = torch.zeros(len(graph_data['nodes']), dtype=torch.uint8)
    for i in graph_data['nodes']:
        mask[i['id']] = 1 if i['val'] else (2 if i['test'] else 0)
    train_mask, val_mask, test_mask = mask == 0, mask == 1, mask == 2

    row, col = [], []
    for i in graph_data['links']:
        row.append(i['source'])
        col.append(i['target'])
    edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)
    edge_index, _ = remove_self_loops(edge_index)
    edge_index, _ = coalesce(edge_index, None, mask.size(0), mask.size(0))

    x = torch.from_numpy(np.load(self.raw_paths[1])).float()

    with open(self.raw_paths[2], 'r') as f:
        y_data = json.load(f)
    y = []
    for i in range(len(y_data)):
        y.append(y_data[str(i)])
    y = torch.tensor(y, dtype=torch.float)

    data = Data(x=x, edge_index=edge_index, y=y)
    data.train_mask = train_mask
    data.val_mask = val_mask
    data.test_mask = test_mask
    data = data if self.pre_transform is None else self.pre_transform(data)

    data, slices = self.collate([data])
    torch.save((data, slices), self.processed_paths[0])

def process(self):
    VM = sparse.load_npz(self.path_to_VM)
    VMkw = sparse.load_npz(self.path_to_VMkw)
    R = sparse.hstack((VM, VMkw))
    N0 = R.shape[1]

    # Keep only papers (hyperedges) published within the memory window.
    row_years = np.loadtxt(self.path_to_yrs)
    R = R[(row_years >= self.year - self.memory) *
          (row_years <= self.year - 1), :]

    # Keep only the nodes that are not isolated after filtering the
    # papers (hyperedges).
    non_isolated_nodes = np.unique(R.nonzero()[1])
    R = R[:, non_isolated_nodes]

    # Project the hyperedge incidence matrix to a node-node adjacency.
    adj = R.T * R
    adj = adj.tocoo()
    row = torch.from_numpy(adj.row).to(torch.long)
    col = torch.from_numpy(adj.col).to(torch.long)
    edge_index = torch.stack([row, col], dim=0)
    edge_index, _ = coalesce(edge_index, None, adj.shape[0], adj.shape[0])

    x = torch.from_numpy(np.ones(adj.shape[0])).to(torch.float)
    y = torch.from_numpy(non_isolated_nodes).to(torch.long)

    data = Data(x=x, edge_index=edge_index, y=y)
    data = data if self.pre_transform is None else self.pre_transform(data)
    torch.save(self.collate([data]), self.processed_paths[0])

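# Toy illustration of the projection step above: for a paper-by-node
# incidence matrix R, R.T * R counts, for every node pair, the papers they
# share (the diagonal counts papers per node). scipy/numpy only.
import numpy as np
from scipy import sparse

R = sparse.csr_matrix(np.array([[1, 1, 0],
                                [0, 1, 1]]))  # 2 papers over 3 nodes
print((R.T * R).toarray())
# [[1 1 0]
#  [1 2 1]
#  [0 1 1]]
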
def process(self):
    with open(self.raw_paths[0], 'r') as f:
        data = f.read().split('\n')[1:-1]
        x = [[float(v) for v in r.split('\t')[1].split(',')] for r in data]
        x = torch.tensor(x, dtype=torch.float)
        y = [int(r.split('\t')[2]) for r in data]
        y = torch.tensor(y, dtype=torch.float)

    with open(self.raw_paths[1], 'r') as f:
        data = f.read().split('\n')[1:-1]
        data = [[int(v) for v in r.split('\t')] for r in data]
        edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
        edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))

    train_masks, val_masks, test_masks = [], [], []
    for f in self.raw_paths[2:]:
        tmp = np.load(f)
        train_masks += [torch.from_numpy(tmp['train_mask']).to(torch.bool)]
        val_masks += [torch.from_numpy(tmp['val_mask']).to(torch.bool)]
        test_masks += [torch.from_numpy(tmp['test_mask']).to(torch.bool)]
    train_mask = torch.stack(train_masks, dim=1)
    val_mask = torch.stack(val_masks, dim=1)
    test_mask = torch.stack(test_masks, dim=1)

    data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask,
                val_mask=val_mask, test_mask=test_mask)
    data = data if self.pre_transform is None else self.pre_transform(data)
    torch.save(self.collate([data]), self.processed_paths[0])

def create_edge_index(loader, num_nodes=None):
    adj_indptr_source = loader['adj_indptr']
    adj_indices_source = loader['adj_indices']

    # Densify the CSR adjacency and symmetrize it.
    adj_matrix = np.zeros((num_nodes, num_nodes), dtype=int)
    for i in range(1, len(adj_indptr_source)):
        left = adj_indptr_source[i - 1]
        right = adj_indptr_source[i]
        if left == right:
            continue
        indices = adj_indices_source[left:right]
        adj_matrix[i - 1, indices] = 1
        adj_matrix[indices, i - 1] = 1

    row, col = [], []
    for i in range(len(adj_matrix)):
        current_row = adj_matrix[i]
        for j in range(len(current_row)):
            if current_row[j] == 1:
                row.append(i)
                col.append(j)
    edge_index = torch.stack(
        [torch.tensor(row), torch.tensor(col)], dim=0)  # shape (2, 2 * num_edges)

    # NOTE: There are duplicated edges and self-loops in the datasets.
    # Other implementations do not remove them!
    edge_index, _ = remove_self_loops(edge_index)
    edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
    return edge_index

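# A hypothetical shorter equivalent that avoids materializing the dense
# N x N matrix: scipy can rebuild and symmetrize the CSR graph directly.
# The caller would still apply remove_self_loops and coalesce as above.
import numpy as np
import scipy.sparse as sp
import torch

def create_edge_index_sparse(loader, num_nodes):
    data = np.ones_like(loader['adj_indices'])
    adj = sp.csr_matrix((data, loader['adj_indices'], loader['adj_indptr']),
                        shape=(num_nodes, num_nodes))
    adj = (adj + adj.T).tocoo()  # symmetrize, mirroring the dense loop
    return torch.stack([torch.from_numpy(adj.row).long(),
                        torch.from_numpy(adj.col).long()], dim=0)
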
def coalesce(self):
    r"""Orders and removes duplicated entries from edge indices."""
    self.edge_index, self.edge_attr = coalesce(self.edge_index,
                                               self.edge_attr,
                                               self.num_nodes,
                                               self.num_nodes)
    return self

def test_coalesce():
    row = torch.tensor([1, 0, 1, 0, 2, 1])
    col = torch.tensor([0, 1, 1, 1, 0, 0])
    index = torch.stack([row, col], dim=0)

    index, _ = coalesce(index, None, m=3, n=2)
    assert index.tolist() == [[0, 1, 1, 2], [1, 0, 1, 0]]

def is_coalesced(self):
    r"""Returns :obj:`True`, if edge indices are ordered and do not contain
    duplicate entries."""
    edge_index, _ = coalesce(self.edge_index, None, self.num_nodes,
                             self.num_nodes)
    return self.edge_index.numel() == edge_index.numel() and (
        self.edge_index != edge_index).sum().item() == 0

def read_single_data(data_dir, filename):
    temp = h5py.File(osp.join(data_dir, filename), 'r')

    # Read edges and edge attributes (`dataset[()]` replaces the
    # deprecated `.value` accessor).
    pcorr = np.abs(temp['pcorr'][()])

    # Only keep the top 5% of edges (95th-percentile threshold).
    th = np.percentile(pcorr.reshape(-1), 95)
    pcorr[pcorr < th] = 0
    num_nodes = pcorr.shape[0]

    G = from_numpy_matrix(pcorr)
    A = nx.to_scipy_sparse_matrix(G)
    adj = A.tocoo()
    edge_att = np.zeros(len(adj.row))
    for i in range(len(adj.row)):
        edge_att[i] = pcorr[adj.row[i], adj.col[i]]

    edge_index = np.stack([adj.row, adj.col])
    edge_index, edge_att = remove_self_loops(
        torch.from_numpy(edge_index).long(),
        torch.from_numpy(edge_att).float())
    edge_index, edge_att = coalesce(edge_index, edge_att, num_nodes,
                                    num_nodes)
    att = temp['corr'][()]

    return (edge_att.data.numpy(), edge_index.data.numpy(), att,
            temp['indicator'][()], num_nodes)

def is_undirected(edge_index, edge_attr=None, num_nodes=None):
    r"""Returns :obj:`True` if the graph given by :attr:`edge_index` is
    undirected.

    Args:
        edge_index (LongTensor): The edge indices.
        edge_attr (Tensor, optional): Edge weights or multi-dimensional
            edge features. (default: :obj:`None`)
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)

    :rtype: bool
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
                                     num_nodes)
    if edge_attr is None:
        undirected_edge_index = to_undirected(edge_index, num_nodes=num_nodes)
        return edge_index.size(1) == undirected_edge_index.size(1)
    else:
        edge_index_t, edge_attr_t = transpose(edge_index, edge_attr,
                                              num_nodes, num_nodes,
                                              coalesced=True)
        index_symmetric = torch.all(edge_index == edge_index_t)
        attr_symmetric = torch.all(edge_attr == edge_attr_t)
        return index_symmetric and attr_symmetric

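# Quick usage sketch with the torch_geometric.utils version of the same
# function: a symmetric edge set is undirected, an asymmetric one is not.
import torch
from torch_geometric.utils import is_undirected

assert is_undirected(torch.tensor([[0, 1], [1, 0]]))
assert not is_undirected(torch.tensor([[0, 1], [1, 2]]))
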
def forward(self, edge_index, edge_attr):
    """Drops all edges from the adjacency matrix
    :obj:`(edge_index, edge_attr)` whose attribute exceeds the threshold
    :obj:`t`, and returns the remaining graph as undirected.

    Args:
        edge_index (LongTensor): The edge indices.
        edge_attr (Tensor): Edge weights or multi-dimensional edge
            features.
    """
    N = maybe_num_nodes(edge_index, None)
    row, col = edge_index

    mask = edge_attr <= self.t
    row, col, edge_attr = self.filter_adj(row, col, edge_attr, mask)
    edge_index = torch.stack(
        [torch.cat([row, col], dim=0),
         torch.cat([col, row], dim=0)], dim=0)
    edge_attr = torch.cat([edge_attr, edge_attr], dim=0)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)

    return edge_index, edge_attr

def process(self):
    pos_edge, neg_edge = get_pos_neg_edges(self.split, self.split_edge,
                                           self.data.edge_index,
                                           self.data.num_nodes,
                                           self.percent)

    if self.use_coalesce:  # compress multi-edges into single weighted edges
        self.data.edge_index, self.data.edge_weight = coalesce(
            self.data.edge_index, self.data.edge_weight,
            self.data.num_nodes, self.data.num_nodes)

    if 'edge_weight' in self.data:
        edge_weight = self.data.edge_weight.view(-1)
    else:
        edge_weight = torch.ones(self.data.edge_index.size(1),
                                 dtype=torch.long)
    A = ssp.csr_matrix(
        (edge_weight, (self.data.edge_index[0], self.data.edge_index[1])),
        shape=(self.data.num_nodes, self.data.num_nodes))

    # Extract enclosing subgraphs for positive and negative edges.
    pos_list = extract_enclosing_subgraphs(
        pos_edge, A, self.data.x, 1, self.num_hops, self.node_label,
        self.ratio_per_hop, self.max_nodes_per_hop)
    neg_list = extract_enclosing_subgraphs(
        neg_edge, A, self.data.x, 0, self.num_hops, self.node_label,
        self.ratio_per_hop, self.max_nodes_per_hop)

    torch.save(self.collate(pos_list + neg_list), self.processed_paths[0])
    del pos_list, neg_list

def __call__(self, data):
    edge_index = data.edge_index
    num_nodes = data.num_nodes
    edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
    edge_index, _ = coalesce(edge_index, None, num_nodes, num_nodes)
    data.edge_index = edge_index
    return data

def __neighbors_to_graph__(self, neighbors, neighbor_weights,
                           normalization='row', device='cpu'):
    r"""Combine a list of neighbors and neighbor weights to create a sparse
    graph.

    Args:
        neighbors (List[List[int]]): List of neighbors for each node.
        neighbor_weights (List[List[float]]): List of weights for the
            neighbors of each node.
        normalization (str): Normalization of resulting matrix
            (options: :obj:`"row"`, :obj:`"col"`). (default: :obj:`"row"`)
        device (torch.device): Device to create output tensors on.
            (default: :obj:`"cpu"`)

    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    edge_weight = torch.Tensor(np.concatenate(neighbor_weights)).to(device)
    i = np.repeat(np.arange(len(neighbors)),
                  np.fromiter(map(len, neighbors), dtype=int))
    j = np.concatenate(neighbors)
    if normalization == 'col':
        edge_index = torch.from_numpy(np.vstack([j, i])).long().to(device)
        N = len(neighbors)
        edge_index, edge_weight = coalesce(edge_index, edge_weight, N, N)
    elif normalization == 'row':
        edge_index = torch.from_numpy(np.vstack([i, j])).long().to(device)
    else:
        raise ValueError(
            f"PPR matrix normalization {normalization} unknown.")
    return edge_index, edge_weight

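# The index construction above, in isolation: `i` repeats every source node
# once per neighbor, `j` flattens the neighbor lists (numpy only; the
# neighbor lists here are made up).
import numpy as np

neighbors = [[1], [0, 1]]
i = np.repeat(np.arange(len(neighbors)),
              np.fromiter(map(len, neighbors), dtype=int))
j = np.concatenate(neighbors)
print(i, j)  # [0 1 1] [1 0 1]
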
def pool_edge(cluster, edge_index, edge_attr=None):
    num_nodes = cluster.size(0)
    edge_index = cluster[edge_index.view(-1)].view(2, -1)
    edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
                                     num_nodes)
    return edge_index, edge_attr

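# Usage sketch for `pool_edge` above: nodes 0 and 1 merge into cluster 0,
# node 2 maps to cluster 1. The intra-cluster edge becomes a self-loop and
# is removed; the two directed edges between the clusters survive.
import torch

cluster = torch.tensor([0, 0, 1])
edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
new_edge_index, _ = pool_edge(cluster, edge_index)
print(new_edge_index)
# tensor([[0, 1],
#         [1, 0]])
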
def graph_dropout(M, dropout):
    if dropout == 0:
        return M
    # TODO: change based on future sparse matrix support:
    # https://github.com/pytorch/pytorch/projects/24#card-59611437
    index, values = torch_sparse.coalesce(
        M.index, torch.nn.functional.dropout(M.values, dropout),
        M.shape[0], M.shape[1])
    return TorchSparseGraphData(index, values, M.shape)

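# The core of `graph_dropout`, shown on bare index/value tensors: dropout
# zeroes edge values (and rescales survivors by 1 / (1 - p)), after which
# the entries are re-coalesced. Assumes torch and torch_sparse only.
import torch
import torch_sparse

index = torch.tensor([[0, 1], [1, 0]])
values = torch.ones(2)
index, values = torch_sparse.coalesce(
    index, torch.nn.functional.dropout(values, p=0.5), 2, 2)
print(values)  # e.g. tensor([2., 0.]) -- random
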
def pool_edge(cluster, edge_index,
              edge_attr: Optional[torch.Tensor] = None):
    num_nodes = cluster.size(0)
    edge_index = cluster[edge_index.view(-1)].view(2, -1)
    edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
    if edge_index.numel() > 0:
        edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
                                         num_nodes)
    return edge_index, edge_attr

def process(self):
    if self.geom_gcn_preprocess:
        with open(self.raw_paths[0], 'r') as f:
            data = f.read().split('\n')[1:-1]
            x = [[float(v) for v in r.split('\t')[1].split(',')]
                 for r in data]
            x = torch.tensor(x, dtype=torch.float)
            y = [int(r.split('\t')[2]) for r in data]
            y = torch.tensor(y, dtype=torch.long)

        with open(self.raw_paths[1], 'r') as f:
            data = f.read().split('\n')[1:-1]
            data = [[int(v) for v in r.split('\t')] for r in data]
            edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
            edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))

        train_masks, val_masks, test_masks = [], [], []
        for filepath in self.raw_paths[2:]:
            f = np.load(filepath)
            train_masks += [torch.from_numpy(f['train_mask'])]
            val_masks += [torch.from_numpy(f['val_mask'])]
            test_masks += [torch.from_numpy(f['test_mask'])]
        train_mask = torch.stack(train_masks, dim=1).to(torch.bool)
        val_mask = torch.stack(val_masks, dim=1).to(torch.bool)
        test_mask = torch.stack(test_masks, dim=1).to(torch.bool)

        data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask,
                    val_mask=val_mask, test_mask=test_mask)
    else:
        data = np.load(self.raw_paths[0], 'r', allow_pickle=True)
        x = torch.from_numpy(data['features']).to(torch.float)
        edge_index = torch.from_numpy(data['edges']).to(torch.long)
        edge_index = edge_index.t().contiguous()
        edge_index, _ = coalesce(edge_index, None, x.size(0), x.size(0))
        y = torch.from_numpy(data['target']).to(torch.float)

        data = Data(x=x, edge_index=edge_index, y=y)

    if self.pre_transform is not None:
        data = self.pre_transform(data)

    torch.save(self.collate([data]), self.processed_paths[0])

def augment_adj(self, edge_index, edge_weight, num_nodes):
    edge_index, edge_weight = coalesce(edge_index, edge_weight, num_nodes,
                                       num_nodes)
    edge_index, edge_weight = sort_edge_index(edge_index, edge_weight,
                                              num_nodes)
    # Square the adjacency matrix to connect two-hop neighbors.
    edge_index, edge_weight = spspmm(edge_index, edge_weight, edge_index,
                                     edge_weight, num_nodes, num_nodes,
                                     num_nodes)
    return edge_index, edge_weight

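# What `augment_adj` computes, on a path graph 0-1-2: squaring the adjacency
# with spspmm yields two-hop connectivity (torch_sparse assumed; the values
# on the diagonal count closed two-hop walks).
import torch
from torch_sparse import spspmm

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
edge_weight = torch.ones(4)
index2, weight2 = spspmm(edge_index, edge_weight, edge_index, edge_weight,
                         3, 3, 3)
print(index2)
# tensor([[0, 0, 1, 2, 2],
#         [0, 2, 1, 0, 2]])
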
def coalesce_(self, op='add'):
    '''Merge duplicated entries, reducing their values with `op`
    (default: add).'''
    self.indices, self.values = torch_sparse.coalesce(
        self.indices, self.values, self.n, self.m, op)
    self.indices_diag = self.calc_indices_diag()
    return self

def __merge_edges__(self, x, edge_index, batch, edge_score):
    nodes_remaining = set(range(x.size(0)))

    cluster = torch.empty_like(batch, device=torch.device('cpu'))
    edge_argsort = torch.argsort(edge_score, descending=True)

    # Iterate over all edges in order of decreasing score, selecting an
    # edge only if neither of its endpoints has been merged yet.
    i = 0
    new_edge_indices = []
    edge_index_cpu = edge_index.cpu()
    for edge_idx in edge_argsort.tolist():
        source = edge_index_cpu[0, edge_idx].item()
        if source not in nodes_remaining:
            continue

        target = edge_index_cpu[1, edge_idx].item()
        if target not in nodes_remaining:
            continue

        new_edge_indices.append(edge_idx)

        cluster[source] = i
        nodes_remaining.remove(source)

        if source != target:
            cluster[target] = i
            nodes_remaining.remove(target)

        i += 1

    # The remaining nodes are simply kept.
    for node_idx in nodes_remaining:
        cluster[node_idx] = i
        i += 1
    cluster = cluster.to(x.device)

    # We compute the new features as an addition of the old ones.
    new_x = scatter_add(x, cluster, dim=0, dim_size=i)
    new_edge_score = edge_score[new_edge_indices]
    if len(nodes_remaining) > 0:
        remaining_score = x.new_ones(
            (new_x.size(0) - len(new_edge_indices), ))
        new_edge_score = torch.cat([new_edge_score, remaining_score])
    new_x = new_x * new_edge_score.view(-1, 1)

    N = new_x.size(0)
    new_edge_index, _ = coalesce(cluster[edge_index], None, N, N)

    new_batch = x.new_empty(new_x.size(0), dtype=torch.long)
    new_batch = new_batch.scatter_(0, cluster, batch)

    unpool_info = self.unpool_description(edge_index=edge_index,
                                          cluster=cluster, batch=batch,
                                          new_edge_score=new_edge_score)

    return new_x, new_edge_index, new_batch, unpool_info

def mol_df_to_graph(df, bonds=None, allowable_atoms=None,
                    edge_dist_cutoff=4.5, onehot_edges=True):
    """
    Converts a molecule in dataframe format to a graph compatible with
    PyTorch Geometric.

    :param df: Molecule structure in dataframe format.
    :type df: pandas.DataFrame
    :param bonds: Bonds in dataframe format, with the two atom indices in
        the first columns and the bond type in the last column.
    :type bonds: pandas.DataFrame
    :param allowable_atoms: List containing allowable atom types.
    :type allowable_atoms: list[str], optional

    :return: Tuple containing \n
        - node_feats (torch.FloatTensor): Features for each node, one-hot
          encoded by atom type in ``allowable_atoms``.
        - edge_index (torch.LongTensor): Edges from chemical bond graph in
          COO format.
        - edge_feats (torch.FloatTensor): Edge features given by bond type.
          Single = 1.0, Double = 2.0, Triple = 3.0, Aromatic = 1.5.
        - node_pos (torch.FloatTensor): x-y-z coordinates of each node.
    """
    if allowable_atoms is None:
        allowable_atoms = mol_atoms
    node_pos = torch.FloatTensor(df[['x', 'y', 'z']].to_numpy())
    N = df.shape[0]
    bond_mapping = {1.0: 0, 2.0: 1, 3.0: 2, 1.5: 3}

    if bonds is not None:
        bond_data = torch.FloatTensor(bonds.to_numpy())
        # Duplicate each bond in both directions.
        edge_tuples = torch.cat(
            (bond_data[:, :2], torch.flip(bond_data[:, :2], dims=(1, ))),
            dim=0)
        edge_index = edge_tuples.t().long().contiguous()

        if onehot_edges:
            bond_idx = list(
                map(lambda x: bond_mapping[x], bond_data[:, -1].tolist())
            ) + list(map(lambda x: bond_mapping[x], bond_data[:, -1].tolist()))
            edge_attr = F.one_hot(torch.tensor(bond_idx),
                                  num_classes=4).to(torch.float)
            edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)
        else:
            edge_attr = torch.cat(
                (torch.FloatTensor(bond_data[:, -1]).view(-1),
                 torch.FloatTensor(bond_data[:, -1]).view(-1)), dim=0)
    else:
        # No bond information: connect atoms within `edge_dist_cutoff` and
        # weight edges by inverse distance.
        kd_tree = ss.KDTree(node_pos)
        edge_tuples = list(kd_tree.query_pairs(edge_dist_cutoff))
        edge_index = torch.LongTensor(edge_tuples).t().contiguous()
        edge_index = to_undirected(edge_index)
        edge_attr = torch.FloatTensor([
            1.0 / (np.linalg.norm(node_pos[i] - node_pos[j]) + 1e-5)
            for i, j in edge_index.t()
        ]).view(-1)

    node_feats = torch.FloatTensor(
        [one_of_k_encoding_unk(e, allowable_atoms) for e in df['element']])

    return node_feats, edge_index, edge_attr, node_pos

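# Sketch of the bond branch above in isolation: one double bond duplicated
# in both directions, one-hot encoded, then coalesced (torch and
# torch_sparse assumed; the bond row is made up).
import torch
import torch.nn.functional as F
from torch_sparse import coalesce

edge_index = torch.tensor([[0, 1], [1, 0]])   # atoms 0-1, both directions
bond_idx = torch.tensor([1, 1])               # bond type 2.0 -> class 1
edge_attr = F.one_hot(bond_idx, num_classes=4).to(torch.float)
edge_index, edge_attr = coalesce(edge_index, edge_attr, 2, 2)
print(edge_attr)
# tensor([[0., 1., 0., 0.],
#         [0., 1., 0., 0.]])
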
def __call__(self, data):
    edge_attr = None
    num_nodes = (data.edge_index.max().item() + 1 if data.x is None
                 else data.x.size(0))
    edge_index = to_undirected(data.edge_index, num_nodes)
    # edge_index, edge_attr = add_self_loops(edge_index, edge_attr)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes,
                                     num_nodes)
    data.edge_index = edge_index
    data.edge_attr = edge_attr
    return data

def is_coalesced(self) -> bool:
    for value in self.values('adj', 'adj_t'):
        return value.is_coalesced()

    edge_index = self.edge_index
    new_edge_index, _ = coalesce(edge_index, None, self.size(0),
                                 self.size(1))
    return (edge_index.numel() == new_edge_index.numel()
            and bool((edge_index == new_edge_index).all()))

def __call__(self, data):
    N = data.num_nodes
    edge_index = data.edge_index
    assert data.edge_attr is None

    edge_index, _ = add_self_loops(edge_index, num_nodes=N)
    edge_index, _ = coalesce(edge_index, None, N, N)
    data.edge_index = edge_index
    return data

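# The same pipeline on a toy graph, using the utilities directly (a sketch;
# the torch_sparse `coalesce(index, value, m, n)` signature is assumed).
import torch
from torch_geometric.utils import add_self_loops
from torch_sparse import coalesce

edge_index = torch.tensor([[0, 1], [1, 0]])
edge_index, _ = add_self_loops(edge_index, num_nodes=2)
edge_index, _ = coalesce(edge_index, None, 2, 2)
print(edge_index)
# tensor([[0, 0, 1, 1],
#         [0, 1, 0, 1]])
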
def form_edges(P):
    # Forming the connectivities.
    P_coo = P.tocoo()
    row = torch.from_numpy(P_coo.row).to(torch.long)
    col = torch.from_numpy(P_coo.col).to(torch.long)
    edge_index = torch.stack([row, col], dim=0)
    edge_index, _ = coalesce(edge_index, None, P.shape[0], P.shape[0])
    return edge_index

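# Usage sketch for `form_edges` above with any scipy sparse matrix.
import numpy as np
from scipy import sparse

P = sparse.csr_matrix(np.array([[0, 1], [1, 0]]))
print(form_edges(P))
# tensor([[0, 1],
#         [1, 0]])
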
def __merge_stars_with_attr_gpu__(self, x, edge_index, edge_attr, batch,
                                  node_score):
    device = x.device
    nodes_remaining = set(range(x.size(0)))
    node_argsort = torch.argsort(node_score, descending=True)
    cluster = torch.empty_like(batch, device=torch.device('cpu'))

    # Iterate over nodes in order of decreasing score, merging each
    # remaining node with its unmerged neighbors into a star.
    edge_index_cpu = edge_index.cpu()
    center_nodes = set()
    i = 0
    for node_idx in node_argsort.tolist():
        if node_idx not in nodes_remaining:
            continue

        dest_bool = edge_index_cpu[0] == node_idx
        # Get the connected nodes.
        dests = set(edge_index_cpu[1][dest_bool].numpy())
        # Remove the previously combined nodes.
        dests.difference_update(center_nodes)

        nodes_remaining.difference_update(dests)
        nodes_remaining.remove(node_idx)
        # Add node_idx to the set of star centers.
        center_nodes.add(node_idx)

        cluster[node_idx] = i
        cluster[list(dests)] = i
        i += 1

    # The remaining nodes are simply kept.
    for node_idx in nodes_remaining:
        cluster[node_idx] = i
        i += 1
    cluster = cluster.to(x.device)

    new_x = scatter_add(x, cluster, dim=0, dim_size=i)
    N = new_x.size(0)
    new_edge_index, new_edge_attr = coalesce(cluster[edge_index], edge_attr,
                                             N, N)

    new_batch = x.new_empty(new_x.size(0), dtype=torch.long)
    new_batch = new_batch.scatter_(0, cluster, batch)

    unpool_info = self.unpool_description(edge_index=edge_index,
                                          cluster=cluster, batch=batch)

    perm = sorted(center_nodes)
    perm = torch.from_numpy(np.array(perm)).view(-1).to(device)

    return new_x, new_edge_index, new_edge_attr, new_batch, unpool_info, perm

def test_coalesce_add():
    row = torch.tensor([1, 0, 1, 0, 2, 1])
    col = torch.tensor([0, 1, 1, 1, 0, 0])
    index = torch.stack([row, col], dim=0)
    value = torch.tensor([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])

    index, value = coalesce(index, value, m=3, n=2)
    assert index.tolist() == [[0, 1, 1, 2], [1, 0, 1, 0]]
    assert value.tolist() == [[6, 8], [7, 9], [3, 4], [5, 6]]

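# Companion sketch: torch_sparse.coalesce also accepts other reductions for
# duplicates, e.g. `op='max'` keeps the larger value.
import torch
from torch_sparse import coalesce

index = torch.tensor([[0, 0], [1, 1]])
value = torch.tensor([[2.], [5.]])
index, value = coalesce(index, value, m=1, n=2, op='max')
assert value.tolist() == [[5.]]
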
def test_data():
    x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
    edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
    data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
    N = data.num_nodes

    assert data.x.tolist() == x.tolist()
    assert data['x'].tolist() == x.tolist()

    assert sorted(data.keys) == ['edge_index', 'x']
    assert len(data) == 2
    assert 'x' in data and 'edge_index' in data and 'pos' not in data

    assert data.__cat_dim__('x', data.x) == 0
    assert data.__cat_dim__('edge_index', data.edge_index) == -1
    assert data.__cumsum__('x', data.x) is False
    assert data.__cumsum__('edge_index', data.edge_index) is True

    assert not data.x.is_contiguous()
    data.contiguous()
    assert data.x.is_contiguous()

    assert not data.is_coalesced()
    data.edge_index, _ = coalesce(data.edge_index, None, N, N)
    assert data.is_coalesced()

    clone = data.clone()
    assert clone != data
    assert len(clone) == len(data)
    assert clone.x.tolist() == data.x.tolist()
    assert clone.edge_index.tolist() == data.edge_index.tolist()

    data['x'] = x + 1
    assert data.x.tolist() == (x + 1).tolist()

    assert data.__repr__() == 'Data(edge_index=[2, 4], x=[3, 2])'

    dictionary = {'x': data.x, 'edge_index': data.edge_index}
    data = Data.from_dict(dictionary)
    assert sorted(data.keys) == ['edge_index', 'x']

    assert not data.contains_isolated_nodes()
    assert not data.contains_self_loops()
    assert data.is_undirected()
    assert not data.is_directed()

    assert data.num_nodes == 3
    assert data.num_edges == 4
    assert data.num_features == 2

    data.x = None
    assert data.num_nodes == 3

    data.edge_index = None
    assert data.num_nodes is None
    assert data.num_edges is None