from typing import Callable, List, Optional, Union

import torch
from torch import Tensor
from torch_scatter import scatter
from torch_sparse import SparseTensor

# `EdgeIndex` and `Adj` are the per-layer NamedTuples `(edge_index/adj_t,
# e_id, size)` returned by PyG's neighbor sampler.
from torch_geometric.loader.neighbor_sampler import Adj, EdgeIndex
from torch_geometric.utils.num_nodes import maybe_num_nodes


class PyGNeighborSampler:
    def __init__(self, edge_index: Tensor, num_nodes: Optional[int] = None):
        edge_index = edge_index.to('cpu')

        if num_nodes is None:
            num_nodes = int(edge_index.max()) + 1

        # Obtain a *transposed* `SparseTensor` instance.
        self.adj_t = SparseTensor(row=edge_index[0], col=edge_index[1],
                                  value=None,
                                  sparse_sizes=(num_nodes, num_nodes)).t()
        self.adj_t.storage.rowptr()  # Pre-compute the CSR representation.

    def sample(self, batch, sizes: List[int] = [-1]):
        if not isinstance(batch, Tensor):
            batch = torch.tensor(batch)
        batch_size: int = len(batch)

        adjs = []
        n_id = batch
        for size in sizes:
            adj_t, n_id = self.adj_t.sample_adj(n_id, size, replace=False)
            e_id = adj_t.storage.value()
            size = adj_t.sparse_sizes()[::-1]
            row, col, _ = adj_t.coo()
            edge_index = torch.stack([col, row], dim=0)
            adjs.append(EdgeIndex(edge_index, e_id, size))

        adjs = adjs[0] if len(adjs) == 1 else adjs[::-1]
        return batch_size, n_id, adjs

    def __repr__(self):
        return f'{self.__class__.__name__}()'
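
# Illustrative usage sketch (an assumption about intended usage, not part of
# the original module): sample a 2-hop neighborhood around two seed nodes of
# a small undirected path graph.
def _demo_pyg_neighbor_sampler():
    # Undirected path 0-1-2-3, stored as directed edges in both directions.
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                               [1, 0, 2, 1, 3, 2]])
    sampler = PyGNeighborSampler(edge_index)

    # Sample up to 2 neighbors per node in each of the two layers for {0, 1}.
    batch_size, n_id, adjs = sampler.sample(torch.tensor([0, 1]),
                                            sizes=[2, 2])

    # `n_id` maps local node indices back to global ones; `adjs` is returned
    # in reverse layer order, so adjs[0] is the outermost bipartite block.
    for edge_index_l, e_id, size in adjs:
        print(size, edge_index_l.size(1))  # block shape, number of edges
    return batch_size, n_id, adjs
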

class NeighborSampler(torch.utils.data.DataLoader):
    r"""The neighbor sampler from the `"Inductive Representation Learning on
    Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper, which allows
    for mini-batch training of GNNs on large-scale graphs where full-batch
    training is not feasible.

    Given a GNN with :math:`L` layers and a specific mini-batch of nodes
    :obj:`node_idx` for which we want to compute embeddings, this module
    iteratively samples neighbors and constructs bipartite graphs that
    simulate the actual computation flow of GNNs.

    More specifically, :obj:`sizes` denotes how many neighbors we want to
    sample for each node in each layer. This module then takes in these
    :obj:`sizes` and iteratively samples :obj:`sizes[l]` for each node
    involved in layer :obj:`l`. In the next layer, sampling is repeated for
    the union of nodes that were already encountered. The actual computation
    graphs are then returned in reverse-mode, meaning that we pass messages
    from a larger set of nodes to a smaller one, until we reach the nodes for
    which we originally wanted to compute embeddings.

    Hence, an item returned by :class:`NeighborSampler` holds the current
    :obj:`batch_size`, the IDs :obj:`n_id` of all nodes involved in the
    computation, and a list of bipartite graph objects via the tuple
    :obj:`(edge_index, e_id, size)`, where :obj:`edge_index` represents the
    bipartite edges between source and target nodes, :obj:`e_id` denotes the
    IDs of original edges in the full graph, and :obj:`size` holds the shape
    of the bipartite graph.
    For each bipartite graph, target nodes are also included at the beginning
    of the list of source nodes so that one can easily apply
    skip-connections or add self-loops.

    .. warning::

        :class:`~torch_geometric.loader.NeighborSampler` is deprecated and
        will be removed in a future release.
        Use :class:`torch_geometric.loader.NeighborLoader` instead.

    .. note::

        For an example of using :obj:`NeighborSampler`, see
        `examples/reddit.py
        <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
        reddit.py>`_ or
        `examples/ogbn_products_sage.py
        <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
        ogbn_products_sage.py>`_.

    Args:
        edge_index (Tensor or SparseTensor): A :obj:`torch.LongTensor` or a
            :obj:`torch_sparse.SparseTensor` that defines the underlying
            graph connectivity/message passing flow.
            :obj:`edge_index` holds the indices of a (sparse) symmetric
            adjacency matrix.
            If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its
            shape must be defined as :obj:`[2, num_edges]`, where messages
            from nodes :obj:`edge_index[0]` are sent to nodes in
            :obj:`edge_index[1]` (in case :obj:`flow="source_to_target"`).
            If :obj:`edge_index` is of type :obj:`torch_sparse.SparseTensor`,
            its sparse indices :obj:`(row, col)` should relate to
            :obj:`row = edge_index[1]` and :obj:`col = edge_index[0]`.
            The major difference between both formats is that we need to
            input the *transposed* sparse adjacency matrix.
        sizes ([int]): The number of neighbors to sample for each node in
            each layer. If set to :obj:`sizes[l] = -1`, all neighbors are
            included in layer :obj:`l`.
        node_idx (LongTensor, optional): The nodes that should be considered
            for creating mini-batches. If set to :obj:`None`, all nodes will
            be considered.
        num_nodes (int, optional): The number of nodes in the graph.
            (default: :obj:`None`)
        return_e_id (bool, optional): If set to :obj:`False`, will not return
            original edge indices of sampled edges. This is only useful in
            case when operating on graphs without edge features to save
            memory.
            (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in
            a sampled mini-batch and returns a transformed version.
            (default: :obj:`None`)
        **kwargs (optional): Additional arguments of
            :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,
            :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.
    """
    def __init__(self, edge_index: Union[Tensor, SparseTensor],
                 sizes: List[int], node_idx: Optional[Tensor] = None,
                 num_nodes: Optional[int] = None, return_e_id: bool = True,
                 transform: Callable = None, **kwargs):

        edge_index = edge_index.to('cpu')

        if 'collate_fn' in kwargs:
            del kwargs['collate_fn']
        if 'dataset' in kwargs:
            del kwargs['dataset']

        # Save for PyTorch Lightning...
        self.edge_index = edge_index
        self.node_idx = node_idx
        self.num_nodes = num_nodes

        self.sizes = sizes
        self.return_e_id = return_e_id
        self.transform = transform
        self.is_sparse_tensor = isinstance(edge_index, SparseTensor)
        self.__val__ = None

        # Obtain a *transposed* `SparseTensor` instance.
        if not self.is_sparse_tensor:
            if (num_nodes is None and node_idx is not None
                    and node_idx.dtype == torch.bool):
                num_nodes = node_idx.size(0)
            if (num_nodes is None and node_idx is not None
                    and node_idx.dtype == torch.long):
                num_nodes = max(int(edge_index.max()),
                                int(node_idx.max())) + 1
            if num_nodes is None:
                num_nodes = int(edge_index.max()) + 1

            value = torch.arange(edge_index.size(1)) if return_e_id else None
            self.adj_t = SparseTensor(row=edge_index[0], col=edge_index[1],
                                      value=value,
                                      sparse_sizes=(num_nodes, num_nodes)).t()
        else:
            adj_t = edge_index
            if return_e_id:
                self.__val__ = adj_t.storage.value()
                value = torch.arange(adj_t.nnz())
                adj_t = adj_t.set_value(value, layout='coo')
            self.adj_t = adj_t

        self.adj_t.storage.rowptr()

        if node_idx is None:
            node_idx = torch.arange(self.adj_t.sparse_size(0))
        elif node_idx.dtype == torch.bool:
            node_idx = node_idx.nonzero(as_tuple=False).view(-1)

        super().__init__(node_idx.view(-1).tolist(), collate_fn=self.sample,
                         **kwargs)

    def sample(self, batch):
        if not isinstance(batch, Tensor):
            batch = torch.tensor(batch)

        batch_size: int = len(batch)

        adjs = []
        n_id = batch
        for size in self.sizes:
            adj_t, n_id = self.adj_t.sample_adj(n_id, size, replace=False)
            e_id = adj_t.storage.value()
            size = adj_t.sparse_sizes()[::-1]
            if self.__val__ is not None:
                adj_t.set_value_(self.__val__[e_id], layout='coo')

            if self.is_sparse_tensor:
                adjs.append(Adj(adj_t, e_id, size))
            else:
                row, col, _ = adj_t.coo()
                edge_index = torch.stack([col, row], dim=0)
                adjs.append(EdgeIndex(edge_index, e_id, size))

        adjs = adjs[0] if len(adjs) == 1 else adjs[::-1]
        out = (batch_size, n_id, adjs)
        out = self.transform(*out) if self.transform is not None else out
        return out

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(sizes={self.sizes})'
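
# Minimal usage sketch (illustrative; the toy graph and layer sizes are
# assumptions): iterate the loader and consume one bipartite block per layer,
# as in PyG's reddit.py example. `sampler_cls` is bound at function
# definition time, so it refers to the class defined directly above.
def _demo_neighbor_sampler_loader(sampler_cls=NeighborSampler):
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4],
                               [1, 0, 2, 1, 3, 2, 4, 3]])
    loader = sampler_cls(edge_index, sizes=[2, 2], batch_size=2,
                         shuffle=True, num_workers=0)
    for batch_size, n_id, adjs in loader:
        # A SAGE-style model would gather `x[n_id]` once per mini-batch and
        # then apply one convolution per `(edge_index, e_id, size)` block,
        # using `x[:size[1]]` as the target-node features (hypothetical
        # `x`/`convs` names).
        print(batch_size, n_id.numel(), [size for _, _, size in adjs])
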

class HeteroNeighborSampler(torch.utils.data.DataLoader):
    def __init__(self, edge_index: Union[Tensor, SparseTensor],
                 sizes: List[int], node_idx: Optional[Tensor] = None,
                 num_nodes: Optional[int] = None, return_e_id: bool = True,
                 **kwargs):
        """
        Args:
            edge_index: The edge indices, given as a :obj:`[2, num_edges]`
                LongTensor or as an already *transposed*
                :obj:`SparseTensor` adjacency matrix.
            sizes: The number of neighbors to sample for each node in each
                layer. A value of :obj:`-1` includes all neighbors.
            node_idx: The nodes considered for creating mini-batches. If
                :obj:`None`, all nodes are considered.
            num_nodes: The number of nodes in the graph.
            return_e_id (bool): If :obj:`False`, original edge indices of
                sampled edges are not returned.
            **kwargs: Additional arguments of
                :class:`torch.utils.data.DataLoader`.
        """
        self.sizes = sizes
        self.return_e_id = return_e_id
        self.is_sparse_tensor = isinstance(edge_index, SparseTensor)
        self.__val__ = None

        # Obtain a *transposed* `SparseTensor` instance.
        edge_index = edge_index.to('cpu')
        if not self.is_sparse_tensor:
            num_nodes = maybe_num_nodes(edge_index, num_nodes)
            value = torch.arange(edge_index.size(1)) if return_e_id else None

            # Sampling source_to_target.
            self.adj_t = SparseTensor(row=edge_index[1], col=edge_index[0],
                                      value=value,
                                      sparse_sizes=(num_nodes, num_nodes)).t()
        else:
            adj_t = edge_index
            if return_e_id:
                self.__val__ = adj_t.storage.value()
                value = torch.arange(adj_t.nnz())
                adj_t = adj_t.set_value(value, layout='coo')
            self.adj_t = adj_t

        self.adj_t.storage.rowptr()

        if node_idx is None:
            node_idx = torch.arange(self.adj_t.sparse_size(0))
        elif node_idx.dtype == torch.bool:
            node_idx = node_idx.nonzero(as_tuple=False).view(-1)

        super(HeteroNeighborSampler,
              self).__init__(node_idx.view(-1).tolist(),
                             collate_fn=self.sample, **kwargs)

    def sample(self, batch):
        """Samples a multi-hop neighborhood around :obj:`batch`.

        Args:
            batch: The (list of) seed node indices of the mini-batch.
        """
        if not isinstance(batch, Tensor):
            batch = torch.tensor(batch)

        batch_size: int = len(batch)
        adjs = []
        n_id = batch
        for size in self.sizes:
            adj_t, n_id = self.adj_t.sample_adj(n_id, size, replace=False)
            e_id = adj_t.storage.value()
            size = adj_t.sparse_sizes()[::-1]
            if self.__val__ is not None:
                adj_t.set_value_(self.__val__[e_id], layout='coo')

            if self.is_sparse_tensor:
                adjs.append(Adj(adj_t, e_id, size))
            else:
                row, col, _ = adj_t.coo()
                edge_index = torch.stack([row, col], dim=0)
                adjs.append(EdgeIndex(edge_index, e_id, size))

        return batch_size, n_id, adjs
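
# Brief sketch (illustrative assumption, not part of the original module):
# `HeteroNeighborSampler` also accepts an already *transposed* `SparseTensor`
# adjacency; in that case the per-layer entries are `Adj` tuples carrying the
# sampled sparse block directly.
def _demo_hetero_neighbor_sampler():
    edge_index = torch.tensor([[0, 1, 1, 2],
                               [1, 0, 2, 1]])
    # Transposed adjacency: row = target nodes, col = source nodes.
    adj_t = SparseTensor(row=edge_index[1], col=edge_index[0],
                         sparse_sizes=(3, 3))
    loader = HeteroNeighborSampler(adj_t, sizes=[-1], batch_size=3)
    for batch_size, n_id, adjs in loader:
        adj_block, e_id, size = adjs[0]  # one layer -> list of length one
        print(batch_size, n_id.tolist(), size)
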

class NeighborSampler(torch.utils.data.DataLoader):
    r"""The neighbor sampler from the `"Inductive Representation Learning on
    Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper, which allows
    for mini-batch training of GNNs on large-scale graphs where full-batch
    training is not feasible.

    Given a GNN with :math:`L` layers and a specific mini-batch of nodes
    :obj:`node_idx` for which we want to compute embeddings, this module
    iteratively samples neighbors and constructs bipartite graphs that
    simulate the actual computation flow of GNNs.

    More specifically, :obj:`sizes` denotes how many neighbors we want to
    sample for each node in each layer. This module then takes in these
    :obj:`sizes` and iteratively samples :obj:`sizes[l]` for each node
    involved in layer :obj:`l`. In the next layer, sampling is repeated for
    the union of nodes that were already encountered. The actual computation
    graphs are then returned in reverse-mode, meaning that we pass messages
    from a larger set of nodes to a smaller one, until we reach the nodes for
    which we originally wanted to compute embeddings.

    Hence, an item returned by :class:`NeighborSampler` holds the current
    :obj:`batch_size`, the IDs :obj:`n_id` of all nodes involved in the
    computation, and a list of bipartite graph objects via the tuple
    :obj:`(edge_index, e_id, size)`, where :obj:`edge_index` represents the
    bipartite edges between source and target nodes, :obj:`e_id` denotes the
    IDs of original edges in the full graph, and :obj:`size` holds the shape
    of the bipartite graph.
    For each bipartite graph, target nodes are also included at the beginning
    of the list of source nodes so that one can easily apply
    skip-connections or add self-loops.

    .. note::

        For an example of using :obj:`NeighborSampler`, see
        `examples/reddit.py
        <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
        reddit.py>`_ or
        `examples/ogbn_products_sage.py
        <https://github.com/rusty1s/pytorch_geometric/blob/master/examples/
        ogbn_products_sage.py>`_.

    Args:
        edge_index (LongTensor): The edge indices of the full graph.
        sizes ([int]): The number of neighbors to sample for each node in
            each layer. If set to :obj:`sizes[l] = -1`, all neighbors are
            included in layer :obj:`l`.
        node_idx (LongTensor, optional): The nodes that should be considered
            for creating mini-batches. If set to :obj:`None`, all nodes will
            be considered.
        flow (string, optional): The flow direction of message passing
            (:obj:`"source_to_target"` or :obj:`"target_to_source"`).
            (default: :obj:`"source_to_target"`)
        **kwargs (optional): Additional arguments of
            :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,
            :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.
""" def __init__(self, edge_index: torch.Tensor, sizes: List[int], split_idx=None, node_idx: Optional[torch.Tensor] = None, num_nodes: Optional[int] = None, flow: str = "source_to_target", prune=False, prune_set='train', prune_type='adaptive', **kwargs): self.N = N = int(edge_index.max() + 1) if num_nodes is None else num_nodes edge_attr = torch.arange(edge_index.size(1)) adj = SparseTensor(row=edge_index[0], col=edge_index[1], value=edge_attr, sparse_sizes=(N, N), is_sorted=False) adj = adj.t() if flow == 'source_to_target' else adj self.adj = adj.to('cpu') if node_idx is None: node_idx = torch.arange(N) elif node_idx.dtype == torch.bool: node_idx = node_idx.nonzero(as_tuple=False).view(-1) self.sizes = sizes self.flow = flow assert self.flow in ['source_to_target', 'target_to_source'] super(NeighborSampler, self).__init__(node_idx.tolist(), collate_fn=self.sample, **kwargs) if prune == True: self.split_idx = split_idx self.E = edge_index.size(1) self.edge_index = edge_index if prune_set == 'train': self.train_idx = self.split_idx['train'] else: self.train_idx = torch.cat([self.split_idx['train'], self.split_idx['valid']]) subadj, _ = self.adj.saint_subgraph(self.train_idx) # subadj = self.adj.to_dense()[self.train_idx][:,self.train_idx].view(-1) _,_,e_idx = subadj.coo() self.train_e_idx = e_idx.squeeze().long() self.train_edge_index = self.edge_index[:, self.train_e_idx] self.rest_idx = torch.cat([self.split_idx['valid'], self.split_idx['test']]) subadj2, _ = self.adj.saint_subgraph(self.rest_idx) _,_,rest_e_idx = subadj2.coo() self.rest_e_idx = rest_e_idx.squeeze().long() # def prune_naive(self, ratio=0): # mask = torch.randperm(self.E) # edge_index, values = self.adj.coo() # newvalues = torch.zeros(edge_index.size(1)) # newvalues[values>0] = 1 # num_edges = spmm(edge_index, values, 1, values.size) def prune(self, loss, ratio=0, method='ada', glob=False): if glob == False: if method=='ada': diff_loss = torch.abs(loss[self.train_edge_index[0]] - loss[self.train_edge_index[1]]) print(diff_loss.size(0)) #print(diff_loss.nonzero().size()) # print(int(len(diff_loss)*ratio)) _, mask = torch.topk(diff_loss, int(len(diff_loss)*ratio), largest=False) elif method=='random': newE =self.train_edge_index.size(1) mask = torch.randperm(newE)[:int(newE*ratio)] elif method == 'naive': degrees = scatter(torch.ones(self.train_edge_index.size(1)), self.train_edge_index[0]) thold = (degrees.max()*ratio).long() prunemask = (degrees>thold).nonzero() mymask = torch.ones(self.train_edge_index.size(1)) taway = [] for pru in prunemask: p_eid = (self.train_edge_index[0]==pru.squeeze()).nonzero().squeeze() p = p_eid[torch.randperm(p_eid.size(0))][thold:] taway.append(p) nonmask = torch.cat(taway) mask = torch.ones(self.train_edge_index.size(1), dtype=torch.bool) mask[nonmask] = False #print('len diff_loss', len(diff_loss)) #print('len mask', len(mask)) self.train_e_idx = self.train_e_idx[mask] self.train_edge_index = self.train_edge_index[:, mask] # self.edge_attr = self.data.edge_attr[torch.cat([self.train_e_idx, self.rest_e_idx])] self.edge_index = self.edge_index[:,torch.cat([self.train_e_idx, self.rest_e_idx])] # print(self.data.edge_attr.size(), self.data.edge_index.size()) self.train_e_idx = torch.arange(self.train_e_idx.size(0)) self.rest_e_idx = torch.arange(self.train_e_idx.size(0),self.train_e_idx.size(0) + self.rest_e_idx.size(0)) # print(len(self.train_e_idx),len(self.rest_e_idx), self.train_edge_index.size(),self.edge_index.size()) self.E = self.edge_index.size(1) self.adj = SparseTensor( 
                row=self.edge_index[0], col=self.edge_index[1],
                value=torch.arange(self.E, device=self.edge_index.device),
                sparse_sizes=(self.N, self.N))

    def sample(self, batch):
        if not isinstance(batch, torch.Tensor):
            batch = torch.tensor(batch)

        batch_size: int = len(batch)
        adjs: List[Adj] = []

        n_id = batch
        for size in self.sizes:
            adj, n_id = self.adj.sample_adj(n_id, size, replace=False)
            if self.flow == 'source_to_target':
                adj = adj.t()
            row, col, e_id = adj.coo()
            size = adj.sparse_sizes()
            edge_index = torch.stack([row, col], dim=0)
            adjs.append(Adj(edge_index, e_id, size))

        if len(adjs) > 1:
            return batch_size, n_id, adjs[::-1]
        else:
            return batch_size, n_id, adjs[0]

    def __repr__(self):
        return '{}(sizes={})'.format(self.__class__.__name__, self.sizes)
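
# Illustrative sketch (the `split_idx` layout and the per-node `loss` vector
# are assumptions based on how `prune()` indexes them): build the sampler
# with pruning enabled, train for a while, then adaptively drop training
# edges whose endpoint losses differ the most and rebuild the adjacency.
def _demo_pruning_sampler(edge_index, num_nodes, split_idx, node_losses):
    # `split_idx` is expected to hold 'train'/'valid'/'test' node index
    # tensors; `node_losses` is one loss value per node of the full graph.
    loader = NeighborSampler(edge_index, sizes=[10, 10],
                             node_idx=split_idx['train'],
                             num_nodes=num_nodes, batch_size=1024,
                             shuffle=True, prune=True, split_idx=split_idx,
                             prune_set='train')
    # ... run a few training epochs, accumulating `node_losses` ...
    # With method='ada', `ratio` is the fraction of training edges to keep.
    loader.prune(node_losses, ratio=0.9, method='ada')
    return loader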