Example #1
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
    """
    Description
    -----------
    The neighbor matching procedure of edge coarsening used in
    `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
    and
    `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
    for homogeneous graph coarsening. This procedure keeps picking an unmarked
    vertex and matching it with one of its unmarked neighbors (the one that
    maximizes the edge weight) until no more matches can be made.

    If no edge weight is given, this procedure will randomly pick a neighbor for
    each vertex.

    The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
    <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__.

    NOTE: The input graph must be a bi-directed (undirected) graph. Call
    :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed.

    Parameters
    ----------
    graph_idx : HeteroGraphIndex
        The input homogeneous graph.
    num_nodes : int
        The number of nodes in this homogeneous graph.
    edge_weights : tensor, optional
        The edge weight tensor holding non-negative scalar weight for each edge.
        default: :obj:`None`
    relabel_idx : bool, optional
        If True, relabel the resulting node labels to consecutive node IDs.
        default: :obj:`True`

    Returns
    -------
    1-D tensor
        A vector in which each element indicates the cluster ID of the
        corresponding vertex.
    """
    edge_weight_capi = nd.NULL["int64"]
    if edge_weights is not None:
        edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
    node_label = F.full_1d(
        num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
    node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
    _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
    if F.reduce_sum(node_label < 0).item() != 0:
        raise DGLError("Find unmatched node")

    # reorder node id
    # TODO: actually we can add `return_inverse` option for `unique`
    #       function in backend for efficiency.
    if relabel_idx:
        node_label_np = F.zerocopy_to_numpy(node_label)
        _, node_label_np = np.unique(node_label_np, return_inverse=True)
        return F.tensor(node_label_np)
    else:
        return node_label
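For intuition, here is a small illustrative pure-Python sketch of the greedy neighbor-matching step that the `_CAPI_NeighborMatching` call performs on the C++ side. This is an assumption-laden reimplementation for clarity, not the DGL kernel; the adjacency dict and the edge-weight indexing are hypothetical simplifications.

import numpy as np

def greedy_neighbor_matching(adj, num_nodes, edge_weights=None):
    # adj: dict mapping node -> list of (neighbor, edge_id); weights indexed by edge_id.
    label = np.full(num_nodes, -1, dtype=np.int64)
    next_cluster = 0
    for u in range(num_nodes):
        if label[u] >= 0:            # already matched/marked
            continue
        best, best_w = None, -1.0
        for v, eid in adj.get(u, []):
            if label[v] >= 0 or v == u:
                continue
            w = 1.0 if edge_weights is None else float(edge_weights[eid])
            if w > best_w:
                best, best_w = v, w
        label[u] = next_cluster
        if best is not None:         # collapse u and its heaviest unmarked neighbor
            label[best] = next_cluster
        next_cluster += 1
    return label

Unlike the C API, this sketch keeps an unmatched isolated vertex as its own cluster instead of raising an error, and with no weights it takes the first neighbor rather than a random one.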
Example #2
    def __init__(
        self,
        in_feats,
        out_feats,
        num_heads=1,
        feat_drop=0.0,
        attn_drop=0.0,
        negative_slope=0.2,
        residual=False,
        activation=None,
        allow_zero_in_degree=False,
        norm="none",
    ):
        super(GATConv, self).__init__()
        if norm not in ("none", "both"):
            raise DGLError('Invalid norm value. Must be either "none" or '
                           '"both". But got "{}".'.format(norm))
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._allow_zero_in_degree = allow_zero_in_degree
        self._norm = norm
        if isinstance(in_feats, tuple):
            # Bipartite input: separate projections for source and destination nodes.
            self.fc_src = nn.Linear(self._in_src_feats,
                                    out_feats * num_heads,
                                    bias=False)
            self.fc_dst = nn.Linear(self._in_dst_feats,
                                    out_feats * num_heads,
                                    bias=False)
        else:
            self.fc = nn.Linear(self._in_src_feats,
                                out_feats * num_heads,
                                bias=False)
        # Per-head attention vectors for source ("left") and destination ("right") nodes.
        self.attn_l = nn.Parameter(
            torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r = nn.Parameter(
            torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(self._in_dst_feats,
                                        num_heads * out_feats,
                                        bias=False)
            else:
                self.res_fc = Identity()
        else:
            # No residual connection; register a placeholder so `self.res_fc` exists.
            self.register_buffer("res_fc", None)
        self.reset_parameters()
        self._activation = activation
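A minimal construction sketch, assuming this `__init__` belongs to a DGL/PyTorch `GATConv`-style module importable from the surrounding file. The graph, feature sizes, and the final call are illustrative assumptions; the module's forward signature is not shown in this snippet.

import dgl
import torch

g = dgl.add_self_loop(dgl.rand_graph(32, 120))   # self-loops avoid 0-in-degree nodes
feat = torch.randn(32, 16)                       # 32 nodes, 16 input features
conv = GATConv(in_feats=16, out_feats=8, num_heads=4, residual=True, norm="both")
out = conv(g, feat)   # hypothetical call; the per-head output would be (32, 4, 8)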
Example #3
File: conv.py Project: jkx19/cogdl
    def forward(self, graph, feat, e_feat, res_attn=None):
        with graph.local_scope():
            if not self._allow_zero_in_degree:
                if (graph.in_degrees() == 0).any():
                    raise DGLError(
                        "There are 0-in-degree nodes in the graph, "
                        "output for those nodes will be invalid. "
                        "This is harmful for some applications, "
                        "causing silent performance regression. "
                        "Adding self-loop on the input graph by "
                        "calling `g = dgl.add_self_loop(g)` will resolve "
                        "the issue. Setting ``allow_zero_in_degree`` "
                        "to be `True` when constructing this module will "
                        "suppress the check and let the code run.")

            if isinstance(feat, tuple):
                h_src = self.feat_drop(feat[0])
                h_dst = self.feat_drop(feat[1])
                if not hasattr(self, "fc_src"):
                    self.fc_src, self.fc_dst = self.fc, self.fc
                feat_src = self.fc_src(h_src).view(-1, self._num_heads,
                                                   self._out_feats)
                feat_dst = self.fc_dst(h_dst).view(-1, self._num_heads,
                                                   self._out_feats)
            else:
                h_src = h_dst = self.feat_drop(feat)
                feat_src = feat_dst = self.fc(h_src).view(
                    -1, self._num_heads, self._out_feats)
                if graph.is_block:
                    feat_dst = feat_src[:graph.number_of_dst_nodes()]
            e_feat = self.edge_emb(e_feat)
            e_feat = self.fc_e(e_feat).view(-1, self._num_heads,
                                            self._edge_feats)
            ee = (e_feat * self.attn_e).sum(dim=-1).unsqueeze(-1)
            el = (feat_src * self.attn_l).sum(dim=-1).unsqueeze(-1)
            er = (feat_dst * self.attn_r).sum(dim=-1).unsqueeze(-1)
            graph.srcdata.update({"ft": feat_src, "el": el})
            graph.dstdata.update({"er": er})
            graph.edata.update({"ee": ee})
            graph.apply_edges(fn.u_add_v("el", "er", "e"))
            e = self.leaky_relu(graph.edata.pop("e") + graph.edata.pop("ee"))
            # compute softmax
            graph.edata["a"] = self.attn_drop(edge_softmax(graph, e))
            if res_attn is not None:
                graph.edata["a"] = graph.edata["a"] * (
                    1 - self.alpha) + res_attn * self.alpha
            # message passing
            graph.update_all(fn.u_mul_e("ft", "a", "m"), fn.sum("m", "ft"))
            rst = graph.dstdata["ft"]
            # residual
            if self.res_fc is not None:
                resval = self.res_fc(h_dst).view(h_dst.shape[0], -1,
                                                 self._out_feats)
                rst = rst + resval
            # bias
            if self.bias:
                rst = rst + self.bias_param
            # activation
            if self.activation:
                rst = self.activation(rst)
            return rst, graph.edata.pop("a").detach()
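The forward pass above follows the standard GAT message-passing pattern: per-node attention logits are added across each edge (plus an edge-feature term here), normalized with an edge softmax, and then used to weight the aggregated source features. Below is a stripped-down sketch of just that pattern with hypothetical inputs; the per-head projections, edge features, residual attention, bias, and dropout of the full module are deliberately omitted.

import torch
import dgl
import dgl.function as fn
from dgl.nn.functional import edge_softmax

def gat_like_aggregation(graph, feat_src, feat_dst, attn_l, attn_r, negative_slope=0.2):
    # feat_src/feat_dst: (N_src/N_dst, num_heads, out_feats); attn_*: (1, num_heads, out_feats).
    with graph.local_scope():
        el = (feat_src * attn_l).sum(dim=-1).unsqueeze(-1)   # (N_src, H, 1)
        er = (feat_dst * attn_r).sum(dim=-1).unsqueeze(-1)   # (N_dst, H, 1)
        graph.srcdata.update({"ft": feat_src, "el": el})
        graph.dstdata.update({"er": er})
        graph.apply_edges(fn.u_add_v("el", "er", "e"))       # e_ij = el_i + er_j
        e = torch.nn.functional.leaky_relu(graph.edata.pop("e"), negative_slope)
        graph.edata["a"] = edge_softmax(graph, e)             # normalize over incoming edges
        graph.update_all(fn.u_mul_e("ft", "a", "m"), fn.sum("m", "ft"))
        return graph.dstdata["ft"]                            # (N_dst, H, out_feats)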