Example #1
import dgl
import networkx as nx


def test_laplacian_lambda_max():
    N = 20
    eps = 1e-6
    # test DGLGraph
    g = dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3))
    l_max = dgl.laplacian_lambda_max(g)
    assert (l_max[0] < 2 + eps)
    # test batched DGLGraph
    N_arr = [20, 30, 10, 12]
    bg = dgl.batch([dgl.DGLGraph(nx.erdos_renyi_graph(N, 0.3)) for N in N_arr])
    l_max_arr = dgl.laplacian_lambda_max(bg)
    assert len(l_max_arr) == len(N_arr)
    for l_max in l_max_arr:
        assert l_max < 2 + eps
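
For reference, the value returned by dgl.laplacian_lambda_max is the largest eigenvalue of the symmetric normalized Laplacian of each graph in the batch, which for an undirected graph is at most 2. A minimal check of that bound outside DGL, assuming NetworkX and SciPy (the seed is only for reproducibility):

import networkx as nx
import numpy as np
from scipy.sparse.linalg import eigsh

g_nx = nx.erdos_renyi_graph(20, 0.3, seed=0)
# Symmetric normalized Laplacian L = I - D^-1/2 A D^-1/2 of the undirected graph.
L = nx.normalized_laplacian_matrix(g_nx).astype(np.float64)
# Its spectrum lies in [0, 2], so the largest-magnitude eigenvalue is at most 2.
l_max = eigsh(L, k=1, which='LM', return_eigenvectors=False)[0]
assert l_max < 2 + 1e-6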
Example #2
 def forward(self, graph: dgl.DGLGraph, feat, lambda_max=None):
     shp = (len(graph.nodes()), ) + tuple(1 for _ in range(feat.dim() - 1))
     with graph.local_scope():
         norm = torch.pow(graph.in_degrees().float().clamp(min=1),
                          -0.5).reshape(shp).to(feat.device)
         if lambda_max is None:
             try:
                 lambda_max = laplacian_lambda_max(graph)
             except ArpackNoConvergence:
                 lambda_max = [2.] * graph.batch_size
         if isinstance(lambda_max, list):
             lambda_max = torch.tensor(lambda_max).to(feat.device)
         if lambda_max.dim() < 1:
             lambda_max = lambda_max.unsqueeze(-1)  # (B,) to (B, 1)
         # broadcast from (B, 1) to (N, 1)
         lambda_max = torch.reshape(broadcast_nodes(graph, lambda_max),
                                    shp).float()
         # T0(X)
         Tx_0 = feat
         rst = self.fc[0](Tx_0)
         # T1(X)
         if self._k > 1:
             graph.ndata['h'] = Tx_0 * norm
             graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
             h = graph.ndata.pop('h') * norm
             # Λ = 2 * (I - D ^ -1/2 A D ^ -1/2) / lambda_max - I
             #   = - 2(D ^ -1/2 A D ^ -1/2) / lambda_max + (2 / lambda_max - 1) I
             Tx_1 = -2. * h / lambda_max + Tx_0 * (2. / lambda_max - 1)
             rst = rst + self.fc[1](Tx_1)
         # Ti(x), i = 2...k
         for i in range(2, self._k):
             graph.ndata['h'] = Tx_1 * norm
             graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
             h = graph.ndata.pop('h') * norm
             # Tx_k = 2 * Λ * Tx_(k-1) - Tx_(k-2)
             #      = - 4(D ^ -1/2 A D ^ -1/2) / lambda_max Tx_(k-1) +
             #        (4 / lambda_max - 2) Tx_(k-1) -
             #        Tx_(k-2)
             Tx_2 = -4. * h / lambda_max + Tx_1 * (4. / lambda_max -
                                                   2) - Tx_0
             rst = rst + self.fc[i](Tx_2)
             Tx_1, Tx_0 = Tx_2, Tx_1
         # add bias
         if self.bias is not None:
             rst = rst + self.bias
         return rst
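
This appears to be the forward pass of DGL's ChebConv layer. A hedged usage sketch, assuming the surrounding class is the stock dgl.nn.ChebConv (the toy graph and feature sizes below are made up):

import dgl
import torch
from dgl.nn import ChebConv

# Toy bidirected 6-cycle with 10-dimensional node features.
src, dst = [0, 1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 0]
g = dgl.to_bidirected(dgl.graph((src, dst)))
feat = torch.ones(6, 10)

conv = ChebConv(10, 2, k=2)
# Precompute the largest Laplacian eigenvalue once and pass it in, so forward()
# does not fall back to the ARPACK call on every invocation.
lambda_max = dgl.laplacian_lambda_max(g)
out = conv(g, feat, lambda_max=lambda_max)  # shape (6, 2)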
Example #3
    def forward(self, g, feature, e):
        h_in = feature  # to be used for residual connection

        def unnLaplacian(feature, D_sqrt, graph):
            """ Operation D^-1/2 A D^-1/2 """
            graph.ndata['h'] = feature * D_sqrt
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            return graph.ndata.pop('h') * D_sqrt

        with g.local_scope():
            D_sqrt = torch.pow(g.in_degrees().float().clamp(
                min=1), -0.5).unsqueeze(-1).to(feature.device)

            # lambda_max is fixed to the spectral upper bound 2 here because the exact
            # eigen-solve is slow; set it to None to fall back to dgl.laplacian_lambda_max.
            lambda_max = [2] * g.batch_size

            if lambda_max is None:
                try:
                    lambda_max = dgl.laplacian_lambda_max(g)
                except BaseException:
                    # if the largest eigenvalue is not found
                    lambda_max = [2] * g.batch_size

            if isinstance(lambda_max, list):
                lambda_max = torch.Tensor(lambda_max).to(feature.device)
            if lambda_max.dim() == 1:
                lambda_max = lambda_max.unsqueeze(-1)  # (B,) to (B, 1)

            # broadcast from (B, 1) to (N, 1)
            lambda_max = dgl.broadcast_nodes(g, lambda_max)

            # X_0(f)
            Xt = X_0 = feature

            # X_1(f)
            if self._k > 1:
                re_norm = (2. / lambda_max).to(feature.device)
                h = unnLaplacian(X_0, D_sqrt, g)
                X_1 = - re_norm * h + X_0 * (re_norm - 1)

                Xt = torch.cat((Xt, X_1), 1)

            # Xi(x), i = 2...k
            for _ in range(2, self._k):
                h = unnLaplacian(X_1, D_sqrt, g)
                X_i = - 2 * re_norm * h + X_1 * 2 * (re_norm - 1) - X_0

                Xt = torch.cat((Xt, X_i), 1)
                X_1, X_0 = X_i, X_1

            h = self.linear(Xt)

        if self.batch_norm:
            h = self.batchnorm_h(h)  # batch normalization

        if self.activation:
            h = self.activation(h)

        if self.residual:
            h = h_in + h  # residual connection

        h = self.dropout(h)
        return h, e
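
The forward above relies on attributes set elsewhere: self._k, a linear projection sized for the concatenation of the k Chebyshev terms, and the batch-norm/residual/dropout options. A rough sketch of a matching constructor; the class name, argument names, and defaults here are assumptions, not the original code:

import torch
import torch.nn as nn

class ChebLayer(nn.Module):
    """Hypothetical constructor providing the attributes used by forward() above."""

    def __init__(self, in_dim, out_dim, k, dropout=0.0,
                 batch_norm=True, residual=True, activation=torch.relu):
        super().__init__()
        self._k = k
        # Xt concatenates X_0 ... X_{k-1}, so the projection sees k * in_dim features.
        self.linear = nn.Linear(k * in_dim, out_dim)
        self.batch_norm = batch_norm
        self.batchnorm_h = nn.BatchNorm1d(out_dim)
        self.activation = activation
        # The residual path adds the raw input, so it only applies when dims match.
        self.residual = residual and (in_dim == out_dim)
        self.dropout = nn.Dropout(dropout)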
Example #4
    def forward(self, graph, feat):
        r"""

        Description
        -----------
        Compute GraphSAGE layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor or pair of torch.Tensor
            If a torch.Tensor is given, it represents the input feature of shape
            :math:`(N, D_{in})`
            where :math:`D_{in}` is the size of the input feature and :math:`N` is the number of nodes.
            If a pair of torch.Tensor is given, the pair must contain two tensors of shape
            :math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is the size of the output feature.
        """
        with graph.local_scope():
            if isinstance(feat, tuple):
                feat_src = self.feat_drop(feat[0])
                feat_dst = self.feat_drop(feat[1])
            else:
                feat_src = feat_dst = self.feat_drop(feat)
                if graph.is_block:
                    feat_dst = feat_src[:graph.number_of_dst_nodes()]

            h_self = feat_dst

            # Handle the case of graphs without edges
            if graph.number_of_edges() == 0:
                graph.dstdata['neigh'] = torch.zeros(
                    feat_dst.shape[0], self._in_src_feats).to(feat_dst)

            if self._aggre_type == 'mean':
                graph.srcdata['h'] = feat_src
                graph.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
                h_neigh = graph.dstdata['neigh']
            elif self._aggre_type == 'gcn':
                check_eq_shape(feat)
                graph.srcdata['h'] = feat_src
                graph.dstdata['h'] = feat_dst  # same as above if homogeneous
                graph.update_all(fn.copy_src('h', 'm'), fn.sum('m', 'neigh'))
                # divide in_degrees
                degs = graph.in_degrees().to(feat_dst)
                h_neigh = (graph.dstdata['neigh'] +
                           graph.dstdata['h']) / (degs.unsqueeze(-1) + 1)
            elif self._aggre_type == 'pool':
                graph.srcdata['h'] = F.relu(self.fc_pool(feat_src))
                graph.update_all(fn.copy_src('h', 'm'), fn.max('m', 'neigh'))
                h_neigh = graph.dstdata['neigh']
            elif self._aggre_type == 'lstm':
                graph.srcdata['h'] = feat_src
                graph.update_all(fn.copy_src('h', 'm'), self._lstm_reducer)
                h_neigh = graph.dstdata['neigh']
            elif self._aggre_type == 'ginmean':
                graph.srcdata['h'] = feat_src
                graph.update_all(fn.copy_src('h', 'm'),
                                 self._gin_reducer('m', 'neigh'))
                h_neigh = graph.dstdata['neigh']
            elif self._aggre_type == 'cheb':

                def unnLaplacian(feat, D_invsqrt_left, D_invsqrt_right, graph):
                    """ Operation Feat * D^-1/2 A D^-1/2 但是如果写成矩阵乘法:D^-1/2 A D^-1/2 Feat"""
                    #tmp = torch.zeros((D_invsqrt.shape[0],D_invsqrt.shape[0])).to(graph.device)
                    # sparse tensor没有broadcast机制,最后还依赖于srcnode在feat中从0开始连续排布
                    #print("adj : ",graph.adj(transpose=False,ctx = graph.device).shape)
                    #graph.srcdata['h'] = (torch.mm((graph.adj(transpose=False,ctx = graph.device)),(feat * D_invsqrt)))*D_invsqrt[::graph.number_of_dst_nodes()]
                    #graph.update_all(fn.copy_src('h', 'm'), fn.sum('m', 'h'))
                    #return graph.srcdata['h']
                    graph.srcdata[
                        'h'] = feat * D_invsqrt_right  # feat is srcfeat
                    graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                    return graph.dstdata.pop('h') * D_invsqrt_left

                D_invsqrt_right = torch.pow(
                    graph.out_degrees().float().clamp(min=1),
                    -0.5).unsqueeze(-1)
                D_invsqrt_left = torch.pow(
                    graph.in_degrees().float().clamp(min=1),
                    -0.5).unsqueeze(-1)
                #print("D_invsqrt shape: ",D_invsqrt.shape)
                #print(graph.__dict__)
                #print(dir(graph))
                #graph.srcdata['h']=feat_src
                #graph.dstdata['h']=feat_dst
                #g = dgl.to_homogeneous(graph,ndata=['h'])
                #dgl._ffi.base.DGLError: Expect number of features to match number of nodes (len(u)). Got 70 and 76 instead.
                #print(g)
                # since the block is different every time so it's safe to call dgl's method every time instead of calculating the l_m ahead
                try:
                    lambda_max = laplacian_lambda_max(graph)
                except BaseException:
                    # if the largest eigenvalue is not found
                    dgl_warning(
                        "Largest eigenvalue not found, using default value 2 for lambda_max",
                        RuntimeWarning)
                    lambda_max = torch.tensor(2.)  # .to(feat.device)
                if isinstance(lambda_max, list):
                    lambda_max = torch.tensor(lambda_max)  # .to(feat.device)
                if lambda_max.dim() == 1:
                    lambda_max = lambda_max.unsqueeze(-1)  # (B,) to (B, 1)
                # broadcast from (B, 1) to (N, 1)
                re_norm = (2 / lambda_max.to(graph.device)) * torch.ones(
                    (graph.number_of_dst_nodes(), 1), device=graph.device)
                self._cheb_Xt = X_0 = feat_dst
                graph.srcdata[
                    'h'] = feat_src * D_invsqrt_right  # feat is srcfeat
                graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
                X_1 = -re_norm * graph.dstdata['h'] * D_invsqrt_left + X_0 * (
                    re_norm - 1)
                self._cheb_Xt = torch.cat((self._cheb_Xt, X_1.float()), 1)
            else:
                raise KeyError('Aggregator type {} not recognized.'.format(
                    self._aggre_type))

            # GraphSAGE GCN does not require fc_self.
            if self._aggre_type == 'gcn':
                rst = self.fc_neigh(h_neigh)
            elif self._aggre_type == 'ginmean':
                rst = (1 + self.eps) * h_self + h_neigh
                rst = self.fc_gin(rst)
                if self.norm is not None:
                    rst = self.norm(rst)
                return rst
            elif self._aggre_type == 'cheb':
                rst = self._cheb_linear(self._cheb_Xt)
            else:
                rst = self.fc_self(h_self) + self.fc_neigh(h_neigh)

            # activation
            if self.activation is not None:
                rst = self.activation(rst)
            # normalization
            if self.norm is not None:
                rst = self.norm(rst)
            return rst
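
The layer above extends a GraphSAGE-style convolution with extra 'ginmean' and 'cheb' aggregators. For the standard branches, usage mirrors DGL's stock SAGEConv, which this class appears to be modeled on; a hedged sketch with made-up sizes:

import dgl
import torch
from dgl.nn import SAGEConv

# DGL's SAGEConv covers the 'mean', 'gcn', 'pool' and 'lstm' paths above;
# the 'ginmean' and 'cheb' paths are additions specific to the modified class.
g = dgl.to_bidirected(dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0])))
feat = torch.randn(4, 16)
conv = SAGEConv(16, 8, aggregator_type='mean')
out = conv(g, feat)  # shape (4, 8)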
Example #5
 def forward(self, graph, feat):
     lambda_max = dgl.laplacian_lambda_max(graph)  # may be very slow
     return super().forward(graph, feat, lambda_max=lambda_max)
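
As the comment says, dgl.laplacian_lambda_max runs an ARPACK eigen-solve and can be very slow when repeated on every forward pass. One workaround, sketched here as an assumption rather than part of the original snippet, is to solve once per graph in the data pipeline and reuse the cached values:

import dgl
import networkx as nx

# One eigen-solve per graph, done ahead of training.
graphs = [dgl.from_networkx(nx.erdos_renyi_graph(n, 0.5, seed=n)) for n in (20, 30, 16)]
cached = [dgl.laplacian_lambda_max(g)[0] for g in graphs]

bg = dgl.batch(graphs)
# cached is a list of length B, exactly what forward(..., lambda_max=...) expects:
# out = layer(bg, feat, lambda_max=cached)   # no eigen-solve inside the training loop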
Example #6
    def forward(self, graph, feat, lambda_max=None):
        r"""

        Description
        -----------
        Compute ChebNet layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor
            The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
            is the size of the input feature and :math:`N` is the number of nodes.
        lambda_max : list or tensor or None, optional
            A list or tensor of length :math:`B`, storing the largest eigenvalue
            of the normalized Laplacian of each individual graph in ``graph``,
            where :math:`B` is the batch size of the input graph. Default: None.
            If None, this method computes the values by calling
            ``dgl.laplacian_lambda_max``.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is the size of the output feature.
        """
        def unnLaplacian(feat, D_invsqrt, graph):
            """ Operation Feat * D^-1/2 A D^-1/2 但是如果写成矩阵乘法:D^-1/2 A D^-1/2 Feat"""
            graph.ndata['h'] = feat * D_invsqrt
            graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
            return graph.ndata.pop('h') * D_invsqrt

        with graph.local_scope():
            # Small change to the original code: for MNIST graphs the degrees are
            # accumulated from the edge weights 'v' (produced in coordinate.py).
            if self.is_mnist:
                graph.update_all(fn.copy_edge('v', 'm'),
                                 fn.sum('m', 'h'))
                D_invsqrt = th.pow(
                    graph.ndata.pop('h').float().clamp(min=1),
                    -0.5).unsqueeze(-1).to(feat.device)

            else:
                D_invsqrt = th.pow(graph.in_degrees().float().clamp(min=1),
                                   -0.5).unsqueeze(-1).to(feat.device)
            #print("D_invsqrt : ",D_invsqrt.shape)
            #print("ndata : ",graph.ndata['h'].shape)
            if lambda_max is None:
                try:
                    lambda_max = laplacian_lambda_max(graph)
                except BaseException:
                    # if the largest eigenvalue is not found
                    dgl_warning(
                        "Largest eigenvalue not found, using default value 2 for lambda_max",
                        RuntimeWarning)
                    # th.Tensor(2) would give an uninitialized tensor of shape (2,);
                    # fall back to the per-graph default handled by the list branch below.
                    lambda_max = [2.] * graph.batch_size

            if isinstance(lambda_max, list):
                lambda_max = th.Tensor(lambda_max).to(feat.device)
            if lambda_max.dim() == 1:
                lambda_max = lambda_max.unsqueeze(-1)  # (B,) to (B, 1)

            # broadcast from (B, 1) to (N, 1)
            lambda_max = broadcast_nodes(graph, lambda_max)
            re_norm = 2. / lambda_max

            # X_0 is the raw feature, Xt refers to the concatenation of X_0, X_1, ... X_t
            Xt = X_0 = feat

            # X_1(f)
            if self._k > 1:
                h = unnLaplacian(X_0, D_invsqrt, graph)
                X_1 = -re_norm * h + X_0 * (re_norm - 1)
                # Concatenate Xt and X_1
                Xt = th.cat((Xt, X_1), 1)

            # Xi(x), i = 2...k
            for _ in range(2, self._k):
                h = unnLaplacian(X_1, D_invsqrt, graph)
                X_i = -2 * re_norm * h + X_1 * 2 * (re_norm - 1) - X_0
                # Concatenate Xt and X_i
                Xt = th.cat((Xt, X_i), 1)
                X_1, X_0 = X_i, X_1

            # linear projection
            h = self.linear(Xt)

            # activation
            if self.activation:
                h = self.activation(h)
        return h
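
The first-order update used throughout these examples, X_1 = -re_norm * h + X_0 * (re_norm - 1) with re_norm = 2 / lambda_max, is the rescaled Laplacian (2 L / lambda_max - I) X_0 applied through message passing, where L = I - D^-1/2 A D^-1/2. A small NumPy check of that identity on a made-up undirected graph:

import numpy as np

# Toy undirected, non-bipartite graph: triangle 0-1-2 plus a pendant node 3.
A = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
D_invsqrt = np.diag(A.sum(1) ** -0.5)
A_hat = D_invsqrt @ A @ D_invsqrt           # D^-1/2 A D^-1/2
L = np.eye(4) - A_hat                       # symmetric normalized Laplacian
lambda_max = np.linalg.eigvalsh(L).max()
re_norm = 2.0 / lambda_max

X_0 = np.random.rand(4, 5)
h = A_hat @ X_0                             # what update_all(copy_u, sum) plus the degree scaling computes
X_1_message_passing = -re_norm * h + X_0 * (re_norm - 1)
X_1_dense = (2.0 * L / lambda_max - np.eye(4)) @ X_0
assert np.allclose(X_1_message_passing, X_1_dense)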