Example #1
def test_softmax_nodes():
    # test#1: basic
    g0 = dgl.DGLGraph(nx.path_graph(9))

    feat0 = F.randn((g0.number_of_nodes(), 10))
    g0.ndata['x'] = feat0
    ground_truth = F.softmax(feat0, dim=0)
    assert F.allclose(dgl.softmax_nodes(g0, 'x'), ground_truth)
    g0.ndata.pop('x')

    # test#2: batched graph
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(3))
    g3 = dgl.DGLGraph()
    g4 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g0, g1, g2, g3, g4])
    feat1 = F.randn((g1.number_of_nodes(), 10))
    feat2 = F.randn((g2.number_of_nodes(), 10))
    feat4 = F.randn((g4.number_of_nodes(), 10))
    bg.ndata['x'] = F.cat([feat0, feat1, feat2, feat4], 0)
    ground_truth = F.cat([
        F.softmax(feat0, 0),
        F.softmax(feat1, 0),
        F.softmax(feat2, 0),
        F.softmax(feat4, 0)
    ], 0)
    assert F.allclose(dgl.softmax_nodes(bg, 'x'), ground_truth)
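As a quick standalone illustration of what the test above verifies (the graphs and feature sizes here are arbitrary choices, not taken from the test), dgl.softmax_nodes normalizes a node feature independently within each graph of a batch:

import dgl
import torch

g1 = dgl.graph(([0, 1], [1, 2]))        # 3 nodes
g2 = dgl.graph(([0, 1, 2], [1, 2, 3]))  # 4 nodes
bg = dgl.batch([g1, g2])
bg.ndata['h'] = torch.randn(bg.num_nodes(), 5)

out = dgl.softmax_nodes(bg, 'h')
# The softmax runs over the node dimension separately for each graph in the batch.
expected = torch.cat([
    torch.softmax(bg.ndata['h'][:3], dim=0),
    torch.softmax(bg.ndata['h'][3:], dim=0),
], dim=0)
assert torch.allclose(out, expected)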
Example #2
    def forward(self, g, node_feats, g_feats, get_node_weight=False):
        """
        Parameters
        ----------
        g : DGLGraph or BatchedDGLGraph
            Constructed DGLGraphs.
        node_feats : float32 tensor of shape (V, N1)
            Input node features. V for the number of nodes and N1 for the feature size.
        g_feats : float32 tensor of shape (G, N2)
            Input graph features. G for the number of graphs and N2 for the feature size.
        get_node_weight : bool
            Whether to get the weights of atoms during readout.

        Returns
        -------
        float32 tensor of shape (G, N2)
            Updated graph features.
        float32 tensor of shape (V, 1)
            The weights of nodes in readout, returned only when get_node_weight
            is True.
        """
        with g.local_scope():
            g.ndata['z'] = self.compute_logits(
                torch.cat([dgl.broadcast_nodes(g, F.relu(g_feats)), node_feats], dim=1))
            g.ndata['a'] = dgl.softmax_nodes(g, 'z')
            g.ndata['hv'] = self.project_nodes(node_feats)
            context = F.elu(dgl.sum_nodes(g, 'hv', 'a'))

            if get_node_weight:
                return self.gru(context, g_feats), g.ndata['a']
            else:
                return self.gru(context, g_feats)
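The submodules used above (compute_logits, project_nodes, gru) are not shown in the snippet. Below is a minimal self-contained sketch of how such an attention readout could be wired, assuming compute_logits and project_nodes are single linear layers and gru is a GRUCell; these are illustrative assumptions, not the original class definition:

import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F

class GlobalAttentionReadout(nn.Module):
    def __init__(self, node_feat_size, graph_feat_size):
        super().__init__()
        # Attention logit per node from [broadcast graph context, node feature].
        self.compute_logits = nn.Linear(node_feat_size + graph_feat_size, 1)
        # Project node features to the graph feature size before the weighted sum.
        self.project_nodes = nn.Linear(node_feat_size, graph_feat_size)
        # GRU cell that updates the graph representation with the new context.
        self.gru = nn.GRUCell(graph_feat_size, graph_feat_size)

    def forward(self, g, node_feats, g_feats):
        with g.local_scope():
            g.ndata['z'] = self.compute_logits(
                torch.cat([dgl.broadcast_nodes(g, F.relu(g_feats)), node_feats], dim=1))
            g.ndata['a'] = dgl.softmax_nodes(g, 'z')      # per-graph attention weights
            g.ndata['hv'] = self.project_nodes(node_feats)
            context = F.elu(dgl.sum_nodes(g, 'hv', 'a'))  # attention-weighted readout
            return self.gru(context, g_feats)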
Example #3
    def forward(self, features, G):
        with G.local_scope():
            G.ndata['h'] = features['0']
            features_0_softmax = dgl.softmax_nodes(G, 'h')
            # G.ndata['h3'] = (features_0_softmax[:, 0] * (G.ndata['x'] +
            #                                              features['1'][:, 0]))
            # weighted_sum = dgl.readout_nodes(G, 'h3', op='sum')

        return G.ndata['x'] + features['1'][:, 0]
Example #4
    def forward(self, g):
        g.ndata['a'] = self.leaky_relu(self.attn(g.ndata[self.attn_key]))
        g.ndata['a'] = dgl.softmax_nodes(g, 'a')
        attn_emb = g.ndata[self.msg_key]
        if attn_emb.ndimension() == 2:
            g.ndata[self.msg_key] = attn_emb.view(g.number_of_nodes(),
                                                  self.n_heads, -1)
        g.ndata['a'] = g.ndata[self.msg_key] * g.ndata['a'].unsqueeze(-1)
        graph_emb = dgl.sum_nodes(g, 'a')

        return graph_emb.view(graph_emb.shape[0], -1)
Example #5
    def forward(self, g, feat, last_nodes):
        with g.local_scope():
            if self.batch_norm is not None:
                feat = self.batch_norm(feat)
            feat_u = self.fc_u(feat)
            feat_v = self.fc_v(feat[last_nodes])
            feat_v = dgl.broadcast_nodes(g, feat_v)
            g.ndata['e'] = self.attn_e(th.sigmoid(feat_u + feat_v))
            alpha = dgl.softmax_nodes(g, 'e')
            g.ndata['w'] = feat * alpha
            rst = dgl.sum_nodes(g, 'w')
            rst = self.fc_out(rst)
            return rst
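For context, a constructor consistent with the forward pass above might look like the following sketch; the class name and the assumption that every submodule is a single linear layer are guesses for illustration, not the original code:

import torch.nn as nn

class LastNodeAttnReadout(nn.Module):
    def __init__(self, input_dim, output_dim, batch_norm=True):
        super().__init__()
        self.batch_norm = nn.BatchNorm1d(input_dim) if batch_norm else None
        self.fc_u = nn.Linear(input_dim, input_dim)
        self.fc_v = nn.Linear(input_dim, input_dim, bias=False)
        self.attn_e = nn.Linear(input_dim, 1, bias=False)  # scalar attention logit per node
        self.fc_out = nn.Linear(input_dim, output_dim)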
Example #6
    def forward(self, graph: dgl.DGLHeteroGraph):
        # embedding block
        if self.embed is not None:
            graph.ndata['data'] = self.embed(graph.ndata['data'])
        src_embed = graph.ndata['data']  # [B*N, 100]
        graph.ndata['data'] = self.embed_modules(src_embed)  # [B*N, 64]
        # MLP-GNN

        graph = self.gnn_modules(graph)
        graph.ndata['data'] = torch.cat((src_embed, graph.ndata['data']),
                                        dim=-1)  # [B*N, 164]
        with graph.local_scope():
            graph.ndata['scoring_out'] = self.score_mlp(graph.ndata['data'])
            weights = dgl.softmax_nodes(graph, 'scoring_out')
            node_embed = self.transform_mlp(graph.ndata['data'])
            graph.ndata['node_embed'] = weights * node_embed  # [B*N, 8]
            node_embed = dgl.sum_nodes(graph, 'node_embed')
        node_embed = self.out_linear(node_embed)
        return node_embed
Example #7
def test_softmax(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    g.ndata['h'] = F.randn((g.number_of_nodes(), 3))
    g.edata['h'] = F.randn((g.number_of_edges(), 2))

    # Test.1: node readout
    x = dgl.softmax_nodes(g, 'h')
    subg = dgl.unbatch(g)
    subx = []
    for sg in subg:
        subx.append(F.softmax(sg.ndata['h'], dim=0))
    assert F.allclose(x, F.cat(subx, dim=0))

    # Test.2: edge readout
    x = dgl.softmax_edges(g, 'h')
    subg = dgl.unbatch(g)
    subx = []
    for sg in subg:
        subx.append(F.softmax(sg.edata['h'], dim=0))
    assert F.allclose(x, F.cat(subx, dim=0))
Example #8
    def forward(self, graph, feat):
        with graph.local_scope():
            batch_size = graph.batch_size

            h = (feat.new_zeros((self.n_layers, batch_size, self.input_dim)),
                 feat.new_zeros((self.n_layers, batch_size, self.input_dim))
                 )  # (h_0, c_0), each of shape (n_layers, batch_size, input_dim)

            q_star = feat.new_zeros(batch_size, self.output_dim)  # (batch_size, output_dim)
            for i in range(self.n_iters):
                q, h = self.lstm(q_star.unsqueeze(0), h)
                q = q.view(batch_size, self.input_dim)
                e = (feat * dgl.broadcast_nodes(graph, q)).sum(dim=-1,
                                                               keepdim=True)

                graph.ndata['e'] = e
                alpha = dgl.softmax_nodes(graph, 'e')
                graph.ndata['r'] = feat * alpha
                readout = dgl.sum_nodes(graph, 'r')
                q_star = torch.cat([q, readout], dim=-1)

            return q_star
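The loop above is a Set2Set-style readout. The module definition is not part of the snippet, but the forward pass constrains it: output_dim must equal 2 * input_dim because q_star concatenates q and the readout. A constructor sketch under that assumption (names and the choice of nn.LSTM are guesses, not the original code):

import torch.nn as nn

class Set2SetReadout(nn.Module):
    def __init__(self, input_dim, n_iters, n_layers):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = 2 * input_dim  # q_star = [q, readout]
        self.n_iters = n_iters
        self.n_layers = n_layers
        # The LSTM consumes q_star (2 * input_dim wide) and emits the query q (input_dim wide).
        self.lstm = nn.LSTM(self.output_dim, self.input_dim, n_layers)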
Example #9
    def forward(self, g, node_feats, g_feats, get_node_weight=False):
        """Perform one-step readout

        Parameters
        ----------
        g : DGLGraph
            DGLGraph for a batch of graphs.
        node_feats : float32 tensor of shape (V, node_feat_size)
            Input node features. V for the number of nodes.
        g_feats : float32 tensor of shape (G, graph_feat_size)
            Input graph features. G for the number of graphs.
        get_node_weight : bool
            Whether to get the weights of atoms during readout.

        Returns
        -------
        float32 tensor of shape (G, graph_feat_size)
            Updated graph features.
        float32 tensor of shape (V, 1)
            The weights of nodes in readout, returned only when get_node_weight
            is True.
        """
        with g.local_scope():
            g.ndata['z'] = self.compute_logits(
                torch.cat([dgl.broadcast_nodes(g, F.relu(g_feats)), node_feats], dim=1))
            g.ndata['a'] = dgl.softmax_nodes(g, 'z')
            g.ndata['hv'] = self.project_nodes(node_feats)

            if isinstance(g, BatchedDGLGraph):
                g_repr = dgl.sum_nodes(g, 'hv', 'a')
            else:
                g_repr = dgl.sum_nodes(g, 'hv', 'a').unsqueeze(0)
            context = F.elu(g_repr)

            if get_node_weight:
                return self.gru(context, g_feats), g.ndata['a']
            else:
                return self.gru(context, g_feats)
Example #10
    def forward(self, graph: dgl.DGLGraph, feat: torch.Tensor) -> torch.Tensor:
        """
        Compute set2set pooling.

        Args:
            graph: the input graph
            feat: The input feature with shape :math:`(N, D)`, where :math:`N` is the
                number of nodes in the graph and :math:`D` is the feature size.

        Returns:
            The output feature with shape :math:`(B, 2D)`, where :math:`B` is the
                batch size. The readout concatenates the LSTM query with the
                attention-weighted node sum, so the output is twice the input width.
        """
        with graph.local_scope():
            batch_size = graph.batch_size

            h = (
                feat.new_zeros((self.n_layers, batch_size, self.input_dim)),
                feat.new_zeros((self.n_layers, batch_size, self.input_dim)),
            )

            q_star = feat.new_zeros(batch_size, self.output_dim)

            for _ in range(self.n_iters):
                q, h = self.lstm(q_star.unsqueeze(0), h)
                q = q.view(batch_size, self.input_dim)
                e = (feat *
                     dgl.broadcast_nodes(graph, q, ntype=self.ntype)).sum(
                         dim=-1, keepdim=True)
                graph.nodes[self.ntype].data["e"] = e
                alpha = dgl.softmax_nodes(graph, "e", ntype=self.ntype)
                graph.nodes[self.ntype].data["r"] = feat * alpha
                readout = dgl.sum_nodes(graph, "r", ntype=self.ntype)
                q_star = torch.cat([q, readout], dim=-1)

            return q_star
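The ntype argument restricts broadcast_nodes, softmax_nodes and sum_nodes to a single node type of a heterogeneous graph. A small standalone sketch (the heterograph schema and sizes are made up for illustration):

import dgl
import torch

hg = dgl.heterograph({
    ('atom', 'bond', 'atom'): (torch.tensor([0, 1]), torch.tensor([1, 2])),
    ('atom', 'in', 'mol'): (torch.tensor([0, 1, 2]), torch.tensor([0, 0, 0])),
})
hg.nodes['atom'].data['e'] = torch.randn(hg.num_nodes('atom'), 1)
# Softmax over the 'atom' nodes only; other node types are left untouched.
alpha = dgl.softmax_nodes(hg, 'e', ntype='atom')
assert torch.allclose(alpha.sum(dim=0), torch.ones(1))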