Example no. 1
 def forward(self, block, H, HBar=None):
     if self.training:
         with block.local_scope():
             H_src, H_dst = H
             HBar_src, agg_HBar_dst = HBar
             block.dstdata["agg_hbar"] = agg_HBar_dst
             block.srcdata["hdelta"] = H_src - HBar_src
             block.update_all(fn.copy_u("hdelta", "m"),
                              fn.mean("m", "hdelta_new"))
             h_neigh = (block.dstdata["agg_hbar"] +
                        block.dstdata["hdelta_new"])
             h = self.W(th.cat([H_dst, h_neigh], 1))
             if self.activation is not None:
                 h = self.activation(h)
             return h
     else:
         with block.local_scope():
             H_src, H_dst = H
             block.srcdata["h"] = H_src
             block.update_all(fn.copy_u("h", "m"), fn.mean("m", "h_new"))
             h_neigh = block.dstdata["h_new"]
             h = self.W(th.cat([H_dst, h_neigh], 1))
             if self.activation is not None:
                 h = self.activation(h)
             return h
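These snippets are shown out of context; the following is a minimal sketch of the imports they implicitly rely on (an assumption, since the original files are not reproduced here), with the th/torch alias varying between examples:

# Common imports assumed by these snippets (not shown in the originals);
# individual examples additionally pull in their own project-specific helpers.
import numpy as np
import torch
import torch as th                 # several snippets use the `th` alias
import torch.nn.functional as F
import dgl
import dgl.function as fn          # built-in message/reduce ops such as fn.copy_u and fn.mean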
Example no. 2
    def forward(self, g, h, h_en):
        """Forward computation

        """
        with g.local_scope():
            h_src, h_dst = expand_as_pair(h)
            h_src_en, h_dst_en = expand_as_pair(h_en)

            g.srcdata['x'] = h_src
            g.dstdata['x'] = h_dst

            g.srcdata['en'] = h_src_en
            g.dstdata['en'] = h_dst_en

            if not self.batch_norm:
                #g.update_all(self.message, fn.mean('e', 'x'))
                g.apply_edges(self.message)
                g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))
                g.update_all(fn.copy_e('e_en', 'e_en'), fn.mean('e_en', 'en'))
            else:
                g.apply_edges(self.message)

                g.edata['e'] = self.bn(g.edata['e'])

                g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))

                g.update_all(fn.copy_e('e_en', 'e_en'), fn.mean('e_en', 'en'))

            return g.dstdata['x'], g.dstdata['en']  #+  h_en
Example no. 3
    def _add_ndata(self):
        vectorizer = CountVectorizer(min_df=5)
        features = vectorizer.fit_transform(
            self.data['plot_keywords'].fillna('').values)
        self.g.nodes['movie'].data['feat'] = torch.from_numpy(
            features.toarray()).float()
        self.g.nodes['movie'].data['label'] = torch.from_numpy(
            self.labels).long()

        # The features of actor and director nodes are the average of the features of their associated movie nodes
        self.g.multi_update_all(
            {
                'ma': (fn.copy_u('feat', 'm'), fn.mean('m', 'feat')),
                'md': (fn.copy_u('feat', 'm'), fn.mean('m', 'feat'))
            }, 'sum')

        n_movies = len(self.movies)
        train_idx, val_idx, test_idx = split_idx(np.arange(n_movies), 400, 400,
                                                 self._seed)
        self.g.nodes['movie'].data['train_mask'] = generate_mask_tensor(
            idx2mask(train_idx, n_movies))
        self.g.nodes['movie'].data['val_mask'] = generate_mask_tensor(
            idx2mask(val_idx, n_movies))
        self.g.nodes['movie'].data['test_mask'] = generate_mask_tensor(
            idx2mask(test_idx, n_movies))
Example no. 4
 def forward(self, block, H, HBar=None):
     if self.training:
         with block.local_scope():
             H_src, H_dst = H
             HBar_src, agg_HBar_dst = HBar
             block.dstdata['agg_hbar'] = agg_HBar_dst
             block.srcdata['hdelta'] = H_src - HBar_src
             block.update_all(fn.copy_u('hdelta', 'm'),
                              fn.mean('m', 'hdelta_new'))
             h_neigh = block.dstdata['agg_hbar'] + block.dstdata[
                 'hdelta_new']
             h = self.W(th.cat([H_dst, h_neigh], 1))
             if self.activation is not None:
                 h = self.activation(h)
             return h
     else:
         with block.local_scope():
             H_src, H_dst = H
             block.srcdata['h'] = H_src
             block.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h_new'))
             h_neigh = block.dstdata['h_new']
             h = self.W(th.cat([H_dst, h_neigh], 1))
             if self.activation is not None:
                 h = self.activation(h)
             return h
Example no. 5
    def forward(self, graph_obj):

        for layer in range(self.num_layers):
            # --------------------------------------
            # If this is the first layer, initialize features with the input features, then perform the computation
            # --------------------------------------
            if layer == 0:
                # For the 1st layer, initialize features with the input feature
                graph_obj.ndata['features'] = graph_obj.ndata[
                    self.input_feature_label]
                graph_obj.update_all(fn.copy_u('features', 'm'),
                                     fn.mean('m', 'h'))
            else:
                # ------------------------------
                # Update along the given edge
                # ------------------------------
                graph_obj.update_all(fn.copy_u('features', 'm'),
                                     fn.mean('m', 'h'))
                concat_ftrs = torch.cat([
                    graph_obj.ndata['features'], graph_obj.ndata['h'],
                    graph_obj.ndata[self.input_feature_label]
                ], dim=1)

                ftrs = self.FC_w(concat_ftrs)
                ftrs = F.tanh(ftrs)
                if self.norm:
                    ftrs = ftrs / torch.norm(ftrs, p=2)
                graph_obj.ndata['features'] = ftrs
        return
Example no. 6
    def forward(self, g, feat):
        with g.local_scope():
            if self.aggre_type == 'attention':
                if isinstance(feat, tuple):
                    h_src = self.feat_drop(feat[0]).view(
                        -1, self.num_heads, self.in_size)
                    h_dst = self.feat_drop(feat[1]).view(
                        -1, self.num_heads, self.in_size)
                el = (h_src * self.attn_l).sum(dim=-1).unsqueeze(-1)
                g.srcdata.update({'ft': h_src, 'el': el})
                g.apply_edges(fn.copy_u('el', 'e'))
                e = self.leaky_relu(g.edata.pop('e'))
                g.edata['a'] = self.attn_drop(edge_softmax(g, e))
                g.update_all(fn.u_mul_e('ft', 'a', 'm'), fn.sum('m', 'ft'))
                rst = g.dstdata['ft'].flatten(1)
                if self.residual:
                    rst = rst + h_dst
                if self.activation:
                    rst = self.activation(rst)

            elif self.aggre_type == 'mean':
                h_src = self.feat_drop(feat[0]).view(
                    -1, self.in_size * self.num_heads)
                g.srcdata['ft'] = h_src
                g.update_all(fn.copy_u('ft', 'm'), fn.mean('m', 'ft'))
                rst = g.dstdata['ft']

            elif self.aggre_type == 'pool':
                h_src = self.feat_drop(feat[0]).view(
                    -1, self.in_size * self.num_heads)
                g.srcdata['ft'] = F.relu(self.fc_pool(h_src))
                g.update_all(fn.copy_u('ft', 'm'), fn.mean('m', 'ft'))
                rst = g.dstdata['ft']
            return rst
Example no. 7
def convert_mag_to_homograph(g, device):
    """
    Featurize node types that don't have input features (i.e. author,
    institution, field_of_study) by averaging their neighbor features.
    Then convert the graph to an undirected homogeneous graph.
    """
    src_writes, dst_writes = g.all_edges(etype="writes")
    src_topic, dst_topic = g.all_edges(etype="has_topic")
    src_aff, dst_aff = g.all_edges(etype="affiliated_with")
    new_g = dgl.heterograph({
        ("paper", "written", "author"): (dst_writes, src_writes),
        ("paper", "has_topic", "field"): (src_topic, dst_topic),
        ("author", "aff", "inst"): (src_aff, dst_aff)
    })
    new_g = new_g.to(device)
    new_g.nodes["paper"].data["feat"] = g.nodes["paper"].data["feat"]
    new_g["written"].update_all(fn.copy_u("feat", "m"), fn.mean("m", "feat"))
    new_g["has_topic"].update_all(fn.copy_u("feat", "m"), fn.mean("m", "feat"))
    new_g["aff"].update_all(fn.copy_u("feat", "m"), fn.mean("m", "feat"))
    g.nodes["author"].data["feat"] = new_g.nodes["author"].data["feat"]
    g.nodes["institution"].data["feat"] = new_g.nodes["inst"].data["feat"]
    g.nodes["field_of_study"].data["feat"] = new_g.nodes["field"].data["feat"]

    # Convert to homogeneous graph
    # Get DGL type id for paper type
    target_type_id = g.get_ntype_id("paper")
    g = dgl.to_homogeneous(g, ndata=["feat"])
    g = dgl.add_reverse_edges(g, copy_ndata=True)
    # Mask for paper nodes
    g.ndata["target_mask"] = g.ndata[dgl.NTYPE] == target_type_id
    return g
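A hedged follow-up sketch (the variable names are assumptions, not from the source) showing how the returned target_mask is typically used to pull the paper rows back out of the homogenized graph:

# Hypothetical usage of convert_mag_to_homograph; `g` and `device` come from the caller.
homo_g = convert_mag_to_homograph(g, device)
# Select the rows that belong to the original "paper" nodes only.
paper_feat = homo_g.ndata["feat"][homo_g.ndata["target_mask"]]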
Example no. 8
    def mean_agg(self, g):
        x = g.ndata['x']
        x = self.dropout(x)
        g.srcdata['h'] = x
        for i in range(self.K):
            if i == 0:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
                h_neigh = g.dstdata['neigh']
                h = torch.matmul(torch.cat([g.srcdata['h'], h_neigh], dim=1), self.weight_in)
                if self.activation:
                    h = self.activation(h, inplace=False)
                norm = torch.norm(h, dim=1)
                h = h / (norm.unsqueeze(-1) + 0.05)
                g.srcdata['h'] = h
            elif i == self.K - 1:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
                h_neigh = g.dstdata['neigh']
                h = torch.matmul(torch.cat([g.srcdata['h'], h_neigh], dim=1), self.weight_out)

                norm = torch.norm(h, dim=1)
                h = h / (norm.unsqueeze(-1) + 0.05)
                g.ndata['z'] = h
            else:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
                h_neigh = g.dstdata['neigh']
                h = torch.matmul(torch.cat([g.srcdata['h'], h_neigh], dim=1), self.weight_hid[i-1, :, :])
                if self.activation:
                    h = self.activation(h, inplace=False)
                norm = torch.norm(h, dim=1)
                h = h / (norm.unsqueeze(-1) + 0.05)
                g.srcdata['h'] = h
        return g
Example no. 9
    def forward(self, g, xs):
        """Forward computation

        Parameters
        ----------
        g : DGLHeteroGraph
            Input block graph.
        xs : dict[str, torch.Tensor]
            Node feature for each node type.

        Returns
        -------
        list of torch.Tensor
            New node features for each node type.
        """
        g = g.local_var()
        for ntype, x in xs.items():
            g.srcnodes[ntype].data['x'] = x
        if self.use_weight:
            ws = self.basis_weight()
            funcs = {}
            for i, (srctype, etype, dsttype) in enumerate(g.canonical_etypes):
                if srctype not in xs:
                    continue
                g.srcnodes[srctype].data['h%d' % i] = th.matmul(
                    g.srcnodes[srctype].data['x'], ws[etype])
                funcs[(srctype, etype, dsttype)] = (fn.copy_u('h%d' % i, 'm'),
                                                    fn.mean('m', 'h'))
        else:
            funcs = {}
            for i, (srctype, etype, dsttype) in enumerate(g.canonical_etypes):
                if srctype not in xs:
                    continue
                g.srcnodes[srctype].data['h%d' % i] = g.srcnodes[srctype].data['x']
                funcs[(srctype, etype, dsttype)] = (fn.copy_u('h%d' % i, 'm'),
                                                    fn.mean('m', 'h'))
        # message passing
        g.multi_update_all(funcs, 'sum')

        hs = {}
        for ntype in g.dsttypes:
            if 'h' in g.dstnodes[ntype].data:
                hs[ntype] = g.dstnodes[ntype].data['h']

        def _apply(ntype, h):
            # apply bias and activation
            if self.self_loop:
                h = h + th.matmul(xs[ntype][:h.shape[0]], self.loop_weight)
            if self.activation:
                h = self.activation(h)
            h = self.dropout(h)
            return h

        hs = {ntype: _apply(ntype, h) for ntype, h in hs.items()}
        return hs
Example no. 10
    def forward(self, graph, n_feats, e_weights=None):
        graph.ndata['h'] = n_feats

        if e_weights is None:
            graph.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'h'))
        else:
            graph.edata['ew'] = e_weights
            graph.update_all(fn.u_mul_e('h', 'ew', 'm'), fn.mean('m', 'h'))

        graph.ndata['h'] = self.layer(
            th.cat([graph.ndata['h'], n_feats], dim=-1))

        output = graph.ndata['h']
        return output
Example no. 11
    def gen_mail(self, args, emb, input_nodes, pair_graph, frontier, mode='train'):
        pair_graph.ndata['feat'] = emb

        pair_graph = dgl.add_reverse_edges(pair_graph, copy_edata=True)

        pair_graph.update_all(MSG.get_edge_msg, fn.mean('m','msg')) 
        frontier.ndata['msg'] = torch.zeros((frontier.num_nodes(), self.nfeat_dim + 2))
        frontier.ndata['msg'][pair_graph.ndata[dgl.NID]] = pair_graph.ndata['msg'].to('cpu')

        for _ in range(args.n_layer):
            frontier.update_all(fn.copy_u('msg','m'), fn.mean('m','msg'))

        mail = MSG.msg2mail(frontier.ndata['mail'][input_nodes], frontier.ndata['msg'][input_nodes])
        return mail
Example no. 12
 def forward(self, g, h):
     with g.local_scope():
         g.ndata['x'] = h
         # generate the message and store it on the edges
         g.apply_edges(self.message)
         # process the message
         e = g.edata['e']
         for i in range(self.num_layers):
             if i > 0:
                 e = self.fcs[i - 1](e)
             if self.batch_norm:
                 e = self.bns[i](e)
             if self.activation:
                 e = self.acts[i](e)
         g.edata['e'] = e
         # pass the message and update the nodes
         g.update_all(fn.copy_e('e', 'e'), fn.mean('e', 'x'))
         # shortcut connection
         x = g.ndata.pop('x')
         g.edata.pop('e')
         if self.sc is None:
             sc = h
         else:
             sc = self.sc(h)
             if self.batch_norm:
                 sc = self.sc_bn(sc)
         if self.activation:
             return self.sc_act(x + sc)
         else:
             return x + sc
Example no. 13
    def forward(self, graph, feat_dict):
        funcs = {}

        #  print("graph1:","graph")
        #  print("graph1:",graph.canonical_etypes)
        for srctype, etype, dsttype in graph.canonical_etypes:
            Wh = self.weight[etype](feat_dict[srctype])
            graph.srcnodes[srctype].data['Wh_{}'.format(etype)] = Wh
            funcs[etype] = (fn.copy_u('Wh_{}'.format(etype),
                                      'm'), fn.mean('m', 'h'))

    #     print("success")
    #   for i,k in funcs.items():
    #       print('func',i,k)

        graph.multi_update_all(funcs, 'sum')

        result = {}
        for ntype in graph.ntypes:
            if 'h' in graph.dstnodes[ntype].data:
                result[ntype] = graph.dstnodes[ntype].data['h']
            else:
                result[ntype] = torch.zeros(graph.number_of_dst_nodes(ntype),
                                            self.out_size).to(device)

        return result
Example no. 14
def track_time(graph_name, format, feat_size, msg_type, reduce_type):
    device = utils.get_bench_device()
    graph = utils.get_graph(graph_name, format)
    graph = graph.to(device)
    graph.ndata['h'] = torch.randn((graph.num_nodes(), feat_size),
                                   device=device)
    graph.edata['e'] = torch.randn((graph.num_edges(), 1), device=device)

    msg_builtin_dict = {
        'copy_u': fn.copy_u('h', 'x'),
        'u_mul_e': fn.u_mul_e('h', 'e', 'x'),
    }

    reduce_builtin_dict = {
        'sum': fn.sum('x', 'h_new'),
        'mean': fn.mean('x', 'h_new'),
        'max': fn.max('x', 'h_new'),
    }

    # dry run
    graph.update_all(msg_builtin_dict[msg_type],
                     reduce_builtin_dict[reduce_type])

    # timing

    with utils.Timer() as t:
        for i in range(3):
            graph.update_all(msg_builtin_dict[msg_type],
                             reduce_builtin_dict[reduce_type])

    return t.elapsed_secs / 3
Example no. 15
    def forward(self):
        """ Forward computation

        Returns
        -------
        torch.Tensor
            New node features.
        """
        g = self.g.local_var()
        funcs = {}
        for i, (srctype, etype, dsttype) in enumerate(g.canonical_etypes):
            g.nodes[srctype].data['embed-%d' % i] = self.embeds["{}-{}-{}".format(srctype, etype, dsttype)]
            funcs[(srctype, etype, dsttype)] = (fn.copy_u('embed-%d' % i, 'm'), fn.mean('m', 'h'))
        g.multi_update_all(funcs, 'sum')
        
        hs = [g.nodes[ntype].data['h'] for ntype in g.ntypes]
        for i in range(len(hs)):
            h = hs[i]
            # apply bias and activation
            if self.self_loop:
                h = h + self.self_embeds[i]
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            h = self.dropout(h)
            hs[i] = h
        return hs
Example no. 16
 def forward(self, G, feat_dict):
     # Input: a DGL graph and a feature dict
     # The input is a dictionary of node features for each type
     funcs = {}
     for srctype, etype, dsttype in G.canonical_etypes:
         # g.etype : ['written-by', 'writing', 'citing', 'cited', 'is-about', 'has']
         #  G.canonical_etypes : [('paper', 'written-by', 'author'),
         # ('author', 'writing', 'paper'),
         # ('paper', 'citing', 'paper'),
         # ('paper', 'cited', 'paper'),
         # ('paper', 'is-about', 'subject'),
         # ('subject', 'has', 'paper')]
         # Compute W_r * h
         # weight schema is [name : nn linear]
         Wh = self.weight[etype](feat_dict[srctype])
         # Save it in graph for message passing
         G.nodes[srctype].data['Wh_%s' % etype] = Wh
         # Specify per-relation message passing functions: (message_func, reduce_func).
         # Note that the results are saved to the same destination feature 'h', which
         # hints the type wise reducer for aggregation.
         funcs[etype] = (fn.copy_u('Wh_%s' % etype, 'm'), fn.mean('m', 'h'))
     # Trigger message passing of multiple types.
     # The first argument is the message passing functions for each relation.
     # The second one is the type wise reducer, could be "sum", "max",
     # "min", "mean", "stack"
     G.multi_update_all(funcs, 'sum')
     # return the updated node feature dictionary
     return {ntype: G.nodes[ntype].data['h'] for ntype in G.ntypes}
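For context, a minimal self-contained sketch (the toy heterograph and tensors are hypothetical) of how a per-relation funcs dict like the one built above is driven through multi_update_all:

import torch
import dgl
import dgl.function as fn

# Hypothetical toy heterograph: two authors each writing one paper.
G = dgl.heterograph({
    ('author', 'writing', 'paper'): ([0, 1], [0, 1]),
    ('paper', 'written-by', 'author'): ([0, 1], [0, 1]),
})
G.nodes['author'].data['Wh_writing'] = torch.randn(2, 4)
G.nodes['paper'].data['Wh_written-by'] = torch.randn(2, 4)

funcs = {
    'writing': (fn.copy_u('Wh_writing', 'm'), fn.mean('m', 'h')),
    'written-by': (fn.copy_u('Wh_written-by', 'm'), fn.mean('m', 'h')),
}
G.multi_update_all(funcs, 'sum')          # per-relation mean, then sum across relations
print(G.nodes['paper'].data['h'].shape)   # torch.Size([2, 4])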
Example no. 17
    def forward(self, g, h, e):
        h_in = h  # for residual connection

        if self.dgl_builtin == False:
            h = self.dropout(h)
            g.ndata['h'] = h
            #g.update_all(fn.copy_src(src='h', out='m'),
            #             self.aggregator,
            #             self.nodeapply)
            if self.aggregator_type == 'maxpool':
                g.ndata['h'] = self.aggregator.linear(g.ndata['h'])
                g.ndata['h'] = self.aggregator.activation(g.ndata['h'])
                g.update_all(fn.copy_src('h', 'm'), fn.max('m', 'c'),
                             self.nodeapply)
            elif self.aggregator_type == 'lstm':
                g.update_all(fn.copy_src(src='h', out='m'), self.aggregator,
                             self.nodeapply)
            else:
                g.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'c'),
                             self.nodeapply)
            h = g.ndata['h']
        else:
            # For original graphs
            # h = self.sageconv(g, h)
            # For reduced graphs
            h = self.sageconv(g, h, edge_weight=e)

        if self.batch_norm:
            h = self.batchnorm_h(h)

        if self.residual:
            h = h_in + h  # residual connection

        return h
Example no. 18
def test_mean_zero_degree(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
    g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))
    deg = F.asnumpy(g.in_degrees())
    v = F.tensor(np.where(deg == 0)[0])
    assert F.allclose(F.gather_row(g.ndata['x'], v), F.zeros((len(v), 3)))
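A minimal PyTorch-backend sketch (the toy graph is hypothetical) of the property this test checks: destination nodes with zero in-degree receive an all-zero mean:

import torch
import dgl
import dgl.function as fn

g = dgl.graph(([0, 1], [1, 0]), num_nodes=3)   # node 2 has no incoming edges
g.ndata['h'] = torch.ones(3, 3)
g.update_all(fn.copy_u('h', 'm'), fn.mean('m', 'x'))
print(g.ndata['x'][2])   # tensor([0., 0., 0.])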
Example no. 19
def load_subtensor(g,
                   labels,
                   blocks,
                   hist_blocks,
                   dev_id,
                   aggregation_on_device=False):
    """
    Copies features and labels of a set of nodes onto the GPU.
    """
    blocks[0].srcdata["features"] = g.ndata["features"][blocks[0].srcdata[
        dgl.NID]].to(dev_id)
    blocks[-1].dstdata["label"] = labels[blocks[-1].dstdata[dgl.NID]].to(
        dev_id)
    for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
        hist_col = "features" if i == 0 else "hist_%d" % i
        block.srcdata["hist"] = g.ndata[hist_col][block.srcdata[dgl.NID]].to(
            dev_id)

        # Aggregate history
        hist_block.srcdata["hist"] = g.ndata[hist_col][hist_block.srcdata[
            dgl.NID]]
        if aggregation_on_device:
            hist_block.srcdata["hist"] = hist_block.srcdata["hist"].to(dev_id)
        hist_block.update_all(fn.copy_u("hist", "m"), fn.mean("m", "agg_hist"))
        block.dstdata["agg_hist"] = hist_block.dstdata["agg_hist"]
        if not aggregation_on_device:
            block.dstdata["agg_hist"] = block.dstdata["agg_hist"].to(dev_id)
Example no. 20
def load_subtensor(g,
                   labels,
                   blocks,
                   hist_blocks,
                   dev_id,
                   aggregation_on_device=False):
    """
    Copies features and labels of a set of nodes onto the GPU.
    """
    blocks[0].srcdata['features'] = g.ndata['features'][blocks[0].srcdata[
        dgl.NID]]
    blocks[-1].dstdata['label'] = labels[blocks[-1].dstdata[dgl.NID]]
    ret_blocks = []
    ret_hist_blocks = []
    for i, (block, hist_block) in enumerate(zip(blocks, hist_blocks)):
        hist_col = 'features' if i == 0 else 'hist_%d' % i
        block.srcdata['hist'] = g.ndata[hist_col][block.srcdata[dgl.NID]]

        # Aggregate history
        hist_block.srcdata['hist'] = g.ndata[hist_col][hist_block.srcdata[
            dgl.NID]]
        if aggregation_on_device:
            hist_block = hist_block.to(dev_id)
            hist_block.srcdata['hist'] = hist_block.srcdata['hist']
        hist_block.update_all(fn.copy_u('hist', 'm'), fn.mean('m', 'agg_hist'))

        block = block.to(dev_id)
        if not aggregation_on_device:
            hist_block = hist_block.to(dev_id)
        block.dstdata['agg_hist'] = hist_block.dstdata['agg_hist']
        ret_blocks.append(block)
        ret_hist_blocks.append(hist_block)
    return ret_blocks, ret_hist_blocks
Example no. 21
    def forward(self, h, G=None, basis=None, **kwargs):
        """Forward pass of the linear layer

        Args:
            G: minibatch of (h**o)graphs
            h: dict of features
            basis: pre-computed Q * Y
        Returns: 
            tensor with new features [B, n_points, n_features_out]
        """
        with G.local_scope():
            # Add node features to local graph scope
            for k, v in h.items():
                G.ndata[k] = v

            # Add edge features
            for (mi, di) in self.f_in.structure:
                for (mo, do) in self.f_out.structure:
                    etype = f'({di},{do})'
                    G.edata[etype] = self.kernel_unary[etype](G.edata['feat'], basis)

            # Perform message-passing for each output feature type
            for d in self.f_out.degrees:
                G.apply_edges(self.udf_u_mul_e(d))
                G.update_all(fn.copy_e('msg', 'msg'), fn.mean('msg', f'out{d}'))

            return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
Example no. 22
    def forward(self, graph, feat, e_feat):
        r"""Compute GraphSAGE layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor
            The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
            is size of input feature, :math:`N` is the number of nodes.
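        e_feat : torch.Tensor
            The input edge feature, one weight per edge, which is multiplied
            into the messages via ``fn.u_mul_e`` below.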

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is size of output feature.
        """
        graph = graph.local_var()
        feat = self.feat_drop(feat)
        h_self = feat
        graph.edata['e'] = e_feat
        if self._aggre_type == 'sum':
            graph.ndata['h'] = feat
            graph.update_all(fn.u_mul_e('h', 'e', 'm'), fn.sum('m', 'neigh'))
            h_neigh = graph.ndata['neigh']
        elif self._aggre_type == 'mean':
            graph.ndata['h'] = feat
            graph.update_all(fn.u_mul_e('h', 'e', 'm'), fn.mean('m', 'neigh'))
            h_neigh = graph.ndata['neigh']
        elif self._aggre_type == 'gcn':
            graph.ndata['h'] = feat
            graph.update_all(fn.u_mul_e('h', 'e', 'm'), fn.sum('m', 'neigh'))
            # divide in_degrees
            degs = graph.in_degrees().float()
            degs = degs.to(feat.device)
            h_neigh = (graph.ndata['neigh'] +
                       graph.ndata['h']) / (degs.unsqueeze(-1) + 1)
        elif self._aggre_type == 'pool':
            graph.ndata['h'] = F.relu(self.fc_pool(feat))
            graph.update_all(fn.u_mul_e('h', 'e', 'm'), fn.max('m', 'neigh'))
            h_neigh = graph.ndata['neigh']
        elif self._aggre_type == 'lstm':
            graph.ndata['h'] = feat
            graph.update_all(fn.u_mul_e('h', 'e', 'm'), self._lstm_reducer)
            h_neigh = graph.ndata['neigh']
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(
                self._aggre_type))
        # GraphSAGE GCN does not require fc_self.
        if self._aggre_type == 'gcn':
            rst = self.fc_neigh(h_neigh)
        else:
            rst = self.fc_self(h_self) + self.fc_neigh(h_neigh)
        # activation
        if self.activation is not None:
            rst = self.activation(rst)
        # normalization
        if self.norm is not None:
            rst = self.norm(rst)
        return rst
Example no. 23
    def forward(self, graph, feat):
        r"""Compute GraphSAGE layer.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor or pair of torch.Tensor
            If a torch.Tensor is given, the input feature of shape :math:`(N, D_{in})` where
            :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
            If a pair of torch.Tensor is given, the pair must contain two tensors of shape
            :math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
            is size of output feature.
        """
        graph = graph.local_var()

        if isinstance(feat, tuple):
            feat_src, feat_dst = feat
        else:
            feat_src = feat_dst = feat

        h_self = feat_dst

        graph.srcdata['h'] = feat_src
        graph.update_all(fn.copy_src('h', 'm'), fn.mean('m', 'neigh'))
        h_neigh = graph.dstdata['neigh']
        rst = self.fc_self(h_self) + self.fc_neigh(h_neigh)

        return rst
Example no. 24
    def forward(self, g, node_feats, edge_feats):
        with g.local_scope():
            # Node and edge feature dimension need to match.
            g.ndata['h'] = node_feats
            g.edata['h'] = self.edge_encoder(edge_feats)
            g.apply_edges(fn.u_add_e('h', 'h', 'm'))

            if self.aggr == 'softmax':
                g.edata['m'] = F.relu(g.edata['m']) + self.eps
                g.edata['a'] = edge_softmax(g, g.edata['m'] * self.beta)
                g.update_all(
                    lambda edge: {'x': edge.data['m'] * edge.data['a']},
                    fn.sum('x', 'm'))

            elif self.aggr == 'power':
                minv, maxv = 1e-7, 1e1
                torch.clamp_(g.edata['m'], minv, maxv)
                g.update_all(
                    lambda edge: {'x': torch.pow(edge.data['m'], self.p)},
                    fn.mean('x', 'm'))
                torch.clamp_(g.ndata['m'], minv, maxv)
                g.ndata['m'] = torch.pow(g.ndata['m'], self.p)

            else:
                raise NotImplementedError(
                    f'Aggregator {self.aggr} is not supported.')

            if self.msg_norm is not None:
                g.ndata['m'] = self.msg_norm(node_feats, g.ndata['m'])

            feats = node_feats + g.ndata['m']

            return self.mlp(feats)
Example no. 25
def neighbor_average_features(g, args):
    """
    Compute multi-hop neighbor-averaged node features
    """
    print("Compute neighbor-averaged feats")
    g.ndata["feat_0"] = g.ndata["feat"]
    for hop in range(1, args.R + 1):
        g.update_all(fn.copy_u(f"feat_{hop-1}", "msg"),
                     fn.mean("msg", f"feat_{hop}"))
    res = []
    for hop in range(args.R + 1):
        res.append(g.ndata.pop(f"feat_{hop}"))

    if args.dataset == "ogbn-mag":
        # For MAG dataset, only return features for target node types (i.e.
        # paper nodes)
        target_mask = g.ndata["target_mask"]
        target_ids = g.ndata[dgl.NID][target_mask]
        num_target = target_mask.sum().item()
        new_res = []
        for x in res:
            feat = torch.zeros((num_target, ) + x.shape[1:],
                               dtype=x.dtype,
                               device=x.device)
            feat[target_ids] = x[target_mask]
            new_res.append(feat)
        res = new_res
    return res
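A hedged call sketch (the namespace fields are assumptions based only on the attributes the function reads):

from types import SimpleNamespace

args = SimpleNamespace(R=3, dataset="ogbn-arxiv")     # hypothetical arguments
multi_hop_feats = neighbor_average_features(g, args)  # list of R + 1 tensors, hops 0..R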
Example no. 26
File: model.py Project: yuk12/dgl
    def forward(self, g, feat):
        with g.local_scope():
            g.ndata['h'] = feat

            hr = {}
            for i, etype in enumerate(g.canonical_etypes):
                g.apply_edges(self._calc_distance, etype=etype)
                self.dist[etype] = g.edges[etype].data['d']
                sampled_edges = self._top_p_sampling(g[etype], self.p[etype])

                # formula 8
                g.send_and_recv(sampled_edges,
                                fn.copy_u('h', 'm'),
                                fn.mean('m', 'h_%s' % etype[1]),
                                etype=etype)
                hr[etype] = g.ndata['h_%s' % etype[1]]
                if self.activation is not None:
                    hr[etype] = self.activation(hr[etype])

            # formula 9 using mean as inter-relation aggregator
            p_tensor = th.Tensor(list(self.p.values())).view(-1, 1, 1).to(g.device)
            h_homo = th.sum(th.stack(list(hr.values())) * p_tensor, dim=0)
            h_homo += feat
            if self.activation is not None:
                h_homo = self.activation(h_homo)

            return self.linear(h_homo)
Example no. 27
def general_outcome_correlation(graph, y0, n_prop=50, alpha=0.8, use_norm=False, post_step=None):
    with graph.local_scope():
        y = y0
        for _ in range(n_prop):
            if use_norm:
                degs = graph.in_degrees().float().clamp(min=1)
                norm = torch.pow(degs, -0.5)
                shp = norm.shape + (1,) * (y.dim() - 1)
                norm = torch.reshape(norm, shp)
                y = y * norm

            graph.srcdata.update({"y": y})
            graph.update_all(fn.copy_u("y", "m"), fn.mean("m", "y"))
            y = graph.dstdata["y"]

            if use_norm:
                degs = graph.in_degrees().float().clamp(min=1)
                norm = torch.pow(degs, 0.5)
                shp = norm.shape + (1,) * (y.dim() - 1)
                norm = torch.reshape(norm, shp)
                y = y * norm

            y = alpha * y + (1 - alpha) * y0

            if post_step is not None:
                y = post_step(y)

        return y
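A hedged usage sketch (the soft-label tensor and the clamping post-step are hypothetical):

# Smooth model soft predictions over the graph, clamping back to [0, 1] after each step.
smoothed = general_outcome_correlation(
    graph, y0=soft_labels, n_prop=50, alpha=0.8, use_norm=False,
    post_step=lambda x: x.clamp(0.0, 1.0))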
Example no. 28
    def call(self, graph, op_feats, device_feats, edge_feats):
        op_dst, device_dst = [], []
        for stype, etype, dtype in graph.canonical_etypes:
            g = graph[etype].local_var()

            if stype == 'op':
                g.srcdata['i'] = op_feats
            elif stype == 'device':
                g.srcdata['i'] = device_feats

            g.apply_edges(fn.copy_u('i', 's'))
            edata = tf.concat([g.edata.pop('s'), edge_feats[etype]], axis=1)
            g.edata['e'] = self.layers[etype](edata)
            g.update_all(fn.copy_e('e', 'm'), fn.mean(msg='m', out='o'))

            if dtype == 'op':
                op_dst.append(g.dstdata['o'])
            elif dtype == 'device':
                device_dst.append(g.dstdata['o'])

        op_dst = tf.math.add_n(op_dst) / len(op_dst)
        device_dst = tf.math.add_n(device_dst) / len(device_dst)

        return (self.activation(op_feats + op_dst),
                self.activation(device_feats + device_dst))
Example no. 29
    def forward(self, h, G=None, r=None, basis=None, **kwargs):
        """Forward pass of the linear layer

        Args:
            G: minibatch of (h**o)graphs
            h: dict of features
            r: inter-atomic distances
            basis: pre-computed Q * Y
        Returns: 
            tensor with new features [B, n_points, n_features_out]
        """
        with G.local_scope():
            # Add node features to local graph scope
            for k, v in h.items():
                G.ndata[k] = v

            # Add edge features
            if 'w' in G.edata.keys():
                w = G.edata['w']
                feat = torch.cat([w, r], -1)
            else:
                feat = torch.cat([r, ], -1)

            for (mi, di) in self.f_in.structure:
                for (mo, do) in self.f_out.structure:
                    etype = f'({di},{do})'
                    G.edata[etype] = self.kernel_unary[etype](feat, basis)

            # Perform message-passing for each output feature type
            for d in self.f_out.degrees:
                G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))

            return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
Example no. 30
File: model.py Project: yxd886/fuse
    def call(self, graph, instruction_feats, computation_feats, final_feats,
             edge_feats):
        instruction_dst, computation_dst, final_dst = [], [], []
        for stype, etype, dtype in graph.canonical_etypes:
            g = graph[etype].local_var()

            if stype == 'instruction':
                g.srcdata['i'] = instruction_feats
            elif stype == 'computation':
                g.srcdata['i'] = computation_feats
            elif stype == "final":
                g.srcdata['i'] = final_feats

            g.apply_edges(fn.copy_u('i', 's'))
            edata = tf.concat([g.edata.pop('s'), edge_feats[etype]], axis=1)
            g.edata['e'] = self.layers[etype](edata)
            g.update_all(fn.copy_e('e', 'm'), fn.mean(msg='m', out='o'))

            if dtype == 'instruction':
                instruction_dst.append(g.dstdata['o'])
            elif dtype == 'computation':
                computation_dst.append(g.dstdata['o'])
            elif dtype == "final":
                final_dst.append(g.dstdata['o'])
        instruction_dst = tf.math.add_n(instruction_dst) / len(instruction_dst)
        computation_dst = tf.math.add_n(computation_dst) / len(computation_dst)
        final_dst = tf.math.add_n(final_dst) / len(final_dst)
        return (self.activation(instruction_feats + instruction_dst),
                self.activation(computation_feats + computation_dst),
                self.activation(final_feats + final_dst))