def pass_messages(self, g):
    # Edge coefficient: product of the source and destination 'norm' values.
    g.apply_edges(GF.u_mul_v('norm', 'norm', 'coef'))
    # Per-edge messages: elementwise product of endpoint features, plus a copy of the source features.
    g.apply_edges(GF.u_mul_v('x', 'x', 'm2'))
    g.apply_edges(GF.copy_u('x', 'm1'))
    g.apply_edges(self.edge_sum)
    # Aggregate both edge messages onto the destination nodes.
    g.update_all(GF.copy_e('m1', 'm1'), GF.sum('m1', 'f1'))
    g.update_all(GF.copy_e('m2', 'm2'), GF.sum('m2', 'f2'))
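A minimal, self-contained sketch of the copy_e + sum pattern these examples rely on (assuming GF above is an alias for dgl.function, like fn elsewhere on this page); the toy graph, feature names, and sizes below are invented for illustration:

import dgl
import dgl.function as fn
import torch

# Toy graph: 3 nodes, 4 directed edges, with a made-up per-edge feature 'm1'.
g = dgl.graph(([0, 1, 2, 0], [1, 2, 0, 2]))
g.edata['m1'] = torch.randn(4, 5)
# copy_e copies the edge feature 'm1' into the message 'm1';
# sum aggregates incoming messages into the node feature 'f1'.
g.update_all(fn.copy_e('m1', 'm1'), fn.sum('m1', 'f1'))
print(g.ndata['f1'].shape)  # torch.Size([3, 5])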
Example #2
    def forward(self, g, h, h_en):
        """Forward computation

        """
        with g.local_scope():
            h_src, h_dst = expand_as_pair(h)
            h_src_en, h_dst_en = expand_as_pair(h_en)

            g.srcdata['x'] = h_src
            g.dstdata['x'] = h_dst

            g.srcdata['en'] = h_src_en
            g.dstdata['en'] = h_dst_en

            if not self.batch_norm:
                #g.update_all(self.message, fn.mean('e', 'x'))
                g.apply_edges(self.message)
                g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))
                g.update_all(fn.copy_e('e_en', 'e_en'), fn.mean('e_en', 'en'))
            else:
                g.apply_edges(self.message)

                g.edata['e'] = self.bn(g.edata['e'])

                g.update_all(fn.copy_e('e', 'e'), fn.max('e', 'x'))

                g.update_all(fn.copy_e('e_en', 'e_en'), fn.mean('e_en', 'en'))

            return g.dstdata['x'], g.dstdata['en']  #+  h_en
Example #3
    def forward(self, graph, feat):
        """Compute graph convolution.

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor
            The input edge features.

        Returns
        -------
        torch.Tensor
            The output features.
        """
        graph = graph.local_var()

        if self.in_feats > self.out_feats:
            # multiply by W first to reduce the feature size for aggregation.
            feat = self.linear(feat)
            graph.edata['h'] = feat
            graph.update_all(fn.copy_e('h', 'm'), fn.sum('m', 'h'))
            rst = graph.ndata['h']
        else:
            # aggregate first then multiply by W
            graph.edata['h'] = feat
            graph.update_all(fn.copy_e('h', 'm'), fn.sum('m', 'h'))
            rst = graph.ndata['h']
            rst = self.linear(rst)

        if self.activation is not None:
            rst = self.activation(rst)

        return rst
Example #4
    def _test(mfunc):

        g = create_test_heterograph(idtype)
        feat_size = 2

        x1 = F.randn((4, feat_size))
        x2 = F.randn((4, feat_size))
        x3 = F.randn((3, feat_size))
        x4 = F.randn((3, feat_size))
        F.attach_grad(x1)
        F.attach_grad(x2)
        F.attach_grad(x3)
        F.attach_grad(x4)
        g['plays'].edata['eid'] = x1
        g['follows'].edata['eid'] = x2
        g['develops'].edata['eid'] = x3
        g['wishes'].edata['eid'] = x4

        #################################################################
        #  apply_edges() is called on each relation type separately
        #################################################################
        with F.record_grad():
            [
                g.apply_edges(fn.copy_e('eid', 'm'), etype=rel)
                for rel in g.canonical_etypes
            ]
            r1 = g['develops'].edata['m']
            F.backward(r1, F.ones(r1.shape))
            e_grad1 = F.grad(g['develops'].edata['eid'])

        #################################################################
        #  apply_edges() is called on all relation types
        #################################################################

        g.apply_edges(fn.copy_e('eid', 'm'))
        r2 = g['develops'].edata['m']
        F.backward(r2, F.ones(r2.shape))
        e_grad2 = F.grad(g['develops'].edata['eid'])

        # correctness check
        def _print_error(a, b):
            for i, (x, y) in enumerate(
                    zip(F.asnumpy(a).flatten(),
                        F.asnumpy(b).flatten())):
                if not np.allclose(x, y):
                    print('@{} {} v.s. {}'.format(i, x, y))

        if not F.allclose(r1, r2):
            _print_error(r1, r2)
        assert F.allclose(r1, r2)
        if not F.allclose(e_grad1, e_grad2):
            print('edge grad')
            _print_error(e_grad1, e_grad2)
        assert (F.allclose(e_grad1, e_grad2))
Example #5
    def forward(self, g, feat_dict):

        funcs = {}  # message and reduce functions dict
        # for each type of edge, compute messages and reduce them all
        for srctype, etype, dsttype in g.canonical_etypes:
            if srctype == dsttype:  # for self loops
                messages = self.W1(feat_dict[srctype])
                g.nodes[srctype].data[etype] = messages  # store in ndata
                # define message and reduce functions
                funcs[(srctype, etype, dsttype)] = (fn.copy_u(etype, 'm'),
                                                    fn.sum('m', 'h'))
            else:
                src, dst = g.edges(etype=(srctype, etype, dsttype))
                norm = self.norm_dict[(srctype, etype, dsttype)]
                # compute messages
                messages = norm * (self.W1(feat_dict[srctype][src]) +
                                   self.W2(feat_dict[srctype][src] *
                                           feat_dict[dsttype][dst]))
                g.edges[(srctype, etype, dsttype)].data[etype] = messages  # store in edata
                # define message and reduce functions
                funcs[(srctype, etype, dsttype)] = (fn.copy_e(etype, 'm'),
                                                    fn.sum('m', 'h'))

        # update all: reduce type-wise first, then across different edge types
        g.multi_update_all(funcs, 'sum')
        feature_dict = {}
        for ntype in g.ntypes:
            h = self.leaky_relu(g.nodes[ntype].data['h'])  #leaky relu
            h = self.dropout(h)  #dropout
            h = F.normalize(h, dim=1, p=2)  #l2 normalize
            feature_dict[ntype] = h
        return feature_dict
Example #6
    def update_all_p_norm(self, graph):

        """ 
        Attempt at robust p-norm á:
        def robust_norm(x, p):
                a = np.abs(x).max()
                return a * norm1(x / a, p)

            def norm1(x, p):
                "First-pass implementation of p-norm."
                return (np.abs(x)**p).sum() ** (1./p) """

        p = torch.clamp(self.P, 1, 100)
        
        graph.apply_edges(fn.u_add_v('Dh', 'Eh', 'DEh'))
        graph.edata['e'] = graph.edata['DEh'] + graph.edata['Ce']
        graph.edata['sigma'] = torch.sigmoid(graph.edata['e']) # n_{ij}

        alpha = torch.max(torch.abs(torch.cat((graph.ndata['Bh'],graph.edata['sigma']), dim=0)))

        graph.ndata['Bh_pow'] = (torch.abs(graph.ndata['Bh'])/alpha).pow(p)
        graph.edata['sig_pow'] = (torch.abs(graph.edata['sigma'])/alpha).pow(p)
        # u_mul_e: elementwise product of 'Bh_pow' and 'sig_pow' forms the message 'm';
        # update_all sends it over every edge and sums it at the destination nodes.
        graph.update_all(fn.u_mul_e('Bh_pow', 'sig_pow', 'm'), fn.sum('m', 'sum_sigma_h'))

        # copy_e copies 'sig_pow' as the message; summing it gives the normaliser 'sum_sigma'.
        graph.update_all(fn.copy_e('sig_pow', 'm'), fn.sum('m', 'sum_sigma'))

        graph.ndata['h'] = graph.ndata['Ah'] + (
            (graph.ndata['sum_sigma_h'] / (graph.ndata['sum_sigma'] + 1e-6)) * alpha
        ).pow(torch.div(1, p))  # Uh + sum()

        #graph.update_all(self.message_func,self.reduce_func) 
        h = graph.ndata['h'] # result of graph convolution
        e = graph.edata['e'] # result of graph convolution
        # Call update function outside of update_all
        return h, e
Example #7
 def forward(self, g, h, weights):
     """
     g : graph
     h : node features
     weights : scalar edge weights
     """
     h_src, h_dst = h
     with g.local_scope():
         # Project the raw features on the src nodes to hidden_dims and store them in field 'n'
         g.srcdata['n'] = self.act(self.Q(self.dropout(h_src)))
         g.edata['w'] = weights.float()
         # Multiply the src-node feature 'n' by the edge weight to form message 'm'
         # Each dst node sums all received messages 'm' and stores the result in its 'n' field
         g.update_all(fn.u_mul_e('n', 'w', 'm'), fn.sum('m', 'n'))
         # Copy the edge weight 'w' into message 'm'
         # Each dst node sums all received messages 'm' into its 'ws' field
         g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'ws'))
         n = g.dstdata['n']  # weighted sum of the neighbors' embeddings
         ws = g.dstdata['ws'].unsqueeze(1).clamp(min=1)  # sum of the edge weights
         # Take the weighted average of the neighbor embeddings,
         # concatenate the dst node's own embedding from the previous layer,
         # then apply a linear transform and non-linearity to get each dst node's new embedding
         z = self.act(self.W(self.dropout(torch.cat([n / ws, h_dst], 1))))
         z_norm = z.norm(2, 1, keepdim=True)
         z_norm = torch.where(z_norm == 0,
                              torch.tensor(1.).to(z_norm), z_norm)
         z = z / z_norm
         return z
Example #8
    def forward(self, g, node_feats, edge_feats):
        """Update node representations.

        Parameters
        ----------
        g : DGLGraph
            DGLGraph for a batch of graphs
        node_feats : FloatTensor of shape (N, node_feats)
            Input node features. N for the total number of nodes in the batch of graphs.
        edge_feats : FloatTensor of shape (E, in_edge_feats)
            Input edge features. E for the total number of edges in the batch of graphs.

        Returns
        -------
        FloatTensor of shape (N, node_feats)
            Updated node representations.
        """
        g = g.local_var()
        edge_feats = self.project_in_edge_feats(edge_feats)

        g.ndata['feat'] = node_feats
        g.apply_edges(fn.copy_u('feat', 'e'))
        g.edata['e'] = F.relu(edge_feats + g.edata['e'])
        g.update_all(fn.copy_e('e', 'm'), fn.sum('m', 'feat'))

        rst = g.ndata['feat']
        rst = self.project_out(rst + (1 + self.eps) * node_feats)

        return rst
Example #9
    def call(self, graph, op_feats, device_feats, edge_feats):
        op_dst, device_dst = [], []
        for stype, etype, dtype in graph.canonical_etypes:
            g = graph[etype].local_var()

            if stype == 'op':
                g.srcdata['i'] = op_feats
            elif stype == 'device':
                g.srcdata['i'] = device_feats

            g.apply_edges(fn.copy_u('i', 's'))
            edata = tf.concat([g.edata.pop('s'), edge_feats[etype]], axis=1)
            g.edata['e'] = self.layers[etype](edata)
            g.update_all(fn.copy_e('e', 'm'), fn.mean(msg='m', out='o'))

            if dtype == 'op':
                op_dst.append(g.dstdata['o'])
            elif dtype == 'device':
                device_dst.append(g.dstdata['o'])

        op_dst = tf.math.add_n(op_dst) / len(op_dst)
        device_dst = tf.math.add_n(device_dst) / len(device_dst)

        return self.activation(op_feats +
                               op_dst), self.activation(device_feats +
                                                        device_dst)
Example #10
    def run(self, mol_graph, mol_line_graph):
        n_nodes = mol_graph.number_of_nodes()

        mol_graph.apply_edges(func=lambda edges: {'src_x': edges.src['x']}, )
        mol_line_graph.ndata.update(mol_graph.edata)

        e_repr = mol_line_graph.ndata
        bond_features = e_repr['x']
        source_features = e_repr['src_x']

        features = torch.cat([source_features, bond_features], 1)
        msg_input = self.W_i(features)
        mol_line_graph.ndata.update({
            'msg_input': msg_input,
            'msg': F.relu(msg_input),
            'accum_msg': torch.zeros_like(msg_input),
        })
        mol_graph.ndata.update({
            'm':
            bond_features.new(n_nodes, self.hidden_size).zero_(),
            'h':
            bond_features.new(n_nodes, self.hidden_size).zero_(),
        })

        for i in range(self.depth - 1):
            mol_line_graph.update_all(DGLF.copy_u('msg', 'msg'),
                                      DGLF.sum('msg', 'accum_msg'))
            mol_line_graph.apply_nodes(self.loopy_bp_updater)

        mol_graph.edata.update(mol_line_graph.ndata)
        mol_graph.update_all(DGLF.copy_e('msg', 'msg'), DGLF.sum('msg', 'm'))
        mol_graph.apply_nodes(self.gather_updater)

        return mol_graph
Example #11
    def forward(self, g, node_feats, edge_feats, expanded_dists):
        """Performs message passing and updates node and edge representations.

        Parameters
        ----------
        g : DGLGraph
            DGLGraph for a batch of graphs.
        node_feats : float32 tensor of shape (V, feats)
            Input node features.
        edge_feats : float32 tensor of shape (E, feats)
            Input edge features.
        expanded_dists : float32 tensor of shape (E, dist_feats)
            Expanded distances, i.e. the output of RBFExpansion.

        Returns
        -------
        node_feats : float32 tensor of shape (V, feats)
            Updated node representations.
        edge_feats : float32 tensor of shape (E, feats)
            Edge representations, updated if ``update_edge == True`` in initialization.
        """
        expanded_dists = self.update_dists(expanded_dists)
        if self.update_edge_feats is not None:
            edge_feats = self.update_edge_feats(edge_feats)

        g = g.local_var()
        g.ndata.update({'hv': node_feats})
        g.edata.update({'dist': expanded_dists, 'he': edge_feats})
        g.update_all(message_func=[fn.u_mul_e('hv', 'dist', 'm_0'),
                                   fn.copy_e('he', 'm_1')],
                     reduce_func=[fn.sum('m_0', 'hv_0'),
                                  fn.sum('m_1', 'hv_1')])
        node_feats = g.ndata.pop('hv_0') + g.ndata.pop('hv_1')

        return node_feats, edge_feats
Example #12
def calculate_homophily(g,
                        labels,
                        K=1,
                        method="edge",
                        multilabels=False,
                        heterograph=False):
    assert method in ["edge", "node"]
    if multilabels:
        assert len(labels.shape) == 2
    else:
        if (labels.max() == 1) and len(labels.shape) > 1:
            labels = labels.argmax(dim=1)
    if heterograph:
        target_mask = g.ndata['target_mask']
        target_ids = g.ndata[dgl.NID][target_mask]
        num_target = target_mask.sum().item()
        g = g.subgraph(np.arange(g.number_of_nodes())[target_mask])
    g = dgl.khop_graph(g, K)
    src, dst = g.edges()
    # if multilabels:
    #     out = 0
    #     for c in labels.shape[1]:
    #         mask = (labels[src, c])
    mask = (labels[src] == labels[dst]).float()
    if method == "edge":
        out = mask.mean(dim=0)
    elif method == "node":
        g.edata["mask"] = mask
        g.update_all(fn.copy_e("mask", "m"), fn.mean("m", "out"))
        out = g.ndata.pop("out").mean(dim=0)
    # for multilabels, we average homophily across labels

    return out.mean(0).item()
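A hedged usage sketch for the function above, on an invented toy graph and labels (it assumes calculate_homophily and its dgl/fn imports are in scope); two of the three edges join same-label endpoints, so the edge homophily is 2/3:

import dgl
import torch

g = dgl.graph(([0, 1, 2], [1, 2, 3]))
labels = torch.tensor([0, 0, 0, 1])
print(calculate_homophily(g, labels, K=1, method="edge"))  # ≈ 0.667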
Example #13
    def forward(self, h, G=None, basis=None, **kwargs):
        """Forward pass of the linear layer

        Args:
            G: minibatch of (homo)graphs
            h: dict of features
            basis: pre-computed Q * Y
        Returns: 
            tensor with new features [B, n_points, n_features_out]
        """
        with G.local_scope():
            # Add node features to local graph scope
            for k, v in h.items():
                G.ndata[k] = v

            # Add edge features
            for (mi, di) in self.f_in.structure:
                for (mo, do) in self.f_out.structure:
                    etype = f'({di},{do})'
                    G.edata[etype] = self.kernel_unary[etype](G.edata['feat'], basis)

            # Perform message-passing for each output feature type
            for d in self.f_out.degrees:
                G.apply_edges(self.udf_u_mul_e(d))
                G.update_all(fn.copy_e('msg', 'msg'), fn.mean('msg', f'out{d}'))

            return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
Example #14
 def forward(self, g, h):
     with g.local_scope():
         g.ndata['x'] = h
         # generate the message and store it on the edges
         g.apply_edges(self.message)
         # process the message
         e = g.edata['e']
         for i in range(self.num_layers):
             if i > 0:
                 e = self.fcs[i - 1](e)
             if self.batch_norm:
                 e = self.bns[i](e)
             if self.activation:
                 e = self.acts[i](e)
         g.edata['e'] = e
         # pass the message and update the nodes
         g.update_all(fn.copy_e('e', 'e'), fn.mean('e', 'x'))
         # shortcut connection
         x = g.ndata.pop('x')
         g.edata.pop('e')
         if self.sc is None:
             sc = h
         else:
             sc = self.sc(h)
             if self.batch_norm:
                 sc = self.sc_bn(sc)
         if self.activation:
             return self.sc_act(x + sc)
         else:
             return x + sc
Example #15
    def forward(self, mol_graph):
        mol_graph = mol_graph.local_var()
        line_mol_graph = dgl.line_graph(mol_graph, backtracking=False)

        line_input = self.W_i(mol_graph.edata['x'])
        line_mol_graph.ndata['msg_input'] = line_input
        line_mol_graph.ndata['msg'] = F.relu(line_input)

        # Message passing over the line graph
        for _ in range(self.depth - 1):
            line_mol_graph.update_all(message_func=fn.copy_u('msg', 'msg'),
                                      reduce_func=fn.sum('msg', 'nei_msg'))
            nei_msg = self.W_h(line_mol_graph.ndata['nei_msg'])
            line_mol_graph.ndata['msg'] = F.relu(line_input + nei_msg)

        # Message passing over the raw graph
        mol_graph.edata['msg'] = line_mol_graph.ndata['msg']
        mol_graph.update_all(message_func=fn.copy_e('msg', 'msg'),
                             reduce_func=fn.sum('msg', 'nei_msg'))

        raw_input = torch.cat([mol_graph.ndata['x'], mol_graph.ndata['nei_msg']], dim=1)
        mol_graph.ndata['atom_hiddens'] = self.W_o(raw_input)

        # Readout
        mol_vecs = dgl.mean_nodes(mol_graph, 'atom_hiddens')

        return mol_vecs
Example #16
def calc_author_citation(g):
    """使用论文引用数加权求和计算学者引用数

    :param g: DGLGraph, author-paper bipartite graph
    :return: tensor(N_author), citation count of each author
    """
    import dgl.function as fn
    from dgl.ops import edge_softmax
    with g.local_scope():
        # The k-th author has weight 1/k; the last author is treated as the corresponding author and gets weight 1/2
        g.edges['writes'].data['w'] = 1.0 / g.edges['writes'].data['order']
        g.update_all(fn.copy_e('w', 'w'), fn.min('w', 'mw'), etype='writes')
        g.apply_edges(fn.copy_u('mw', 'mw'), etype='writes_rev')
        w, mw = g.edges['writes'].data.pop(
            'w'), g.edges['writes_rev'].data.pop('mw')
        w[w == mw] = 0.5

        # Normalize the author weights within each paper, then take each author's weighted sum of paper citations
        p = edge_softmax(g['author', 'writes', 'paper'],
                         torch.log(w).unsqueeze(dim=1))
        g.edges['writes_rev'].data['p'] = p.squeeze(dim=1)
        g.update_all(fn.u_mul_e('citation', 'p', 'c'),
                     fn.sum('c', 'c'),
                     etype='writes_rev')
        return g.nodes['author'].data['c']
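A small usage sketch of calc_author_citation on an invented toy graph with 2 authors and 2 papers; like the function itself, it assumes the 'writes' and 'writes_rev' edges are stored in matching order and that torch is imported in the function's module:

import dgl
import torch

# Author 0 is first author of both papers; author 1 is the last (2nd) author of paper 1.
writes_src, writes_dst = torch.tensor([0, 0, 1]), torch.tensor([0, 1, 1])
g = dgl.heterograph({
    ('author', 'writes', 'paper'): (writes_src, writes_dst),
    ('paper', 'writes_rev', 'author'): (writes_dst, writes_src),
})
g.edges['writes'].data['order'] = torch.tensor([1.0, 1.0, 2.0])
g.nodes['paper'].data['citation'] = torch.tensor([10.0, 4.0])
print(calc_author_citation(g))  # roughly tensor([12.67, 1.33])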
Example #17
    def forward(self, g, h, e):

        h_in = h  # for residual connection

        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        #g.update_all(self.message_func,self.reduce_func)
        g.apply_edges(fn.u_add_v('Dh', 'Eh', 'e'))
        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'),
                     fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (
            g.ndata['sum_sigma'] + 1e-6)
        h = g.ndata['h']  # result of graph convolution

        if self.batch_norm:
            h = self.bn_node_h(h)  # batch normalization

        h = F.relu(h)  # non-linear activation

        if self.residual:
            h = h_in + h  # residual connection

        h = F.dropout(h, self.dropout, training=self.training)

        return h, e
Example #18
    def forward(self, graph):
        node_num = graph.ndata['h'].size(0)

        Q = self.query(graph.ndata['h'])
        K = self.key(graph.ndata['h'])
        V = self.value(graph.ndata['h'])

        Q = self.transpose_for_scores(Q)
        K = self.transpose_for_scores(K)
        V = self.transpose_for_scores(V)

        graph.ndata['Q'] = Q
        graph.ndata['K'] = K
        graph.ndata['V'] = V

        graph.apply_edges(fn.u_mul_v('K', 'Q', 'attn_probs'))
        graph.edata['attn_probs'] = graph.edata['attn_probs'].sum(-1,
                                                                  keepdim=True)
        graph.edata['attn_probs'] = edge_softmax(graph,
                                                 graph.edata['attn_probs'])
        graph.edata['attn_probs'] = self.dropout(graph.edata['attn_probs'])
        graph.apply_edges(fn.u_mul_e('V', 'attn_probs', 'attn_values'))

        graph.register_message_func(fn.copy_e('attn_values', 'm'))
        graph.register_reduce_func(fn.sum('m', 'h'))
        graph.update_all()
        graph.ndata['h'] = graph.ndata['h'].view([node_num, -1])

        return graph
Example #19
File: model.py  Project: yxd886/fuse
    def call(self, graph, instruction_feats, computation_feats, final_feats,
             edge_feats):
        instruction_dst, computation_dst, final_dst = [], [], []
        for stype, etype, dtype in graph.canonical_etypes:
            g = graph[etype].local_var()

            if stype == 'instruction':
                g.srcdata['i'] = instruction_feats
            elif stype == 'computation':
                g.srcdata['i'] = computation_feats
            elif stype == "final":
                g.srcdata['i'] = final_feats

            g.apply_edges(fn.copy_u('i', 's'))
            edata = tf.concat([g.edata.pop('s'), edge_feats[etype]], axis=1)
            g.edata['e'] = self.layers[etype](edata)
            g.update_all(fn.copy_e('e', 'm'), fn.mean(msg='m', out='o'))

            if dtype == 'instruction':
                instruction_dst.append(g.dstdata['o'])
            elif dtype == 'computation':
                computation_dst.append(g.dstdata['o'])
            elif dtype == "final":
                final_dst.append(g.dstdata['o'])
        instruction_dst = tf.math.add_n(instruction_dst) / len(instruction_dst)
        computation_dst = tf.math.add_n(computation_dst) / len(computation_dst)
        final_dst = tf.math.add_n(final_dst) / len(final_dst)
        return self.activation(
            instruction_feats + instruction_dst), self.activation(
                computation_feats +
                computation_dst), self.activation(final_feats + final_dst)
Example #20
    def collate(self, items):
        '''
        items: edge id in graph g.
        We sample iteratively k-times and batch them into one single subgraph.
        '''
        # only sample edges before the current timestamp
        current_ts = self.g.edata['timestamp'][items[0]]
        self.graph_sampler.ts = current_ts  # store the current timestamp on the graph sampler.

        # for link prediction, we use a negative_sampler to generate a negative graph for computing the loss.
        if self.negative_sampler is None:
            neg_pair_graph = None
            input_nodes, pair_graph, blocks = self._collate(items)
        else:
            input_nodes, pair_graph, neg_pair_graph, blocks = self._collate_with_negative_sampling(
                items)

        # we sample the k-hop subgraphs and batch them into one graph
        for i in range(self.n_layer - 1):
            self.graph_sampler.frontiers[0].add_edges(
                *self.graph_sampler.frontiers[i + 1].edges())
        frontier = self.graph_sampler.frontiers[0]
        # compute each node's last-update timestamp
        frontier.update_all(fn.copy_e('timestamp', 'ts'),
                            fn.max('ts', 'timestamp'))

        return input_nodes, pair_graph, neg_pair_graph, [frontier]
Example #21
    def forward(self, G, save_steps=False, *, inside_loop_callback=None):
        G_steps = []
        num_steps = 0

        params = G.edata['potential_parameters']
        initial_x = torch.clone(G.ndata['x'])
        initial_x.requires_grad_()

        converged = False

        for i in range(self.STEPS_LIMIT):
            num_steps += 1
            update_relative_positions(G)
            G.edata['r'] = get_r(G)

            if inside_loop_callback is not None:
                inside_loop_callback(G)

            if save_steps:
                update_potential_values(G)  # only needed if save_steps==True
                G_steps.append(copy_dgl_graph(G))

            update_direction = G.edata['d'] / torch.norm(
                G.edata['d'], dim=2, keepdim=True)
            update_size = -potential_gradient(G.edata['r'],
                                              params) * self._step_size
            update_size = torch.clamp(update_size, -0.1, 0.1)

            G.edata['update'] = update_size * update_direction
            G.update_all(fn.copy_e('update', 'message'),
                         fn.sum('message', 'update'))
            G.ndata['x'] = G.ndata['x'] + G.ndata['update']

            update_norms = torch.norm(G.ndata['update'], dim=2)
            max_update_norm = torch.max(update_norms)

            if max_update_norm < (self._convergence_tolerance *
                                  self._step_size):
                if inside_loop_callback is not None:
                    inside_loop_callback(G)
                converged = True
                break

        if not converged:
            print("Gradient Descent failed to converge after 5000 steps.")

        overall_update = G.ndata['x'] - initial_x
        G.ndata['update_norm'] = torch.sqrt(
            torch.sum(overall_update**2, -1, keepdim=True))

        update_potential_values(G)  # compute regardless of save_steps

        if save_steps:
            G_steps.append(copy_dgl_graph(G))
            num_steps = 10
            indices_to_keep = np.linspace(0, len(G_steps) - 1, num_steps)
            G_steps = [G_steps[int(i)] for i in indices_to_keep]

        return G, G_steps
Example #22
def get_current_ts(pos_graph, neg_graph):
    with pos_graph.local_scope():
        pos_graph_ = dgl.add_reverse_edges(pos_graph, copy_edata=True)
        pos_graph_.update_all(fn.copy_e('timestamp', 'times'),
                              fn.max('times', 'ts'))
        current_ts = pos_ts = pos_graph_.ndata['ts']
        num_pos_nodes = pos_graph_.num_nodes()
    with neg_graph.local_scope():
        neg_graph_ = dgl.add_reverse_edges(neg_graph)
        neg_graph_.edata['timestamp'] = pos_graph_.edata['timestamp']
        neg_graph_.update_all(fn.copy_e('timestamp', 'times'),
                              fn.max('times', 'ts'))
        num_pos_nodes = torch.where(pos_graph_.ndata['ts'] > 0)[0].shape[0]
        pos_ts = pos_graph_.ndata['ts'][:num_pos_nodes]
        neg_ts = neg_graph_.ndata['ts'][num_pos_nodes:]
        current_ts = torch.cat([pos_ts, neg_ts])
    return current_ts, pos_ts, num_pos_nodes
Example #23
 def forward(self, graph, feat):
     graph = graph.local_var()
     if isinstance(feat, tuple):
         feat_src, feat_dst = feat
     else:
         feat_src = feat_dst = feat
     h_self = feat_dst
     # DIN attention: take the two vectors, their difference, and their elementwise product,
     # pass each through an MLP to n_hidden, sum them, then map to a single score with another MLP
     ## compute the difference and the product of the two vectors
     graph.srcdata.update({'e_src': feat_src})
     graph.dstdata.update({'e_dst': feat_dst})
     graph.apply_edges(fn.u_sub_v('e_src', 'e_dst', 'e_sub'))
     graph.apply_edges(fn.u_mul_v('e_src', 'e_dst', 'e_mul'))
     ## pass each part through its own MLP
     graph.srcdata["e_src"] = self.atten_src(feat_src)
     graph.dstdata["e_dst"] = self.atten_dst(feat_dst)
     graph.edata["e_sub"] = self.atten_sub(graph.edata["e_sub"])
     graph.edata["e_mul"] = self.atten_mul(graph.edata["e_mul"])
     ## "MLP then add" instead of "concat then MLP"
     graph.edata["e"] = graph.edata.pop("e_sub") + graph.edata.pop("e_mul")
     graph.apply_edges(fn.e_add_u('e', 'e_src', 'e'))
     graph.apply_edges(fn.e_add_v('e', 'e_dst', 'e'))
     graph.srcdata.pop("e_src")
     graph.dstdata.pop("e_dst")
     ## first-layer activation
     graph.edata["e"] = F.gelu(graph.edata["e"])
     ## second MLP layer maps down to 1
     graph.edata["e"] = self.leaky_relu(self.atten_out(graph.edata["e"]))
     # max pool
     graph.srcdata['h'] = F.gelu(self.fc_pool(feat_src))
     graph.apply_edges(fn.e_mul_u('e', 'h', 'h'))
     graph.update_all(fn.copy_e('h', 'm'), fn.max('m', 'neigh'))
     h_neigh = graph.dstdata['neigh']
     # mean pool
     graph.srcdata['h'] = F.gelu(self.fc_pool2(feat_src))
     graph.apply_edges(fn.e_mul_u('e', 'h', 'h'))
     graph.update_all(fn.copy_e('h', 'm'), fn.mean('m', 'neigh'))
     h_neigh2 = graph.dstdata['neigh']
     # concat
     rst = self.fc_self(h_self) + self.fc_neigh(h_neigh) + self.fc_neigh2(h_neigh2)
     # mlps
     if len(self.out_mlp) > 0:
         for layer in self.out_mlp:
             o = layer(F.gelu(rst))
             rst = rst + o
     return rst
Example #24
def calc_weight(g):
    """计算行归一化的D^(-1/2)AD(-1/2)"""
    with g.local_scope():
        g.ndata['in_degree'] = g.in_degrees().float().pow(-0.5)
        g.ndata['out_degree'] = g.out_degrees().float().pow(-0.5)
        g.apply_edges(fn.u_mul_v('out_degree', 'in_degree', 'weight'))
        g.update_all(fn.copy_e('weight', 'msg'), fn.sum('msg', 'norm'))
        g.apply_edges(fn.e_div_v('weight', 'norm', 'weight'))
        return g.edata['weight']
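A small usage sketch (graph and feature sizes invented) that feeds the weights returned by calc_weight back into one normalized propagation step:

import dgl
import dgl.function as fn
import torch

# A 4-node cycle, so every in/out degree is 1 and no pow(-0.5) blows up.
g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))
g.ndata['h'] = torch.randn(4, 8)
g.edata['w'] = calc_weight(g).unsqueeze(-1)  # shape (E, 1), broadcasts over the feature dim
g.update_all(fn.u_mul_e('h', 'w', 'm'), fn.sum('m', 'h_new'))
print(g.ndata['h_new'].shape)  # torch.Size([4, 8])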
Example #25
    def forward(self, g, h, e):

        ########## Message-passing sub-layer ##########

        h_in = h  # for residual connection
        e_in = e  # for residual connection

        if self.batch_norm == True:
            h = self.norm1_h(h)  # batch normalization
            e = self.norm1_e(e)  # batch normalization

        # Linear transformations of nodes and edges
        g.ndata['h'] = h
        g.edata['e'] = e
        g.ndata['Ah'] = self.A(h)  # node update, self-connection
        g.ndata['Bh'] = self.B(h)  # node update, neighbor projection
        g.ndata['Ch'] = self.C(h)  # edge update, source node projection
        g.ndata['Dh'] = self.D(h)  # edge update, destination node projection
        g.edata['Ee'] = self.E(e)  # edge update, edge projection

        # Graph convolution with dense attention mechanism
        g.apply_edges(fn.u_add_v('Ch', 'Dh', 'CDh'))
        g.edata['e'] = g.edata['CDh'] + g.edata['Ee']
        # Dense attention mechanism
        g.edata['sigma'] = torch.sigmoid(g.edata['e'])
        g.update_all(fn.u_mul_e('Bh', 'sigma', 'm'),
                     fn.sum('m', 'sum_sigma_h'))
        g.update_all(fn.copy_e('sigma', 'm'), fn.sum('m', 'sum_sigma'))
        # Gated-Mean aggregation
        g.ndata['h'] = g.ndata['Ah'] + g.ndata['sum_sigma_h'] / (
            g.ndata['sum_sigma'] + 1e-10)
        h = g.ndata['h']  # result of graph convolution
        e = g.edata['e']  # result of graph convolution

        if self.residual == True:
            h = h_in + h  # residual connection
            e = e_in + e  # residual connection

        ############ Feedforward sub-layer ############

        h_in = h  # for residual connection
        e_in = e  # for residual connection

        if self.batch_norm == True:
            h = self.norm2_h(h)  # batch normalization
            e = self.norm2_e(e)  # batch normalization

        # MLPs on updated node and edge features
        h = self.ff_h(h)
        e = self.ff_e(e)

        if self.residual == True:
            h = h_in + h  # residual connection
            e = e_in + e  # residual connection

        return h, e
Example #26
    def forward(self, g):
        g.edata["h"] = self.bond_layer(g.edata["w"])

        #g.send(g.edges(), lambda edges: {"msg": edges.data["h"]})
        #g.recv(g.nodes(), lambda nodes: {"h": torch.sum(nodes.mailbox["msg"], dim=1)})
        g.send_and_recv(g.edges(), fn.copy_e("h", "m"), fn.sum("m", "h"))
        
        h = g.ndata.pop("h") + self.atom_layer(g.ndata["x"])
        h = self.activation(self.bn(h))
        return self.dropout(h)
Example #27
File: conv.py  Project: edwardelson/ogb
    def forward(self, g, x, edge_attr):
        with g.local_scope():
            edge_embedding = self.bond_encoder(edge_attr)
            g.ndata['x'] = x
            g.apply_edges(fn.copy_u('x', 'm'))
            g.edata['m'] = F.relu(g.edata['m'] + edge_embedding)
            g.update_all(fn.copy_e('m', 'm'), fn.sum('m', 'new_x'))
            out = self.mlp((1 + self.eps) * x + g.ndata['new_x'])

            return out
Example #28
File: gat.py  Project: lukovnikov/dgl
def preprocess(graph, labels):
    global n_node_feats

    # The sum of the weights of adjacent edges is used as node features.
    graph.update_all(fn.copy_e("feat", "feat_copy"), fn.sum("feat_copy", "feat"))
    n_node_feats = graph.ndata["feat"].shape[-1]

    graph.create_formats_()

    return graph, labels
Example #29
File: model.py  Project: yifeim/dgl
    def forward(self, g, inputs):
        """Forward computation

        Parameters
        ----------
        g : DGLHeteroGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.

        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
            wdict = {
                self.rel_names[i]: {
                    'weight': w.squeeze(0)
                }
                for i, w in enumerate(th.split(weight, 1, dim=0))
            }
        else:
            wdict = {}

        inputs_src = inputs_dst = inputs

        for srctype, _, _ in g.canonical_etypes:
            g.nodes[srctype].data['h'] = inputs[srctype]

        if self.use_weight:
            g.apply_edges(fn.copy_u('h', 'm'))
            m = g.edata['m']
            for rel in g.canonical_etypes:
                _, etype, _ = rel
                g.edges[rel].data['h*w_r'] = th.matmul(m[rel],
                                                       wdict[etype]['weight'])
        else:
            g.apply_edges(fn.copy_u('h', 'h*w_r'))

        g.update_all(fn.copy_e('h*w_r', 'm'), fn.sum('m', 'h'))

        def _apply(ntype):
            h = g.nodes[ntype].data['h']
            if self.self_loop:
                h = h + th.matmul(inputs_dst[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(ntype) for ntype in g.dsttypes}
Example #30
def preprocess(graph, use_label=False):
    # add additional features
    graph.update_all(fn.copy_e("feat", "e"), fn.sum("e", "feat_add"))
    if use_label:
        graph.ndata['feat'] = th.cat(
            (graph.ndata['feat_add'], graph.ndata['feat']), dim=1)
    else:
        graph.ndata['feat'] = graph.ndata['feat_add']
    graph.create_formats_()

    return graph