Example #1
from dgl.nn import GraphConv, HeteroGraphConv
import torch.nn as nn


class RGCN(nn.Module):  # class wrapper and imports added; name follows the DGL user guide
    def __init__(self, in_feats, hid_feats, out_feats, rel_names):
        super().__init__()
        # One GraphConv per relation; per-destination-type results are
        # aggregated by summation.
        self.conv1 = HeteroGraphConv({
            rel: GraphConv(in_feats, hid_feats) for rel in rel_names
        }, aggregate='sum')
        self.conv2 = HeteroGraphConv({
            rel: GraphConv(hid_feats, out_feats) for rel in rel_names
        }, aggregate='sum')
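
Only the constructor is shown above. A minimal usage sketch follows; the user/game schema, the feature sizes, and the manual two-layer forward pass are illustrative assumptions, not part of the original example:

import dgl
import torch
import torch.nn.functional as F

# Hypothetical two-relation heterogeneous graph (every node has an in-edge,
# so the default GraphConv zero-in-degree check passes).
g = dgl.heterograph({
    ('user', 'follows', 'user'): (torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])),
    ('user', 'plays', 'game'): (torch.tensor([0, 2]), torch.tensor([0, 1])),
})
model = RGCN(in_feats=8, hid_feats=16, out_feats=4, rel_names=g.etypes)

feats = {'user': torch.randn(3, 8), 'game': torch.randn(2, 8)}
h = model.conv1(g, feats)                  # dict: ntype -> (N, hid_feats)
h = {k: F.relu(v) for k, v in h.items()}   # nonlinearity between layers
out = model.conv2(g, h)                    # dict: ntype -> (N, out_feats)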
Example #2
import torch.nn as nn
from dgl.nn import GraphConv, HeteroGraphConv


class RGCNLayer(nn.Module):  # class name assumed; only __init__ was given
    def __init__(self,
                 in_dim,
                 out_dim,
                 ntypes,
                 etypes,
                 activation=None,
                 dropout=0.0):
        """R-GCN layer (for heterogeneous graphs).

        :param in_dim: input feature dimension
        :param out_dim: output feature dimension
        :param ntypes: List[str] node types
        :param etypes: List[str] edge types
        :param activation: callable, optional activation function (default: None)
        :param dropout: float, optional dropout probability (default: 0)
        """
        super().__init__()
        self.activation = activation
        self.dropout = nn.Dropout(dropout)

        # One GraphConv per edge type; no bias, right normalization,
        # per-destination-type results summed.
        self.conv = HeteroGraphConv(
            {
                etype: GraphConv(in_dim, out_dim, norm='right', bias=False)
                for etype in etypes
            }, 'sum')
        # Separate self-loop transform for each node type.
        self.loop_weight = nn.ModuleDict({
            ntype: nn.Linear(in_dim, out_dim, bias=False)
            for ntype in ntypes
        })
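
Only the constructor is given; a plausible forward pass combines the per-relation convolution with the per-type self-loop transform. The method below is a hedged reconstruction, not the original code:

    def forward(self, g, feats):
        # feats: dict ntype -> (N_ntype, in_dim)
        out = self.conv(g, feats)  # message passing over every edge type
        for ntype, h in out.items():
            h = h + self.loop_weight[ntype](feats[ntype])  # self-loop term
            if self.activation is not None:
                h = self.activation(h)
            out[ntype] = self.dropout(h)
        return out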
Example #3
import torch
import torch.nn as nn
from dgl.nn import HeteroGraphConv


class HGTLayer(nn.Module):  # class name assumed; only __init__ was given
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 ntypes,
                 etypes,
                 dropout=0.2,
                 use_norm=True):
        """HGT layer.

        :param in_dim: int input feature dimension
        :param out_dim: int output feature dimension
        :param num_heads: int number of attention heads K
        :param ntypes: List[str] node types
        :param etypes: List[(str, str, str)] canonical edge types
        :param dropout: float, optional dropout probability (default: 0.2)
        :param use_norm: bool, optional whether to use layer normalization (default: True)
        """
        super().__init__()
        d_k = out_dim // num_heads
        # Per-node-type K/Q/V projections, shared across the relations they
        # participate in (registered through the HGTAttention submodules).
        k_linear = {ntype: nn.Linear(in_dim, out_dim) for ntype in ntypes}
        q_linear = {ntype: nn.Linear(in_dim, out_dim) for ntype in ntypes}
        v_linear = {ntype: nn.Linear(in_dim, out_dim) for ntype in ntypes}
        # Per-edge-type attention and message transforms, keyed by etype name.
        w_att = {
            r[1]: nn.Parameter(torch.Tensor(num_heads, d_k, d_k))
            for r in etypes
        }
        w_msg = {
            r[1]: nn.Parameter(torch.Tensor(num_heads, d_k, d_k))
            for r in etypes
        }
        mu = {r[1]: nn.Parameter(torch.ones(num_heads)) for r in etypes}
        self.reset_parameters(w_att, w_msg)  # initializes w_att/w_msg; defined elsewhere in the source
        # HGTAttention is a custom per-relation module defined elsewhere.
        self.conv = HeteroGraphConv(
            {
                etype: HGTAttention(out_dim, num_heads, k_linear[stype],
                                    q_linear[dtype], v_linear[stype],
                                    w_att[etype], w_msg[etype], mu[etype])
                for stype, etype, dtype in etypes
            }, 'mean')

        self.a_linear = nn.ModuleDict(
            {ntype: nn.Linear(out_dim, out_dim)
             for ntype in ntypes})
        # Learnable per-type skip-connection gates.
        self.skip = nn.ParameterDict(
            {ntype: nn.Parameter(torch.ones(1))
             for ntype in ntypes})
        self.drop = nn.Dropout(dropout)

        self.use_norm = use_norm
        if use_norm:
            self.norms = nn.ModuleDict(
                {ntype: nn.LayerNorm(out_dim)
                 for ntype in ntypes})
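
The constructor's skip gates, a_linear projections, and layer norms suggest a forward pass with a gated residual per node type. The sketch below is a hedged reconstruction that assumes in_dim == out_dim so the residual is well-formed:

    def forward(self, g, feats):
        # feats: dict ntype -> (N, in_dim)
        out = self.conv(g, feats)  # heterogeneous multi-head attention
        for ntype, h in out.items():
            alpha = torch.sigmoid(self.skip[ntype])     # learnable gate
            h = self.drop(self.a_linear[ntype](h))
            h = alpha * h + (1 - alpha) * feats[ntype]  # assumes in_dim == out_dim
            out[ntype] = self.norms[ntype](h) if self.use_norm else h
        return out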
Example #4
import torch
import torch.nn as nn
from dgl.nn import GraphConv, HeteroGraphConv, WeightBasis


class RelGraphConvLayer(nn.Module):  # class name assumed; only __init__ was given
    def __init__(self,
                 in_dim,
                 out_dim,
                 rel_names,
                 num_bases=None,
                 weight=True,
                 self_loop=True,
                 activation=None,
                 dropout=0.0):
        """R-GCN layer (for heterogeneous graphs).

        :param in_dim: input feature dimension
        :param out_dim: output feature dimension
        :param rel_names: List[str] relation names
        :param num_bases: int, optional number of bases (default: the number of relations)
        :param weight: bool, optional whether to apply a linear transformation (default: True)
        :param self_loop: bool, optional whether to include self-loop messages (default: True)
        :param activation: callable, optional activation function (default: None)
        :param dropout: float, optional dropout probability (default: 0)
        """
        super().__init__()
        self.rel_names = rel_names
        self.self_loop = self_loop
        self.activation = activation
        self.dropout = nn.Dropout(dropout)

        # Relation weights are supplied externally (weight=False), so the
        # layer can share them through a basis decomposition.
        self.conv = HeteroGraphConv({
            rel: GraphConv(in_dim,
                           out_dim,
                           norm='right',
                           weight=False,
                           bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        if not num_bases:
            num_bases = len(rel_names)
        # Basis decomposition only pays off with fewer bases than relations.
        self.use_basis = weight and 0 < num_bases < len(rel_names)
        if self.use_weight:
            if self.use_basis:
                self.basis = WeightBasis((in_dim, out_dim), num_bases,
                                         len(rel_names))
            else:
                self.weight = nn.Parameter(
                    torch.Tensor(len(rel_names), in_dim, out_dim))
                nn.init.xavier_uniform_(self.weight,
                                        nn.init.calculate_gain('relu'))

        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_dim, out_dim))
            nn.init.xavier_uniform_(self.loop_weight,
                                    nn.init.calculate_gain('relu'))
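
Because the per-relation convolutions are built with weight=False, the relation weights must be injected at call time through HeteroGraphConv's mod_kwargs argument, roughly as in DGL's rgcn-hetero example. The body below is an approximation, not the original code:

    def forward(self, g, inputs):
        # inputs: dict ntype -> (N, in_dim)
        if self.use_weight:
            # (num_rels, in_dim, out_dim), composed from bases if enabled
            weight = self.basis() if self.use_basis else self.weight
            wdict = {rel: {'weight': weight[i]}
                     for i, rel in enumerate(self.rel_names)}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)  # per-relation conv, summed
        out = {}
        for ntype, h in hs.items():
            if self.self_loop:
                h = h + inputs[ntype] @ self.loop_weight
            if self.activation is not None:
                h = self.activation(h)
            out[ntype] = self.dropout(h)
        return out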