Example #1
    def create_conv(self, in_feat, out_feat, rel_names):
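        # Commented-out alternative: basis-decomposed GAT parameters shared
        # across all relations (see the BasisGATConv branch further down).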
        # self.attentive_aggregator = AttentiveAggregator(out_feat, use_checkpoint=self.use_att_checkpoint)
        #
        # num_heads = 1
        # basis_size = 10
        # basis = torch.nn.Parameter(torch.Tensor(2, basis_size, in_feat, out_feat * num_heads))
        # attn_basis = torch.nn.Parameter(torch.Tensor(2, basis_size, num_heads, out_feat))
        # basis_coef = nn.ParameterDict({rel: torch.nn.Parameter(torch.rand(basis_size,)) for rel in rel_names})
        #
        # torch.nn.init.xavier_normal_(basis, gain=1.)
        # torch.nn.init.xavier_normal_(attn_basis, gain=1.)

        self.conv = dglnn.HeteroGraphConv(
            {
                rel: NZBiasGraphConv(
                    in_feat,
                    out_feat,
                    norm='right',
                    weight=False,
                    bias=True,
                    allow_zero_in_degree=True,  # activation=self.activation
                )
                # rel: BasisGATConv(
                #     (in_feat, in_feat), out_feat, num_heads=num_heads,
                #     basis=basis,
                #     attn_basis=attn_basis,
                #     basis_coef=basis_coef[rel],
                #     use_checkpoint=self.use_gcn_checkpoint
                # )
                for rel in rel_names
            },
            aggregate="mean")  #self.attentive_aggregator)
Example #2
    def create_conv(self, in_feat, out_feat, rel_names):
        self.conv = dglnn.HeteroGraphConv({
            rel: CkptGATConv((in_feat, in_feat),
                             out_feat,
                             num_heads=1,
                             use_checkpoint=self.use_gcn_checkpoint)
            for rel in rel_names
        })
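
CkptGATConv is not defined in the snippet. Judging by its name and the use_checkpoint flag, it presumably wraps dglnn.GATConv in PyTorch gradient checkpointing; a speculative reconstruction (checkpoint(..., use_reentrant=False) needs a reasonably recent PyTorch):

import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
import dgl.nn as dglnn

class CkptGATConv(nn.Module):
    """Hypothetical sketch: GATConv with optional gradient checkpointing."""

    def __init__(self, in_feats, out_feats, num_heads, use_checkpoint=False):
        super().__init__()
        self.conv = dglnn.GATConv(in_feats, out_feats, num_heads,
                                  allow_zero_in_degree=True)
        self.use_checkpoint = use_checkpoint

    def forward(self, graph, feat):
        if self.use_checkpoint and self.training:
            # Recompute the attention layer in backward to save activation memory.
            return cp.checkpoint(self.conv, graph, feat, use_reentrant=False)
        return self.conv(graph, feat)

Note that GATConv-style modules return (N, num_heads, out_feats), so downstream code usually flattens the head dimension, as the forward pass shown with Example #5 does.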
Example #3
    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop
        self.batchnorm = False

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases,
                                               len(self.rel_names))
            else:
                self.weight = nn.Parameter(
                    th.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight,
                                        gain=nn.init.calculate_gain('relu'))

        # bias
        if bias:
            self.h_bias = nn.Parameter(th.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(th.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
        # define batch norm layer
        if self.batchnorm:
            self.bn = nn.BatchNorm1d(out_feat)

        self.dropout = nn.Dropout(dropout)
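
The snippet shows only the constructor. In DGL's rgcn-hetero example, which this layer closely follows, the matching forward pass feeds the per-relation weights to HeteroGraphConv through mod_kwargs; paraphrased for the full-graph case (minibatch blocks would additionally slice out destination-node features):

    def forward(self, g, inputs):
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
            wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                     for i, w in enumerate(th.split(weight, 1, dim=0))}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            if self.self_loop:
                h = h + th.matmul(inputs[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.batchnorm:
                h = self.bn(h)
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}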
Example #4
    def __init__(self, in_dim, hidden_dim, rel_names, n_conv):
        super().__init__()
        self.encoder = nn.ModuleList([
            dglnn.HeteroGraphConv(
                {
                    rel: dglnn.GraphConv(in_dim if i == 0 else hidden_dim,
                                         hidden_dim)
                    for rel in rel_names
                },
                aggregate='sum') for i in range(n_conv)
        ])
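
A forward pass is not shown; a plausible one (assuming torch.nn.functional as F for the nonlinearity) simply chains the convolutions:

    def forward(self, g, feats):
        h = feats
        for i, conv in enumerate(self.encoder):
            h = conv(g, h)
            if i != len(self.encoder) - 1:
                h = {ntype: F.relu(v) for ntype, v in h.items()}
        return h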
Example #5
File: rgat.py Project: wangxiaoyunNV/dgl
    def __init__(self, etypes, in_feats, n_hidden, n_classes, n_heads=4):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(
            dglnn.HeteroGraphConv({
                etype: dglnn.GATConv(in_feats, n_hidden // n_heads, n_heads)
                for etype in etypes
            }))
        self.layers.append(
            dglnn.HeteroGraphConv({
                etype: dglnn.GATConv(n_hidden, n_hidden // n_heads, n_heads)
                for etype in etypes
            }))
        self.layers.append(
            dglnn.HeteroGraphConv({
                etype: dglnn.GATConv(n_hidden, n_hidden // n_heads, n_heads)
                for etype in etypes
            }))
        self.dropout = nn.Dropout(0.5)
        self.linear = nn.Linear(n_hidden, n_classes)  # Should be HeteroLinear
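
Each GATConv here outputs (N, num_heads, out_feats), which is why out_feats is n_hidden // n_heads: flattening the heads restores n_hidden. The forward in DGL's rgat.py does roughly the following (paraphrased; the original consumes a list of sampled blocks, reads out ogbn-mag's 'paper' node type, and assumes torch.nn.functional as F):

    def forward(self, blocks, x):
        h = x
        for i, (layer, block) in enumerate(zip(self.layers, blocks)):
            h = layer(block, h)
            h = {ntype: v.flatten(1) for ntype, v in h.items()}  # merge heads
            if i != len(self.layers) - 1:
                h = {ntype: self.dropout(F.relu(v)) for ntype, v in h.items()}
        return self.linear(h['paper'])  # target node type in ogbn-mag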
Example #6
    def __init__(self,
                 in_feat,
                 out_feat,
                 ntypes,
                 rel_names,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.ntypes = ntypes
        self.rel_names = rel_names
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop

        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat,
                                 out_feat,
                                 norm='right',
                                 weight=False,
                                 bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        if self.use_weight:
            self.weight = nn.ModuleDict({
                rel_name: nn.Linear(in_feat, out_feat, bias=False)
                for rel_name in self.rel_names
            })

        # weight for self loop
        if self.self_loop:
            self.loop_weights = nn.ModuleDict({
                ntype: nn.Linear(in_feat, out_feat, bias=bias)
                for ntype in self.ntypes
            })

        self.dropout = nn.Dropout(dropout)

        self.reset_parameters()
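
reset_parameters() and forward() are not shown. In the DGL RGCN example this variant follows, each relation's nn.Linear weight is transposed into GraphConv's expected (in_feat, out_feat) layout and injected via mod_kwargs, roughly:

    def forward(self, g, inputs):
        g = g.local_var()
        wdict = ({rel: {'weight': self.weight[rel].weight.T}
                  for rel in self.rel_names} if self.use_weight else {})
        inputs_dst = ({k: v[:g.number_of_dst_nodes(k)] for k, v in inputs.items()}
                      if g.is_block else inputs)
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            if self.self_loop:
                h = h + self.loop_weights[ntype](inputs_dst[ntype])
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}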
Example #7
    def __init__(
        self,
        ntype2in_feat_dim: Dict[str, int],
        out_feat_dim: int,
        etype2ntypes: Dict[str, Tuple[str, str]],
        bias: bool = True,
        activation: Optional[Callable] = None,
        dropout: float = 0.0,
        regularizer: Optional[str] = None,
        ntype_need_basis_reg: Optional[str] = None,
        num_bases: Optional[int] = None,
    ):
        super(RelGraphConvLayer, self).__init__()
        self.ntype2in_feat_dim = ntype2in_feat_dim
        self.out_feat_dim = out_feat_dim
        self.etype2ntypes = etype2ntypes
        self.etypes = list(etype2ntypes.keys())
        self.bias = bias
        self.activation = activation
        self.regularizer = regularizer
        self.num_bases = num_bases
        self.ntype_need_basis_reg = ntype_need_basis_reg
        self.etypes_need_reg = []

        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat_dim))
            nn.init.zeros_(self.h_bias)

        self.dropout = nn.Dropout(dropout)

        etype2in_feat_dim = {
            etype: self.ntype2in_feat_dim[etype2ntypes[etype][0]]
            for etype in self.etypes
        }

        # weight=False: this class creates the per-etype weights itself
        # so that weight regularization can be applied to them
        self.conv = dglnn.HeteroGraphConv({
            etype: dglnn.GraphConv(etype2in_feat_dim[etype],
                                   out_feat_dim,
                                   norm='both',
                                   weight=False,
                                   bias=False)
            for etype in self.etypes
        })

        if regularizer == 'bdd':
            self.bdds = {
                etype: BlockDiagDecomp(
                    (etype2in_feat_dim[etype], out_feat_dim), num_bases)
                for etype in self.etypes
            }
            self.bdds = nn.ModuleDict(self.bdds)
            self.etypes_need_reg = self.etypes
            self.etypes_without_reg = []
            return

        self.etypes_need_reg = []
        if regularizer == 'basis':
            self.etypes_need_reg = [
                etype for etype, ntypes in self.etype2ntypes.items()
                if ntypes[0] == ntype_need_basis_reg
                and ntypes[1] == ntype_need_basis_reg
            ]
            self.basis = WeightBasis(
                (ntype2in_feat_dim[ntype_need_basis_reg], out_feat_dim),
                num_bases, len(self.etypes_need_reg))

        self.weights_without_reg = nn.ParameterDict()
        self.etypes_without_reg = list(
            set(self.etypes) - set(self.etypes_need_reg))
        for etype in self.etypes_without_reg:
            self.weights_without_reg[etype] = nn.Parameter(
                torch.Tensor(etype2in_feat_dim[etype], out_feat_dim))
            nn.init.xavier_uniform_(self.weights_without_reg[etype])
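
The forward pass is not shown. A speculative sketch of how these per-etype weights would be assembled at call time and injected through HeteroGraphConv's mod_kwargs (the 'bdd' branch, which holds modules rather than plain weight tensors, is omitted here):

    def forward(self, g, ntype2feat):
        wdict = {etype: {'weight': self.weights_without_reg[etype]}
                 for etype in self.etypes_without_reg}
        if self.regularizer == 'basis' and self.etypes_need_reg:
            basis_weight = self.basis()  # (len(etypes_need_reg), in_dim, out_dim)
            for i, etype in enumerate(self.etypes_need_reg):
                wdict[etype] = {'weight': basis_weight[i]}
        hs = self.conv(g, ntype2feat, mod_kwargs=wdict)

        def _apply(h):
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)

        return {ntype: _apply(h) for ntype, h in hs.items()}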