Example #1
 def __init__(self,
              g,
              in_dim,
              out_dim,
              num_heads,
              feat_drop,
              attn_drop,
              alpha,
              residual=False):
     super(GraphAttention, self).__init__()
     self.g = g
     self.num_heads = num_heads
     self.fc = nn.Linear(in_dim, num_heads * out_dim, bias=False)
     if feat_drop:
         self.feat_drop = nn.Dropout(feat_drop)
     else:
         self.feat_drop = lambda x : x
     if attn_drop:
         self.attn_drop = nn.Dropout(attn_drop)
     else:
         self.attn_drop = lambda x : x
     self.attn_l = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
     self.attn_r = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
     nn.init.xavier_normal_(self.fc.weight.data, gain=1.414)
     nn.init.xavier_normal_(self.attn_l.data, gain=1.414)
     nn.init.xavier_normal_(self.attn_r.data, gain=1.414)
     self.leaky_relu = nn.LeakyReLU(alpha)
     self.softmax = EdgeSoftmax()
     self.residual = residual
     if residual:
         if in_dim != out_dim:
             self.res_fc = nn.Linear(in_dim, num_heads * out_dim, bias=False)
             nn.init.xavier_normal_(self.res_fc.weight.data, gain=1.414)
         else:
             self.res_fc = None
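The fc projection and the (num_heads, out_dim, 1) attention parameters above follow the usual multi-head GAT layout. Below is a hedged, self-contained sketch of how those shapes combine into per-head attention logits; the snippet itself only shows the constructor, so this is an illustration under assumed shapes, not the class's actual forward pass.

import torch
import torch.nn as nn

# Illustrative shapes only; names mirror the snippet above.
num_nodes, in_dim, out_dim, num_heads = 5, 16, 8, 4
fc = nn.Linear(in_dim, num_heads * out_dim, bias=False)
attn_l = torch.randn(num_heads, out_dim, 1)

h = fc(torch.randn(num_nodes, in_dim))                   # (num_nodes, num_heads * out_dim)
h = h.view(num_nodes, num_heads, out_dim)                # split features into heads
# one scalar "left" attention term per head and per node:
a_l = torch.bmm(h.transpose(0, 1), attn_l).squeeze(-1)   # (num_heads, num_nodes)
print(a_l.shape)  # torch.Size([4, 5])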
Example #2
    def __init__(self, in_feat, out_feat, num_rels, num_bases, num_heads=1, 
                 bias=None, activation=None, self_loop=False, dropout=0.0, 
                 concat_attn=True, relation_type="block", relation_size=-1):
        super(RGCN_Attn_BlockLayer, self).__init__(in_feat, out_feat, bias,
                                             activation, self_loop=self_loop,
                                             dropout=dropout)
        self.num_rels = num_rels
        self.num_bases = num_bases
        assert self.num_bases > 0

        self.in_feat = in_feat
        self.out_feat = out_feat
        # Attention parameters
        # attention-head transformation to ensure that output vectors are the same size as input vectors

        self.relation_type = relation_type

        self.concat_attn = concat_attn
        self.num_heads = num_heads
        self.softmax = EdgeSoftmax()
        self.attn_k = nn.Parameter(torch.Tensor(size=(num_heads, in_feat, out_feat)))
        nn.init.xavier_uniform_(self.attn_k, gain=nn.init.calculate_gain('relu'))
        self.attn_q = nn.Parameter(torch.Tensor(size=(num_heads, out_feat, out_feat)))
        nn.init.xavier_uniform_(self.attn_q, gain=nn.init.calculate_gain('relu'))

        if concat_attn:
            attn_feat = out_feat // num_heads
        else:
            attn_feat = out_feat
        # per-head linear transforms, registered via add_module so their
        # parameters are tracked by the parent module
        self.attn_transforms = []
        for head in range(self.num_heads):
            self.attn_transforms.append(nn.Linear(out_feat, attn_feat, bias=False))
            self.add_module("attn_transform_%d" % head, self.attn_transforms[head])


        # assuming in_feat and out_feat are both divisible by num_bases
        if relation_type == "block":
            self.submat_in = in_feat // self.num_bases
            self.submat_out = out_feat // self.num_bases

            self.weight = nn.Parameter(torch.Tensor(
                self.num_rels, self.num_bases * self.submat_in * self.submat_out))
            nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
        elif relation_type == "vector":
            if relation_size == -1:
                relation_size = out_feat
            assert relation_size <= out_feat
            self.weight = nn.Parameter(torch.Tensor(self.num_rels, relation_size))
            nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
        elif relation_type == "basis":
            self.weight = nn.Parameter(torch.Tensor(self.num_rels, num_bases))
            nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
            self.bases = nn.Parameter(torch.Tensor(num_bases, in_feat * out_feat))
            nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
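For relation_type="block", each row of self.weight stores num_bases flattened (submat_in x submat_out) blocks, i.e. the block-diagonal decomposition used in R-GCN. A hedged sketch of how one relation's row could be reshaped and applied follows; the shapes and the einsum are illustrative assumptions, since the class's actual message function is not shown above.

import torch

num_bases, submat_in, submat_out = 4, 16, 8                # e.g. in_feat=64, out_feat=32
w_row = torch.randn(num_bases * submat_in * submat_out)    # one row of self.weight
w = w_row.view(num_bases, submat_in, submat_out)           # the block matrices
h = torch.randn(10, num_bases, submat_in)                  # 10 nodes, features split per block
msg = torch.einsum('nbi,bio->nbo', h, w).reshape(10, -1)   # (10, out_feat)
print(msg.shape)  # torch.Size([10, 32])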
Example #3
 def __init__(self, g, in_channels, out_channels, heads, dropout, slope):
     super(GATConv, self).__init__()
     self.g = g
     self.out_channels = out_channels
     self.heads = heads
     self.fc = Linear(in_channels, heads * out_channels, bias=False)
     self.attn_drop = torch.nn.Dropout(dropout)
     self.attn_l = Parameter(torch.Tensor(heads, out_channels, 1))
     self.attn_r = Parameter(torch.Tensor(heads, out_channels, 1))
     self.leaky_relu = torch.nn.LeakyReLU(slope)
     self.softmax = EdgeSoftmax()
     self.reset_parameters()
Example #4
 def __init__(self, g, in_channels, out_channels, heads=1,
              negative_slope=0.2, dropout=0):
     super().__init__()
     self.g = g
     self.out_channels = out_channels
     self.heads = heads
     self.negative_slope = negative_slope
     self.dropout = dropout
     self.weight = Parameter(torch.Tensor(in_channels,
                                          heads * out_channels))
     self.att_l = Parameter(torch.Tensor(heads, out_channels, 1))
     self.att_r = Parameter(torch.Tensor(heads, out_channels, 1))
     self.bias = Parameter(torch.Tensor(heads * out_channels))
     self.softmax = EdgeSoftmax()
     self.reset_parameters()
Example #5
 def __init__(self,
              g,
              in_dim,
              edge_in_dim,
              out_dim,
              num_heads,
              feat_drop,
              attn_drop,
              alpha,
              residual=False,
              use_batch_norm=False):
     super(GraphAttention, self).__init__()
     self.g = g
     self.num_heads = num_heads
     self.fc = nn.Linear(in_dim, num_heads * out_dim, bias=True)
     self.fc_e = nn.Linear(edge_in_dim, num_heads * out_dim, bias=True)
     if feat_drop:
         self.feat_drop = nn.Dropout(feat_drop)
     else:
         self.feat_drop = lambda x: x
     if attn_drop:
         self.attn_drop = nn.Dropout(attn_drop)
     else:
         self.attn_drop = lambda x: x
     self.attn_l = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
     self.attn_r = nn.Parameter(torch.Tensor(size=(num_heads, out_dim, 1)))
     self.attn_e = nn.Parameter(torch.Tensor(size=(edge_in_dim,
                                                   num_heads)))  # (K X H)
     nn.init.xavier_normal_(self.fc.weight.data, gain=0.1)
     nn.init.xavier_normal_(self.attn_l.data, gain=0.1)
     nn.init.xavier_normal_(self.attn_r.data, gain=0.1)
     nn.init.xavier_normal_(self.attn_e.data, gain=0.1)
     self.leaky_relu = nn.LeakyReLU(alpha)
     self.softmax = EdgeSoftmax()
     self.residual = residual
     if residual:
         if in_dim != out_dim:
             self.res_fc = nn.Linear(in_dim,
                                     num_heads * out_dim,
                                     bias=False)
             nn.init.xavier_normal_(self.res_fc.weight.data, gain=0.1)
         else:
             self.res_fc = None
     self.use_batch_norm = use_batch_norm
     if self.use_batch_norm:
         self.bn = nn.BatchNorm1d(out_dim * num_heads)
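attn_e is shaped (edge_in_dim, num_heads), so a plain matrix product maps raw edge features to one attention logit per head. A hedged sketch of that projection is given below; the forward pass is not part of the snippet, so the shapes here are illustrative assumptions.

import torch

edge_in_dim, num_heads, num_edges = 16, 4, 100
attn_e = torch.randn(edge_in_dim, num_heads)       # (K x H), as in the comment above
edge_feats = torch.randn(num_edges, edge_in_dim)
edge_logits = edge_feats @ attn_e                  # (num_edges, num_heads)
print(edge_logits.shape)  # torch.Size([100, 4])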