Code Example #1
File: gat_modified.py  Project: lovishmadaan/gatcheck
 def __init__(self,
              in_feats,
              out_feats,
              num_heads,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False,
              activation=None):
     super(GATConv, self).__init__()
     self._num_heads = num_heads
     self._in_feats = in_feats
     self._out_feats = out_feats
     self.fc = nn.Linear(in_feats, out_feats * num_heads, bias=False)
     self.attn_l = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_r = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.leaky_relu = nn.LeakyReLU(negative_slope)
     self.graph = None
     if residual:
         if in_feats != out_feats:
             self.res_fc = nn.Linear(in_feats,
                                     num_heads * out_feats,
                                     bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer('res_fc', None)
     self.reset_parameters()
     self.activation = activation
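All of these constructors end with a call to reset_parameters(), which is not shown. In upstream DGL's GATConv that method applies Xavier initialization with a ReLU gain to the projection and attention parameters; a minimal sketch consistent with that convention (the exact body in this fork may differ):

    def reset_parameters(self):
        # Xavier init with ReLU gain, following upstream dgl.nn.GATConv
        gain = nn.init.calculate_gain('relu')
        nn.init.xavier_normal_(self.fc.weight, gain=gain)
        nn.init.xavier_normal_(self.attn_l, gain=gain)
        nn.init.xavier_normal_(self.attn_r, gain=gain)
        if isinstance(self.res_fc, nn.Linear):
            nn.init.xavier_normal_(self.res_fc.weight, gain=gain)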
Code Example #2
File: hgao.py  Project: ziqiaomeng/dgl
    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads=8,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=True,
                 activation=F.elu,
                 k=8,):
        super(HardGAO, self).__init__()
        self.num_heads = num_heads
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.k = k
        self.residual = residual
        # Initialize Parameters for Additive Attention
        self.fc = nn.Linear(
            self.in_feats, self.out_feats * self.num_heads, bias=False)
        self.attn_l = nn.Parameter(torch.FloatTensor(size=(1, self.num_heads, self.out_feats)))
        self.attn_r = nn.Parameter(torch.FloatTensor(size=(1, self.num_heads, self.out_feats)))
        # Initialize Parameters for Hard Projection
        self.p = nn.Parameter(torch.FloatTensor(size=(1, in_feats)))
        # Initialize Dropouts
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if self.residual:
            if self.in_feats == self.out_feats:
                self.residual_module = Identity()
            else:
                self.residual_module = nn.Linear(self.in_feats, self.out_feats * num_heads, bias=False)

        self.reset_parameters()
        self.activation = activation
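The extra parameter p is what makes HardGAO's attention "hard": node features are projected onto p, and only the k highest-scoring nodes in each neighborhood take part in attention. A hedged sketch of the scoring step only (h is an illustrative (N, in_feats) feature matrix; the per-neighborhood top-k selection in the real forward needs graph operations beyond this):

    # score each node by the normalized projection of its features onto p
    score = (h * self.p).sum(dim=-1) / self.p.norm(p=2)   # (N,)
    topk_scores, topk_idx = torch.topk(score, k=self.k)   # keep the k best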
Code Example #3
File: conv.py  Project: jkx19/cogdl
 def __init__(
     self,
     edge_feats,
     num_etypes,
     in_feats,
     out_feats,
     num_heads,
     feat_drop=0.0,
     attn_drop=0.0,
     negative_slope=0.2,
     residual=False,
     activation=None,
     allow_zero_in_degree=False,
     bias=False,
     alpha=0.0,
 ):
     super(myGATConv, self).__init__()
     self._edge_feats = edge_feats
     self._num_heads = num_heads
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._out_feats = out_feats
     self._allow_zero_in_degree = allow_zero_in_degree
     self.edge_emb = nn.Embedding(num_etypes, edge_feats)
     if isinstance(in_feats, tuple):
         self.fc_src = nn.Linear(self._in_src_feats,
                                 out_feats * num_heads,
                                 bias=False)
         self.fc_dst = nn.Linear(self._in_dst_feats,
                                 out_feats * num_heads,
                                 bias=False)
     else:
         self.fc = nn.Linear(self._in_src_feats,
                             out_feats * num_heads,
                             bias=False)
     self.fc_e = nn.Linear(edge_feats, edge_feats * num_heads, bias=False)
     self.attn_l = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_r = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_e = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, edge_feats)))
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.leaky_relu = nn.LeakyReLU(negative_slope)
     if residual:
         if self._in_dst_feats != out_feats:
             self.res_fc = nn.Linear(self._in_dst_feats,
                                     num_heads * out_feats,
                                     bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer("res_fc", None)
     self.reset_parameters()
     self.activation = activation
     self.bias = bias
     if bias:
         self.bias_param = nn.Parameter(th.zeros((1, num_heads, out_feats)))
     self.alpha = alpha
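fc_e and attn_e add an edge-type term to the usual additive attention, and alpha blends in the previous layer's attention weights, as in Simple-HGN. A hedged sketch of how the logit is typically assembled (el, er, etype, and res_attn are illustrative names; edge_softmax is dgl.nn.functional.edge_softmax):

    # per-edge-type embedding -> per-head edge term
    e_emb = self.fc_e(self.edge_emb(etype)).view(-1, self._num_heads, self._edge_feats)
    ee = (e_emb * self.attn_e).sum(dim=-1, keepdim=True)   # (E, H, 1)
    logits = self.leaky_relu(el + er + ee)                 # node + node + edge terms
    a = self.attn_drop(edge_softmax(graph, logits))
    if res_attn is not None:                               # attention residual
        a = a * (1 - self.alpha) + res_attn * self.alpha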
Code Example #4
 def __init__(self,
              in_feats,
              out_feats,
              num_heads,
              num_nodes,
              layerid,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False,
              activation=None,
              allow_zero_in_degree=False,
              fix=False):
     super(GATConvs, self).__init__()
     self._num_heads = num_heads
     self.num_nodes = num_nodes
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._out_feats = out_feats
     self._allow_zero_in_degree = allow_zero_in_degree
     self.layerid = layerid
     if isinstance(in_feats, tuple):
         self.fc_src = nn.Linear(self._in_src_feats,
                                 out_feats * num_heads,
                                 bias=False)
         self.fc_dst = nn.Linear(self._in_dst_feats,
                                 out_feats * num_heads,
                                 bias=False)
     else:
         self.fc1 = nn.Linear(self._in_src_feats,
                              out_feats * num_heads,
                              bias=False)
         self.fc2 = nn.Linear(self._in_src_feats,
                              out_feats * num_heads,
                              bias=False)
         self.fc3 = nn.Linear(self._in_src_feats,
                              out_feats * num_heads,
                              bias=False)
     self.attn_l = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_r = nn.Parameter(
         th.FloatTensor(size=(1, num_heads, out_feats)))
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.leaky_relu = nn.LeakyReLU(negative_slope)
     if residual:
         if self._in_dst_feats != out_feats:
             self.res_fc = nn.Linear(self._in_dst_feats,
                                     num_heads * out_feats,
                                     bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer('res_fc', None)
     self.reset_parameters()
     self.activation = activation
     self.fix = fix
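A minimal instantiation sketch for the signature above; the sizes and the num_nodes/layerid values are illustrative, and GATConvs plus torch are assumed to be in scope:

    import torch.nn.functional as F

    # hypothetical sizes: 128-dim inputs, 16 dims per head, 4 heads
    layer = GATConvs(in_feats=128, out_feats=16, num_heads=4,
                     num_nodes=2708, layerid=0,
                     feat_drop=0.1, attn_drop=0.1,
                     residual=True, activation=F.elu)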
Code Example #5
    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 basis,
                 attn_basis,
                 basis_coef,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=False,
                 activation=None,
                 allow_zero_in_degree=False,
                 bias=True,
                 use_checkpoint=False):
        super(GATConv, self).__init__()
        self._basis = basis
        self._basis_coef = basis_coef
        self._attn_basis = attn_basis

        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._allow_zero_in_degree = allow_zero_in_degree
        # if isinstance(in_feats, tuple):
        #     self.fc_src = nn.Linear(
        #         self._in_src_feats, out_feats * num_heads, bias=False)
        #     self.fc_dst = nn.Linear(
        #         self._in_dst_feats, out_feats * num_heads, bias=False)
        # else:
        #     self.fc = nn.Linear(
        #         self._in_src_feats, out_feats * num_heads, bias=False)
        # self.attn_l = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
        # self.attn_r = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if bias:
            self.bias = nn.Parameter(th.FloatTensor(size=(num_heads * out_feats,)))
        else:
            self.register_buffer('bias', None)
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(
                    self._in_dst_feats, num_heads * out_feats, bias=False)
            else:
                self.res_fc = Identity()
        else:
            self.register_buffer('res_fc', None)
        self.reset_parameters()
        self.activation = activation

        self.dummy_tensor = th.ones(1, dtype=th.float32, requires_grad=True)
        self.use_checkpoint = use_checkpoint
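dummy_tensor exists because torch.utils.checkpoint.checkpoint only builds a backward graph when at least one input requires grad; passing an always-requires-grad dummy keeps checkpointing functional even when feat itself does not. A hedged sketch of the dispatch (_forward is a hypothetical name for the real message-passing body, which is not shown):

    import torch.utils.checkpoint as cp

    def forward(self, graph, feat):
        if self.use_checkpoint:
            # trade memory for compute: recompute activations in backward
            return cp.checkpoint(self._forward, graph, feat, self.dummy_tensor)
        return self._forward(graph, feat, self.dummy_tensor)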
Code Example #6
File: gatconv.py  Project: czkkkkkk/ragdoll
    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 n_nodes,
                 local_n_nodes,
                 apply_gather=False,
                 no_remote=True,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=False,
                 activation=None):
        super(GATConv, self).__init__()
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._n_nodes = n_nodes
        self._local_n_nodes = local_n_nodes
        self._no_remote = no_remote
        self._apply_gather = apply_gather

        if isinstance(in_feats, tuple):
            assert False  # FIXME
            self.fc_src = nn.Linear(self._in_src_feats,
                                    out_feats * num_heads,
                                    bias=False)
            self.fc_dst = nn.Linear(self._in_dst_feats,
                                    out_feats * num_heads,
                                    bias=False)
        else:
            self.fc = nn.Linear(self._in_src_feats,
                                out_feats * num_heads,
                                bias=False)
        self.attn_l = nn.Parameter(
            th.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r = nn.Parameter(
            th.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(self._in_dst_feats,
                                        num_heads * out_feats,
                                        bias=False)
            else:
                self.res_fc = Identity()
        else:
            self.register_buffer('res_fc', None)
        self.reset_parameters()
        self.activation = activation
Code Example #7
File: models.py  Project: rdangovs/6883-project
 def __init__(
     self,
     in_feats,
     out_feats,
     num_heads=1,
     feat_drop=0.0,
     attn_drop=0.0,
     negative_slope=0.2,
     residual=False,
     activation=None,
     allow_zero_in_degree=False,
     norm="none",
 ):
     super(GATConv, self).__init__()
     if norm not in ("none", "both"):
          raise DGLError('Invalid norm value. Must be either "none" or "both".'
                         ' But got "{}".'.format(norm))
     self._num_heads = num_heads
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._out_feats = out_feats
     self._allow_zero_in_degree = allow_zero_in_degree
     self._norm = norm
     if isinstance(in_feats, tuple):
         self.fc_src = nn.Linear(self._in_src_feats,
                                 out_feats * num_heads,
                                 bias=False)
         self.fc_dst = nn.Linear(self._in_dst_feats,
                                 out_feats * num_heads,
                                 bias=False)
     else:
         self.fc = nn.Linear(self._in_src_feats,
                             out_feats * num_heads,
                             bias=False)
     self.attn_l = nn.Parameter(
         torch.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_r = nn.Parameter(
         torch.FloatTensor(size=(1, num_heads, out_feats)))
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.leaky_relu = nn.LeakyReLU(negative_slope)
     if residual:
         if self._in_dst_feats != out_feats:
             self.res_fc = nn.Linear(self._in_dst_feats,
                                     num_heads * out_feats,
                                     bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer("res_fc", None)
     self.reset_parameters()
     self._activation = activation
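norm="both" mirrors GCN-style symmetric normalization: features are scaled by deg^-1/2 on the source side before aggregation and again on the destination side after it. A hedged sketch of the source-side half (standard DGL graph API; this fork's forward is not shown):

    if self._norm == "both":
        degs = graph.out_degrees().float().clamp(min=1)  # avoid division by zero
        norm = torch.pow(degs, -0.5).view(-1, 1, 1)      # broadcast over heads/feats
        feat_src = feat_src * norm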
Code Example #8
    def __init__(self,
                 in_feats: int,
                 hidden_dim: int,
                 num_heads: int,
                 alpha,
                 hop_num,
                 feat_drop,
                 attn_drop,
                 topk_type='local',
                 top_k=-1,
                 negative_slope=0.2):
        """
        """
        super(gtransformerlayer, self).__init__()
        self.topk_type = topk_type
        self._in_feats = in_feats
        self._out_feats = hidden_dim
        self._num_heads = num_heads
        self.alpha = alpha
        self.hop_num = hop_num
        self.top_k = top_k
        self._att_dim = hidden_dim // num_heads

        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        self.fc_head = nn.Linear(in_feats, self._out_feats, bias=False)
        self.fc_tail = nn.Linear(in_feats, self._out_feats, bias=False)
        self.fc = nn.Linear(in_feats, self._out_feats, bias=False)
        self.fc_out = nn.Linear(self._out_feats, self._out_feats, bias=False)
        if in_feats != self._out_feats:
            self.res_fc = nn.Linear(in_feats, self._out_feats, bias=False)
        else:
            self.res_fc = Identity()

        self.attn_h = nn.Parameter(torch.FloatTensor(size=(1, self._num_heads,
                                                           self._att_dim)),
                                   requires_grad=True)
        self.attn_t = nn.Parameter(torch.FloatTensor(size=(1, self._num_heads,
                                                           self._att_dim)),
                                   requires_grad=True)
        self.graph_norm = LayerNorm(
            num_features=in_feats)  # entity feature normalization
        self.feed_forward = PositionwiseFeedForward(
            model_dim=self._out_feats,
            d_hidden=4 * self._out_feats)  # entity feed forward
        self.ff_norm = LayerNorm(
            num_features=self._out_feats)  # entity feed forward normalization
        self.reset_parameters()
        self.attention_mask_value = -1e15
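attention_mask_value = -1e15 is the standard softmax-masking trick: masked positions receive a huge negative logit so their post-softmax weight is effectively zero. A tiny self-contained illustration:

    import torch

    scores = torch.tensor([1.0, 2.0, 3.0])
    mask = torch.tensor([False, True, False])   # True = masked out
    scores = scores.masked_fill(mask, -1e15)
    weights = torch.softmax(scores, dim=-1)     # masked entry gets ~0 weight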
Code Example #9
File: gatconvFF.py  Project: xjtuwgt/DiffGCN
    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 feat_drop=0.,
                 attn_drop=0.,
                 negative_slope=0.2,
                 residual=True,
                 allow_zero_in_degree=False):
        super(GATConvFF, self).__init__()
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._allow_zero_in_degree = allow_zero_in_degree
        if isinstance(in_feats, tuple):
            self.fc_src = nn.Linear(self._in_src_feats,
                                    out_feats * num_heads,
                                    bias=False)
            self.fc_dst = nn.Linear(self._in_dst_feats,
                                    out_feats * num_heads,
                                    bias=False)
        else:
            self.fc = nn.Linear(self._in_src_feats,
                                out_feats * num_heads,
                                bias=False)
        self.attn_l = nn.Parameter(
            th.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r = nn.Parameter(
            th.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(self._in_dst_feats,
                                        num_heads * out_feats,
                                        bias=False)
            else:
                self.res_fc = Identity()
        else:
            self.register_buffer('res_fc', None)
        self.reset_parameters()

        self.layer_norm = th.nn.LayerNorm(out_feats * num_heads)
        self.ff_layer_norm = th.nn.LayerNorm(out_feats * num_heads)
        self.activation = PositionwiseFeedForward(
            model_dim=out_feats * num_heads,
            d_hidden=4 * out_feats * num_heads)
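Here the activation slot holds a position-wise feed-forward block flanked by two LayerNorms, i.e. a transformer-style pattern. A hedged sketch of how these pieces are conventionally combined (x and attn_out are illustrative names; the project's actual forward is not shown):

    # x: layer input, attn_out: multi-head attention output, both (N, H*D)
    h = self.layer_norm(x + attn_out)                 # residual + norm after attention
    out = self.ff_layer_norm(h + self.activation(h))  # residual + norm after feed-forward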
Code Example #10
    def __init__(
        self,
        in_feats,
        out_feats,
        K=3,
        num_heads=1,
        feat_drop=0.0,
        attn_drop=0.0,
        negative_slope=0.2,
        residual=False,
        activation=None,
        allow_zero_in_degree=False,
    ):
        super(GCNHAConv, self).__init__()
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._allow_zero_in_degree = allow_zero_in_degree
        self._K = K

        self.fc = nn.Linear(self._in_src_feats,
                            out_feats * num_heads,
                            bias=False)
        self.hop_attn_l = nn.Parameter(
            torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.hop_attn_r = nn.Parameter(
            torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        self.position_emb = nn.Parameter(
            torch.FloatTensor(size=(K + 1, num_heads, out_feats)))
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(self._in_dst_feats,
                                        num_heads * out_feats,
                                        bias=False)
            else:
                self.res_fc = Identity()
        else:
            self.register_buffer("res_fc", None)
        self.reset_parameters()
        self._activation = activation
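GCNHAConv attends over K+1 propagation hops rather than over neighbors: each hop's representation receives a position embedding, and hop_attn_l/hop_attn_r score the hops. A heavily hedged sketch of the idea (propagate is a hypothetical one-hop aggregation; the real forward is not shown):

    hops = [h0]                                  # hop 0 = projected input, (N, H, D)
    for _ in range(self._K):
        hops.append(propagate(graph, hops[-1]))  # one more round of aggregation
    hs = torch.stack(hops) + self.position_emb.unsqueeze(1)  # (K+1, N, H, D)
    logits = self.leaky_relu(
        (hs * self.hop_attn_l).sum(-1, keepdim=True)
        + (h0 * self.hop_attn_r).sum(-1, keepdim=True))
    w = torch.softmax(logits, dim=0)             # attention distribution over hops
    out = (w * hs).sum(dim=0)                    # (N, H, D)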
Code Example #11
 def __init__(self,
              in_feats,
              out_feats,
              num_class,
              node_num,
              feat_drop=0.,
              attn_drop=0.,
              residual=False,
              activation=None,
              mlp_layers=0,
              allow_zero_in_degree=False,
              ptype='ind'):
     super(PLPConv, self).__init__()
      self._in_src_feats = self._in_dst_feats = in_feats  # also record dst size; the residual branch below reads _in_dst_feats
     self._out_feats = out_feats
     self.ptype = ptype
     self.lr_alpha = nn.Parameter(th.zeros(size=(node_num, 1)))
     self._allow_zero_in_degree = allow_zero_in_degree
     if self.ptype == 'ind':
         self.attn_l = nn.Parameter(th.FloatTensor(size=(1, in_feats)))
     elif self.ptype == 'tra':
         self.fc_emb = nn.Parameter(th.FloatTensor(size=(node_num, 1)))
     else:
         raise ValueError(r'No such ptype!')
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.mlp_layers = mlp_layers
     if self.mlp_layers > 0:
         self.mlp = MLP2(self.mlp_layers, self._in_src_feats, out_feats, num_class, feat_drop)
     if residual:
         if self._in_dst_feats != out_feats:
             self.res_fc = nn.Linear(
                 self._in_dst_feats, out_feats, bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer('res_fc', None)
     self.reset_parameters()
     self.activation = activation
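ptype accepts only 'ind' (inductive: a learned attention vector over input features) or 'tra' (transductive: a per-node embedding); anything else raises. A minimal instantiation sketch with illustrative sizes, assuming the class and its helpers (MLP2, Identity) are in scope:

    # hypothetical: 1433-dim features, 7 classes, 2708 nodes
    plp = PLPConv(in_feats=1433, out_feats=16, num_class=7,
                  node_num=2708, mlp_layers=2, ptype='tra')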
Code Example #12
File: models.py  Project: shigen97/Non-localGCN
 def __init__(self,
              in_feats,
              out_feats,
              num_heads,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False,
              activation=None):
     super(Conv, self).__init__()
     self._num_heads = num_heads
     self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
     self._out_feats = out_feats
     self.cache_atte = None
     if isinstance(in_feats, tuple):
         self.fc_src = nn.Linear(
             self._in_src_feats, out_feats * num_heads, bias=False)
         self.fc_dst = nn.Linear(
             self._in_dst_feats, out_feats * num_heads, bias=False)
     else:
         self.fc = nn.Linear(
             self._in_src_feats, out_feats * num_heads, bias=False)
     self.attn_l = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
     self.attn_r = nn.Parameter(th.FloatTensor(size=(1, num_heads, out_feats)))
     self.feat_drop = nn.Dropout(feat_drop)
     self.attn_drop = nn.Dropout(attn_drop)
     self.leaky_relu = nn.LeakyReLU(negative_slope)
     if residual:
         if self._in_dst_feats != out_feats:
             self.res_fc = nn.Linear(
                 self._in_dst_feats, num_heads * out_feats, bias=False)
         else:
             self.res_fc = Identity()
     else:
         self.register_buffer('res_fc', None)
     self.reset_parameters()
     self.activation = activation
Code Example #13
    def __init__(self,
                 in_feats,
                 out_feats,
                 dim,
                 n_kernels,
                 aggregator_type='sum',
                 residual=False,
                 bias=True):
        super(GMMConv, self).__init__()
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._dim = dim
        self._n_kernels = n_kernels
        if aggregator_type == 'sum':
            self._reducer = fn.sum
        elif aggregator_type == 'mean':
            self._reducer = fn.mean
        elif aggregator_type == 'max':
            self._reducer = fn.max
        else:
            raise KeyError(
                "Aggregator type {} not recognized.".format(aggregator_type))

        self._reducer_e = fn.mean

        self.mu = nn.Parameter(th.Tensor(n_kernels, dim))
        self.inv_sigma = nn.Parameter(th.Tensor(n_kernels, dim))
        #self.fc = nn.Linear(self._in_src_feats, n_kernels * out_feats, bias=False)

        self.fc = nn.Sequential(
            nn.Linear(self._in_src_feats, 20),
            nn.LeakyReLU(-0.8),
            nn.Linear(20, 40),
            nn.LeakyReLU(-0.8),
            nn.Linear(40, 60),
            nn.LeakyReLU(-0.8),
            nn.Linear(60, n_kernels * out_feats),
            nn.LeakyReLU(-0.8),
        )

        self.mu_e = nn.Parameter(th.Tensor(n_kernels, dim))
        self.inv_sigma_e = nn.Parameter(th.Tensor(n_kernels, dim))
        #self.fc_e = nn.Linear(1, n_kernels , bias=False)

        self.fc_e = nn.Sequential(
            nn.Linear(1, 20),
            nn.LeakyReLU(-0.8),
            nn.Linear(20, 40),
            nn.LeakyReLU(-0.8),
            nn.Linear(40, 60),
            nn.LeakyReLU(-0.8),
            nn.Linear(60, n_kernels),
            nn.LeakyReLU(-0.8),
        )

        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(self._in_dst_feats,
                                        out_feats,
                                        bias=False)
            else:
                self.res_fc = Identity()
        else:
            self.register_buffer('res_fc', None)

        if bias:
            self.bias = nn.Parameter(th.Tensor(out_feats))
        else:
            self.register_buffer('bias', None)
        self.reset_parameters()
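mu and inv_sigma parameterize the Gaussian-mixture edge weights of MoNet-style convolution: for pseudo-coordinates u, kernel k gets weight w_k(u) = exp(-1/2 * sum_d (inv_sigma[k,d] * (u_d - mu[k,d]))^2). A hedged sketch of that computation (pseudo is an illustrative (E, dim) tensor of pseudo-coordinates):

    diff = pseudo.unsqueeze(1) - self.mu.unsqueeze(0)   # (E, n_kernels, dim)
    w = th.exp(-0.5 * ((diff * self.inv_sigma.unsqueeze(0)) ** 2).sum(-1))  # (E, n_kernels)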