Example #1
    def __init__(self, ncells, ngenes, nfeat, nhid, nclass, dropout, alpha,
                 nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            GraphAttentionLayer(ncells,
                                ngenes,
                                nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(ncells,
                                           ngenes,
                                           nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #2
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GATmodel, self).__init__()
        self.dropout = dropout
        self.number_of_actions = 2
        self.gamma = 0.99
        self.final_epsilon = 0.0001
        self.initial_epsilon = 0.001
        self.number_of_iterations = 10000
        self.replay_memory_size = 5000
        self.minibatch_size = 50
        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
        self.mix_mlp = nn.Linear(16, 2)

        self.apply(weights_init)  # init weight
Example #3
    def __init__(self, nfeat, nhid, nout, nlmphid, nclass, dropout, alpha,
                 nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        # self.out_att = GraphAttentionLayer(nhid * nheads, nout, dropout=dropout, alpha=alpha, concat=False)
        self.out_att = GraphAttentionLayer(nhid,
                                           nout,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)

        self.MLP = torch.nn.Sequential(
            torch.nn.Linear(nout, nlmphid),
            torch.nn.ReLU(),
            torch.nn.Linear(nlmphid, nclass),
        )
Example #4
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """
        Dense version of GAT.
        :param nfeat: dimension of the input node features
        :param nhid: dimension of the hidden features produced by each head
        :param nclass: number of output classes
        :param dropout: dropout rate
        :param alpha: negative slope of the LeakyReLU
        :param nheads: number of attention heads
        """
        super(GAT, self).__init__()
        self.nfeat = nfeat
        self.nhid = nhid
        self.nclass = nclass
        self.dropout = dropout
        self.alpha = alpha
        self.nheads = nheads

        self.attentions = \
            [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
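For context, here is a minimal sketch of the forward pass that typically pairs with this kind of constructor. It assumes torch is imported, F is torch.nn.functional, and each GraphAttentionLayer is called as layer(x, adj); treat it as an illustration rather than the author's exact code.

    def forward(self, x, adj):
        # drop input features, run every head, and concatenate the per-head outputs
        x = F.dropout(x, self.dropout, training=self.training)
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        # drop again, aggregate with the single output attention layer, and classify
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)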
Example #5
 def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
     """Dense version of GAT."""
     super(GAT, self).__init__()
     self.dropout = dropout
     # print('nfeat:',nfeat)   # per-node feature vector of the citation network: the 1433-word vocabulary describing each paper
     # print('nhid:',nhid)
     # print('nclass:',nclass) # 7 paper classes
     # print('dropout:',dropout)   # 0.6
     # print('alpha:',alpha)       # 0.2
     # print('nheads:',nheads)     # 8
     self.attentions = [
         GraphAttentionLayer(nfeat,
                             nhid,
                             dropout=dropout,
                             alpha=alpha,
                             concat=True) for _ in range(nheads)
     ]
     # register the 8 attention heads
     for i, attention in enumerate(self.attentions):
         self.add_module('attention_{}'.format(i), attention)
     # print('self.add_module:',self.add_module)
     # Each of the 8 heads attends over a node's neighbourhood and maps the
     # feature dimension 1433 -> 4. The 8 head outputs are concatenated, so the
     # output layer takes 4 * 8 = 32 features and produces the 7-class result.
     # nhid * nheads 32
     # nclass    7
     # dropout   0.6
     # alpha     0.2
     self.out_att = GraphAttentionLayer(nhid * nheads,
                                        nclass,
                                        dropout=dropout,
                                        alpha=alpha,
                                        concat=False)
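Putting the commented numbers together, a hypothetical instantiation for this Cora-style setup (assuming the class is named GAT as in the constructor above):

    model = GAT(nfeat=1433, nhid=4, nclass=7, dropout=0.6, alpha=0.2, nheads=8)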
Example #6
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, adj):
        """Sparse version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                adj=adj,
                                attention=False,
                                last=False) for _ in range(nheads)
        ]
        self.attentions[int(nheads / 2)] = GraphAttentionLayer(nfeat,
                                                               nhid,
                                                               dropout=dropout,
                                                               alpha=alpha,
                                                               adj=adj,
                                                               attention=True,
                                                               last=False)
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           adj=adj,
                                           attention=True,
                                           last=True)
Example #7
    def __init__(self, nfeat, nhid, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nhid,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
        print('lstm01')
        self.lstm01 = GAT_LSTM(input_size=12, hidden_size=32, nodes_number=228)
        # for weight in self.lstm01.parameters():
        #     init.constant_(weight, 0.5)
        print('lstm02')
        self.lstm02 = GAT_LSTM(input_size=32,
                               hidden_size=128,
                               nodes_number=228)
        # for weight in self.lstm02.parameters():
        #     init.constant_(weight, 0.5)
        self.reg = nn.Linear(128, 9)
        self.batch_norm = nn.BatchNorm1d(12, affine=True)
Example #8
    def __init__(self, nfeat, nhid, nclass, dropout, nheads, use_nmf=False):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout
        self.use_nmf = use_nmf

        self.attentions = [
            GraphAttentionLayer(nfeat, nhid, dropout=dropout, concat=True)
            for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        n_out_att_infeat = nhid * nheads

        if use_nmf:
            # H_list = initH(topic_s=500, topic_e=510, input_feature=nhid * nheads)
            self.nmf = NMF_Nodes(input_feature=nhid * nheads,
                                 topic_s=20,
                                 topic_e=25)
            n_out_att_infeat = nhid * nheads * 2

        self.out_att = GraphAttentionLayer(n_out_att_infeat,
                                           nclass,
                                           dropout=dropout,
                                           concat=False)
Example #9
 def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
     """Dense version of GAT."""
     super(GAT, self).__init__()
     self.dropout = dropout
     # multi-head attention setting
     self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
     for i, attention in enumerate(self.attentions):
         self.add_module('attention_{}'.format(i), attention)
     # final attention layer uses averaging instead of concatenation
     self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
Example #10
 def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, lstm_hid=6):
     """Dense version of GAT."""
     super(GAT_with_LSTM, self).__init__()
     self.dropout = dropout
     # LSTM to process time series
     self.lstm = nn.LSTM(2, 2 * lstm_hid, num_layers=2, batch_first=True)
     # multi-head attention setting
     self.attentions = [GraphAttentionLayer(nfeat * lstm_hid, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
     for i, attention in enumerate(self.attentions):
         self.add_module('attention_{}'.format(i), attention)
     # final attention layer uses averaging instead of concatenation
     self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
Example #11
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.gat1 = GraphAttentionLayer(nfeat, nhid, nheads, dropout, alpha)
        self.gat2 = GraphAttentionLayer(nhid * nheads,
                                        nclass,
                                        nheads,
                                        dropout,
                                        alpha,
                                        last=True)
Example #12
    def __init__(self, nfeat, nhid, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout
        self.xent = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([10]))
        self.weight = nn.Parameter(torch.FloatTensor(nhid, nhid))
        init.xavier_uniform_(self.weight, gain=1.414)

        self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads, nhid, dropout=dropout, alpha=alpha, concat=False)
Example #13
    def __init__(self, device, nfeat, nhid, output_dim, dropout, alpha,
                 nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.device = device
        self.dropout = dropout
        self.nhid = nhid
        self.nheads = nheads
        self.nembed = nhid * nheads

        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        # self.classifier = GraphAttentionLayer(nhid * nheads*2, 2, dropout=dropout, alpha=alpha, concat=False)
        self.classifier = nn.Sequential(nn.Linear(64 * 2, 64, bias=True),
                                        nn.ReLU(inplace=True),
                                        nn.Linear(64, output_dim, bias=True))
        self.loss_fn = nn.CrossEntropyLoss()
Example #14
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        super(GAT, self).__init__()
        self.dropout = dropout
        self.attentions = [GraphAttentionLayer(in_features=nfeat,
                                               out_features=nhid,
                                               dropout=dropout,
                                               alpha=alpha,
                                               concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(in_features=nhid * nheads,
                                           out_features=nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #15
    def __init__(self, in_dim, hid_dim, class_num, alpha, dropout, nheads,
                 use_cuda):
        super(GAT, self).__init__()
        self.in_dim = in_dim
        self.hid_dim = hid_dim
        self.class_num = class_num
        self.alpha = alpha
        self.dropout = dropout
        self.nheads = nheads
        self.use_cuda = use_cuda

        self.attentions = [
            GraphAttentionLayer(self.in_dim,
                                self.hid_dim,
                                self.alpha,
                                self.dropout,
                                nonlinear=True,
                                use_cuda=self.use_cuda)
            for _ in range(self.nheads)
        ]

        for k in range(self.nheads):
            self.add_module('attention_' + str(k), self.attentions[k])

        ## we change the second-layer attention to fc layers.
        self.classifier = nn.Sequential(
            nn.Linear(self.hid_dim, self.class_num), )
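A hypothetical forward pass for this variant. Because the classifier expects hid_dim features rather than hid_dim * nheads, the sketch assumes the per-head outputs are averaged and that each GraphAttentionLayer is called as layer(x, adj); F stands for torch.nn.functional.

    def forward(self, x, adj):
        # run every head and average instead of concatenating, so the
        # result keeps hid_dim features for the fully connected classifier
        head_outputs = [att(x, adj) for att in self.attentions]
        x = torch.stack(head_outputs, dim=0).mean(dim=0)
        x = F.dropout(x, self.dropout, training=self.training)
        return self.classifier(x)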
Example #16
    def __init__(self, weights, nfeat, nhid, adj, dropout, alpha, nheads, num_domains, batch_size, k, l, device):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.attentions = [GraphAttentionLayer(weights, nfeat, nhid, dropout=dropout, alpha=alpha, layer=l, device=device, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions): # multi-head attention, 8
            self.add_module('layer_{}'.format(l) + '_attention_{}'.format(i), attention)
Example #17
    def __init__(self, n_input, n_hidden, n_classes, dropout, alpha, n_heads):
        super(GAT, self).__init__()
        self.dropout = nn.Dropout(dropout)

        self.attentions = [
            GraphAttentionLayer(n_input,
                                n_hidden,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(n_heads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_attention = GraphAttentionLayer(n_hidden * n_heads,
                                                 n_classes,
                                                 dropout=dropout,
                                                 alpha=alpha,
                                                 concat=False)
Example #18
    def __init__(self, n_feature, n_hidden, n_class, dropout, n_head):
        """
        Args:
            n_feature (int): the dimension of feature
            n_hidden (int): the dimension of hidden layer
            n_class (int): the number of classification label
            dropout (float): dropout rate
            n_head (int): the number of attention head
        """
        super(GAT, self).__init__()

        self.dropout = dropout
        # multi-head graph attention
        self.attentions = [
            GraphAttentionLayer(n_feature, n_hidden) for _ in range(n_head)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)
        self.out_attention = GraphAttentionLayer(n_hidden * n_head, n_class)
Example #19
    def __init__(self, nsize, kgsize, nfeat, kgfeat, nhid, nclass, dropout,
                 alpha, nheads, kgheads):
        """Full connection version of GAT."""
        super(GAFT, self).__init__()
        self.dropout = dropout

        self.sensor_attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.sensor_attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.kg_attentions = [
            GraphAttentionLayer(kgfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(kgheads)
        ]
        for i, attention in enumerate(self.kg_attentions):
            self.add_module('attention_{}'.format(nheads + i), attention)

        self.sensor_out_att = GraphAttentionLayer(nhid * nheads,
                                                  128,
                                                  dropout=dropout,
                                                  alpha=alpha,
                                                  concat=False)
        self.kg_out_att = GraphAttentionLayer(nhid * kgheads,
                                              128,
                                              dropout=dropout,
                                              alpha=alpha,
                                              concat=False)
        self.W_kg = nn.Parameter(torch.empty(size=(nsize, kgsize)))
        self.W_kg = nn.init.xavier_uniform_(self.W_kg)
        # full connection for matching
        self.linear1 = nn.Sequential(nn.Linear(128, 64), nn.ReLU(True))
        self.linear2 = nn.Sequential(nn.Linear(64, nclass), nn.ReLU(True))
Example #20
    def __init__(self,
                 feature_num,
                 hidden_feature_num,
                 output_num,
                 head_num,
                 dropout_prob,
                 lrelu_alpha,
                 output_softmax=True):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout_prob = dropout_prob
        self.output_softmax = output_softmax

        self.in_att = [
            GraphAttentionLayer(feature_num,
                                feature_num,
                                dropout_prob=dropout_prob,
                                lrelu_alpha=lrelu_alpha,
                                concat=True,
                                identity=True,
                                name="in_attention_{}".format(i))
            for i in range(head_num)
        ]
        self.hid_att = [
            GraphAttentionLayer(feature_num * head_num,
                                hidden_feature_num,
                                dropout_prob=dropout_prob,
                                lrelu_alpha=lrelu_alpha,
                                concat=True,
                                name="hid_attention_{}".format(i))
            for i in range(head_num)
        ]
        for i, attention in enumerate(self.in_att + self.hid_att):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(hidden_feature_num * head_num,
                                           output_num,
                                           dropout_prob=dropout_prob,
                                           lrelu_alpha=lrelu_alpha,
                                           concat=False,
                                           name="out_attention")
Example #21
 def __init__(self, nfeat, nhid, nout, dropout, alpha, nheads, cuda):
     """Dense version of GAT. Reference from https://arxiv.org/abs/1710.10903"""
     super(GAT, self).__init__()
     self.dropout = dropout
     self.attentions = [
         GraphAttentionLayer(nfeat,
                             nhid,
                             dropout=dropout,
                             alpha=alpha,
                             concat=True) for _ in range(nheads)
     ]
     for i, attention in enumerate(self.attentions):
         self.add_module('attention_{}'.format(i), attention)
     self.dropout = nn.Dropout(dropout)
     self.out_att = GraphAttentionLayer(nhid * nheads,
                                        nout,
                                        dropout=dropout,
                                        alpha=alpha,
                                        concat=False)
     if cuda:
         self.cuda()
Example #22
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        # build nheads feature matrices of shape N x nhid (the post-attention feature matrices)
        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        # for i, attention in enumerate(self.attentions):
        #     self.add_module('attention_{}'.format(i), attention)
        # NOTE: skipping add_module leaves the heads as plain list entries, so
        # their parameters are not registered and not seen by model.parameters().

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #23
    def __init__(self, ncells, ngenes, nfeat, nhid, nclass, dropout, alpha,
                 nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.out_att = GraphAttentionLayer(ncells,
                                           ngenes,
                                           nfeat,
                                           nhid,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=True)
Example #24
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        # nheads is the number of GraphAttentionLayer heads; their outputs are concatenated at the end
        super(GAT, self).__init__()
        self.dropout = dropout

        # define the multi-head graph attention layers
        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i),
                            attention)  # register each head as a PyTorch submodule

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #25
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.encoder = nn.Parameter(torch.zeros(size=(nfeat, nhid)))
        nn.init.xavier_uniform_(self.encoder.data, gain=1.414)

        self.attentions = [
            GraphAttentionLayer(nhid,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #26
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """密集版本的GAT."""
        #nfeat,nhid, nclass是输入层,特征,类别的数目
        #alpha控制leaky的斜率,实现代码主要在layers.py中
        super(GAT, self).__init__()
        self.dropout = dropout

        # apply multi-head attention over each node's neighbourhood
        self.attentions = [
            GraphAttentionLayer(nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)
        # a final attention layer aggregates the heads and produces the classification
        self.out_att = GraphAttentionLayer(nhid * nheads,
                                           nclass,
                                           dropout=dropout,
                                           alpha=alpha,
                                           concat=False)
Example #27
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

        self.conv_encoder = Conv1dGroup(out_channels=nheads)
        enc_nfeat = self.conv_encoder.calc_out_features(in_features=nfeat)
        self.attentions = [
            GraphAttentionLayer(enc_nfeat,
                                nhid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i),
                            attention)  # important: registers the head with the module graph

        # self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
        self.out_layer1 = nn.Linear(in_features=nhid * nheads, out_features=10)
        self.out_layer2 = nn.Linear(in_features=10, out_features=nclass)
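A sketch of how this variant might run end to end, assuming Conv1dGroup maps the raw node features to enc_nfeat features, each GraphAttentionLayer is called as layer(x, adj), and F is torch.nn.functional; the two linear layers stand in for the usual output attention layer.

    def forward(self, x, adj):
        # encode raw node features with the 1-D convolution group
        x = self.conv_encoder(x)
        x = F.dropout(x, self.dropout, training=self.training)
        # concatenate the multi-head attention outputs
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        x = F.dropout(x, self.dropout, training=self.training)
        # two-layer MLP head instead of an output attention layer
        x = F.relu(self.out_layer1(x))
        return F.log_softmax(self.out_layer2(x), dim=1)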
Example #28
    def __init__(self, params, vocab_size, ntags, pte=None):
        super(Classify, self).__init__()
        self.params = params
        self.word_embeddings = nn.Embedding(vocab_size, params.emb_dim)
        if pte is None:
            nn.init.xavier_uniform_(self.word_embeddings.weight)
        else:
            self.word_embeddings.weight.data.copy_(torch.from_numpy(pte))
        self.text_encoder = CnnEncoder(
            params.filters, params.emb_dim, params.kernel_size) if params.encoder == 1 else LstmEncoder(
            params.hidden_dim, params.emb_dim)
        self.dropout = nn.Dropout(params.dropout)
        if params.encoder == 2:
            self.gcn1 = GraphConvolution(params.hidden_dim, params.node_emb_dim, params.dropout, act=F.relu)
            self.linear_transform = nn.Linear(in_features=params.node_emb_dim,
                                              out_features=ntags)
        elif params.encoder == 3:
            self.gcn1 = GraphConvolution(params.hidden_dim, params.node_emb_dim, params.dropout, act=F.relu)
            # Add the attention thingy
            self.linear_transform = nn.Linear(in_features=params.node_emb_dim,
                                              out_features=ntags)
        elif params.encoder == 4:
            self.gcn1 = GraphAttentionLayer(params.hidden_dim, params.node_emb_dim, params.dropout, 0.2)
            self.attentions = [GraphAttentionLayer(params.hidden_dim, params.node_emb_dim, dropout=params.dropout,
                                                   alpha=0.2, concat=True) for _ in range(0)]
            for i, attention in enumerate(self.attentions):
                self.add_module('attention_{}'.format(i), attention)

            self.out_att = GraphAttentionLayer(params.hidden_dim, params.node_emb_dim, dropout=params.dropout,
                                               alpha=0.2, concat=False)
            # Add the attention thingy
            self.linear_transform = nn.Linear(in_features=params.node_emb_dim,
                                              out_features=ntags)
        elif params.encoder == 5:
            self.gcn1 = GraphAttentionLayer(params.hidden_dim, params.node_emb_dim, params.dropout, 0.2)
            self.attentions = [GraphAttentionLayer(params.hidden_dim, params.node_emb_dim, dropout=params.dropout,
                                                   alpha=0.2, concat=True) for _ in range(2)]
            for i, attention in enumerate(self.attentions):
                self.add_module('attention_{}'.format(i), attention)

            self.out_att = GraphAttentionLayer(params.node_emb_dim * 2, params.node_emb_dim, dropout=params.dropout,
                                               alpha=0.2, concat=False)
            # Add the attention thingy
            self.linear_transform = nn.Linear(in_features=params.node_emb_dim,
                                              out_features=ntags)
        else:
            self.linear_transform = nn.Linear(in_features=params.hidden_dim,
                                              out_features=ntags)
Example #29
    def __init__(self, n_feat, n_hid, n_nums, dropout, alpha, n_heads, n_gcn):
        """Dense version of GAT."""
        super(NMA, self).__init__()
        self.L = 2 * n_heads * n_hid
        self.D = n_feat
        self.K = 1
        self.n_nums = n_nums
        self.n_heads = n_heads
        self.dropout = dropout
        self.GCN = GraphConvolution(n_feat, n_feat, n_gcn)

        self.attentions = [
            GraphAttentionLayer(n_feat,
                                n_hid,
                                dropout=dropout,
                                alpha=alpha,
                                concat=True) for _ in range(n_heads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.attention_degs = nn.Sequential(
            nn.Linear(self.n_nums, self.n_nums), nn.ReLU())

        self.attention_distance = nn.Sequential(
            nn.Linear(self.n_nums, self.n_nums), nn.ReLU())

        self.attention_interlayer = nn.Sequential(nn.Linear(self.L, self.D),
                                                  nn.Tanh(),
                                                  nn.Linear(self.D, self.K))

        self.classifier = nn.Sequential(
            nn.Linear(self.L, self.L),
            nn.ReLU(),
            nn.Linear(self.L, 2),
        )