Code example #1
    def __init__(self,
                 nfeat,
                 hidden=16,
                 nb_heads=8,
                 n_output=100,
                 dropout=0.5,
                 alpha=0.3):
        """Sparse version of GAT."""
        super(SpGAT, self).__init__()
        self.dropout = nn.Dropout(dropout)
        # self.uV = uV
        # self.adj = adj.cuda()
        # self.user_tweet_embedding = nn.Embedding(self.uV, 300, padding_idx=0)
        # init.xavier_uniform_(self.user_tweet_embedding.weight)

        self.attentions = nn.ModuleList([
            SpGraphAttentionLayer(in_features=nfeat,
                                  out_features=hidden,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nb_heads)
        ])

        self.out_att = SpGraphAttentionLayer(hidden * nb_heads,
                                             n_output,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
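For orientation, a forward pass matching this constructor could look like the sketch below. This is an assumption, not code from the project: it presumes each SpGraphAttentionLayer is called as att(x, adj) and that the multi-head outputs are concatenated before the single output head, as in the reference pyGAT implementation.

    # A sketch only; assumes `import torch` and `import torch.nn.functional as F`.
    def forward(self, x, adj):
        x = self.dropout(x)
        # run every head and concatenate along the feature dimension,
        # giving hidden * nb_heads features (the input size of self.out_att)
        x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        x = self.dropout(x)
        # the output head was built with concat=False, so it maps to n_output features
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)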
Code example #2
    def __init__(self, num_nodes, nfeat, nhid, relation_dim, dropout, alpha, nheads):
        """
            Sparse version of GAT
            nfeat -> Entity Input Embedding dimensions
            nhid  -> Entity Output Embedding dimensions
            relation_dim -> Relation Embedding dimensions
            num_nodes -> number of nodes in the Graph
            nheads -> Used for Multihead attention

        """
        super(SpGAT, self).__init__()
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(self.dropout)
        self.attentions = [SpGraphAttentionLayer(num_nodes, nfeat,
                                                 nhid,
                                                 relation_dim,
                                                 dropout=dropout,
                                                 alpha=alpha,
                                                 concat=True)
                           for _ in range(nheads)]

        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        # W matrix to convert h_input to h_output dimension
        self.W = nn.Parameter(torch.zeros(size=(relation_dim, nheads * nhid)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)

        self.out_att = SpGraphAttentionLayer(num_nodes, nhid * nheads,
                                             nheads * nhid, nheads * nhid,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False
                                             )
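Note that this example (and most of the ones below) keeps the heads in a plain Python list and registers each one with add_module, whereas code example #1 wraps them in nn.ModuleList. Both achieve the same parameter registration; here is a small self-contained sketch of the two patterns, using nn.Linear as a stand-in for SpGraphAttentionLayer:

import torch.nn as nn

class HeadsDemo(nn.Module):
    def __init__(self, nheads=4):
        super(HeadsDemo, self).__init__()
        # A plain Python list is invisible to nn.Module: parameters stored in it
        # would not appear in .parameters() or move with .to(device), which is
        # why the snippets above call add_module for every head explicitly.
        heads = [nn.Linear(8, 8) for _ in range(nheads)]
        for i, head in enumerate(heads):
            self.add_module('attention_{}'.format(i), head)
        # Equivalent registration with nn.ModuleList (the pattern in code example #1):
        self.more_heads = nn.ModuleList([nn.Linear(8, 8) for _ in range(nheads)])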
Code example #3
File: aggregators.py Project: SherylHYX/snea
    def __init__(self,
                 _id,
                 features,
                 cuda=False,
                 in_feat_dim=32,
                 out_feat_dim=32,
                 nheads=1):
        super(NonFirstLayerAggregator, self).__init__()
        self.id = _id
        self.features = features
        self.cuda = cuda
        """
        In this part, we only use the one-head attention mechanism.
        Maybe you can set nheads>1 to modify it to multi-head, and use dropout method to get a better result.
        As a result, more computation time will be required.
        """

        self.attentions_bal = [
            SpGraphAttentionLayer(in_features=in_feat_dim,
                                  out_features=out_feat_dim,
                                  cuda_available=cuda) for _ in range(nheads)
        ]
        self.attentions_unbal = [
            SpGraphAttentionLayer(in_features=in_feat_dim,
                                  out_features=out_feat_dim,
                                  cuda_available=cuda) for _ in range(nheads)
        ]

        for i, attention in enumerate(self.attentions_bal +
                                      self.attentions_unbal):
            self.add_module('attention_{}_{}'.format(self.id, i), attention)
Code example #4
File: aggregators.py Project: SherylHYX/snea
    def __init__(self,
                 _id,
                 features,
                 only_layer,
                 cuda=False,
                 in_feat_dim=64,
                 out_feat_dim=32,
                 nheads=1):
        super(FirstLayerAggregator, self).__init__()
        self.id = _id
        self.features = features
        self.cuda = cuda
        self.only_layer = only_layer

        self.attentions_bal = [
            SpGraphAttentionLayer(in_features=in_feat_dim,
                                  out_features=out_feat_dim,
                                  cuda_available=cuda) for _ in range(nheads)
        ]
        self.attentions_unbal = [
            SpGraphAttentionLayer(in_features=in_feat_dim,
                                  out_features=out_feat_dim,
                                  cuda_available=cuda) for _ in range(nheads)
        ]

        for i, attention in enumerate(self.attentions_bal +
                                      self.attentions_unbal):
            self.add_module('attention_{}_{}'.format(self.id, i), attention)
Code example #5
File: models.py Project: lukemelas/CS-282-Project
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Sparse version of GAT."""
        super(SpGAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            SpGraphAttentionLayer(nfeat,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.attentions2 = [
            SpGraphAttentionLayer(nfeat * nhid,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]  # ADD
        for i, attention in enumerate(self.attentions2):
            self.add_module('attention2_{}'.format(i), attention)  # ADD

        self.out_att = SpGraphAttentionLayer(nhid * nheads,
                                             nclass,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
Code example #6
    def __init__(self, num_nodes, nfeat, nhid, dropout, alpha, nheads):
        """
            Sparse version of GAT
            nfeat -> Entity Input Embedding dimensions
            nhid  -> Entity Output Embedding dimensions
            num_nodes -> number of nodes in the Graph
            nheads -> Used for Multihead attention

        """
        super(SpGAT, self).__init__()
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(self.dropout)
        self.attentions = [
            SpGraphAttentionLayer(num_nodes,
                                  nfeat,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]

        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = SpGraphAttentionLayer(num_nodes,
                                             nheads * nhid,
                                             nheads * nhid,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
Code example #7
    def __init__(self, device, nfeat, nhid, output_dim, dropout, alpha,
                 nheads):
        """Sparse version of GAT."""
        super(SpGAT, self).__init__()
        self.device = device
        self.dropout = dropout
        self.nhid = nhid
        self.nheads = nheads
        self.nembed = nhid * nheads

        self.attentions = [
            SpGraphAttentionLayer(nfeat,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        # self.out_att = SpGraphAttentionLayer(nhid * nheads*2,
        #                                      output_dim,
        #                                      dropout=dropout,
        #                                      alpha=alpha,
        #                                      concat=False)
        self.classifier = nn.Sequential(nn.Linear(64 * 2, 64, bias=True),
                                        nn.ReLU(inplace=True),
                                        nn.Linear(64, output_dim, bias=True))
        self.loss_fn = nn.CrossEntropyLoss()
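The hard-coded 64 * 2 input of the classifier suggests that two embeddings of width nhid * nheads = 64 are concatenated and scored with the cross-entropy loss. The method below is a purely hypothetical usage sketch; its name, the pairing scheme, and the argument names are assumptions, not taken from the project:

    # Hypothetical helper, not part of the original class.
    def score_pairs(self, emb, pairs, labels):
        # emb:   (num_nodes, nhid * nheads) node embeddings from the attention heads
        # pairs: (batch, 2) indices of the two nodes scored jointly
        src, dst = pairs[:, 0], pairs[:, 1]
        logits = self.classifier(torch.cat([emb[src], emb[dst]], dim=1))
        return logits, self.loss_fn(logits, labels)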
Code example #8
File: models.py Project: zyy598/Source-Code-Notebook
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """稀疏版本的GAT."""
        #和密集GAT差不多,计算稀疏注意力的代码也在layers.py中。思路一致。
        super(SpGAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            SpGraphAttentionLayer(nfeat,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = SpGraphAttentionLayer(nhid * nheads,
                                             nclass,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
Code example #9
    def __init__(self, nfeat, nhid, dropout, alpha, nheads):
        """Sparse version of GAT."""
        super(SpGAT, self).__init__()
        self.dropout = dropout
        self.xent = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([10]))
        self.weight = nn.Parameter(torch.FloatTensor(nhid, nhid))
        init.xavier_uniform_(self.weight, gain=1.414)

        self.attentions = [SpGraphAttentionLayer(nfeat,
                                                 nhid,
                                                 dropout=dropout,
                                                 alpha=alpha,
                                                 concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = SpGraphAttentionLayer(nhid * nheads,
                                             nhid,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
Code example #10
File: models.py Project: sy950921/GAT-muse
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Sparse version of GAT."""
        super(SpGAT, self).__init__()
        self.dropout = dropout

        self.attentions = [
            SpGraphAttentionLayer(nfeat,
                                  nhid,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)

        self.out_att = SpGraphAttentionLayer(nhid * nheads,
                                             nclass,
                                             dropout=dropout,
                                             alpha=alpha,
                                             concat=False)
        self.tanh = torch.nn.Tanh()
        self.layer_norm = LayerNorm(nclass)
        self.emb_norm = EmbedNorm()
Code example #11
File: models.py Project: TraianVidrascu/Mixed-Code
    def __init__(self,
                 num_nodes,
                 nfeat,
                 nhid,
                 relation_dim,
                 dropout,
                 alpha,
                 nheads,
                 use_simple_layer=False):
        """
            Sparse version of GAT
            nfeat -> Entity Input Embedding dimensions
            nhid  -> Entity Output Embedding dimensions
            relation_dim -> Relation Embedding dimensions
            num_nodes -> number of nodes in the Graph
            nheads -> Used for Multihead attention

        """
        super(SpGAT, self).__init__()
        if CUDA:
            dev = 'cuda'
        else:
            dev = 'cpu'
        self.dropout = dropout
        self.dropout_layer = nn.Dropout(self.dropout)
        self.merge_input = MergeLayer(nhid * nheads)
        self.merge_output = MergeLayer(nhid * nheads)

        self.use_simple_layer = use_simple_layer
        if use_simple_layer:
            self.rel_layer = SimplerRelationLayer(relation_dim, nhid * nheads,
                                                  dev)
        else:
            self.rel_layer = RelationLayer(relation_dim, nhid * nheads, dev)

        self.attentions_inbound = [
            SpGraphAttentionLayer(num_nodes,
                                  nfeat,
                                  nhid,
                                  relation_dim,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]

        self.attentions_outbound = [
            SpGraphAttentionLayer(num_nodes,
                                  nfeat,
                                  nhid,
                                  relation_dim,
                                  dropout=dropout,
                                  alpha=alpha,
                                  concat=True) for _ in range(nheads)
        ]

        for i, attention in enumerate(self.attentions_inbound):
            self.add_module('attention_inbound_{}'.format(i), attention)

        for i, attention in enumerate(self.attentions_outbound):
            self.add_module('attention_outbound_{}'.format(i), attention)

        self.out_att_inbound = SpGraphAttentionLayer(num_nodes,
                                                     nhid * nheads,
                                                     nheads * nhid,
                                                     nheads * nhid,
                                                     dropout=dropout,
                                                     alpha=alpha,
                                                     concat=False)

        self.out_att_outbound = SpGraphAttentionLayer(num_nodes,
                                                      nhid * nheads,
                                                      nheads * nhid,
                                                      nheads * nhid,
                                                      dropout=dropout,
                                                      alpha=alpha,
                                                      concat=False)