Example 1
    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation=F.relu,
                 num_heads=2):
        super(GAT, self).__init__()
        self.layers = nn.ModuleList()

        # input layer
        self.layers.append(
            GATConv(in_feats=in_feats,
                    out_feats=n_hidden,
                    num_heads=num_heads,
                    activation=activation)
        )

        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                GATConv(in_feats=n_hidden * num_heads,  # previous layer concatenates its heads
                        out_feats=n_hidden,
                        num_heads=num_heads,
                        activation=activation)
            )
        # output layer
        self.linear1 = nn.Linear(n_hidden * num_heads, n_hidden // 2)
        self.linear2 = nn.Linear(n_hidden // 2, n_classes)
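
The snippet only shows the constructor; a minimal forward sketch is given below. It assumes the usual DGL convention that GATConv returns a tensor of shape (N, num_heads, out_feats), which is flattened before the linear head; the graph argument g and the use of F.relu are assumptions, not taken from the original code.

    # Hypothetical forward pass matching the constructor above.
    def forward(self, g, features):
        h = features
        for layer in self.layers:
            # each GATConv output is (N, num_heads, n_hidden); flatten the heads
            h = layer(g, h).flatten(1)
        h = F.relu(self.linear1(h))
        return self.linear2(h)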
Example 2
    def __init__(self, in_dim, hidden_dim, num_classes):

        super(GraphGATClassifier, self).__init__()
        self.layer1 = GATConv(in_dim, hidden_dim, 1, allow_zero_in_degree=True)
        self.layer2 = GATConv(hidden_dim, hidden_dim, 1, allow_zero_in_degree=True)
        self.layer3 = GATConv(hidden_dim, hidden_dim, 1, allow_zero_in_degree=True)
        self.classify = nn.Linear(hidden_dim, num_classes)
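
Only the constructor is shown; a possible graph-classification forward is sketched below. It assumes dgl and torch.nn.functional (as F) are imported; with num_heads=1 the head dimension is squeezed out and node embeddings are mean-pooled per graph before the final linear classifier.

    # Hypothetical forward pass (not part of the original snippet).
    def forward(self, g, h):
        h = F.relu(self.layer1(g, h).squeeze(1))
        h = F.relu(self.layer2(g, h).squeeze(1))
        h = F.relu(self.layer3(g, h).squeeze(1))
        g.ndata['h'] = h                 # store node embeddings on the graph
        hg = dgl.mean_nodes(g, 'h')      # graph-level readout
        return self.classify(hg)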
Example 3
 def __init__(self, in_dim: int, hidden_dim: int, out_dim: int,
              num_heads: int) -> None:
     super(GAT_Node_Classifier, self).__init__()
     self.layer1 = GATConv(in_dim, hidden_dim, num_heads)
     # Note that the input dimension is hidden_dim * num_heads, since the
     # outputs of the attention heads are concatenated. The output layer
     # uses a single attention head.
     self.layer2 = GATConv(hidden_dim * num_heads, out_dim, 1)
Example 4
    def __init__(self, args):
        super(GAT, self).__init__()
        self.args = args

        missing_keys = list(
            set(
                [
                    "features_num",
                    "num_class",
                    "num_layers",
                    "hidden",
                    "heads",
                    "dropout",
                    "act",
                ]
            )
            - set(self.args.keys())
        )
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))

        self.num_layer = int(self.args["num_layers"])
        if not self.num_layer == len(self.args["hidden"]) + 1:
            LOGGER.warn("Warning: layer size does not match the length of hidden units")
        self.convs = torch.nn.ModuleList()

        self.convs.append(
            GATConv(
                self.args["features_num"],
                self.args["hidden"][0],
                num_heads =self.args["heads"],
                feat_drop=self.args.get("feat_drop", self.args["dropout"]),
                attn_drop=self.args["dropout"],
            )
        )
        last_dim = self.args["hidden"][0] * self.args["heads"]
        for i in range(self.num_layer - 2):
            self.convs.append(
                GATConv(
                    last_dim,
                    self.args["hidden"][i + 1],
                    num_heads=self.args["heads"],
                    feat_drop=self.args.get("feat_drop", self.args["dropout"]),
                    attn_drop=self.args["dropout"],
                )
            )
            last_dim = self.args["hidden"][i + 1] * self.args["heads"]
        self.convs.append(
            GATConv(
                last_dim,
                self.args["num_class"],
                num_heads=1,
                feat_drop=self.args.get("feat_drop", self.args["dropout"]),
                attn_drop=self.args["dropout"],
            )
        )
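
The constructor builds the layer stack but the forward pass is not shown; a plausible sketch follows. The flatten/mean handling of the head dimension and the use of F.elu between layers are assumptions about how this args-driven model would be run, not part of the original code.

    # Hypothetical forward pass for the config-driven GAT above.
    def forward(self, g, x):
        for conv in self.convs[:-1]:
            x = F.elu(conv(g, x).flatten(1))   # concatenate the attention heads
        return self.convs[-1](g, x).mean(1)    # single-head output layer -> class logits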
Example 5
 def __init__(self, in_feats, out_feats, n_layers):
     super(MGCN, self).__init__()
     self.in_feats = in_feats
     self.out_feats = out_feats
     self.GATLayers = nn.ModuleList([])
     self.GATLayers.append(GATConv(in_feats, out_feats, num_heads=2, activation=None))
     self.GATLayers.append(GATConv(out_feats, out_feats, num_heads=2, activation=None))
     self.GATLayers.append(GATConv(out_feats, out_feats, num_heads=2, activation=None))
     self.seq_fc1 = nn.Linear(out_feats, out_feats)
     self.seq_fc2 = nn.Linear(out_feats, out_feats)
     self.bias = nn.Parameter(torch.rand(1, out_feats))
     torch.nn.init.uniform_(self.bias, a=-0.2, b=0.2)
Example 6
    def __init__(self, in_dim, hidden_dim, out_dim, heads=4, dropout=0.3):
        super(GNN_GAT, self).__init__()

        self.layers = nn.ModuleList([
            GATConv(in_dim, hidden_dim, heads, residual=True, activation=F.leaky_relu),
            GATConv(heads * hidden_dim, hidden_dim, heads, feat_drop=dropout, residual=True, activation=F.leaky_relu),
        ])

        self.bn = nn.ModuleList([
            nn.BatchNorm1d(heads * hidden_dim),
            nn.BatchNorm1d(heads * hidden_dim),
        ])

        self.last_layer = GATConv(heads * hidden_dim, out_dim, heads, residual=True)
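
A matching forward pass is not included in the snippet; the sketch below is one plausible way to wire it up, assuming the BatchNorm layers are applied to the flattened multi-head outputs and the heads of the final layer are averaged.

    # Hypothetical forward pass (assumed, not from the original code).
    def forward(self, g, h):
        for conv, bn in zip(self.layers, self.bn):
            h = bn(conv(g, h).flatten(1))      # (N, heads * hidden_dim)
        return self.last_layer(g, h).mean(1)   # average the heads of the output layer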
Example 7
    def __init__(
        self,
        batch_size,
        window_size,
        n_dim,
        hidden_dim=32,
        num_heads=7,
        num_layers=1,
        feat_drop=0,
        attn_drop=0,
        residual=False,
    ):
        super().__init__()

        self.n_dim = n_dim
        self.window_size = window_size
        self.batch_size = batch_size
        self.feat_graph = self.__build_graph(n_dim, batch_size)
        self.time_graph = self.__build_graph(window_size, batch_size)

        self.feat_gat = GATConv(
            in_feats=window_size,
            out_feats=hidden_dim,
            num_heads=num_heads,
            feat_drop=feat_drop,
            attn_drop=attn_drop,
            residual=residual,
        )
        self.time_gat = GATConv(
            in_feats=n_dim,
            out_feats=hidden_dim,
            num_heads=num_heads,
            feat_drop=feat_drop,
            attn_drop=attn_drop,
            negative_slope=0.2,
            residual=residual,
        )

        self.rnn = nn.GRU(
            input_size=2 * hidden_dim + n_dim,
            hidden_size=hidden_dim,
            # num_layers=num_layers,
            batch_first=True,
        )

        self.reconst_ln = nn.Linear(hidden_dim, n_dim * window_size)
        self.forcast_ln = nn.Linear(hidden_dim, window_size)
Example 8
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              num_classes,
              num_heads=1,
              feat_drop=0.1,
              attn_drop=0.1,
              negative_slope=None,
              residual=True,
              activation=None):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(
         GATConv(in_dim,
                 num_hidden,
                 num_heads,
                 feat_drop=feat_drop,
                 attn_drop=attn_drop,
                 residual=False,
                 activation=activation))
     # hidden layers
     for l in range(1, num_layers):
         # due to multi-head, the in_dim = num_hidden * num_heads
         self.gat_layers.append(
             GATConv(num_hidden * num_heads,
                     num_hidden,
                     num_heads,
                     feat_drop=feat_drop,
                     attn_drop=attn_drop,
                     residual=residual,
                     activation=activation))
     # output projection
     self.gat_layers.append(
         GATConv(num_hidden * num_heads,
                 num_classes,
                 num_heads,
                 feat_drop=feat_drop,
                 attn_drop=attn_drop,
                 residual=residual,
                 activation=activation))
Example 9
    def __init__(self, in_dim, hidden_dim, n_classes, n_heads=8, drop=.5, attn_drop=.5, device='cuda:0'):
        super(ClassifierGAT, self).__init__()

        self.encoder = nn.Linear(in_dim, hidden_dim).to(torch.device(device))
        
        self.layers = nn.ModuleList([
            GATConv(hidden_dim, hidden_dim, num_heads=n_heads, activation=F.leaky_relu,
                    feat_drop=drop, attn_drop=attn_drop).to(torch.device(device)),
            GATConv(n_heads*hidden_dim, hidden_dim, num_heads=n_heads, activation=F.leaky_relu,
                    feat_drop=drop, attn_drop=attn_drop).to(torch.device(device))
        ])
        
        self.lin = nn.Linear(n_heads * hidden_dim + hidden_dim, 1).to(torch.device(device))
        self.pooling = GlobalAttentionPooling(self.lin).to(torch.device(device))

        self.norm = nn.BatchNorm1d(n_heads * hidden_dim + hidden_dim)
        self.drop = nn.Dropout(drop)

        self.classify = nn.Linear(n_heads * hidden_dim + hidden_dim, n_classes).to(torch.device(device))
Example 10
	def __init__(self, num_layers, d, out_d, residual, reinit, **kwargs):

		super().__init__(emb_size=d)

		self.d = d
		self.num_layers = num_layers
		self.residual = residual

		self.layers = nn.ModuleList([GATConv(d, d, 1) for _ in range(num_layers)])

		self.ln = nn.Linear(d, out_d)
Example 11
 def __init__(self, n_heads, residual, **kwargs):
     super().__init__(**kwargs)
     assert self.hidden_dim % n_heads == 0, 'hidden_dim needs to be divisible by n_heads for shapes to align'
     self.layers = nn.ModuleList()
     for _ in range(self.n_layers):
         self.layers.append(GATConv(in_feats=self.hidden_dim,
                                    out_feats=self.hidden_dim // n_heads,
                                    num_heads=n_heads,
                                    feat_drop=self.p_dropout,
                                    attn_drop=self.p_dropout,
                                    residual=residual,
                                    activation=self.get_act()))
Example 12
 def __init__(self, config):
     super().__init__(config)
     self.bert = BertModel(config)
     # graph
     self.dropout = nn.Dropout(config.hidden_dropout_prob)
     self.srl_emb_gat = GATConv(dict_params['in_feats'],
                                dict_params['out_feats'],
                                2,
                                feat_drop=dict_params['feat_drop'],
                                attn_drop=dict_params['attn_drop'],
                                residual=True)
     self.gat = GAT(dict_params['gat_layers'],
                    dict_params['in_feats'],
                    dict_params['in_feats'],
                    dict_params['out_feats'],
                    2,
                    feat_drop=dict_params['feat_drop'],
                    attn_drop=dict_params['attn_drop'])
     ## node classification
     ### ent node
     self.dropout_ent = nn.Dropout(config.hidden_dropout_prob)
     self.ent_classifier = nn.Sequential(
         nn.Linear(2 * dict_params['out_feats'],
                   dict_params['hidden_size_classifier']), nn.ReLU(),
         nn.Linear(dict_params['hidden_size_classifier'], 2))
     ### srl node
     self.dropout_srl = nn.Dropout(config.hidden_dropout_prob)
     self.srl_classifier = nn.Sequential(
         nn.Linear(2 * dict_params['out_feats'],
                   dict_params['hidden_size_classifier']), nn.ReLU(),
         nn.Linear(dict_params['hidden_size_classifier'], 2))
     ### sent node
     self.dropout_sent = nn.Dropout(config.hidden_dropout_prob)
     self.sent_classifier = nn.Sequential(
         nn.Linear(2 * dict_params['out_feats'],
                   dict_params['hidden_size_classifier']), nn.ReLU(),
         nn.Linear(dict_params['hidden_size_classifier'], 2))
     # graph 2 token attention
     self.graph2token_attention = GAT(1, dict_params['in_feats'],
                                      dict_params['in_feats'],
                                      dict_params['out_feats'])
     # span prediction
     self.num_labels = config.num_labels
     self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
     # init weights
     self.init_weights()
     # params
     self.weight_sent_loss = dict_params['weight_sent_loss']
     self.weight_srl_loss = dict_params['weight_srl_loss']
     self.weight_ent_loss = dict_params['weight_ent_loss']
     self.weight_span_loss = dict_params['weight_span_loss']
Example 13
    def __init__(self,
                 in_dim,
                 hidden_dim_1,
                 num_classes,
                 feat_drop=0,
                 attn_drop=0,
                 use_cuda=False):
        """
        Constructor for the GraphAttentionConvBinaryClassifier class
        Parameters:
            in_dim (int): Dimension of features for each node
            hidden_dim_1 (int): Dimension of hidden embeddings
            num_classes (int): Number of output classes
            feat_drop (float): Indicates the dropout rate for features
            attn_drop (float): Indicates the dropout rate for the attention mechanism
            use_cuda (bool): Indicates whether GPU should be utilized or not
        """
        super(GraphAttentionConvBinaryClassifier, self).__init__()

        # Model layers
        self.conv1 = GATConv(in_dim,
                             hidden_dim_1,
                             feat_drop=feat_drop,
                             attn_drop=attn_drop,
                             num_heads=1)
        self.conv2 = GATConv(hidden_dim_1,
                             hidden_dim_1,
                             feat_drop=feat_drop,
                             attn_drop=attn_drop,
                             num_heads=1)

        self.fc_1 = nn.Linear(hidden_dim_1, num_classes)

        self.out = nn.LogSoftmax(dim=1)

        self.use_cuda = use_cuda
Example 14
 def __init__(self, features_dim, h_dim, out_dim , num_rels, num_bases=-1, num_hidden_layers=2, classifier=False):
     super(Model, self).__init__()
     
     self.features_dim, self.h_dim, self.out_dim = features_dim, h_dim, out_dim
     self.num_hidden_layers = num_hidden_layers
     self.num_rels = num_rels
     self.num_bases = num_bases
     # create rgcn layers
     self.build_model()
     
     
     self.attn = GATConv(in_feats=self.out_dim, out_feats=self.out_dim, num_heads=1)
     self.dense = nn.Linear(self.out_dim, 1)
     self.pool = SumPooling()
     self.is_classifier = classifier
Example 15
    def __init__(self,
                 in_feats,
                 n_classes,
                 n_hidden,
                 n_layers,
                 n_heads,
                 activation,
                 dropout=0.0,
                 attn_drop=0.0):
        super().__init__()
        self.in_feats = in_feats
        self.n_classes = n_classes
        self.n_hidden = n_hidden
        self.n_layers = n_layers
        self.num_heads = n_heads

        self.convs = nn.ModuleList()
        self.linear = nn.ModuleList()
        self.bns = nn.ModuleList()

        for i in range(n_layers):
            in_hidden = n_heads * n_hidden if i > 0 else in_feats
            out_hidden = n_hidden if i < n_layers - 1 else n_classes
            out_channels = n_heads

            self.convs.append(
                GATConv(in_hidden,
                        out_hidden,
                        num_heads=n_heads,
                        attn_drop=attn_drop))
            self.linear.append(
                nn.Linear(in_hidden, out_channels * out_hidden, bias=False))
            if i < n_layers - 1:
                self.bns.append(nn.BatchNorm1d(out_channels * out_hidden))

        self.bias_last = Bias(n_classes)

        self.dropout0 = nn.Dropout(min(0.1, dropout))
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
Example 16
    def __init__(self,
                 in_feats: tuple,
                 out_feats,
                 weight=True,
                 device=None,
                 dropout_rate=0.0):
        super(GCMCGraphGAT, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self.device = device
        self.dropout = nn.Dropout(dropout_rate)

        if weight:
            self.feat1 = nn.Parameter(th.Tensor(in_feats[0], 10))
            self.feat2 = nn.Parameter(th.Tensor(in_feats[1], 10))
        else:
            self.register_parameter('weight', None)
        self.reset_parameters()

        self.gat = GATConv((10, 10),
                           out_feats,
                           8,
                           attn_drop=dropout_rate,
                           allow_zero_in_degree=True)
Example 17
 def __init__(self, nfeat, nhid, nclass, dropout):
     super(GAT, self).__init__()
     self.dropout = dropout
     self.conv1 = GATConv(nfeat, nhid, num_heads=1)
     self.conv2 = GATConv(nhid, nclass, num_heads=1)
Example 18
from data.data_processtor import processtor
from dgl.nn.pytorch.conv import GATConv
import numpy as np
import torch
import logging

LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
DATE_FORMAT = '%Y-%m-%d %H-%M-%S %p'
logging.basicConfig(level=logging.DEBUG,
                    format=LOG_FORMAT,
                    datefmt=DATE_FORMAT)

users, items, graph = processtor('ml-100k')

# init GAT model.
gat = GATConv(in_feats=91, out_feats=32, num_heads=2)

input = torch.from_numpy(items).float()

output = gat(graph, input)

print(output.size())
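
Assuming the standard DGL GATConv output layout, output here keeps a separate head dimension: (num_nodes, num_heads, out_feats), i.e. (items.shape[0], 2, 32). A common follow-up step (an addition, not part of the original script) is to merge the heads before any downstream layer:

# Merge the two attention heads into a single feature vector per node.
merged = output.flatten(1)   # (num_nodes, 2 * 32) = (num_nodes, 64)
print(merged.size())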
Example 19
 def __init__(self, in_dim: int, hidden_dim: int, num_heads: int,
              out_dim: int) -> None:
     super(GAT_Graph_Classifier, self).__init__()
     self.conv1 = GATConv(in_dim, hidden_dim, num_heads)
     self.conv2 = GATConv(hidden_dim * num_heads, hidden_dim, num_heads)
     self.classify = torch.nn.Linear(hidden_dim * num_heads, out_dim)
Example 20
    def __init__(self,
                 model_name,
                 nentity,
                 nrelation,
                 hidden_dim,
                 gamma,
                 g,
                 double_entity_embedding=False,
                 double_relation_embedding=False):
        super(KGEModel, self).__init__()
        self.model_name = model_name
        self.nentity = nentity
        self.nrelation = nrelation
        self.hidden_dim = hidden_dim
        self.epsilon = 2.0
        self.g = g

        self.gamma = nn.Parameter(torch.Tensor([gamma]), requires_grad=False)

        self.embedding_range = nn.Parameter(
            torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
            requires_grad=False)

        self.entity_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
        self.relation_dim = hidden_dim * 2 if double_relation_embedding else hidden_dim

        self.entity_embedding = nn.Parameter(
            torch.zeros(nentity, self.entity_dim))
        nn.init.uniform_(tensor=self.entity_embedding,
                         a=-self.embedding_range.item(),
                         b=self.embedding_range.item())

        self.gcn = GATConv(self.entity_dim,
                           self.entity_dim,
                           num_heads=3,
                           residual=True)
        self.relation_embedding = nn.Parameter(
            torch.zeros(nrelation, self.relation_dim))
        nn.init.uniform_(tensor=self.relation_embedding,
                         a=-self.embedding_range.item(),
                         b=self.embedding_range.item())

        if model_name == 'pRotatE':
            self.modulus = nn.Parameter(
                torch.Tensor([[0.5 * self.embedding_range.item()]]))

        #Do not forget to modify this line when you add a new model in the "forward" function
        if model_name not in [
                'TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE'
        ]:
            raise ValueError('model %s not supported' % model_name)

        if model_name == 'RotatE' and (not double_entity_embedding
                                       or double_relation_embedding):
            raise ValueError('RotatE should use --double_entity_embedding')

        if model_name == 'ComplEx' and (not double_entity_embedding
                                        or not double_relation_embedding):
            raise ValueError(
                'ComplEx should use --double_entity_embedding and --double_relation_embedding'
            )
Example 21
 def __init__(self, in_dim: int, hidden_dim: int, num_heads: int,
              out_dim: int) -> None:
     super(Instance_Graphs_GAT, self).__init__()
     self.conv1 = GATConv(in_dim, hidden_dim, num_heads)
     self.conv2 = GATConv(hidden_dim * num_heads, out_dim, 1)