def __init__(self, num_nodes, embedding_dim, features_dim, n_hidden,
                 n_classes, n_layers, activation, dropout, aggregator_type,
                 **kwargs):
        super(GraphSAGE, self).__init__()
        # embedding layer
        self.embedding = nn.Embedding(num_nodes, embedding_dim=embedding_dim)
        self.layers = nn.ModuleList()

        in_feats = embedding_dim + features_dim
        # input layer
        self.layers.append(
            SAGEConv(in_feats,
                     n_hidden,
                     aggregator_type,
                     feat_drop=dropout,
                     activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                SAGEConv(n_hidden,
                         n_hidden,
                         aggregator_type,
                         feat_drop=dropout,
                         activation=activation))
        # output layer
        self.layers.append(
            SAGEConv(n_hidden,
                     n_classes,
                     aggregator_type,
                     feat_drop=dropout,
                     activation=None))  # activation None
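The forward pass is not shown here; a minimal sketch of how such a constructor is typically driven (the toy graph and all sizes below are placeholders, not from the source) looks like this:

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

# toy graph with 4 nodes; self-loops keep every node with at least one neighbor
g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 3]), num_nodes=4))

embedding = torch.nn.Embedding(4, 8)            # num_nodes=4, embedding_dim=8
features = torch.randn(4, 5)                    # features_dim=5
h = torch.cat([embedding.weight, features], 1)  # in_feats = 8 + 5 = 13

layers = torch.nn.ModuleList([
    SAGEConv(13, 16, 'mean', feat_drop=0.5, activation=F.relu),  # input layer
    SAGEConv(16, 16, 'mean', feat_drop=0.5, activation=F.relu),  # hidden layer
    SAGEConv(16, 3, 'mean', feat_drop=0.5, activation=None),     # output layer
])
for layer in layers:
    h = layer(g, h)
print(h.shape)  # torch.Size([4, 3]) -- one score vector per node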
Example #2
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout, aggregator_type, num_genes):
        super(GraphSAGE, self).__init__()

        self.num_genes = num_genes

        self.layers = nn.ModuleList()

        # input layer
        self.layers.append(
            SAGEConv(in_feats,
                     n_hidden,
                     aggregator_type,
                     feat_drop=dropout,
                     activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                SAGEConv(n_hidden,
                         n_hidden,
                         aggregator_type,
                         feat_drop=dropout,
                         activation=activation))

        # self.dropout = nn.Dropout(p=0.5)

        # output layer
        self.linear1 = nn.Linear(n_hidden * 3, n_hidden)
        self.dense1_bn = nn.BatchNorm1d(n_hidden)
        self.linear2 = nn.Linear(n_hidden, n_classes)
Example #3
    def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout, aggregator_type):
        super(GraphSAGE, self).__init__()
        self.layers = nn.ModuleList()
        self.g = g

        # input layer
        self.layers.append(
            SAGEConv(in_feats,
                     n_hidden,
                     aggregator_type,
                     feat_drop=dropout,
                     activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                SAGEConv(n_hidden,
                         n_hidden,
                         aggregator_type,
                         feat_drop=dropout,
                         activation=activation))
        # output layer
        self.layers.append(
            SAGEConv(n_hidden,
                     n_classes,
                     aggregator_type,
                     feat_drop=dropout,
                     activation=None))  # activation None
Example #4
 def __init__(self, in_dim, hidden_dim, n_classes):
     # note: the 'lstm' and 'pool' aggregators have the best performance reported in the GraphSAGE paper; the typical number of hidden layers is 2
     super(Net, self).__init__()
     self.layers = nn.ModuleList([
         SAGEConv(
             in_dim,
             hidden_dim,
             aggregator_type='lstm',
             feat_drop=0.5,
             activation=F.relu
         ),  # need to change if we have more node or edge features 
         SAGEConv(hidden_dim,
                  hidden_dim,
                  aggregator_type='lstm',
                  feat_drop=0.5,
                  activation=F.relu),
         # SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
         SAGEConv(hidden_dim,
                  n_classes,
                  aggregator_type='lstm',
                  activation=None)
         # SAGEConv(in_dim, hidden_dim, aggregator_type='pool', feat_drop=0.5, activation=F.relu), # need to change if we have more node or edge features
         # SAGEConv(hidden_dim, hidden_dim, aggregator_type='pool', feat_drop=0.5,activation=F.relu),
         # # SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
         # SAGEConv(hidden_dim, n_classes, aggregator_type='pool', activation=None)
     ])
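A quick, self-contained sanity check of SAGEConv with the 'lstm' aggregator (all sizes are made up); the LSTM aggregator treats each node's neighbor features as a sequence, so it is sensitive to neighbor ordering:

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

g = dgl.graph(([0, 1, 2, 3], [1, 2, 3, 0]))  # directed 4-cycle, so every node has a neighbor
feat = torch.randn(4, 6)                     # in_dim = 6 (placeholder)
conv = SAGEConv(6, 16, aggregator_type='lstm', feat_drop=0.5, activation=F.relu)
print(conv(g, feat).shape)                   # torch.Size([4, 16])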
Example #5
    def __init__(self, layer_sizes):
        super().__init__()

        input_size, hidden_size, embedding_size = layer_sizes

        self.convs = nn.ModuleList([
            SAGEConv(input_size, hidden_size, 'mean'),
            SAGEConv(hidden_size, hidden_size, 'mean'),
            SAGEConv(hidden_size, embedding_size, 'mean')
        ])

        self.skip_lins = nn.ModuleList([
            nn.Linear(input_size, hidden_size, bias=False),
            nn.Linear(input_size, hidden_size, bias=False),
        ])

        self.layer_norms = nn.ModuleList([
            LayerNorm(hidden_size),
            LayerNorm(hidden_size),
            LayerNorm(embedding_size),
        ])

        self.activations = nn.ModuleList([
            nn.PReLU(),
            nn.PReLU(),
            nn.PReLU(),
        ])
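The forward wiring of these pieces is not shown; one plausible arrangement, assuming the common skip-connection pattern where the raw input is linearly projected into the deeper layers (the class name and wiring below are assumptions, not taken from the source):

import torch
import torch.nn as nn
import dgl
from dgl.nn.pytorch import SAGEConv
from torch.nn import LayerNorm

class SkipSAGE(nn.Module):
    # hypothetical re-statement of the constructor above plus an assumed forward pass
    def __init__(self, layer_sizes=(5, 16, 8)):
        super().__init__()
        input_size, hidden_size, embedding_size = layer_sizes
        self.convs = nn.ModuleList([
            SAGEConv(input_size, hidden_size, 'mean'),
            SAGEConv(hidden_size, hidden_size, 'mean'),
            SAGEConv(hidden_size, embedding_size, 'mean'),
        ])
        self.skip_lins = nn.ModuleList([
            nn.Linear(input_size, hidden_size, bias=False),
            nn.Linear(input_size, hidden_size, bias=False),
        ])
        self.layer_norms = nn.ModuleList(
            [LayerNorm(hidden_size), LayerNorm(hidden_size), LayerNorm(embedding_size)])
        self.activations = nn.ModuleList([nn.PReLU(), nn.PReLU(), nn.PReLU()])

    def forward(self, g, x):
        # each deeper layer also sees a linear skip projection of the raw input
        h1 = self.activations[0](self.layer_norms[0](self.convs[0](g, x)))
        h2 = self.activations[1](self.layer_norms[1](
            self.convs[1](g, h1 + self.skip_lins[0](x))))
        h3 = self.activations[2](self.layer_norms[2](
            self.convs[2](g, h1 + h2 + self.skip_lins[1](x))))
        return h3

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
print(SkipSAGE()(g, torch.randn(3, 5)).shape)  # torch.Size([3, 8])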
Example #6
    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout,
                 aggregator_type):
        super(GraphSAGE, self).__init__()
        self.layers = nn.ModuleList()
        # self.dropout = mlpcell.Dropout(dropout)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation

        # self.layers.append(SAGEConvOpt(in_feats, n_hidden, 'mean', feat_drop=dropout, bias=True, norm=None, activation=activation))
        # for i in range(1, n_layers - 1):
        #     self.layers.append(SAGEConvOpt(n_hidden, n_hidden, 'mean', feat_drop=dropout, bias=True, norm=None, activation=activation))
        # self.layers.append(SAGEConvOpt(n_hidden, n_classes, 'mean', feat_drop=0.0, bias=True)) # activation None

        # # input layer
        self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type))
        # # hidden layers
        for i in range(1, n_layers - 1):
            self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type))
        # # output layer
        self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type)) # activation None
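The stored activation and dropout are typically applied between layers in the forward pass; a self-contained, hypothetical sketch of that pattern (class name and sizes are placeholders, not from the source):

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

class TinySAGE(torch.nn.Module):
    # minimal sketch mirroring the constructor above (hypothetical defaults)
    def __init__(self, in_feats=4, n_hidden=8, n_classes=3, n_layers=2,
                 activation=F.relu, dropout=0.5, aggregator_type='mean'):
        super().__init__()
        self.layers = torch.nn.ModuleList()
        self.dropout = torch.nn.Dropout(dropout)
        self.activation = activation
        self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type))
        for _ in range(1, n_layers - 1):
            self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type))
        self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type))

    def forward(self, g, h):
        for i, layer in enumerate(self.layers):
            h = layer(g, h)
            if i != len(self.layers) - 1:        # no activation/dropout on the logits
                h = self.dropout(self.activation(h))
        return h

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
print(TinySAGE()(g, torch.randn(3, 4)).shape)  # torch.Size([3, 3])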
Example #7
    def __init__(self, in_dim, hidden_dim, n_classes):
        super(Net, self).__init__()

        self.layers = nn.ModuleList([
            SAGEConv(in_dim, hidden_dim, aggregator_type='lstm'),
            # SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
            SAGEConv(hidden_dim, hidden_dim, aggregator_type='lstm')
        ])
        self.linear = nn.Linear(hidden_dim, n_classes)  # predicts the classes
Example #8
 def __init__(self, in_dim, hidden_dim, n_classes):
     super(Net, self).__init__()
     self.layers = nn.ModuleList([
         SAGEConv(
             in_dim, hidden_dim, aggregator_type='lstm'
         ),  # need to change if we have more node or edge features 
         SAGEConv(hidden_dim, hidden_dim, aggregator_type='lstm'),
         SAGEConv(hidden_dim, hidden_dim, aggregator_type='lstm')
     ])
     self.linear = nn.Linear(hidden_dim, n_classes)  # predicts the classes
Example #9
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
              dropout, aggregator_type):
     super(GraphSAGE, self).__init__()
     self.layers = nn.ModuleList()
     self.dropout = nn.Dropout(dropout)
     self.activation = activation
     self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type))
     # hidden layers
     for i in range(n_layers - 1):
         self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type))
     # output layer
     self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type))
Example #10
 def __init__(self, in_dim, hidden_dim, n_classes):
     super(Net, self).__init__()
     self.layers = nn.ModuleList([
         # please see the model implementation in the following
         # https://docs.dgl.ai/_modules/dgl/nn/pytorch/conv/sageconv.html#SAGEConv
         SAGEConv(
             in_dim, hidden_dim, aggregator_type='mean'
         ),  # need to change if we have more node or edge features 
         SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
         SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean')
     ])
     self.linear = nn.Linear(hidden_dim, n_classes)  # predicts the classes
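The trailing nn.Linear suggests a whole-graph classification head; a hypothetical, self-contained sketch assuming a mean-over-nodes readout (the readout is not shown in the source, and class name and sizes are placeholders):

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

class SAGEGraphClassifier(torch.nn.Module):
    # same layer layout as above; the forward uses an assumed mean-nodes readout
    def __init__(self, in_dim=4, hidden_dim=16, n_classes=2):
        super().__init__()
        self.layers = torch.nn.ModuleList([
            SAGEConv(in_dim, hidden_dim, aggregator_type='mean'),
            SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
            SAGEConv(hidden_dim, hidden_dim, aggregator_type='mean'),
        ])
        self.linear = torch.nn.Linear(hidden_dim, n_classes)

    def forward(self, g, h):
        for conv in self.layers:
            h = F.relu(conv(g, h))
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')   # average node states into one graph vector
        return self.linear(hg)        # per-graph class scores

g = dgl.graph(([0, 1, 2], [1, 2, 0]))
print(SAGEGraphClassifier()(g, torch.randn(3, 4)).shape)  # torch.Size([1, 2])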
Example #11
    def __init__(self,
                 annotation_size,
                 out_feats,
                 n_steps,
                 device,
                 n_etypes=12,
                 gnn_type='sage',
                 sage_type='gcn',
                 tok_embedding=0,
                 residual=False):

        super(Graph_NN, self).__init__()

        self.annotation_size = annotation_size
        self.out_feats = out_feats
        self.layers = nn.ModuleList()
        # control signals
        self.tok_embedding_flag = tok_embedding
        self.residual = residual
        self.gnn_type = gnn_type

        if tok_embedding == 1:
            self.tok_embedding = nn.Linear(out_feats, out_feats)
        elif tok_embedding == 2:
            self.tok_embedding = nn.Sequential(
                nn.Embedding(annotation_size, out_feats), )
        #ggnn
        if gnn_type == 'ggnn':
            self.ggnn = GatedGraphConv(in_feats=out_feats,
                                       out_feats=out_feats,
                                       n_steps=n_steps,
                                       n_etypes=n_etypes)
        #graphsage
        if gnn_type == 'sage':
            self.layers.append(
                SAGEConv(out_feats,
                         out_feats,
                         sage_type,
                         feat_drop=0.1,
                         activation=F.relu))
            for i in range(n_steps - 1):
                self.layers.append(
                    SAGEConv(out_feats,
                             out_feats,
                             sage_type,
                             feat_drop=0.1,
                             activation=F.relu))

        self.device = device
Example #12
 def __init__(self,
              g,
              in_feats,
              n_classes,
              n_layers=1,
              n_hidden=16,
              activation=F.relu,
              dropout=0.5,
              aggregator_type='pool'):
     super(GraphSAGE, self).__init__()
     self.layers = nn.ModuleList()
     self.g = g
     # input layer
     self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))
     # output layer
     self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type, feat_drop=dropout, activation=None)) # activation None
Example #13
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout, aggr_type):
        super(Graph_Predictor, self).__init__()
        self.conv = nn.ModuleList()
        self.conv.append(SAGEConv(in_channels, hidden_channels, aggr_type))
        for _ in range(num_layers - 1):
            self.conv.append(
                SAGEConv(hidden_channels, hidden_channels, aggr_type))

        self.lin = nn.ModuleList()
        for _ in range(num_layers - 1):
            self.lin.append(nn.Linear(hidden_channels, hidden_channels))
        self.lin.append(nn.Linear(hidden_channels, out_channels))

        self.pooling = AvgPooling()

        self.dropout = nn.Dropout(dropout)
Example #14
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
              dropout, aggregator_type):
     super(JKNet_model, self).__init__()
     self.layers = nn.ModuleList()
     # input layer
     self.layers.append(
         SAGEConv(in_feats,
                  n_hidden,
                  aggregator_type,
                  feat_drop=0.,
                  activation=activation))
     # hidden layers
     for i in range(n_layers - 1):
         self.layers.append(
             SAGEConv(n_hidden,
                      n_hidden,
                      aggregator_type,
                      feat_drop=dropout,
                      activation=activation))
     # output layer
     self.layer_output = nn.Linear(n_hidden * n_layers, n_classes)
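The n_hidden * n_layers input to layer_output implies that every SAGEConv output is concatenated, jumping-knowledge style, before classification; a self-contained sketch of that assumed forward pass (class name and sizes are placeholders):

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

class JKSAGE(torch.nn.Module):
    # same layer layout as the constructor above; the forward pass is an assumption
    def __init__(self, in_feats=4, n_hidden=8, n_classes=3, n_layers=3,
                 activation=F.relu, dropout=0.5, aggregator_type='mean'):
        super().__init__()
        self.layers = torch.nn.ModuleList(
            [SAGEConv(in_feats, n_hidden, aggregator_type, feat_drop=0.,
                      activation=activation)] +
            [SAGEConv(n_hidden, n_hidden, aggregator_type, feat_drop=dropout,
                      activation=activation) for _ in range(n_layers - 1)])
        self.layer_output = torch.nn.Linear(n_hidden * n_layers, n_classes)

    def forward(self, g, h):
        outs = []
        for layer in self.layers:
            h = layer(g, h)
            outs.append(h)                         # keep every layer's output
        return self.layer_output(torch.cat(outs, dim=1))

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
print(JKSAGE()(g, torch.randn(3, 4)).shape)  # torch.Size([3, 3])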
Example #15
    def __init__(self,
                 in_dim,
                 hidden_dim_1,
                 fc_hidden_1,
                 fc_hidden_2,
                 hidden_dim_2,
                 num_classes,
                 feat_drop=0,
                 use_cuda=False):
        """
        Constructor for the GraphAttConvBinaryClassifier class
        Parameters:
            in_dim (int): Dimension of features for each node
            hidden_dim (int): Dimension of hidden embeddings
            num_classes (int): Number of output classes
            use_cuda (bool): Indicates whether GPU should be utilized or not
        """
        super(GraphSageBinaryClassifier, self).__init__()

        # Model layers
        self.conv1 = SAGEConv(in_dim,
                              hidden_dim_1,
                              feat_drop=feat_drop,
                              aggregator_type='mean')
        self.conv2 = SAGEConv(hidden_dim_1,
                              hidden_dim_2,
                              feat_drop=feat_drop,
                              aggregator_type='mean')
        self.conv3 = SAGEConv(hidden_dim_2,
                              fc_hidden_1,
                              feat_drop=feat_drop,
                              aggregator_type='mean')

        self.fc_1 = nn.Linear(fc_hidden_1, fc_hidden_2)
        self.fc_2 = nn.Linear(fc_hidden_2, num_classes)

        self.out = nn.LogSoftmax(dim=1)

        self.use_cuda = use_cuda
Example #16
 def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int],
              act: _typing.Optional[str], dropout: _typing.Optional[float],
              agg: str):
     super(_SAGE, self).__init__()
     if agg not in ("gcn", "pool", "mean", "lstm"):
         raise ValueError("Unsupported aggregator type")
     self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
     for layer, _dimension in enumerate(dimensions):
         self.__convolution_layers.append(
             SAGEConv(
                 input_dimension if layer == 0 else dimensions[layer - 1],
                 _dimension, agg))
     self._act: _typing.Optional[str] = act
     self._dropout: _typing.Optional[float] = dropout
Example #17
    def __init__(self,
                 hid_dim,
                 n_heads,
                 pf_dim,
                 dropout,
                 device,
                 mem_dim,
                 gnn_layer_num=2):
        super().__init__()

        self.layer_norm = nn.LayerNorm(hid_dim)
        if mem_dim != 0:
            self.self_attention = MultiHeadAttentionLayer_wMem(hid_dim,
                                                               n_heads,
                                                               dropout,
                                                               device,
                                                               m=mem_dim)
        else:
            self.self_attention = MultiHeadAttentionLayer(
                hid_dim, n_heads, dropout, device)
        self.positionwise_feedforward = PositionwiseFeedforwardLayer(
            hid_dim, pf_dim, dropout)
        self.layers = nn.ModuleList()
        self.dropout = nn.Dropout(dropout)

        self.layers.append(
            SAGEConv(hid_dim, hid_dim, "gcn", feat_drop=0, activation=F.relu))
        for i in range(gnn_layer_num - 1):
            self.layers.append(
                SAGEConv(hid_dim,
                         hid_dim,
                         "gcn",
                         feat_drop=0,
                         activation=F.relu))

        self.hid_dim = hid_dim
        self.dropout = nn.Dropout(dropout)
Example #18
    def __init__(self, args):
        super(GraphSAGE, self).__init__()
        self.args = args
        self.num_layer = int(self.args["num_layers"])

        missing_keys = list(
            set([
                "features_num", "num_class", "num_layers", "hidden", "dropout",
                "act", "agg"
            ]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))

        if not self.num_layer == len(self.args["hidden"]) + 1:
            LOGGER.warn(
                "Warning: layer size does not match the length of hidden units"
            )

        if self.args["agg"] not in ("gcn", "pool", "mean", "lstm"):
            self.args["agg"] = "gcn"

        self.convs = torch.nn.ModuleList()
        self.convs.append(
            SAGEConv(self.args["features_num"],
                     self.args["hidden"][0],
                     aggregator_type=self.args["agg"]))
        for i in range(self.num_layer - 2):
            self.convs.append(
                SAGEConv(self.args["hidden"][i],
                         self.args["hidden"][i + 1],
                         aggregator_type=self.args["agg"]))

        self.convs.append(
            SAGEConv(self.args["hidden"][-1],
                     self.args["num_class"],
                     aggregator_type=self.args["agg"]))
Example #19
    def __init__(self,
                 in_feats: tuple,
                 out_feats,
                 weight=True,
                 device=None,
                 dropout_rate=0.0):
        super(GCMCGraphSage, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self.device = device
        self.dropout = nn.Dropout(dropout_rate)

        if weight:
            self.feat1 = nn.Parameter(th.Tensor(in_feats[0], 10))
            self.feat2 = nn.Parameter(th.Tensor(in_feats[1], 10))
        else:
            self.register_parameter('weight', None)
        self.reset_parameters()
        from dgl.nn.pytorch.conv import SAGEConv
        self.sage = SAGEConv(10, out_feats, aggregator_type='mean')
Example #20
    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout,
                 aggregator_type):
        super(GraphSAGE, self).__init__()

        self.droplayer = nn.Dropout(p=dropout)

        # input layer
        self.inplayer = nn.Linear(in_feats, n_hidden)

        self.layers = nn.ModuleList()
        # hidden layers
        for i in range(n_layers):
            self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type, feat_drop=dropout, activation=activation))

        # output layer
        self.outlayer = nn.Linear(n_hidden, n_classes)
Example #21
    def __init__(self, config):
        super(Procedure, self).__init__()
        self.config = config
        self.node_embedding = torch.nn.Embedding(config['node_number'],
                                                 config['node_hidden_size'])

        self.times_embedding = torch.nn.Embedding(
            config['n_times_types'], config['appearance_time_hidden_size'])

        self.interval_embedding = torch.nn.Embedding(
            config['n_intervals'], config['interval_hidden_size'])

        self.connection_embedding = torch.nn.Embedding(
            config['n_connection_types'], config['connection_hidden_size'])

        self.score_MLP = score_MLP(config)

        self.GNN = SAGEConv(config['node_hidden_size'],
                            config['node_hidden_size'], 'mean')

        if torch.cuda.is_available():
            self.node_embedding = self.node_embedding.cuda()
            self.connection_embedding = self.connection_embedding.cuda()
            self.score_MLP = self.score_MLP.cuda()
Example #22
 def __init__(
         self,
         g,
         nentity,  # input feat
         n_hidden,  # hidden dimension
         n_classes,  # Output feature size; i.e, the number of dimensions of :math:`h_i^{(l+1)}`.
         n_layers,  # gcn layer
         activation,  # activation str format
         dropout,
         aggregator_type,
         dist_func,
         ent_ini,  # num of training nodes
         in_feats=64,
         freeze=False):
     super(GraphSAGE, self).__init__()
     self.layers = nn.ModuleList()
     self.freeze = freeze
     if activation == 'relu':
         act_fun = Fn.relu
     elif activation == 'gelu':
         act_fun = Fn.gelu
     elif activation == 'sigmoid':
         act_fun = Fn.sigmoid
     elif activation == 'elu':
         act_fun = Fn.elu
     elif activation == 'glu':
         act_fun = Fn.glu
     elif activation == 'none':
         act_fun = None
     else:
          raise ValueError(
              'the graph activation function should be one of [relu, gelu, sigmoid, elu, glu, none], but got {}'
              .format(activation))
     self.activation = act_fun
     # input layer
     self.layers.append(
         SAGEConv(in_feats,
                  n_hidden,
                  aggregator_type,
                  feat_drop=dropout,
                  activation=act_fun))
     # hidden layers
     for i in range(n_layers - 1):
         self.layers.append(
             SAGEConv(n_hidden,
                      n_hidden,
                      aggregator_type,
                      feat_drop=dropout,
                      activation=act_fun))
     # output layer
     self.layers.append(
         SAGEConv(n_hidden,
                  n_classes,
                  aggregator_type,
                  feat_drop=dropout,
                   activation=act_fun))  # output layer keeps the activation here
     self.predictor = ScorePredictor(dist_func)
      if ent_ini is None:
          self.entity_dim = in_feats  # set before the embedding is created
          self.entity_embedding = nn.Parameter(
              torch.zeros(nentity, self.entity_dim))
          nn.init.xavier_uniform_(self.entity_embedding)
     else:
         embed_li = []
         hidden_dim = len(ent_ini[0])
         for i in range(nentity):
             embed_li.append(ent_ini[i])
         embed_li = numpy.asarray(embed_li)
         embed_li = torch.from_numpy(embed_li).float()
         self.entity_embedding = nn.Parameter(embed_li,
                                              requires_grad=not freeze)
         # self.entity_dim = hidden_dim
         # self.entity_embedding = nn.Embedding.from_pretrained(embed_li, freeze=freeze)
     self.g = g
     self.g.ndata['feature'] = self.entity_embedding
Example #23
    def __init__(self,
                 use_KG,
                 input_node_dim,
                 gnn_model,
                 num_gnn_layers,
                 n_hidden,
                 dropout,
                 use_attention=True,
                 n_entities=None,
                 n_relations=None,
                 relation_dim=None,
                 reg_lambda_kg=0.01,
                 reg_lambda_gnn=0.01,
                 res_type="Bi"):
        super(Model, self).__init__()
        self._use_KG = use_KG
        self._n_entities = n_entities
        self._n_relations = n_relations
        self._gnn_model = gnn_model
        self._use_attention = use_attention
        self._reg_lambda_kg = reg_lambda_kg
        self._reg_lambda_gnn = reg_lambda_gnn

        ### for input node embedding
        self.entity_embed = nn.Embedding(n_entities,
                                         input_node_dim)  ### e_h, e_t
        self.relation_embed = nn.Embedding(n_relations, relation_dim)  ### e_r
        self.W_R = nn.Parameter(
            th.Tensor(n_relations, input_node_dim, relation_dim))  ### W_r
        nn.init.xavier_uniform_(self.W_R, gain=nn.init.calculate_gain('relu'))

        self.layers = nn.ModuleList()
        for i in range(num_gnn_layers):
            r = int(math.pow(2, i))
            act = None if i + 1 == num_gnn_layers else F.relu
            if i == 0:
                if gnn_model == "kgat":
                    self.layers.append(
                        KGATConv(input_node_dim, n_hidden // r, dropout))
                elif gnn_model == "graphsage":
                    self.layers.append(
                        SAGEConv(input_node_dim,
                                 n_hidden // r,
                                 aggregator_type="mean",
                                 feat_drop=dropout,
                                 activation=act))
                else:
                    raise NotImplementedError
            else:
                r2 = int(math.pow(2, i - 1))
                if gnn_model == "kgat":
                    self.layers.append(
                        KGATConv(n_hidden // r2, n_hidden // r, dropout))
                elif gnn_model == "graphsage":
                    self.layers.append(
                        SAGEConv(n_hidden // r2,
                                 n_hidden // r,
                                 aggregator_type="mean",
                                 feat_drop=dropout,
                                 activation=act))
                else:
                    raise NotImplementedError
Example #24
 def __init__(self, nfeat, nhid, nclass, dropout):
     super(GraphSage, self).__init__()
     self.dropout = dropout
     self.conv1 = SAGEConv(nfeat, nhid, aggregator_type='mean')
     self.conv2 = SAGEConv(nhid, nclass, aggregator_type='mean')
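A typical (assumed) forward pass for this two-layer model applies ReLU and the stored dropout rate between the convolutions; a self-contained sketch with a hypothetical class name and placeholder sizes:

import torch
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import SAGEConv

class TwoLayerSage(torch.nn.Module):
    # constructor as above; the forward below is an assumed, typical usage
    def __init__(self, nfeat=4, nhid=8, nclass=3, dropout=0.5):
        super().__init__()
        self.dropout = dropout
        self.conv1 = SAGEConv(nfeat, nhid, aggregator_type='mean')
        self.conv2 = SAGEConv(nhid, nclass, aggregator_type='mean')

    def forward(self, g, x):
        h = F.relu(self.conv1(g, x))
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.conv2(g, h)

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
print(TwoLayerSage()(g, torch.randn(3, 4)).shape)  # torch.Size([3, 3])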
Example #25
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 16:56:12 2020

@author: user
"""

import dgl
import torch
from dgl.nn.pytorch.conv import SAGEConv

# `train` is assumed to be an (n_edges, 2) array of (src, dst) node ids and
# `node_embedding` an nn.Embedding over those nodes; toy stand-ins shown here.
train = torch.tensor([[0, 1], [1, 2], [2, 3], [3, 0]])
node_embedding = torch.nn.Embedding(4, 8)

g = dgl.graph(data=(train[:, 0], train[:, 1]))

GNN = SAGEConv(8, 8, 'mean')

a = GNN(g, node_embedding.weight)