Example #1
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 heads,
                 dropout=0.5):
        super(GAT, self).__init__()

        self.num_layers = num_layers
        self.dropout = dropout
        self.convs = torch.nn.ModuleList()
        # Use the `in_channels` argument instead of relying on a global `dataset` object.
        self.convs.append(GATConv(in_channels, hidden_channels, heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(heads * hidden_channels, hidden_channels, heads))
        self.convs.append(
            GATConv(heads * hidden_channels, out_channels, heads,
                    concat=False))

        self.skips = torch.nn.ModuleList()
        self.skips.append(Lin(in_channels, hidden_channels * heads))
        for _ in range(num_layers - 2):
            self.skips.append(
                Lin(hidden_channels * heads, hidden_channels * heads))
        self.skips.append(Lin(hidden_channels * heads, out_channels))
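
The snippet above only defines the layers. Below is a minimal forward-pass sketch (not part of the original source), assuming full-batch propagation and that torch.nn.functional is imported as F; the original repository may instead iterate over sampled sub-adjacencies.

    def forward(self, x, edge_index):
        # Hedged sketch: each GATConv output is combined with its linear skip,
        # with ELU and dropout between all but the last layer.
        for i, (conv, skip) in enumerate(zip(self.convs, self.skips)):
            x = conv(x, edge_index) + skip(x)
            if i != self.num_layers - 1:
                x = F.elu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x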
Example #2
 def __init__(self, num_features, num_classes, n_heads=2):
     super(GATSage, self).__init__()
     self.n_heads = n_heads
     self.num_layers = 2
     self.conv1 = GATConv(num_features, 8, heads=self.n_heads)
     self.conv2 = GATConv(8 * self.n_heads, num_classes, heads=1)
     self.convs = torch.nn.ModuleList([self.conv1, self.conv2])
Example #3
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']

        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim * num_heads)  # node feat is an integer

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([GATConv(hidden_dim * num_heads, hidden_dim, num_heads,
                                             dropout=dropout) for _ in range(self.n_layers - 1)])
        # Pass dropout by keyword: GATConv's fourth positional argument is `concat`.
        self.layers.append(GATConv(hidden_dim * num_heads, out_dim, 1, dropout=dropout))
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList([nn.BatchNorm1d(hidden_dim * num_heads) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        # self.layers = nn.ModuleList([GATLayer(hidden_dim * num_heads, hidden_dim, num_heads,
        #                                       dropout, self.batch_norm, self.residual) for _ in range(n_layers - 1)])
        # self.layers.append(GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example #4
    def layers(self):
        # TODO adapt to per-layer configurability
        self.layers_list = torch.nn.ModuleList()

        conv_in = GATConv(in_channels=self.config.feature_dimensionality,
                          out_channels=self.config.hidden_units,
                          heads=self.config.kernel_size,
                          concat=True,
                          negative_slope=0.2,
                          dropout=0.0,
                          bias=self.config.use_bias)

        self.layers_list.append(conv_in)

        for i in range(self.config.hidden_layers):
            # With concat=True each layer emits hidden_units * kernel_size
            # features, so the hidden layers must accept that many channels.
            l = GATConv(in_channels=self.config.hidden_units *
                        self.config.kernel_size,
                        out_channels=self.config.hidden_units,
                        heads=self.config.kernel_size,
                        concat=True,
                        negative_slope=0.2,
                        dropout=0.0,
                        bias=self.config.use_bias)
            self.layers_list.append(l)

        conv_out = GATConv(in_channels=self.config.hidden_units *
                           self.config.kernel_size,
                           out_channels=self.model_type.out_channels,
                           heads=self.config.kernel_size,
                           concat=True,
                           negative_slope=0.2,
                           dropout=0.0,
                           bias=self.config.use_bias)

        self.layers_list.append(conv_out)
Example #5
class GAT(torch.nn.Module):
    """
    Graph Attention Networks
    <https://arxiv.org/abs/1710.10903>
    """
    def __init__(self):
        super(GAT, self).__init__()
        self.conv1 = GATConv(75, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, 128, heads=1, concat=True, dropout=0.6)

        self.gather_layer = nn.Linear(128, 1)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x1 = F.dropout(x, p=0.6, training=self.training)
        x2 = F.elu(self.conv1(x1, edge_index))
        x3 = F.dropout(x2, p=0.6, training=self.training)
        x4 = self.conv2(x3, edge_index)

        y_molecules = global_add_pool(x4, batch)
        z_molecules = self.gather_layer(y_molecules)
        return z_molecules

    def __call__(self, data):
        target = torch.unsqueeze(data.y, 1)
        out = self.forward(data)
        loss = F.mse_loss(out, target)
        z = out.to('cpu').data.numpy()
        t = target.to('cpu').data.numpy()
        return loss, z, t
Example #6
class GATNet(nn.Module):
    def __init__(self, dataset):
        super(GATNet, self).__init__()

        self.conv1 = GATConv(
            dataset.num_features,
            8,
            heads=8,
            dropout=0.6)

        self.conv2 = GATConv(
            8 * 8,
            dataset.num_classes,
            heads=OUTPUT_HEADS,
            concat=False,
            dropout=0.6)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, x, edge_index, training=None):
        training = self.training if training is None else training
        x = F.dropout(x, p=0.6, training=training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=0.6, training=training)
        x = self.conv2(x, edge_index)
        return x
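
OUTPUT_HEADS is a module-level constant in the original source. The lines below are a hedged usage sketch, not from that source: the value OUTPUT_HEADS = 1, the Cora dataset, and the F (torch.nn.functional) import are all assumptions made for illustration.

from torch_geometric.datasets import Planetoid

OUTPUT_HEADS = 1                          # assumed value; define before instantiating GATNet
dataset = Planetoid(root='data/Planetoid', name='Cora')
data = dataset[0]

model = GATNet(dataset)
logits = model(data.x, data.edge_index)   # shape [num_nodes, num_classes]
loss = F.cross_entropy(logits[data.train_mask], data.y[data.train_mask])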
Example #7
    def __init__(self, node_size, input_feature, num_classes):
        super(NetGAT, self).__init__()
        self.node_per_graph = node_size

        hidden_size = 256
        gat_head = 8
        head_size = hidden_size // gat_head
        self.input_feature = input_feature

        # self.linprev = MLP([input_feature, 64, 64, 64])
        self.linprev = EdgeConv(MLP([input_feature * 2, 64, 64, 64]),
                                aggr='max')

        self.conv1 = GATConv(64, head_size, gat_head)
        self.bn1 = torch.nn.BatchNorm1d(hidden_size)
        self.lin1 = torch.nn.Linear(64, hidden_size)

        self.conv2 = GATConv(hidden_size, head_size, gat_head)
        self.bn2 = torch.nn.BatchNorm1d(hidden_size)
        self.lin2 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv3 = GATConv(hidden_size, head_size, gat_head)
        self.bn3 = torch.nn.BatchNorm1d(hidden_size)
        self.lin3 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv4 = GATConv(hidden_size, head_size, gat_head)
        self.bn4 = torch.nn.BatchNorm1d(hidden_size)
        self.lin4 = torch.nn.Linear(hidden_size, hidden_size)

        self.mlp = Seq(Lin(2048, 512), Dropout(0.4), Lin(512, 256),
                       Dropout(0.4), Lin(256, num_classes))
Example #8
    def __init__(self, **kwargs):
        super(PAPAGATChannel, self).__init__()
        self.num_steps = kwargs['num_steps']
        self.num_nodes = kwargs['num_nodes']

        self.gat_layers = torch.nn.ModuleList()
        if kwargs['num_steps'] >= 2:
            self.gat_layers.append(
                GATConv(kwargs['emb_dim'],
                        kwargs['hidden_size'],
                        heads=kwargs['num_heads'],
                        dropout=kwargs['dropout']))
            for i in range(kwargs['num_steps'] - 2):
                self.gat_layers.append(
                    GATConv(kwargs['hidden_size'] * kwargs['num_heads'],
                            kwargs['hidden_size'],
                            heads=kwargs['num_heads'],
                            dropout=kwargs['dropout']))
            self.gat_layers.append(
                GATConv(kwargs['hidden_size'] * kwargs['num_heads'],
                        kwargs['repr_dim'],
                        heads=1,
                        dropout=kwargs['dropout']))
        else:
            self.gat_layers.append(
                GATConv(kwargs['emb_dim'],
                        kwargs['repr_dim'],
                        heads=1,
                        dropout=kwargs['dropout']))

        self.reset_parameters()
Example #9
    def __init__(self, num_features, num_classes):
        super(TopKNet, self).__init__()
        self.name = "topknet"
        self.version = "v1"
        self.num_features = num_features
        self.num_classes = num_classes

        self.bn1 = torch.nn.BatchNorm1d(num_features=num_features)

        self.conv1 = GATConv(num_features, 256)
        self.bn2 = torch.nn.BatchNorm1d(num_features=256)
        self.pool1 = TopKPooling(256, ratio=0.8)

        self.conv2 = GATConv(256, 256)
        self.bn3 = torch.nn.BatchNorm1d(num_features=256)
        self.pool2 = TopKPooling(256, ratio=0.8)

        self.conv3 = GATConv(256, 256)
        self.bn4 = torch.nn.BatchNorm1d(num_features=256)
        self.pool3 = TopKPooling(256, ratio=0.8)

        self.lin1 = torch.nn.Linear(512, 256)
        self.bn5 = torch.nn.BatchNorm1d(num_features=256)
        self.lin2 = torch.nn.Linear(256, 128)
        self.bn6 = torch.nn.BatchNorm1d(num_features=128)
        self.lin3 = torch.nn.Linear(128, num_classes)
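
The 512-wide input to lin1 suggests a readout that concatenates global max- and mean-pooled 256-dim features after each pooling block. The forward pass below is a hedged sketch along those lines, not the original method; it assumes global_max_pool / global_mean_pool are imported from torch_geometric.nn as gmp / gap, plus torch and torch.nn.functional as F.

    def forward(self, x, edge_index, batch):
        # Hedged sketch: conv -> BN -> ReLU -> TopK pooling per block, then a
        # concatenated max/mean readout (256 + 256 = 512 features for lin1).
        x = self.bn1(x)

        x = F.relu(self.bn2(self.conv1(x, edge_index)))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, batch=batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.bn3(self.conv2(x, edge_index)))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, batch=batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.bn4(self.conv3(x, edge_index)))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, batch=batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3                  # graph-level embedding, 512-dim
        x = F.relu(self.bn5(self.lin1(x)))
        x = F.relu(self.bn6(self.lin2(x)))
        return F.log_softmax(self.lin3(x), dim=-1)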
Example #10
 def __init__(self,dim=64,edge_dim=12,node_in=8,edge_in=19,edge_in3=8):
     super(Net_int_2Edges_attention2, self).__init__()
     self.lin_node = torch.nn.Linear(node_in, dim)
     
     self.conv1 = GATConv(dim, dim, negative_slope=0.2, bias=True)
     
     # The second Linear must take dim*2 inputs (the width produced by the first Linear).
     self.lin_covert1 = Sequential(BatchNorm1d(dim), Linear(dim, dim*2),
                                   RReLU(), Dropout(), Linear(dim*2, dim), RReLU())
     
     self.conv2 = GATConv(dim, dim, negative_slope=0.2, bias=True)
     
     self.lin_covert2 = Sequential(BatchNorm1d(dim), Linear(dim, dim*2),
                                   RReLU(), Dropout(), Linear(dim*2, dim), RReLU())
     
     self.conv3 = GATConv(dim, dim, negative_slope=0.2, bias=True)
     
     self.lin_covert3 = Sequential(BatchNorm1d(dim), Linear(dim, dim*2),
                                   RReLU(), Dropout(), Linear(dim*2, dim), RReLU())
     
     self.conv4 = GATConv(dim, dim, negative_slope=0.2, bias=True)
     
     self.lin_covert4 = Sequential(BatchNorm1d(dim), Linear(dim, dim*2),
                                   RReLU(), Dropout(), Linear(dim*2, dim), RReLU())
     
     self.lin_weight = Linear(8, dim*3, bias=False)
     self.lin_bias = Linear(8, 1, bias=False)
     self.norm = BatchNorm1d(dim*3)
     self.norm_x = BatchNorm1d(node_in)
Example #11
    def __init__(self, num_features, n_classes, num_heads, num_rels, num_bases,
                 num_hidden, num_hidden_layers_rgcn, num_hidden_layers_gat,
                 dropout, activation, alpha, bias):
        super(PRGAT, self).__init__()
        self.concat = True
        self.neg_slope = alpha
        self.num_hidden_layers_rgcn = num_hidden_layers_rgcn
        self.num_hidden_layers_gat = num_hidden_layers_gat
        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation
        # RGCN input layer
        self.rgcn_input = RGCNConv(num_features, num_hidden, num_rels, num_bases, bias=bias) #aggr values ['add', 'mean', 'max'] default : add
        # RGCN Hidden layers
        self.layers = nn.ModuleList()
        for _ in range(num_hidden_layers_rgcn):
            self.layers.append(RGCNConv(num_hidden, num_hidden, num_rels, num_bases, bias=bias))
        # GAT input layer
        self.layers.append(
            GATConv(num_hidden, num_hidden, heads=num_heads, concat=self.concat,
                    negative_slope=self.neg_slope, dropout=dropout, bias=bias))

        # GAT hidden layers
        for _ in range(num_hidden_layers_gat):
            gat_in = num_hidden * num_heads if self.concat else num_hidden
            self.layers.append(
                GATConv(gat_in, num_hidden, heads=num_heads, concat=self.concat,
                        negative_slope=self.neg_slope, dropout=dropout, bias=bias))

        # GAT output layer
        out_in = num_hidden * num_heads if self.concat else num_hidden
        self.gat_output = GATConv(out_in, n_classes, heads=num_heads,
                                  concat=self.concat, negative_slope=self.neg_slope,
                                  dropout=dropout, bias=bias)
Example #12
    def __init__(self,
                 num_features_xd=78,
                 n_output=1,
                 num_features_xt=25,
                 n_filters=32,
                 embed_dim=128,
                 output_dim=128,
                 dropout=0.2):
        super(GATNet, self).__init__()

        # graph layers
        self.gcn1 = GATConv(num_features_xd,
                            num_features_xd,
                            heads=10,
                            dropout=dropout)
        self.gcn2 = GATConv(num_features_xd * 10, output_dim, dropout=dropout)
        self.fc_g1 = nn.Linear(output_dim, output_dim)

        # 1D convolution on protein sequence
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt1 = nn.Conv1d(in_channels=1000,
                                  out_channels=n_filters,
                                  kernel_size=8)
        self.fc_xt1 = nn.Linear(32 * 121, output_dim)

        # combined layers
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, n_output)

        # activation and regularization
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
Example #13
 def __init__(self,
              in_channels,
              out_channels,
              hidden_channels=256,
              kwargs=None,
              n_layers=3,
              dropout=0.0):
     super(GATNet, self).__init__()
     self.in_channels = in_channels
     self.out_channels = out_channels
     self.hidden_channels = hidden_channels
     self.n_layers = n_layers
     self.dropout = dropout
     self.kwargs = kwargs if kwargs is not None else {}  # avoid sharing a mutable default dict
     self.convs = nn.ModuleList()
     for i in range(self.n_layers - 1):
         n_in = self.in_channels if i == 0 else self.hidden_channels
         self.convs.append(
             GATConv(n_in,
                     self.hidden_channels,
                     aggr='add',
                     dropout=self.dropout,
                     add_self_loops=False,
                     **self.kwargs))
     # Last layer
     self.convs.append(
         GATConv(self.hidden_channels,
                 self.out_channels,
                 aggr='add',
                 dropout=self.dropout,
                 add_self_loops=False,
                 **self.kwargs))
Example #14
    def __init__(self, num_features):
        super(GAT, self).__init__()
        self.conv1 = GATConv(num_features, 8, heads=4)
        self.conv2 = GATConv(8 * 4, 16, heads=1)

        # self.fc = torch.nn.Linear(2 * 16, 1)
        self.fc = torch.nn.Linear(2 * 16, 2)
Example #15
    def __init__(self, args, graph, num_features, num_classes):
        super(Net, self).__init__()
        self.graph = graph
        self.num_layers = args.num_layers
        self.gat_layers = torch.nn.ModuleList()
        self.in_drop = args.in_drop
        self.gat_layers.append(
            GATConv(num_features,
                    args.num_hidden,
                    heads=args.num_heads,
                    dropout=args.attn_drop))
        self.dropout = F.dropout
        self.elu = F.elu
        self.log_softmax = F.log_softmax
        for i in range(1, self.num_layers):
            self.gat_layers.append(
                GATConv(args.num_hidden * args.num_heads,
                        args.num_hidden,
                        heads=args.num_heads,
                        dropout=args.attn_drop))

        self.output_layer = GATConv(args.num_hidden * args.num_heads,
                                    num_classes,
                                    heads=args.num_out_heads,
                                    concat=False,
                                    dropout=args.attn_drop)
Example #16

    def __init__(self,
                 num_layers,
                 in_feats,
                 num_hidden,
                 num_classes,
                 heads,
                 activation=F.elu,
                 dropout=0.):
        super(GAT, self).__init__()

        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.gat_layers.append(
            GATConv(in_feats, num_hidden, heads=heads[0], dropout=0.))

        # hidden layers
        for l in range(num_layers - 2):
            # due to multi-head, the in_feats = num_hidden * num_heads
            self.gat_layers.append(
                GATConv(num_hidden * heads[l],
                        num_hidden,
                        heads=heads[l + 1],
                        dropout=dropout))
        # output projection
        self.gat_layers.append(
            GATConv(num_hidden * heads[-2],
                    num_classes,
                    heads=heads[-1],
                    concat=False,
                    dropout=dropout))
        self.activation = activation
Example #17
    def __init__(self, emb_dim, hidden_size, num_heads, dropout, node_num, weighted, predict_layer_type):
        super(Seq2Graph, self).__init__()

        self.weighted = weighted
        self.predict_layer_type = predict_layer_type

        if weighted:
            self.graph_layer1 = WGATConv(in_channels=emb_dim, out_channels=hidden_size, heads=num_heads, dropout=dropout)
            self.graph_layer2 = WGATConv(in_channels=hidden_size, out_channels=hidden_size, heads=num_heads, dropout=dropout)
        else:
            self.graph_layer1 = GATConv(in_channels=emb_dim, out_channels=hidden_size, heads=num_heads, dropout=dropout,
                                        concat=True, add_self_loops=False)
            self.graph_layer2 = GATConv(in_channels=hidden_size * num_heads, out_channels=hidden_size, heads=num_heads,
                                        dropout=dropout,
                                        concat=False, add_self_loops=False)

        if predict_layer_type == 1:
            self.pred_layer = nn.Sequential(nn.Linear(in_features=hidden_size, out_features=node_num), nn.Sigmoid())
        else:  # SR-GNN
            self.W1 = nn.Linear(in_features=hidden_size, out_features=hidden_size)
            self.W2 = nn.Linear(in_features=hidden_size, out_features=hidden_size)
            self.W3 = nn.Linear(in_features=hidden_size, out_features=1)
            self.W4 = nn.Linear(in_features=2 * hidden_size, out_features=hidden_size)

        self.node_embs = nn.Embedding(num_embeddings=node_num, embedding_dim=emb_dim)
Example #18
    def __init__(self, num_nodes, num_relations, hidden_size, emb_dim, heads,
                 repr_dim):
        super(PGATNetEx, self).__init__(emb_dim, repr_dim, num_nodes,
                                        num_relations)
        self.emb_dim = emb_dim
        self.repr_dim = repr_dim

        self.node_emb = torch.nn.Embedding(num_nodes,
                                           emb_dim,
                                           max_norm=1,
                                           norm_type=2.0)
        self.r_emb = torch.nn.Embedding(num_relations,
                                        repr_dim,
                                        max_norm=1,
                                        norm_type=2.0)
        self.r_proj = torch.nn.Embedding(num_relations,
                                         emb_dim * repr_dim,
                                         max_norm=1,
                                         norm_type=2.0)

        self.kg_loss_func = torch.nn.MSELoss()

        self.conv1 = GATConv(emb_dim,
                             int(hidden_size // heads),
                             heads=heads,
                             dropout=0.6)
        self.conv2 = PAConv(int(hidden_size // heads) * heads,
                            repr_dim,
                            heads=1,
                            dropout=0.6)
Example #19
    def __init__(self, name='GCNConv'):
        super(Net, self).__init__()
        self.name = name
        if (name == 'GCNConv'):
            self.conv1 = GCNConv(dataset.num_features, 128)
            self.conv2 = GCNConv(128, 64)
        elif (name == 'ChebConv'):
            self.conv1 = ChebConv(dataset.num_features, 128, K=2)
            self.conv2 = ChebConv(128, 64, K=2)
        elif (name == 'GATConv'):
            self.conv1 = GATConv(dataset.num_features, 128)
            self.conv2 = GATConv(128, 64)
        elif (name == 'GINConv'):
            nn1 = Sequential(Linear(dataset.num_features, 128), ReLU(),
                             Linear(128, 64))
            self.conv1 = GINConv(nn1)
            self.bn1 = torch.nn.BatchNorm1d(64)
            nn2 = Sequential(Linear(64, 64), ReLU(), Linear(64, 64))
            self.conv2 = GINConv(nn2)
            self.bn2 = torch.nn.BatchNorm1d(64)

        self.attr = GCNConv(64,
                            dataset.num_classes,
                            cached=True,
                            normalize=not args.use_gdc)

        self.attack = GCNConv(64,
                              dataset.num_classes,
                              cached=True,
                              normalize=not args.use_gdc)
        self.reverse = GradientReversalLayer()
Example #20
    def __init__(self, config, weight_init, gnn_type, with_uv, num_users, num_relations): # TODO: handle multiple layers smartly
        super(GNNLayer, self).__init__()
        self.num_users = num_users
        self.num_relations = num_relations
        # self.n_layers = n_layers
        # self.agg = nn.Linear()
        GNN_u, GNN_v, GNN_uv = [], [], []
        # dims_default = [32, 16, 8]  # TODO: make this proper
        self.with_uv = with_uv
        out_dim = config.hidden_size[0]
        dims_li = \
            [
                (64, out_dim//num_relations),
                # (out_dim, out_dim//num_relations),
            ]
        assert out_dim % num_relations == 0
        for (in_dim, out_dim) in dims_li:
            # form layers
            if gnn_type == GCN:
                GNN_u.append(nn.ModuleList([GCNConv(in_dim, out_dim) for _ in range(num_relations)]))
                GNN_v.append(nn.ModuleList([GCNConv(in_dim, out_dim) for _ in range(num_relations)]))
                GNN_uv.append(nn.ModuleList([GCNConv(out_dim, out_dim) for _ in range(num_relations)]))
            elif gnn_type == GAT:
                GNN_u.append(nn.ModuleList([GATConv(in_dim, out_dim) for _ in range(num_relations)]))
                GNN_v.append(nn.ModuleList([GATConv(in_dim, out_dim) for _ in range(num_relations)]))
                GNN_uv.append(nn.ModuleList([GATConv(out_dim, out_dim) for _ in range(num_relations)]))
            else:
                assert False

        self.GNN_u = nn.ModuleList(GNN_u)
        self.GNN_v = nn.ModuleList(GNN_v)
        self.GNN_uv = nn.ModuleList(GNN_uv)
Example #21
class GAT_Net(torch.nn.Module):
    def __init__(self, features_num, num_class, hidden, heads, output_heads,
                 concat, dropout):
        super(GAT_Net, self).__init__()
        self.dropout = dropout
        self.first_lin = Linear(features_num, hidden)
        self.conv1 = GATConv(in_channels=hidden,
                             out_channels=hidden,
                             concat=concat,
                             heads=heads,
                             dropout=dropout)
        self.conv2 = GATConv(in_channels=hidden * heads,
                             out_channels=num_class,
                             concat=concat,
                             heads=output_heads,
                             dropout=dropout)

    def reset_parameters(self):
        self.first_lin.reset_parameters()
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.first_lin(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #22
    def __init__(self, hparams: dict):
        super().__init__()
        self.hparams = hparams

        if hparams["num_conv_layers"] < 1:
            raise Exception("Invalid number of layers!")

        self.conv_modules = nn.ModuleList()

        heads = hparams.get("heads", 1)

        self.conv_modules.append(
            GATConv(hparams["num_node_features"],
                    hparams["conv_size"],
                    heads=heads))

        for _ in range(hparams["num_conv_layers"] - 1):
            conv = GATConv(heads * hparams["conv_size"],
                           hparams["conv_size"],
                           heads=heads)
            self.conv_modules.append(conv)

        # With the default concat=True, each GATConv emits heads * conv_size features.
        self.lin = nn.Linear(heads * hparams["conv_size"], hparams["lin_size"])

        self.output = nn.Linear(hparams["lin_size"], hparams["output_size"])
Example #23
 def __init__(self, args):
     super(GAT, self).__init__()
     self.use_cuda = args.use_cuda
     self.feature_dim = args.feature_dim
     self.embedding_dim = args.embedding_dim
     self.gatconv1 = GATConv(self.feature_dim, 64, heads=4)
     self.gatconv2 = GATConv(64 * 4, self.embedding_dim)
Example #24
    def __init__(self, nfeat, nhid, nclass, heads=8, output_heads=1, dropout=0.5, lr=0.01,
            weight_decay=5e-4, with_bias=True, device=None):

        super(GAT, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device

        self.conv1 = GATConv(
            nfeat,
            nhid,
            heads=heads,
            dropout=dropout,
            bias=with_bias)

        self.conv2 = GATConv(
            nhid * heads,
            nclass,
            heads=output_heads,
            concat=False,
            dropout=dropout,
            bias=with_bias)

        self.dropout = dropout
        self.weight_decay = weight_decay
        self.lr = lr
        self.output = None
        self.best_model = None
        self.best_output = None
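
The constructor stores lr and weight_decay rather than using them directly, which suggests the full class also defines its own fit routine (not shown here). Below is a minimal, hedged sketch of how those stored hyperparameters would typically be consumed; the `dataset` object and the model's forward method are assumptions, not part of the snippet.

# Hedged usage sketch: build an optimizer from the hyperparameters kept in __init__.
model = GAT(nfeat=dataset.num_features, nhid=8,
            nclass=dataset.num_classes, device='cpu')
optimizer = torch.optim.Adam(model.parameters(),
                             lr=model.lr, weight_decay=model.weight_decay)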
Example #25
    def __init__(self):
        super(GAT, self).__init__()
        self.name = 'GAT'
        self.conv1 = GATConv(75, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, 128, heads=1, concat=True, dropout=0.6)

        self.gather_layer = nn.Linear(128, 1)
Example #26
    def __init__(self, in_channels: int, hidden_channels: int,
                 out_channels: int, edge_dim: int, num_layers: int,
                 num_timesteps: int, dropout: float = 0.0):
        super(AttentiveFP, self).__init__()

        self.num_layers = num_layers
        self.num_timesteps = num_timesteps
        self.dropout = dropout

        self.lin1 = Linear(in_channels, hidden_channels)

        conv = GATEConv(hidden_channels, hidden_channels, edge_dim, dropout)
        gru = GRUCell(hidden_channels, hidden_channels)
        self.atom_convs = torch.nn.ModuleList([conv])
        self.atom_grus = torch.nn.ModuleList([gru])
        for _ in range(num_layers - 1):
            conv = GATConv(hidden_channels, hidden_channels, dropout=dropout,
                           add_self_loops=False, negative_slope=0.01)
            self.atom_convs.append(conv)
            self.atom_grus.append(GRUCell(hidden_channels, hidden_channels))

        self.mol_conv = GATConv(hidden_channels, hidden_channels,
                                dropout=dropout, add_self_loops=False,
                                negative_slope=0.01)
        self.mol_gru = GRUCell(hidden_channels, hidden_channels)

        self.lin2 = Linear(hidden_channels, out_channels)

        self.reset_parameters()
Example #27
    def __init__(self, hparams: dict):
        super().__init__()
        self.hparams = hparams

        for param_name in [
            "num_node_features",
            "num_conv_layers",
            "conv_size",
            "lin1_size",
            "lin2_size",
            "output_size",
        ]:
            if not isinstance(hparams[param_name], int):
                raise Exception("Wrong hyperparameter type!")

        if hparams["num_conv_layers"] < 1:
            raise Exception("Invalid number of layers!")

        if hparams["activation"] == "relu":
            activation = nn.ReLU
        elif hparams["activation"] == "prelu":
            activation = nn.PReLU
        else:
            raise Exception("Invalid activation function name.")

        if hparams["pool_method"] == "add":
            self.pooling_method = global_add_pool
        elif hparams["pool_method"] == "mean":
            self.pooling_method = global_mean_pool
        elif hparams["pool_method"] == "max":
            self.pooling_method = global_max_pool
        else:
            raise Exception("Invalid pooling method name")

        self.conv_modules = nn.ModuleList()
        self.activ_modules = nn.ModuleList()

        heads = hparams.get("heads", 1)

        self.conv_modules.append(
            GATConv(hparams["num_node_features"], hparams["conv_size"], heads=heads)
        )
        self.activ_modules.append(activation())

        for _ in range(hparams["num_conv_layers"] - 1):
            conv = GATConv(heads * hparams["conv_size"], hparams["conv_size"], heads=heads)
            # nn.init.xavier_uniform_(conv.lin_l.weight)
            # nn.init.xavier_uniform_(conv.lin_r.weight)
            # nn.init.xavier_uniform_(conv.att_l)
            # nn.init.xavier_uniform_(conv.att_r)
            self.conv_modules.append(conv)
            self.activ_modules.append(activation())

        self.lin1 = nn.Linear(heads * hparams["conv_size"], hparams["lin1_size"])
        self.activ_lin1 = activation()

        self.lin2 = nn.Linear(hparams["lin1_size"], hparams["lin2_size"])
        self.activ_lin2 = activation()

        self.output = nn.Linear(hparams["lin2_size"], hparams["output_size"])
Example #28
 def __init__(self, args):
     super(GAT, self).__init__()
     self.args = set_default(args, {
                 'hidden': 64,
                 'hidden2': 32,
                 'dropout': 0.5,
                 'lr': 0.005,
                 'epoches': 300,
                 'weight_decay': 5e-4,
                 'agg': 'self',
                 'act': 'leaky_relu',
                 'withbn': True,
                     })
     self.timer = self.args['timer']
     self.dropout = self.args['dropout']
     self.agg = self.args['agg']
     self.withbn = self.args['withbn']
     self.conv1 = GATConv(self.args['hidden'], self.args['hidden'], self.args['heads'], dropout=self.args['dropout'])
     self.conv2 = GATConv(self.args['hidden']*self.args['heads'], self.args['hidden2'], dropout=self.args['dropout'])
     hd = [self.args['hidden'], self.args['hidden']*self.args['heads'], self.args['hidden2']]
     if self.withbn:
         self.bn1 = BatchNorm1d(self.args['hidden']*self.args['heads'])
         self.bn2 = BatchNorm1d(self.args['hidden2'])
     if self.args['agg'] == 'concat':
         outdim = sum(hd)
     elif self.args['agg'] == 'self':
         outdim = hd[-1]
     if self.args['act'] == 'leaky_relu':
         self.act = F.leaky_relu
     elif self.args['act'] == 'tanh':
         self.act = torch.tanh
     else:
         self.act = lambda x: x
     self.lin2 = Linear(outdim, self.args['num_class'])
     self.first_lin = Linear(self.args['features_num'], self.args['hidden'])
Example #29
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 heads, dropout, att_dropout):
        super(GAT, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.convs.append(
            GATConv(in_channels,
                    hidden_channels,
                    heads=heads,
                    dropout=att_dropout,
                    concat=True))
        self.bns = torch.nn.ModuleList()
        self.bns.append(torch.nn.BatchNorm1d(hidden_channels * heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(hidden_channels * heads,
                        hidden_channels,
                        heads=heads,
                        dropout=att_dropout,
                        concat=True))
            self.bns.append(torch.nn.BatchNorm1d(hidden_channels * heads))
        self.convs.append(
            GATConv(hidden_channels * heads,
                    out_channels,
                    heads=heads,
                    dropout=att_dropout,
                    concat=False))

        self.dropout = dropout
Example #30
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
        super(StandGAT2, self).__init__()
        gat_para1 = 4
        # NOTE: conv1 concatenates gat_para1 heads, so its output width is
        # nclass * gat_para1; conv2 therefore assumes nhid == nclass * gat_para1.
        self.conv1 = GATConv(nfeat, nclass, heads=gat_para1)
        self.conv2 = GATConv(nhid, nclass, heads=gat_para1)

        self.dropout_p = dropout