Example #1
    def __init__(self, num_layer, emb_dim, rank, drop_ratio=0.5):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
        '''

        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.rank = rank

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(num_layer):
            self.convs.append(
                DGLGraphConv(emb_dim, emb_dim, rank,
                             allow_zero_in_degree=True))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
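
For orientation, here is a minimal, hedged sketch of the forward pass that usually pairs with a constructor like Example #1 (AtomEncoder, a list of convolutions, per-layer batch norm and dropout). It is written as a standalone function taking the module as `self`; the `conv(g, h)` call signature is an assumption, since DGLGraphConv appears to be project-specific.

import torch.nn.functional as F

def gnn_node_forward(self, g, x):
    # Embed raw atom features into the model's hidden space.
    h = self.atom_encoder(x)
    for layer in range(self.num_layer):
        h = self.convs[layer](g, h)            # message passing (call signature assumed)
        h = self.batch_norms[layer](h)
        if layer < self.num_layer - 1:         # no non-linearity after the last layer
            h = F.relu(h)
        h = F.dropout(h, p=self.drop_ratio, training=self.training)
    return h
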
Example #2
    def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last"):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
        '''

        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        # self.residual = residual

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(num_layer):
            self.convs.append(Mol_GCNConv(emb_dim))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
Example #3
    def __init__(self,
                 hidden_channels,
                 out_channels,
                 num_layers=3,
                 dropout=0.5):
        super().__init__()

        self.dropout = dropout

        self.atom_encoder = AtomEncoder(hidden_channels)
        self.bond_encoder = BondEncoder(hidden_channels)

        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            nn = Sequential(
                Linear(hidden_channels, 2 * hidden_channels),
                BatchNorm(2 * hidden_channels),
                ReLU(),
                Linear(2 * hidden_channels, hidden_channels),
                BatchNorm(hidden_channels),
                ReLU(),
            )
            self.convs.append(GINEConv(nn, train_eps=True))

        self.lin = Linear(hidden_channels, out_channels)
Example #4
    def __init__(self,
                 node_feat_dim,
                 edge_feat_dim,
                 hid_dim,
                 out_dim,
                 num_layers,
                 dropout=0.,
                 beta=1.0,
                 learn_beta=False,
                 aggr='softmax',
                 mlp_layers=1):
        super(DeeperGCN, self).__init__()

        self.num_layers = num_layers
        self.dropout = dropout
        self.gcns = nn.ModuleList()
        self.norms = nn.ModuleList()

        for _ in range(self.num_layers):
            conv = GENConv(in_dim=hid_dim,
                           out_dim=hid_dim,
                           aggregator=aggr,
                           beta=beta,
                           learn_beta=learn_beta,
                           mlp_layers=mlp_layers)

            self.gcns.append(conv)
            self.norms.append(nn.BatchNorm1d(hid_dim, affine=True))

        self.node_encoder = AtomEncoder(hid_dim)
        self.pooling = AvgPooling()
        self.output = nn.Linear(hid_dim, out_dim)
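
As a rough guide to how the pieces in Example #4 fit together, the following is a hedged sketch of a DeeperGCN-style forward pass (pre-activation residual blocks, then average pooling and the output head). The `conv(g, h, edge_feats)` signature and the presence of edge features are assumptions; the actual GENConv wrapper lives in the source repository.

import torch.nn.functional as F

def deeper_gcn_forward(self, g, node_feats, edge_feats):
    h = self.node_encoder(node_feats)
    for norm, conv in zip(self.norms, self.gcns):
        # Pre-activation residual block: norm -> ReLU -> dropout -> conv -> add.
        h_res = F.relu(norm(h))
        h_res = F.dropout(h_res, p=self.dropout, training=self.training)
        h = h + conv(g, h_res, edge_feats)     # call signature assumed
    h_graph = self.pooling(g, h)               # AvgPooling over each graph
    return self.output(h_graph)
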
Example #5
    def __init__(self, in_channels, number_hidden_layers, aggr, hidden_out_channel, out_channel, pool_layer, k=1):
        super(GCN_Net, self).__init__()
        self.in_channels = in_channels
        self.number_hidden_layers = number_hidden_layers #number of hidden GraphConv layers
        self.aggr = aggr # "add", "mean" or "max"
        self.pool_layer = pool_layer # 'add', 'max', 'mean' or 'sort'
        self.hidden_out_channel = hidden_out_channel
        self.out_channel = out_channel
        self.atom_encoder = AtomEncoder(emb_dim=self.in_channels)
        self.k = k

        
        self.graph_conv_list = nn.ModuleList()
        self.graph_conv_list.append(GraphConv(in_channels=self.in_channels, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.batchnorm = BatchNorm(in_channels=self.hidden_out_channel)

        if self.number_hidden_layers != 0:
            for i in range(self.number_hidden_layers):
                self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.hidden_out_channel, aggr=self.aggr))

        self.graph_conv_list.append(GraphConv(in_channels=self.hidden_out_channel, out_channels=self.out_channel, aggr=self.aggr))
         
        self.linear1 = nn.Linear(self.k*self.out_channel, 16)
        self.linear2 = nn.Linear(16, 1)
Example #6
    def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last", residual=False, gnn_type='gin', drop_path_p=0.01, net_linear=False, net_seed=47, edge_p=0.6):
        '''
            emb_dim (int): node embedding dimensionality
        '''

        super(RandomGNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        self.drop_path_p = drop_path_p
        self.net_linear = net_linear
        self.net_args = {'graph_model': 'ER',
                         'P': edge_p,
                         'seed': net_seed,
                         'net_linear': self.net_linear}
        net_graph = build_graph(self.num_layer-1, self.net_args)
        self.stage = StageBlock(net_graph, emb_dim, self.net_linear, self.drop_ratio, self.drop_path_p, True)
Example #7
    def __init__(self, num_layers, num_mlp_layers, hidden_dim, output_dim,
                 final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type, norm_type):
        super(GIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        self.ginlayers = torch.nn.ModuleList()
        self.atom_encoder = AtomEncoder(hidden_dim)

        self.bond_layers = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):

            mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim * 2, hidden_dim,
                      norm_type)

            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp, norm_type), neighbor_pooling_type,
                        0, self.learn_eps))
            self.bond_layers.append(BondEncoder(hidden_dim))

        self.linears_prediction = nn.Linear(hidden_dim, output_dim)

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == 'sum':
            self.pool = SumPooling()
        elif graph_pooling_type == 'mean':
            self.pool = AvgPooling()
        elif graph_pooling_type == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
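
For context, a hedged sketch of how the modules from Example #7 are typically combined: per-layer bond encoding and GIN updates, then the chosen graph pooling, dropout, and the linear prediction head. The `gin_layer(g, h, e)` call signature is an assumption, since GINConv and ApplyNodeFunc are project-specific.

def gin_forward(self, g, node_feats, edge_feats):
    h = self.atom_encoder(node_feats)
    for gin_layer, bond_encoder in zip(self.ginlayers, self.bond_layers):
        e = bond_encoder(edge_feats)     # per-layer edge embedding
        h = gin_layer(g, h, e)           # message passing (call signature assumed)
    h_graph = self.pool(g, h)            # sum/mean/max readout chosen in __init__
    return self.linears_prediction(self.drop(h_graph))
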
Example #8
    def __init__(self, gnn_type, num_tasks, num_layer=4, emb_dim=256,
                 dropout=0.0, batch_norm=True,
                 residual=True, graph_pooling="mean"):
        super().__init__()

        self.num_tasks = num_tasks
        self.num_layer = num_layer
        self.emb_dim = emb_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual
        self.graph_pooling = graph_pooling

        self.atom_encoder = AtomEncoder(emb_dim)
        self.bond_encoder = BondEncoder(emb_dim)

        gnn_layer = {
            'Cheb_net': ChebLayer,
            'mlp': MLPLayer,
        }.get(gnn_type, ChebLayer)

        self.layers = nn.ModuleList([
            gnn_layer(emb_dim, emb_dim, dropout=dropout, batch_norm=batch_norm, residual=residual)
            for _ in range(num_layer)
        ])

        self.pooler = {
            "mean": dgl.mean_nodes,
            "sum": dgl.sum_nodes,
            "max": dgl.max_nodes,
        }.get(graph_pooling, dgl.mean_nodes)

        self.graph_pred_linear = MLPReadout(emb_dim, num_tasks)
Example #9
    def __init__(
        self,
        batch_size=100,
        hidden=100,
        lr=0.001,
        layers=3,
        dropout=0.5,
        virtual_node=False,
        conv_radius=3,
        plus=False,
        appnp=False,
        out_dim=1,
    ):
        # assert conv_type in ["gcn", "gin", "gin+", "gine"]
        super().__init__()

        self.hidden = hidden
        self.lr = lr
        self.batch_size = batch_size

        self.k = conv_radius

        self.atomencoder = AtomEncoder(hidden)

        self.conv_type = "gin+" if plus else 'gine'
        convs = [
            gine_layer(
                hidden,
                dropout=dropout,
                virtual_node=virtual_node,
                k=min(i + 1, self.k),
                conv_type=self.conv_type,
                edge_embedding=BondEncoder(emb_dim=hidden),
            ) for i in range(layers - 1)
        ]
        convs.append(
            gine_layer(
                hidden,
                dropout=dropout,
                virtual_node=virtual_node,
                virtual_node_agg=False,  # on last layer, use but do not update virtual node
                last_layer=True,
                k=min(layers, self.k),
                conv_type=self.conv_type,
                edge_embedding=BondEncoder(emb_dim=hidden),
            ))
        self.convs = convs
        self.main = nn.Sequential(*convs)
        # self.main = nn.Sequential(OGBMolEmbedding(hidden, embed_edge=False, x_as_list=(conv_type == "gin+")), *convs)
        self.readout = nn.Linear(hidden, out_dim)

        self.virtual_node = virtual_node
        if self.virtual_node:
            self.v0 = nn.Parameter(torch.zeros(1, hidden), requires_grad=True)

        self.appnp = APPNP(0.8, 5) if appnp else None

        # Loss and metrics
        self.loss_fn = nn.BCEWithLogitsLoss()
Example #10
    def __init__(self, num_layers, emb_dim, drop_ratio=0.5, JK="last", residual=False, gnn_type='gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layers (int): number of GNN message passing layers
        '''

        super(GNN_node, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(num_layers):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
                
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
Example #11
    def __init__(self, hidden_channels, num_layers, num_heads, num_bases):
        super().__init__()
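        # Note: 'args' below is a module-level namespace (e.g. parsed command-line arguments), not an __init__ parameter.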
        if args.use_multi_aggregators:
            aggregators = ['sum', 'mean', 'max']
        else:
            aggregators = ['symnorm']

        self.encoder = AtomEncoder(hidden_channels)

        self.convs = torch.nn.ModuleList()
        self.norms = torch.nn.ModuleList()
        for _ in range(num_layers):
            self.convs.append(
                EGConv(hidden_channels, hidden_channels, aggregators,
                       num_heads, num_bases))
            self.norms.append(BatchNorm1d(hidden_channels))

        self.mlp = Sequential(
            Linear(hidden_channels, hidden_channels // 2, bias=False),
            BatchNorm1d(hidden_channels // 2),
            ReLU(inplace=True),
            Linear(hidden_channels // 2, hidden_channels // 4, bias=False),
            BatchNorm1d(hidden_channels // 4),
            ReLU(inplace=True),
            Linear(hidden_channels // 4, 1),
        )
Example #12
    def __init__(self,
                 num_iter,
                 num_layer,
                 emb_dim,
                 drop_ratio=0.5,
                 JK="last",
                 alpha=0.1,
                 **kwargs):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
        '''

        super(APPNP_node, self).__init__()
        self.num_iter = num_iter
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### List of GNNs
        self.mlp = torch.nn.ModuleList()

        for _ in range(num_layer):
            self.mlp.append(torch.nn.Linear(emb_dim, emb_dim))

        self.appnp = APPNP(num_iter,
                           emb_dim,
                           alpha,
                           dropout=drop_ratio,
                           normalize=True)
Example #13
    def __init__(
        self,
        num_layer,
        emb_dim,
        drop_ratio=0.5,
        JK="last",
        residual=False,
        gnn_type="gin",
    ):
        """
            emb_dim (int): node embedding dimensionality
        """

        super(GNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = torch.nn.ModuleList()

        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = torch.nn.ModuleList()

        for layer in range(num_layer):
            if gnn_type == "gin":
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == "gcn":
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError("Undefined GNN type called {}".format(gnn_type))

            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

        for layer in range(num_layer - 1):
            self.mlp_virtualnode_list.append(
                torch.nn.Sequential(
                    torch.nn.Linear(emb_dim, 2 * emb_dim),
                    torch.nn.BatchNorm1d(2 * emb_dim),
                    torch.nn.ReLU(),
                    torch.nn.Linear(2 * emb_dim, emb_dim),
                    torch.nn.BatchNorm1d(emb_dim),
                    torch.nn.ReLU(),
                )
            )
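
The virtual-node bookkeeping set up in Example #13 is easiest to see in the forward pass. Below is a hedged sketch following the common OGB pattern: each layer first adds the current virtual-node embedding to every node of its graph, runs message passing, and then (except after the last layer) refreshes the virtual node from a pooled summary of the nodes. The conv call signature and the PyG-style `batch` vector with `global_add_pool` are assumptions.

import torch
import torch.nn.functional as F
from torch_geometric.nn import global_add_pool

def virtualnode_forward(self, x, edge_index, edge_attr, batch):
    num_graphs = int(batch.max().item()) + 1
    # One virtual node per graph, all initialised from the single zero embedding.
    vn = self.virtualnode_embedding(torch.zeros(num_graphs, dtype=torch.long, device=x.device))
    h = self.atom_encoder(x)
    for layer in range(self.num_layer):
        h = h + vn[batch]                                   # broadcast each graph's virtual node to its nodes
        h = self.convs[layer](h, edge_index, edge_attr)     # message passing (call signature assumed)
        h = self.batch_norms[layer](h)
        if layer < self.num_layer - 1:
            h = F.relu(h)
        h = F.dropout(h, p=self.drop_ratio, training=self.training)
        if layer < self.num_layer - 1:
            # Refresh the virtual node from the pooled node states plus its previous value.
            vn = self.mlp_virtualnode_list[layer](global_add_pool(h, batch) + vn)
    return h
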
Example #14
    def __init__(self, args):

        super(GraphMultisetTransformer_for_OGB, self).__init__(args)

        self.atom_encoder = AtomEncoder(self.nhid)
        self.convs = self.get_convs()

        if self.skip_op is not None:
            self.proj = nn.Linear(self.nhid * self.args.num_convs, self.nhid)
Example #15
    def __init__(self,
                 data_info: dict,
                 embed_size: int = 300,
                 num_layers: int = 5,
                 dropout: float = 0.5,
                 virtual_node: bool = False):
        """Graph Isomorphism Network (GIN) variant introduced in baselines
        for OGB graph property prediction datasets

        Parameters
        ----------
        data_info : dict
            The information about the input dataset.
        embed_size : int
            Embedding size.
        num_layers : int
            Number of layers.
        dropout : float
            Dropout rate.
        virtual_node : bool
            Whether to use virtual node.
        """
        super(OGBGGIN, self).__init__()
        self.data_info = data_info
        self.embed_size = embed_size
        self.num_layers = num_layers
        self.virtual_node = virtual_node

        if data_info['name'] in ['ogbg-molhiv', 'ogbg-molpcba']:
            self.node_encoder = AtomEncoder(embed_size)
            self.edge_encoders = nn.ModuleList(
                [BondEncoder(embed_size) for _ in range(num_layers)])
        else:
            # Handle other datasets
            self.node_encoder = nn.Linear(data_info['node_feat_size'],
                                          embed_size)
            self.edge_encoders = nn.ModuleList([
                nn.Linear(data_info['edge_feat_size'], embed_size)
                for _ in range(num_layers)
            ])

        self.conv_layers = nn.ModuleList(
            [GINEConv(MLP(embed_size)) for _ in range(num_layers)])

        self.dropout = nn.Dropout(dropout)
        self.pool = AvgPooling()
        self.pred = nn.Linear(embed_size, data_info['out_size'])

        if virtual_node:
            self.virtual_emb = nn.Embedding(1, embed_size)
            nn.init.constant_(self.virtual_emb.weight.data, 0)
            self.mlp_virtual = nn.ModuleList()
            for _ in range(num_layers - 1):
                self.mlp_virtual.append(MLP(embed_size))
            self.virtual_pool = SumPooling()
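
A hedged instantiation sketch for Example #15, showing the keys the constructor reads from `data_info`; the values below are illustrative, and `node_feat_size`/`edge_feat_size` are only consulted for datasets other than ogbg-molhiv and ogbg-molpcba.

model = OGBGGIN(
    data_info={
        'name': 'ogbg-molhiv',   # selects the AtomEncoder/BondEncoder branch
        'node_feat_size': 9,     # illustrative; used only for non-OGB-mol datasets
        'edge_feat_size': 3,     # illustrative; used only for non-OGB-mol datasets
        'out_size': 1,
    },
    embed_size=300,
    num_layers=5,
    dropout=0.5,
    virtual_node=False,
)
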
Example #16
    def __init__(self,
                 dataset,
                 node_feat_dim,
                 edge_feat_dim,
                 hid_dim,
                 out_dim,
                 num_layers,
                 dropout=0.,
                 norm='batch',
                 pooling='mean',
                 beta=1.0,
                 learn_beta=False,
                 aggr='softmax',
                 mlp_layers=1):
        super(DeeperGCN, self).__init__()
        
        self.dataset = dataset
        self.num_layers = num_layers
        self.dropout = dropout
        self.gcns = nn.ModuleList()
        self.norms = nn.ModuleList()

        for i in range(self.num_layers):
            conv = GENConv(dataset=dataset,
                           in_dim=hid_dim,
                           out_dim=hid_dim,
                           aggregator=aggr,
                           beta=beta,
                           learn_beta=learn_beta,
                           mlp_layers=mlp_layers,
                           norm=norm)
            
            self.gcns.append(conv)
            self.norms.append(norm_layer(norm, hid_dim))

        if self.dataset == 'ogbg-molhiv':
            self.node_encoder = AtomEncoder(hid_dim)
        elif self.dataset == 'ogbg-ppa':
            self.node_encoder = nn.Linear(node_feat_dim, hid_dim)
            self.edge_encoder = nn.Linear(edge_feat_dim, hid_dim)
        else:
            raise ValueError(f'Dataset {dataset} is not supported.')

        if pooling == 'sum':
            self.pooling = SumPooling()
        elif pooling == 'mean':
            self.pooling = AvgPooling()
        elif pooling == 'max':
            self.pooling = MaxPooling()
        else:
            raise NotImplementedError(f'{pooling} is not supported.')
        
        self.output = nn.Linear(hid_dim, out_dim)
Example #17
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.pos_enc_dim = net_params['pos_enc_dim']
        if self.pos_enc_dim > 0:
            self.embedding_pos_enc = nn.Linear(self.pos_enc_dim, hidden_dim)
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.JK = net_params['JK']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        device = net_params['device']
        self.device = device

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=edge_dim)

        self.layers = nn.ModuleList([
            EIGLayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout, graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm, residual=self.residual, aggregators=self.aggregators,
                     scalers=self.scalers, avg_d=self.avg_d, type_net=self.type_net, edge_features=self.edge_feat,
                     edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(EIGLayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout,
                                    graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                                    residual=self.residual, aggregators=self.aggregators, scalers=self.scalers,
                                    avg_d=self.avg_d, type_net=self.type_net, edge_features=self.edge_feat,
                                    edge_dim=edge_dim,
                                    pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers).model)
        if self.gru_enable:
            self.gru = GRU(hidden_dim, hidden_dim, device)

        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem
Example #18
    def __init__(self, num_layers, emb_dim, drop_ratio=0.5, JK="last", residual=False, gnn_type='gin'):
        '''
            num_layers (int): number of GNN message passing layers
            emb_dim (int): node embedding dimensionality
        '''

        super(GNN_node_Virtualnode, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = nn.Embedding(1, emb_dim)
        nn.init.constant_(self.virtualnode_embedding.weight.data, 0)

        ### List of GNNs
        self.convs = nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = nn.ModuleList()

        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = nn.ModuleList()

        for layer in range(num_layers):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))

            self.batch_norms.append(nn.BatchNorm1d(emb_dim))

        for layer in range(num_layers - 1):
            self.mlp_virtualnode_list.append(nn.Sequential(nn.Linear(emb_dim, emb_dim),
                                                           nn.BatchNorm1d(emb_dim),
                                                           nn.ReLU(),
                                                           nn.Linear(emb_dim, emb_dim),
                                                           nn.BatchNorm1d(emb_dim),
                                                           nn.ReLU()))
        self.pool = SumPooling()
Example #19
    def __init__(self, num_layer=5, emb_dim=100, num_task=2):
        super(GIN, self).__init__()

        self.num_layer = num_layer

        self.gins = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(self.num_layer):
            self.gins.append(GINConv(emb_dim))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

        ### convenient module to encode/embed raw molecule node/edge features. (TODO) make it more efficient.
        self.atom_encoder = AtomEncoder(emb_dim)
        self.bond_encoder = BondEncoder(emb_dim)

        self.graph_pred_linear = torch.nn.Linear(emb_dim, num_task)
Example #20
    def __init__(self, num_layers, emb_dim, drop_ratio=0.5, JK="last", residual=False, gnn_type='gin'):
        '''
            emb_dim (int): node embedding dimensionality
        '''

        super(Bayesian_GNN_node_Virtualnode, self).__init__()
        self.num_layers = num_layers
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layers < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)

        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = torch.nn.ModuleList()

        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = torch.nn.ModuleList()

        for layer in range(num_layers):
            if gnn_type == 'gin':
                self.convs.append(BayesianGINConv(emb_dim))
            elif gnn_type == 'gcn':
                raise Exception("not implemented yet")
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))

            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

        for layer in range(num_layers - 1):
            self.mlp_virtualnode_list.append(torch.nn.Sequential(
                bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=emb_dim, out_features=emb_dim), 
                torch.nn.BatchNorm1d(emb_dim), 
                torch.nn.ReLU(),
                bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=emb_dim, out_features=emb_dim), 
                torch.nn.BatchNorm1d(emb_dim), 
                torch.nn.ReLU()))
Example #21
    def __init__(self,
                 architecture: str = "GCN",
                 num_node_features: int = 300,
                 activation: str = "prelu",
                 num_conv_layers: int = 3,
                 conv_size: int = 256,
                 pool_method: str = "add",
                 lin1_size: int = 128,
                 lin2_size: int = 64,
                 output_size: int = 128,
                 lr: float = 0.001,
                 weight_decay: float = 0,
                 **kwargs):
        super().__init__()

        # this line ensures params passed to LightningModule will be saved to the ckpt
        # it also allows accessing params via the 'self.hparams' attribute
        self.save_hyperparameters(logger=False)

        # init node embedding layer
        self.atom_encoder = AtomEncoder(emb_dim=self.hparams.num_node_features)
        # self.bond_encoder = BondEncoder(emb_dim=self.hparams.edge_emb_size)

        # init network architecture
        if self.hparams.architecture == "GCN":
            self.model = gcn.GCN(hparams=self.hparams)
        elif self.hparams.architecture == "GAT":
            self.model = gat.GAT(hparams=self.hparams)
        elif self.hparams.architecture == "GraphSAGE":
            self.model = graph_sage.GraphSAGE(hparams=self.hparams)
        elif self.hparams.architecture == "GIN":
            self.model = gin.GIN(hparams=self.hparams)
        else:
            raise Exception("Incorrect architecture name!")

        # loss function
        self.criterion = torch.nn.BCEWithLogitsLoss()

        # metric
        self.evaluator = Evaluator(name="ogbg-molpcba")

        self.metric_hist = {
            "train/ap": [],
            "val/ap": [],
            "train/loss": [],
            "val/loss": [],
        }
Example #22
    def __init__(self,
                 in_channels,
                 number_hidden_layers,
                 aggr,
                 hidden_out_channel,
                 out_channel,
                 pool_layer,
                 k=1,
                 device=None):
        super(InceptionNet, self).__init__()
        self.pool_layer = pool_layer  # 'add', 'max', 'mean' or 'sort'
        self.device = device
        self.k = k
        self.atom_encoder = AtomEncoder(emb_dim=in_channels)
        self.batchnorm = BatchNorm(in_channels=2 * hidden_out_channel)

        self.rgcn_list = torch.nn.ModuleList()
        self.graphconv_list = torch.nn.ModuleList()
        self.rgcn_list.append(
            FastRGCNConv(in_channels=in_channels,
                         out_channels=hidden_out_channel,
                         num_relations=NUM_RELATIONS))
        self.graphconv_list.append(
            GraphConv(in_channels=in_channels,
                      out_channels=hidden_out_channel))

        if number_hidden_layers != 0:
            for i in range(number_hidden_layers):
                self.rgcn_list.append(
                    FastRGCNConv(in_channels=2 * hidden_out_channel,
                                 out_channels=hidden_out_channel,
                                 num_relations=NUM_RELATIONS))
                self.graphconv_list.append(
                    GraphConv(in_channels=2 * hidden_out_channel,
                              out_channels=hidden_out_channel))

        self.rgcn_list.append(
            FastRGCNConv(in_channels=2 * hidden_out_channel,
                         out_channels=out_channel,
                         num_relations=NUM_RELATIONS))
        self.graphconv_list.append(
            GraphConv(in_channels=2 * hidden_out_channel,
                      out_channels=out_channel))

        self.linear1 = nn.Linear(2 * k * out_channel, 16)
        self.linear2 = nn.Linear(16, 1)
Example #23
    def __init__(self, num_features, num_classes, max_num_nodes, num_layers, gnn_hidden_dim,
                 gnn_output_dim, mlp_hidden_dim, pooling_type, invariant, encode_edge, pre_sum_aggr=False):
        super().__init__()

        self.encode_edge = encode_edge
        self.pre_sum_aggr = pre_sum_aggr
        self.max_num_nodes = max_num_nodes
        self.pooling_type = pooling_type
        self.num_diffpool_layers = num_layers

        # Reproduce the paper's choice of coarsening factor
        coarse_factor = 0.1 if num_layers == 1 else 0.25

        gnn_dim_input = num_features
        if encode_edge:
            gnn_dim_input = gnn_hidden_dim
            self.conv1 = GCNConv(gnn_hidden_dim, aggr='add')

        if self.pre_sum_aggr:
            self.conv1 = DenseGraphConv(gnn_dim_input, gnn_dim_input)

        no_new_clusters = ceil(coarse_factor * self.max_num_nodes)
        gnn_embed_dim_output = (NUM_SAGE_LAYERS - 1) * gnn_hidden_dim + gnn_output_dim

        layers = []
        current_num_clusters = self.max_num_nodes
        for i in range(num_layers):

            diffpool_layer = DiffPoolLayer(gnn_dim_input, gnn_hidden_dim, gnn_output_dim, current_num_clusters,
                                           no_new_clusters, pooling_type, invariant)
            layers.append(diffpool_layer)

            # Update embedding sizes
            gnn_dim_input = gnn_embed_dim_output
            current_num_clusters = no_new_clusters
            no_new_clusters = ceil(no_new_clusters * coarse_factor)

        self.diffpool_layers = nn.ModuleList(layers)

        # After DiffPool layers, apply again layers of GraphSAGE convolutions
        self.final_embed = SAGEConvolutions(gnn_embed_dim_output, gnn_hidden_dim, gnn_output_dim, lin=False)
        final_embed_dim_output = gnn_embed_dim_output * (num_layers + 1)

        self.lin1 = nn.Linear(final_embed_dim_output, mlp_hidden_dim)
        self.lin2 = nn.Linear(mlp_hidden_dim, num_classes)
        self.atom_encoder = AtomEncoder(emb_dim=gnn_hidden_dim)
Example #24
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.layer_norm = net_params['layer_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.lap_pos_enc = net_params['lap_pos_enc']
        self.wl_pos_enc = net_params['wl_pos_enc']
        max_wl_role_index = 37  # this is maximum graph size in the dataset

        if self.lap_pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        if self.wl_pos_enc:
            self.embedding_wl_pos_enc = nn.Embedding(max_wl_role_index,
                                                     hidden_dim)

        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=hidden_dim)
        else:
            self.embedding_e = nn.Linear(1, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual))
        self.MLP_layer = MLPReadout(
            out_dim, 128)  # 128 out dim since regression problem
Example #25
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        posttrans_layers = net_params['posttrans_layers']
        device = net_params['device']
        self.device = device

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        self.layers = nn.ModuleList([
            PNASimpleLayer(in_dim=hidden_dim,
                           out_dim=hidden_dim,
                           dropout=dropout,
                           batch_norm=self.batch_norm,
                           residual=self.residual,
                           aggregators=self.aggregators,
                           scalers=self.scalers,
                           avg_d=self.avg_d,
                           posttrans_layers=posttrans_layers)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            PNASimpleLayer(in_dim=hidden_dim,
                           out_dim=out_dim,
                           dropout=dropout,
                           batch_norm=self.batch_norm,
                           residual=self.residual,
                           aggregators=self.aggregators,
                           scalers=self.scalers,
                           avg_d=self.avg_d,
                           posttrans_layers=posttrans_layers))

        self.MLP_layer = MLPReadout(out_dim,
                                    1)  # 1 out dim since regression problem
Example #26
    def __init__(self, emb_dim, num_classes, num_layers, dropout):
        super(GCN, self).__init__()

        self.atom_encoder = AtomEncoder(emb_dim)
        self.layers = nn.ModuleList()
        self.bns = nn.ModuleList()
        # input layer
        self.layers.append(GCNConv(emb_dim, emb_dim))
        self.bns.append(nn.BatchNorm1d(emb_dim))
        # hidden layers
        for _ in range(num_layers - 2):
            self.layers.append(GCNConv(emb_dim, emb_dim))
            self.bns.append(nn.BatchNorm1d(emb_dim))
        # output layer
        self.layers.append(GCNConv(emb_dim, emb_dim))
        self.dropout = nn.Dropout(p=dropout)
        self.readout = dgl.nn.AvgPooling()
        self.graph_pred_fc = nn.Linear(emb_dim, num_classes, bias=False)
Example #27
    def __init__(self, config, num_tasks):
        super(Net, self).__init__()
        self.atom_encoder = AtomEncoder(config.hidden)

        self.convs = torch.nn.ModuleList()
        if config.nonlinear_conv == 'GIN':
            for i in range(config.layers):
                self.convs.append(GinConv(config.hidden, config.variants))
        elif config.nonlinear_conv[:2] == 'EB':
            for i in range(config.layers):
                self.convs.append(
                    ExpandingBConv(config.hidden,
                                   int(config.nonlinear_conv[2:]),
                                   config.variants))
        elif config.nonlinear_conv[:2] == 'EA':
            for i in range(config.layers):
                self.convs.append(
                    ExpandingAConv(config.hidden,
                                   int(config.nonlinear_conv[2:]),
                                   config.variants))
        elif config.nonlinear_conv == 'CB':
            for i in range(config.layers):
                self.convs.append(CombBConv(config.hidden, config.variants))
        elif config.nonlinear_conv == 'CA':
            for i in range(config.layers):
                self.convs.append(CombAConv(config.hidden, config.variants))
        else:
            raise ValueError('Undefined conv called {}'.format(config.nonlinear_conv))

        self.JK = JumpingKnowledge(config.JK)

        if config.JK == 'cat':
            self.graph_pred_linear = torch.nn.Linear(
                config.hidden * config.layers, num_tasks)
        else:
            self.graph_pred_linear = torch.nn.Linear(config.hidden, num_tasks)

        if config.pooling == 'add':
            self.pool = global_add_pool
        elif config.pooling == 'mean':
            self.pool = global_mean_pool

        self.dropout = config.dropout
Example #28
    def __init__(self,
                 num_features,
                 num_classes,
                 max_num_nodes,
                 hidden,
                 pooling_type,
                 num_layers,
                 encode_edge=False):
        super(MincutPool, self).__init__()
        self.encode_edge = encode_edge

        self.atom_encoder = AtomEncoder(emb_dim=hidden)

        self.pooling_type = pooling_type
        self.convs = nn.ModuleList()
        self.pools = nn.ModuleList()
        self.num_layers = num_layers

        for i in range(num_layers):
            if i == 0:
                if encode_edge:
                    self.convs.append(GCNConv(hidden, aggr='add'))
                else:
                    self.convs.append(
                        GraphConv(num_features, hidden, aggr='add'))
            else:
                self.convs.append(DenseGraphConv(hidden, hidden))

        self.rms = []
        num_nodes = max_num_nodes
        for i in range(num_layers - 1):
            num_nodes = ceil(0.5 * num_nodes)
            if pooling_type == 'mlp':
                self.pools.append(Linear(hidden, num_nodes))
            else:
                self.rms.append(
                    fetch_assign_matrix('uniform', ceil(2 * num_nodes),
                                        num_nodes))

        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)
Example #29
    def __init__(self):
        super(Net, self).__init__()

        self.node_emb = AtomEncoder(emb_dim=70)

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(4):
            conv = PNAConvSimple(in_channels=70,
                                 out_channels=70,
                                 aggregators=aggregators,
                                 scalers=scalers,
                                 deg=deg,
                                 post_layers=1)
            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(70))

        self.mlp = Sequential(Linear(70, 35), ReLU(), Linear(35, 17), ReLU(),
                              Linear(17, 1))
Example #30
    def __init__(self,
                 num_timesteps=4,
                 emb_dim=300,
                 num_layers=5,
                 drop_ratio=0,
                 num_tasks=1,
                 **args):
        super(AttentiveFP, self).__init__()

        self.num_layers = num_layers
        self.num_timesteps = num_timesteps
        self.drop_ratio = drop_ratio

        self.atom_encoder = AtomEncoder(emb_dim)
        self.bond_encoder = BondEncoder(emb_dim=emb_dim)

        conv = GATEConv(emb_dim, emb_dim, emb_dim, drop_ratio)
        gru = GRUCell(emb_dim, emb_dim)
        self.atom_convs = torch.nn.ModuleList([conv])
        self.atom_grus = torch.nn.ModuleList([gru])
        for _ in range(num_layers - 1):
            conv = GATConv(emb_dim,
                           emb_dim,
                           dropout=drop_ratio,
                           add_self_loops=False,
                           negative_slope=0.01)
            self.atom_convs.append(conv)
            self.atom_grus.append(GRUCell(emb_dim, emb_dim))

        self.mol_conv = GATConv(emb_dim,
                                emb_dim,
                                dropout=drop_ratio,
                                add_self_loops=False,
                                negative_slope=0.01)
        self.mol_gru = GRUCell(emb_dim, emb_dim)

        self.graph_pred_linear = Linear(emb_dim, num_tasks)

        self.reset_parameters()