Code example #1
    def __init__(self, num_node_features: int, num_edge_features: int,
                 node_hidden_channels: int, edge_hidden_channels: int,
                 GATv2Conv_hidden_size: int, num_heads: int, num_classes: int):
        super(GCN, self).__init__()

        self.node_encoder = nn.Linear(num_node_features, node_hidden_channels)
        self.edge_encoder = nn.Linear(num_edge_features, edge_hidden_channels)

        # NNConv's nn must map each edge feature vector to in_channels * out_channels weights
        self.conv1 = g_nn.NNConv(
            in_channels=node_hidden_channels,
            out_channels=node_hidden_channels,
            nn=nn.Linear(edge_hidden_channels,
                         node_hidden_channels * node_hidden_channels))

        self.gatv2conv = g_nn.GATv2Conv(in_channels=node_hidden_channels,
                                        out_channels=GATv2Conv_hidden_size,
                                        heads=num_heads)
        gatv2conv_out_dim = GATv2Conv_hidden_size * num_heads

        self.conv2 = g_nn.NNConv(in_channels=gatv2conv_out_dim,
                                 out_channels=node_hidden_channels,
                                 nn=nn.Linear(
                                     edge_hidden_channels,
                                     gatv2conv_out_dim * node_hidden_channels))
        self.linear = nn.Linear(node_hidden_channels, num_classes)
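The constructor above only defines the layers. A minimal sketch of a matching forward pass (an assumption, not part of the original snippet: `F` stands for `torch.nn.functional`, and global mean pooling is just one possible readout) could look like this:

    # Sketch only -- activations and pooling are assumptions, not the source project's code.
    def forward(self, x, edge_index, edge_attr, batch):
        x = self.node_encoder(x)                    # [num_nodes, node_hidden_channels]
        e = self.edge_encoder(edge_attr)            # [num_edges, edge_hidden_channels]
        x = F.relu(self.conv1(x, edge_index, e))    # edge-conditioned convolution
        x = F.relu(self.gatv2conv(x, edge_index))   # [num_nodes, GATv2Conv_hidden_size * num_heads]
        x = F.relu(self.conv2(x, edge_index, e))    # back to node_hidden_channels
        x = g_nn.global_mean_pool(x, batch)         # one vector per graph
        return self.linear(x)                       # [num_graphs, num_classes]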
Code example #2
    def __init__(self, aggr="mean", **kwargs):
        super().__init__(**kwargs)

        nn1 = nn.Sequential(
            nn.Linear(self.ch_edge_in, 32),
            nn.ReLU(),
            nn.Linear(32, self.ch_feat_in * 32),
        )
        self.conv1 = gnn.NNConv(self.ch_feat_in, 32, nn1, aggr=aggr)
        nn2 = nn.Sequential(nn.Linear(self.ch_edge_in, 32), nn.ReLU(),
                            nn.Linear(32, 32 * 16))
        self.conv2 = gnn.NNConv(32, 16, nn2, aggr=aggr)
Code example #3
File: model.py Project: EliHei2/scPotter
    def __init__(self,
                 n_features,
                 n_classes,
                 n_hidden_GNN=[10],
                 n_hidden_FC=[],
                 dropout_GNN=0,
                 dropout_FC=0):
        super(NNConvNet, self).__init__(
            n_features, n_classes, n_hidden_GNN,
            n_hidden_FC, dropout_FC, dropout_GNN)

        # NOTE: torch_geometric.nn.NNConv normally also takes an `nn` module that
        # maps edge features to in_channels * out_channels weights; these calls omit it.
        self.layers_GNN.append(pyg_nn.NNConv(1, n_hidden_GNN[0]))
        if self.n_layers_GNN > 1:
            for i in range(self.n_layers_GNN - 1):
                self.layers_GNN.append(
                    pyg_nn.NNConv(n_hidden_GNN[i], n_hidden_GNN[(i + 1)]))
Code example #4
    def __init__(self,
                 node_attr_dim: int,
                 edge_attr_dim: int,
                 state_dim: int = 64,
                 num_conv: int = 3,
                 out_dim: int = 1,
                 attention_pooling: bool = False):

        super(MPNN, self).__init__()

        self.__in_linear = nn.Sequential(nn.Linear(node_attr_dim, state_dim),
                                         nn.ReLU())

        self.__num_conv = num_conv
        self.__nn_conv_linear = nn.Sequential(
            nn.Linear(edge_attr_dim, state_dim), nn.ReLU(),
            nn.Linear(state_dim, state_dim * state_dim))
        self.__nn_conv = pyg_nn.NNConv(state_dim,
                                       state_dim,
                                       self.__nn_conv_linear,
                                       aggr='mean',
                                       root_weight=False)
        self.__gru = nn.GRU(state_dim, state_dim)

        # self.__set2set = pyg_nn.Set2Set(state_dim, processing_steps=3)
        if attention_pooling:
            self.__pooling = pyg_nn.GlobalAttention(
                nn.Linear(state_dim, 1), nn.Linear(state_dim, 2 * state_dim))
        else:
            # Setting the num_layers > 1 will take significantly more time
            self.__pooling = pyg_nn.Set2Set(state_dim, processing_steps=3)

        self.__out_linear = nn.Sequential(
            nn.Linear(2 * state_dim, 2 * state_dim), nn.ReLU(),
            nn.Linear(2 * state_dim, out_dim))
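The constructor only wires the layers up. A minimal sketch of a matching forward method inside the same MPNN class (an assumption, not taken from the source project; `F` stands for `torch.nn.functional` and `data` is assumed to be a torch_geometric Batch with x, edge_index, edge_attr and batch) would iterate the shared NNConv and GRU for `num_conv` steps and then pool:

    # Sketch only -- the loop mirrors the Gilmer-style MPNN pattern, not the project's code.
    def forward(self, data):
        h = self.__in_linear(data.x)                       # embed node attributes into state_dim
        hidden = h.unsqueeze(0)                            # initial GRU hidden state
        for _ in range(self.__num_conv):
            m = F.relu(self.__nn_conv(h, data.edge_index, data.edge_attr))  # edge-conditioned messages
            h, hidden = self.__gru(m.unsqueeze(0), hidden)                  # GRU state update
            h = h.squeeze(0)
        g = self.__pooling(h, data.batch)                  # graph embedding, 2 * state_dim
        return self.__out_linear(g)                        # [num_graphs, out_dim]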
Code example #5
    def __init__(self,
                 hidden_dim: int,
                 edge_dim: int,
                 node_dim: int,
                 message_passing_steps: int = 6):
        super().__init__()
        self.mpnn_iters = message_passing_steps
        self.fc = torch.nn.Linear(node_dim, hidden_dim)
        func_ag = nn.Sequential(nn.Linear(edge_dim, hidden_dim),
                                nn.ReLU(inplace=False),
                                nn.Linear(hidden_dim, hidden_dim * hidden_dim))
        self.conv = gnn.NNConv(hidden_dim, hidden_dim, func_ag, aggr='mean')
        self.gru = nn.GRU(hidden_dim, hidden_dim)
Code example #6
File: main.py Project: chi0tzp/GCNNveMP
def main():
    batch_size = 16
    num_nodes = 4
    num_in_node_features = 16
    num_out_node_features = 64
    num_in_edge_features = 4
    num_out_edge_features = 8

    # Define batch of example graph
    edge_index = torch.tensor(
        [[0, 1, 2, 0, 3, 2, 3, 0], [1, 0, 0, 2, 2, 3, 0, 3]], dtype=torch.long)

    # Node features
    batch_x = torch.randn((batch_size, num_nodes, num_in_node_features),
                          dtype=torch.float)

    # Edge features -- batch_edge_attr has shape: torch.Size([16, 8, 4])
    batch_edge_attr = torch.randn(
        (batch_size, edge_index.size(1), num_in_edge_features),
        dtype=torch.float)

    # Wrap input node and edge features, along with the single edge_index, into a `torch_geometric.data.Batch` instance
    data_list = []
    for i in range(batch_size):
        data_list.append(
            gData(x=batch_x[i],
                  edge_index=edge_index,
                  edge_attr=batch_edge_attr[i]))
    batch = gBatch.from_data_list(data_list)

    # Thus,
    # batch.x          -- shape: torch.Size([64, 16])
    # batch.edge_index -- shape: torch.Size([2, 128])
    # batch.edge_attr  -- shape: torch.Size([128, 4])

    # Define NNConv layer
    nn = tnn.Sequential(
        tnn.Linear(num_in_edge_features, 25), tnn.ReLU(),
        tnn.Linear(25, num_in_node_features * num_out_node_features))
    gconv = gnn.NNConv(in_channels=num_in_node_features,
                       out_channels=num_out_node_features,
                       nn=nn,
                       aggr='mean')

    # Forward pass
    y = gconv(x=batch.x,
              edge_index=batch.edge_index,
              edge_attr=batch.edge_attr)
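    # Sanity check (added sketch, not in the original): NNConv keeps one output
    # row per node, so 16 graphs of 4 nodes each give a [64, 64] result.
    assert y.shape == (batch_size * num_nodes, num_out_node_features)
    print(y.shape)  # torch.Size([64, 64])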
Code example #7
    def __init__(self,
                 node_feature_size,
                 edge_feature_size,
                 node_hidden_size,
                 edge_hidden_size,
                 dropout_ratio=0.5,
                 steps=6):
        super(MoleculeMPNN, self).__init__()
        self.node_feature_size = node_feature_size
        self.edge_feature_size = edge_feature_size
        self.node_hidden_size = node_hidden_size
        self.edge_hidden_size = edge_hidden_size
        self.dropout_ratio = dropout_ratio

        self.embedder = nn.Sequential(
            LinearBlock(node_feature_size, 64, self.dropout_ratio, True,
                        nn.ReLU()),
            LinearBlock(64, self.node_hidden_size, self.dropout_ratio, False),
        )

        self.steps = steps

        self.edge_net = nn.Sequential(
            LinearBlock(edge_feature_size, 32, self.dropout_ratio, True,
                        nn.ReLU()),
            LinearBlock(32, 64, self.dropout_ratio, True, nn.ReLU()),
            LinearBlock(64, self.edge_hidden_size, self.dropout_ratio, True,
                        nn.ReLU()),
            LinearBlock(self.edge_hidden_size,
                        self.node_hidden_size * self.node_hidden_size,
                        self.dropout_ratio, True))

        self.mpnn = gnn.NNConv(self.node_hidden_size,
                               self.node_hidden_size,
                               self.edge_net,
                               aggr="mean",
                               root_weight=True)

        self.gru = nn.GRUCell(self.node_hidden_size, self.node_hidden_size)

        self.set2set = gnn.Set2Set(self.node_hidden_size, self.steps)

        self.fc = nn.Sequential(
            LinearBlock(self.node_hidden_size * 4 + 8, 1024,
                        self.dropout_ratio, True, nn.ReLU()),
            LinearBlock(1024, 8))
Code example #8
File: models.py Project: ryosa0915/molan
    def __init__(self, hparams, node_dim=None, edge_dim=None):
        super(MPNN, self).__init__()

        self.node_dim = node_dim
        self.edge_dim = edge_dim
        self.hparams = hparams
        self.output_dim = 1

        # Linear atom embedding
        atom_dim = hparams['atom_dim']
        self.linatoms = torch.nn.Linear(self.node_dim, atom_dim)

        # MPNN part
        conv_dim = atom_dim * 2
        nnet = nn.Sequential(*[
            nn.Linear(self.edge_dim, conv_dim),
            str2act(hparams['conv_act']),
            nn.Linear(conv_dim, atom_dim * atom_dim)
        ])
        self.conv = gnn.NNConv(atom_dim,
                               atom_dim,
                               nnet,
                               aggr=hparams['conv_aggr'],
                               root_weight=False)
        self.gru = nn.GRU(atom_dim, atom_dim)

        # Graph embedding
        self.set2set = gnn.Set2Set(atom_dim,
                                   processing_steps=hparams['emb_steps'])

        # Build mlp
        self.using_mlp = hparams['mlp_layers'] > 0
        if self.using_mlp:
            self.mlp, last_dim = make_mlp(atom_dim * 2, hparams['mlp_layers'],
                                          hparams['mlp_dim_ratio'],
                                          hparams['mlp_act'],
                                          hparams['mlp_batchnorm'],
                                          hparams['mlp_dropout'])
        else:
            last_dim = atom_dim * 2

        # Prediction
        self.pred = nn.Linear(last_dim, self.output_dim)
Code example #9
    def __init__(self, node_dim=13, edge_dim=5, num_target=8):
        super(Net, self).__init__()

        self.num_message_passing = 6
        node_hidden_dim = 128
        edge_hidden_dim = 128

        self.preprocess = nn.Sequential(
            LinearBn(node_dim, 64),
            nn.ReLU(),
            LinearBn(64, node_hidden_dim),
        )
        edge_net = nn.Sequential(
            LinearBn(edge_dim, 32),
            nn.ReLU(),  # alternatives tried: Swish(), LeakyReLU()
            LinearBn(32, 64),
            nn.ReLU(),
            LinearBn(64, edge_hidden_dim),
            nn.ReLU(),
            # maps each edge to a node_hidden_dim x node_hidden_dim weight matrix
            LinearBn(edge_hidden_dim, node_hidden_dim * node_hidden_dim),
        )

        self.conv = gnn.NNConv(
            node_hidden_dim,
            node_hidden_dim,
            edge_net,
            aggr='mean',
            root_weight=True)
        self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)

        self.set2set = gnn.Set2Set(node_hidden_dim, processing_steps=6)

        # predict coupling constants
        self.predict = nn.Sequential(
            LinearBn(4 * node_hidden_dim, 512),
            nn.ReLU(),
            nn.Linear(512, num_target),
        )
Code example #10
File: model.py Project: EgorBu/competitions
    def __init__(self,
                 node_dim=13,
                 edge_dim=5,
                 num_target=8,
                 node_hidden_dim=128,
                 edge_hidden_dim=128,
                 num_message_passing=6,
                 prep_hid_size=64):
        super(ChampsNet, self).__init__()

        self.num_message_passing = num_message_passing

        self.preprocess = nn.Sequential(
            LinearBn(node_dim, node_hidden_dim, act=nn.ReLU()))

        edge_net = nn.Sequential(
            LinearBn(edge_dim, edge_hidden_dim, act=nn.ReLU()),
            # maps each edge to a node_hidden_dim x node_hidden_dim weight matrix
            LinearBn(edge_hidden_dim, node_hidden_dim * node_hidden_dim),
        )

        self.conv = gnn.NNConv(
            node_hidden_dim,
            node_hidden_dim,
            edge_net,
            aggr='mean',
            root_weight=True)
        self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)

        self.set2set = gnn.Set2Set(
            node_hidden_dim,
            processing_steps=num_message_passing)

        # predict coupling constants
        self.predict = nn.Sequential(
            LinearBn(4 * node_hidden_dim, num_target, act=nn.ReLU()),
        )
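In examples #9 and #10 the prediction head takes `4 * node_hidden_dim` inputs even though Set2Set only produces `2 * node_hidden_dim` per graph. A common readout in CHAMPS-style coupling-constant models (an assumption about these projects, not confirmed by the snippets) is to concatenate the pooled graph embedding with the node states of the two coupled atoms; a dimension-only sketch, where `h`, `batch`, and the pair index tensors are assumed names:

    # Hypothetical readout sketch: h are node states after message passing.
    pool = self.set2set(h, batch)              # [num_graphs, 2 * node_hidden_dim]
    pair = torch.cat([
        pool[pair_graph_index],                # graph embedding, repeated per pair
        h[pair_atom_index_0],                  # state of the first coupled atom
        h[pair_atom_index_1],                  # state of the second coupled atom
    ], dim=-1)                                 # [num_pairs, 4 * node_hidden_dim]
    out = self.predict(pair)                   # [num_pairs, num_target]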