Example #1
File: gat_net.py Project: Saro00/pna
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']  # read but unused in this constructor
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.dropout = dropout

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim * num_heads, hidden_dim, num_heads, dropout,
                     self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GATLayer(hidden_dim * num_heads, out_dim, 1, dropout,
                     self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem
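Each of these constructors reads its hyper-parameters from a single flat `net_params` dict. A minimal sketch of such a dict for Example #1 (every value below and the `GATNet` class name are illustrative assumptions, not taken from the project):

# Hypothetical hyper-parameter dict: the keys mirror the lookups in the
# constructor above; the values are illustrative only.
net_params = {
    'num_atom_type': 28,     # vocabulary size for the node nn.Embedding
    'num_bond_type': 4,      # read but unused in this constructor
    'hidden_dim': 18,
    'n_heads': 8,
    'out_dim': 144,          # commonly hidden_dim * n_heads
    'in_feat_dropout': 0.0,
    'dropout': 0.0,
    'L': 4,                  # number of GAT layers
    'readout': 'mean',
    'batch_norm': True,
    'residual': True,
}
model = GATNet(net_params)   # 'GATNet' is an assumed class name from gat_net.py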
Example #2
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.NN_eig = net_params['NN_eig']
        self.avg_d = net_params['avg_d']
        self.not_pre = net_params['not_pre']
        self.towers = net_params['towers']
        self.divide_input_first = net_params['divide_input_first']
        self.divide_input_last = net_params['divide_input_last']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        device = net_params['device']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)

        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_bond_type, edge_dim)

        self.layers = nn.ModuleList([
            EIGLayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout,
                     graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                     residual=self.residual, aggregators=self.aggregators,
                     scalers=self.scalers, avg_d=self.avg_d, not_pre=self.not_pre,
                     towers=self.towers, edge_features=self.edge_feat,
                     NN_eig=self.NN_eig, edge_dim=edge_dim,
                     divide_input=self.divide_input_first,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            EIGLayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout,
                     graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                     residual=self.residual, aggregators=self.aggregators,
                     scalers=self.scalers, avg_d=self.avg_d, not_pre=self.not_pre,
                     towers=self.towers, edge_features=self.edge_feat,
                     NN_eig=self.NN_eig, edge_dim=edge_dim,
                     divide_input=self.divide_input_last,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers))

        if self.gru_enable:
            self.gru = GRU(hidden_dim, hidden_dim, device)

        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem
Example #3
File: pna_net.py Project: yhjflower/pna
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.towers = net_params['towers']
        self.divide_input_first = net_params['divide_input_first']
        self.divide_input_last = net_params['divide_input_last']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        device = net_params['device']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)

        if self.edge_feat:
            self.embedding_e = nn.Linear(in_dim_edge, edge_dim)

        self.layers = nn.ModuleList([
            PNALayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout,
                     graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                     residual=self.residual, aggregators=self.aggregators,
                     scalers=self.scalers, avg_d=self.avg_d, towers=self.towers,
                     edge_features=self.edge_feat, edge_dim=edge_dim,
                     divide_input=self.divide_input_first,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(PNALayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout,
                                    graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                                    residual=self.residual, aggregators=self.aggregators, scalers=self.scalers,
                                    avg_d=self.avg_d, towers=self.towers, divide_input=self.divide_input_last,
                                    edge_features=self.edge_feat, edge_dim=edge_dim,
                                    pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers))

        if self.gru_enable:
            self.gru = GRU(hidden_dim, hidden_dim, device)

        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example #4
    def __init__(self, net_params):
        super().__init__()

        self.aggregators = net_params['aggregators']
        self.out_dim = net_params['out_dim']
        # Copy inside the loop: appending the same dict each iteration would
        # alias it, leaving every head configured with the last aggregator.
        net_params_list = []
        for agg in self.aggregators.split():
            net_params_copy = net_params.copy()
            net_params_copy['aggregators'] = agg
            net_params_list.append(net_params_copy)

        self.models = nn.ModuleList([EIGHead(p) for p in net_params_list])
        self.MLP_layer = MLPReadout(self.out_dim * len(self.aggregators.split()), 1)
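Note the ensemble wiring: `aggregators` is a whitespace-separated string, one `EIGHead` is built per token, and the readout MLP consumes the concatenation of their `out_dim`-wide outputs. A hedged usage sketch (the `EIGEnsemble` class name is assumed, and `net_params` must also carry every key that `EIGHead` itself reads):

# Illustrative only: three aggregators -> three EIGHead models, so the
# readout MLP input width becomes out_dim * 3.
net_params['aggregators'] = 'mean max min'
net_params['out_dim'] = 64
ensemble = EIGEnsemble(net_params)   # assumed class name for the snippet above
assert len(ensemble.models) == 3     # one head per aggregator
# ensemble.MLP_layer maps 64 * 3 = 192 features to a single regression output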
Example #5
    def __init__(self, net_params):
        super().__init__()
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.pos_enc_dim = net_params['pos_enc_dim']
        if self.pos_enc_dim > 0:
            self.embedding_pos_enc = nn.Linear(self.pos_enc_dim, hidden_dim)
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        self.graph_norm = net_params['graph_norm']
        self.n_classes = n_classes
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.JK = net_params['JK']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        device = net_params['device']
        
        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim) # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            EIGLayer(in_dim=hidden_dim, out_dim=hidden_dim, dropout=dropout,
                     graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                     residual=self.residual, aggregators=self.aggregators,
                     scalers=self.scalers, avg_d=self.avg_d,
                     type_net=self.type_net, edge_features=self.edge_feat,
                     edge_dim=edge_dim, pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(EIGLayer(in_dim=hidden_dim, out_dim=out_dim, dropout=dropout,
                                    graph_norm=self.graph_norm, batch_norm=self.batch_norm,
                                    residual=self.residual, aggregators=self.aggregators, scalers=self.scalers,
                                    avg_d=self.avg_d, type_net=self.type_net, edge_features=self.edge_feat,
                                    edge_dim=edge_dim,
                                    pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers).model)
        if self.gru_enable:
            self.gru = GRU(hidden_dim, hidden_dim, device)
            
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example #6
File: pna_net.py Project: mahi97/pna
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        posttrans_layers = net_params['posttrans_layers']
        self.device = net_params['device']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        self.layers = nn.ModuleList([
            PNASimpleLayer(in_dim=hidden_dim,
                           out_dim=hidden_dim,
                           dropout=dropout,
                           batch_norm=self.batch_norm,
                           residual=self.residual,
                           aggregators=self.aggregators,
                           scalers=self.scalers,
                           avg_d=self.avg_d,
                           posttrans_layers=posttrans_layers)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            PNASimpleLayer(in_dim=hidden_dim,
                           out_dim=out_dim,
                           dropout=dropout,
                           batch_norm=self.batch_norm,
                           residual=self.residual,
                           aggregators=self.aggregators,
                           scalers=self.scalers,
                           avg_d=self.avg_d,
                           posttrans_layers=posttrans_layers))

        self.MLP_layer = MLPReadout(out_dim, 1)  # 1 out dim since regression problem
Example #7
File: eig_net.py Project: Saro00/pna
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.JK = net_params['JK']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        device = net_params['device']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=edge_dim)

        self.layers = nn.ModuleList([
            EIGLayer(in_dim=hidden_dim,
                     out_dim=hidden_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            EIGLayer(in_dim=hidden_dim,
                     out_dim=out_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model)
        if self.gru_enable:
            self.gru = GRU(hidden_dim, hidden_dim, device)

        self.MLP_layer = MLPReadout(out_dim, 128)
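Examples #6 through #8 embed their inputs with `AtomEncoder` (and, when edge features are enabled, `BondEncoder`) instead of `nn.Embedding` or `nn.Linear`. These are the standard OGB molecular feature encoders from `ogb.graphproppred.mol_encoder`, which keep one embedding table per integer feature column and sum the results. A minimal standalone sketch (tensor contents and sizes are illustrative):

import torch
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder

atom_encoder = AtomEncoder(emb_dim=75)   # plays the role of hidden_dim above
bond_encoder = BondEncoder(emb_dim=50)   # plays the role of edge_dim above

# OGB molecule graphs carry 9 integer atom features and 3 integer bond features.
x = torch.zeros(32, 9, dtype=torch.long)   # 32 nodes, all-zero indices for the demo
e = torch.zeros(64, 3, dtype=torch.long)   # 64 edges
h = atom_encoder(x)    # -> shape (32, 75)
he = bond_encoder(e)   # -> shape (64, 50)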
Example #8
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        decreasing_dim = net_params['decreasing_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.towers = net_params['towers']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        device = net_params['device']
        self.virtual_node = net_params['virtual_node']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=edge_dim)

        self.layers = nn.ModuleList([
            DGNLayer(in_dim=hidden_dim,
                     out_dim=hidden_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers,
                     towers=self.towers).model for _ in range(n_layers - 1)
        ])
        self.layers.append(
            DGNLayer(in_dim=hidden_dim,
                     out_dim=out_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers,
                     towers=self.towers).model)

        self.MLP_layer = MLPReadout(out_dim,
                                    128,
                                    decreasing_dim=decreasing_dim)

        self.virtual_node_layers = None
        if self.virtual_node is not None and self.virtual_node.lower() != 'none':
            self.virtual_node_layers = nn.ModuleList([
                VirtualNode(dim=hidden_dim, dropout=dropout,
                            batch_norm=self.batch_norm, bias=True,
                            vn_type=self.virtual_node, residual=self.residual)
                for _ in range(n_layers - 1)
            ])
Example #9
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        if self.edge_feat:
            self.embedding_e = nn.Linear(in_dim_edge, edge_dim)

        self.layers = nn.ModuleList([
            DGNLayer(in_dim=hidden_dim,
                     out_dim=hidden_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            DGNLayer(in_dim=hidden_dim,
                     out_dim=out_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model)

        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example #10
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.pos_enc_dim = net_params['pos_enc_dim']
        if self.pos_enc_dim > 0:
            self.embedding_pos_enc = nn.Linear(self.pos_enc_dim, hidden_dim)
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.device = net_params['device']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_bond_type, edge_dim)

        self.layers = nn.ModuleList([
            DGNLayer(in_dim=hidden_dim,
                     out_dim=hidden_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            DGNLayer(in_dim=hidden_dim,
                     out_dim=out_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model)

        if self.readout == "directional" or self.readout == "directional_abs":
            self.MLP_layer = MLPReadout(2 * out_dim, 1)
        else:
            self.MLP_layer = MLPReadout(
                out_dim, 1)  # 1 out dim since regression problem
Example #11
    def __init__(self, net_params):
        super().__init__()
        embedding_size = net_params['embedding_size']
        self.distance_function = net_params['distance_function']
        num_feat = net_params['num_feat']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.type_net = net_params['type_net']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.aggregators = net_params['aggregators']
        self.scalers = net_params['scalers']
        self.avg_d = net_params['avg_d']
        self.residual = net_params['residual']
        self.JK = net_params['JK']
        self.edge_feat = net_params['edge_feat']
        edge_dim = net_params['edge_dim']
        pretrans_layers = net_params['pretrans_layers']
        posttrans_layers = net_params['posttrans_layers']
        self.gru_enable = net_params['gru']
        self.device = net_params['device']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = nn.Linear(num_feat, hidden_dim)

        self.layers = nn.ModuleList([
            EIGLayer(in_dim=hidden_dim,
                     out_dim=hidden_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            EIGLayer(in_dim=hidden_dim,
                     out_dim=out_dim,
                     dropout=dropout,
                     graph_norm=self.graph_norm,
                     batch_norm=self.batch_norm,
                     residual=self.residual,
                     aggregators=self.aggregators,
                     scalers=self.scalers,
                     avg_d=self.avg_d,
                     type_net=self.type_net,
                     edge_features=self.edge_feat,
                     edge_dim=edge_dim,
                     pretrans_layers=pretrans_layers,
                     posttrans_layers=posttrans_layers).model)

        self.MLP_layer = MLPReadout(out_dim, embedding_size)