Example no. 1
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node + 1,
                                        hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.layers = nn.ModuleList([
            GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.graph_norm,
                     self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.graph_norm,
                     self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
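The constructors in these examples only assemble modules; for context, below is a minimal sketch of the forward pass that typically pairs with this graph-classification pattern. It is an assumption, not code from the source: it presumes the DGL-style GCNLayer call signature conv(g, h) and the dgl readout helpers, and omits the graph-norm tensor some variants thread through each layer.

    import dgl

    def forward(self, g, h):
        # Embed integer node features, then apply input-feature dropout.
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        # Stacked GCN layers; each returns updated node features.
        for conv in self.layers:
            h = conv(g, h)
        # Pool node features into a single graph-level representation.
        g.ndata['h'] = h
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        else:  # default to mean readout
            hg = dgl.mean_nodes(g, 'h')
        return self.MLP_layer(hg)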
Example no. 2
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        activation_name = net_params['activation']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = nn.Linear(in_dim, hidden_dim)

        self.layers = nn.ModuleList([
            GCNLayer(hidden_dim, hidden_dim, activations[activation_name],
                     dropout, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, activations[activation_name],
                     dropout, self.batch_norm, self.residual))
Example no. 3
    def __init__(self, net_params):
        super().__init__()

        self.net_params = net_params
        self.readout = self.net_params.readout

        self.embedding_h = nn.Linear(self.net_params.in_dim,
                                     self.net_params.hidden_dim)
        self.in_feat_dropout = nn.Dropout(self.net_params.in_feat_dropout)

        self.layers = nn.ModuleList([
            GCNLayer(self.net_params.hidden_dim, self.net_params.hidden_dim,
                     F.relu, self.net_params.dropout,
                     self.net_params.graph_norm, self.net_params.batch_norm,
                     self.net_params.residual)
            for _ in range(self.net_params.L - 1)
        ])

        self.layers.append(
            GCNLayer(self.net_params.hidden_dim, self.net_params.out_dim,
                     F.relu, self.net_params.dropout,
                     self.net_params.graph_norm, self.net_params.batch_norm,
                     self.net_params.residual))

        self.readout_mlp = MLPReadout(self.net_params.out_dim,
                                      self.net_params.n_classes)
        pass
Example no. 4
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']

        self.embedding_h = torch.nn.Embedding(in_dim,
                                              hidden_dim).to(self.device)
        torch.nn.init.xavier_uniform_(self.embedding_h.weight)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm,
                     self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm,
                     self.residual))
        self.MLP_layer = MLPReadout(2 * out_dim, 1)
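The readout here is MLPReadout(2 * out_dim, 1): two node embeddings are concatenated before scoring, which marks this model as a link-prediction (edge-scoring) head rather than a graph classifier. A minimal sketch of that scoring step, where h_i and h_j stand for the final embeddings of the two endpoint nodes (illustrative names, not from the source):

    import torch

    def edge_score(self, h_i, h_j):
        # Concatenate the endpoint embeddings (hence the 2 * out_dim
        # input width) and map them to a single logit per candidate edge.
        x = torch.cat([h_i, h_j], dim=-1)
        return self.MLP_layer(x)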
Example no. 5
    def __init__(self):
        super().__init__()
        self.L = 4
        self.readout = "mean"
        self.in_dim = 32
        self.hidden_dim = 146
        self.out_dim = 146
        self.n_classes = 10
        self.in_feat_dropout = 0.0
        self.dropout = 0.0
        self.graph_norm = True
        self.batch_norm = True
        self.residual = True

        self.embedding_h = nn.Linear(self.in_dim, self.hidden_dim)
        self.in_feat_dropout = nn.Dropout(self.in_feat_dropout)

        self.layers = nn.ModuleList([
            GCNLayer(self.hidden_dim, self.hidden_dim, F.relu, self.dropout,
                     self.graph_norm, self.batch_norm, self.residual)
            for _ in range(self.L - 1)
        ])

        self.layers.append(
            GCNLayer(self.hidden_dim, self.out_dim, F.relu, self.dropout,
                     self.graph_norm, self.batch_norm, self.residual))

        self.readout_mlp = MLPReadout(self.out_dim, self.n_classes)
        pass
Example no. 6
    def __init__(self):
        super().__init__()
        self.in_dim = 146
        self.hidden_dim = 146
        self.out_dim = 146
        self.n_classes = 10
        self.dropout = 0.0
        self.graph_norm = True
        self.batch_norm = True
        self.residual = True

        self.embedding_h = nn.Linear(self.in_dim, self.hidden_dim)
        self.gcn_1 = GCNLayer(self.hidden_dim, self.hidden_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.gcn_2 = GCNLayer(self.hidden_dim, self.hidden_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.gcn_3 = GCNLayer(self.hidden_dim, self.hidden_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.gcn_4 = GCNLayer(self.hidden_dim, self.out_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.readout_mlp = MLPReadout(self.out_dim, self.n_classes)
        pass
Example no. 7
    def __init__(self, net_params):
        super(GCNNet, self).__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        activation_name = net_params['activation']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)

        self.layers = nn.ModuleList([
            GCNLayer(hidden_dim, hidden_dim, activations[activation_name],
                     dropout, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, activations[activation_name],
                     dropout, self.batch_norm, self.residual))
        self.gru = GRU(hidden_dim, hidden_dim) if net_params['gru'] else None
        self.MLP_layer = MLPReadout(out_dim,
                                    1)  # 1 out dim since regression problem
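The optional self.gru mixes each layer's output back into its input through a recurrent update. The GRU class is defined elsewhere in this codebase; one plausible minimal implementation, assuming it wraps torch.nn.GRU for a single update step (an assumption, not the source definition):

    import torch.nn as nn

    class GRU(nn.Module):
        # One-step recurrent update: the previous node features act as a
        # length-1 input sequence and the new layer output as the hidden
        # state, so the GRU gates how much of each is kept.
        def __init__(self, input_size, hidden_size):
            super().__init__()
            self.gru = nn.GRU(input_size, hidden_size)

        def forward(self, x, y):
            # x, y: (num_nodes, hidden) tensors.
            out, _ = self.gru(x.unsqueeze(0), y.unsqueeze(0).contiguous())
            return out.squeeze(0)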
Example no. 8
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm,
                     self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm,
                     self.residual))
        self.gru = GRU(hidden_dim, hidden_dim) if net_params['gru'] else None
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example no. 9
    def __init__(self, in_dim=64, out_dim=146):
        super().__init__()
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.gcn_1 = GCNLayer(in_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.gcn_o = GCNLayer(out_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        pass
Example no. 10
    def __init__(self, net_params):
        super().__init__()

        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.dgl_builtin = net_params['builtin']

        self.layers = nn.ModuleList()
        # input
        self.layers.append(
            GCNLayer(in_dim,
                     hidden_dim,
                     F.relu,
                     dropout,
                     self.graph_norm,
                     self.batch_norm,
                     self.residual,
                     dgl_builtin=self.dgl_builtin))

        # hidden
        self.layers.extend(
            nn.ModuleList([
                GCNLayer(hidden_dim,
                         hidden_dim,
                         F.relu,
                         dropout,
                         self.graph_norm,
                         self.batch_norm,
                         self.residual,
                         dgl_builtin=self.dgl_builtin)
                for _ in range(n_layers - 1)
            ]))

        # output
        self.layers.append(
            GCNLayer(hidden_dim,
                     n_classes,
                     None,
                     0,
                     self.graph_norm,
                     self.batch_norm,
                     self.residual,
                     dgl_builtin=self.dgl_builtin))

        self.dropout = nn.Dropout(p=dropout)
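Unlike the graph-level models above, this network's final GCN layer is n_classes wide and there is no pooled readout, so it performs per-node classification. A minimal forward sketch under the same conv(g, h) assumption:

    def forward(self, g, h):
        for conv in self.layers:
            h = conv(g, h)
        # No pooling: the final layer already emits per-node class logits.
        return h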
Example no. 11
    def __init__(self, in_dim=146, out_dim=146, n_classes=10):
        super().__init__()
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.gcn_1 = GCNLayer(in_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.gcn_2 = GCNLayer(out_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.gcn_3 = GCNLayer(out_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.gcn_o = GCNLayer(out_dim, out_dim, F.relu, self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
Example no. 12
 def __init__(self,
              num_feats,
              num_classes,
              num_hidden,
              num_layers,
              bias=False,
              activation=F.relu,
              graph_norm=True,
              batch_norm=False,
              pair_norm=False,
              residual=False,
              dropout=0,
              dropedge=0):
     super(GCNNet, self).__init__()
     self.layers = nn.ModuleList()
     self.inLayer = MLPLayer(num_feats,
                             num_hidden,
                             bias=True,
                             activation=activation,
                             residual=residual,
                             dropout=dropout)
     for _ in range(num_layers):
         self.layers.append(
             GCNLayer(num_hidden, num_hidden, bias, activation, graph_norm,
                      batch_norm, pair_norm, residual, dropout, dropedge))
     self.outLayer = MLPLayer(num_hidden,
                              num_classes,
                              bias=True,
                              activation=None,
                              residual=residual,
                              dropout=dropout)
Example no. 13
 def __init__(self, in_dim, hidden_dims):
     super().__init__()
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims:
         self.gcn_list.append(GCNLayer(in_dim, hidden_dim, F.relu, 0.0, True, True, True))
         in_dim = hidden_dim
     pass
Example no. 14
    def __init__(self):
        super().__init__()
        self.in_dim = 64
        self.hidden_dim = 146
        self.out_dim = 146
        self.dropout = 0.0
        self.graph_norm = True
        self.batch_norm = True
        self.residual = True

        self.embedding_h = nn.Linear(self.in_dim, self.hidden_dim)
        self.gcn_1 = GCNLayer(self.hidden_dim, self.hidden_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.gcn_2 = GCNLayer(self.hidden_dim, self.out_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        pass
Example no. 15
    def __init__(self,
                 num_feats,
                 num_classes,
                 num_hidden,
                 num_inLayer=1,
                 num_vsgc=1,
                 bias=False,
                 activation=F.relu,
                 graph_norm=True,
                 batch_norm=False,
                 pair_norm=False,
                 residual=False,
                 dropout=0,
                 dropedge=0):
        super(VMixNet, self).__init__()
        self.inLayers = nn.ModuleList()

        # self.inLayers.append(MLPLayer(num_feats, num_hidden, True, activation, dropout=dropout))
        # if num_inLayer - 2 > 0:
        #     for _ in range(num_inLayer - 2):
        #         self.inLayers.append(MLPLayer(num_hidden, num_hidden, True, activation, dropout=dropout))
        # self.inLayers.append(MLPLayer(num_hidden, num_hidden, True, None, dropout=dropout))

        self.inLayers.append(
            GCNLayer(num_feats, num_hidden, bias, activation, dropout=dropout))
        if num_inLayer == 2:
            self.inLayers.append(
                GCNLayer(num_hidden, num_hidden, bias, None, dropout=dropout))
        elif num_inLayer > 2:
            for _ in range(num_inLayer - 2):
                self.inLayers.append(
                    GCNLayer(num_hidden,
                             num_hidden,
                             bias,
                             activation,
                             dropout=dropout))
            self.inLayers.append(
                GCNLayer(num_hidden, num_hidden, bias, None, dropout=dropout))

        self.vsgc = VSGCLayer(num_hidden,
                              num_classes,
                              bias,
                              num_vsgc,
                              dropout=dropout)
Example no. 16
 def __init__(self, in_dim, hidden_dims, n_classes=200):
     super().__init__()
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims:
         self.gcn_list.append(
             GCNLayer(in_dim, hidden_dim, F.relu, 0.0, True, True, True))
         in_dim = hidden_dim
         pass
     self.readout_mlp = MLPReadout(hidden_dims[-1], n_classes, L=1)
     pass
Example no. 17
    def __init__(self, in_dim=64, hidden_dims=[146, 146], out_dim=146):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 2 <= len(self.hidden_dims) <= 3
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.gcn_1 = GCNLayer(self.hidden_dims[0], self.hidden_dims[1], F.relu,
                              self.dropout, self.graph_norm, self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 3:
            self.gcn_2 = GCNLayer(self.hidden_dims[1], self.hidden_dims[2], F.relu,
                                  self.dropout, self.graph_norm, self.batch_norm, self.residual)
            pass
        self.gcn_o = GCNLayer(self.hidden_dims[-1], out_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm, self.residual)
        pass
Example no. 18
 def __init__(self, in_dim, hidden_dims, n_classes=10):
     super().__init__()
     self.embedding_h = nn.Linear(in_dim, in_dim)
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims:
         self.gcn_list.append(GCNLayer(in_dim, hidden_dim, F.relu, 0.0, True, True, True))
         in_dim = hidden_dim
         pass
     self.readout_mlp = nn.Linear(hidden_dims[-1], n_classes, bias=False)
     pass
Example no. 19
    def __init__(self, in_dim=146, hidden_dims=[146, 146, 146, 146], out_dim=146, n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 3 <= len(self.hidden_dims) <= 6
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.gcn_1 = GCNLayer(self.hidden_dims[0], self.hidden_dims[1], F.relu,
                              self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.gcn_2 = GCNLayer(self.hidden_dims[1], self.hidden_dims[2], F.relu,
                              self.dropout, self.graph_norm, self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 4:
            self.gcn_3 = GCNLayer(self.hidden_dims[2], self.hidden_dims[3], F.relu,
                                  self.dropout, self.graph_norm, self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 5:
            self.gcn_4 = GCNLayer(self.hidden_dims[3], self.hidden_dims[4], F.relu,
                                  self.dropout, self.graph_norm, self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 6:
            self.gcn_5 = GCNLayer(self.hidden_dims[4], self.hidden_dims[5], F.relu,
                                  self.dropout, self.graph_norm, self.batch_norm, self.residual)

        self.gcn_o = GCNLayer(self.hidden_dims[-1], out_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
Example no. 20
    def __init__(self,
                 num_feats,
                 num_classes,
                 num_hidden,
                 num_layers,
                 bias=False,
                 activation=F.tanh,
                 graph_norm=False,
                 batch_norm=False,
                 pair_norm=False,
                 residual=False,
                 dropout=0,
                 dropedge=0,
                 cutgraph=0,
                 init_beta=1.,
                 learn_beta=True):
        super(ResGCNNet, self).__init__()

        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        # self_gcn has an extra parameter appended at the end; remember to remove it
        for i in range(0, num_layers):
            if i == 0:
                self.layers.append(
                    GCNLayer(num_feats, num_hidden, bias, activation,
                             graph_norm, batch_norm, pair_norm, residual,
                             dropout, dropedge, cutgraph, init_beta,
                             learn_beta))
            elif i == num_layers - 1:
                self.layers.append(
                    GCNLayer(num_hidden, num_classes, bias, None, graph_norm,
                             batch_norm, pair_norm, residual, dropout,
                             dropedge, cutgraph, init_beta, learn_beta))
            else:
                self.layers.append(
                    GCNLayer(num_hidden, num_hidden, bias, activation,
                             graph_norm, batch_norm, pair_norm, residual,
                             dropout, dropedge, cutgraph, init_beta,
                             learn_beta))
Example no. 21
 def __init__(self,
              num_feats,
              num_classes,
              num_hidden,
              num_layers,
              bias=False,
              activation=F.tanh,
              graph_norm=False,
              batch_norm=False,
              dropout=0):
     super(DenseGCNNet, self).__init__()
     self.num_layers = num_layers
     self.layers = nn.ModuleList()
     self.layers.append(
         GCNLayer(num_feats, num_hidden, bias, activation, graph_norm,
                  batch_norm, dropout))
     for i in range(1, num_layers - 1):
         self.layers.append(
             GCNLayer(num_hidden * i, num_hidden, bias, None, graph_norm,
                      batch_norm, dropout))
     self.layers.append(
         GCNLayer(num_hidden * (num_layers - 1), num_classes, bias,
                  activation, graph_norm, batch_norm, dropout))
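The widening input sizes (num_hidden * i for layer i) mark this as a DenseNet-style GCN: every layer after the first consumes the concatenation of all previous layers' outputs. A minimal sketch of that wiring, again assuming the conv(g, h) call signature:

    import torch

    def forward(self, g, h):
        outputs = []
        for i, conv in enumerate(self.layers):
            # Layer 0 sees the raw features; layer i > 0 sees the
            # concatenation of outputs 0..i-1 (width num_hidden * i).
            x = h if i == 0 else torch.cat(outputs, dim=-1)
            outputs.append(conv(g, x))
        return outputs[-1]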
Example no. 22
    def __init__(self, in_dim, hidden_dims, readout="mean"):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dims = hidden_dims
        self.readout = readout

        self.gcn_list = nn.ModuleList()
        _in_dim = self.in_dim
        for hidden_dim in self.hidden_dims:
            self.gcn_list.append(
                GCNLayer(_in_dim, hidden_dim, F.relu, 0.0, True, True, True))
            _in_dim = hidden_dim
            pass

        Tools.print(
            "GCNNet1 #GNN1={} in_dim={} hidden_dims={} readout={}".format(
                len(self.hidden_dims), self.in_dim, self.hidden_dims,
                self.readout))
        pass
Example no. 23
    def __init__(self, in_dim, hidden_dims, skip_dim=128, n_out=1):
        super().__init__()
        self.embedding_h = nn.Linear(in_dim, in_dim)

        _in_dim = in_dim
        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims:
            self.gcn_list.append(GCNLayer(_in_dim, hidden_dim, F.relu, 0.0, True, True, True))
            _in_dim = hidden_dim
            pass

        self.skip_connect_index = [0, (len(hidden_dims)+1)//2, len(hidden_dims)]
        self.skip_connect_list = nn.ModuleList()
        for hidden_dim in [in_dim, hidden_dims[self.skip_connect_index[1] - 1],
                           hidden_dims[self.skip_connect_index[2] - 1]]:
            self.skip_connect_list.append(nn.Linear(hidden_dim, skip_dim, bias=False))
            pass

        self.readout = nn.Linear(len(self.skip_connect_list) * skip_dim, n_out, bias=False)
        pass
Example no. 24
    def __init__(self, in_dim, hidden_dims, skip_which, skip_dim=128, n_out=1):
        super().__init__()
        self.embedding_h = nn.Linear(in_dim, in_dim)

        _in_dim = in_dim
        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims:
            self.gcn_list.append(GCNLayer(_in_dim, hidden_dim, F.relu, 0.0, True, True, True))
            _in_dim = hidden_dim
            pass

        sk_hidden_dims = [in_dim] + [hidden_dims[which-1] for which in skip_which]
        self.skip_connect_index = [0] + skip_which
        self.skip_connect_list = nn.ModuleList()
        for hidden_dim in sk_hidden_dims:
            self.skip_connect_list.append(nn.Linear(hidden_dim, skip_dim, bias=False))
            pass

        self.readout = nn.Linear(len(self.skip_connect_list) * skip_dim, n_out, bias=False)
        pass
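This variant taps the embedded input (index 0 in skip_connect_index) and the layers listed in skip_which, projects each tapped representation to skip_dim, and concatenates the projections before the final linear readout. A minimal sketch of that skip-connection readout, assuming node-level features throughout and the usual conv(g, h) signature:

    import torch

    def forward(self, g, h):
        h = self.embedding_h(h)
        feats = [h]                      # index 0: embedded input
        for conv in self.gcn_list:
            h = conv(g, h)
            feats.append(h)
        # Project each tapped representation and concatenate.
        skips = [lin(feats[i]) for lin, i in
                 zip(self.skip_connect_list, self.skip_connect_index)]
        return self.readout(torch.cat(skips, dim=-1))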
Example no. 25
    def __init__(self, in_dim, hidden_dims, n_classes=10, readout="mean"):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dims = hidden_dims
        self.readout = readout

        self.embedding_h = nn.Linear(in_dim, in_dim)
        self.gcn_list = nn.ModuleList()
        _in_dim = self.in_dim
        for hidden_dim in hidden_dims:
            self.gcn_list.append(
                GCNLayer(_in_dim, hidden_dim, F.relu, 0.0, True, True, True))
            _in_dim = hidden_dim
            pass
        self.readout_mlp = nn.Linear(hidden_dims[-1], n_classes, bias=False)

        Tools.print(
            "GCNNet2 #GNN2={} in_dim={} hidden_dims={} readout={}".format(
                len(self.hidden_dims), self.in_dim, self.hidden_dims,
                self.readout))
        pass
Example no. 26
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.agg = net_params['agg']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']

        
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        
        self.embedding_lin = nn.Linear(num_atom_type, hidden_dim, bias=False)
        
        self.layers = nn.ModuleList(
            [GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.graph_norm, 
                      self.batch_norm, self.layer_norm, self.agg, self.residual) for _ in range(n_layers)]
        )
        self.linear_ro = nn.Linear(hidden_dim, out_dim, bias=False)        

        #   predict layer for regression & classification
        self.linear_predict = nn.Linear(out_dim, 1, bias=True)

        # additional parameters for gated residual connection
        if self.residual == "gated":
            self.W_g = nn.Linear(2 * hidden_dim, hidden_dim, bias=False)