Code example #1
    def __init__(self, in_dim, hidden_dims):
        super().__init__()

        self.in_dim_edge = 1
        self.embedding_e = nn.Linear(self.in_dim_edge, in_dim)

        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims:
            self.gcn_list.append(
                GatedGCNLayer(in_dim, hidden_dim, 0.0, True, True, True))
            in_dim = hidden_dim
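The excerpt stops at the constructor, so no forward pass is shown. A minimal sketch, assuming the benchmarking-gnns convention that GatedGCNLayer.forward(g, h, e) returns the updated (h, e) pair (variants built with graph_norm also take snorm_n/snorm_e normalization tensors), could look like this; it is illustrative, not part of the original:

    # Illustrative forward pass for Code example #1 (assumed layer signature).
    def forward(self, g, h, e):
        e = self.embedding_e(e)  # lift the scalar edge feature to in_dim
        for gcn in self.gcn_list:
            h, e = gcn(g, h, e)  # each layer refines node and edge features
        return h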
Code example #2
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        num_edge_type = net_params['num_edge_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_edge_type, hidden_dim)
        else:
            self.embedding_e = nn.Linear(1, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.batch_norm,
                          self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm,
                          self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
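The snippet omits the class statement and the contents of net_params. A hypothetical configuration covering the keys the constructor reads (the class name and every value below are assumptions, for illustration only):

net_params = {
    'num_node_type': 28, 'num_edge_type': 4,
    'hidden_dim': 70, 'out_dim': 70, 'n_classes': 2,
    'in_feat_dropout': 0.0, 'dropout': 0.0, 'L': 4,
    'readout': 'mean', 'batch_norm': True, 'residual': True,
    'edge_feat': True, 'device': 'cpu',
    'pos_enc': True, 'pos_enc_dim': 8,  # pos_enc_dim only read when pos_enc is True
}
model = GatedGCNNet(net_params)  # assumed class name; the snippet does not show it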
Code example #3
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim, hidden_dim)
        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.graph_norm,
                          self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GatedGCNLayer(hidden_dim, out_dim, dropout, self.graph_norm,
                          self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #4
    def __init__(self, in_dim=64, hidden_dims=[70, 70], out_dim=70):
        super().__init__()
        self.hidden_dims = hidden_dims
        self.in_dim_edge = 1
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.embedding_e = nn.Linear(self.in_dim_edge, self.hidden_dims[0])

        _in_dim = self.hidden_dims[0]
        self.gcn_list = nn.ModuleList()
        for hidden_dim in self.hidden_dims[1:]:
            self.gcn_list.append(
                GatedGCNLayer(_in_dim, hidden_dim, self.dropout,
                              self.graph_norm, self.batch_norm, self.residual))
            _in_dim = hidden_dim
        self.gcn_list.append(
            GatedGCNLayer(self.hidden_dims[-1], out_dim, self.dropout,
                          self.graph_norm, self.batch_norm, self.residual))
Code example #5
    def __init__(self, in_dim, hidden_dims, n_classes=200):
        super().__init__()

        self.in_dim_edge = 1
        self.embedding_h = nn.Linear(in_dim, in_dim)
        self.embedding_e = nn.Linear(self.in_dim_edge, in_dim)

        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims:
            self.gcn_list.append(GatedGCNLayer(in_dim, hidden_dim, 0.0, True, True, True))
            in_dim = hidden_dim

        self.readout_mlp = nn.Linear(hidden_dims[-1], n_classes, bias=False)
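A sketch of the matching forward pass, assuming DGL batched graphs and a layer forward of the form (g, h, e) -> (h, e); this method is illustrative, not from the original:

    # Illustrative forward pass for Code example #5 (assumes `import dgl`).
    def forward(self, g, h, e):
        h = self.embedding_h(h)
        e = self.embedding_e(e)
        for gcn in self.gcn_list:
            h, e = gcn(g, h, e)
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')  # mean readout over each graph in the batch
        return self.readout_mlp(hg)  # per-graph class logits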
Code example #6
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.freeze_encoder = net_params['freeze_encoder']
        self.freeze_decoder = net_params['freeze_decoder']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.batch_norm,
                          self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm,
                          self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)

        if self.freeze_encoder:
            for param in self.embedding_h.parameters():
                param.requires_grad = False
            for param in self.embedding_e.parameters():
                param.requires_grad = False
        if self.freeze_decoder:
            for param in self.MLP_layer.parameters():
                param.requires_grad = False
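Since requires_grad is switched off for the frozen embedding and readout modules, standard PyTorch practice is to hand the optimizer only the remaining trainable parameters. A usage sketch, where model is an assumed instance of the class above:

import torch

# Only parameters that still require gradients are optimized; the frozen
# encoder/decoder weights are skipped entirely.
optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=1e-3)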
Code example #7
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.gated_gcn_agg = net_params['gated_gcn_agg']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']

        self.embedding_h_lin = nn.Linear(num_atom_type, hidden_dim, bias=False)

        if self.edge_feat:
            self.embedding_e_lin = nn.Linear(num_bond_type,
                                             hidden_dim,
                                             bias=False)
        else:
            self.embedding_e = nn.Linear(1, hidden_dim, bias=False)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.graph_norm,
                          self.batch_norm, self.layer_norm, self.gated_gcn_agg)
            for _ in range(n_layers)
        ])
        self.linear_ro = nn.Linear(hidden_dim, out_dim, bias=False)
        self.linear_predict = nn.Linear(out_dim, 1, bias=True)

        # additional parameters for the gated residual connection
        if self.residual == "gated":
            self.W_g = nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
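The snippet only allocates W_g; how the gate is applied is not shown. A common formulation mixes the layer input and output through a sigmoid gate, sketched below as a hypothetical helper (the repository's exact update rule may differ):

import torch
import torch.nn as nn

def gated_residual(W_g: nn.Linear, h_in: torch.Tensor, h: torch.Tensor) -> torch.Tensor:
    # Hypothetical gated residual: z in [0, 1] interpolates between the
    # layer input h_in and the layer output h. Illustrative only.
    z = torch.sigmoid(W_g(torch.cat([h_in, h], dim=-1)))
    return z * h_in + (1.0 - z) * h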
Code example #8
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim'] # node_dim
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)  # node feat is a float
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.graph_norm,
                          self.batch_norm, self.residual) for _ in range(n_layers)
        ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
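This model classifies nodes, so the MLP readout is applied per node with no whole-graph pooling. A forward sketch, assuming the older benchmarking-gnns layer signature that also takes the graph-size normalization tensors snorm_n and snorm_e (illustrative, not from the original):

    # Illustrative forward pass for Code example #8 (assumed layer signature).
    def forward(self, g, h, e, snorm_n, snorm_e):
        h = self.embedding_h(h)
        e = self.embedding_e(e)
        for conv in self.layers:
            h, e = conv(g, h, e, snorm_n, snorm_e)
        return self.MLP_layer(h)  # one logit vector per node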
Code example #9
    def __init__(self,
                 in_dim=146,
                 hidden_dims=[70, 70, 70, 70],
                 out_dim=70,
                 n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        self.in_dim_edge = 1
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.embedding_e = nn.Linear(self.in_dim_edge, self.hidden_dims[0])
        self.gated_gcn_1 = GatedGCNLayer(self.hidden_dims[0],
                                         self.hidden_dims[1], self.dropout,
                                         self.graph_norm, self.batch_norm,
                                         self.residual)
        if len(self.hidden_dims) >= 3:
            self.gated_gcn_2 = GatedGCNLayer(self.hidden_dims[1],
                                             self.hidden_dims[2], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)
        if len(self.hidden_dims) >= 4:
            self.gated_gcn_3 = GatedGCNLayer(self.hidden_dims[2],
                                             self.hidden_dims[3], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)
        if len(self.hidden_dims) >= 5:
            self.gated_gcn_4 = GatedGCNLayer(self.hidden_dims[3],
                                             self.hidden_dims[4], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)
        if len(self.hidden_dims) >= 6:
            self.gated_gcn_5 = GatedGCNLayer(self.hidden_dims[4],
                                             self.hidden_dims[5], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)

        self.gated_gcn_o = GatedGCNLayer(self.hidden_dims[-1], out_dim,
                                         self.dropout, self.graph_norm,
                                         self.batch_norm, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
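The numbered gated_gcn_1 through gated_gcn_5 attributes guarded by length checks can be collapsed into the nn.ModuleList pattern that Code example #4 already uses. An equivalent construction for the constructor body:

        # Equivalent to the if-chain above (mirrors Code example #4).
        self.gcn_list = nn.ModuleList()
        _in_dim = self.hidden_dims[0]
        for hidden_dim in self.hidden_dims[1:]:
            self.gcn_list.append(
                GatedGCNLayer(_in_dim, hidden_dim, self.dropout,
                              self.graph_norm, self.batch_norm, self.residual))
            _in_dim = hidden_dim
        self.gcn_list.append(
            GatedGCNLayer(self.hidden_dims[-1], out_dim, self.dropout,
                          self.graph_norm, self.batch_norm, self.residual))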
Code example #10
    def __init__(self, in_dim, hidden_dims, readout="mean"):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dims = hidden_dims
        self.readout = readout

        self.in_dim_edge = 1
        self.embedding_h = nn.Linear(in_dim, in_dim)
        self.embedding_e = nn.Linear(self.in_dim_edge, in_dim)

        self.gcn_list = nn.ModuleList()
        _in_dim = self.in_dim
        for hidden_dim in hidden_dims:
            self.gcn_list.append(
                GatedGCNLayer(_in_dim, hidden_dim, 0.0, True, True, True))
            _in_dim = hidden_dim

        Tools.print(
            "GatedGCNNet1 #GNN1={} in_dim={} hidden_dims={} readout={}".format(
                len(self.hidden_dims), self.in_dim, self.hidden_dims,
                self.readout))
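The stored readout string is typically dispatched to a DGL whole-graph pooling op. A forward sketch under that assumption (illustrative, not part of the original):

    # Illustrative forward pass for Code example #10 (assumes `import dgl`).
    def forward(self, g, h, e):
        h = self.embedding_h(h)
        e = self.embedding_e(e)
        for gcn in self.gcn_list:
            h, e = gcn(g, h, e)
        g.ndata['h'] = h
        if self.readout == "sum":
            return dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            return dgl.max_nodes(g, 'h')
        return dgl.mean_nodes(g, 'h')  # default: mean readout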