Code example #1
    def __init__(self,
                 in_dim=146,
                 hidden_dims=[70, 70, 70, 70],
                 out_dim=70,
                 n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 3 <= len(self.hidden_dims) <= 6
        self.in_dim_edge = 1
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.embedding_e = nn.Linear(self.in_dim_edge, self.hidden_dims[0])
        self.gated_gcn_1 = GatedGCNLayer(self.hidden_dims[0],
                                         self.hidden_dims[1], self.dropout,
                                         self.graph_norm, self.batch_norm,
                                         self.residual)
        self.gated_gcn_2 = GatedGCNLayer(self.hidden_dims[1],
                                         self.hidden_dims[2], self.dropout,
                                         self.graph_norm, self.batch_norm,
                                         self.residual)
        if len(self.hidden_dims) >= 4:
            self.gated_gcn_3 = GatedGCNLayer(self.hidden_dims[2],
                                             self.hidden_dims[3], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)
        if len(self.hidden_dims) >= 5:
            self.gated_gcn_4 = GatedGCNLayer(self.hidden_dims[3],
                                             self.hidden_dims[4], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)
        if len(self.hidden_dims) >= 6:
            self.gated_gcn_5 = GatedGCNLayer(self.hidden_dims[4],
                                             self.hidden_dims[5], self.dropout,
                                             self.graph_norm, self.batch_norm,
                                             self.residual)

        self.gated_gcn_o = GatedGCNLayer(self.hidden_dims[-1], out_dim,
                                         self.dropout, self.graph_norm,
                                         self.batch_norm, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
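
The constructor above only wires the layers together. Below is a hedged forward-pass sketch, assuming the benchmarking-gnns convention in which each GatedGCNLayer takes the batched DGL graph, node features, edge features and per-graph normalization factors and returns updated (h, e); the snorm arguments and the mean-node readout are assumptions for illustration, not part of the excerpt.

# Hypothetical forward pass for the GatedGCN model above (assumed layer signatures).
import dgl

def forward(self, g, h, e, snorm_n, snorm_e):
    h = self.embedding_h(h)                        # node features -> hidden_dims[0]
    e = self.embedding_e(e)                        # scalar edge features -> hidden_dims[0]
    h, e = self.gated_gcn_1(g, h, e, snorm_n, snorm_e)
    h, e = self.gated_gcn_2(g, h, e, snorm_n, snorm_e)
    if len(self.hidden_dims) >= 4:
        h, e = self.gated_gcn_3(g, h, e, snorm_n, snorm_e)
    if len(self.hidden_dims) >= 5:
        h, e = self.gated_gcn_4(g, h, e, snorm_n, snorm_e)
    if len(self.hidden_dims) >= 6:
        h, e = self.gated_gcn_5(g, h, e, snorm_n, snorm_e)
    h, e = self.gated_gcn_o(g, h, e, snorm_n, snorm_e)
    g.ndata['h'] = h
    hg = dgl.mean_nodes(g, 'h')                    # one vector per graph in the batch
    return self.readout_mlp(hg)
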
Code example #2
    def __init__(self,
                 in_dim=146,
                 hidden_dims=[108, 108, 108, 108],
                 out_dim=108,
                 n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 3 <= len(self.hidden_dims) <= 6
        self.dropout = 0.0
        self.residual = True
        self.sage_aggregator = "meanpool"

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.graph_sage_1 = GraphSageLayer(self.hidden_dims[0],
                                           self.hidden_dims[1], F.relu,
                                           self.dropout, self.sage_aggregator,
                                           self.residual)
        self.graph_sage_2 = GraphSageLayer(self.hidden_dims[1],
                                           self.hidden_dims[2], F.relu,
                                           self.dropout, self.sage_aggregator,
                                           self.residual)
        if len(self.hidden_dims) >= 4:
            self.graph_sage_3 = GraphSageLayer(self.hidden_dims[2],
                                               self.hidden_dims[3], F.relu,
                                               self.dropout,
                                               self.sage_aggregator,
                                               self.residual)
        if len(self.hidden_dims) >= 5:
            self.graph_sage_4 = GraphSageLayer(self.hidden_dims[3],
                                               self.hidden_dims[4], F.relu,
                                               self.dropout,
                                               self.sage_aggregator,
                                               self.residual)
        if len(self.hidden_dims) >= 6:
            self.graph_sage_5 = GraphSageLayer(self.hidden_dims[4],
                                               self.hidden_dims[5], F.relu,
                                               self.dropout,
                                               self.sage_aggregator,
                                               self.residual)

        self.graph_sage_o = GraphSageLayer(self.hidden_dims[-1], out_dim,
                                           F.relu, self.dropout,
                                           self.sage_aggregator, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
Code example #3
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']

        self.readout = net_params['readout']
        self.layer_norm = net_params['layer_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.lap_pos_enc = net_params['lap_pos_enc']
        self.wl_pos_enc = net_params['wl_pos_enc']
        max_wl_role_index = 100

        if self.lap_pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        if self.wl_pos_enc:
            self.embedding_wl_pos_enc = nn.Embedding(max_wl_role_index,
                                                     hidden_dim)

        self.embedding_h = nn.Embedding(in_dim_node,
                                        hidden_dim)  # node feat is an integer

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
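
A hedged sketch of the matching forward pass: the Laplacian and WL positional encodings set up above are typically added to the node embedding before the transformer layers. The argument names h_lap_pos_enc / h_wl_pos_enc and the per-layer call layer(g, h) follow the graphtransformer convention and are assumptions, not part of the excerpt.

# Hypothetical forward pass for the graph transformer above (assumed signatures).
def forward(self, g, h, h_lap_pos_enc=None, h_wl_pos_enc=None):
    h = self.embedding_h(h)                        # integer node ids -> hidden_dim
    if self.lap_pos_enc:
        h = h + self.embedding_lap_pos_enc(h_lap_pos_enc.float())
    if self.wl_pos_enc:
        h = h + self.embedding_wl_pos_enc(h_wl_pos_enc)
    h = self.in_feat_dropout(h)
    for conv in self.layers:
        h = conv(g, h)                             # each GraphTransformerLayer updates node features
    return self.MLP_layer(h)                       # per-node class scores
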
Code example #4
    def __init__(self, net_params):
        super().__init__()
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.layer_norm = net_params['layer_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.lap_pos_enc = net_params['lap_pos_enc']
        self.wl_pos_enc = net_params['wl_pos_enc']
        max_wl_role_index = 37  # the maximum graph size in the dataset

        if self.lap_pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        if self.wl_pos_enc:
            self.embedding_wl_pos_enc = nn.Embedding(max_wl_role_index,
                                                     hidden_dim)

        self.embedding_h = AtomEncoder(emb_dim=hidden_dim)

        if self.edge_feat:
            self.embedding_e = BondEncoder(emb_dim=hidden_dim)
        else:
            self.embedding_e = nn.Linear(1, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout,
                                  self.layer_norm, self.batch_norm,
                                  self.residual))
        self.MLP_layer = MLPReadout(
            out_dim, 128)  # 128 out dim since regression problem
Code example #5
File: gat_net.py Project: xyc1207/benchmarking-gnns
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']

        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.device = net_params['device']

        self.layer_type = {
            "dgl": GATLayer,
            "dgl-v1": GATLayerV1,
            "dgl-v2": GATLayerV2,
            "dgl-v3": GATLayerV3,
            "dgl-v4": GATLayerV4,
            "dgl-v5": GATLayerV5,
            "dgl-v6": GATLayerV6,
            "dgl-v7": GATLayerV7,
            "edgereprfeat": CustomGATLayerEdgeReprFeat,
            "edgefeat": CustomGATLayer,
            "isotropic": CustomGATLayerIsotropic,
        }.get(net_params['layer_type'], GATLayer)

        self.embedding_h = torch.nn.Embedding(in_dim, hidden_dim *
                                              num_heads).to(self.device)
        torch.nn.init.xavier_uniform_(self.embedding_h.weight)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            self.layer_type(hidden_dim * num_heads, hidden_dim, num_heads,
                            dropout, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            self.layer_type(hidden_dim * num_heads, out_dim, 1, dropout,
                            self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(2 * out_dim, 1)
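
For reference, an illustrative net_params dictionary for the constructor above; every value here is an assumption chosen only to show the expected keys, and the enclosing class name GATNet is likewise assumed.

# Illustrative configuration (all values are assumptions, not from the source project).
net_params = {
    'in_dim': 146,            # vocabulary size for the node-id embedding
    'hidden_dim': 18,
    'out_dim': 144,
    'n_heads': 8,
    'in_feat_dropout': 0.0,
    'dropout': 0.0,
    'L': 4,
    'readout': 'mean',
    'batch_norm': True,
    'residual': True,
    'device': 'cpu',
    'layer_type': 'dgl',      # unknown keys fall back to GATLayer
}
model = GATNet(net_params)    # hypothetical class name wrapping the __init__ above
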
Code example #6
    def __init__(self, in_dim=146, hidden_dims=[146, 146, 146, 146], out_dim=146, n_classes=10, normalize=False):
        super().__init__()
        self.hidden_dims = hidden_dims

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])

        self.gcn_list = nn.ModuleList()
        _in_dim = self.hidden_dims[0]
        for hidden_dim in self.hidden_dims[1:]:
            self.gcn_list.append(GCNConv(_in_dim, hidden_dim, normalize=normalize))
            _in_dim = hidden_dim
            pass
        self.gcn_list.append(GCNConv(self.hidden_dims[-1], out_dim, normalize=normalize))

        self.readout_mlp = MLPReadout(out_dim, n_classes)
        self.relu = nn.ReLU()

        self.top_k = TopKPooling(out_dim, ratio=0.7)
        pass
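
A hedged usage sketch for this PyTorch Geometric variant, assuming a standard mini-batch with x, edge_index and batch vectors; the final global mean pooling readout is an assumption for illustration.

# Hypothetical forward pass for the GCNConv/TopKPooling model above (assumed readout).
from torch_geometric.nn import global_mean_pool

def forward(self, x, edge_index, batch):
    h = self.relu(self.embedding_h(x))
    for gcn in self.gcn_list:
        h = self.relu(gcn(h, edge_index))          # GCNConv takes (x, edge_index)
    h, edge_index, _, batch, _, _ = self.top_k(h, edge_index, batch=batch)
    hg = global_mean_pool(h, batch)                # one vector per graph
    return self.readout_mlp(hg)
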
Code example #7
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        in_dim_edge = net_params['in_dim_edge']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']

        self.layer_type = {
            "dgl": GATLayer,
            "edgereprfeat": CustomGATLayerEdgeReprFeat,
            "edgefeat": CustomGATLayer,
            "isotropic": CustomGATLayerIsotropic,
        }.get(net_params['layer_type'], GATLayer)

        self.embedding_h = nn.Linear(in_dim, hidden_dim * num_heads)

        if self.layer_type != GATLayer:
            self.edge_feat = net_params['edge_feat']
            self.embedding_e = nn.Linear(in_dim_edge, hidden_dim * num_heads)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            self.layer_type(hidden_dim * num_heads, hidden_dim, num_heads,
                            dropout, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            self.layer_type(hidden_dim * num_heads, out_dim, 1, dropout,
                            self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)
Code example #8
    def __init__(self, net_params):
        super().__init__()
        self.in_dim_node = net_params['in_dim']
        self.in_dim_edge = net_params['in_dim_edge']
        depth_of_mlp = net_params['depth_of_mlp']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.gin_like_readout = False  # if True, uses a GIN-like readout, but without diag pool, since this is a node task

        block_features = [hidden_dim] * n_layers  # L here is the number of blocks

        if not self.edge_feat:
            original_features_num = self.in_dim_node + 1  # Number of features of the input
        else:
            original_features_num = self.in_dim_node + self.in_dim_edge + 1  # Number of features of the input

        # sequential mlp blocks
        last_layer_features = original_features_num
        self.reg_blocks = nn.ModuleList()
        for layer, next_layer_features in enumerate(block_features):
            mlp_block = RegularBlock(depth_of_mlp, last_layer_features,
                                     next_layer_features, self.residual)
            self.reg_blocks.append(mlp_block)
            last_layer_features = next_layer_features

        if self.gin_like_readout:
            self.fc_layers = nn.ModuleList()
            for output_features in block_features:
                # each block's output is pooled (hence 2*output_features) and passed through a fully connected layer
                fc = FullyConnected(2 * output_features,
                                    n_classes,
                                    activation_fn=None)
                self.fc_layers.append(fc)
        else:
            self.mlp_prediction = MLPReadout(
                2 * (sum(block_features) + original_features_num), n_classes)
Code example #9
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        aggr_type = "sum"  # default for MoNet

        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()

        # Hidden layer
        for _ in range(n_layers - 1):
            self.layers.append(
                GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                         dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim),
                                                  nn.Tanh()))

        # Output layer
        self.layers.append(
            GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type, dropout,
                     batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))

        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #10
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        graph_norm = net_params['graph_norm']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        dropout = net_params['dropout']
        n_filters = net_params['K']
        n_layers = net_params['L']
        in_feat_dropout = net_params['in_feat_dropout']
        n_classes = net_params['n_classes']

        num_low = net_params['num_low']
        num_high = net_params['num_high']
        num_mid = net_params['num_mid']
        opt = net_params['opt']
        gate = net_params['gate']

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.readout = net_params['readout']
        self.device = net_params['device']
        self.layers = nn.ModuleList([
            AUTOGCNLayer(hidden_dim,
                         hidden_dim,
                         F.relu,
                         dropout,
                         graph_norm,
                         batch_norm,
                         num_low=num_low,
                         num_high=num_high,
                         num_mid=num_mid,
                         opt=opt,
                         K=n_filters,
                         residual=residual,
                         gate=gate) for _ in range(n_layers)
        ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
Code example #11
File: graphsage_net.py Project: jeba91/GNN_Toxic
 def __init__(self, net_params):
     super().__init__()
     in_dim = net_params['in_dim']
     hidden_dim = net_params['hidden_dim']
     out_dim = net_params['out_dim']
     n_classes = net_params['n_classes']
     in_feat_dropout = net_params['in_feat_dropout']
     dropout = net_params['dropout']
     aggregator_type = net_params['sage_aggregator']
     n_layers = net_params['L']
     self.readout = net_params['readout']
     self.residual = net_params['residual']
     
     self.embedding_h = nn.Linear(in_dim, hidden_dim)
     self.in_feat_dropout = nn.Dropout(in_feat_dropout)
     
     self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                           dropout, aggregator_type, self.residual) for _ in range(n_layers-1)])
     self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, self.residual))
     self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #12
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_dim = net_params['in_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']

        #         self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim,
                           hidden_dim,
                           F.relu,
                           dropout,
                           aggregator_type,
                           batch_norm,
                           residual,
                           dgl_builtin=net_params['dgl_builtin'])
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphSageLayer(hidden_dim,
                           out_dim,
                           F.relu,
                           dropout,
                           aggregator_type,
                           batch_norm,
                           residual,
                           dgl_builtin=net_params['dgl_builtin']))
        self.MLP_layer = MLPReadout(out_dim,
                                    1)  # 1 out dim since regression problem
Code example #13
 def __init__(self, net_params):
     super().__init__()
     in_dim = net_params['in_dim']
     in_dim_edge = net_params['in_dim_edge']
     hidden_dim = net_params['hidden_dim']
     out_dim = net_params['out_dim']
     n_classes = net_params['n_classes']
     dropout = net_params['dropout']
     n_layers = net_params['L']
     self.readout = net_params['readout']
     self.norm = net_params['norm']
     self.residual = net_params['residual']
     self.edge_feat = net_params['edge_feat']
     self.device = net_params['device']
     
     self.embedding_h = nn.Linear(in_dim, hidden_dim)
     self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)
     self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                 self.norm, self.residual) for _ in range(n_layers-1) ]) 
     self.layers.append(GatedGCNLayer(hidden_dim, out_dim, dropout, self.norm, self.residual))
     self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #14
    def __init__(self, in_dim=146, hidden_dims=[146, 146, 146, 146, 146, 146], out_dim=146, n_classes=1000):
        super().__init__()
        self.hidden_dims = hidden_dims
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])

        _in_dim = self.hidden_dims[0]
        self.gcn_list = nn.ModuleList()
        for hidden_dim in self.hidden_dims[1:]:
            self.gcn_list.append(GCNLayer(_in_dim, hidden_dim, F.relu,
                                          self.dropout, self.graph_norm, self.batch_norm, self.residual))
            _in_dim = hidden_dim
            pass
        self.gcn_list.append(GCNLayer(self.hidden_dims[-1], out_dim, F.relu,
                                      self.dropout, self.graph_norm, self.batch_norm, self.residual))
        self.readout_mlp = MLPReadout(out_dim, n_classes, L=1)
        pass
Code example #15
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim'] # node_dim
        in_dim_edge = 1 # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        
        self.embedding_h = nn.Linear(in_dim_node, hidden_dim)  # node feat is a float
        self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge feat is a float
        self.layers = nn.ModuleList([ GatedGCNLayer(hidden_dim, hidden_dim, dropout,
                                                       self.graph_norm, self.batch_norm, self.residual) for _ in range(n_layers) ])
        self.MLP_layer = MLPReadout(hidden_dim, n_classes)
Code example #16
 def __init__(self, net_params):
     super().__init__()
     in_dim = net_params['in_dim']
     hidden_dim = net_params['hidden_dim']
     out_dim = net_params['out_dim']
     n_classes = net_params['n_classes']
     in_feat_dropout = net_params['in_feat_dropout']
     dropout = net_params['dropout']
     n_layers = net_params['L']
     self.readout = net_params['readout']
     self.batch_norm = net_params['batch_norm']
     self.residual = net_params['residual']
     self.n_classes = n_classes
     self.device = net_params['device']
     
     self.embedding_h = nn.Linear(in_dim, hidden_dim)
     self.in_feat_dropout = nn.Dropout(in_feat_dropout)
     
     self.layers = nn.ModuleList([GCNLayer(hidden_dim, hidden_dim, F.relu, dropout,
                                           self.batch_norm, self.residual) for _ in range(n_layers-1)])
     self.layers.append(GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))
     self.MLP_layer = MLPReadout(2*out_dim, n_classes)        
Code example #17
    def __init__(self, net_params):
        super().__init__()
        self.num_atom_type = net_params['num_atom_type']    # 'num_atom_type' is 'nodeclasses' as in RingGNN original repo
        self.num_bond_type = net_params['num_bond_type']
        depth_of_mlp = net_params['depth_of_mlp']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.diag_pool_readout = True                     # if True, uses the new_suffix readout from original code
        num_classes = 1 # since regression problem
        
        block_features = [hidden_dim] * n_layers  # L here is the number of blocks
        
        if not self.edge_feat:
            original_features_num = self.num_atom_type + 1  # Number of features of the input
        else:
            original_features_num = self.num_atom_type + self.num_bond_type + 1  # Number of features of the input

        # sequential mlp blocks
        last_layer_features = original_features_num
        self.reg_blocks = nn.ModuleList()
        for layer, next_layer_features in enumerate(block_features):
            mlp_block = RegularBlock(depth_of_mlp, last_layer_features, next_layer_features, self.residual)
            self.reg_blocks.append(mlp_block)
            last_layer_features = next_layer_features
        
        
        if self.diag_pool_readout:
            self.fc_layers = nn.ModuleList()
            for output_features in block_features:
                # each block's output is pooled (hence 2*output_features) and passed through a fully connected layer
                fc = FullyConnected(2*output_features, num_classes, activation_fn=None)
                self.fc_layers.append(fc)
        else:
            self.mlp_prediction = MLPReadout(sum(block_features)+original_features_num, num_classes)
Code example #18
    def __init__(self, net_params):
        super().__init__()

        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        graph_norm = net_params['graph_norm']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes

        aggr_type = "sum"  # default for MoNet

        self.lin = nn.Linear(in_dim, hidden_dim)

        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()

        # Hidden layer
        for _ in range(n_layers - 1):
            self.layers.append(
                GMMLayer(hidden_dim, hidden_dim, F.relu, dim, kernel,
                         aggr_type, dropout, graph_norm, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim),
                                                  nn.Tanh()))

        # Output layer
        self.layers.append(
            GMMLayer(hidden_dim, out_dim, None, dim, kernel, aggr_type,
                     dropout, graph_norm, batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))

        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #19
    def __init__(self):
        super().__init__()
        self.L = 4
        self.out_dim = 108
        self.residual = True
        self.in_dim = 33
        self.hidden_dim = 108
        self.n_classes = 10
        self.in_feat_dropout = 0.0
        self.sage_aggregator = "meanpool"
        self.readout = "mean"
        self.dropout = 0.0

        self.embedding_h = nn.Linear(self.in_dim, self.hidden_dim)
        self.in_feat_dropout = nn.Dropout(self.in_feat_dropout)

        self.layers = nn.ModuleList([GraphSageLayer(self.hidden_dim, self.hidden_dim, F.relu, self.dropout,
                                                    self.sage_aggregator, self.residual) for _ in range(self.L - 1)])
        self.layers.append(GraphSageLayer(self.hidden_dim, self.out_dim, F.relu,
                                          self.dropout, self.sage_aggregator, self.residual))
        self.readout_mlp = MLPReadout(self.out_dim, self.n_classes)
        pass
Code example #20
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        self.n_layers = net_params['L']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node,
                                        hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        self.dropout = nn.Dropout(p=dropout)
        if self.batch_norm:
            self.batchnorm_h = nn.ModuleList(
                [nn.BatchNorm1d(hidden_dim) for _ in range(self.n_layers - 1)])
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.layers = nn.ModuleList([
            SAGEConv(hidden_dim, hidden_dim) for _ in range(self.n_layers - 1)
        ])
        self.layers.append(SAGEConv(hidden_dim, out_dim))
        # self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
        #                                             dropout, aggregator_type, batch_norm, residual) for _ in
        #                              range(n_layers - 1)])
        # self.layers.append(
        #     GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
        if aggregator_type == 'maxpool':
            self.aggr = 'max'
        elif aggregator_type == 'mean':
            self.aggr = 'mean'
Code example #21
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        num_edge_type = net_params['num_edge_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.gated = net_params['gated']
        self.readout = net_params['readout']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        feat_mlp_modules = [
            nn.Linear(hidden_dim, hidden_dim, bias=True),
            nn.ReLU(),
            nn.Dropout(dropout),
        ]
        for _ in range(n_layers - 1):
            feat_mlp_modules.append(
                nn.Linear(hidden_dim, hidden_dim, bias=True))
            feat_mlp_modules.append(nn.ReLU())
            feat_mlp_modules.append(nn.Dropout(dropout))
        self.feat_mlp = nn.Sequential(*feat_mlp_modules)

        if self.gated:
            self.gates = nn.Linear(hidden_dim, hidden_dim, bias=True)

        self.readout_mlp = MLPReadout(out_dim, n_classes)
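
A hedged forward sketch for this MLP baseline: nodes pass through feat_mlp, and when gated is set a sigmoid gate weights nodes before a sum readout. The pos_enc branch, the dgl readouts, and the assumption that out_dim equals hidden_dim (so the feat_mlp output matches readout_mlp) are illustrative and not taken from the excerpt.

# Hypothetical forward pass for the gated MLP baseline above (assumed readout).
import dgl
import torch

def forward(self, g, h, h_pos_enc=None):
    if self.pos_enc:
        h = self.embedding_pos_enc(h_pos_enc.float())
    else:
        h = self.embedding_h(h)
    h = self.in_feat_dropout(h)
    h = self.feat_mlp(h)
    if self.gated:
        h = torch.sigmoid(self.gates(h)) * h       # per-node gate in [0, 1]
        g.ndata['h'] = h
        hg = dgl.sum_nodes(g, 'h')                 # gated sum readout
    else:
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')                # plain mean readout
    return self.readout_mlp(hg)
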
Code example #22
    def __init__(self, net_params):
        super().__init__()
        self.num_atom_type = net_params[
            'num_atom_type']  # 'num_atom_type' is 'nodeclasses' as in RingGNN original repo
        self.num_bond_type = net_params['num_bond_type']
        avg_node_num = net_params['avg_node_num']
        radius = net_params['radius']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']

        if self.edge_feat:
            self.depth = [
                torch.LongTensor([1 + self.num_atom_type + self.num_bond_type])
            ] + [torch.LongTensor([hidden_dim])] * n_layers
        else:
            self.depth = [torch.LongTensor([1 + self.num_atom_type])
                          ] + [torch.LongTensor([hidden_dim])] * n_layers

        self.equi_modulelist = nn.ModuleList([
            RingGNNEquivLayer(self.device,
                              m,
                              n,
                              layer_norm=self.layer_norm,
                              residual=self.residual,
                              dropout=dropout,
                              radius=radius,
                              k2_init=0.5 / avg_node_num)
            for m, n in zip(self.depth[:-1], self.depth[1:])
        ])

        self.prediction = MLPReadout(
            torch.sum(torch.stack(self.depth)).item(),
            1)  # 1 out dim since regression problem
Code example #23
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        num_edge_type = net_params['num_edge_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.edge_feat = net_params['edge_feat']
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        if self.edge_feat:
            self.embedding_e = nn.Embedding(num_edge_type, hidden_dim)
        else:
            self.embedding_e = nn.Linear(1, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GatedGCNLayer(hidden_dim, hidden_dim, dropout, self.batch_norm,
                          self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GatedGCNLayer(hidden_dim, out_dim, dropout, self.batch_norm,
                          self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #24
    def __init__(self,
                 in_dim=146,
                 hidden_dims=[146, 146, 146, 146],
                 out_dim=146,
                 n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        self.dropout = 0.0
        self.residual = True
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.gcn_1 = GCNLayer(self.hidden_dims[0], self.hidden_dims[1], F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        if len(self.hidden_dims) >= 3:
            self.gcn_2 = GCNLayer(self.hidden_dims[1], self.hidden_dims[2],
                                  F.relu, self.dropout, self.graph_norm,
                                  self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 4:
            self.gcn_3 = GCNLayer(self.hidden_dims[2], self.hidden_dims[3],
                                  F.relu, self.dropout, self.graph_norm,
                                  self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 5:
            self.gcn_4 = GCNLayer(self.hidden_dims[3], self.hidden_dims[4],
                                  F.relu, self.dropout, self.graph_norm,
                                  self.batch_norm, self.residual)
        if len(self.hidden_dims) >= 6:
            self.gcn_5 = GCNLayer(self.hidden_dims[4], self.hidden_dims[5],
                                  F.relu, self.dropout, self.graph_norm,
                                  self.batch_norm, self.residual)

        self.gcn_o = GCNLayer(self.hidden_dims[-1], out_dim, F.relu,
                              self.dropout, self.graph_norm, self.batch_norm,
                              self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
Code example #25
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.layer_count = n_layers

        self.dropout = dropout

        self.embedding_h = nn.Linear(in_dim, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        #self.joining_layer = nn.Linear(hidden_dim * num_heads, hidden_dim * num_heads)
        #self.joining_layer = FCNet([hidden_dim * num_heads, hidden_dim * num_heads])

        self.joining_layers = nn.ModuleList([
            FCNet([hidden_dim * num_heads, hidden_dim * num_heads])
            for _ in range(n_layers)
        ])

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim * num_heads, hidden_dim, num_heads, dropout,
                     self.graph_norm, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GATLayer(hidden_dim * num_heads, out_dim, 1, dropout,
                     self.graph_norm, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #26
File: gat_net.py Project: alisure-ml/PyTorchGCN
    def __init__(self, net_params):
        super().__init__()
        self.readout = net_params.readout

        self.embedding_h = nn.Linear(
            net_params.in_dim, net_params.hidden_dim * net_params.n_heads)
        self.in_feat_dropout = nn.Dropout(net_params.in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(net_params.hidden_dim * net_params.n_heads,
                     net_params.hidden_dim, net_params.n_heads,
                     net_params.dropout, net_params.graph_norm,
                     net_params.batch_norm, net_params.residual)
            for _ in range(net_params.L - 1)
        ])
        self.layers.append(
            GATLayer(net_params.hidden_dim * net_params.n_heads,
                     net_params.out_dim, 1, net_params.dropout,
                     net_params.graph_norm, net_params.batch_norm,
                     net_params.residual))

        self.readout_mlp = MLPReadout(net_params.out_dim, net_params.n_classes)
        pass
Code example #27
File: wls_net.py Project: se-ok/WLsimilarity
    def __init__(self, net_params):
        super().__init__()

        n_iter = net_params.pop('n_iter')
        num_node_types = net_params.pop('in_dim')
        embed_dim, hidden_dim, out_dim = net_params.pop(
            'embed_dim'), net_params.pop('hidden_dim'), net_params.pop(
                'out_dim')
        n_mlp_layer, scale_mlp, dropout = net_params.pop(
            'n_mlp_layer'), net_params.pop('scale_mlp'), net_params.pop(
                'dropout')
        self.n_classes = net_params.pop('n_classes')

        hidden_dim = hidden_dim + (hidden_dim % 2)
        out_dim = out_dim + (out_dim % 2)

        layers = []

        _layer = WLSMLPLayer(embed_dim, hidden_dim, n_mlp_layer, scale_mlp,
                             dropout, **net_params)
        layers.append(_layer)
        layers.append(nn.BatchNorm1d(hidden_dim))

        for _ in range(n_iter - 2):
            _layer = WLSMLPLayer(hidden_dim, hidden_dim, n_mlp_layer,
                                 scale_mlp, dropout, **net_params)
            layers.append(_layer)
            layers.append(nn.BatchNorm1d(hidden_dim))

        _layer = WLSMLPLayer(hidden_dim, out_dim, n_mlp_layer, scale_mlp,
                             dropout, **net_params)
        layers.append(_layer)
        layers.append(nn.BatchNorm1d(out_dim))

        self.n_embedding = nn.Embedding(num_node_types, embed_dim)
        self.layers = nn.ModuleList(layers)
        self.classifier = MLPReadout(out_dim, self.n_classes)
Code example #28
File: sogcn_net.py Project: yuehaowang/SoGCN
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        activation_name = net_params['activation']
        max_order = net_params['max_order']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node,
                                        hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        layer_params = {
            'activation': activations[activation_name],
            'dropout': dropout,
            'batch_norm': self.batch_norm,
            'residual': self.residual,
            'order': max_order
        }

        self.layers = nn.ModuleList([
            SoGCNLayer(hidden_dim, hidden_dim, **layer_params)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(SoGCNLayer(hidden_dim, out_dim, **layer_params))
        self.gru = GRU(hidden_dim, hidden_dim) if net_params['gru'] else None
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Code example #29
    def __init__(self, net_params):
        super().__init__()

        self.readout = net_params.readout
        self.device = net_params.device
        self.aggr_type = "sum"  # default for MoNet

        self.embedding_h = nn.Linear(net_params.in_dim, net_params.hidden_dim)

        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()

        # Hidden layer
        for _ in range(net_params.L - 1):
            self.layers.append(
                GMMLayer(net_params.hidden_dim, net_params.hidden_dim,
                         net_params.pseudo_dim_MoNet, net_params.kernel,
                         self.aggr_type, net_params.dropout,
                         net_params.graph_norm, net_params.batch_norm,
                         net_params.residual))
            self.pseudo_proj.append(
                nn.Sequential(nn.Linear(2, net_params.pseudo_dim_MoNet),
                              nn.Tanh()))
            pass

        # Output layer
        self.layers.append(
            GMMLayer(net_params.hidden_dim, net_params.out_dim,
                     net_params.pseudo_dim_MoNet, net_params.kernel,
                     self.aggr_type, net_params.dropout, net_params.graph_norm,
                     net_params.batch_norm, net_params.residual))
        self.pseudo_proj.append(
            nn.Sequential(nn.Linear(2, net_params.pseudo_dim_MoNet),
                          nn.Tanh()))

        self.readout_mlp = MLPReadout(net_params.out_dim, net_params.n_classes)
        pass
Code example #30
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        num_atom_type = net_params['num_atom_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']

        aggr_type = "sum"  # default for MoNet

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)

        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()

        # Hidden layer
        for _ in range(n_layers - 1):
            self.layers.append(
                GMMLayer(hidden_dim, hidden_dim, dim, kernel, aggr_type,
                         dropout, batch_norm, residual))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim),
                                                  nn.Tanh()))

        # Output layer
        self.layers.append(
            GMMLayer(hidden_dim, out_dim, dim, kernel, aggr_type, dropout,
                     batch_norm, residual))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))

        self.MLP_layer = MLPReadout(out_dim, 1)  # out dim 1 since regression
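
To make the pseudo_proj modules concrete: in the MoNet formulation each edge (u, v) carries a 2-d pseudo coordinate built from the endpoint degrees, which the nn.Linear(2, dim) + Tanh projections map to the kernel dimension before every GMMLayer. The sketch below assumes that convention and an assumed GMMLayer call signature; it is not part of the excerpt.

# Hypothetical forward pass for the MoNet model above (assumed pseudo-coordinate scheme).
import dgl
import torch

def forward(self, g, h):
    h = self.embedding_h(h)                        # atom-type ids -> hidden_dim
    us, vs = g.edges()
    deg = g.in_degrees().float().clamp(min=1)
    pseudo = torch.stack([1.0 / torch.sqrt(deg[us]),
                          1.0 / torch.sqrt(deg[vs])], dim=1)   # (num_edges, 2)
    for layer, proj in zip(self.layers, self.pseudo_proj):
        h = layer(g, h, proj(pseudo))              # each GMMLayer consumes projected coords
    g.ndata['h'] = h
    hg = dgl.mean_nodes(g, 'h')                    # graph-level readout
    return self.MLP_layer(hg)                      # single regression output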