Example #1
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                           aggregator_type, batch_norm, residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                           aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(out_dim,
                                    1)  # 1 output dim: regression task
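The constructor above is driven entirely by a single net_params dict. As a rough usage sketch (the class name GraphSageNet and all concrete values below are assumptions, not taken from the listing), the dict would carry one entry per key the constructor reads:

# Hypothetical configuration for the regression model in Example #1;
# every key is one the constructor reads, the values are only illustrative.
net_params = {
    'num_atom_type': 28,        # size of the atom-type vocabulary for nn.Embedding
    'num_bond_type': 4,         # read by the constructor, unused in this snippet
    'hidden_dim': 108,
    'out_dim': 108,
    'in_feat_dropout': 0.0,
    'dropout': 0.0,
    'sage_aggregator': 'meanpool',
    'L': 4,                     # total number of GraphSage layers
    'batch_norm': True,
    'residual': True,
    'readout': 'mean',
}
# model = GraphSageNet(net_params)   # class name assumed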
Example #2
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.readout = net_params['readout']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                           aggregator_type, batch_norm, residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                           aggregator_type, batch_norm, residual))
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)
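Example #2 differs from Example #1 mainly in its readout: MLPReadout(2 * out_dim, n_classes) points to a model that classifies node pairs (e.g. edges) by concatenating the two endpoint embeddings. A minimal, self-contained sketch of that scoring step in plain PyTorch; the names, shapes, and the use of nn.Linear as a stand-in for MLPReadout are assumptions:

import torch
import torch.nn as nn

out_dim, n_classes = 108, 2
edge_classifier = nn.Linear(2 * out_dim, n_classes)   # stand-in for MLPReadout(2 * out_dim, n_classes)

h = torch.randn(50, out_dim)                           # node embeddings after the GraphSage layers
src = torch.tensor([0, 3, 7])                          # source nodes of the pairs to score
dst = torch.tensor([1, 4, 8])                          # destination nodes
pair_feat = torch.cat([h[src], h[dst]], dim=1)         # (num_pairs, 2 * out_dim)
logits = edge_classifier(pair_feat)                    # (num_pairs, n_classes)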
Example #3
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node,
                                        hidden_dim)  # node feat is an integer
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                           aggregator_type, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GraphSageLayer(hidden_dim, out_dim, F.relu, dropout,
                           aggregator_type, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
Example #4
    def __init__(self):
        super().__init__()
        self.L = 4
        self.out_dim = 108
        self.residual = True
        self.in_dim = 32
        self.hidden_dim = 108
        self.n_classes = 10
        self.in_feat_dropout = 0.0
        self.sage_aggregator = "meanpool"
        self.readout = "mean"
        self.dropout = 0.0

        self.embedding_h = nn.Linear(self.in_dim, self.hidden_dim)
        self.in_feat_dropout = nn.Dropout(self.in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(self.hidden_dim, self.hidden_dim, F.relu,
                           self.dropout, self.sage_aggregator, self.residual)
            for _ in range(self.L - 1)
        ])
        self.layers.append(
            GraphSageLayer(self.hidden_dim, self.out_dim, F.relu, self.dropout,
                           self.sage_aggregator, self.residual))
        self.readout_mlp = MLPReadout(self.out_dim, self.n_classes)
        pass
Example #5
    def __init__(self,
                 in_dim=146,
                 hidden_dims=[108, 108, 108, 108],
                 out_dim=108,
                 n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        self.dropout = 0.0
        self.residual = True
        self.sage_aggregator = "meanpool"

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])

        _in_dim = self.hidden_dims[0]
        self.gcn_list = nn.ModuleList()
        for hidden_dim in self.hidden_dims[1:]:
            self.gcn_list.append(
                GraphSageLayer(_in_dim, hidden_dim, F.relu, self.dropout,
                               self.sage_aggregator, self.residual))
            _in_dim = hidden_dim
            pass
        self.gcn_list.append(
            GraphSageLayer(self.hidden_dims[-1], out_dim, F.relu, self.dropout,
                           self.sage_aggregator, self.residual))

        self.readout_mlp = nn.Linear(out_dim, n_classes, bias=False)
        pass
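The constructor in Example #5 ends at readout_mlp; the graph-level prediction usually comes from pooling the node embeddings and feeding the pooled vector to that linear layer. A minimal sketch of the "mean" readout in plain PyTorch (the repositories typically use dgl.mean_nodes on a batched graph; a single graph is assumed here):

import torch
import torch.nn as nn

out_dim, n_classes = 108, 10
readout_mlp = nn.Linear(out_dim, n_classes, bias=False)

h = torch.randn(17, out_dim)       # node embeddings of one graph after the SAGE layers
hg = h.mean(dim=0, keepdim=True)   # "mean" readout: average over the graph's nodes
logits = readout_mlp(hg)           # (1, n_classes) graph-level prediction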
Example #6
 def __init__(self, net_params):
     super().__init__()
     num_node_type = net_params['num_node_type']
     num_edge_type = net_params['num_edge_type']
     hidden_dim = net_params['hidden_dim']
     out_dim = net_params['out_dim']
     n_classes = net_params['n_classes']
     in_feat_dropout = net_params['in_feat_dropout']
     dropout = net_params['dropout']
     aggregator_type = net_params['sage_aggregator']
     n_layers = net_params['L']    
     batch_norm = net_params['batch_norm']
     residual = net_params['residual']
     self.readout = net_params['readout']
     self.pos_enc = net_params['pos_enc']
     if self.pos_enc:
         pos_enc_dim = net_params['pos_enc_dim']
         self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
     else:
         in_dim = 1
         self.embedding_h = nn.Embedding(in_dim, hidden_dim)
     
     self.in_feat_dropout = nn.Dropout(in_feat_dropout)
     
     self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,
                                           dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])
     self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))
     self.MLP_layer = MLPReadout(out_dim, n_classes)
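In Example #6 the initial node state comes either from a positional encoding (pos_enc=True) or from a constant integer feature embedded by nn.Embedding(1, hidden_dim). A minimal sketch of the positional-encoding path with made-up dimensions (how the forward pass actually selects between the two branches is not shown in the listing):

import torch
import torch.nn as nn

hidden_dim, pos_enc_dim, num_nodes = 108, 20, 41
embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)

pos_enc = torch.randn(num_nodes, pos_enc_dim)   # e.g. Laplacian eigenvector encodings, one row per node
h = embedding_pos_enc(pos_enc)                  # initial node states fed to the SAGE layers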
Example #7
 def __init__(self, input_dim, assign_dim, output_feat_dim,
              activation, dropout, aggregator_type, graph_norm, batch_norm, link_pred):
     super().__init__()
     self.embedding_dim = input_dim
     self.assign_dim = assign_dim
     self.hidden_dim = output_feat_dim
     self.link_pred = link_pred
     self.feat_gc = GraphSageLayer(
         input_dim,
         output_feat_dim,
         activation,
         dropout,
         aggregator_type,
         graph_norm,
         batch_norm)
     self.pool_gc = GraphSageLayer(
         input_dim,
         assign_dim,
         activation,
         dropout,
         aggregator_type, 
         graph_norm,
         batch_norm)
     self.reg_loss = nn.ModuleList([])
     self.loss_log = {}
     self.reg_loss.append(EntropyLoss())
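Example #7 is a DiffPool-style block: feat_gc produces node features and pool_gc produces soft cluster-assignment logits. The pooling itself is the usual DiffPool coarsening, sketched below in plain PyTorch with made-up sizes (the EntropyLoss appended to reg_loss typically penalizes high-entropy rows of the assignment matrix, pushing assignments toward one-hot):

import torch
import torch.nn.functional as F

num_nodes, feat_dim, assign_dim = 12, 64, 4
Z = torch.randn(num_nodes, feat_dim)                        # node features from feat_gc
S = F.softmax(torch.randn(num_nodes, assign_dim), dim=1)    # soft assignments from pool_gc
A = torch.randint(0, 2, (num_nodes, num_nodes)).float()     # adjacency of the input graph

X_pool = S.t() @ Z       # (assign_dim, feat_dim): pooled cluster features
A_pool = S.t() @ A @ S   # (assign_dim, assign_dim): pooled adjacency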
Example #8
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        self.residual = net_params['residual']
        dgl_builtin = net_params['builtin']
        bnorm = net_params['batch_norm']

        self.layers = nn.ModuleList()
        # Input
        self.layers.append(
            GraphSageLayer(in_dim,
                           hidden_dim,
                           F.relu,
                           dropout,
                           aggregator_type,
                           self.residual,
                           bn=bnorm,
                           dgl_builtin=dgl_builtin))
        # Hidden layers
        self.layers.extend(
            nn.ModuleList([
                GraphSageLayer(hidden_dim,
                               hidden_dim,
                               F.relu,
                               dropout,
                               aggregator_type,
                               self.residual,
                               bn=bnorm,
                               dgl_builtin=dgl_builtin)
                for _ in range(n_layers - 1)
            ]))
        # Output layer
        self.layers.append(
            GraphSageLayer(hidden_dim,
                           n_classes,
                           None,
                           dropout,
                           aggregator_type,
                           self.residual,
                           bn=bnorm,
                           dgl_builtin=dgl_builtin))
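Example #8 stacks an input layer, n_layers - 1 hidden layers, and an output layer whose activation is None, so the model emits raw per-node class logits. A stand-in sketch of that stacking pattern with plain nn.Linear layers (the real layers additionally take the DGL graph as input):

import torch
import torch.nn as nn
import torch.nn.functional as F

in_dim, hidden_dim, n_classes, n_layers = 32, 108, 10, 4
layers = nn.ModuleList(
    [nn.Linear(in_dim, hidden_dim)] +                                   # input layer
    [nn.Linear(hidden_dim, hidden_dim) for _ in range(n_layers - 1)] +  # hidden layers
    [nn.Linear(hidden_dim, n_classes)]                                  # output layer, activation None
)

h = torch.randn(25, in_dim)
for i, layer in enumerate(layers):
    h = layer(h)
    if i < len(layers) - 1:   # no activation on the output layer: keep raw logits
        h = F.relu(h)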
Example #9
 def __init__(self, in_dim, hidden_dims):
     super().__init__()
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims:
         self.gcn_list.append(GraphSageLayer(in_dim, hidden_dim, F.relu, 0.0, "meanpool", True))
         in_dim = hidden_dim
         pass
     pass
Example #10
    def __init__(self, in_dim=64, hidden_dims=[108, 108], out_dim=108):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 2 <= len(self.hidden_dims) <= 3
        self.dropout = 0.0
        self.residual = True
        self.sage_aggregator = "meanpool"

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.graph_sage_1 = GraphSageLayer(self.hidden_dims[0], self.hidden_dims[1], F.relu,
                                           self.dropout, self.sage_aggregator, self.residual)
        if len(self.hidden_dims) >= 3:
            self.graph_sage_2 = GraphSageLayer(self.hidden_dims[1], self.hidden_dims[2], F.relu,
                                               self.dropout, self.sage_aggregator, self.residual)
            pass
        self.graph_sage_o = GraphSageLayer(self.hidden_dims[-1], out_dim, F.relu,
                                           self.dropout, self.sage_aggregator, self.residual)
        pass
Example #11
 def __init__(self, in_dim, hidden_dims, n_classes=10):
     super().__init__()
     self.embedding_h = nn.Linear(in_dim, in_dim)
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims:
         self.gcn_list.append(GraphSageLayer(in_dim, hidden_dim, F.relu, 0.0, "meanpool", True))
         in_dim = hidden_dim
         pass
     self.readout_mlp = nn.Linear(hidden_dims[-1], n_classes, bias=False)
     pass
Example #12
    def __init__(self, in_dim=146, hidden_dims=[108, 108, 108, 108], out_dim=108, n_classes=10):
        super().__init__()
        self.hidden_dims = hidden_dims
        assert 3 <= len(self.hidden_dims) <= 6
        self.dropout = 0.0
        self.residual = True
        self.sage_aggregator = "meanpool"

        self.embedding_h = nn.Linear(in_dim, self.hidden_dims[0])
        self.graph_sage_1 = GraphSageLayer(self.hidden_dims[0], self.hidden_dims[1], F.relu,
                                           self.dropout, self.sage_aggregator, self.residual)
        self.graph_sage_2 = GraphSageLayer(self.hidden_dims[1], self.hidden_dims[2], F.relu,
                                           self.dropout, self.sage_aggregator, self.residual)
        if len(self.hidden_dims) >= 4:
            self.graph_sage_3 = GraphSageLayer(self.hidden_dims[2], self.hidden_dims[3], F.relu,
                                               self.dropout, self.sage_aggregator, self.residual)
        if len(self.hidden_dims) >= 5:
            self.graph_sage_4 = GraphSageLayer(self.hidden_dims[3], self.hidden_dims[4], F.relu,
                                               self.dropout, self.sage_aggregator, self.residual)
        if len(self.hidden_dims) >= 6:
            self.graph_sage_5 = GraphSageLayer(self.hidden_dims[4], self.hidden_dims[5], F.relu,
                                               self.dropout, self.sage_aggregator, self.residual)

        self.graph_sage_o = GraphSageLayer(self.hidden_dims[-1], out_dim, F.relu,
                                           self.dropout, self.sage_aggregator, self.residual)
        self.readout_mlp = MLPReadout(out_dim, n_classes)
        pass
Example #13
    def __init__(self, in_dim, hidden_dims, readout="mean"):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dims = hidden_dims
        self.readout = readout

        self.gcn_list = nn.ModuleList()
        _in_dim = self.in_dim
        for hidden_dim in hidden_dims:
            self.gcn_list.append(
                GraphSageLayer(_in_dim, hidden_dim, F.relu, 0.0, "meanpool",
                               True))
            _in_dim = hidden_dim
            pass

        Tools.print(
            "GraphSageNet1 #GNN1={} in_dim={} hidden_dims={} readout={}".
            format(len(self.hidden_dims), self.in_dim, self.hidden_dims,
                   self.readout))
        pass
Example #14
    def __init__(self, in_dim, hidden_dims, skip_which, skip_dim=128, n_out=1):
        super().__init__()
        self.embedding_h = nn.Linear(in_dim, in_dim)

        _in_dim = in_dim
        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims:
            self.gcn_list.append(GraphSageLayer(_in_dim, hidden_dim, F.relu, 0.0, "meanpool", True))
            _in_dim = hidden_dim
            pass

        sk_hidden_dims = [in_dim] + [hidden_dims[which-1] for which in skip_which]
        self.skip_connect_index = [0] + skip_which
        self.skip_connect_list = nn.ModuleList()
        for hidden_dim in sk_hidden_dims:
            self.skip_connect_list.append(nn.Linear(hidden_dim, skip_dim, bias=False))
            pass

        self.readout = nn.Linear(len(self.skip_connect_list) * skip_dim, n_out, bias=False)
        pass
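In Example #14 skip_which selects which GNN layers feed the prediction: the input embedding plus each selected layer output gets its own Linear(dim, skip_dim) projection, and the projections are concatenated before the final readout. A small shape sketch in plain PyTorch (the graph pooling step and all concrete values are assumptions):

import torch
import torch.nn as nn

in_dim, hidden_dims, skip_which, skip_dim, n_out = 64, [108, 108, 108], [1, 3], 128, 1
sk_hidden_dims = [in_dim] + [hidden_dims[w - 1] for w in skip_which]     # dims feeding the skips
skip_projections = nn.ModuleList([nn.Linear(d, skip_dim, bias=False) for d in sk_hidden_dims])
readout = nn.Linear(len(skip_projections) * skip_dim, n_out, bias=False)

pooled = [torch.randn(1, d) for d in sk_hidden_dims]   # one pooled graph vector per tapped layer
skips = [proj(p) for proj, p in zip(skip_projections, pooled)]
score = readout(torch.cat(skips, dim=1))               # (1, n_out)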
Example #15
    def __init__(self, in_dim, hidden_dims, n_classes=10, readout="mean"):
        super().__init__()
        self.in_dim = in_dim
        self.hidden_dims = hidden_dims
        self.readout = readout

        self.embedding_h = nn.Linear(in_dim, in_dim)
        self.gcn_list = nn.ModuleList()
        _in_dim = self.in_dim
        for hidden_dim in hidden_dims:
            self.gcn_list.append(
                GraphSageLayer(_in_dim, hidden_dim, F.relu, 0.0, "meanpool",
                               True))
            _in_dim = hidden_dim
            pass
        self.readout_mlp = nn.Linear(hidden_dims[-1], n_classes, bias=False)

        Tools.print(
            "GraphSageNet2 #GNN2={} in_dim={} hidden_dims={} readout={}".
            format(len(self.hidden_dims), self.in_dim, self.hidden_dims,
                   self.readout))
        pass
Example #16
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        aggregator_type = net_params['sage_aggregator']
        n_layers = net_params['L']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.readout = net_params['readout']
        self.residual = net_params['residual']
        self.concat_norm = net_params['concat_norm']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']
        else:
            self.num_classes = 1

        self.embedding_lin = nn.Linear(num_atom_type, hidden_dim, bias=False)

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GraphSageLayer(hidden_dim, hidden_dim, F.relu, dropout,
                           aggregator_type, self.batch_norm, self.graph_norm,
                           self.layer_norm, self.concat_norm)
            for _ in range(n_layers)
        ])
        self.linear_ro = nn.Linear(hidden_dim, out_dim, bias=False)
        self.linear_predict = nn.Linear(out_dim, self.num_classes, bias=True)

        # additional parameters for the gated residual connection
        if self.residual == "gated":
            self.W_g = nn.Linear(2 * hidden_dim, hidden_dim, False)
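The extra W_g in Example #16 implements a gated residual connection. One common form, sketched here in plain PyTorch (the exact gating used in the repository's forward pass is not shown in the listing, so this is an assumption):

import torch
import torch.nn as nn

hidden_dim, num_nodes = 108, 30
W_g = nn.Linear(2 * hidden_dim, hidden_dim, bias=False)

h_in = torch.randn(num_nodes, hidden_dim)     # node state before the SAGE layer
h_out = torch.randn(num_nodes, hidden_dim)    # node state after the SAGE layer
gate = torch.sigmoid(W_g(torch.cat([h_in, h_out], dim=1)))
h = gate * h_in + (1.0 - gate) * h_out        # gated mix instead of a plain h_in + h_out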
Example #17
    def __init__(self, net_params):

        super().__init__()
        input_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        embedding_dim = net_params['embedding_dim']
        label_dim = net_params['n_classes']
        activation = F.relu
        n_layers = net_params['L']  # this is the gnn_per_block param
        dropout = net_params['dropout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        aggregator_type = net_params['sage_aggregator']
        pool_ratio = net_params['pool_ratio']

        self.device = net_params['device']
        self.link_pred = net_params['linkpred']
        self.concat = net_params['cat']
        self.n_pooling = net_params['num_pool']
        self.batch_size = net_params['batch_size']
        self.link_pred_loss = []
        self.entropy_loss = []

        self.embedding_h = nn.Linear(input_dim, hidden_dim)

        # list of GNN modules before the first diffpool operation
        self.gc_before_pool = nn.ModuleList()

        self.assign_dim = net_params['assign_dim']
        # self.bn = True
        self.num_aggs = 1

        # constructing layers
        # layers before diffpool
        assert n_layers >= 3, "n_layers too few"
        self.gc_before_pool.append(
            GraphSageLayer(hidden_dim, hidden_dim, activation, dropout,
                           aggregator_type, self.graph_norm, self.batch_norm,
                           self.residual))

        for _ in range(n_layers - 2):
            self.gc_before_pool.append(
                GraphSageLayer(hidden_dim, hidden_dim, activation, dropout,
                               aggregator_type, self.graph_norm,
                               self.batch_norm, self.residual))

        self.gc_before_pool.append(
            GraphSageLayer(hidden_dim, embedding_dim, None, dropout,
                           aggregator_type, self.graph_norm, self.batch_norm,
                           self.residual))

        assign_dims = []
        assign_dims.append(self.assign_dim)
        if self.concat:
            # the diffpool layer receives a pool_embedding_dim node feature
            # tensor and returns pool_embedding_dim node embeddings
            pool_embedding_dim = hidden_dim * (n_layers - 1) + embedding_dim
        else:

            pool_embedding_dim = embedding_dim

        self.first_diffpool_layer = DiffPoolLayer(
            pool_embedding_dim, self.assign_dim, hidden_dim, activation,
            dropout, aggregator_type, self.graph_norm, self.batch_norm,
            self.link_pred)
        gc_after_per_pool = nn.ModuleList()

        # list of list of GNN modules, each list after one diffpool operation
        self.gc_after_pool = nn.ModuleList()

        for _ in range(n_layers - 1):
            gc_after_per_pool.append(
                DenseGraphSage(hidden_dim, hidden_dim, self.residual))
        gc_after_per_pool.append(
            DenseGraphSage(hidden_dim, embedding_dim, self.residual))
        self.gc_after_pool.append(gc_after_per_pool)

        self.assign_dim = int(self.assign_dim * pool_ratio)

        self.diffpool_layers = nn.ModuleList()
        # each pooling module
        for _ in range(self.n_pooling - 1):
            self.diffpool_layers.append(
                DenseDiffPool(pool_embedding_dim, self.assign_dim, hidden_dim,
                              self.link_pred))

            gc_after_per_pool = nn.ModuleList()

            for _ in range(n_layers - 1):
                gc_after_per_pool.append(
                    DenseGraphSage(hidden_dim, hidden_dim, self.residual))
            gc_after_per_pool.append(
                DenseGraphSage(hidden_dim, embedding_dim, self.residual))
            self.gc_after_pool.append(gc_after_per_pool)

            assign_dims.append(self.assign_dim)
            self.assign_dim = int(self.assign_dim * pool_ratio)

        # predicting layer
        if self.concat:
            self.pred_input_dim = pool_embedding_dim * \
                self.num_aggs * (self.n_pooling + 1)
        else:
            self.pred_input_dim = embedding_dim * self.num_aggs
        self.pred_layer = nn.Linear(self.pred_input_dim, label_dim)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data = init.xavier_uniform_(
                    m.weight.data, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    m.bias.data = init.constant_(m.bias.data, 0.0)
Example #18
    def __init__(self, net_params):
        super().__init__()
        self.device = net_params.device
        self.concat = net_params.cat
        self.batch_size = net_params.batch_size
        self.link_pred_loss = []
        self.entropy_loss = []
        
        self.embedding_h = nn.Linear(net_params.in_dim, net_params.hidden_dim)
        
        # list of GNN modules before the first diffpool operation
        self.gc_before_pool = nn.ModuleList()

        self.bn = True
        self.num_aggs = 1

        # constructing layers
        # layers before diffpool
        assert net_params.L >= 3, "n_layers too few"
        self.gc_before_pool.append(GraphSageLayer(
            net_params.hidden_dim, net_params.hidden_dim, F.relu,
            net_params.dropout, net_params.sage_aggregator, net_params.residual, self.bn))
        
        for _ in range(net_params.L - 2):
            self.gc_before_pool.append(GraphSageLayer(
                net_params.hidden_dim, net_params.hidden_dim, F.relu,
                net_params.dropout, net_params.sage_aggregator, net_params.residual, self.bn))
            pass
        
        self.gc_before_pool.append(GraphSageLayer(
            net_params.hidden_dim, net_params.embedding_dim, None,
            net_params.dropout, net_params.sage_aggregator, net_params.residual))
        
        assign_dims = [net_params.assign_dim]
        if self.concat:
            # the diffpool layer receives a pool_embedding_dim node feature tensor and returns pool_embedding_dim node embeddings
            pool_embedding_dim = net_params.hidden_dim * (net_params.L - 1) + net_params.embedding_dim
        else:
            pool_embedding_dim = net_params.embedding_dim
            pass

        self.first_diffpool_layer = DiffPoolLayer(
            pool_embedding_dim, net_params.assign_dim, net_params.hidden_dim,
            F.relu, net_params.dropout, net_params.sage_aggregator, net_params.linkpred)

        gc_after_per_pool = nn.ModuleList()
        for _ in range(net_params.L - 1):
            gc_after_per_pool.append(DenseGraphSage(net_params.hidden_dim, net_params.hidden_dim, net_params.residual))
        gc_after_per_pool.append(DenseGraphSage(net_params.hidden_dim, net_params.embedding_dim, net_params.residual))

        self.gc_after_pool = nn.ModuleList()
        self.gc_after_pool.append(gc_after_per_pool)

        net_params.assign_dim = int(net_params.assign_dim * net_params.pool_ratio)
        
        self.diffpool_layers = nn.ModuleList()
        for _ in range(net_params.num_pool - 1):
            self.diffpool_layers.append(DenseDiffPool(pool_embedding_dim, net_params.assign_dim,
                                                      net_params.hidden_dim, net_params.linkpred))
            gc_after_per_pool = nn.ModuleList()
            for _ in range(net_params.L - 1):
                gc_after_per_pool.append(DenseGraphSage(net_params.hidden_dim,
                                                        net_params.hidden_dim, net_params.residual))
                pass
            gc_after_per_pool.append(DenseGraphSage(net_params.hidden_dim,
                                                    net_params.embedding_dim, net_params.residual))
            self.gc_after_pool.append(gc_after_per_pool)
            
            assign_dims.append(net_params.assign_dim)
            net_params.assign_dim = int(net_params.assign_dim * net_params.pool_ratio)
            pass

        # predicting layer
        if self.concat:
            self.pred_input_dim = pool_embedding_dim *  self.num_aggs * (net_params.num_pool + 1)
        else:
            self.pred_input_dim = net_params.embedding_dim * self.num_aggs
        self.pred_layer = nn.Linear(self.pred_input_dim, net_params.n_classes)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data = init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    m.bias.data = init.constant_(m.bias.data, 0.0)
                pass
            pass

        pass