Example #1
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.dropout = dropout

        self.embedding_h = nn.Linear(in_dim, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim * num_heads, hidden_dim, num_heads, dropout,
                     self.graph_norm, self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GATLayer(hidden_dim * num_heads, out_dim, 1, dropout,
                     self.graph_norm, self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
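For reference, a forward pass matching this constructor could look like the sketch below. It assumes the DGL-style GATLayer and MLPReadout conventions these benchmarking-gnns-like snippets follow (each layer consumes the graph plus node features, and the readout pools node states into a graph vector); it is a sketch, not the original author's forward.

import dgl  # assumed backend, as in benchmarking-gnns-style code

def forward(self, g, h):
    h = self.embedding_h(h)         # project inputs to hidden_dim * num_heads
    h = self.in_feat_dropout(h)
    for conv in self.layers:        # stacked GAT layers; the last maps to out_dim
        h = conv(g, h)
    g.ndata['h'] = h
    if self.readout == "sum":       # pool node states into one vector per graph
        hg = dgl.sum_nodes(g, 'h')
    elif self.readout == "max":
        hg = dgl.max_nodes(g, 'h')
    else:                           # default to mean readout
        hg = dgl.mean_nodes(g, 'h')
    return self.MLP_layer(hg)       # class scores per graph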
Example #2
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']

        self.readout = net_params['readout']
        self.norm = net_params['norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']

        self.embedding_h = nn.Embedding(in_dim_node, hidden_dim *
                                        num_heads)  # node feat is an integer

        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim * num_heads, hidden_dim, num_heads, dropout,
                     self.norm, self.residual) for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GATLayer(hidden_dim * num_heads, out_dim, 1, dropout, self.norm,
                     self.residual))
        self.MLP_layer = MLPReadout(out_dim, n_classes)
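The main difference from Example #1 is the encoder: embedding_h is an nn.Embedding because node features here are integer type labels (see the inline comments), so it expects a LongTensor of indices rather than a float feature matrix. A minimal demonstration with hypothetical sizes:

import torch
import torch.nn as nn

emb = nn.Embedding(10, 19 * 8)          # 10 node types -> hidden_dim * num_heads
h = emb(torch.randint(0, 10, (5,)))     # 5 nodes, each an integer type label
print(h.shape)                          # torch.Size([5, 152])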
Example #3
 def __init__(self,
              num_feats,
              num_classes,
              num_hidden,
              num_layers,
              num_heads,
              merge='cat',
              activation=F.elu,
              graph_norm=False,
              batch_norm=False,
              residual=False,
              dropout=0):
     super(ResGATNet, self).__init__()
     self.num_layers = num_layers
     self.layers = nn.ModuleList()
     self.layers.append(
         GATLayer(num_feats, num_hidden, num_heads, merge, activation,
                  graph_norm, batch_norm, residual, dropout))
     for i in range(1, num_layers - 1):
         self.layers.append(
             GATLayer(num_hidden * num_heads, num_hidden, num_heads, merge,
                      activation, graph_norm, batch_norm, residual,
                      dropout))
     self.layers.append(
         GATLayer(num_hidden * num_heads, num_classes, 1, 'mean',
                  activation, graph_norm, batch_norm, residual, dropout))
Example #4
    def __init__(self):
        super().__init__()

        self.L = 4
        self.out_dim = 152
        self.residual = True
        self.readout = "mean"
        self.in_dim = 32
        self.hidden_dim = 19
        self.n_heads = 8
        self.n_classes = 10
        self.in_feat_dropout = 0.0
        self.dropout = 0.0
        self.graph_norm = True
        self.batch_norm = True

        self.embedding_h = nn.Linear(self.in_dim,
                                     self.hidden_dim * self.n_heads)
        self.in_feat_dropout = nn.Dropout(self.in_feat_dropout)  # the float rate is replaced by the dropout module

        self.layers = nn.ModuleList([
            GATLayer(self.hidden_dim * self.n_heads, self.hidden_dim,
                     self.n_heads, self.dropout, self.graph_norm,
                     self.batch_norm, self.residual) for _ in range(self.L - 1)
        ])
        self.layers.append(
            GATLayer(self.hidden_dim * self.n_heads, self.out_dim, 1,
                     self.dropout, self.graph_norm, self.batch_norm,
                     self.residual))

        self.readout_mlp = MLPReadout(self.out_dim, self.n_classes)
Example #5
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']

        self.dropout = dropout

        self.embedding_h = nn.Embedding(num_atom_type, hidden_dim * num_heads)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(hidden_dim * num_heads, hidden_dim, num_heads, dropout,
                     self.batch_norm, self.residual)
            for _ in range(n_layers - 1)
        ])
        self.layers.append(
            GATLayer(hidden_dim * num_heads, out_dim, 1, dropout,
                     self.batch_norm, self.residual))
        self.MLP_layer = MLPReadout(out_dim,
                                    1)  # 1 out dim since regression problem
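Because MLP_layer maps to a single output, a matching forward would end in one value per graph. A sketch under the same assumptions as the Example #1 sketch (mean readout shown; num_bond_type is read but unused in this snippet, so edge features would only matter in a GAT variant that consumes them):

import dgl  # assumed backend

def forward(self, g, h):
    h = self.embedding_h(h)        # atom-type indices -> hidden_dim * num_heads
    h = self.in_feat_dropout(h)
    for conv in self.layers:
        h = conv(g, h)
    g.ndata['h'] = h
    hg = dgl.mean_nodes(g, 'h')    # pool atoms into a molecule vector
    return self.MLP_layer(hg)      # scalar prediction (regression)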
Example #6
    def __init__(self, net_params):
        super().__init__()

        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        n_classes = net_params['n_classes']
        num_heads = net_params['n_heads']
        dropout = net_params['dropout']
        n_layers = net_params['L']

        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.dropout = dropout
        self.n_classes = n_classes
        self.device = net_params['device']
        self.dgl_builtin = net_params['builtin']

        self.layers = nn.ModuleList()
        self.activation = F.elu
        # input projection (no residual)
        self.layers.append(
            GATLayer(in_dim,
                     hidden_dim,
                     num_heads,
                     dropout,
                     self.graph_norm,
                     self.batch_norm,
                     self.residual,
                     activation=self.activation,
                     dgl_builtin=self.dgl_builtin))
        # hidden layers
        for _ in range(1, n_layers):
            # due to multi-head, the in_dim = hidden_dim * num_heads
            self.layers.append(
                GATLayer(hidden_dim * num_heads,
                         hidden_dim,
                         num_heads,
                         dropout,
                         self.graph_norm,
                         self.batch_norm,
                         self.residual,
                         activation=self.activation,
                         dgl_builtin=self.dgl_builtin))
        # output projection
        self.layers.append(
            GATLayer(hidden_dim * num_heads,
                     n_classes,
                     1,
                     dropout,
                     self.graph_norm,
                     self.batch_norm,
                     self.residual,
                     activation=None,
                     dgl_builtin=self.dgl_builtin))
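Here there is no separate input embedding: the first GATLayer acts as the input projection, and the final single-head layer with activation=None emits per-node logits. A sketch of the corresponding forward:

def forward(self, g, h):
    for conv in self.layers:   # input projection, hidden layers, output projection
        h = conv(g, h)
    return h                   # per-node scores over n_classes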
Example #7
 def __init__(self, in_dim=64, hidden_dims=[19, 19]):
     super().__init__()
     self.n_heads = 8
     self.embedding_h = nn.Linear(in_dim, hidden_dims[0] * self.n_heads)
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims[:-1]:  # note: in_dim below assumes all hidden_dims entries are equal
         self.gcn_list.append(GATLayer(hidden_dim * self.n_heads, hidden_dim, self.n_heads, 0.0, True, True, True))
     self.gcn_list.append(GATLayer(hidden_dims[-2] * self.n_heads,
                                   hidden_dims[-1] * self.n_heads, 1, 0.0, True, True, True))
Example #8
 def __init__(self, in_dim=19, hidden_dims=[19, 19, 19, 19], n_classes=10):
     super().__init__()
     self.n_heads = 8
     self.embedding_h = nn.Linear(in_dim * self.n_heads,
                                  hidden_dims[0] * self.n_heads)
     self.gcn_list = nn.ModuleList()
     for hidden_dim in hidden_dims[:-1]:  # note: in_dim below assumes all hidden_dims entries are equal
         self.gcn_list.append(
             GATLayer(hidden_dim * self.n_heads, hidden_dim, self.n_heads,
                      0.0, True, True, True))
     self.gcn_list.append(
         GATLayer(hidden_dims[-2] * self.n_heads,
                  hidden_dims[-1] * self.n_heads, 1, 0.0, True, True, True))
     self.readout_mlp = MLPReadout(hidden_dims[-1] * self.n_heads,
                                   n_classes)
Example #9
    def __init__(self, in_dim, hidden_dims, skip_which, skip_dim=128, n_out=1):
        super().__init__()
        self.n_heads = 8
        self.embedding_h = nn.Linear(in_dim, in_dim)

        _in_dim = in_dim
        self.gcn_list = nn.ModuleList()
        for hidden_dim in hidden_dims[:-1]:
            self.gcn_list.append(GATLayer(_in_dim, hidden_dim, self.n_heads, 0.0, True, True, True))
            _in_dim = hidden_dim * self.n_heads
        self.gcn_list.append(GATLayer(_in_dim, hidden_dims[-1]*self.n_heads, 1, 0.0, True, True, True))

        sk_hidden_dims = [in_dim] + [hidden_dims[which-1] * self.n_heads for which in skip_which]
        self.skip_connect_index = [0] + skip_which
        self.skip_connect_list = nn.ModuleList()
        for hidden_dim in sk_hidden_dims:
            self.skip_connect_list.append(nn.Linear(hidden_dim, skip_dim, bias=False))

        self.readout = nn.Linear(len(self.skip_connect_list) * skip_dim, n_out, bias=False)
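The skip machinery taps the embedded input (index 0) plus the layers listed in skip_which, projects each tapped representation to skip_dim, and reads out from their concatenation. How tapped node states are pooled per graph is not visible in this snippet; the sketch below assumes mean pooling and DGL-style layers, so treat those choices as guesses:

import dgl    # assumed backend
import torch

def forward(self, g, h):
    h = self.embedding_h(h)
    outs = [h]                                      # index 0: embedded input
    for layer in self.gcn_list:
        h = layer(g, h)
        outs.append(h)
    parts = []
    for lin, which in zip(self.skip_connect_list, self.skip_connect_index):
        g.ndata['h'] = lin(outs[which])             # project the tapped layer to skip_dim
        parts.append(dgl.mean_nodes(g, 'h'))        # assumed: mean pooling over nodes
    return self.readout(torch.cat(parts, dim=-1))   # concat skips -> n_out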
Example #10
    def __init__(self, net_params):
        super().__init__()
        self.readout = net_params.readout

        self.embedding_h = nn.Linear(
            net_params.in_dim, net_params.hidden_dim * net_params.n_heads)
        self.in_feat_dropout = nn.Dropout(net_params.in_feat_dropout)

        self.layers = nn.ModuleList([
            GATLayer(net_params.hidden_dim * net_params.n_heads,
                     net_params.hidden_dim, net_params.n_heads,
                     net_params.dropout, net_params.graph_norm,
                     net_params.batch_norm, net_params.residual)
            for _ in range(net_params.L - 1)
        ])
        self.layers.append(
            GATLayer(net_params.hidden_dim * net_params.n_heads,
                     net_params.out_dim, 1, net_params.dropout,
                     net_params.graph_norm, net_params.batch_norm,
                     net_params.residual))

        self.readout_mlp = MLPReadout(net_params.out_dim, net_params.n_classes)
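Unlike the dict-based examples, this constructor reads net_params by attribute. A minimal way to drive it, with values borrowed from Example #4 and a hypothetical class name:

from types import SimpleNamespace

net_params = SimpleNamespace(
    in_dim=32, hidden_dim=19, out_dim=152, n_heads=8, n_classes=10,
    L=4, readout="mean", in_feat_dropout=0.0, dropout=0.0,
    graph_norm=True, batch_norm=True, residual=True,
)
model = GATNet(net_params)  # hypothetical class name for this snippet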
Example #11
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        num_bond_type = net_params['num_bond_type']
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']

        self.att_reduce_fn = net_params['att_reduce_fn']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']
        
        self.dropout = dropout
        
        self.embedding_lin = nn.Linear(num_atom_type, hidden_dim, bias=False)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)
        
        self.layers = nn.ModuleList([
            GATLayer(hidden_dim, hidden_dim, num_heads, dropout,
                     self.graph_norm, self.batch_norm, self.layer_norm,
                     self.att_reduce_fn) for _ in range(n_layers)
        ])

        self.linear_ro = nn.Linear(hidden_dim, out_dim, bias=False)
        self.linear_predict = nn.Linear(out_dim, 1, bias=True)

        # additional parameters for gated GCN
        if self.residual == "gated":
            self.W_g = nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
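W_g points to a gated residual: a learned gate mixing a layer's input and output. The update rule itself is not shown in this snippet; one common highway-style formulation, stated here as an assumption only, is:

import torch

def gated_residual(W_g, h_in, h_out):
    # gate in (0, 1), computed from the concatenated old and new node states
    z = torch.sigmoid(W_g(torch.cat([h_in, h_out], dim=-1)))
    return z * h_out + (1.0 - z) * h_in   # convex mix of new and old state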