Code example #1
File: model.py Project: illidanlab/MoCL-DK
    def __init__(self,
                 num_layer,
                 emb_dim,
                 num_tasks,
                 JK="last",
                 drop_ratio=0,
                 graph_pooling="mean",
                 gnn_type="gin"):
        super(GNN_graphCL, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.gnn = GNN(num_layer, emb_dim, JK, drop_ratio, gnn_type=gnn_type)
        self.proj_head = nn.Sequential(nn.Linear(emb_dim, 128),
                                       nn.ReLU(inplace=True),
                                       nn.Linear(128, 128))

        #Different kind of graph pooling
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            if self.JK == "concat":
                self.pool = GlobalAttention(
                    gate_nn=torch.nn.Linear((self.num_layer + 1) * emb_dim, 1))
            else:
                self.pool = GlobalAttention(
                    gate_nn=torch.nn.Linear(emb_dim, 1))
        elif graph_pooling[:-1] == "set2set":
            set2set_iter = int(graph_pooling[-1])
            if self.JK == "concat":
                self.pool = Set2Set((self.num_layer + 1) * emb_dim,
                                    set2set_iter)
            else:
                self.pool = Set2Set(emb_dim, set2set_iter)
        else:
            raise ValueError("Invalid graph pooling type.")

        #For graph-level binary classification
        if graph_pooling[:-1] == "set2set":
            self.mult = 2
        else:
            self.mult = 1

        if self.JK == "concat":
            self.graph_pred_linear = torch.nn.Linear(
                self.mult * (self.num_layer + 1) * self.emb_dim,
                self.num_tasks)
        else:
            self.graph_pred_linear = torch.nn.Linear(self.mult * self.emb_dim,
                                                     self.num_tasks)
Code example #2
class GlobalAttentionNet(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden):
        super().__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
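        # With only a gate network given, GlobalAttention pools the raw node features
        # as a sum weighted by softmax-normalized per-node scores.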
        self.att = GlobalAttention(Linear(hidden, 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.att.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = self.att(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Code example #3
    def __init__(self, dataset, num_layers, hidden):
        super().__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.att = GlobalAttention(Linear(hidden, 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
Code example #4
    def __init__(
        self,
        num_layer,
        node_feat_dim,
        edge_feat_dim,
        emb_dim,
        fingerprint_dim,
        JK="last",
        graph_pooling="mean",
        drop_ratio=0,
        gnn_type="gine",
        use_embedding=False,
    ):
        super(GNN_fingerprint, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        if use_embedding:
            self.gnn = GNN(num_layer,
                           emb_dim,
                           JK,
                           drop_ratio,
                           gnn_type=gnn_type)
        else:
            self.gnn = GNN_MLP(num_layer,
                               node_feat_dim,
                               edge_feat_dim,
                               emb_dim,
                               JK,
                               drop_ratio,
                               gnn_type=gnn_type)
        self.linear_pred_fingerprint = torch.nn.Linear(emb_dim, emb_dim)

        self.fingerprint_decoder = FingerprintDecoder(emb_dim, fingerprint_dim)

        # Different kind of graph pooling
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            if self.JK == "concat":
                self.pool = GlobalAttention(
                    gate_nn=torch.nn.Linear((self.num_layer + 1) * emb_dim, 1))
            else:
                self.pool = GlobalAttention(
                    gate_nn=torch.nn.Linear(emb_dim, 1))
        else:
            raise ValueError("Invalid graph pooling type.")
Code example #5
    def __init__(self,
                 t_edge: int,
                 t_node: int,
                 n_node: int,
                 h_dim: int,
                 n_out: int,
                 n_layers=3):
        super().__init__()
        self.t_edge = t_edge
        self.t_node = t_node
        self.n_node = n_node
        self.h_dim = h_dim
        self.n_out = n_out

        self.encoder = Encoder(t_edge, t_node, h_dim, n_layers)
        self.mean = nn.Linear(h_dim, h_dim)
        self.logvar = nn.Linear(h_dim, h_dim)
        self.decoder = Decoder(t_node, t_edge, h_dim)
        self.affine = nn.Sequential(nn.LayerNorm(h_dim), nn.ReLU(True),
                                    nn.Dropout(0.2), nn.Linear(h_dim, h_dim))
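        # Second positional argument is the feature network: self.affine transforms
        # each node embedding before the attention-weighted sum.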
        self.gp = GlobalAttention(nn.Linear(h_dim, 1), self.affine)

        self.out = nn.Sequential(nn.LayerNorm(h_dim), nn.ReLU(True),
                                 nn.Dropout(0.5), nn.Linear(h_dim, h_dim // 2),
                                 nn.LayerNorm(h_dim // 2), nn.ReLU(),
                                 nn.Dropout(0.5), nn.Linear(h_dim // 2, n_out))
Code example #6
File: networks.py Project: Genuage/Genuage
    def __init__(self, n_c=64, latent_dim=128, p=0., dim=1, x_dim=1, e_dim=1):
        super(TrajsEncoder, self).__init__()
        moments = [1, 2, 4]
        M = len(moments)

        self.jumpsConv1 = JumpsConv(out_channels=n_c,
                                    x_dim=x_dim,
                                    edge_attr_dim=e_dim,
                                    dropout=p,
                                    aggr="mean",
                                    moments=moments)
        self.jumpsConv2 = JumpsConv(out_channels=n_c,
                                    x_dim=n_c,
                                    dropout=p,
                                    edge_attr_dim=e_dim,
                                    aggr="max")

        self.jumpsConv_final = JumpsConv(out_channels=n_c,
                                         x_dim=2 * n_c,
                                         dropout=p,
                                         edge_attr_dim=e_dim,
                                         aggr="mean",
                                         moments=moments)

        gate_nn = MLP([3 * n_c, n_c, n_c // 2, 1], dropout=p)
        self.pooling = GlobalAttention(gate_nn=gate_nn)
        #self.pooling = global_mean_pool

        self.mlp = MLP([3 * n_c, 2 * n_c, 2 * latent_dim, latent_dim - 1],
                       dropout=p)  # used to be tanh for last_activation
Code example #7
    def __init__(self, num_classes):
        super(PoolNetv2, self).__init__()

        self.att_w = nn.Sequential(nn.Linear(512, 256), nn.ELU(),
                                   nn.Linear(256, 128), nn.ELU(),
                                   nn.Linear(128, 1))

        self.att_net = nn.Sequential(
            nn.Linear(512, 512),
            #nn.Tanh()
            nn.ELU())

        self.conv1 = GATConv(3, 32, heads=2)
        self.norm1 = InstanceNorm(64, affine=True)
        self.pool1 = TopKPooling(64, ratio=0.3, nonlinearity=torch.sigmoid)

        self.conv2 = GATConv(64, 128, heads=2)
        self.norm2 = InstanceNorm(256, affine=True)
        self.pool2 = TopKPooling(256, ratio=0.3, nonlinearity=torch.sigmoid)

        self.conv3 = GATConv(256, 512, heads=2, concat=False)
        self.norm3 = InstanceNorm(512, affine=True)

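        # att_w scores each 512-d node vector and att_net re-embeds it to 512-d, so the
        # pooled graph vector matches the 512-d input expected by self.lin1 below.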
        self.att = GlobalAttention(gate_nn=self.att_w, nn=self.att_net)

        self.lin1 = Linear(512, 512)
        self.lin2 = Linear(512, 256)
        self.lin3 = Linear(256, num_classes)
Code example #8
File: model.py Project: zhaoshan2/pretrain-gnns
    def __init__(self,
                 num_layer,
                 emb_dim,
                 num_tasks,
                 JK="last",
                 drop_ratio=0,
                 graph_pooling="mean",
                 gnn_type="gin"):
        super(GNN_graphpred, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.gnn = GNN(num_layer, emb_dim, JK, drop_ratio, gnn_type=gnn_type)

        #Different kind of graph pooling
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Linear(emb_dim, 1))
        else:
            raise ValueError("Invalid graph pooling type.")

        self.graph_pred_linear = torch.nn.Linear(2 * self.emb_dim,
                                                 self.num_tasks)
Code example #9
    def __init__(self):
        super().__init__()

        att_mask = Linear(config.emb_size, 1)
        att_feat = Sequential( Linear(config.emb_size, config.emb_size), LeakyReLU() )

        self.glob = GlobalAttention(att_mask, att_feat)
        self.tranform = Sequential( Linear(config.emb_size + config.emb_size, config.emb_size), LeakyReLU() )
Code example #10
    def __init__(
        self,
        dm: DataModule,
        n_c: int = 64,  # Number of convolution kernels
        latent_dim: int = 8,
    ):  # Dimension of edges
        super(TrajsEncoder, self).__init__()
        # To compute moments of features

        x_dim = dm.x_dim
        e_dim = dm.e_dim
        self.no_edge_mode = e_dim == 0

        if self.no_edge_mode:
            Conv = MinimalJumpsConv
        else:
            Conv = JumpsConv

        f_inner_width = [128, 64]
        moments = [1]
        n_final_convolutions = 1

        self.conv1 = Conv(
            out_channels=n_c,
            x_dim=x_dim,
            edge_attr_dim=e_dim,
            f_inner_width=f_inner_width,
            aggr="mean",
            moments=moments,
        )

        self.conv2 = Conv(
            out_channels=n_c,
            x_dim=n_c,
            f_inner_width=f_inner_width,
            edge_attr_dim=e_dim,
            aggr="max",
        )
        final_convs = []
        for i in range(n_final_convolutions):
            final_convs.append(
                Conv(
                    out_channels=n_c,
                    x_dim=(1 + 1 * (i == 0)) * n_c,
                    f_inner_width=f_inner_width,
                    edge_attr_dim=e_dim,
                    aggr="mean",
                ))
        self.final_convs = nn.ModuleList(final_convs)

        K = 2 + n_final_convolutions
        # K = 1
        # if params_scarcity == 0:
        gate_nn = MLP([K * n_c, n_c, n_c // 2, 1])
        self.pooling = GlobalAttention(gate_nn=gate_nn)

        self.mlp = MLP([K * n_c,
                        latent_dim])  # used to be tanh for last_activation
Code example #11
    def __init__(self, node_size, global_size):
        super().__init__()

        att_mask = Linear(node_size, 1)
        att_feat = Sequential(Linear(node_size, node_size), LeakyReLU())

        self.glob = GlobalAttention(att_mask, att_feat)
        self.tranform = Sequential(Linear(global_size * 2, global_size),
                                   LeakyReLU())
Code example #12
    def __init__(self, num_inputs, hidden_dim, latent_dim):
        super(GraphEncoder, self).__init__()
        self.conv1 = GCNConv(num_inputs, hidden_dim)
        self.conv2 = GCNConv(hidden_dim, hidden_dim)
        # mean and std projectors
        self.mean_layer = nn.Linear(hidden_dim, latent_dim)
        self.var_layer = nn.Linear(hidden_dim, latent_dim)

        self.attention = GlobalAttention(nn.Linear(hidden_dim, 1))
Code example #13
    def __init__(
        self,
        num_tasks,
        num_layer=5,
        emb_dim=300,
        gnn_type="gin",
        virtual_node=True,
        residual=False,
        drop_ratio=0.5,
        jk="last",
        graph_pooling="mean",
    ):
        if num_layer <= 1:
            raise ValueError("Number of GNN layers must be greater than 1.")

        super(GNN, self).__init__()

        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.jk = jk
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks
        self.graph_pooling = graph_pooling

        # GNN to generate node embeddings
        gnn_cls = GNN_node_Virtualnode if virtual_node else GNN_node
        self.gnn_node = gnn_cls(
            num_layer,
            emb_dim,
            jk=jk,
            drop_ratio=drop_ratio,
            residual=residual,
            gnn_type=gnn_type,
        )

        # Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(
                torch.nn.Linear(emb_dim, 2 * emb_dim),
                torch.nn.BatchNorm1d(2 * emb_dim),
                torch.nn.ReLU(),
                torch.nn.Linear(2 * emb_dim, 1),
            ))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        adj = 2 if graph_pooling == "set2set" else 1
        self.graph_pred_linear = torch.nn.Linear(adj * self.emb_dim,
                                                 self.num_tasks)
Code example #14
File: gcn.py Project: dotd/GNN_experiments
    def __init__(self,
                 num_classes: int,
                 num_layer=5,
                 emb_dim=300,
                 node_encoder=None,
                 edge_encoder_ctor: torch.nn.Module = None,
                 residual=False,
                 drop_ratio=0.5,
                 JK="last",
                 graph_pooling="mean",
                 max_seq_len=1):

        super(GCN, self).__init__()

        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.graph_pooling = graph_pooling
        self.max_seq_len = max_seq_len
        self.num_classes = num_classes

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        # GNN to generate node embeddings
        self.gnn_node = GNN_node(num_layer,
                                 emb_dim=emb_dim,
                                 JK=JK,
                                 node_encoder=node_encoder,
                                 edge_encoder_ctor=edge_encoder_ctor,
                                 drop_ratio=drop_ratio,
                                 residual=residual)

        # Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(
                torch.nn.Linear(emb_dim, 2 * emb_dim),
                torch.nn.BatchNorm1d(2 * emb_dim),
                torch.nn.ReLU(),
                torch.nn.Linear(2 * emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        self.graph_pred_linear_list = torch.nn.ModuleList()
        for i in range(max_seq_len):
            self.graph_pred_linear_list.append(
                torch.nn.Linear(emb_dim, self.num_classes))
Code example #15
    def __init__(self, args, num_node_features, num_edge_features):
        super(GNN, self).__init__()

        self.depth = args.depth
        self.hidden_size = args.hidden_size
        self.dropout = args.dropout
        self.gnn_type = args.gnn_type
        self.graph_pool = args.graph_pool
        self.tetra = args.tetra
        self.task = args.task

        if self.gnn_type == 'dmpnn':
            self.edge_init = nn.Linear(num_node_features + num_edge_features, self.hidden_size)
            self.edge_to_node = DMPNNConv(args)
        else:
            self.node_init = nn.Linear(num_node_features, self.hidden_size)
            self.edge_init = nn.Linear(num_edge_features, self.hidden_size)

        # layers
        self.convs = torch.nn.ModuleList()

        for _ in range(self.depth):
            if self.gnn_type == 'gin':
                self.convs.append(GINEConv(args))
            elif self.gnn_type == 'gcn':
                self.convs.append(GCNConv(args))
            elif self.gnn_type == 'dmpnn':
                self.convs.append(DMPNNConv(args))
            else:
                raise ValueError('Undefined GNN type called {}'.format(self.gnn_type))

        # graph pooling
        if self.tetra:
            self.tetra_update = get_tetra_update(args)

        if self.graph_pool == "sum":
            self.pool = global_add_pool
        elif self.graph_pool == "mean":
            self.pool = global_mean_pool
        elif self.graph_pool == "max":
            self.pool = global_max_pool
        elif self.graph_pool == "attn":
            self.pool = GlobalAttention(
                gate_nn=torch.nn.Sequential(torch.nn.Linear(self.hidden_size, 2 * self.hidden_size),
                                            torch.nn.BatchNorm1d(2 * self.hidden_size),
                                            torch.nn.ReLU(),
                                            torch.nn.Linear(2 * self.hidden_size, 1)))
        elif self.graph_pool == "set2set":
            self.pool = Set2Set(self.hidden_size, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        # ffn
        self.mult = 2 if self.graph_pool == "set2set" else 1
        self.ffn = nn.Linear(self.mult * self.hidden_size, 1)
Code example #16
    def __init__(self, config):
        super(Net, self).__init__()

        annotation_size = config["hidden_size_orig"]
        hidden_size = config["gnn_h_size"]
        n_steps = config["num_timesteps"]
        num_cls = 2

        self.reduce = nn.Linear(annotation_size, hidden_size)
        self.conv = GatedGraphConv(hidden_size, n_steps)
        self.agg = GlobalAttention(nn.Linear(hidden_size, 1),
                                   nn.Linear(hidden_size, 2))
        self.lin = nn.Linear(hidden_size, num_cls)
Code example #17
    def __init__(self, args):
        super(GlobalAggregator, self).__init__()
        self.aggr = args.aggr
        if self.aggr == "global-attn":
            mlp1 = MLP(args.num_layers, args.input_size, args.hidden_size, 1)
            mlp2 = MLP(args.num_layers, args.input_size, args.hidden_size, args.output_size)
            self.global_attention = GlobalAttention(mlp1, mlp2)

        self.registry = {'mean': self.mean,
                         'max': self.max,
                         'sum': self.sum,
                         'mean-main': self.mean_main,
                         'mean-side': self.mean_side,
                         'global-attn': self.global_attn}
Code example #18
File: gnn.py Project: rpatil524/ogb
    def __init__(self, num_vocab, max_seq_len, node_encoder, num_layer = 5, emb_dim = 300, 
                    gnn_type = 'gin', virtual_node = True, residual = False, drop_ratio = 0.5, JK = "last", graph_pooling = "mean"):
        '''
            num_vocab (int): vocabulary size of the tokens to be predicted
            max_seq_len (int): maximum length of the predicted token sequence
            virtual_node (bool): whether to add virtual node or not
        '''

        super(GNN, self).__init__()

        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        self.emb_dim = emb_dim
        self.num_vocab = num_vocab
        self.max_seq_len = max_seq_len
        self.graph_pooling = graph_pooling

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        ### GNN to generate node embeddings
        if virtual_node:
            self.gnn_node = GNN_node_Virtualnode(num_layer, emb_dim, node_encoder, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
        else:
            self.gnn_node = GNN_node(num_layer, emb_dim, node_encoder, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)


        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, processing_steps = 2)
        else:
            raise ValueError("Invalid graph pooling type.")

        self.graph_pred_linear_list = torch.nn.ModuleList()

        if graph_pooling == "set2set":
            for i in range(max_seq_len):
                 self.graph_pred_linear_list.append(torch.nn.Linear(2*emb_dim, self.num_vocab))

        else:
            for i in range(max_seq_len):
                 self.graph_pred_linear_list.append(torch.nn.Linear(emb_dim, self.num_vocab))
Code example #19
def test_global_attention():
    channels, batch_size = (32, 10)
    gate_nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, 1))
    nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels))

    glob = GlobalAttention(gate_nn, nn)
    assert glob.__repr__() == (
        'GlobalAttention(gate_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=1, bias=True)\n'
        '), nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')

    x = torch.randn((batch_size**2, channels))
    batch = torch.arange(batch_size, dtype=torch.long)
    batch = batch.view(-1, 1).repeat(1, batch_size).view(-1)

    assert glob(x, batch).size() == (batch_size, channels)
    assert glob(x, batch, batch_size + 1).size() == (batch_size + 1, channels)
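
The test above fixes the calling convention: GlobalAttention is built from a gate network that scores each node (plus an optional feature network that transforms it), and its forward pass reduces [num_nodes, channels] node features to [num_graphs, channels] given a batch assignment vector. Below is a minimal standalone sketch of that usage; the dimensions and tensor names are illustrative only, and newer PyTorch Geometric releases expose the same operator as AttentionalAggregation.

import torch
from torch.nn import Linear, ReLU, Sequential
from torch_geometric.nn import GlobalAttention

channels = 32
gate_nn = Sequential(Linear(channels, channels), ReLU(), Linear(channels, 1))  # per-node score
feat_nn = Sequential(Linear(channels, channels), ReLU())  # optional per-node transform
pool = GlobalAttention(gate_nn, feat_nn)

x = torch.randn(100, channels)  # node features for all graphs in the mini-batch
batch = torch.tensor([0] * 25 + [1] * 25 + [2] * 25 + [3] * 25)  # node-to-graph assignment
out = pool(x, batch)  # attention-weighted sum per graph -> shape [4, channels]
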
Code example #20
    def __init__(self, dim_features, dim_target, model_config, JK="last"):
        super(GIN, self).__init__()

        self.config = model_config
        self.node_encoder = NodeEncoder(model_config['gnn_hidden_dimensions'],
                                        num_nodetypes=len(NODE_PRIMITIVES),
                                        num_nodeattributes=8)

        if model_config['virtual_node']:
            self.gnn_node = GNN_node_Virtualnode(
                num_layer=model_config['num_gnn_layers'],
                emb_dim=model_config['gnn_hidden_dimensions'],
                JK=JK,
                drop_ratio=model_config['dropout_prob'],
                residual=False,
                gnn_type='gin',
                node_encoder=self.node_encoder)
        else:
            self.gnn_node = GNN_node(
                num_layer=model_config['num_gnn_layers'],
                emb_dim=model_config['gnn_hidden_dimensions'],
                JK=JK,
                drop_ratio=model_config['drop_ratio'],
                residual=False,
                gnn_type='gin',
                node_encoder=self.node_encoder)
        if model_config['graph_pooling'] == "sum":
            self.pool = global_add_pool
        elif model_config['graph_pooling'] == "mean":
            self.pool = global_mean_pool
        elif model_config['graph_pooling'] == "max":
            self.pool = global_max_pool
        elif model_config['graph_pooling'] == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(
                torch.nn.Linear(model_config['gnn_hidden_dimensions'],
                                2 * model_config['gnn_hidden_dimensions']),
                torch.nn.BatchNorm1d(2 * model_config['gnn_hidden_dimensions']),
                torch.nn.ReLU(),
                torch.nn.Linear(2 * model_config['gnn_hidden_dimensions'], 1)))
        elif model_config['graph_pooling'] == "set2set":
            self.pool = Set2Set(model_config['gnn_hidden_dimensions'],
                                processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        self.graph_pred_linear_list = torch.nn.ModuleList()

        self.graph_pred_linear = torch.nn.Linear(
            model_config['gnn_hidden_dimensions'], 1)
Code example #21
    def __init__(self):
        super(Net, self).__init__()
        self.lin0 = torch.nn.Linear(dataset.num_features, dim)

        nn = Sequential(Linear(2, 64), ReLU(), Linear(64, dim * dim))
        #nn = Sequential(Linear(5, dim * dim))
        self.conv = NNConv(dim, dim, nn, aggr='mean')
        self.gru = GRU(dim, dim)

        # self.set2set = Set2Set(dim, processing_steps=1)
        self.pool1 = SAGPooling(dim, min_score=0.001, GNN=GCNConv)
        gatt_nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, 1))
        self.gatt = GlobalAttention(gatt_nn)
        self.lin1 = torch.nn.Linear(dim, dim)
        self.lin2 = torch.nn.Linear(dim, 1)
Code example #22
    def __init__(self,
                 args,
                 in_features=16928,
                 hidden=2048,
                 heads=8,
                 out_features=3072,
                 edge_dim=2):
        super(GAT_x1_GATP, self).__init__()
        self.lin01 = nn.Sequential(nn.Linear(in_features, 2048, bias=False), nn.ReLU(), \
                                   nn.BatchNorm1d(2048, eps=0.2), nn.Dropout(0.7))

        self.conv1 = GATConv(2048, 128, heads=16, dropout=0.0)
        self.graph_norm_1 = GraphSizeNorm()
        self.pool = GlobalAttention(gate_nn=nn.Linear(2048, 1))
        self.lin2 = nn.Sequential(nn.Dropout(0.7), nn.Linear(2048, 1024, bias=False), nn.ReLU(), \
                                  nn.BatchNorm1d(1024, eps=0.2))
        self.lin3 = nn.Linear(1024, 10)
Code example #23
    def __init__(self,
                 n_word,
                 num_layer=5,
                 dim=768,
                 heads=12,
                 num_message_passing=3,
                 window=11,
                 layer_cnn=3,
                 layer_output=3,
                 drop_ratio=0.5,
                 graph_pooling='mean'):
        super().__init__()
        self.ligand_encoder = MolGT(num_layer, dim, heads, num_message_passing,
                                    drop_ratio)
        self.embed_word = nn.Embedding(n_word, dim)

        self.W_cnn = nn.ModuleList([
            nn.Conv2d(in_channels=1,
                      out_channels=1,
                      kernel_size=2 * window + 1,
                      stride=1,
                      padding=window) for _ in range(layer_cnn)
        ])
        self.W_attention = nn.Linear(dim, dim)
        self.W_out = nn.ModuleList(
            [nn.Linear(2 * dim, 2 * dim) for _ in range(layer_output)])
        self.W_interaction = nn.Linear(2 * dim, 2)
        self.layer_cnn = layer_cnn
        self.layer_output = layer_output

        self.dummy = False
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            # `dim` is the node embedding size used throughout this module
            self.pool = GlobalAttention(gate_nn=torch.nn.Linear(dim, 1))
        elif graph_pooling == "set2set":
            self.pool = nn.Sequential(Set2Set(dim, 3),
                                      nn.Linear(2 * dim, dim))
        elif graph_pooling == "dummy":
            self.dummy = True
        else:
            raise ValueError("Invalid graph pooling type.")
Code example #24
    def __init__(self, args):
        super(Proposal, self).__init__()

        self.args = args

        num_node_features = args.get("num_node_features")
        num_hidden = args.get("num_hidden")
        num_flex = args.get("num_flex")
        num_classes = args.get("num_classes")
        dropout = args.get("dropout", 0.0)

        self.dropout = dropout

        self.pe = PositionalEncoding(23, dropout)

        self.classic_conv = nn.Conv1d(23, 23, 3, padding=1)
        self.classic_conv_bn = torch.nn.BatchNorm1d(num_node_features)

        self.ll1 = Linear(num_node_features, num_hidden)

        self.att_block1 = AttentionBlock(num_hidden, 8 * num_hidden, depth=5)

        self.gp = GlobalAttention(
            torch.nn.Sequential(Linear(8 * num_hidden, 21 * num_hidden),
                                Linear(21 * num_hidden, 1)))

        self.slc4 = SublayerConnection(8 * num_hidden)

        self.fff = FinalFeedForward(8 * num_hidden, num_flex, dropout)

        self.classic_conv2 = nn.Conv1d(1, 10, 9, padding=4)
        self.classic_conv_bn2 = torch.nn.BatchNorm1d(8 * num_hidden)

        self.fc_task1 = Linear(8 * num_hidden,
                               num_node_features)  # random masked node task
        self.fc_task2 = Linear(
            8 * num_hidden,
            num_node_features)  # random node in next graph task

        self.fc1 = Linear(8 * num_hidden, num_classes)

        self.graph_embedding_function = args.get("graph_embedding_function",
                                                 None)

        self.reset_parameters()
Code example #25
    def __init__(self,
                 num_layer,
                 emb_dim,
                 heads,
                 num_message_passing,
                 num_tasks,
                 drop_ratio=0,
                 graph_pooling="mean"):
        super(MolGT_graphpred, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.emb_dim = emb_dim
        self.num_tasks = num_tasks

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.gnn = MolGNet(num_layer, emb_dim, heads, num_message_passing,
                           drop_ratio)
        self.dummy = False
        # Different kind of graph pooling
        if graph_pooling == "sum":
            self.pool = global_add_pool
        elif graph_pooling == "mean":
            self.pool = global_mean_pool
        elif graph_pooling == "max":
            self.pool = global_max_pool
        elif graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Linear(emb_dim, 1))
        elif graph_pooling == "set2set":
            self.pool = Set2Set(emb_dim, 3)
        elif graph_pooling == "collection":
            self.dummy = True
        else:
            raise ValueError("Invalid graph pooling type.")

        # For graph-level binary classification
        if graph_pooling == "set2set":
            self.mult = 2
        else:
            self.mult = 1

        self.graph_pred_linear = torch.nn.Linear(self.mult * self.emb_dim,
                                                 self.num_tasks)
Code example #26
    def __init__(self,
                 hidden,
                 n_features,
                 n_classes,
                 act='relu',
                 pool='avg',
                 dropout=0.):
        """ Model init

        Parameters
        ----------
        hidden: int
            Size of hidden layer
        n_features: int
            Size of feature dimension
        n_classes: int
            Number of classes
        act: str in ['relu', 'linear']
            Default: 'relu'
        pool: str in ['avg', 'max', 'att_h', 'att_x']
            Default: 'avg'
        dropout: float
            Dropout rate in training. Default: 0.
        """
        super(GC_NET, self).__init__()

        self.hidden = hidden
        self.n_features = n_features
        self.n_classes = n_classes
        self.act = act
        self.pool = pool

        # GCN layer
        self.conv = GCNConv(self.n_features, self.hidden, bias=False)
        # pooling
        if self.pool == 'att_x':
            self.att_x = Linear(self.n_features, self.n_features, bias=False)
        elif self.pool == 'att_h':
            self.att_h = GlobalAttention(torch.nn.Linear(self.hidden, 1))
        # linear output
        self.lin = Linear(self.hidden, self.n_classes, bias=False)
        # dropout
        self.dropout = Dropout(dropout)
Code example #27
File: neural_networks.py Project: AdelNabli/MCN
    def __init__(self, n_pool, dim_embedding, dim_hidden, weighted):
        super(ContextEncoder, self).__init__()

        self.weighted = weighted
        if weighted:
            # there are 8 features that are added to the input data
            first_dim = dim_embedding + 2
        else:
            # else, only 4 features are added
            first_dim = dim_embedding + 1

        self.n_pool = n_pool
        self.graph_pool = nn.ModuleList([
            GlobalAttention(
                nn.Sequential(nn.Linear(first_dim, dim_hidden), nn.ReLU(),
                              nn.Linear(dim_hidden, 1)),
                nn.Sequential(nn.Linear(first_dim, dim_hidden), nn.ReLU(),
                              nn.Linear(dim_hidden, dim_embedding)))
            for k in range(n_pool)
        ])
Code example #28
File: gnns.py Project: DeepWukong/DeepWukong
    def __init__(self, config: DictConfig, vocab: Vocabulary,
                 vocabulary_size: int, pad_idx: int):
        super(GraphConvEncoder, self).__init__()
        self.__config = config
        self.__pad_idx = pad_idx
        self.__st_embedding = STEncoder(config, vocab, vocabulary_size,
                                        pad_idx)

        self.input_GCL = GCNConv(config.rnn.hidden_size, config.hidden_size)

        self.input_GPL = TopKPooling(config.hidden_size,
                                     ratio=config.pooling_ratio)

        for i in range(config.n_hidden_layers - 1):
            setattr(self, f"hidden_GCL{i}",
                    GCNConv(config.hidden_size, config.hidden_size))
            setattr(
                self, f"hidden_GPL{i}",
                TopKPooling(config.hidden_size, ratio=config.pooling_ratio))

        self.attpool = GlobalAttention(torch.nn.Linear(config.hidden_size, 1))
Code example #29
    def __init__(self, att_config):
        super(AttInter, self).__init__()

        self.num_layer = att_config.num_conv_layer
        self.feat_dim = att_config.feat_dim
        self.emb_dim = att_config.conv_emb_dim
        self.hid_dim = att_config.pred_hid_dim
        self.graph_pooling = att_config.graph_pooling
        self.device = att_config.device

        self.attn_pool = AttnPooling(att_config.in_channels, att_config.ratio)

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.gnn_node = GNN_node(self.num_layer,
                                 self.emb_dim,
                                 JK=att_config.JK,
                                 drop_ratio=att_config.conv_drop_ratio,
                                 residual=False,
                                 gnn_type=att_config.gnn_type,
                                 feat_dim=self.feat_dim)

        ### Pooling function to generate whole-graph embeddings
        if self.graph_pooling == "sum":
            self.pool = global_add_pool
        elif self.graph_pooling == "mean":
            self.pool = global_mean_pool
        elif self.graph_pooling == "max":
            self.pool = global_max_pool
        elif self.graph_pooling == "attention":
            self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(
                torch.nn.Linear(self.emb_dim, 2 * self.emb_dim),
                torch.nn.BatchNorm1d(2 * self.emb_dim), torch.nn.ReLU(),
                torch.nn.Linear(2 * self.emb_dim, 1)))
        elif self.graph_pooling == "set2set":
            self.pool = Set2Set(self.emb_dim, processing_steps=2)
        else:
            raise ValueError("Invalid graph pooling type.")

        self.predictor = MLP(self.emb_dim + self.feat_dim, self.hid_dim, 2)
Code example #30
    def __init__(self, num_classes):
        super(PoolNet, self).__init__()

        self.att_w = nn.Sequential(nn.Linear(256, 64), nn.ELU(),
                                   nn.Linear(64, 1))

        self.att_net = nn.Sequential(nn.Linear(256, 256), nn.ELU())

        self.conv1 = GATConv(3, 16, heads=2)
        self.norm1 = InstanceNorm(32)
        self.pool1 = TopKPooling(32, ratio=0.3)

        self.conv2 = GATConv(32, 64, heads=2)
        self.norm2 = InstanceNorm(128)
        self.pool2 = TopKPooling(128, ratio=0.3)

        self.conv3 = GATConv(128, 256, heads=1)
        self.norm3 = InstanceNorm(256)

        self.att = GlobalAttention(gate_nn=self.att_w, nn=self.att_net)

        self.lin1 = Linear(256, 512)
        self.lin2 = Linear(512, 128)
        self.lin3 = Linear(128, num_classes)