Example 1
    def __init__(self, in_channels, out_channels1, out_channels2, out_channels3,
                 out_channels4, out_channels5, dropout, batch_size):
        super(GraphPooling, self).__init__()

        self.batch_size = batch_size
        self.out_channels2 = out_channels2

        # Encoding
        self.sage1 = tnn.SAGEConv(in_channels, out_channels1, normalize=False)
        self.sage2 = tnn.SAGEConv(out_channels1, out_channels2, normalize=False)
        self.sage3 = tnn.SAGEConv(out_channels2, out_channels3, normalize=False)
        self.sage4 = tnn.SAGEConv(out_channels3, out_channels4, normalize=False)
        self.tr1 = nn.Linear(out_channels4, out_channels5)
        self.tr2 = nn.Linear(out_channels5, 1)

        # use the dropout argument rather than a hard-coded 0.2
        self.drop = nn.Dropout(p=dropout)

        # Batch normalization
        self.bano1 = nn.BatchNorm1d(num_features=out_channels1)
        self.bano2 = nn.BatchNorm1d(num_features=out_channels2)
        self.bano3 = nn.BatchNorm1d(num_features=out_channels3)
        self.bano4 = nn.BatchNorm1d(num_features=out_channels4)
        self.bano5 = nn.BatchNorm1d(num_features=out_channels5)

        # EdgePooling coarsens the graph after selected SAGE layers
        self.edge1 = EdgePooling(out_channels1, edge_score_method=None, dropout=0., add_to_edge_score=0.5)
        self.edge2 = EdgePooling(out_channels2, edge_score_method=None, dropout=0., add_to_edge_score=0.5)
        self.edge3 = EdgePooling(out_channels3, edge_score_method=None, dropout=0., add_to_edge_score=0.5)
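A minimal forward sketch for this encoder, showing how the SAGE, batch-norm, and EdgePooling layers would chain together. The body is an illustration rather than part of the original snippet; it assumes import torch.nn.functional as F and relies on EdgePooling returning the coarsened x, edge_index, batch plus unpooling info:

    def forward(self, x, edge_index, batch):
        # SAGE -> BatchNorm -> ReLU, then coarsen the graph with EdgePooling
        x = F.relu(self.bano1(self.sage1(x, edge_index)))
        x, edge_index, batch, _ = self.edge1(x, edge_index, batch)
        x = F.relu(self.bano2(self.sage2(x, edge_index)))
        x, edge_index, batch, _ = self.edge2(x, edge_index, batch)
        x = F.relu(self.bano3(self.sage3(x, edge_index)))
        x, edge_index, batch, _ = self.edge3(x, edge_index, batch)
        x = F.relu(self.bano4(self.sage4(x, edge_index)))
        x = self.drop(x)
        x = F.relu(self.bano5(self.tr1(x)))
        return self.tr2(x)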
Example 2
 def __init__(self, feat_in, hidden_features=10, output_dim=1, dropout=0.1):
     """
     A class for graph node classification. Contains graph convolution layers.
     :param feat_in: Number of features of one node
     :param hidden_features: Number of features between layers
     :param output_dim: Number of output features per node
     :param dropout: Dropout probability (applied after each layer)
     """
     super(TreeSupport, self).__init__()
     self.dropout = dropout
     self.conv1 = gnn.GINConv(
         nn.Sequential(nn.Linear(feat_in, hidden_features), nn.LeakyReLU(),
                       nn.Linear(hidden_features, hidden_features)),
         train_eps=True
     )
     self.conv2 = gnn.GINConv(
         nn.Sequential(nn.Linear(hidden_features, max(hidden_features // 2, 1)), nn.LeakyReLU(),
                       nn.Linear(max(hidden_features // 2, 1), max(hidden_features // 2, 1))),
         train_eps=True
     )
     self.conv3 = gnn.GINConv(
         nn.Sequential(nn.Linear(max(hidden_features // 2, 1), max(hidden_features // 2, 1)), nn.LeakyReLU(),
                       nn.Linear(max(hidden_features // 2, 1), max(hidden_features // 2, 1))),
         train_eps=True
     )
     self.conv4 = gnn.SAGEConv(max(hidden_features // 2, 1), max(hidden_features // 2, 1))
     self.conv5 = gnn.SAGEConv(max(hidden_features // 2, 1), output_dim)
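A hedged forward sketch for this stack; the original snippet only defines the layers, so the wiring below (and the assumed import torch.nn.functional as F) is an illustration:

 def forward(self, x, edge_index):
     # conv -> LeakyReLU -> dropout for every layer except the output conv
     for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
         x = F.leaky_relu(conv(x, edge_index))
         x = F.dropout(x, p=self.dropout, training=self.training)
     return self.conv5(x, edge_index)  # per-node logits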
Example 3
    def __init__(self, ppi_adj, g2v_embedding, args):
        super(GEX_PPI_SAGE_cat4_MLP, self).__init__()


        print('num genes: ' + str(args.num_genes))
        self.bn1 = nn.BatchNorm1d(args.ecfp_nBits)
        self.bn2 = nn.BatchNorm1d(args.num_genes)
        self.bn3 = nn.BatchNorm1d(1)
        self.bn4 = nn.BatchNorm1d(1)

        self.drug_mlp1 = nn.Linear(args.ecfp_nBits,
                    int(args.ecfp_nBits*2/3 + args.drug_embed_dim/3), bias=True)
        self.drug_mlp2 = nn.Linear(int(args.ecfp_nBits*2/3 + args.drug_embed_dim/3),
                    int(args.ecfp_nBits/3 + args.drug_embed_dim*2/3), bias=True)
        self.drug_mlp3 = nn.Linear(int(args.ecfp_nBits/3 + args.drug_embed_dim*2/3),
                    args.drug_embed_dim, bias=True)

        # register the SAGE layers in a ModuleList (equivalent to the former
        # dict + add_module pattern, and still indexable as self.gcns[i])
        self.gcns = nn.ModuleList()
        for i in range(args.num_gcn_hops):
            if i == 0:
                self.gcns.append(geo_nn.SAGEConv(args.gene2vec_dim, args.gcn_hidden_dim1*4))
            else:
                self.gcns.append(geo_nn.SAGEConv(args.gcn_hidden_dim1*4, args.gcn_hidden_dim1*4))


        self.pred_emb_dim = args.drug_embed_dim + (args.num_gcn_hops*args.gcn_hidden_dim1*args.gat_num_heads) + 2

        self.pred_mlp1 = nn.Linear(self.pred_emb_dim, int(self.pred_emb_dim*2/3), bias=True)
        self.pred_mlp2 = nn.Linear(int(self.pred_emb_dim*2/3), int(self.pred_emb_dim/3), bias=True)
        self.pred_mlp3 = nn.Linear(int(self.pred_emb_dim/3), args.num_classes, bias=True)

        self.activ = nn.ReLU()
        self.activ2 = nn.Softmax(dim=1)  # explicit dim avoids the implicit-dim deprecation warning


        # gene2vec node embeddings (optionally initialized from a pretrained matrix)
        self.g2v_embeddings = nn.Embedding(args.num_genes, args.gene2vec_dim)
        if args.g2v_pretrained:
            g2v_embedding = F.normalize(torch.from_numpy(g2v_embedding), p=2)
            self.g2v_embeddings.weight.data.copy_(g2v_embedding)


        self.ppi_adj = ppi_adj  # shape: 2 x num_edges

        # Readout MLP
        self.readout_mlp1 = nn.Linear(args.num_genes,
                int(args.num_genes*2/3), bias=True)
        self.readout_mlp2 = nn.Linear(int(args.num_genes*2/3),
                int(args.num_genes/3), bias=True)
        self.readout_mlp3 = nn.Linear(int(args.num_genes/3), 1, bias=True)
Example 4
    def _setup_layers(self):
        dims = []
        # GNN layers
        graph_layers = []
        for layer in self.config['graph_layers']:
            if type(layer) == int:
                dim = dims[-1] if len(dims) != 0 else self.config['n_features']
                graph_layers.append(geo_nn.SAGEConv(dim, layer))
                dims.append(layer)

            elif type(layer) == str and layer in self.activations:
                graph_layers.append(self.activations[layer]())

        # Fully connected layers
        fc_layers = []
        for i, layer in enumerate(self.config['fc_layers']):
            if type(layer) == int:
                # put a ReLU between two consecutive Linear specs, then still
                # add the Linear itself (the original `continue` skipped it)
                if i != 0 and type(self.config['fc_layers'][i - 1]) == int:
                    fc_layers.append(NN.ReLU())

                dim = dims[-1] if len(dims) != 0 else self.config['n_features']
                fc_layers.append(NN.Linear(dim, layer))
                dims.append(layer)

            elif type(layer) == str and layer in self.activations:
                fc_layers.append(self.activations[layer]())

            elif layer == 'drop':
                fc_layers.append(NN.Dropout(.3))

        # Output layer
        fc_layers.append(NN.Linear(dims[-1], self.config['n_labels']))

        return NN.ModuleList(graph_layers), NN.ModuleList(fc_layers)
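For context, a hypothetical config this builder would accept. The keys mirror the lookups above; the concrete values are made up, and the activation strings must be keys of self.activations:

config = {
    'n_features': 32,                        # input size of the first layer
    'n_labels': 4,                           # size of the final output layer
    'graph_layers': [64, 'relu', 64],        # ints become SAGEConv, strings become activations
    'fc_layers': [128, 'relu', 'drop', 64],  # 'drop' adds NN.Dropout(.3)
}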
Example 5
    def __init__(self,
        n_features,
        n_classes,
        n_hidden_GNN=[10],
        n_hidden_FC=[],
        dropout_GNN=0,
        dropout_FC=0):
        super(GraphSAGE, self).__init__(
            n_features, n_classes, n_hidden_GNN,
            n_hidden_FC, dropout_FC, dropout_GNN)

        self.layers_GNN.append(pyg_nn.SAGEConv(1, n_hidden_GNN[0]))
        if self.n_layers_GNN > 1:
            for i in range(self.n_layers_GNN - 1):
                self.layers_GNN.append(
                    pyg_nn.SAGEConv(n_hidden_GNN[i], n_hidden_GNN[(i + 1)]))
Example 6
 def __init__(self, feat_in, n_classes=3, max_nodes=45):
     """
     Initializes class for alignment classification
     :param feat_in: the second dimension size of node feature matrix
     :param n_classes: the number of classes in dataset
     :param max_nodes: maximum number of nodes in a graph
     """
     super(AlignSAGE, self).__init__()
     first_hidden = max(feat_in // 2, 1)
     second_hidden = max(feat_in // 4, 1)
     self.N = max_nodes
     self.sage1 = gnn.SAGEConv(feat_in, first_hidden)
     self.pooling1 = gnn.SAGPooling(first_hidden)
     self.sage2 = gnn.SAGEConv(first_hidden, second_hidden)
     self.pooling2 = gnn.SAGPooling(second_hidden)
     self.lin1 = nn.Linear(10, 100)
     self.lin2 = nn.Linear(100, n_classes)
Example 7
    def build_conv_model(self, model_type, node_in_dim, node_out_dim, edge_dim,
                         edge_mode, normalize_emb, activation, aggr):

        if model_type == 'GCN':
            return pyg_nn.GCNConv(node_in_dim, node_out_dim)
        elif model_type == 'GraphSage':
            return pyg_nn.SAGEConv(node_in_dim, node_out_dim)
        elif model_type == 'GAT':
            return pyg_nn.GATConv(node_in_dim, node_out_dim)
        elif model_type == 'EGCN':
            return EGCNConv(node_in_dim, node_out_dim, edge_dim, edge_mode)
        elif model_type == 'EGSAGE':
            return EGraphSage(node_in_dim, node_out_dim, edge_dim, activation,
                              edge_mode, normalize_emb, aggr)
        else:
            # fail loudly on unknown model types instead of silently returning None
            raise NotImplementedError(model_type)
Example 8
def build_conv_model(conv_type, node_in_dim, node_out_dim):
    if conv_type == 'GSage':
        return pyg_nn.SAGEConv(node_in_dim, node_out_dim)
    elif conv_type == 'GCN':
        return pyg_nn.GCNConv(node_in_dim, node_out_dim)
    elif conv_type == 'GAT':
        return pyg_nn.GATConv(node_in_dim, node_out_dim)
    elif conv_type == 'GSage2':
        from conv_layers import GraphSage2
        return GraphSage2(node_in_dim, node_out_dim)
    elif conv_type == 'GSageW':
        from conv_layers import GraphSageW
        return GraphSageW(node_in_dim, node_out_dim)
    else:
        raise NotImplementedError
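A usage sketch for this factory; the dimensions and the x/edge_index tensors are placeholders:

conv = build_conv_model('GSage', node_in_dim=16, node_out_dim=32)
out = conv(x, edge_index)  # x: [num_nodes, 16] -> out: [num_nodes, 32]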
Example 9
def build_gnn_conv_model(conv_type, node_in_dim, node_out_dim):
    if conv_type == 'GSage':
        return pyg_nn.SAGEConv(node_in_dim,
                               node_out_dim,
                               normalize=False,
                               bias=True)
    elif conv_type == 'GCN':
        return pyg_nn.GCNConv(node_in_dim,
                              node_out_dim,
                              add_self_loops=False,
                              normalize=True,
                              bias=True)
    elif conv_type == 'GAT':
        return pyg_nn.GATConv(node_in_dim,
                              node_out_dim,
                              heads=1,
                              concat=True,
                              negative_slope=0.2,
                              dropout=0.,
                              add_self_loops=False,
                              bias=True)
    elif conv_type == 'Tran':
        gnn = pyg_nn.TransformerConv(node_in_dim,
                                     node_out_dim,
                                     heads=1,
                                     concat=True,
                                     dropout=0.,
                                     bias=True)
        gnn.lin_edge = None  # remove redundant parameters
        return gnn
    elif conv_type == 'GSage2':
        from rlkit.torch.networks.conv_layers import GraphSage2
        return GraphSage2(node_in_dim, node_out_dim)
    elif conv_type == 'GSageW':
        from rlkit.torch.networks.conv_layers import GraphSageW
        return GraphSageW(node_in_dim, node_out_dim)
    else:
        raise NotImplementedError
Example 10
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 seq_len,
                 head_num,
                 qs_graph_dir,
                 device,
                 dropout,
                 n_hop,
                 gcn_type,
                 gcn_layer_num,
                 gcn_on,
                 pretrained_embedding=None,
                 freeze=True):
        """[summary]

        Args:
            input_dim ([type]): [description]
            hidden_dim ([type]): [description]
            output_dim ([type]): output dim of GCN
            seq_len ([type]): [description]
            head_num ([type]): [description]
            qs_graph_dir ([type]): [description]
        """
        super(Model, self).__init__()

        self.device = device

        self.gcn_on = gcn_on

        self.pretrained_embedding = pretrained_embedding

        if gcn_type == 'sgconv' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.SGConv(input_dim, output_dim,
                                                K=n_hop))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.SGConv(input_dim, hidden_dim,
                                                K=n_hop))
            else:
                raise ValueError(
                    "Unsupported gcn_layer_num {}".format(gcn_layer_num))

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(
                        pyg_nn.SGConv(hidden_dim, output_dim, K=n_hop))
                else:
                    self.convs.append(
                        pyg_nn.SGConv(hidden_dim, hidden_dim, K=n_hop))
        elif gcn_type == 'gconv' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.GCNConv(input_dim, output_dim))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.GCNConv(input_dim, hidden_dim))
            else:
                raise ValueError(
                    "Unsupported gcn_layer_num {}".format(gcn_layer_num))

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.GCNConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.GCNConv(hidden_dim, hidden_dim))
        elif gcn_type == 'gat' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(
                    pyg_nn.GATConv(input_dim,
                                   output_dim,
                                   heads=5,
                                   concat=False))
            elif gcn_layer_num > 1:
                self.convs.append(
                    pyg_nn.GATConv(input_dim,
                                   hidden_dim,
                                   heads=5,
                                   concat=False))
            else:
                raise ValueError(
                    "Unsupported gcn_layer_num {}".format(gcn_layer_num))

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.GATConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.GATConv(hidden_dim, hidden_dim))
        elif gcn_type == 'sage' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.SAGEConv(input_dim, output_dim))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.SAGEConv(input_dim, hidden_dim))
            else:
                raise ValueError(
                    "Unsupported gcn_layer_num {}".format(gcn_layer_num))

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.SAGEConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.SAGEConv(hidden_dim, hidden_dim))

        self.dropout = dropout

        with open(qs_graph_dir, "r") as src:
            self.qs_graph = json.load(src)

        # ! input shape should be (seq_len - 1)
        self.seq_len = seq_len

        self.correctness_embedding_layer = nn.Embedding(2, input_dim)

        if self.pretrained_embedding is None:
            self.node_embedding_layer = nn.Embedding(len(self.qs_graph),
                                                     input_dim)

        self.linears = nn.ModuleList()
        self.linears.append(nn.Linear(output_dim * 2, output_dim, bias=True))

        for _ in range(3):
            self.linears.append(nn.Linear(output_dim, output_dim, bias=False))

        self.linears.append(nn.Linear(output_dim, 1))

        self.MHA = nn.MultiheadAttention(embed_dim=output_dim,
                                         num_heads=head_num,
                                         dropout=self.dropout[1])

        self.FFN = nn.ModuleList()

        for _ in range(2):
            self.FFN.append(nn.Linear(output_dim, output_dim))

        self.lns = nn.ModuleList()
        self.lns.append(nn.LayerNorm(hidden_dim))
        self.lns.append(nn.LayerNorm(hidden_dim))

        self.lns.append(nn.LayerNorm(output_dim))
        self.lns.append(nn.LayerNorm(output_dim))

        self.pos_embedding = nn.Embedding(seq_len - 1, output_dim)

        self.dropout_layers = nn.ModuleList()

        self.dropout_layers.append(nn.Dropout(p=self.dropout[0]))
        self.dropout_layers.append(nn.Dropout(p=self.dropout[2]))
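A hedged sketch of how a self.convs stack built this way is typically applied in the forward pass; this helper is an illustration, not part of the original model, and assumes import torch.nn.functional as F:

    def run_gcn(self, x, edge_index):
        for i, conv in enumerate(self.convs):
            x = conv(x, edge_index)
            if i < self.gcn_layer_num - 1:  # no activation/dropout after the last layer
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout[0], training=self.training)
        return x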
Example 11
 def build_conv_model(self, node_in_dim, node_out_dim):
     return pyg_nn.SAGEConv(node_in_dim, node_out_dim)
Example 12
    def __init__(
        self,
        pre_graph_builder,
        obs_dim,
        action_dim,
        node_dim,
        conv_type,
        use_attention=True,
        normalize_emb=False,
        num_layer=1,
        output_activation=None,
    ):
        super(GraphContextNet, self).__init__()

        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.node_dim = node_dim
        self.conv_type = conv_type
        self.num_layer = num_layer

        # graph builder
        self.pre_graph_builder = pre_graph_builder

        # convs
        if self.conv_type == 'GContext':
            self.conv = ContextConv(
                obs_dim,
                action_dim,
                node_dim,
                use_attention=use_attention,
                num_layer=num_layer,
                normalize_emb=normalize_emb,
                negative_slope=0.2,
            )
        elif self.conv_type == 'GAT':
            self.pre_lin_l = Mlp(
                input_size=obs_dim + action_dim,
                output_size=node_dim,
                hidden_sizes=[node_dim] * num_layer,
                hidden_activation=nn.LeakyReLU(0.2),
                output_activation=nn.LeakyReLU(0.2),
            )
            self.pre_lin_r = Mlp(
                input_size=obs_dim,
                output_size=node_dim,
                hidden_sizes=[node_dim] * num_layer,
                hidden_activation=nn.LeakyReLU(0.2),
                output_activation=nn.LeakyReLU(0.2),
            )
            self.conv = pyg_nn.GATConv(
                in_channels=[node_dim, node_dim],
                out_channels=node_dim,
                heads=1,
                concat=True,
                negative_slope=0.2,
                dropout=0.,
                add_self_loops=False,
                bias=True,
            )
            self.aft_lin = nn.Linear(node_dim + node_dim, node_dim)
        elif self.conv_type in ['GAT2', 'GSage2']:
            self.convs = nn.ModuleList()
            self.mlps = nn.ModuleList()
            if self.conv_type == 'GAT2':
                self.convs.append(
                    pyg_nn.GATConv(
                        in_channels=[obs_dim + action_dim, obs_dim],
                        out_channels=node_dim,
                        heads=1,
                        concat=True,
                        negative_slope=0.2,
                        dropout=0.,
                        add_self_loops=False,
                        bias=True,
                    ))
            elif self.conv_type == 'GSage2':
                self.convs.append(
                    pyg_nn.SAGEConv(
                        in_channels=[obs_dim + action_dim, obs_dim],
                        out_channels=node_dim,
                        normalize=normalize_emb,
                        bias=True,
                    ))
            self.mlps.append(nn.Linear(obs_dim + action_dim, node_dim))

            for l in range(1, num_layer):
                if self.conv_type == 'GAT2':
                    self.convs.append(
                        pyg_nn.GATConv(
                            in_channels=[node_dim, node_dim],
                            out_channels=node_dim,
                            heads=1,
                            concat=True,
                            negative_slope=0.2,
                            dropout=0.,
                            add_self_loops=False,
                            bias=True,
                        ))
                elif self.conv_type == 'GSage2':
                    self.convs.append(
                        pyg_nn.SAGEConv(
                            in_channels=[node_dim, node_dim],
                            out_channels=node_dim,
                            normalize=normalize_emb,
                            bias=True,
                        ))
                if l < num_layer - 1:
                    self.mlps.append(nn.Linear(node_dim, node_dim))

        elif self.conv_type in ['GSage3', 'GAT3']:
            self.obs_convs = nn.ModuleList()
            self.action_convs = nn.ModuleList()
            self.mlps = nn.ModuleList()
            if self.conv_type == 'GSage3':
                self.obs_convs.append(
                    pyg_nn.SAGEConv(
                        in_channels=obs_dim,
                        out_channels=node_dim,
                        normalize=normalize_emb,
                        bias=True,
                    ))
                self.action_convs.append(
                    pyg_nn.SAGEConv(
                        in_channels=[node_dim + action_dim, obs_dim],
                        out_channels=node_dim,
                        normalize=normalize_emb,
                        bias=True,
                    ))
                for l in range(1, num_layer):
                    self.obs_convs.append(
                        pyg_nn.SAGEConv(
                            in_channels=node_dim,
                            out_channels=node_dim,
                            normalize=normalize_emb,
                            bias=True,
                        ))
                    self.action_convs.append(
                        pyg_nn.SAGEConv(
                            in_channels=[node_dim + action_dim, node_dim],
                            out_channels=node_dim,
                            normalize=normalize_emb,
                            bias=True,
                        ))
            elif self.conv_type == 'GAT3':
                self.obs_convs.append(
                    pyg_nn.GATConv(
                        in_channels=obs_dim,
                        out_channels=node_dim,
                        heads=1,
                        concat=True,
                        negative_slope=0.2,
                        dropout=0.,
                        add_self_loops=True,
                        bias=True,
                    ))
                self.action_convs.append(
                    pyg_nn.GATConv(
                        in_channels=[node_dim + action_dim, obs_dim],
                        out_channels=node_dim,
                        heads=1,
                        concat=True,
                        negative_slope=0.2,
                        dropout=0.,
                        add_self_loops=False,
                        bias=True,
                    ))
                self.mlps.append(nn.Linear(obs_dim + node_dim, node_dim))
                # deeper layers for the GAT3 branch; this loop belongs inside the
                # elif above, since GSage3 already builds its own stack
                for l in range(1, num_layer):
                    self.obs_convs.append(
                        pyg_nn.GATConv(
                            in_channels=node_dim,
                            out_channels=node_dim,
                            heads=1,
                            concat=True,
                            negative_slope=0.2,
                            dropout=0.,
                            add_self_loops=True,
                            bias=True,
                        ))
                    self.action_convs.append(
                        pyg_nn.GATConv(
                            in_channels=[node_dim + action_dim, node_dim],
                            out_channels=node_dim,
                            heads=1,
                            concat=True,
                            negative_slope=0.2,
                            dropout=0.,
                            add_self_loops=False,
                            bias=True,
                        ))
                    self.mlps.append(nn.Linear(node_dim + node_dim, node_dim))

        self.output_activation = get_activation(output_activation)
Example 13
 def __init__(self, in_c, out_c, hid_c=64):
     super(graphSAGE, self).__init__()
     self.conv1 = pyg_nn.SAGEConv(in_c, hid_c)
     self.conv2 = pyg_nn.SAGEConv(hid_c, out_c)
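A forward pass and a usage line to round out this minimal model; both are assumptions added for illustration (the original snippet defines only the two layers) and presume import torch.nn.functional as F:

 def forward(self, data):
     # data is a torch_geometric.data.Data object with .x and .edge_index
     x = F.relu(self.conv1(data.x, data.edge_index))
     return self.conv2(x, data.edge_index)

# usage sketch:
# model = graphSAGE(in_c=dataset.num_node_features, out_c=dataset.num_classes)
# out = model(data)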