Example #1
 def __init__(self, hidden_channels):
     super(GCN, self).__init__()
     torch.manual_seed(12345)
     self.conv1 = geo_nn.GCNConv(dataset.num_node_features, hidden_channels)
     self.conv2 = geo_nn.GCNConv(hidden_channels, hidden_channels)
     self.conv3 = geo_nn.GCNConv(hidden_channels, hidden_channels)
     self.lin = nn.Linear(hidden_channels, dataset.num_classes)
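The snippet only shows the constructor. Below is a minimal sketch of a matching forward pass, assuming a mean-pool readout, a 0.5 dropout rate, and a `batch` vector, none of which appear in the original:

import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool

# Hypothetical forward pass for the three-layer GCN above (the readout,
# dropout rate, and argument names are assumptions, not from the original code).
def forward(self, x, edge_index, batch):
    x = self.conv1(x, edge_index).relu()
    x = self.conv2(x, edge_index).relu()
    x = self.conv3(x, edge_index)
    x = global_mean_pool(x, batch)                  # [num_graphs, hidden_channels]
    x = F.dropout(x, p=0.5, training=self.training)
    return self.lin(x)                              # [num_graphs, num_classes]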
Example #2
    def __init__(self, num_classes, num_features, loss=nn.CrossEntropyLoss()):
        super().__init__()

        self.conv1 = graph_nn.GCNConv(num_features, 128)
        self.conv2 = graph_nn.GCNConv(128, num_classes)
        self.conv_layers = [self.conv1, self.conv2]  # for the SAINT sampler
        self.loss = loss
Example #3
 def __init__(self, in_features, out_features):
     super(GAEEnc, self).__init__()
     self.gcn1 = nng.GCNConv(in_features, out_features)
     self.gcn1.reset_parameters()
     self.relu = nn.ReLU()
     self.gcn2 = nng.GCNConv(2 * out_features, out_features)
     self.gcn2.reset_parameters()
Example #4
    def __init__(self, ppi_adj, g2v_embedding, args):
        super(GEX_PPI_GCN_cat4_MLP, self).__init__()


        print('num genes : '+str(args.num_genes))
        self.bn1 = nn.BatchNorm1d(args.ecfp_nBits)
        self.bn2 = nn.BatchNorm1d(args.num_genes)
        self.bn3 = nn.BatchNorm1d(1)
        self.bn4 = nn.BatchNorm1d(1)

        self.drug_mlp1 = nn.Linear(args.ecfp_nBits, 
                    int(args.ecfp_nBits*2/3 + args.drug_embed_dim/3), bias = True)
        self.drug_mlp2 = nn.Linear(int(args.ecfp_nBits*2/3 + args.drug_embed_dim/3),
                    int(args.ecfp_nBits/3+args.drug_embed_dim*2/3), bias = True)
        self.drug_mlp3 = nn.Linear(int(args.ecfp_nBits/3+args.drug_embed_dim*2/3),
                    args.drug_embed_dim, bias = True)

        self.gcns = {}
        for i in range(args.num_gcn_hops):
            if i==0:
                self.gcns[i] = geo_nn.GCNConv(args.gene2vec_dim, args.gcn_hidden_dim1*4)
            else:
                self.gcns[i] = geo_nn.GCNConv(args.gcn_hidden_dim1*4, args.gcn_hidden_dim1*4)
        for i in range(args.num_gcn_hops):
            self.add_module('gcn_{}'.format(i), self.gcns[i])


#        self.pred_emb_dim = args.drug_embed_dim + (args.num_gcn_hops*args.gcn_hidden_dim1*args.gat_num_heads) + 2
        self.pred_emb_dim = args.drug_embed_dim + (args.num_gcn_hops*args.gcn_hidden_dim1*args.gat_num_heads) + 2

        self.pred_mlp1 = nn.Linear(self.pred_emb_dim, int(self.pred_emb_dim*2/3), bias = True)
        self.pred_mlp2 = nn.Linear(int(self.pred_emb_dim*2/3), int(self.pred_emb_dim/3), bias = True)
        self.pred_mlp3 = nn.Linear(int(self.pred_emb_dim/3), args.num_classes, bias = True)

        self.activ = nn.ReLU()
        self.activ2 = nn.Softmax(dim=1)  # explicit dim avoids PyTorch's implicit-dim deprecation warning


        #   
        self.g2v_embeddings = nn.Embedding(args.num_genes, args.gene2vec_dim)
        if args.g2v_pretrained == True:
            g2v_embedding = F.normalize(torch.from_numpy(g2v_embedding), p = 2)
            self.g2v_embeddings.weight.data.copy_(g2v_embedding)


        self.ppi_adj = ppi_adj # 2 x num edges

        #   Read out MLP
        
        self.readout_mlp1 = nn.Linear(args.num_genes,
               int(args.num_genes*2/3), bias = True)
        self.readout_mlp2 = nn.Linear(int(args.num_genes*2/3), 
                int(args.num_genes/3), bias = True)
        self.readout_mlp3 = nn.Linear(int(args.num_genes/3), 1, bias = True)
Example #5
 def __init__(self, input_dim, args):
     super(Net, self).__init__()
     self.model = args.model
     if self.model == 'GCN':
         self.conv1 = pyg_nn.GCNConv(input_dim, args.hidden_dim)
         self.conv2 = pyg_nn.GCNConv(args.hidden_dim, args.hidden_dim)
     elif self.model == 'Spline':
         self.conv1 = pyg_nn.SplineConv(input_dim, args.hidden_dim, dim=1, kernel_size=2)
         self.conv2 = pyg_nn.SplineConv(args.hidden_dim, args.hidden_dim, dim=1, kernel_size=2)
     else:
         raise ValueError('unknown conv')
     self.loss_fn = torch.nn.BCEWithLogitsLoss()
Example #6
 def __init__(self,
              input_dim: int = 4,
              output_dim: int = 1,
              hidden_dim: int = 64,
              n_train_epochs: int = 100):
     super(GCNSurrogateModel, self).__init__()
     self.gcn1 = geonn.GCNConv(input_dim, 16)
     self.gcn2 = geonn.GCNConv(16, 32)
     self.gcn3 = geonn.GCNConv(32, hidden_dim)
     self.activation = nn.Tanh()
     self.last_layer = nn.Linear(hidden_dim, output_dim)
     self.optimizer = optim.Adam(self.parameters(), lr=0.001)
     self.n_train_epochs = n_train_epochs
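Because the constructor above already owns an optimizer and an epoch count, a training helper would plausibly sit on the same class. A minimal sketch, assuming an MSE objective and a `data` object with `x`, `edge_index`, and `y` fields (all assumptions, not shown in the original):

import torch.nn.functional as F

# Hypothetical training loop for GCNSurrogateModel; the loss, the forward
# signature, and the data fields are assumptions for illustration only.
def fit(self, data):
    self.train()
    for _ in range(self.n_train_epochs):
        self.optimizer.zero_grad()
        pred = self(data.x, data.edge_index)   # assumed forward signature
        loss = F.mse_loss(pred, data.y)
        loss.backward()
        self.optimizer.step()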
Example #7
 def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, class_num,
              dropout):
     super(MultiTaskGNN, self).__init__()
     self.gc1 = gnn.GCNConv(input_feat_dim, hidden_dim1)
     self.gc2 = gnn.GCNConv(hidden_dim1, hidden_dim2)
     self.gc3 = gnn.GCNConv(hidden_dim1, hidden_dim2)
     self.gc4 = gnn.GCNConv(hidden_dim1, hidden_dim1)
     self.seq = nn.Sequential(
         nn.ReLU(),
         nn.Linear(hidden_dim1, class_num),
     )
     self.drop = dropout
     self.dc = InnerProductDecoder(dropout, act=lambda x: x)
     self.binact = StraightBin
Example #8
    def __init__(self, hp):
        super().__init__(hp)
        self.hp = hp
        mhp = self.hp.model

        # How to use pointnet on source pcd and merge it to the mesh?
        if mhp.use_pointnet:
            self.feature = PointNet2(mhp.init_dim)
        else:
            self.feature = (lambda x: torch.rand(
                (x.size(0), mhp.init_dim)).to(device=x.device))

        self.gcn1 = gnn.GCNConv(mhp.init_dim, mhp.hidden_dim1, cached=True)
        self.gcn2 = gnn.GCNConv(mhp.hidden_dim1, mhp.hidden_dim2, cached=True)
        self.gcn3 = gnn.GCNConv(mhp.hidden_dim2, 3, cached=True)
Example #9
    def __init__(self,
                 dim,
                 dropout=0.5,
                 activation=F.relu,
                 virtual_node=False,
                 virtual_node_agg=True,
                 k=4,
                 last_layer=False,
                 conv_type='gin',
                 edge_embedding=None):
        super().__init__()
        self.edge_embed = edge_embedding
        self.conv_type = conv_type
        if conv_type == 'gin+':
            self.conv = GINEPLUS(MLP(dim, dim), dim, k=k)
        elif conv_type == 'naivegin+':
            self.conv = NAIVEGINEPLUS(MLP(dim, dim), dim, k=k)
        elif conv_type == 'gin':
            self.conv = nng.GINEConv(MLP(dim, dim), train_eps=True)
        elif conv_type == 'gcn':
            self.conv = nng.GCNConv(dim, dim)
        else:
            raise ValueError('unknown conv_type: {}'.format(conv_type))
        self.norm = nn.BatchNorm1d(dim)
        self.act = activation or nn.Identity()
        self.last_layer = last_layer

        self.dropout_ratio = dropout

        self.virtual_node = virtual_node
        self.virtual_node_agg = virtual_node_agg
        if self.virtual_node and self.virtual_node_agg:
            self.vn_aggregator = VNAgg(dim, conv_type=conv_type)
Example #10
    def __init__(self, state_size, depth, params, device="cpu"):
        super(EntireGNN, self).__init__()
        self.state_size = state_size
        self.depth = depth
        self.params = params
        self.device = device

        self.embedding_size = self.params.embedding_size
        self.hidden_size = self.params.hidden_size
        self.pos_size = self.params.pos_size
        self.dropout = self.params.dropout
        self.nonlinearity = self.params.nonlinearity.get_true_key()

        self.nl = (
            nn.ReLU(inplace=True) if self.nonlinearity == "relu" else nn.Tanh()
        )
        self.gnn_conv = nn.ModuleList()
        sizes = (
            [state_size] +
            [self.hidden_size] * (self.depth - 2) +
            [self.embedding_size]
        )
        for i in range(1, len(sizes)):
            self.gnn_conv.append(
                gnn.GCNConv(sizes[i - 1], sizes[i])
            )
Example #11
 def build_conv_model(self, input_dim, hidden_dim):
     # refer to pytorch geometric nn module for different implementation of GNNs.
     if self.task == 'node':
         return pyg_nn.GCNConv(input_dim, hidden_dim)
     else:
         return pyg_nn.GINConv(nn.Sequential(nn.Linear(input_dim, hidden_dim),
                               nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
Example #12
    def __init__(self, embedding_num):
        super(TypeInferModel, self).__init__()

        self.embedding = nn.Embedding(embedding_num, 128)

        self.c1 = tgnn.GCNConv(128, 256)
        self.c2 = tgnn.GCNConv(256, 512)
        #self.c3 = tgnn.SGConv(512, 512)
        #self.c4 = tgnn.SGConv(512, 512)

        self.norm1 = nn.BatchNorm1d(256)
        self.norm2 = nn.BatchNorm1d(512)
        #self.norm3 = nn.BatchNorm1d(512)
        #self.norm4 = nn.BatchNorm1d(512)

        self.dense1 = nn.Linear(512, 4096)
        self.dense2 = nn.Linear(4096, 6)
Example #13
    def __init__(self, in_channels, out_channels):
        """Initializes the GCN encoder

        Parameters
        ----------
        in_channels : int
            The number of channels in the input graph nodes
        out_channels : int
            The number of dimensions in the embeddings
        """
        super(Encoder, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels * 5
        self.conv1 = gnn.GCNConv(in_channels, 2 * out_channels, cached=False)
        self.conv2 = gnn.GCNConv(2 * out_channels,
                                 2 * out_channels,
                                 cached=False)
        self.conv3 = gnn.GCNConv(2 * out_channels, out_channels, cached=False)
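One plausible way to use this encoder is inside PyTorch Geometric's GAE wrapper for link reconstruction. The sketch below assumes `Encoder.forward(x, edge_index)` returns node embeddings, which the snippet above does not show; the dummy tensors are added only for illustration:

import torch
from torch_geometric.nn import GAE

# Hypothetical usage: wrap the encoder in a (non-variational) graph auto-encoder.
# Assumes Encoder.forward(x, edge_index) -> node embeddings.
x = torch.randn(100, 16)                      # 100 nodes, 16 input features
edge_index = torch.randint(0, 100, (2, 400))  # random edges for illustration
model = GAE(Encoder(in_channels=16, out_channels=32))
z = model.encode(x, edge_index)               # node embeddings
loss = model.recon_loss(z, edge_index)        # reconstruction loss on observed edges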
Example #14
    def __init__(self, n_features, lr=1e-3):
        '''
        n_features: number of features from the dataset, should be 37
        '''
        super(GraphNet, self).__init__()
        # define your GNN model here
        self.conv1 = geo_nn.GCNConv(n_features,
                                    64,
                                    cached=False,
                                    normalize=True)
        self.conv2 = geo_nn.GCNConv(64, 128, cached=False, normalize=True)
        self.conv3 = geo_nn.GCNConv(128, 256, cached=False, normalize=True)

        self.linear1 = nn.Linear(256, 128)
        self.bn1 = nn.BatchNorm1d(128)
        self.linear2 = nn.Linear(128, 1)

        self.optimizer = torch.optim.Adam(self.parameters(), lr=lr)
Example #15
 def build_conv_model(self, input_dim, output_dim):
     args = self.args
     if args.method == 'base':  # sage with add agg
         conv_model = MyConv(input_dim, output_dim)
     elif args.method == 'gcn':
         conv_model = pyg_nn.GCNConv(input_dim, output_dim)
     elif args.method == 'gin':
         conv_model = pyg_nn.GINConv(
             nn.Sequential(nn.Linear(input_dim, output_dim), nn.ReLU(),
                           nn.Linear(output_dim, output_dim)))
     return conv_model
Example #16
def build_conv_layer(input_dim: int, hidden_dim: int,
                     task: str = 'graph') -> pyg_nn.MessagePassing:

    if task == 'graph':
        return pyg_nn.GINConv(nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        ))
    else:
        return pyg_nn.GCNConv(input_dim, hidden_dim)
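A short usage sketch for this helper, with dummy tensors and a two-layer stack added purely for illustration:

import torch
import torch.nn as nn

# Hypothetical consumer of build_conv_layer: stack two node-task layers
# (the GCNConv branch) and run them on random data.
x = torch.randn(50, 8)                        # 50 nodes, 8 features
edge_index = torch.randint(0, 50, (2, 200))   # random edges for illustration
layers = nn.ModuleList([
    build_conv_layer(8, 32, task='node'),
    build_conv_layer(32, 32, task='node'),
])
for layer in layers:
    x = layer(x, edge_index).relu()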
Example #17
    def __init__(self, hparams, node_dim, edge_dim):
        super(GCN, self).__init__()

        self.node_dim = node_dim
        self.edge_dim = edge_dim
        self.hparams = hparams
        self.output_dim = 1

        # Linear atom embedding
        self.linatoms = torch.nn.Linear(self.node_dim,
                                        hparams['conv_base_size'])

        # Graph Convolution
        emb_dim = hparams['emb_dim']
        conv_dims = net_pattern(hparams['conv_n_layers'],
                                hparams['conv_base_size'],
                                hparams['conv_ratio']) + [emb_dim]
        conv_layers = []
        for index in range(hparams['conv_n_layers']):
            conv_layers.append(
                gnn.GCNConv(conv_dims[index],
                            conv_dims[index + 1],
                            cached=False))

        self.graph_conv = nn.ModuleList(conv_layers)
        if self.hparams['conv_batchnorm']:
            self.bn = nn.ModuleList(
                [nn.BatchNorm1d(dim) for dim in conv_dims[1:]])
        # Graph embedding
        if hparams['emb_set2set']:
            self.graph_emb = gnn.Set2Set(emb_dim, processing_steps=3)
            emb_dim = emb_dim * 2
        else:
            self.graph_emb = nn.Sequential(nn.Linear(emb_dim, emb_dim),
                                           str2act(hparams['emb_act']))

        # Build mlp
        self.using_mlp = hparams['mlp_layers'] > 0
        if self.using_mlp:
            self.mlp, last_dim = make_mlp(emb_dim, hparams['mlp_layers'],
                                          hparams['mlp_dim_ratio'],
                                          hparams['mlp_act'],
                                          hparams['mlp_batchnorm'],
                                          hparams['mlp_dropout'])
        else:
            last_dim = emb_dim

        # Prediction
        self.pred = nn.Linear(last_dim, self.output_dim)

        # placeholder for the gradients
        self.gradients = None
Example #18
    def __init__(self,
                 in_dim=256,
                 h_dims=(128, 64),
                 n_layer=3,
                 add_self_loops=False,
                 use_sparse=True,
                 **kwargs):
        super(GCN, self).__init__()
        self.gcn_layer = nn.ModuleList()
        self.use_sparse = use_sparse
        # self.bns = nn.ModuleList()
        self.adj_dropout = EdgeDropout(keep_prob=0.9)
        self.bn_layer = nn.ModuleList()
        self.gcn_layer.append(
            gnn.GCNConv(in_dim,
                        h_dims[0],
                        add_self_loops=add_self_loops,
                        cached=False))
        # nn.init.kaiming_normal_(self.gcn_layer[0].weight)
        self.bn_layer.append(nn.BatchNorm1d(h_dims[0]))
        for i in range(n_layer - 1):
            self.gcn_layer.append(
                gnn.GCNConv(h_dims[0],
                            h_dims[0],
                            add_self_loops=add_self_loops,
                            cached=False))
            self.bn_layer.append(nn.BatchNorm1d(h_dims[0]))
            # self.bns.append(nn.BatchNorm1d(in_dim))
        models = [nn.BatchNorm1d(h_dims[0])]
        for dim_in, dim_out in zip(list(h_dims), h_dims[1:]):
            models.append(nn.Linear(dim_in, dim_out))
            # models.append(nn.BatchNorm1d(dim_out))
            models.append(nn.ReLU(inplace=True))
            # models.append(nn.BatchNorm1d(dim_out))

        # models[-1] = nn.LeakyReLU(inplace=True)
        # del models[-1]
        self.project = nn.Sequential(*models)
        self.bn = nn.BatchNorm1d(in_dim)
Example #19
 def build_conv_model(self, model_type, input_dim, hidden_dim):
     # use a simple GCN / GraphSage / GAT for node embedding
     if model_type == 'GCN':
         return pyg_nn.GCNConv(input_dim, hidden_dim)
     elif model_type == 'GraphSage':
         return GraphSage(input_dim, hidden_dim)
     elif model_type == 'GAT':
         return GAT(input_dim, hidden_dim)
     else:
         # fall back to GIN for whole-graph embedding
         return pyg_nn.GINConv(nn.Sequential(nn.Linear(input_dim, hidden_dim),
                               nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
Example #20
    def __init__(self, config, in_channels, out_channels):
        '''
            in_channels : number of node features
            out_channels: number of classes
        '''
        super().__init__()
        self.config = config

        self.hidden_dim = config.hidden_dim
        self.dropout_rate = config.dropout_rate

        self.conv1 = gnn.GCNConv(in_channels,
                                 self.hidden_dim,
                                 improved=False,
                                 cached=True,
                                 bias=True,
                                 normalize=True)
        self.conv2 = gnn.GCNConv(self.hidden_dim,
                                 out_channels,
                                 improved=False,
                                 cached=True,
                                 bias=True,
                                 normalize=True)
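Since both layers are built with cached=True, the normalized adjacency is computed once and reused, so this setup implies full-batch training on a fixed graph. Below is a minimal sketch of a matching forward pass and training step, assuming the usual ReLU/dropout composition and a data object with `train_mask` and `y` fields (assumptions not shown above):

import torch.nn.functional as F

# Hypothetical forward and training step for the two-layer GCN above.
# The ReLU + dropout composition and the mask/label fields are assumptions.
def forward(self, x, edge_index):
    x = F.relu(self.conv1(x, edge_index))
    x = F.dropout(x, p=self.dropout_rate, training=self.training)
    return self.conv2(x, edge_index)

def training_step(model, data, optimizer):
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    loss = F.cross_entropy(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()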
Example #21
    def build_conv_model(self, model_type, node_in_dim, node_out_dim, edge_dim,
                         edge_mode, normalize_emb, activation, aggr):

        if model_type == 'GCN':
            return pyg_nn.GCNConv(node_in_dim, node_out_dim)
        elif model_type == 'GraphSage':
            return pyg_nn.SAGEConv(node_in_dim, node_out_dim)
        elif model_type == 'GAT':
            return pyg_nn.GATConv(node_in_dim, node_out_dim)
        elif model_type == 'EGCN':
            return EGCNConv(node_in_dim, node_out_dim, edge_dim, edge_mode)
        elif model_type == 'EGSAGE':
            return EGraphSage(node_in_dim, node_out_dim, edge_dim, activation,
                              edge_mode, normalize_emb, aggr)
Example #22
def build_conv_model(conv_type, node_in_dim, node_out_dim):
    if conv_type == 'GSage':
        return pyg_nn.SAGEConv(node_in_dim, node_out_dim)
    elif conv_type == 'GCN':
        return pyg_nn.GCNConv(node_in_dim, node_out_dim)
    elif conv_type == 'GAT':
        return pyg_nn.GATConv(node_in_dim, node_out_dim)
    elif conv_type == 'GSage2':
        from conv_layers import GraphSage2
        return GraphSage2(node_in_dim, node_out_dim)
    elif conv_type == 'GSageW':
        from conv_layers import GraphSageW
        return GraphSageW(node_in_dim, node_out_dim)
    else:
        raise NotImplementedError
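A sketch of how such a factory is typically consumed when assembling a multi-layer GNN; the stacking helper below is an assumption added for illustration:

import torch.nn as nn

# Hypothetical consumer of build_conv_model: build an n-layer stack of one conv type.
def build_gnn_stack(conv_type, in_dim, hidden_dim, n_layers):
    dims = [in_dim] + [hidden_dim] * n_layers
    return nn.ModuleList([
        build_conv_model(conv_type, dims[i], dims[i + 1]) for i in range(n_layers)
    ])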
Example #23
File: gcn.py Project: xduan7/MoReL
    def __init__(self,
                 node_attr_dim: int,
                 state_dim: int = 16,
                 num_conv: int = 2,
                 out_dim: int = 1,
                 dropout: float = 0.2):

        super(GCN, self).__init__()

        self.__dropout = dropout

        # Convolution layers
        # Note: if 'cached' were True, the normalized adjacency would be computed
        # once and reused, which only suits a fixed graph; it is kept False here.
        self.__conv_layers = nn.ModuleList([
            pyg_nn.GCNConv(node_attr_dim if (i == 0) else state_dim,
                           out_dim if (i == (num_conv - 1)) else state_dim,
                           cached=False) for i in range(num_conv)
        ])
Example #24
    def __init__(self,
                 GCN_hidden_channels: List[int],
                 perceptron_hidden_dims: List[int],
                 board_representation_dim: int,
                 dropout: int = 0):
        """
        Translates the board to a latent representation understandable by the main Q-net.
        Implementation: GCNConv+batchnorm layers followed by FC layers
        :param GCN_hidden_channels: list of output channel sizes for the GCNConv+batchnorm layers
        :param perceptron_hidden_dims: list of FC hidden layer dimensions
        :param board_representation_dim: dimension of output representation of board
        :param dropout: probability of performing dropout.  0 for no dropout
        """
        super().__init__()
        self.GCN_modules = []
        self.FC_modules = []

        #GCNConv+pooling+batchnorm+relu layers
        in_channels = 1  # TODO: calculate?
        for GCN_layer_channels in GCN_hidden_channels:
            self.GCN_modules.append(
                tgnn.GCNConv(in_channels=in_channels,
                             out_channels=GCN_layer_channels))
            self.GCN_modules.append(nn.ReLU())
            self.GCN_modules.append(
                tgnn.BatchNorm(in_channels=GCN_layer_channels))
            in_channels = GCN_layer_channels
        # register the conv/activation/norm layers so their parameters are tracked
        self.GCN_modules = nn.ModuleList(self.GCN_modules)

        # FC layers
        self.FC_modules.append(nn.Flatten())
        in_dim = in_channels * (
            19 + 14 + 18 + 11
        )  # TODO: calculate the output size from the Flatten module
        for FC_layer_dim in perceptron_hidden_dims:
            self.FC_modules.append(
                nn.Sequential(
                    nn.Linear(in_features=in_dim, out_features=FC_layer_dim),
                    nn.ReLU(), nn.Dropout(p=dropout)))
            in_dim = FC_layer_dim
        self.FC_modules.append(
            nn.Linear(in_features=in_dim,
                      out_features=board_representation_dim))
        self.FC_modules = nn.Sequential(*self.FC_modules)
Example #25
def build_gnn_conv_model(conv_type, node_in_dim, node_out_dim):
    if conv_type == 'GSage':
        return pyg_nn.SAGEConv(node_in_dim,
                               node_out_dim,
                               normalize=False,
                               bias=True)
    elif conv_type == 'GCN':
        return pyg_nn.GCNConv(node_in_dim,
                              node_out_dim,
                              add_self_loops=False,
                              normalize=True,
                              bias=True)
    elif conv_type == 'GAT':
        return pyg_nn.GATConv(node_in_dim,
                              node_out_dim,
                              heads=1,
                              concat=True,
                              negative_slope=0.2,
                              dropout=0.,
                              add_self_loops=False,
                              bias=True)
    elif conv_type == 'Tran':
        gnn = pyg_nn.TransformerConv(node_in_dim,
                                     node_out_dim,
                                     heads=1,
                                     concat=True,
                                     dropout=0.,
                                     bias=True)
        gnn.lin_edge = None  # remove redundant parameters
        return gnn
    elif conv_type == 'GSage2':
        from rlkit.torch.networks.conv_layers import GraphSage2
        return GraphSage2(node_in_dim, node_out_dim)
    elif conv_type == 'GSageW':
        from rlkit.torch.networks.conv_layers import GraphSageW
        return GraphSageW(node_in_dim, node_out_dim)
    else:
        raise NotImplementedError
Example #26
 def build_layer(self, args, input_dim, output_dim):
     if args.model_type == 'GCN':
         return pyg_nn.GCNConv(input_dim, output_dim)
     elif args.model_type == 'GraphSage':
         return GraphSage(input_dim, output_dim)
     elif args.model_type == 'GAT':
         return GAT(input_dim, output_dim)
     elif args.model_type == 'Gate':
         return pyg_nn.GatedGraphConv(output_dim, 3)
     elif args.model_type == 'ARMA':
         return pyg_nn.ARMAConv(input_dim,
                                output_dim,
                                num_stacks=3,
                                num_layers=2,
                                dropout=args.dropout)
     # Warning, high memory requirements.
     elif args.model_type == 'AGNN':
         return pyg_nn.AGNNConv()
     elif args.model_type == 'TAG':
         return pyg_nn.TAGConv(input_dim, output_dim, K=3)
     elif args.model_type == 'APPNP':
         return pyg_nn.APPNP(K=10, alpha=0.1)
     elif args.model_type == 'Feast':
         return pyg_nn.FeaStConv(input_dim, output_dim, heads=3)
Example #27
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 seq_len,
                 head_num,
                 qs_graph_dir,
                 device,
                 dropout,
                 n_hop,
                 gcn_type,
                 gcn_layer_num,
                 gcn_on,
                 pretrained_embedding=None,
                 freeze=True):
        """[summary]

        Args:
            input_dim ([type]): [description]
            hidden_dim ([type]): [description]
            output_dim ([type]): output dim of GCN
            seq_len ([type]): [description]
            head_num ([type]): [description]
            qs_graph_dir ([type]): [description]
        """
        super(Model, self).__init__()

        self.device = device

        self.gcn_on = gcn_on

        if pretrained_embedding is not None:
            self.pretrained_embedding = pretrained_embedding
        else:
            self.pretrained_embedding = None

        if gcn_type == 'sgconv' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.SGConv(input_dim, output_dim,
                                                K=n_hop))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.SGConv(input_dim, hidden_dim,
                                                K=n_hop))
            else:
                raise ValueError("Unsupported gcn_layer_num {}")

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(
                        pyg_nn.SGConv(hidden_dim, output_dim, K=n_hop))
                else:
                    self.convs.append(
                        pyg_nn.SGConv(hidden_dim, hidden_dim, K=n_hop))
        elif gcn_type == 'gconv' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.GCNConv(input_dim, output_dim))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.GCNConv(input_dim, hidden_dim))
            else:
                raise ValueError("Unsupported gcn_layer_num {}")

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.GCNConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.GCNConv(hidden_dim, hidden_dim))
        elif gcn_type == 'gat' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(
                    pyg_nn.GATConv(input_dim,
                                   output_dim,
                                   heads=5,
                                   concat=False))
            elif gcn_layer_num > 1:
                self.convs.append(
                    pyg_nn.GATConv(input_dim,
                                   hidden_dim,
                                   heads=5,
                                   concat=False))
            else:
                raise ValueError("Unsupported gcn_layer_num {}")

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.GATConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.GATConv(hidden_dim, hidden_dim))
        elif gcn_type == 'sage' and gcn_on:
            self.gcn_layer_num = gcn_layer_num
            self.convs = nn.ModuleList()
            if gcn_layer_num == 1:
                self.convs.append(pyg_nn.SAGEConv(input_dim, output_dim))
            elif gcn_layer_num > 1:
                self.convs.append(pyg_nn.SAGEConv(input_dim, hidden_dim))
            else:
                raise ValueError("Unsupported gcn_layer_num {}")

            for i in range(self.gcn_layer_num - 1):
                if i == self.gcn_layer_num - 2:
                    self.convs.append(pyg_nn.SAGEConv(hidden_dim, output_dim))
                else:
                    self.convs.append(pyg_nn.SAGEConv(hidden_dim, hidden_dim))

        self.dropout = dropout

        with open(qs_graph_dir, "r") as src:
            self.qs_graph = json.load(src)

        # ! input shape should be (seq_len - 1)
        self.seq_len = seq_len

        self.correctness_embedding_layer = nn.Embedding(2, input_dim)

        if self.pretrained_embedding is None:
            self.node_embedding_layer = nn.Embedding(len(self.qs_graph),
                                                     input_dim)

        self.linears = nn.ModuleList()
        self.linears.append(nn.Linear(output_dim * 2, output_dim, bias=True))

        for _ in range(3):
            self.linears.append(nn.Linear(output_dim, output_dim, bias=False))

        self.linears.append(nn.Linear(output_dim, 1))

        self.MHA = nn.MultiheadAttention(embed_dim=output_dim,
                                         num_heads=head_num,
                                         dropout=self.dropout[1])

        self.FFN = nn.ModuleList()

        for _ in range(2):
            self.FFN.append(nn.Linear(output_dim, output_dim))

        self.lns = nn.ModuleList()
        self.lns.append(nn.LayerNorm(hidden_dim))
        self.lns.append(nn.LayerNorm(hidden_dim))

        self.lns.append(nn.LayerNorm(output_dim))
        self.lns.append(nn.LayerNorm(output_dim))

        self.pos_embedding = nn.Embedding(seq_len - 1, output_dim)

        self.dropout_layers = nn.ModuleList()

        self.dropout_layers.append(nn.Dropout(p=self.dropout[0]))
        self.dropout_layers.append(nn.Dropout(p=self.dropout[2]))
Example #28
 def __init__(self):
     super().__init__()
     self.bn = BatchNorm1d(K)
     self.conv_in = geom.GCNConv(K, K)
     self.conv_out = geom.GCNConv(K, K, flow='target_to_source')
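With the default flow='source_to_target', a GCNConv aggregates over a node's incoming edges; flow='target_to_source' reverses this, so conv_out aggregates over outgoing edges. A minimal forward sketch combining both directions (the BatchNorm placement and the concatenation are assumptions added here):

import torch

# Hypothetical forward pass; combining the two directions by concatenation
# is an assumption for illustration only.
def forward(self, x, edge_index):
    x = self.bn(x)
    h_in = self.conv_in(x, edge_index)    # aggregates over incoming edges
    h_out = self.conv_out(x, edge_index)  # aggregates over outgoing edges
    return torch.cat([h_in, h_out], dim=-1)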
Example #29
 def __init__(self, in_c, hid_c, out_c):
     super(GraphCNN, self).__init__()
     self.conv1 = pyg_nn.GCNConv(in_channels=in_c, out_channels=hid_c)
     self.conv2 = pyg_nn.GCNConv(in_channels=hid_c, out_channels=out_c)
Example #30
 def __init__(self, input_dim, output_dim):
     super(GraphEmbedder, self).__init__()
     self.gcn_conv1 = geom.GCNConv(input_dim, input_dim * 2, bias=True)
     self.gcn_conv2 = geom.GCNConv(input_dim * 2, input_dim * 2, bias=True)
     self.gcn_conv3 = geom.GCNConv(input_dim * 2, output_dim, bias=True)
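The constructor suggests a graph-level embedder. A minimal sketch of a matching forward pass, assuming ReLU activations and a mean-pool readout (neither is shown in the original):

from torch_geometric.nn import global_mean_pool

# Hypothetical forward pass for GraphEmbedder; activations and pooling are assumptions.
def forward(self, x, edge_index, batch):
    x = self.gcn_conv1(x, edge_index).relu()
    x = self.gcn_conv2(x, edge_index).relu()
    x = self.gcn_conv3(x, edge_index)
    return global_mean_pool(x, batch)     # one embedding per graph in the batch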