Example #1
    def __init__(self,
                 num_node_features=100,
                 num_class=18,
                 hidden=16,
                 dropout_rate=0.5,
                 num_layers=2):
        super(GCN, self).__init__()
        self.first_lin = Linear(num_node_features, hidden)
        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers):
            self.convs.append(GCNConv(hidden, hidden))
        self.lin2 = Linear(hidden, num_class)
        self.dropout_rate = dropout_rate
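Only the constructor is shown; a minimal forward sketch, assuming F is torch.nn.functional and edge_index comes from the data object:

    def forward(self, x, edge_index):
        # initial node projection followed by dropout
        x = F.relu(self.first_lin(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        # message passing through the stacked GCN layers
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        # class logits, normalized for an NLL loss
        return F.log_softmax(self.lin2(x), dim=-1)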
Example #2
    def __init__(self,
                 n_node_features,
                 n_edge_features=None,
                 hiddens=32,
                 aggr="max",
                 depth=4,
                 **kwargs):
        super(SimpleGraphCenteredNet, self).__init__()
        assert hiddens % 4 == 0, "`hiddens` has to be a multiple of 4"
        self.hiddens = hiddens
        self.aggr = aggr
        self.depth = depth

        self.conv_i = GCNConv(n_node_features, hiddens)
        # GCNConv layers take (x, edge_index), so they cannot be called
        # through nn.Sequential; a ModuleList makes the manual iteration explicit
        self.convs_h = nn.ModuleList(
            [GCNConv(hiddens, hiddens) for _ in range(depth)])

        self.decoder = nn.Sequential(
            nn.Linear(hiddens, hiddens // 2),
            nn.ReLU(),
            nn.Linear(hiddens // 2, 4),
        )
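A minimal forward sketch, under the assumption that the four-way decoder output is computed per node; the hidden convolutions are iterated manually because each GCNConv needs edge_index:

    def forward(self, x, edge_index):
        x = torch.relu(self.conv_i(x, edge_index))
        for conv in self.convs_h:
            x = torch.relu(conv(x, edge_index))
        # per-node 4-way prediction
        return self.decoder(x)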
Example #3
    def _build_kg_layer(self):
        # db encoder
        self.entity_encoder = RGCNConv(self.n_entity, self.kg_emb_dim, self.n_relation, self.num_bases)
        self.entity_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # concept encoder
        self.word_encoder = GCNConv(self.kg_emb_dim, self.kg_emb_dim)
        self.word_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # gate mechanism
        self.gate_layer = GateLayer(self.kg_emb_dim)

        logger.debug('[Finish build kg layer]')
Example #4
def test_sequential_jittable():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    adj_t = SparseTensor(row=edge_index[0], col=edge_index[1]).t()

    model = Sequential('x: Tensor, edge_index: Tensor', [
        (GCNConv(16, 64).jittable(), 'x, edge_index -> x'),
        ReLU(inplace=True),
        (GCNConv(64, 64).jittable(), 'x, edge_index -> x'),
        ReLU(inplace=True),
        Linear(64, 7),
    ])
    torch.jit.script(model)(x, edge_index)

    model = Sequential('x: Tensor, edge_index: SparseTensor', [
        (GCNConv(16, 64).jittable(), 'x, edge_index -> x'),
        ReLU(inplace=True),
        (GCNConv(64, 64).jittable(), 'x, edge_index -> x'),
        ReLU(inplace=True),
        Linear(64, 7),
    ])
    torch.jit.script(model)(x, adj_t)
Example #5
    def __init__(self,
                 num_features,
                 num_classes,
                 num_hidden,
                 num_layers,
                 apply_log_softmax=True):
        super(NodeGCN, self).__init__()
        self.convs = nn.ModuleList()
        self.apply_log_softmax = apply_log_softmax
        for i in range(num_layers):
            in_features = num_features if i == 0 else num_hidden
            out_features = num_classes if i == num_layers - 1 else num_hidden
            self.convs.append(GCNConv(in_features, out_features))
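A minimal forward sketch, assuming ReLU between layers and F as torch.nn.functional:

    def forward(self, x, edge_index):
        for i, conv in enumerate(self.convs):
            x = conv(x, edge_index)
            # no nonlinearity after the final (classification) layer
            if i < len(self.convs) - 1:
                x = F.relu(x)
        return F.log_softmax(x, dim=-1) if self.apply_log_softmax else x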
Example #6
    def __init__(self, hidden_channels, num_layers, max_z, 
                 use_feature=False, node_embedding=None, 
                 dropout=0.5):
        super(GCN, self).__init__()
        self.use_feature = use_feature
        self.node_embedding = node_embedding
        self.max_z = max_z
        self.z_embedding = Embedding(self.max_z, hidden_channels)

        self.convs = torch.nn.ModuleList()
        initial_channels = hidden_channels
        if self.use_feature:
            initial_channels += dataset.num_features
        if self.node_embedding is not None:
            initial_channels += node_embedding.embedding_dim
        self.convs.append(GCNConv(initial_channels, hidden_channels))
        for _ in range(num_layers - 1):
            self.convs.append(GCNConv(hidden_channels, hidden_channels))

        self.dropout = dropout
        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, 1)
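The constructor follows the SEAL link-prediction pattern; a simplified forward sketch, assuming z holds the structural node labels, node_embedding is None, F is torch.nn.functional, and global_mean_pool is imported from torch_geometric.nn (the original forward is not shown):

    def forward(self, z, edge_index, batch, x=None):
        h = self.z_embedding(z)
        if self.use_feature and x is not None:
            # raw node features are concatenated onto the label embedding
            h = torch.cat([h, x.to(torch.float)], dim=1)
        for conv in self.convs:
            h = F.relu(conv(h, edge_index))
            h = F.dropout(h, p=self.dropout, training=self.training)
        # pool each enclosing subgraph to a single vector
        h = global_mean_pool(h, batch)
        h = F.relu(self.lin1(h))
        return self.lin2(h)  # one logit per candidate link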
Example #7
    def __init__(self, num_layers, hidden_list, activation, data):
        super(ModelGCN, self).__init__()
        assert len(hidden_list) == num_layers + 1
        self.linear_1 = Linear(data.num_features, hidden_list[0])
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(GCNConv(hidden_list[i], hidden_list[i + 1]))
        self.JK = JumpingKnowledge(mode='max')
        self.linear_2 = Linear(hidden_list[-1], data.num_class)
        if activation == "relu":
            self.activation = relu
        elif activation == "leaky_relu":
            self.activation = leaky_relu
        else:
            raise ValueError(f"unsupported activation: {activation}")
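A minimal forward sketch, assuming the chosen activation between layers; note that JumpingKnowledge in 'max' mode takes an element-wise maximum, so all entries of hidden_list[1:] must share one width:

    def forward(self, x, edge_index):
        x = self.activation(self.linear_1(x))
        xs = []
        for conv in self.convs:
            x = self.activation(conv(x, edge_index))
            xs.append(x)
        x = self.JK(xs)  # element-wise max over all layer outputs
        return self.linear_2(x)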
Example #8
    def __init__(self, in_channels, out_channels, ins_dim, dropout=0.0):

        super(gcn_seq, self).__init__()

        # 5 conv layers with BN, ReLU, and dropout in between
        self.convs = torch.nn.ModuleList(
            [GCNConv(in_channels + ins_dim, out_channels) for _ in range(5)])

        # for the last output, no batch norm
        self.bns = torch.nn.ModuleList(
            [torch.nn.BatchNorm1d(out_channels) for _ in range(5 - 1)])

        self.dropout = dropout
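No forward is shown; a sketch under the assumptions that in_channels equals out_channels (so the same concatenation works at every layer), ins holds the extra per-node features of width ins_dim, and F is torch.nn.functional:

    def forward(self, x, ins, edge_index):
        for i, conv in enumerate(self.convs):
            # extra features are re-concatenated before every conv
            x = conv(torch.cat([x, ins], dim=1), edge_index)
            # no BN/ReLU/dropout after the last conv
            if i < len(self.convs) - 1:
                x = torch.relu(self.bns[i](x))
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x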
Example #9
    def __init__(self,
                 inpt_size,
                 hidden_size,
                 output_size,
                 posemb_size,
                 dropout=0.5):
        # inpt_size: utter_hidden_size + user_embed_size
        super(GCNRNNContext, self).__init__()
        self.conv1 = GCNConv(hidden_size + posemb_size, hidden_size)
        self.conv2 = GCNConv(hidden_size, hidden_size)
        self.conv3 = GCNConv(hidden_size, hidden_size)
        self.bn1 = nn.BatchNorm1d(num_features=hidden_size)
        self.bn2 = nn.BatchNorm1d(num_features=hidden_size)
        self.bn3 = nn.BatchNorm1d(num_features=hidden_size)

        # rnn for background
        self.rnn = nn.GRU(inpt_size, hidden_size)

        self.linear = nn.Linear(hidden_size * 2, output_size)
        self.drop = nn.Dropout(p=dropout)
        self.posemb = nn.Embedding(
            100, posemb_size)  # 100 comfortably exceeds the maximum turn length
Example #10
    def __init__(self, in_feature, hidden_feature, out_feature):
        super(GraphEncoder_GCN, self).__init__()

        self.in_feature = in_feature
        self.hidden_feature = hidden_feature
        self.out_feature = out_feature

        self.conv1 = GCNConv(in_feature, hidden_feature)
        # self.conv2 = GCNConv(hidden_feature, hidden_feature)
        self.conv3 = GCNConv(hidden_feature, out_feature)

        # self.linear1 = nn.Linear(hidden_feature,hidden_feature)
        self.linear1 = nn.Linear(hidden_feature, out_feature)

        self.linear2 = nn.Linear(hidden_feature, hidden_feature)
        self.linear3 = nn.Linear(out_feature, out_feature)
        self.linear4 = nn.Linear(hidden_feature + hidden_feature + out_feature,
                                 out_feature)

        self.tanh = nn.Tanh()

        self.relu = torch.relu
Example #11
    def __init__(self, hidden_size, filter_):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

        self.conv = GCNConv(hidden_size, hidden_size)
        # self.conv2 = GCNConv(hidden_size*2, hidden_size)
        if filter_:
            self.filter = nn.Sequential(
                nn.Linear(2 * hidden_size, 1),
                nn.Sigmoid())
        else:
            self.filter = None
Example #12
    def __init__(self, node_feats, channels, out_feats, edge_feats=1):
        super(GCNNet, self).__init__()
        # note: edge_feats is accepted but never used by the layers below
        self.conv1 = GCNConv(node_feats, channels)
        self.conv2 = GCNConv(channels, channels)
        self.conv3 = GCNConv(channels, channels)
        self.conv4 = GCNConv(channels, channels)
        self.conv5 = GCNConv(channels, channels)
        self.conv9 = GCNConv(channels, out_feats)
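Only the layers are declared; a plain forward sketch chaining them with ReLU (an assumption, since the original forward is not shown):

    def forward(self, x, edge_index):
        x = torch.relu(self.conv1(x, edge_index))
        x = torch.relu(self.conv2(x, edge_index))
        x = torch.relu(self.conv3(x, edge_index))
        x = torch.relu(self.conv4(x, edge_index))
        x = torch.relu(self.conv5(x, edge_index))
        return self.conv9(x, edge_index)  # final projection to out_feats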
Example #13
import time

import numpy as np
import scipy.sparse as sparse
import torch
from torch_geometric.nn import GCNConv
from torch_geometric.utils import dense_to_sparse


def main():
    x_dim = 512
    x_len = 10000

    x = sparse.rand(x_len,
                    x_dim,
                    density=10 / x_dim,
                    format='csr',
                    dtype=np.float64)
    adj = sparse.rand(x_len,
                      x_len,
                      density=10 / x_len,
                      format='csr',
                      dtype=np.float64)
    w = sparse.rand(x_dim,
                    x_dim,
                    density=10 / x_dim,
                    format='csr',
                    dtype=np.float64)

    start = time.time()
    adj.dot(x.dot(w))
    print(time.time() - start)

    x1 = x.todense().astype(np.float64)
    adj1 = adj.todense().astype(np.float64)
    w1 = w.todense().astype(np.float64)

    start = time.time()
    adj1.dot(x1.dot(w1))
    print(time.time() - start)

    x2 = torch.tensor(x1, dtype=torch.float)
    adj2 = torch.tensor(adj1, dtype=torch.float)
    w2 = torch.tensor(w1, dtype=torch.float)

    start = time.time()
    adj2.matmul(x2.matmul(w2))
    print(time.time() - start)

    adj2alt = torch.rand((x_len, x_len), dtype=torch.float)
    start = time.time()
    adj2alt.matmul(x2.matmul(w2))
    print(time.time() - start)

    conv = GCNConv(x_dim, x_dim)
    edge_index, _ = dense_to_sparse(adj2)

    start = time.time()
    x3 = conv(x2, edge_index)
    print(time.time() - start)
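To run the benchmark as a script, the usual entry-point guard can be appended:

if __name__ == '__main__':
    main()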
Example #14
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.save_hyperparameters()

        assert kwargs["num_layers"] >= 2

        self.convs = nn.ModuleList()

        normalize = not kwargs.get("use_gdc", True)

        self.convs.append(
            GCNConv(
                kwargs["num_features"],
                kwargs["hidden_channels"],
                cached=kwargs["cached"],
                normalize=normalize,
            )
        )

        for idx in range(kwargs["num_layers"] - 2):
            self.convs.append(
                GCNConv(
                    kwargs["hidden_channels"],
                    kwargs["hidden_channels"],
                    cached=kwargs["cached"],
                    normalize=normalize,
                )
            )

        self.convs.append(
            GCNConv(
                kwargs["hidden_channels"],
                kwargs["num_classes"],
                cached=kwargs["cached"],
                normalize=normalize,
            )
        )
Example #15
    def __init__(self, g_dim, h_dim1, h_dim2, z_dim, n_classes):
        super(VAE, self).__init__()

        # encoder (activations are applied in forward; GCNConv takes no
        # activation argument, its third positional parameter is `improved`)
        self.fc1 = GCNConv(g_dim, h_dim1)
        self.fc2 = GCNConv(h_dim1, h_dim2)
        self.fc31 = GCNConv(h_dim2, z_dim)
        self.fc32 = GCNConv(h_dim2, z_dim)
        # decoder
        self.fc4 = GCNConv(z_dim, h_dim2)
        self.fc5 = GCNConv(h_dim2, h_dim1)
        self.fc6 = GCNConv(h_dim1, g_dim)
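A minimal sketch of the matching encoder path, assuming F is torch.nn.functional and the standard VAE reparameterization trick (not part of the original snippet):

    def encode(self, x, edge_index):
        h = F.relu(self.fc1(x, edge_index))
        h = F.relu(self.fc2(h, edge_index))
        # mean and log-variance heads of the latent Gaussian
        return self.fc31(h, edge_index), self.fc32(h, edge_index)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        return mu + torch.randn_like(std) * std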
Example #16
    def __init__(self,
                 num_features_xd,
                 num_features_xt,
                 latent_dim=64,
                 dropout=0.2,
                 n_output=1,
                 device='cpu',
                 **kwargs):
        super(GEFA_no_residual_drug, self).__init__()

        self.n_output = n_output
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(0.5)
        self.device = device
        self.num_rblock = 4

        # SMILES graph branch
        self.conv1_xd = GCNConv(num_features_xd, num_features_xd)
        self.conv2_xd = GCNConv(num_features_xd, num_features_xd * 2)
        self.fc_g1_d = torch.nn.Linear(num_features_xd * 2, 1024)
        self.fc_g2_d = torch.nn.Linear(1024, num_features_xt)
        self.fc_g3_d = torch.nn.Linear(num_features_xt, latent_dim * 2)

        # attention
        self.first_linear = torch.nn.Linear(num_features_xt, num_features_xt)
        self.second_linear = torch.nn.Linear(num_features_xt, 1)

        # protein graph branch
        self.conv1_xt = GCNConv(num_features_xt, latent_dim)
        self.conv2_xt = GCNConv(latent_dim, latent_dim * 2)
        self.rblock_xt = ResidualBlock(latent_dim * 2)
        self.fc_g1_t = torch.nn.Linear(latent_dim * 2, 1024)
        self.fc_g2_t = torch.nn.Linear(1024, latent_dim * 2)

        self.fc1 = nn.Linear(4 * latent_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)
Example #17
    def __init__(self, args):
        super(Net, self).__init__()

        self.args = args
        self.nhid = args.nhid

        self.num_features = args.num_features
        self.num_classes = args.num_classes

        self.alpha = args.alpha
        self.pooling_ratio = args.pooling_ratio
        self.dropout_ratio = args.dropout_ratio

        self.pooling_layer_type = args.pooling_layer_type
        self.feature_fusion_type = args.feature_fusion_type

        self.conv1 = GCNConv(self.num_features, self.nhid)
        self.pool1 = GSAPool(self.nhid,
                             pooling_ratio=self.pooling_ratio,
                             alpha=self.alpha,
                             pooling_conv=self.pooling_layer_type,
                             fusion_conv=self.feature_fusion_type)
        self.conv2 = GCNConv(self.nhid, self.nhid)
        self.pool2 = GSAPool(self.nhid,
                             pooling_ratio=self.pooling_ratio,
                             alpha=self.alpha,
                             pooling_conv=self.pooling_layer_type,
                             fusion_conv=self.feature_fusion_type)
        self.conv3 = GCNConv(self.nhid, self.nhid)
        self.pool3 = GSAPool(self.nhid,
                             pooling_ratio=self.pooling_ratio,
                             alpha=self.alpha,
                             pooling_conv=self.pooling_layer_type,
                             fusion_conv=self.feature_fusion_type)

        self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
        self.lin2 = torch.nn.Linear(self.nhid, self.nhid // 2)
        self.lin3 = torch.nn.Linear(self.nhid // 2, self.num_classes)
Example #18
class GCNWithJK(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden, mode='cat'):
        super(GCNWithJK, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(GCNConv(hidden, hidden))
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin1 = Linear(num_layers * hidden, hidden)
        else:
            self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        x = self.jump(xs)
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #19
    def __init__(self):
        super(Net, self).__init__()

        num_features = dataset.num_features
        dim = args.dim

        self.conv1 = GCNConv(num_features, dim)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        self.conv2 = GCNConv(dim, dim)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        self.conv3 = GCNConv(dim, dim)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        self.conv4 = GCNConv(dim, dim)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        self.conv5 = GCNConv(dim, dim)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1 = Linear(5 * dim, dim)
        self.fc2 = Linear(dim, 1)
Example #20
    def __init__(self,
                 aggr='mean',
                 num_embs=None,
                 hidden_size=256,
                 input_size=128,
                 output_size=128,
                 num_layers=2,
                 dropout=0.5,
                 args=None):
        super(GCNNet, self).__init__()
        if num_embs is None:
            raise ValueError("Must pass in the number of embeddings")
        self.embed = nn.Embedding(num_embs, input_size)

        self.in_layer = GCNConv(input_size, hidden_size)
        self.hidden_layers = nn.ModuleList([GCNConv(hidden_size, hidden_size) for _ in range(num_layers - 2)])
        self.out_layer = GCNConv(hidden_size, output_size)

        self.num_layers = num_layers
        self.relu = nn.ReLU()
        self.dp = nn.Dropout(p=dropout)
        self.global_aggr = GlobalAggregator(args)
Example #21
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features,
                             16,
                             cached=True,
                             normalize=not args.use_gdc)
        self.conv2 = GCNConv(16,
                             dataset.num_classes,
                             cached=True,
                             normalize=not args.use_gdc)
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_features, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)
Example #22
    def __init__(self, node_num):
        super(GCNNet, self).__init__()
        dim = hidden_dim

        # add a GRU
        self.gru_encoder = nn.GRU(2, dim, bidirectional=True, batch_first=True)
        self.lstm_fc = nn.Sequential(nn.Linear(dim * 2, dim))

        self.conv1 = ChebConv(dim, dim, K=15)
        # self.conv2 = ChebConv(dim, dim, K=10)
        # self.conv1 = ARMAConv(dim, dim, num_layers=2)
        # self.conv2 = ARMAConv(dim, dim, num_layers=2)
        # self.conv1 = GCNConv(dim, dim)
        self.conv2 = GCNConv(dim, dim)
Example #23
    def __init__(self, n_output=1, n_filters=32, embed_dim=128, num_features_xd=78, num_features_xt=25, output_dim=128, dropout=0.2):

        super(GCNNet, self).__init__()

        # SMILES graph branch
        self.n_output = n_output
        self.conv1 = GCNConv(num_features_xd, num_features_xd)
        self.conv2 = GCNConv(num_features_xd, num_features_xd * 2)
        self.conv3 = GCNConv(num_features_xd * 2, num_features_xd * 4)
        self.fc_g1 = torch.nn.Linear(num_features_xd * 4, 1024)
        self.fc_g2 = torch.nn.Linear(1024, output_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

        # protein sequence branch (1d conv)
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000, out_channels=n_filters, kernel_size=8)
        self.fc1_xt = nn.Linear(32*121, output_dim)

        # combined layers
        self.fc1 = nn.Linear(2*output_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.out = nn.Linear(512, self.n_output)
Example #24
    def get_layer(gnn_type):
        if gnn_type == 'ChebConv':
            layer = ChebConv(h, h, K=2)
        elif gnn_type == 'GCNConv':
            layer = GCNConv(h, h)
        elif gnn_type == 'GINConv':
            dnn = nn.Sequential(nn.Linear(h, h), nn.LeakyReLU(),
                                nn.Linear(h, h))
            layer = GINConv(dnn)
        elif gnn_type == 'SAGEConv':
            layer = SAGEConv(h, h, normalize=True)
        elif gnn_type == 'GATConv':
            layer = GATConv(h, h)
        else:
            raise NotImplementedError
        return layer
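A hypothetical usage sketch, assuming the hidden width h is in scope and the factory is used to stack homogeneous message-passing layers:

    # four GCN layers of width h, held in a ModuleList for manual iteration
    convs = torch.nn.ModuleList([get_layer('GCNConv') for _ in range(4)])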
Example #25
    def __init__(self, nfeat, nhid, nout, n_nodes, window, dropout):
        super(MPNN_TSFM, self).__init__()
        self.window = window
        self.n_nodes = n_nodes
        #self.batch_size = batch_size
        self.nhid = nhid
        self.nfeat = nfeat
        self.conv1 = GCNConv(nfeat, nhid)
        self.conv2 = GCNConv(nhid, nhid)
        
        self.bn1 = nn.BatchNorm1d(nhid)
        self.bn2 = nn.BatchNorm1d(nhid)
        
        self.tsfm1 = TransformerEncoderLayer(2*nhid, 1, nhid, dropout)
        self.tsfm2 = TransformerEncoderLayer(2*nhid, 1, nhid, dropout)

        
        #self.fc1 = nn.Linear(2*nhid+window*nfeat, nhid)
        self.fc1 = nn.Linear(4*nhid+window*nfeat, nhid)
        self.fc2 = nn.Linear(nhid, nout)
        
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
Example #26
    def __init__(self,
                 node_size,
                 embed_dim,
                 embedding_finetune=False,
                 hidden_dim=512,
                 num_class=2,
                 dropout=0.5,
                 layers=2):
        super(GCNNet, self).__init__()
        self.node_size = node_size
        self.embed_dim = embed_dim
        self.embed = torch.nn.Embedding(num_embeddings=node_size,
                                        embedding_dim=embed_dim)
        self.embedding_finetune = embedding_finetune
        self.convs = torch.nn.ModuleList(
            [GCNConv(embed_dim, hidden_dim, normalize=False)])
        self.convs.extend([
            GCNConv(hidden_dim, hidden_dim, normalize=False)
            for _ in range(layers - 2)
        ])
        self.convs.append(GCNConv(hidden_dim, num_class, normalize=False))
        self._weight_init_()
        self.dropout = dropout
Example #27
    def __init__(self, channels, reduction=4, k=2):
        super(DyReLU, self).__init__()
        self.channels = channels
        self.k = k
        self.fc1 = GCNConv(channels, channels // reduction)
        #         self.fc1 = GraphConv(channels, channels // reduction)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(channels // reduction, 2 * k)
        self.sigmoid = nn.Sigmoid()

        self.register_buffer('lambdas',
                             torch.Tensor([1.] * k + [0.5] * k).float())
        self.register_buffer('init_v',
                             torch.Tensor([1.] + [0.] * (2 * k - 1)).float())
Example #28
    def __init__(self,
                 num_features,
                 output_channels,
                 nb_neurons=128,
                 **kwargs):
        """

        Parameters
        ----------
        num_features: int
            number of node features
        output_channels: int
            number of classes
        """
        super(GCNConv1TPK, self).__init__()

        self.conv1 = GCNConv(num_features, nb_neurons)
        self.conv2 = GCNConv(nb_neurons, nb_neurons)
        self.pool = TopKPooling(nb_neurons, ratio=0.8)
        self.conv3 = GCNConv(nb_neurons, nb_neurons)

        self.lin1 = torch.nn.Linear(nb_neurons, 64)
        self.lin2 = torch.nn.Linear(64, output_channels)
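A forward sketch, assuming global mean pooling after the TopK selection, F as torch.nn.functional, and global_mean_pool from torch_geometric.nn (the original forward is not shown):

    def forward(self, x, edge_index, batch):
        x = F.relu(self.conv1(x, edge_index))
        x = F.relu(self.conv2(x, edge_index))
        # keep the top 80% of nodes, then continue convolving
        x, edge_index, _, batch, _, _ = self.pool(x, edge_index, None, batch)
        x = F.relu(self.conv3(x, edge_index))
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        return self.lin2(x)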
Example #29
    def __init__(self,
                 d1=90,
                 d2=80,
                 d3=50,
                 num_features=1,
                 num_classes=1,
                 num_layers=4,
                 **kwargs):
        super(Net6, self).__init__()
        # note: num_classes is accepted but unused; the head below is a
        # single-output regression head
        self.conv1 = GCNConv(num_features, d1)
        self.convs = torch.nn.ModuleList()
        for _ in range(num_layers - 1):
            self.convs.append(GCNConv(d1, d1))
        self.bn1 = nn.BatchNorm1d(d1)
        self.fc1 = nn.Linear(d1, d2)
        self.bn2 = nn.BatchNorm1d(d2)
        self.fc2 = nn.Linear(d2, d3)
        self.bn3 = nn.BatchNorm1d(d3)
        self.fc3 = nn.Linear(d3, 1)  # one output for regression
        self.num_layers = num_layers
        self.d1 = d1
        self.d2 = d2
        self.d3 = d3
Example #30
    def __init__(self, in_features, out_features, aggregation, attention,
                 **kwargs):
        super().__init__()
        assert attention in ("constant", "gcn", "gat")
        if attention == "constant":
            self.op = ConstantConv(in_features, out_features)
        elif attention == "gcn":
            self.op = GCNConv(in_features, out_features)
        else:
            self.op = GATConv(in_features,
                              out_features,
                              dropout=config.DROPOUT)
        assert aggregation in ("add", "mean", "max")
        # override the layer's aggregation scheme (add/mean/max)
        self.op.aggr = aggregation