Code Example #1
    def __init__(self, data, decode_modelname, train_pos_edge_adj_t, num_hidden_channels, num_layers, jk_mode='cat', alpha=0.1, theta=0.5, shared_weights = True, activation = None, dropout = 0.0):
        '''
        Args:
            data (torch_geometric.data.Data): Graph data.
            decode_modelname (str): Name of the decoder model (e.g. 'VGAE', 'Shifted-VGAE').
            train_pos_edge_adj_t (torch.SparseTensor[2, num_pos_edges]): Links (positive edges) of the training data.
            num_hidden_channels (int or None): Output dimension of each hidden layer. The same value is used for every layer.
            num_layers (int or None): Number of hidden layers.
            jk_mode (str): Aggregation scheme used by JK-Net ('cat', 'max' or 'lstm'). (Default: 'cat')
            alpha (float): Weight of the initial representation added after each convolution. (Default: 0.1)
            theta (float): Hyperparameter controlling the identity-mapping strength in GCN2Conv (beta_l = log(theta / l + 1)). (Default: 0.5)
            shared_weights (bool): Whether the propagated and initial representations share the same weight matrix in GCN2Conv. (Default: True)
            activation (str or None): Activation function to use: None, "relu", "leaky_relu", or "tanh". (Default: None)
            dropout (float): Dropout rate applied at each layer. (Default: 0.0)
        '''
        super(GCNIIwithJK, self).__init__()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.decode_modelname = decode_modelname

        self.num_layers = num_layers
        self.train_pos_edge_adj_t = train_pos_edge_adj_t
        self.hidden_channels_str = (f'{num_hidden_channels}_'*num_layers)[:-1]

        self.lins = torch.nn.ModuleList()
        # self.lins.append(torch.nn.Linear(data.x.size(1), num_hidden_channels))
        self.lins.append(GCNConv(data.x.size(1), num_hidden_channels))

        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            self.convs.append(GCN2Conv(num_hidden_channels, alpha, theta, layer+1, shared_weights = shared_weights, normalize = False))

        if self.decode_modelname in ['VGAE', 'Shifted-VGAE']:
            self.convs.append(GCN2Conv(num_hidden_channels, alpha, theta, layer+1, shared_weights = shared_weights, normalize = False))

        self.jk_mode = jk_mode
        self.jumps = torch.nn.ModuleList()
        for layer in range(num_layers//4):
            self.jumps.append(JumpingKnowledge(jk_mode))

        if self.jk_mode == 'cat':
            self.lins.append(torch.nn.Linear(4 * num_hidden_channels, num_hidden_channels))

        self.batchnorms = torch.nn.ModuleList()
        for layer in range(num_layers - 1 + (num_layers%4==0)):
            self.batchnorms.append(torch.nn.BatchNorm1d(num_hidden_channels))

        self.activation = activation
        self.dropout = dropout    
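
The constructor above creates one JumpingKnowledge module per block of four layers (num_layers // 4) and, for jk_mode='cat', a Linear that maps 4 * num_hidden_channels back to num_hidden_channels, suggesting each module aggregates four consecutive layer outputs. A minimal standalone sketch of that aggregation step (illustrative only, not part of the class above):

import torch
from torch_geometric.nn import JumpingKnowledge

hidden = 16
xs = [torch.randn(4, hidden) for _ in range(4)]   # outputs of four consecutive GCN2Conv layers (4 nodes each)
jump = JumpingKnowledge('cat')                    # concatenates along the feature dimension
out = jump(xs)                                    # shape: [4, 4 * hidden]
out = torch.nn.Linear(4 * hidden, hidden)(out)    # project back to hidden size, as in the constructor above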
Code Example #2
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 dropout,
                 alpha=0.5,
                 theta=1.0,
                 shared_weights=True):
        super(GCNII, self).__init__()

        self.conv_in = GCNConv(in_channels, hidden_channels, normalize=False)

        self.convs = torch.nn.ModuleList()
        for l in range(num_layers):
            self.convs.append(
                GCN2Conv(hidden_channels,
                         alpha,
                         theta,
                         layer=l + 1,
                         shared_weights=shared_weights,
                         normalize=False))

        self.conv_out = GCNConv(hidden_channels, out_channels, normalize=False)

        self.dropout = dropout
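
Only the constructor is shown in Code Example #2. A plausible matching forward pass is sketched below (an assumption, not the project's code); since every layer is built with normalize=False, the adjacency passed in is expected to be GCN-normalized in advance.

import torch.nn.functional as F

def forward(self, x, adj_t):
    x = F.dropout(x, self.dropout, training=self.training)
    x = x_0 = self.conv_in(x, adj_t).relu()       # x_0 is the initial representation reused by every GCN2Conv
    for conv in self.convs:
        x = F.dropout(x, self.dropout, training=self.training)
        x = conv(x, x_0, adj_t).relu()
    x = F.dropout(x, self.dropout, training=self.training)
    return self.conv_out(x, adj_t)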
Code Example #3
import torch
from torch_sparse import SparseTensor
from torch_geometric.nn import GCN2Conv


def test_gcn2_conv():
    x = torch.randn(4, 16)
    x_0 = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    value = torch.rand(row.size(0))
    adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))
    adj1 = adj2.set_value(None)

    conv = GCN2Conv(16, alpha=0.2)
    assert conv.__repr__() == 'GCN2Conv(16, alpha=0.2, beta=1.0)'
    out1 = conv(x, x_0, edge_index)
    assert out1.size() == (4, 16)
    assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6)
    out2 = conv(x, x_0, edge_index, value)
    assert out2.size() == (4, 16)
    assert torch.allclose(conv(x, x_0, adj2.t()), out2, atol=1e-6)

    t = '(Tensor, Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, x_0, edge_index).tolist() == out1.tolist()
    assert jit(x, x_0, edge_index, value).tolist() == out2.tolist()

    t = '(Tensor, Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, x_0, adj1.t()), out1, atol=1e-6)
    assert torch.allclose(jit(x, x_0, adj2.t()), out2, atol=1e-6)

    conv.cached = True
    conv(x, x_0, edge_index)
    assert conv(x, x_0, edge_index).tolist() == out1.tolist()
    conv(x, x_0, adj1.t())
    assert torch.allclose(conv(x, x_0, adj1.t()), out1, atol=1e-6)
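
For reference, with shared_weights=True a GCN2Conv layer computes the GCNII propagation rule (the nonlinearity is applied outside the layer by the surrounding models):

$$\mathbf{X}' = \Big((1-\alpha)\,\hat{\mathbf{P}}\mathbf{X} + \alpha\,\mathbf{X}^{(0)}\Big)\Big((1-\beta_\ell)\,\mathbf{I} + \beta_\ell\,\mathbf{W}\Big), \qquad \beta_\ell = \log\!\Big(\tfrac{\theta}{\ell} + 1\Big),$$

where \hat{P} is the symmetrically normalized adjacency with self-loops, X^{(0)} is the initial representation passed as x_0, and alpha, theta, and l correspond to the alpha, theta, and layer constructor arguments. When theta and layer are omitted, as in the test above, beta defaults to 1.0, which matches the repr 'GCN2Conv(16, alpha=0.2, beta=1.0)'.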
Code Example #4
    def __init__(self,
                 num_of_features,
                 hid_size,
                 num_of_classes,
                 num_layers=64,
                 alpha=0.1,
                 theta=0.5,
                 shared_weights=True,
                 dropout=0.0):
        super(GCNII, self).__init__()

        self.lins = torch.nn.ModuleList()
        self.lins.append(nn.Linear(num_of_features, hid_size))
        self.lins.append(nn.Linear(hid_size, num_of_classes))

        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            self.convs.append(
                GCN2Conv(hid_size,
                         alpha,
                         theta,
                         layer + 1,
                         shared_weights,
                         normalize=False))

        self.dropout = dropout
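
A hypothetical instantiation of this GCNII class on a Planetoid dataset (the dataset and hyperparameters below are illustrative assumptions, not taken from the original project):

from torch_geometric.datasets import Planetoid

dataset = Planetoid(root='data/Cora', name='Cora')
model = GCNII(num_of_features=dataset.num_node_features,
              hid_size=64,
              num_of_classes=dataset.num_classes,
              num_layers=64,
              alpha=0.1,
              theta=0.5,
              dropout=0.6)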
Code Example #5
    def __init__(self, hidden_channels, num_layers, alpha, theta,
                 shared_weights=True, dropout=0.0):
        super(Net, self).__init__()

        self.lins = torch.nn.ModuleList()
        self.lins.append(Linear(train_dataset.num_features, hidden_channels))
        self.lins.append(Linear(hidden_channels, train_dataset.num_classes))

        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            self.convs.append(
                GCN2Conv(hidden_channels, alpha, theta, layer + 1,
                         shared_weights, normalize=False))

        self.dropout = dropout
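
Because every GCN2Conv in Code Example #5 is created with normalize=False, the sparse adjacency has to be GCN-normalized once before training. A typical preprocessing sketch (assumed setup following the common PyTorch Geometric pattern; the dataset choice is illustrative):

import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.nn.conv.gcn_conv import gcn_norm

transform = T.Compose([T.NormalizeFeatures(), T.ToSparseTensor()])
train_dataset = Planetoid(root='data/Cora', name='Cora', transform=transform)
data = train_dataset[0]
data.adj_t = gcn_norm(data.adj_t)  # symmetric normalization with self-loops, computed once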
Code Example #6
File: gcn2.py  Project: tchaton/lightning-geometric
 def make_layers(
     self,
     dim_in,
     dim_out,
     alpha=None,
     theta=None,
     shared_weights=None,
     normalize=None,
 ):
     # note: `shared_weights` is passed positionally here, so it fills GCN2Conv's
     # fourth parameter (`layer`); use shared_weights=shared_weights to set it by name.
     self.convs.append(
         GCN2Conv(dim_out,
                  alpha,
                  theta,
                  shared_weights,
                  normalize=normalize))
     self.lins.append(torch.nn.Linear(dim_in, dim_out))
Code Example #7
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 alpha,
                 dropout=0.):

        super(Net, self).__init__()

        self.lin1 = Linear(in_channels, hidden_channels)

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(num_layers):
            self.convs.append(GCN2Conv(hidden_channels, alpha, cached=True))
            self.batch_norms.append(BatchNorm(hidden_channels))

        self.lin2 = Linear(hidden_channels, out_channels)

        self.dropout = dropout
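
Code Example #7 again only shows the constructor; a forward pass consistent with this layout might look like the sketch below (an assumption: each GCN2Conv output is batch-normalized and activated). Since the layers use cached=True, the normalized adjacency is computed on the first call and reused, which suits a fixed (transductive) graph.

import torch.nn.functional as F

def forward(self, x, edge_index):
    x = x_0 = self.lin1(x).relu()
    for conv, batch_norm in zip(self.convs, self.batch_norms):
        h = F.dropout(x, self.dropout, training=self.training)
        h = conv(h, x_0, edge_index)
        x = batch_norm(h).relu()
    x = F.dropout(x, self.dropout, training=self.training)
    return self.lin2(x)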
Code Example #8
File: gcn2.py  Project: Linkerbrain/TextGNNs
    def __init__(self, input_shape, output_shape,
                 hidden_channels, num_layers, dropout, alpha,
                 shared_weights, use_edge_weights):
        super(GCN2, self).__init__()

        self.lins = torch.nn.ModuleList()
        
        self.start_with_lin = input_shape != hidden_channels

        if self.start_with_lin:
            self.lins.append(Linear(input_shape, hidden_channels))
        self.lins.append(Linear(hidden_channels, output_shape))

        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers):
            self.convs.append(
                GCN2Conv(channels=hidden_channels, alpha=alpha, theta=0.5, layer=layer + 1,
                         shared_weights=shared_weights, normalize=True))

        self.dropout = dropout
        self.use_edge_weights = use_edge_weights
Code Example #9
    def __init__(self,
                 data_list,
                 decode_modelname,
                 train_pos_edge_adj_t,
                 num_hidden_channels,
                 num_layers,
                 alpha=0.1,
                 theta=0.5,
                 shared_weights=True,
                 activation=None,
                 dropout=0.0,
                 future_prediction=True):
        '''
        Args:
            data_list (list of torch_geometric.data.Data): Time-ordered list of graph snapshots.
            decode_modelname (str): Name of the decoder model.
            train_pos_edge_adj_t (torch.SparseTensor[2, num_pos_edges]): Links (positive edges) of the training data.
            num_hidden_channels (int or None): Output dimension of each hidden layer. The same value is used for every layer.
            num_layers (int or None): Number of hidden layers.
            alpha (float): Weight of the initial representation added after each convolution. (Default: 0.1)
            theta (float): Hyperparameter controlling the identity-mapping strength in GCN2Conv (beta_l = log(theta / l + 1)). (Default: 0.5)
            shared_weights (bool): Whether the propagated and initial representations share the same weight matrix in GCN2Conv. (Default: True)
            activation (str or None): Activation function to use: None, "relu", "leaky_relu", or "tanh". (Default: None)
            dropout (float): Dropout rate applied at each layer. (Default: 0.0)
            future_prediction (bool): If True, an additional LSTM over the node features is created. (Default: True)
        '''
        super(EvolveGCNIIO, self).__init__()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.decode_modelname = decode_modelname

        self.num_layers = num_layers
        self.train_pos_edge_adj_t = train_pos_edge_adj_t
        self.hidden_channels_str = (f'{num_hidden_channels}_' *
                                    num_layers)[:-1]

        self.lins = torch.nn.ModuleList()
        self.recurrents = torch.nn.ModuleList()
        self.lins.append(GCNConv(data_list[-1].x.size(1), num_hidden_channels))
        self.recurrents.append(
            LSTM(input_size=num_hidden_channels,
                 hidden_size=num_hidden_channels))

        self.convs = torch.nn.ModuleList()
        for layer in range(num_layers - 1):
            self.convs.append(
                GCN2Conv(num_hidden_channels,
                         alpha,
                         theta,
                         layer + 1,
                         shared_weights=shared_weights,
                         normalize=False))
            self.recurrents.append(
                LSTM(input_size=num_hidden_channels,
                     hidden_size=num_hidden_channels))

        self.batchnorms = torch.nn.ModuleList()
        for layer in range(num_layers - 2):
            self.batchnorms.append(torch.nn.BatchNorm1d(num_hidden_channels))

        self.activation = activation
        self.dropout = dropout
        self.future_prediction = future_prediction

        if self.future_prediction is True:
            self.feature_recurrents = torch.nn.ModuleList()
            self.feature_recurrents.append(
                LSTM(input_size=num_hidden_channels,
                     hidden_size=num_hidden_channels))
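
All of the per-layer LSTMs in Code Example #9 use input_size == hidden_size == num_hidden_channels. As a shape reminder (a standalone sketch, unrelated to the project's actual forward logic), torch.nn.LSTM with the default batch_first=False expects input of shape [seq_len, batch, input_size]:

import torch

H = 64                                  # e.g. num_hidden_channels
lstm = torch.nn.LSTM(input_size=H, hidden_size=H)
seq = torch.randn(5, 1, H)              # five time steps of a single embedding sequence
out, (h_n, c_n) = lstm(seq)             # out: [5, 1, H]; h_n, c_n: [1, 1, H]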