Example 1
class Block_2hop(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, jp=False):
        super(Block_2hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
        self.jp = jp
        if self.jp:
            self.jump = JumpingKnowledge('cat')
            self.lin = Linear(hidden_channels + out_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        if self.jp:
            self.lin.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x1 = F.normalize(x1, p=2, dim=-1)
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        x2 = F.normalize(x2, p=2, dim=-1)
        if self.jp:
            return F.relu(self.lin(self.jump([x1, x2])))
        return x2

class Block(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, mode='cat'):
        super(Block, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin = Linear(hidden_channels + out_channels, out_channels)
        else:
            self.lin = Linear(out_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.lin.reset_parameters()

    # x: [batch_size, num_nodes, in_channels]
    def forward(self, x, adj, mask=None, add_loop=True):
        # x1: [batch_size, num_nodes, hidden_channels]
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        # x2: [batch_size, num_nodes, out_channels]
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        # out: [batch_size, num_nodes, out_channels]
        return self.lin(self.jump([x1, x2]))
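A minimal smoke test for the Block above (a sketch, not part of the original source; sizes are arbitrary, and the imports the snippet relies on, torch, F, Linear, DenseSAGEConv, and JumpingKnowledge, are assumed to be in scope):

import torch

block = Block(in_channels=32, hidden_channels=64, out_channels=16)
x = torch.randn(8, 20, 32)                      # [batch_size, num_nodes, in_channels]
adj = torch.randint(0, 2, (8, 20, 20)).float()  # dense batched adjacency
out = block(x, adj)                             # JK 'cat': Linear(64 + 16 -> 16)
assert out.size() == (8, 20, 16)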
Example 3
def test_dense_sage_conv():
    channels = 16
    sparse_conv = SAGEConv(channels, channels)
    dense_conv = DenseSAGEConv(channels, channels)
    assert dense_conv.__repr__() == 'DenseSAGEConv(16, 16)'

    # Ensure same weights (identity) and bias (ones).
    index = torch.arange(0, channels, dtype=torch.long)
    sparse_conv.weight.data.fill_(0)
    sparse_conv.weight.data[index, index] = 1
    sparse_conv.bias.data.fill_(1)
    dense_conv.weight.data.fill_(0)
    dense_conv.weight.data[index, index] = 1
    dense_conv.bias.data.fill_(1)

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)

    x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels)
    adj = torch.Tensor([
        [[0, 1, 1], [1, 0, 1], [1, 1, 0]],
        [[0, 1, 0], [1, 0, 0], [0, 0, 0]],
    ])
    mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)

    dense_out = dense_conv(x, adj, mask)
    assert dense_out.size() == (2, 3, channels)
    dense_out = dense_out.view(6, channels)[:-1]
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
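The test above pads the second (smaller) graph by hand to build the dense batch. A hedged sketch of the same conversion using PyG's utilities (assuming torch_geometric.utils.to_dense_batch and to_dense_adj are available in the installed version):

import torch
from torch_geometric.utils import to_dense_adj, to_dense_batch

# Two graphs: a triangle (nodes 0-2) and a single edge (nodes 3-4).
edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                           [1, 2, 0, 2, 0, 1, 4, 3]])
batch = torch.tensor([0, 0, 0, 1, 1])  # graph id of every node

x = torch.randn(5, 16)
dense_x, mask = to_dense_batch(x, batch)  # [2, 3, 16] features, [2, 3] mask
adj = to_dense_adj(edge_index, batch)     # [2, 3, 3] padded adjacency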
Example 4
    def __init__(
        self,
        in_channels,
        hidden_channels,
        out_channels,
        normalize=False,
        add_loop=False,
        lin=True,
    ):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels, normalize)
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, normalize)
        self.bn2 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv3 = DenseSAGEConv(hidden_channels, out_channels, normalize)
        self.bn3 = torch.nn.BatchNorm1d(out_channels)

        if lin is True:
            self.lin = torch.nn.Linear(2 * hidden_channels + out_channels,
                                       out_channels)
        else:
            self.lin = None
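The snippet ends with __init__; a plausible forward for this three-layer GNN, sketched after the DiffPool example pattern in PyG (an assumption, not the original author's code):

    def bn(self, i, x):
        # BatchNorm1d expects [N, C], so flatten the node dimension first.
        batch_size, num_nodes, num_channels = x.size()
        x = x.view(-1, num_channels)
        x = getattr(self, 'bn{}'.format(i))(x)
        return x.view(batch_size, num_nodes, num_channels)

    def forward(self, x, adj, mask=None):
        x1 = self.bn(1, F.relu(self.conv1(x, adj, mask, self.add_loop)))
        x2 = self.bn(2, F.relu(self.conv2(x1, adj, mask, self.add_loop)))
        x3 = self.bn(3, F.relu(self.conv3(x2, adj, mask, self.add_loop)))
        x = torch.cat([x1, x2, x3], dim=-1)  # 2 * hidden + out channels
        if self.lin is not None:
            x = F.relu(self.lin(x))
        return x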
Example 5
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(Block, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)

        self.lin = torch.nn.Linear(hidden_channels + out_channels,
                                   out_channels)
Example 6
    def __init__(self, in_channels, hidden_channels, out_channels, jp=False):
        super(Block_2hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
        self.jp = jp
        if self.jp:
            self.jump = JumpingKnowledge('cat')
            self.lin = Linear(hidden_channels + out_channels, out_channels)

    def __init__(self, in_channels, hidden_channels, out_channels, mode='cat'):
        super(Block, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin = Linear(hidden_channels + out_channels, out_channels)
        else:
            self.lin = Linear(out_channels, out_channels)
Example 8
class Block_1hop(torch.nn.Module):
    # With a single 1-hop layer there is only one output, so jumping
    # knowledge is never used; `jp` and `hidden_channels` are kept only for
    # interface compatibility with the deeper blocks.
    def __init__(self, in_channels, hidden_channels, out_channels, jp=False):
        super(Block_1hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x1 = F.normalize(x1, p=2, dim=-1)
        return x1
Example 9
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 normalize=False,
                 add_loop=False,
                 lin=True):
        super().__init__()

        if hidden_channels is None:
            hidden_channels = []

        self.add_loop = add_loop

        n_channels = [in_channels] + hidden_channels + [out_channels]

        # `n_channels` is a list, so the layer count is its length minus one.
        self.expansion = len(n_channels) - 1

        self.convs = nn.ModuleList()
        self.bns = nn.ModuleList()

        for layer_i in range(self.expansion):
            self.convs.append(
                DenseSAGEConv(n_channels[layer_i], n_channels[layer_i + 1],
                              normalize))
            self.bns.append(torch.nn.BatchNorm1d(n_channels[layer_i + 1]))

        if lin is True:
            # `hidden_channels` is a list here, so the concatenated feature
            # width is the sum of all hidden widths plus the output width.
            self.lin = torch.nn.Linear(sum(hidden_channels) + out_channels,
                                       out_channels)
        else:
            self.lin = None
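A hedged usage sketch for the variable-depth constructor above (the class name GNN and the sizes are illustrative):

# Three conv layers: 32 -> 64 -> 64 -> 16.
gnn = GNN(in_channels=32, out_channels=16, hidden_channels=[64, 64],
          normalize=True, lin=True)
# With lin=True, self.lin maps sum(hidden_channels) + out_channels
# (64 + 64 + 16 = 144) down to out_channels.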
Example 10
class Block_2hop(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(Block_2hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x1 = F.normalize(x1, p=2, dim=-1)
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        x2 = F.normalize(x2, p=2, dim=-1)
        return x2
Example 11
    def __init__(self, num_features, n_classes, num_hidden, num_hidden_layers,
                 dropout, activation, normalize=True, bias=True):
        super(PDenseSAGE, self).__init__()
        # Dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # Activation
        self.activation = activation
        # Input layer
        self.conv_input = DenseSAGEConv(num_features, num_hidden,
                                        normalize=normalize, bias=bias)
        # Hidden layers
        self.layers = nn.ModuleList()
        for _ in range(num_hidden_layers):
            self.layers.append(DenseSAGEConv(num_hidden, num_hidden,
                                             normalize=normalize, bias=bias))
        # Output layer
        self.conv_output = DenseSAGEConv(num_hidden, n_classes,
                                         normalize=normalize, bias=bias)
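The original forward is not shown; a plausible sketch based on the layers defined above (an assumption):

    def forward(self, x, adj, mask=None):
        h = self.dropout(self.activation(self.conv_input(x, adj, mask)))
        for layer in self.layers:
            h = self.dropout(self.activation(layer(h, adj, mask)))
        return self.conv_output(h, adj, mask)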
Example 12
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 lin=True):
        super().__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.bn1 = nn.BatchNorm1d(hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels)
        self.bn2 = nn.BatchNorm1d(hidden_channels)
        self.conv3 = DenseSAGEConv(hidden_channels, out_channels)

        if lin is True:
            self.lin = nn.Linear((NUM_SAGE_LAYERS - 1) * hidden_channels + out_channels, out_channels)
        else:
            # GNN's intermediate representation is given by the concatenation of SAGE layers
            self.lin = None
Example 13
    def __init__(self, feat_in, n_classes, n_chan=64):
        super(Net, self).__init__()

        # `average_nodes` is assumed to be defined in the enclosing scope
        # (e.g. the dataset's average graph size).
        num_nodes = math.ceil(0.4 * average_nodes)
        self.gnn1 = DenseSAGEConv(feat_in, n_chan)
        # self.pool1 = torch.nn.Linear(n_chan, num_nodes)
        # self.pool1 = Prototype_Pooling(num_nodes, n_chan, n_chan, n_chan)
        self.pool1 = Embedded_Pool(num_nodes, n_chan, n_chan, n_chan)

        num_nodes = math.ceil(0.4 * num_nodes)
        self.gnn2 = DenseSAGEConv(n_chan, n_chan)
        # self.pool2 = torch.nn.Linear(n_chan, num_nodes)
        # self.pool2 = Prototype_Pooling(num_nodes, n_chan, n_chan, n_chan)
        self.pool2 = Embedded_Pool(num_nodes, n_chan, n_chan, n_chan)

        self.gnn3 = DenseSAGEConv(n_chan, n_chan)

        self.lin1 = torch.nn.Linear(n_chan, n_chan)
        self.lin2 = torch.nn.Linear(n_chan, n_classes)
Example 14
class Block(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(Block, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)

        self.lin = torch.nn.Linear(hidden_channels + out_channels,
                                   out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.lin.reset_parameters()

    def forward(self, x, adj, mask=None, add_loop=True):
        x1 = F.relu(self.conv1(x, adj, mask, add_loop))
        x2 = F.relu(self.conv2(x1, adj, mask, add_loop))
        return self.lin(torch.cat([x1, x2], dim=-1))
Example 15
    def _gcn(self, name, input_dim, hidden_dim, bias, activation='relu'):
        if name == 'SAGE':
            return DenseSAGEConv(input_dim, hidden_dim, normalize=True,
                                 bias=bias)
        else:
            nn1 = nn.Sequential(nn.Linear(input_dim, hidden_dim),
                                self._activation(activation),
                                nn.Linear(hidden_dim, hidden_dim))
            return DenseGINConv(nn1)
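The helper `_activation` is referenced above but not included in the snippet; a plausible companion (an assumption, not the original code):

    def _activation(self, name='relu'):
        return {'relu': nn.ReLU(), 'elu': nn.ELU(),
                'leaky': nn.LeakyReLU(0.2)}[name]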
Example 16
class Block(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, mode='cat'):
        super().__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
        self.jump = JumpingKnowledge(mode)
        if mode == 'cat':
            self.lin = Linear(hidden_channels + out_channels, out_channels)
        else:
            self.lin = Linear(out_channels, out_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.lin.reset_parameters()

    def forward(self, x, adj, mask=None):
        x1 = F.relu(self.conv1(x, adj, mask))
        x2 = F.relu(self.conv2(x1, adj, mask))
        return self.lin(self.jump([x1, x2]))

    def __init__(self, num_layers, in_channels, out_channels, residual=True):
        super().__init__()

        self.num_layers = num_layers
        self.residual = residual
        self.layers = nn.ModuleList()
        self.bns = nn.ModuleList()
        for i in range(num_layers - 1):
            if i == 0:
                self.layers.append(
                    DenseSAGEConv(in_channels, out_channels, normalize=True))
            else:
                self.layers.append(
                    DenseSAGEConv(out_channels, out_channels, normalize=True))
            self.bns.append(nn.BatchNorm1d(out_channels))

        if num_layers == 1:
            self.layers.append(
                DenseSAGEConv(in_channels, out_channels, normalize=True))
        else:
            self.layers.append(
                DenseSAGEConv(out_channels, out_channels, normalize=True))
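This second snippet also stops after __init__; a plausible forward for the residual stack (an assumption; BatchNorm1d is applied by flattening the node dimension):

    def forward(self, x, adj, mask=None):
        for conv, bn in zip(self.layers[:-1], self.bns):
            h = F.relu(conv(x, adj, mask))
            B, N, C = h.size()
            h = bn(h.view(B * N, C)).view(B, N, C)
            # Residual connections only make sense once the width matches.
            x = x + h if self.residual and x.size(-1) == h.size(-1) else h
        return self.layers[-1](x, adj, mask)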
Example 18
def test_dense_sage_conv():
    channels = 16
    sparse_conv = SAGEConv(channels, channels, normalize=True)
    dense_conv = DenseSAGEConv(channels, channels, normalize=True)
    assert dense_conv.__repr__() == 'DenseSAGEConv(16, 16)'

    # Ensure same weights and bias.
    dense_conv.lin_rel = sparse_conv.lin_l
    dense_conv.lin_root = sparse_conv.lin_r

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)
    assert sparse_out.size() == (5, channels)

    x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels)
    adj = torch.Tensor([
        [
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 0],
        ],
        [
            [0, 1, 0],
            [1, 0, 0],
            [0, 0, 0],
        ],
    ])
    mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool)

    dense_out = dense_conv(x, adj, mask)
    assert dense_out.size() == (2, 3, channels)

    assert dense_out[1, 2].abs().sum().item() == 0
    dense_out = dense_out.view(6, channels)[:-1]
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
Example 19
def test_dense_sage_conv_with_broadcasting():
    batch_size, num_nodes, channels = 8, 3, 16
    conv = DenseSAGEConv(channels, channels)

    x = torch.randn(batch_size, num_nodes, channels)
    adj = torch.Tensor([
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ])

    assert conv(x, adj).size() == (batch_size, num_nodes, channels)
    mask = torch.tensor([1, 1, 1], dtype=torch.bool)
    assert conv(x, adj, mask).size() == (batch_size, num_nodes, channels)
Example 20
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 lin=True,
                 norm=True,
                 norm_embed=True):
        super(GNN, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels, norm,
                                   norm_embed)
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels, norm,
                                   norm_embed)
        self.bn2 = torch.nn.BatchNorm1d(hidden_channels)
        self.conv3 = DenseSAGEConv(hidden_channels, out_channels, norm,
                                   norm_embed)
        self.bn3 = torch.nn.BatchNorm1d(out_channels)

        if lin is True:
            self.lin = torch.nn.Linear(2 * hidden_channels + out_channels,
                                       out_channels)
        else:
            self.lin = None
Example 21
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(Block_3hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
        self.conv2 = DenseSAGEConv(hidden_channels, hidden_channels)
        self.conv3 = DenseSAGEConv(hidden_channels, out_channels)
Example 22
    def __init__(
            self,
            in_channels=1,
            hidden_channels=1,
            out_channels=1,
            normalize=False,
            add_loop=False,
            gnn_k=1,
            gnn_type=1,
            jump=None,  # None, 'max', or 'lstm'
            res=False,
            activation='leaky'):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.in_channels = in_channels
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the GNN
        self.gnn_type = gnn_type

        self.jump = jump
        if jump is not None:
            if jump != 'lstm':
                self.jk = JumpingKnowledge(jump)
            else:
                self.jk = JumpingKnowledge(jump, out_channels, gnn_k)
        if activation == 'leaky':
            self.activ = F.leaky_relu
        elif activation == 'elu':
            self.activ = F.elu
        elif activation == 'relu':
            self.activ = F.relu
        self.res = res
        if self.gnn_type in [10, 12] and self.res:
            raise Exception('res must be False when gnn_type is 10 or 12!')
        if self.k == 1 and self.res:
            raise Exception('res must be False when gnn_k == 1!')
        if self.k == 1 and self.jump is not None:
            raise Exception(
                'jumping knowledge only serves for the case where k > 1!')
        if gnn_type == 0:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=False)
        if gnn_type == 1:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=True)

        if gnn_type == 2:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 cached=False)
        if gnn_type == 3:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
        if gnn_type == 4:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=2)
        if gnn_type == 5:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=4)
        if gnn_type == 6:
            self.conv1 = GraphConv(in_channels=1,
                                   out_channels=out_channels,
                                   aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels,
                                   out_channels=out_channels,
                                   aggr='add')
        if gnn_type == 7:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 8:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 9:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)
        if gnn_type == 10:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 11:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 12:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type == 14:
            self.conv1 = ARMAConv(in_channels=1,
                                  out_channels=hidden_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
        if gnn_type == 15:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
        if gnn_type == 16:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
        if gnn_type == 17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type == 18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type == 19:
            self.conv1 = RGCNConv(in_channels=1,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
            self.conv2 = RGCNConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=out_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=out_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type == 25:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 26:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 27:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 28:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 29:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
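A hedged usage sketch for the dispatching constructor above (values are illustrative; the class is assumed to be named GNN). Only gnn_type 0 and 1 build dense layers that accept a [batch, nodes, nodes] adjacency; the other types construct sparse convolutions that expect an edge_index instead:

# Two DenseSAGEConv layers with internal normalization (gnn_type=1).
gnn = GNN(in_channels=32, hidden_channels=64, out_channels=16,
          gnn_type=1, gnn_k=1, jump=None, res=False, activation='relu')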
Example 23
    def __init__(self,
                 in_channels=1,
                 hidden_channels=1,
                 out_channels=1,
                 normalize=False,
                 add_loop=False,
                 gnn_k=1,
                 gnn_type=1):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the GNN
        self.gnn_type = gnn_type
        if gnn_type==0:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=False)
        if gnn_type==1:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=True)
        
        if gnn_type==2:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels, cached=True)
        if gnn_type==3:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels,improved=True, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels,improved=True, cached=True)
        if gnn_type==4:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=2)
        if gnn_type==5:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=4)
        if gnn_type==6:
            self.conv1 = GraphConv(in_channels=1, out_channels=hidden_channels,aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels, out_channels=out_channels,aggr='add')
        if gnn_type==7:
            # GatedGraphConv takes no in_channels argument; inputs narrower
            # than out_channels are zero-padded internally.
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=3, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=3, aggr='add', bias=True)
        if gnn_type==8:
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=7, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=7, aggr='add', bias=True)
        if gnn_type==9:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
        if gnn_type==10:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==11:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
        
        if gnn_type==12:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type==14:
            self.conv1 = ARMAConv(in_channels=1, out_channels=hidden_channels, num_stacks=1, num_layers=1,
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels, out_channels=out_channels, num_stacks=1, num_layers=1,
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
        if gnn_type==15:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=1, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=1, cached=True, bias=True)
        if gnn_type==16:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=3, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=3, cached=True, bias=True)
        if gnn_type==17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type==18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type==19:
            self.conv1 =RGCNConv(in_channels=1, out_channels=hidden_channels, num_relations=3, num_bases=2, bias=True)
            self.conv2 =RGCNConv(in_channels=hidden_channels, out_channels=out_channels, num_relations=3, num_bases=2, bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type==25:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==26:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==27:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==28:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
        if gnn_type==29:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
Example 24
    def __init__(self, in_channels, hidden_channels, out_channels, jp=False):
        super(Block_1hop, self).__init__()

        self.conv1 = DenseSAGEConv(in_channels, out_channels)