Example #1
    def __init__(self, in_channels, out_channels, hidden_channels=32):
        super(Net, self).__init__()

        self.conv1 = GCNConv(in_channels, hidden_channels)
        # `average_nodes` is assumed to come from the dataset: the average
        # number of nodes per graph. Each pooling step halves it.
        num_nodes = ceil(0.5 * average_nodes)
        self.pool1 = Linear(hidden_channels, num_nodes)

        self.conv2 = DenseGraphConv(hidden_channels, hidden_channels)
        num_nodes = ceil(0.5 * num_nodes)
        self.pool2 = Linear(hidden_channels, num_nodes)

        self.conv3 = DenseGraphConv(hidden_channels, hidden_channels)

        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, out_channels)
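
This constructor appears to follow the MinCut pooling example from PyTorch
Geometric, where each `Linear` pooling layer produces soft cluster
assignments. A minimal sketch of a matching forward pass (an assumption, not
part of the original snippet; it relies on `to_dense_batch`/`to_dense_adj`
from `torch_geometric.utils`, `dense_mincut_pool` from `torch_geometric.nn`,
and `torch.nn.functional as F`):

    def forward(self, x, edge_index, batch):
        x = self.conv1(x, edge_index).relu()
        # Convert the sparse mini-batch into dense [B, N, C] / [B, N, N] form.
        x, mask = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        s = self.pool1(x)  # soft cluster-assignment logits
        x, adj, mc1, o1 = dense_mincut_pool(x, adj, s, mask)

        x = self.conv2(x, adj).relu()
        s = self.pool2(x)
        x, adj, mc2, o2 = dense_mincut_pool(x, adj, s)

        x = self.conv3(x, adj)
        x = x.mean(dim=1)  # mean over the remaining clusters
        x = self.lin2(self.lin1(x).relu())
        # Predictions plus the auxiliary MinCut and orthogonality losses.
        return F.log_softmax(x, dim=-1), mc1 + o1 + mc2 + o2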
Example #2
    def __init__(self, in_channels, out_channels, hidden_channels=32):
        super().__init__()

        self.conv1 = GCNConv(in_channels, hidden_channels)
        # `avg_num_nodes` is assumed to be the dataset's average number of
        # nodes per graph; each DMoN pooling step halves it.
        num_nodes = ceil(0.5 * avg_num_nodes)
        self.pool1 = DMoNPooling([hidden_channels, hidden_channels], num_nodes)

        self.conv2 = DenseGraphConv(hidden_channels, hidden_channels)
        num_nodes = ceil(0.5 * num_nodes)
        self.pool2 = DMoNPooling([hidden_channels, hidden_channels], num_nodes)

        self.conv3 = DenseGraphConv(hidden_channels, hidden_channels)

        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, out_channels)
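
`DMoNPooling` is applied to the dense tensors directly and returns the cluster
assignment together with three auxiliary losses (spectral, orthogonality,
cluster). A sketch of a matching forward pass, under the same assumptions as
above (dense conversion utilities and `F = torch.nn.functional`):

    def forward(self, x, edge_index, batch):
        x = self.conv1(x, edge_index).relu()
        x, mask = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        # Returns (assignment, x, adj, spectral, ortho, cluster loss).
        _, x, adj, sp1, o1, c1 = self.pool1(x, adj, mask)
        x = self.conv2(x, adj).relu()
        _, x, adj, sp2, o2, c2 = self.pool2(x, adj)

        x = self.conv3(x, adj)
        x = x.mean(dim=1)
        x = self.lin2(self.lin1(x).relu())
        return F.log_softmax(x, dim=-1), sp1 + o1 + c1 + sp2 + o2 + c2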
Example #3
import pytest
import torch

from torch_geometric.nn import DenseGraphConv, GraphConv


@pytest.mark.parametrize('aggr', ['add', 'mean', 'max'])
def test_dense_graph_conv(aggr):
    channels = 16
    sparse_conv = GraphConv(channels, channels, aggr=aggr)
    dense_conv = DenseGraphConv(channels, channels, aggr=aggr)
    assert dense_conv.__repr__() == 'DenseGraphConv(16, 16)'

    # Ensure same weights and bias.
    dense_conv.lin_rel = sparse_conv.lin_rel
    dense_conv.lin_root = sparse_conv.lin_root

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)
    assert sparse_out.size() == (5, channels)

    # Pad with one zero "dummy" node so the 5 nodes fill a [2, 3] dense batch.
    x = torch.cat([x, x.new_zeros(1, channels)], dim=0).view(2, 3, channels)
    adj = torch.tensor([
        [
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 0],
        ],
        [
            [0, 1, 0],
            [1, 0, 0],
            [0, 0, 0],
        ],
    ], dtype=torch.float)
    mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool)

    dense_out = dense_conv(x, adj, mask)
    assert dense_out.size() == (2, 3, channels)
    dense_out = dense_out.view(-1, channels)

    assert torch.allclose(sparse_out, dense_out[:5], atol=1e-04)
    # The masked dummy node must produce an all-zero output.
    assert dense_out[-1].abs().sum() == 0
Example #4
@pytest.mark.parametrize('aggr', ['add', 'mean', 'max'])
def test_dense_graph_conv_with_broadcasting(aggr):
    batch_size, num_nodes, channels = 8, 3, 16
    conv = DenseGraphConv(channels, channels, aggr=aggr)

    x = torch.randn(batch_size, num_nodes, channels)
    # A single [N, N] adjacency is broadcast across the whole batch.
    adj = torch.tensor([
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ], dtype=torch.float)

    assert conv(x, adj).size() == (batch_size, num_nodes, channels)
    mask = torch.tensor([1, 1, 1], dtype=torch.bool)
    assert conv(x, adj, mask).size() == (batch_size, num_nodes, channels)
Example #5
import pytest
import torch

from torch_geometric.nn import DenseGraphConv, GraphConv
from torch_geometric.utils import to_dense_adj


@pytest.mark.parametrize('aggr', ['add', 'mean', 'max'])
def test_dense_graph_conv(aggr):
    channels = 16
    sparse_conv = GraphConv(channels, channels, aggr=aggr)
    dense_conv = DenseGraphConv(channels, channels, aggr=aggr)
    assert dense_conv.__repr__() == 'DenseGraphConv(16, 16)'

    # Ensure same weights and bias.
    dense_conv.lin_rel = sparse_conv.lin_rel
    dense_conv.lin_root = sparse_conv.lin_root

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)
    assert sparse_out.size() == (5, channels)

    # to_dense_adj returns a batched adjacency of shape [1, 5, 5], which is
    # why the dense output below is indexed with [0].
    adj = to_dense_adj(edge_index)
    mask = torch.ones(5, dtype=torch.bool)

    dense_out = dense_conv(x, adj, mask)[0]
    assert dense_out.size() == (5, channels)
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
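
The same sparse-to-dense round trip generalizes to batched graphs. A small
self-contained sketch using standard `torch_geometric` utilities (the tensors
here are illustrative, not taken from the test above):

import torch
from torch_geometric.utils import to_dense_adj, to_dense_batch

x = torch.randn(5, 16)
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
batch = torch.zeros(5, dtype=torch.long)  # all five nodes in one graph

x_dense, mask = to_dense_batch(x, batch)  # shapes [1, 5, 16] and [1, 5]
adj = to_dense_adj(edge_index, batch)     # shape [1, 5, 5]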
Example #6
    def __init__(self, num_features, num_classes, max_num_nodes, num_layers, gnn_hidden_dim,
                 gnn_output_dim, mlp_hidden_dim, pooling_type, invariant, encode_edge, pre_sum_aggr=False):
        super().__init__()

        self.encode_edge = encode_edge
        self.pre_sum_aggr = pre_sum_aggr
        self.max_num_nodes = max_num_nodes
        self.pooling_type = pooling_type
        self.num_diffpool_layers = num_layers

        # Reproduce the paper's choice of coarsening factor.
        coarse_factor = 0.1 if num_layers == 1 else 0.25

        gnn_dim_input = num_features
        if encode_edge:
            gnn_dim_input = gnn_hidden_dim
            # `GCNConv` here appears to be an OGB-style, edge-aware variant
            # that takes a single embedding dimension.
            self.conv1 = GCNConv(gnn_hidden_dim, aggr='add')

        # Note: if both `encode_edge` and `pre_sum_aggr` are set, this
        # overwrites the edge-encoding convolution defined above.
        if self.pre_sum_aggr:
            self.conv1 = DenseGraphConv(gnn_dim_input, gnn_dim_input)

        no_new_clusters = ceil(coarse_factor * self.max_num_nodes)
        gnn_embed_dim_output = (NUM_SAGE_LAYERS - 1) * gnn_hidden_dim + gnn_output_dim

        layers = []
        current_num_clusters = self.max_num_nodes
        for i in range(num_layers):

            diffpool_layer = DiffPoolLayer(gnn_dim_input, gnn_hidden_dim, gnn_output_dim, current_num_clusters,
                                           no_new_clusters, pooling_type, invariant)
            layers.append(diffpool_layer)

            # Update embedding sizes
            gnn_dim_input = gnn_embed_dim_output
            current_num_clusters = no_new_clusters
            no_new_clusters = ceil(no_new_clusters * coarse_factor)

        self.diffpool_layers = nn.ModuleList(layers)

        # After the DiffPool layers, apply another stack of GraphSAGE convolutions.
        self.final_embed = SAGEConvolutions(gnn_embed_dim_output, gnn_hidden_dim, gnn_output_dim, lin=False)
        final_embed_dim_output = gnn_embed_dim_output * (num_layers + 1)

        self.lin1 = nn.Linear(final_embed_dim_output, mlp_hidden_dim)
        self.lin2 = nn.Linear(mlp_hidden_dim, num_classes)
        self.atom_encoder = AtomEncoder(emb_dim=gnn_hidden_dim)
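
For context, a hypothetical instantiation of this model (the class name
`DiffPool`, all argument values, and `NUM_SAGE_LAYERS = 3` are assumptions;
none of them appear in the snippet itself):

model = DiffPool(num_features=9, num_classes=2, max_num_nodes=120,
                 num_layers=2, gnn_hidden_dim=64, gnn_output_dim=64,
                 mlp_hidden_dim=50, pooling_type='gnn', invariant=False,
                 encode_edge=True)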
Example #7
    def __init__(self,
                 num_features,
                 num_classes,
                 max_num_nodes,
                 hidden,
                 pooling_type,
                 num_layers,
                 encode_edge=False):
        super(MincutPool, self).__init__()
        self.encode_edge = encode_edge

        self.atom_encoder = AtomEncoder(emb_dim=hidden)

        self.pooling_type = pooling_type
        self.convs = nn.ModuleList()
        self.pools = nn.ModuleList()
        self.num_layers = num_layers

        for i in range(num_layers):
            if i == 0:
                if encode_edge:
                    self.convs.append(GCNConv(hidden, aggr='add'))
                else:
                    self.convs.append(
                        GraphConv(num_features, hidden, aggr='add'))
            else:
                self.convs.append(DenseGraphConv(hidden, hidden))

        # Pre-sampled random assignment matrices, used when pooling is not MLP-based.
        self.rms = []
        num_nodes = max_num_nodes
        for i in range(num_layers - 1):
            num_nodes = ceil(0.5 * num_nodes)
            if pooling_type == 'mlp':
                self.pools.append(Linear(hidden, num_nodes))
            else:
                self.rms.append(
                    fetch_assign_matrix('uniform', ceil(2 * num_nodes),
                                        num_nodes))

        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, num_classes)
Example #8
    def __init__(self):
        super(Net, self).__init__()
        # Two dense layers sized for Cora-like data: 1433 input features,
        # 16 hidden channels, 7 output classes.
        self.layer1 = DenseGraphConv(1433, 16)
        self.layer2 = DenseGraphConv(16, 7)
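
The snippet stops at the layer definitions; a minimal sketch of a matching
forward pass for full-batch input (an assumption, relying on `to_dense_adj`
from `torch_geometric.utils` and `torch.nn.functional as F`):

    def forward(self, x, edge_index):
        # Dense layers expect a batched adjacency; to_dense_adj gives [1, N, N].
        adj = to_dense_adj(edge_index)
        x = self.layer1(x.unsqueeze(0), adj).relu()
        x = self.layer2(x, adj)
        return F.log_softmax(x.squeeze(0), dim=-1)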
Example #9
    def __init__(self,
                 num_features,
                 num_classes,
                 max_num_nodes,
                 num_layers,
                 gnn_hidden_dim,
                 gnn_output_dim,
                 mlp_hidden_dim,
                 pooling_type,
                 invariant,
                 encode_edge=False,
                 pre_sum_aggr=False):
        super().__init__()

        # gnn_hidden_dim equals gnn_output_dim
        self.encode_edge = encode_edge
        self.max_num_nodes = max_num_nodes
        self.pooling_type = pooling_type
        self.num_pooling_layers = num_layers

        gnn_dim_input = num_features
        if encode_edge:
            gnn_dim_input = gnn_hidden_dim
            # Assumed to be an OGB-style GCNConv taking a single embedding dim.
            self.conv1 = GCNConv(gnn_hidden_dim, aggr='add')

        # Reproduce the paper's choice of coarsening factor.
        coarse_factor = 0.1 if num_layers == 1 else 0.25

        if pre_sum_aggr:  # this is only used for IMDB
            self.initial_embed = DenseGraphConv(gnn_dim_input, gnn_output_dim)
        else:
            self.initial_embed = SAGEConvolutions(1, gnn_dim_input,
                                                  gnn_output_dim)

        no_new_clusters = ceil(coarse_factor * self.max_num_nodes)

        layers = []
        after_pool_layers = []
        current_num_clusters = self.max_num_nodes
        for i in range(num_layers):

            diffpool_layer = DiffPoolLayer(gnn_output_dim, gnn_output_dim,
                                           current_num_clusters,
                                           no_new_clusters, pooling_type,
                                           invariant)
            layers.append(diffpool_layer)

            # Update embedding sizes
            current_num_clusters = no_new_clusters
            no_new_clusters = ceil(no_new_clusters * coarse_factor)

            after_pool_layers.append(
                SAGEConvolutions(3, gnn_output_dim, gnn_output_dim))

        self.diffpool_layers = nn.ModuleList(layers)
        self.after_pool_layers = nn.ModuleList(after_pool_layers)

        # Dimension of the embedding that feeds the final MLP (the post-pool
        # GraphSAGE stacks are already applied inside `after_pool_layers`).
        final_embed_dim_output = gnn_output_dim

        self.lin1 = nn.Linear(final_embed_dim_output, mlp_hidden_dim)
        self.lin2 = nn.Linear(mlp_hidden_dim, num_classes)
        self.atom_encoder = AtomEncoder(emb_dim=gnn_hidden_dim)