    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # x = global_mean_pool(x, batch)
        # Shift features to be non-negative within each graph before
        # aggregating, then subtract the aggregated shift afterwards. The
        # shift s is constant per graph and feature, so it cancels out.
        x_min = torch_scatter.scatter_min(x, batch, dim=0)[0]
        gather_idxs = batch.expand(x.shape[1], -1).t()
        gather_mins = torch.gather(x_min, 0, gather_idxs)
        s = F.relu(-gather_mins)
        x = x + s
        x = self.aggregator(x, batch)
        s_out = self.aggregator(s, batch)
        x = x - s_out

        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
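
Most of the examples in this listing call a `normalized_cut_2d` helper without showing it. For reference, the version used in PyTorch Geometric's MNIST examples weights each edge by the Euclidean distance between the endpoint positions and feeds that into `normalized_cut`; a minimal sketch:

import torch
from torch_geometric.utils import normalized_cut

def normalized_cut_2d(edge_index, pos):
    # Edge weight = Euclidean distance between the two endpoint positions.
    row, col = edge_index
    edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
    return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))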
Example #2
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        if self.encode_edge:
            x = self.atom_encoder(x)
            x = self.conv1(x, edge_index, data.edge_attr)
        else:
            x = self.conv1(x, edge_index)
        x = F.relu(x)
        xs = [global_mean_pool(x, batch)]
        for i, conv in enumerate(self.convs):
            x = F.relu(conv(x, edge_index))
            xs += [global_mean_pool(x, batch)]
            if self.pooling_type != 'none':
                if self.pooling_type == 'complement':
                    complement = batched_negative_edges(
                        edge_index=edge_index, batch=batch,
                        force_undirected=True)
                    cluster = graclus(complement, num_nodes=x.size(0))
                elif self.pooling_type == 'graclus':
                    cluster = graclus(edge_index, num_nodes=x.size(0))
                data = Batch(x=x, edge_index=edge_index, batch=batch)
                data = max_pool(cluster, data)
                x, edge_index, batch = data.x, data.edge_index, data.batch

        if not self.no_cat:
            x = self.jump(xs)
        else:
            x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return x
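
The `self.jump(xs)` call above matches the interface of PyTorch Geometric's `JumpingKnowledge` module. Since the constructor is not shown, the wiring below is an assumed sketch:

import torch
from torch_geometric.nn import JumpingKnowledge

jump = JumpingKnowledge(mode='cat')  # concatenates per-layer graph embeddings
xs = [torch.randn(8, 64) for _ in range(3)]  # one [num_graphs, hidden] per layer
out = jump(xs)  # shape [8, 3 * 64]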
Example #3
    def forward(self, data):
        data.x = F.elu(self.conv1a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv1b(data.x, data.edge_index, data.edge_attr))
        # data.x = F.elu(self.conv1c(data.x, data.edge_index, data.edge_attr))
        # data.x = self.bn1(data.x)

        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster1 = graclus(data.edge_index, weight, data.x.size(0))
        pos1 = data.pos
        edge_index1 = data.edge_index
        batch1 = data.batch if hasattr(data, 'batch') else None
        # weights1 = bweights(data, cluster1)
        data = max_pool(cluster1, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv2b(data.x, data.edge_index, data.edge_attr))
        # data.x = F.elu(self.conv2c(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster2 = graclus(data.edge_index, weight, data.x.size(0))
        pos2 = data.pos
        edge_index2 = data.edge_index
        batch2 = data.batch if hasattr(data, 'batch') else None
        # weights2 = bweights(data, cluster2)
        data = max_pool(cluster2, data, transform=T.Cartesian(cat=False))

        # upsample
        # data = recover_grid_barycentric(data, weights=weights2, pos=pos2, edge_index=edge_index2, cluster=cluster2,
        #                                  batch=batch2, transform=T.Cartesian(cat=False))
        data.x = F.elu(self.conv3a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv3b(data.x, data.edge_index, data.edge_attr))

        data = recover_grid(data,
                            pos2,
                            edge_index2,
                            cluster2,
                            batch=batch2,
                            transform=T.Cartesian(cat=False))

        # data = recover_grid_barycentric(data, weights=weights1, pos=pos1, edge_index=edge_index1, cluster=cluster1,
        #                                  batch=batch1, transform=T.Cartesian(cat=False))
        data.x = F.elu(self.conv4a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv4b(data.x, data.edge_index, data.edge_attr))
        data = recover_grid(data,
                            pos1,
                            edge_index1,
                            cluster1,
                            batch=batch1,
                            transform=T.Cartesian(cat=False))

        # TODO: handle contract on trainer and evaluator
        data.x = F.elu(self.convout(data.x, data.edge_index, data.edge_attr))

        x = data.x

        # return F.sigmoid(x)
        return x
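
The `recover_grid` helper used above is not shown. Below is a minimal unpooling sketch with the same call signature, assuming it broadcasts each coarse node's features back to the fine nodes of its cluster and recomputes edge attributes via the given transform (a hypothetical reconstruction, not the original helper):

import torch
from torch_geometric.data import Data

def recover_grid(data, pos, edge_index, cluster, batch=None, transform=None):
    # Relabel cluster ids to 0..C-1 so they index rows of the coarse features
    # (consistent with the relabeling max_pool applies internally).
    cluster = torch.unique(cluster, return_inverse=True)[1]
    fine = Data(x=data.x[cluster], pos=pos, edge_index=edge_index)
    fine.batch = batch
    return transform(fine) if transform is not None else fine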
Example #4
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = graclus(data.edge_index,
                          data.edge_attr.reshape(-1),  # flatten edge weights to (E,)
                          data.x.size(0))
        data = max_pool(cluster, data)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = graclus(data.edge_index,
                          data.edge_attr.reshape(-1),
                          data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)

        x = global_mean_pool(x, batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #5
    def forward(self, data):
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.dropout(data.x, training=self.training)

        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.x, batch = max_pool_x(cluster, data.x, data.batch)
        data.x = global_mean_pool(data.x, batch)

        data.x = self.fc1(data.x)
        data.x = F.log_softmax(data.x, dim=1)
        return data.x
Example #6
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        # Calculate similarity between nodes.
        weight = normalized_cut_2d(data.edge_index, data.pos)
        # Graph clustering without the need for eigenvectors.
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        # Pool and coarsen the graph: all nodes within the same cluster are
        # represented as one node, then `transform` is applied.
        data = max_pool(cluster, data, transform=transform)

        # Second conv layer.
        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        # Max-pool node features according to the clustering defined in `cluster`.
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # Batch-wise graph-level outputs by averaging node features across nodes.
        x = global_mean_pool(x, batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
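
For context, the coarsening step the comments above describe can be run standalone on a toy graph (shapes and values here are illustrative only):

import torch
from torch_geometric.data import Batch, Data
from torch_geometric.nn import graclus, max_pool

edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])
data = Batch.from_data_list([Data(x=torch.randn(4, 8), edge_index=edge_index)])
cluster = graclus(data.edge_index, num_nodes=data.x.size(0))
coarse = max_pool(cluster, data)  # nodes in the same cluster are max-reduced
assert coarse.x.size(0) <= data.x.size(0)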
Example #7
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

        x = F.elu(self.fc1(data.x))
        x = F.dropout(x, training=self.training, p=self.dropout)
        x = self.fc2(x)
        y = global_mean_pool(x, data.batch)

        if self.wgan:
            return y

        return torch.sigmoid(y)
Example #8
    def forward(self, data):
        for i in range(self.layers_num):
            data.x = self.conv_layers[i](data.x, data.pos, data.edge_index)

            if self.use_cluster_pooling:
                weight = normalized_cut_2d(data.edge_index, data.pos)
                cluster = graclus(data.edge_index, weight, data.x.size(0))
                data = max_pool(cluster,
                                data,
                                transform=T.Cartesian(cat=False))

        data.x = global_mean_pool(data.x, data.batch)
        x = self.fc1(data.x)

        return F.log_softmax(x, dim=1)
Example #9
    def forward(self, data):

        pos, edge_index, batch = data.pos, data.edge_index, data.batch
        real_batch_size = pos.size(0) // self.nr_points

        # Build first edges
        edge_index = knn_graph(pos, self.k, batch, loop=False)

        # Extract features in 3D.
        _, _, features_dd, _ = self.ds1(pos, edge_index, None)

        # Coarsen with graclus.
        cluster = graclus(edge_index)

        pos_gra, batch_gra = avg_pool_x(cluster, pos, batch)
        features_gra, _ = max_pool_x(cluster, features_dd, batch)

        # Rebuild kNN edges in feature space.
        with torch.no_grad():
            edge_index_gra = knn_graph(features_gra.norm(dim=2),
                                       self.k,
                                       batch_gra,
                                       loop=False)

        # DD2
        _, _, features_dd2, _ = self.dd2(pos_gra, edge_index_gra, features_gra)

        y1 = self.nn1(features_dd2)

        # Global max pool per graph: the batch vector doubles as the cluster
        # assignment, so all nodes of a graph collapse to one row.
        y1_pool, _ = max_pool_x(batch_gra, y1, batch_gra)

        y1_pool = torch.nn.functional.relu(y1_pool)
        y1_pool = self.bn1(y1_pool)

        y2 = self.nn2(y1_pool)
        y2 = torch.nn.functional.relu(y2)
        y2 = self.bn2(y2)

        y3 = self.nn3(y2)
        y3 = torch.nn.functional.relu(y3)
        y3 = self.bn3(y3)

        y4 = self.nn4(y3)
        out = self.sm(y4)

        return out
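
The pool-then-rewire pattern above, coarsening with graclus and then rebuilding kNN edges over the pooled nodes, can be isolated as follows (a sketch with assumed shapes, not the original model code):

import torch
from torch_geometric.nn import avg_pool_x, graclus, knn_graph

pos = torch.randn(100, 3)                       # one point cloud
batch = torch.zeros(100, dtype=torch.long)
edge_index = knn_graph(pos, k=16, batch=batch)  # fine-level edges
cluster = graclus(edge_index)
pos_c, batch_c = avg_pool_x(cluster, pos, batch)      # pooled positions
edge_index_c = knn_graph(pos_c, k=16, batch=batch_c)  # coarse-level edges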
Example #10
    def forward(self, data):
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
        x = F.elu(self.conv1(x, edge_index, edge_attr))

        weight = normalized_cut_2d(edge_index, data.pos)
        cluster = graclus(edge_index, weight, x.size(0))
        data.x = x
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        # Re-read the coarsened graph before the next conv layers.
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr

        x = self.conv2(x, edge_index, edge_attr)
        x = F.elu(self.conv3(x, edge_index, edge_attr))
        x = self.conv4(x, edge_index, edge_attr)
        x = F.elu(self.conv5(x, edge_index, edge_attr))
        x = self.conv6(x, edge_index, edge_attr)
        x = F.dropout(x, training=self.training)
        return F.log_softmax(x, dim=1)
Example #11
    def forward(self, graph):
        data = graph
        data.x = torch.cat([data.pos, data.x], dim=1)
        for i, monet_layer in enumerate(self.monet_layers[:-1]):
            data.x = F.relu(
                monet_layer(data.x, data.edge_index, data.edge_attr))
            weight = normalized_cut_2d(data.edge_index, data.pos)
            cluster = graclus(data.edge_index, weight, data.x.size(0))
            if i == 0:
                data.edge_attr = None
            data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = self.monet_layers[-1](data.x, data.edge_index, data.edge_attr)

        # Pool once to graph-level features, then run the MLP head.
        x = global_mean_pool(data.x, data.batch)
        for linear_layer in self.linear_layers[:-1]:
            x = F.relu(linear_layer(x))
            x = F.dropout(x, training=self.training)

        return F.log_softmax(self.linear_layers[-1](x), dim=1)
Example #12
def test_graclus():
    edge_index = torch.tensor([[0, 1], [1, 0]])
    assert graclus(edge_index).tolist() == [0, 0]
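
A hedged companion check (not part of the original suite; same imports assumed): with explicit edge weights, the two endpoints of the only edge still land in one cluster.

def test_graclus_weighted():
    edge_index = torch.tensor([[0, 1], [1, 0]])
    weight = torch.tensor([2.0, 2.0])
    cluster = graclus(edge_index, weight)
    assert cluster[0] == cluster[1]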
Example #13
    def forward(self, data):
        cluster = geom_nn.graclus(data.edge_index, num_nodes=data.x.size(0))
        data = geom_nn.max_pool(cluster, data)
        return data
Example #14
def apply_graclus_pooling(x, edge_index, batch, method="max"):
    cluster = graclus(edge_index)
    func = max_pool if method == "max" else avg_pool
    new_data = func(cluster, Batch(x=x, edge_index=edge_index, batch=batch))
    return new_data.x, new_data.edge_index, new_data.batch
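
Hypothetical usage of the helper above on a small random graph (names and shapes assumed):

import torch

x = torch.randn(6, 16)
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                           [1, 2, 3, 4, 5, 0]])
batch = torch.zeros(6, dtype=torch.long)
x_p, edge_index_p, batch_p = apply_graclus_pooling(x, edge_index, batch, method="max")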
Example #15
def mgpool(x, pos, edge_index, batch, mask=None):
    adj_values = torch.ones(edge_index.shape[1]).cuda()
    cluster = graclus(edge_index)
    cluster, perm = consecutive_cluster(cluster)

    index = torch.stack([cluster, torch.arange(0, x.shape[0]).cuda()], dim=0)
    values = torch.ones(cluster.shape[0], dtype=torch.float).cuda()
    uniq, inv, counts = torch.unique(cluster,
                                     return_inverse=True,
                                     return_counts=True)
    newsize = uniq.shape[0]

    origsize = x.shape[0]

    new_batch = pool_batch(perm, batch)
    # Compute random walk graph laplacian:
    laplacian_index, laplacian_weights = get_laplacian(edge_index,
                                                       normalization='rw')
    laplacian_index, laplacian_weights = torch_sparse.coalesce(
        laplacian_index, laplacian_weights, m=origsize, n=origsize)
    index, values = torch_sparse.coalesce(index, values, m=newsize,
                                          n=origsize)  # P^T matrix
    new_feat = torch_sparse.spmm(index,
                                 values,
                                 m=newsize,
                                 n=origsize,
                                 matrix=x)  # P^T X
    new_pos = torch_sparse.spmm(index,
                                values,
                                m=newsize,
                                n=origsize,
                                matrix=pos)  # P^T POS

    new_adj, new_adj_val = torch_sparse.spspmm(index,
                                               values,
                                               edge_index,
                                               adj_values,
                                               m=newsize,
                                               k=origsize,
                                               n=origsize,
                                               coalesced=True)  # P^T A
    index, values = torch_sparse.transpose(index,
                                           values,
                                           m=newsize,
                                           n=origsize,
                                           coalesced=True)  # P
    new_adj, new_adj_val = torch_sparse.spspmm(new_adj,
                                               new_adj_val,
                                               index,
                                               values,
                                               m=newsize,
                                               k=origsize,
                                               n=newsize,
                                               coalesced=True)  # (P^T A) P
    # Precompute QP :
    values = torch.ones(cluster.shape[0], dtype=torch.float).cuda()
    index, values = torch_sparse.spspmm(laplacian_index,
                                        laplacian_weights,
                                        index,
                                        values,
                                        m=origsize,
                                        k=origsize,
                                        n=newsize,
                                        coalesced=True)
    return new_adj, new_feat, new_pos, new_batch, index, values, origsize, newsize
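
A usage sketch for `mgpool` (illustrative shapes; CUDA tensors assumed, since the function calls `.cuda()` internally):

import torch
from torch_geometric.nn import knn_graph

x = torch.randn(50, 32, device='cuda')
pos = torch.randn(50, 3, device='cuda')
batch = torch.zeros(50, dtype=torch.long, device='cuda')
edge_index = knn_graph(pos, k=8, batch=batch)
(new_adj, new_feat, new_pos, new_batch,
 index, values, origsize, newsize) = mgpool(x, pos, edge_index, batch)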