Example #1
def test_max_pool_x():
    cluster = torch.tensor([0, 1, 0, 1, 2, 2])
    x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
    batch = torch.tensor([0, 0, 0, 0, 1, 1])
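    # six nodes in three clusters; the batch vector puts the first four
    # nodes in graph 0 and the last two in graph 1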

    out = max_pool_x(cluster, x, batch)
    assert out[0].tolist() == [[5, 6], [7, 8], [11, 12]]
    assert out[1].tolist() == [0, 0, 1]

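    # with `size`, the result is a dense tensor of batch_size * size rows,
    # zero-padded for graphs that have fewer than `size` clusters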
    out = max_pool_x(cluster, x, batch, size=2)
    assert out.tolist() == [[5, 6], [7, 8], [11, 12], [0, 0]]
Example #2
    def forward(self, data):
        data.x = F.elu(
            self.bn1(self.conv1(data.x, data.edge_index, data.edge_attr)))
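        # voxel_grid assigns each node to a spatial voxel of the given size;
        # max_pool coarsens the graph accordingly and recomputes Cartesian
        # edge attributes on the pooled graph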
        cluster = voxel_grid(data.pos, data.batch, size=4)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block1(data)
        cluster = voxel_grid(data.pos, data.batch, size=6)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block2(data)
        cluster = voxel_grid(data.pos, data.batch, size=24)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block3(data)
        cluster = voxel_grid(data.pos, data.batch, size=64)
        x = max_pool_x(cluster, data.x, data.batch, size=8)

        # If your torch-geometric version is older than roughly 1.3.2 (not all
        # versions were tested), max_pool_x with `size` returns a tensor rather
        # than a tuple, so use x.view() instead of x[0].view():
        # x = x.view(-1, self.fc1.weight.size(1))
        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #3
    def forward(self, data):
        data.x = F.elu(
            self.bn1(self.conv0(data.x, data.edge_index, data.edge_attr)))
        cluster = voxel_grid(data.pos, data.batch, size=[4, 3])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv1(data)
        cluster = voxel_grid(data.pos, data.batch, size=[16, 12])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv2(data)
        cluster = voxel_grid(data.pos, data.batch, size=[30, 23])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv3(data)
        cluster = voxel_grid(data.pos, data.batch, size=[60, 45])
        x = max_pool_x(cluster, data.x, data.batch, size=16)
        # x = max_pool_x(cluster, data.x, data.batch)

        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #4
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)

        #x = global_mean_pool(x, batch)
        x_min = torch_scatter.scatter_min(x, batch, dim=0)[0]
        gather_idxs = batch.expand(x.shape[1], -1).t()
        gather_mins = torch.gather(x_min, 0, gather_idxs)
        s = F.relu(-gather_mins)
        x = x + s
        x = self.aggregator(x, batch)
        s_out = self.aggregator(s, batch)
        x = x - s_out

        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #5
    def forward(self, data):

        # act = nn.Tanhshrink()
        act = F.relu
        # act = nn.LeakyReLU(0.25)

        # first conv block
        data.x = act(self.conv1(
            data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster0, data.batch)
        data = community_pooling(cluster, data)

        # second conv block
        data.x = act(self.conv2(
            data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster1, data.batch)
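        # max_pool_x pools node features only; the coarsened graph structure
        # is not needed by the fully connected head below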
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # FC
        x = scatter_mean(x, batch, dim=0)
        x = act(self.fc1(x))
        x = self.fc2(x)
        #x = F.dropout(x, training=self.training)

        return x
Example #6
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index))
        data.x = self.bn1(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[4,4])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index))
        data.x = self.bn2(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[6,6])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        
        data.x = F.elu(self.conv3(data.x, data.edge_index))
        data.x = self.bn3(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[20,20])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        
        data.x = F.elu(self.conv4(data.x, data.edge_index))
        data.x = self.bn4(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[32,32])
        x = max_pool_x(cluster, data.x, data.batch, size=32)
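        # NOTE: newer torch-geometric versions return a tuple here; use
        # x[0].view(...) in that case (see the note in Example #2)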
        
        x = x.view(-1, self.fc1.weight.size(1))

        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)

        return F.log_softmax(x, dim=1)
Example #7
    def forward(self, data):
        data.x = self.datanorm * data.x
        data.x = self.inputnet(data.x)

        data.edge_index = to_undirected(
            knn_graph(data.x,
                      self.k,
                      data.batch,
                      loop=False,
                      flow=self.edgeconv1.flow))
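        # rebuild the kNN graph in the current feature space and symmetrize
        # it before applying the EdgeConv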
        data.x = self.edgeconv1(data.x, data.edge_index)

        weight = normalized_cut_2d(data.edge_index, data.x)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data)

        data.edge_index = to_undirected(
            knn_graph(data.x,
                      self.k,
                      data.batch,
                      loop=False,
                      flow=self.edgeconv2.flow))
        data.x = self.edgeconv2(data.x, data.edge_index)

        weight = normalized_cut_2d(data.edge_index, data.x)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)

        x = global_max_pool(x, batch)

        return self.output(x).squeeze(-1)
Example #8
    def forward(self, data):

        # act = nn.Tanhshrink()
        act = F.relu
        # act = nn.LeakyReLU(0.25)

        data.x = act(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = community_detection(data.internal_edge_index,
                                      data.num_nodes,
                                      edge_attr=None,
                                      batches=data.batches)
        data = community_pooling(cluster, data)

        data.x = act(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = community_detection(data.internal_edge_index,
                                      data.num_nodes,
                                      edge_attr=None)
        x, batch = max_pool_x(cluster, data.x, data.batch)

        x = scatter_mean(x, batch, dim=0)
        x = act(self.fc1(x))
        x = self.fc2(x)
        #x = F.dropout(x, training=self.training)

        return x
Example #9
    def forward(self, data):

        pos, edge_index, batch = data.pos, data.edge_index, data.batch
        real_batch_size = pos.size(0) // self.nr_points

        # Build first edges
        edge_index = knn_graph(pos, self.k, batch, loop=False)

        # extract features in 3D
        _, _, features_dd, _ = self.ds1(pos, edge_index, None)

        # graclus clustering
        cluster = graclus(edge_index)

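        # average positions per cluster, max-pool features per cluster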
        pos_gra, batch_gra = avg_pool_x(cluster, pos, batch)
        features_gra, _ = max_pool_x(cluster, features_dd, batch)

        # kNN over pooled features
        with torch.no_grad():
            edge_index_gra = knn_graph(features_gra.norm(dim=2),
                                       self.k,
                                       batch_gra,
                                       loop=False)

        # DD2
        _, _, features_dd2, _ = self.dd2(pos_gra, edge_index_gra, features_gra)

        y1 = self.nn1(features_dd2)

        y1_pool, _ = max_pool_x(batch_gra, y1, batch_gra)
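        # passing the batch vector as the cluster assignment max-pools all
        # nodes of each graph into one vector (a global max pool)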

        y1_pool = torch.nn.functional.relu(y1_pool)
        y1_pool = self.bn1(y1_pool)

        y2 = self.nn2(y1_pool)
        y2 = torch.nn.functional.relu(y2)
        y2 = self.bn2(y2)

        y3 = self.nn3(y2)
        y3 = torch.nn.functional.relu(y3)
        y3 = self.bn3(y3)

        y4 = self.nn4(y3)
        out = self.sm(y4)

        return out
Example #10
    def forward(self, sample):
        x, edge_index = sample.x, sample.edge_index

        # Dropout layer
        # edge_index = self.dropout_edges(edge_index, dropout=0.2)

        x = self.dense_input(x, self.empty_edges)
        x = F.gelu(x)

        x = self.input(x, edge_index)
        x = F.gelu(x)
        x = self.conv1(x, edge_index)
        x = F.gelu(x)

        # if self.pooling_layers > 1:
        #     batch = torch.tensor([0 for _ in x], dtype=torch.long, device=self.device)
        #     pooled = self.topkpool1(x, edge_index, batch=batch)
        #     x, edge_index = pooled[0], pooled[1]

        x = self.conv2(x, edge_index)
        x = F.gelu(x)

        if self.pooling_layers > 0:
            batch = torch.tensor([0 for _ in x], dtype=torch.long, device=self.device)
            pooled = self.topkpool2(x, edge_index, batch=batch)
            x, edge_index = pooled[0], pooled[1]

        x = self.conv3(x, edge_index)
        x = F.gelu(x)

        # For large graphs
        # while len(x) > 8:
        #     batch = torch.tensor([0 for _ in x], dtype=torch.long, device=self.device)
        #     pooled = self.topkpool3(x, edge_index, batch=batch)
        #     x, edge_index = pooled[0], pooled[1]
        #     x = self.conv4(x, edge_index)
        #     x = F.gelu(x)

        batch = torch.tensor([0 for _ in x], dtype=torch.long, device=self.device)
        # sort_pool also works but has the same problem: the output layer
        # learns the order of the pooled nodes; with k = 3, try shuffling the nodes
        if self.final_pooling == "avg_pool_x":
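            # assign nodes round-robin to self.final_nodes clusters so the
            # pooled output has a fixed size regardless of graph size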
            cluster = torch.as_tensor([i % self.final_nodes for i in range(len(x))], device=self.device)
            x, _ = avg_pool_x(cluster, x, batch)
        elif self.final_pooling == "sort_pooling":
            x = global_sort_pool(x, batch, self.final_nodes)
        elif self.final_pooling == "topk" or self.final_pooling == "asap" or self.final_pooling == "sag":
            pooled = self.last_pooling_layer(x, edge_index)
            x = pooled[0]
        elif self.final_pooling == "max_pool_x":
            cluster = torch.as_tensor([i % self.final_nodes for i in range(len(x))], device=self.device)
            x, _ = max_pool_x(cluster, x, batch)
            # (x2, cluster2) = avg_pool_x(cluster, x, batch)
            # x = torch.cat([x1.view(-1), x2.view(-1)])

        return self.output(x.view(-1))
Example #11
    def forward(self, data):
        data.x = F.relu(self.conv1(data.x, data.edge_index))
        data.x = F.dropout(data.x, training=self.training)

        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.x, batch = max_pool_x(cluster, data.x, data.batch)
        data.x = global_mean_pool(data.x, batch)

        data.x = self.fc1(data.x)
        data.x = F.log_softmax(data.x, dim=1)
        return data.x
Example #12
    def forward(self, data):
        act = F.relu
        data_ext = data.clone()

        # EXTERNAL INTERACTION GRAPH
        # first conv block
        data.x = act(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster0, data.batch)
        data = community_pooling(cluster, data)

        # second conv block
        data.x = act(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = get_preloaded_cluster(data.cluster1, data.batch)
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # INTERNAL INTERACTION GRAPH
        # first conv block
        data_ext.x = act(
            self.conv1_ext(data_ext.x, data_ext.edge_index,
                           data_ext.edge_attr))
        cluster = get_preloaded_cluster(data_ext.cluster0, data_ext.batch)
        data_ext = community_pooling(cluster, data_ext)

        # second conv block
        data_ext.x = act(
            self.conv2_ext(data_ext.x, data_ext.edge_index,
                           data_ext.edge_attr))
        cluster = get_preloaded_cluster(data_ext.cluster1, data_ext.batch)
        x_ext, batch_ext = max_pool_x(cluster, data_ext.x, data_ext.batch)
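        # max_pool_x pooled per cluster; scatter_mean below reduces each
        # branch to one embedding per graph before concatenation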

        # FC
        x = scatter_mean(x, batch, dim=0)
        x_ext = scatter_mean(x_ext, batch_ext, dim=0)

        x = torch.cat([x, x_ext], dim=1)
        x = act(self.fc1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.fc2(x)

        return x
Example #13
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)
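        # the second pooling keeps only node features; global_mean_pool then
        # produces one embedding per graph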

        x = global_mean_pool(x, batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #14
    def forward(self, data):
        x = F.relu(self.conv1(data.x, data.edge_index))
        cluster = graclus(data.edge_index, num_nodes=x.shape[0])
        data = max_pool(
            cluster, Data(x=x, batch=data.batch, edge_index=data.edge_index))
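        # wrapping the activated features in a fresh Data object lets max_pool
        # coarsen x, edge_index, and batch together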

        x = F.relu(self.conv2(data.x, data.edge_index))
        cluster = graclus(data.edge_index, num_nodes=x.shape[0])
        x, batch = max_pool_x(cluster, x, data.batch)

        x = global_mean_pool(x, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return x
Example #15
    def forward(self, x):
        data = x
        data.x = F.relu(
            self.bn1(self.conv1(data.x, data.edge_index, data.edge_attr)))

        data.x = self.layer1(data)
        data.x = self.layer2(data.x, data.edge_index, data.edge_attr)
        data.x = self.layer3(data.x, data.edge_index, data.edge_attr)
        data.x = self.layer4(data.x, data.edge_index, data.edge_attr)

        # clustering for SplineConv
        cluster = voxel_grid(data.pos, data.batch, size=4)
        x = max_pool_x(cluster, data.x, data.batch, size=4)
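        # NOTE: newer torch-geometric versions return a tuple here; use
        # x[0].view(...) in that case (see the note in Example #2)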
        x = x.view(-1, self.fc.weight.size(1))
        x = self.fc(x)

        return x
Example #16
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
        data = max_pool(cluster, data, transform=transform)

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
        x = max_pool_x(cluster, data.x, data.batch, size=4)

        x = x.view(-1, self.fc1.weight.size(1))
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #17
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
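        # flatten the one-dimensional edge attributes into a plain weight
        # vector, which graclus uses to guide its matching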
        cluster = graclus(
            data.edge_index,
            torch.reshape(data.edge_attr, (data.edge_attr.shape[0], )),
            data.x.size(0))
        data = max_pool(cluster, data)

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = graclus(
            data.edge_index,
            torch.reshape(data.edge_attr, (data.edge_attr.shape[0], )),
            data.x.size(0))
        x, batch = max_pool_x(cluster, data.x, data.batch)

        x = global_mean_pool(x, batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #18
    def forward(self, data):
        data = self.conv1(data)
        cluster = voxel_grid(data.pos, data.batch, size=2)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv2(data)
        cluster = voxel_grid(data.pos, data.batch, size=4)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv3(data)
        cluster = voxel_grid(data.pos, data.batch, size=7)
        x = max_pool_x(cluster, data.x, data.batch, size=25)
        # x = max_pool_x(cluster, data.x, data.batch)

        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #19
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        # calculate similarity between nodes
        weight = normalized_cut_2d(data.edge_index, data.pos)
        # graph clustering without the need for eigenvectors
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        # pool and coarsen the graph: all nodes within the same cluster are
        # represented as one node, then the transform is applied
        data = max_pool(cluster, data, transform=transform)

        # second conv block
        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        # max-pool node features according to the clustering defined in cluster
        x, batch = max_pool_x(cluster, data.x, data.batch)

        # compute batch-wise graph-level outputs by averaging node features
        # across the node dimension
        x = global_mean_pool(x, batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #20
    def forward(self, x, batch: OptTensor = None):
        
        if batch is None:
            batch = torch.zeros(x.size()[0], dtype=torch.int64, device=x.device)
        
        '''Embedding1: Intermediate Latent space features (hiddenDim)'''
        x_emb = self.inputnet(x)   

        '''KNN(k neighbors) over intermediate Latent space features'''     
        for ec in self.edgeconvs:
            edge_index = knn_graph(x_emb, self.k, batch, loop=False, flow=ec.flow)
            x_emb = x_emb + ec(x_emb, edge_index)
    
        '''
        [1]
        Embedding2: Final Latent Space embedding coords from x,y,z to ncats_out
        '''
        out = self.output(x_emb)
        #plot = self.plotlayer(out)


        '''KNN(k neighbors) over Embedding2 features''' 
        edge_index = knn_graph(out, self.k, batch, loop=False, flow=ec.flow)
        
        ''' 
        use Embedding1 to build an edge classifier
        inputnet_cat is residual to inputnet
        '''
        x_cat = self.inputnet_cat(x) + x_emb

        '''
        [2]
        Compute Edge Categories Convolution over Embedding1
        '''
        for ec in self.edgecatconvs:            
            x_cat = x_cat + ec(torch.cat([x_cat, x_emb, x], dim=1), edge_index)
        
        edge_scores = self.edge_classifier(torch.cat([x_cat[edge_index[0]], 
                                                      x_cat[edge_index[1]]], 
                                                      dim=1)).squeeze()
        

        '''
        use the predicted graph to generate disjoint subgraphs
        these are our physics objects
        '''
        objects = UnionFind(x.size()[0])
        good_edges = edge_index[:,torch.argmax(edge_scores, dim=1) > 0]
        good_edges_cpu = good_edges.cpu().numpy() 

        for edge in good_edges_cpu.T:
            objects.union(edge[0],edge[1])
        cluster_map = torch.from_numpy(np.array([objects.find(i) for i in range(x.shape[0])], 
                                                dtype=np.int64)).to(x.device)
        cluster_roots, inverse = torch.unique(cluster_map, return_inverse=True)
        # remap roots to [0, ..., nclusters-1]
        cluster_map = torch.arange(cluster_roots.size()[0], 
                                   dtype=torch.int64, 
                                   device=x.device)[inverse]
        

        '''
        [3]
        use Embedding1 to learn segmented cluster properties;
        inputnet_prop is residual to inputnet
        '''
        x_prop = self.inputnet_prop(x) + x_emb
        # now we accumulate over all selected disjoint subgraphs
        # to define per-object properties
        for ec in self.propertyconvs:
            x_prop = x_prop + ec(torch.cat([x_prop, x_emb, x], dim=1), good_edges)        
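        # the union-find cluster ids act as the cluster assignment, so
        # max_pool_x pools the learned properties per predicted object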
        props_pooled, cluster_batch = max_pool_x(cluster_map, x_prop, batch)
        cluster_props = self.property_predictor(props_pooled)    

        return out, edge_scores, edge_index, cluster_map, cluster_props, cluster_batch