Example #1
    def compute_laplacian(self, adj):
        adj_size = adj.shape[-2]
        deg_mat, adj = compute_deg_matrix(adj)
        if self.reg_mode == 0:
            # Combinatorial (unnormalized) Laplacian: L = D - A
            laplacian = deg_mat - adj
        else:
            # Random-walk normalized Laplacian: L = I - D^{-1} A
            laplacian = torch.eye(adj_size).to(adj.device) - torch.matmul(
                inverse_diag_mat(deg_mat), adj)
        # Alternative (disabled): scaled Laplacian I - A / lambda_max
        # lambda_max = find_largest_eigval(adj)  # might be approximated by max degree
        # laplacian = torch.eye(adj_size).to(adj.device) - adj / lambda_max
        if self.lap_hop > 1:
            # Propagate over lap_hop-hop neighborhoods by taking a matrix power
            laplacian = torch.matrix_power(laplacian, self.lap_hop)
        return laplacian
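
These examples rely on helper functions such as compute_deg_matrix and inverse_diag_mat that are not shown here. Below is a minimal sketch of what they might look like, assuming a dense (optionally batched) adjacency tensor; the real signatures and defaults in the library may differ:

import torch

def compute_deg_matrix(adj, inv=False, selfloop=False):
    # Hypothetical helper: return (diagonal degree matrix, adjacency),
    # optionally adding self-loops and/or inverting the degrees
    if selfloop:
        adj = adj + torch.eye(adj.shape[-1], device=adj.device)
    deg = torch.diag_embed(adj.sum(dim=-1))
    if inv:
        deg = inverse_diag_mat(deg)
    return deg, adj

def inverse_diag_mat(mat):
    # Hypothetical helper: invert a diagonal matrix, mapping zero entries to zero
    diag = torch.diagonal(mat, dim1=-2, dim2=-1)
    inv = torch.zeros_like(diag)
    nonzero = diag != 0
    inv[nonzero] = 1.0 / diag[nonzero]
    return torch.diag_embed(inv)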
Example #2
    def _select_leader(self, adj, x, nodes=None, **kwargs):

        adj_size = adj.shape[-2]
        # Apply the graph Laplacian to the node features, then take the
        # per-node norm of the filtered signal
        laplacian = self.compute_laplacian(adj).matmul(x)
        laplacian_norm = torch.norm(laplacian, dim=-1)  # b * n

        # Remove self-loops so each node is compared only against its neighbors
        adj_no_diag = adj - torch.diag_embed(
            torch.diagonal(adj.permute(1, 2, 0)))
        node_deg, _ = compute_deg_matrix(adj_no_diag, selfloop=False)
        # We want nodes where the following holds:
        # \sum_j w_{ij} x_j / \sum_j w_{ij} < x_i  ==>  D^{-1} A X < X, where A has no diagonal entries
        nei_laplacian_diff = (
            laplacian_norm.unsqueeze(-1) -
            torch.bmm(torch.matmul(inverse_diag_mat(node_deg), adj_no_diag),
                      laplacian_norm.unsqueeze(-1))).squeeze(-1)

        # shift the scores so that they are all non-negative
        min_val = torch.min(nei_laplacian_diff, -1, keepdim=True)[0]
        nei_laplacian_normalized = (nei_laplacian_diff -
                                    min_val) + torch.abs(min_val)

        if nodes is None:
            nodes = torch.ones_like(nei_laplacian_normalized)

        nei_laplacian_normalized = nei_laplacian_normalized * nodes.squeeze(
            -1).float()  # zero out unwanted (padded/fake) nodes

        k = self.k
        mask = nei_laplacian_normalized > 0
        if k is None:
            mask = nei_laplacian_diff * nodes.float().squeeze(-1) > 0
            # find the best maximum k for this batch
            k = torch.max(torch.sum(
                mask, dim=-1))  # maximum number of valid centroids in the batch
            # Note that here we relax the assumption by computing
            # \sum_{j \neq i} (s_i - a_{ij} s_j) > 0, rather than
            # \forall v_j, s_i - A_{ij} s_j > 0 as in the paper
        _, leader_idx = torch.topk(nei_laplacian_normalized,
                                   k=k,
                                   dim=-1,
                                   largest=True)  # select k best
        self.leader_idx = leader_idx.to(adj.device)
        return leader_idx, mask
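
The selection criterion above can also be exercised standalone. Below is a small sketch, using the hypothetical compute_deg_matrix and inverse_diag_mat helpers from after Example #1 and assuming the random-walk normalized Laplacian (reg_mode != 0, lap_hop = 1):

def leader_scores(adj, x):
    # Smoothness score s_i = ||(L x)_i|| with L = I - D^{-1} A
    n = adj.shape[-1]
    deg, _ = compute_deg_matrix(adj)
    lap = torch.eye(n, device=adj.device) - torch.matmul(inverse_diag_mat(deg), adj)
    s = torch.norm(lap.matmul(x), dim=-1)  # (b, n)
    # Compare each score against the degree-weighted average of its neighbors' scores
    avg_nei = torch.matmul(inverse_diag_mat(deg), adj).bmm(s.unsqueeze(-1)).squeeze(-1)
    return s - avg_nei  # positive entries mark candidate leaders

adj = torch.tensor([[[0., 1., 1., 0.],
                     [1., 0., 1., 0.],
                     [1., 1., 0., 1.],
                     [0., 0., 1., 0.]]])  # (b=1, n=4, n=4), no self-loops
x = torch.randn(1, 4, 3)
diff = leader_scores(adj, x)
leader_idx = torch.topk(diff, k=2, dim=-1).indices  # pick the 2 strongest leaders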
Example #3
def get_signal(adj, x):
    # Norm of the node features filtered by the random-walk normalized Laplacian I - D^{+} A
    deg_mat, adj = compute_deg_matrix(adj.squeeze(0))
    adj_size = adj.size(-1)
    laplacian = torch.eye(adj_size).to(adj.device) - torch.matmul(
        torch.pinverse(deg_mat), adj)
    return laplacian.matmul(x).norm()
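
A quick usage sketch (the graph and shapes are illustrative): get_signal expects the adjacency wrapped in a leading batch dimension, which squeeze(0) removes, and returns a single scalar measuring how non-smooth the signal x is on the graph:

adj = torch.tensor([[[0., 1., 0.],
                     [1., 0., 1.],
                     [0., 1., 0.]]])  # (1, 3, 3); squeeze(0) gives the 3x3 adjacency
x = torch.randn(3, 2)                 # one feature row per node
smoothness = get_signal(adj, x)       # scalar ||(I - D^{+} A) x||; small for smooth signals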
Example #4
def _lazy_random_walk(adj):
    # Degree-normalized transition matrix D^{-1} A of a random walk on the graph
    deg, adj = compute_deg_matrix(adj, inv=False, selfloop=False)
    deg = deg**-1
    # Isolated nodes have zero degree; replace the resulting inf entries with 0
    deg = deg.masked_fill(torch.isinf(deg), 0)
    return torch.matmul(deg, adj)
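
A short sanity check, again using the hypothetical compute_deg_matrix sketched after Example #1. Note that, despite the name, the returned matrix is the plain transition matrix D^{-1} A; a lazy walk would typically use (I + D^{-1} A) / 2. Each row belonging to a non-isolated node sums to 1:

adj = torch.tensor([[0., 1., 1.],
                    [1., 0., 0.],
                    [1., 0., 0.]])
walk = _lazy_random_walk(adj)
print(walk.sum(dim=-1))  # tensor([1., 1., 1.]) -- each row is a probability distribution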