Example #1
    # Requires: from typing import Optional; import torch;
    # from torch_sparse import SparseTensor
    def panentropy(self,
                   adj_t: SparseTensor,
                   dtype: Optional[int] = None) -> SparseTensor:

        # Start from a weighted identity matrix: weight[0] * I.
        tmp = SparseTensor.eye(adj_t.size(0),
                               adj_t.size(1),
                               has_value=True,
                               dtype=dtype,
                               device=adj_t.device())
        tmp = tmp.mul_nnz(self.weight[0])

        # Walk up the powers of adj_t, scaling the entries by
        # weight[i] at each step, and keep every intermediate term.
        outs = [tmp]
        for i in range(1, self.filter_size + 1):
            tmp = tmp @ adj_t
            tmp = tmp.mul_nnz(self.weight[i])
            outs += [tmp]

        # Stack the COO entries of all terms; coalesce() sums values at
        # duplicate (row, col) positions, producing the final filter matrix.
        row = torch.cat([out.storage.row() for out in outs], dim=0)
        col = torch.cat([out.storage.col() for out in outs], dim=0)
        value = torch.cat([out.storage.value() for out in outs], dim=0)

        out = SparseTensor(row=row,
                           col=col,
                           value=value,
                           sparse_sizes=adj_t.sparse_sizes()).coalesce()

        return out
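For reference, here is a dense mirror of what the method assembles, on a toy 3-node path graph. The graph and the weight vector `w` are illustrative stand-ins for a real adjacency and `self.weight` (with `filter_size = 3`), not part of the original module:

import torch
from torch_sparse import SparseTensor

# Toy undirected path graph 0-1-2 with unit edge weights.
row = torch.tensor([0, 1, 1, 2])
col = torch.tensor([1, 0, 2, 1])
A = SparseTensor(row=row, col=col, value=torch.ones(4),
                 sparse_sizes=(3, 3)).to_dense()

K, w = 3, torch.tensor([1.0, 0.5, 0.25, 0.125])  # hypothetical weights
tmp = w[0] * torch.eye(3)       # the weighted identity term
total = tmp.clone()
for i in range(1, K + 1):
    tmp = (tmp @ A) * w[i]      # mirrors tmp @ adj_t followed by mul_nnz
    total = total + tmp         # mirrors summing duplicates in coalesce()
print(total)  # matches panentropy(adj_t).to_dense() for these weights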
Example #2
import torch
from torch_sparse import SparseTensor


def laplace(adj: SparseTensor, lap_type=None):
    M, N = adj.sizes()
    assert M == N
    row, col, val = adj.coo()
    # Unweighted graphs carry no value tensor; default every edge to 1.
    val = col.new_ones(col.shape, dtype=adj.dtype()) if val is None else val
    deg = adj.sum(0)

    # Self-loop indices that will hold the diagonal entries.
    loop_index = torch.arange(N, device=adj.device())

    if lap_type in (None, "sym"):
        # Symmetric normalized Laplacian: I - D^{-1/2} A D^{-1/2}.
        deg05 = deg.pow(-0.5)
        deg05[deg05 == float("inf")] = 0  # isolated nodes: 0^{-1/2} -> inf
        wgt = deg05[row] * val * deg05[col]
        wgt = torch.cat([-wgt, val.new_ones(N)])

    elif lap_type == "rw":
        # Random-walk Laplacian: I - D^{-1} A.
        deg_inv = 1.0 / deg
        deg_inv[deg_inv == float("inf")] = 0
        wgt = deg_inv[row] * val
        wgt = torch.cat([-wgt, val.new_ones(N)])

    elif lap_type == "comb":
        # Combinatorial Laplacian: D - A.
        wgt = torch.cat([-val, deg])

    else:
        raise ValueError("Invalid laplace type: {}".format(lap_type))

    row = torch.cat([row, loop_index])
    col = torch.cat([col, loop_index])
    return SparseTensor(row=row, col=col, value=wgt, sparse_sizes=(M, N))
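A minimal check of the three variants on the same kind of toy graph; the 3-node path graph here is just an illustration:

import torch
from torch_sparse import SparseTensor

# Undirected path graph 0-1-2 with unit edge weights.
row = torch.tensor([0, 1, 1, 2])
col = torch.tensor([1, 0, 2, 1])
adj = SparseTensor(row=row, col=col, value=torch.ones(4),
                   sparse_sizes=(3, 3))

print(laplace(adj).to_dense())          # sym:  I - D^{-1/2} A D^{-1/2}
print(laplace(adj, "rw").to_dense())    # rw:   I - D^{-1} A
print(laplace(adj, "comb").to_dense())  # comb: D - A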
Example #3
import torch
from torch_sparse import SparseTensor


def degree_matrix(adj: SparseTensor, indeg=True):
    N = adj.size(-1)
    # Column sums give in-degrees, row sums give out-degrees.
    deg = adj.sum(0) if indeg else adj.sum(1)
    row = col = torch.arange(N, device=adj.device())
    # Diagonal matrix D with the degrees on the diagonal; the (row, col)
    # pairs are already ordered, so is_sorted=True skips a re-sort.
    return SparseTensor(
        row=row, col=col, value=deg, sparse_sizes=(N, N), is_sorted=True
    )
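On the same toy graph, the diagonal degree matrix recovers the combinatorial Laplacian from Example #2 as D - A:

import torch
from torch_sparse import SparseTensor

row = torch.tensor([0, 1, 1, 2])
col = torch.tensor([1, 0, 2, 1])
adj = SparseTensor(row=row, col=col, value=torch.ones(4),
                   sparse_sizes=(3, 3))

D = degree_matrix(adj)
print(D.to_dense())                   # diag([1., 2., 1.])
print(D.to_dense() - adj.to_dense())  # equals laplace(adj, "comb")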
Example #4
    # Requires: import torch; import torch.nn.functional as F;
    # from torch_sparse import SparseTensor, matmul;
    # from torch_geometric.nn.conv.gcn_conv import gcn_norm
    def forward(self, data, train_idx):
        n = data.graph['num_nodes']
        edge_index = data.graph['edge_index']
        edge_weight = None

        # Apply GCN normalization, then build the (transposed) adjacency.
        if isinstance(edge_index, torch.Tensor):
            edge_index, edge_weight = gcn_norm(
                edge_index, edge_weight, n, False)
            row, col = edge_index
            # Transposed if the graph is directed.
            adj_t = SparseTensor(row=col, col=row, value=edge_weight,
                                 sparse_sizes=(n, n))
        elif isinstance(edge_index, SparseTensor):
            adj_t = gcn_norm(edge_index, edge_weight, n, False)

        # Seed matrix y: known labels on the training nodes, zeros elsewhere.
        y = torch.zeros((n, self.out_channels)).to(adj_t.device())
        if data.label.shape[1] == 1:
            # Single-label task: one-hot encode the (int64) training labels.
            y[train_idx] = F.one_hot(data.label[train_idx],
                                     self.out_channels).squeeze(1).to(y)
        elif self.mult_bin:
            # Multiple binary tasks: two one-hot columns per task.
            y = torch.zeros((n, 2 * self.out_channels)).to(adj_t.device())
            for task in range(data.label.shape[1]):
                y[train_idx, 2 * task:2 * task + 2] = F.one_hot(
                    data.label[train_idx, task], 2).to(y)
        else:
            y[train_idx] = data.label[train_idx].to(y.dtype)

        # Label propagation: diffuse over `hops` steps per iteration, then
        # mix the result back with the seed labels via alpha.
        result = y.clone()
        for _ in range(self.num_iters):
            for _ in range(self.hops):
                result = matmul(adj_t, result)
            result *= self.alpha
            result += (1 - self.alpha) * y

        if self.mult_bin:
            # Keep only the positive-class column of each binary task.
            output = torch.zeros((n, self.out_channels)).to(result.device)
            for task in range(data.label.shape[1]):
                output[:, task] = result[:, 2 * task + 1]
            result = output

        return result
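The propagation loop above can be distilled into a self-contained function. This is a sketch of the single-label path only; `alpha`, `hops`, and `num_iters` stand in for the module attributes read from `self`:

import torch
from torch_sparse import SparseTensor, matmul

def label_propagate(adj_t: SparseTensor, y: torch.Tensor,
                    alpha: float = 0.9, hops: int = 2,
                    num_iters: int = 50) -> torch.Tensor:
    # y holds one-hot seed labels on known nodes and zeros elsewhere.
    result = y.clone()
    for _ in range(num_iters):
        for _ in range(hops):
            result = matmul(adj_t, result)         # diffuse one hop
        result = alpha * result + (1 - alpha) * y  # re-inject the seeds
    return result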