Example #1
def forward(self):
    x = self.data.x
    # Wrap each adjacency as a SparseTensor after subtracting the sparse
    # identity `self.id`, i.e. feed the convolutions the adjacency without
    # self-loops.
    x = F.relu(self.conv1(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj1 - self.id)))
    x = F.dropout(x, training=self.training)
    x = self.conv2(x, SparseTensor.from_torch_sparse_coo_tensor(self.adj2 - self.id))
    return F.log_softmax(x, dim=1)
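This forward assumes a module that stores the graph, two convolution layers, adjacency matrices with self-loops, and a sparse identity. A minimal sketch of such a module, purely as an assumption inferred from the attribute names (the original class is not shown):

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_sparse import SparseTensor


class Net(torch.nn.Module):
    def __init__(self, data, num_features, num_classes, hidden=16):
        super().__init__()
        self.data = data
        self.conv1 = GCNConv(num_features, hidden)
        self.conv2 = GCNConv(hidden, num_classes)
        n = data.num_nodes
        # Sparse identity; forward() subtracts it to remove self-loops.
        self.id = torch.sparse_coo_tensor(
            torch.arange(n).repeat(2, 1), torch.ones(n), (n, n))
        # Adjacency stored with self-loops, as a torch sparse COO tensor.
        adj = torch.sparse_coo_tensor(
            data.edge_index, torch.ones(data.edge_index.size(1)), (n, n))
        self.adj1 = (adj + self.id).coalesce()
        self.adj2 = self.adj1

    # forward() as shown above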
Example #2
import numpy as np
from scipy import sparse
from scipy.sparse import tril
from torch_sparse import SparseTensor


def prune_adj(oriadj, non_zero_idx: int, percent: int):
    # Target number of entries to prune from the lower triangle: exclude the
    # diagonal, halve for symmetry, then take the requested percentage.
    original_prune_num = int(
        ((non_zero_idx - oriadj.size()[0]) / 2) * (percent / 100))
    adj = SparseTensor.from_torch_sparse_coo_tensor(oriadj).to_scipy()

    # Work on the strictly lower triangle of the symmetric matrix.
    low_adj = tril(adj, -1)
    non_zero_low_adj = low_adj.data[low_adj.data != 0]

    # First pass: zero out entries below the `percent`-th magnitude percentile.
    low_pcen = np.percentile(abs(non_zero_low_adj), percent)
    under_threshold = abs(low_adj.data) < low_pcen
    before = len(non_zero_low_adj)
    low_adj.data[under_threshold] = 0
    non_zero_low_adj = low_adj.data[low_adj.data != 0]
    after = len(non_zero_low_adj)

    # Second pass: if thresholding pruned too few entries, drop the smallest
    # remaining values until the target count is met.
    rest_pruned = original_prune_num - (before - after)
    if rest_pruned > 0:
        mask_low_adj = (low_adj.data != 0)
        # Replace explicit zeros with a large sentinel so argpartition
        # never selects them.
        low_adj.data[low_adj.data == 0] = 2000000
        flat_indices = np.argpartition(low_adj.data,
                                       rest_pruned - 1)[:rest_pruned]
        low_adj.data = np.multiply(low_adj.data, mask_low_adj)
        low_adj.data[flat_indices] = 0
    low_adj.eliminate_zeros()

    # Symmetrize, restore the diagonal, and convert back to a torch sparse
    # COO tensor (`device` is expected to be defined by the enclosing script).
    new_adj = low_adj + low_adj.transpose()
    new_adj = new_adj + sparse.eye(new_adj.shape[0])
    return SparseTensor.from_scipy(new_adj).to_torch_sparse_coo_tensor().to(
        device)
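For context, a minimal sketch of how prune_adj might be invoked; the random symmetric adjacency, the `device` global it relies on, and the 20% pruning rate are illustrative assumptions, not part of the original example.

import torch
from torch_sparse import SparseTensor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hypothetical input: a small symmetric adjacency with unit self-loops,
# stored as a torch sparse COO tensor (the format prune_adj expects).
dense = torch.rand(5, 5)
dense = ((dense + dense.t()) / 2).fill_diagonal_(1.0)
oriadj = dense.to_sparse().to(device)

nnz = SparseTensor.from_torch_sparse_coo_tensor(oriadj).nnz()
pruned = prune_adj(oriadj, non_zero_idx=nnz, percent=20)
print(pruned)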
Example #3
import torch
from torch_sparse import SparseTensor

SparseLayouts = ("coo", "csr", "csc")  # assumed module-level constant


def to_cpx(mat, layout="csr", dtype=None):
    """Convert ``mat`` to a CuPy sparse matrix in the requested layout."""
    assert layout in SparseLayouts
    import cupy as cp
    import cupyx.scipy as xcipy
    import cupyx.scipy.sparse  # make sure the sparse submodule is imported

    if isinstance(mat, torch.Tensor):
        assert mat.dim() == 2
        assert mat.is_cuda
        if mat.is_sparse:
            smt = SparseTensor.from_torch_sparse_coo_tensor(mat)
        else:
            smt = SparseTensor.from_dense(mat)

    elif isinstance(mat, SparseTensor):
        assert mat.dim() == 2
        assert mat.is_cuda()
        smt = mat

    elif isinstance(mat, xcipy.sparse.spmatrix):
        # Already a CuPy sparse matrix: only convert between layouts.
        assert mat.ndim == 2
        cls = {
            "csr": xcipy.sparse.csr_matrix,
            "csc": xcipy.sparse.csc_matrix,
            "coo": xcipy.sparse.coo_matrix,
        }[layout]
        return cls(mat)

    else:
        raise RuntimeError(f"{type(mat)} is not supported")

    shape = smt.sparse_sizes()

    # cp.asarray wraps the CUDA tensors zero-copy via __cuda_array_interface__;
    # matrices without explicit values get all-ones data.
    if layout == "coo":
        row, col, value = smt.coo()
        row = cp.asarray(row.detach())
        col = cp.asarray(col.detach())
        value = (cp.asarray(value.detach())
                 if smt.has_value() else cp.ones(smt.nnz(), dtype=dtype))
        return xcipy.sparse.coo_matrix((value, (row, col)), shape)
    elif layout == "csr":
        rowptr, col, value = smt.csr()
        rowptr = cp.asarray(rowptr.detach())
        col = cp.asarray(col.detach())
        value = (cp.asarray(value.detach())
                 if smt.has_value() else cp.ones(smt.nnz(), dtype=dtype))
        return xcipy.sparse.csr_matrix((value, col, rowptr), shape)
    elif layout == "csc":
        colptr, row, value = smt.csc()
        colptr = cp.asarray(colptr.detach())
        row = cp.asarray(row.detach())
        value = (cp.asarray(value.detach())
                 if smt.has_value() else cp.ones(smt.nnz(), dtype=dtype))
        return xcipy.sparse.csc_matrix((value, row, colptr), shape)
    else:
        raise RuntimeError(
            f"{layout} is not one of the valid sparse formats `coo`, `csr` and `csc`.")
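A hypothetical call, assuming CuPy and a CUDA device are available; the random input matrix is made up for illustration.

import torch

dense = torch.randn(4, 4, device="cuda")
dense = dense * (dense.abs() > 0.5)  # zero out small entries
csr_gpu = to_cpx(dense.to_sparse(), layout="csr")
print(csr_gpu.shape, csr_gpu.nnz)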
Example #4
import numpy as np
import torch
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix, spmatrix
from torch_sparse import SparseTensor

SparseLayouts = ("coo", "csr", "csc")  # assumed module-level constant


def to_scipy(mat, layout="csr", dtype=None):
    """Convert ``mat`` to a SciPy sparse matrix in the requested layout."""
    assert layout in SparseLayouts
    if isinstance(mat, torch.Tensor):
        # Dense and sparse COO torch tensors both go through SparseTensor.
        if not mat.is_sparse:
            smt = SparseTensor.from_dense(mat).to_scipy(layout, dtype)
        else:
            smt = SparseTensor.from_torch_sparse_coo_tensor(mat).to_scipy(
                layout, dtype)
    elif isinstance(mat, SparseTensor):
        smt = mat.to_scipy(layout, dtype)
    elif isinstance(mat, (spmatrix, np.ndarray)):
        # SciPy or NumPy input: construct the target layout directly.
        cls = {"csr": csr_matrix, "csc": csc_matrix, "coo": coo_matrix}[layout]
        smt = cls(mat)
    else:
        raise TypeError(f"{type(mat)} is not supported")

    return smt
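A quick sketch of usage with the two simplest input types; the tensors here are illustrative.

import numpy as np
import torch

t = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
print(to_scipy(t, layout="csr"))          # dense torch tensor -> CSR
print(to_scipy(np.eye(3), layout="coo"))  # NumPy array -> COO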
Example #5
import numpy as np
import torch
from scipy.sparse import spmatrix
from torch_sparse import SparseTensor


def to_torch_sparse(mat):
    """Convert ``mat`` to a torch_sparse SparseTensor."""
    if isinstance(mat, torch.Tensor):
        # Dense and sparse COO torch tensors are both supported.
        if not mat.is_sparse:
            stm = SparseTensor.from_dense(mat)
        else:
            stm = SparseTensor.from_torch_sparse_coo_tensor(mat)

    elif isinstance(mat, np.ndarray):
        stm = SparseTensor.from_dense(torch.as_tensor(mat))

    elif isinstance(mat, spmatrix):
        stm = SparseTensor.from_scipy(mat)

    elif isinstance(mat, SparseTensor):
        stm = mat
    else:
        raise TypeError(f"{type(mat)} is not supported")

    return stm
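All supported input types land on the same SparseTensor representation; a short illustrative check:

import numpy as np
import scipy.sparse as sp
import torch

print(to_torch_sparse(torch.eye(3)))
print(to_torch_sparse(np.eye(3)))
print(to_torch_sparse(sp.identity(3, format="csr")))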
Example #6
else:
    dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())
data = dataset[0]
model, data = Net(dataset, data, args).to(device), data.to(device)
checkpoint = torch.load(f"./pretrain_pytorch/{args.dataset}_model.pth.tar")
model.load_state_dict(checkpoint)

# Negative log-likelihood evaluated on the training nodes only.
loss = lambda m: F.nll_loss(m()[data.train_mask], data.y[data.train_mask])

# ADMM setup: both adjacency matrices become trainable variables, with
# Z1/Z2 (auxiliary) and U1/U2 (dual) initialized from the adjacency mask.
support1 = model.adj1  # sparse
support2 = model.adj2  # sparse
partial_adj_mask = support1.clone()
adj_variables = [support1, support2]
rho = 1e-3  # ADMM penalty parameter
non_zero_idx = SparseTensor.from_torch_sparse_coo_tensor(model.adj1).nnz()
print(non_zero_idx)
Z1 = U1 = Z2 = U2 = partial_adj_mask.clone()
model.adj1.requires_grad = True
model.adj2.requires_grad = True
adj_mask = partial_adj_mask.clone()


# Update the gradient of the adjacency matrices
# grads_vars: {name: torch.Tensor}
def update_gradients_adj(grads_vars, adj_mask):
    temp_grad_adj1 = 0
    var1 = None
    var2 = None
    temp_grad_adj2 = 0
    for key, var in grads_vars.items():
        ...  # (the loop body is truncated in this excerpt)
Example #7
import torch
from torch_geometric.nn import SAGEConv
from torch_sparse import SparseTensor

from data import load_data_file  # local helper returning a torch sparse COO adjacency

adj = load_data_file()
# PyG message-passing layers accept a SparseTensor in place of edge_index.
adj_sparse = SparseTensor.from_torch_sparse_coo_tensor(adj)

x = torch.randn((5, 10))  # 5 nodes, 10 features each
sage_conv = SAGEConv(10, 9, True)
out = sage_conv(x, adj_sparse)
print(out)
print(type(out))