Example #1
    def solve_with_SVD_v2(self, adj, lamb=1):
        # negative-sampling (NS) version of GraRep

        # first construct the row-normalized transition matrix A_tilde and its powers A^1..A^k
        deg = torch_sparse.sum(adj, dim=1).view(-1, 1)
        deg_inv = 1. / deg
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)  # guard against zero-degree nodes
        A_tilde = torch_sparse.mul(adj, deg_inv)
        A = [A_tilde]
        if self.k >= 2:
            for i in range(1, self.k):
                A.append(A[i - 1].matmul(A_tilde))

        # then obtain W via low-rank SVD
        W = []
        for k, Ak in enumerate(A, 1):
            print(f'solving SVD with k={k}')
            tau_k = Ak.sum(dim=0).view(1, -1)

            # remove the conditional probability term from the objective to make
            # this a pure negative-sampling model: binarize the entries of A^k
            val = Ak.storage.value()
            Ak = Ak.set_value((val > 0).to(val.dtype), layout='coo')

            Xk = torch_sparse.mul(Ak, self.num_embeddings / (tau_k * lamb))
            val = torch.log(Xk.storage.value() + 1e-15).clamp_(min=0)  # shifted log, clipped at 0
            Xk = Xk.set_value(val, layout='coo')

            Xk = Xk.to_scipy('coo')
            u, s, vt = svds(Xk, k=self.embedding_dim)  # torch.svd_lowrank does not work due to a bug
            Wk = torch.tensor(u * s ** 0.5)
            W.append(Wk)

        return torch.cat(W, dim=1)
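
A minimal standalone sketch of the A_tilde construction the method starts from (the 4-node cycle graph and all names below are illustrative, not part of the original class):

import torch
import torch_sparse
from torch_sparse import SparseTensor

# toy undirected 4-cycle
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 0],
                           [1, 0, 2, 1, 3, 2, 0, 3]])
adj = SparseTensor.from_edge_index(edge_index, sparse_sizes=(4, 4)).fill_value(1.)

deg = torch_sparse.sum(adj, dim=1).view(-1, 1)
deg_inv = 1. / deg
deg_inv.masked_fill_(deg_inv == float('inf'), 0.)  # guard zero-degree nodes
A_tilde = torch_sparse.mul(adj, deg_inv)           # rows now sum to 1

A2 = A_tilde.matmul(A_tilde)  # 2-step transition probabilities (A^2)
print(A2.to_dense())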
Example #2
    def forward(self, x: Tensor, edge_index: Adj, size: Size = None) -> Tensor:
        """"""
        edge_weight: OptTensor = None
        if isinstance(edge_index, Tensor):
            num_nodes = size[1] if size is not None else x.size(self.node_dim)
            if self.add_self_loops:
                edge_index, _ = remove_self_loops(edge_index)
                edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

            row, col = edge_index[0], edge_index[1]
            deg_inv = 1. / degree(col, num_nodes=num_nodes).clamp_(1.)  # clamp avoids division by zero

            edge_weight = deg_inv[col]
            edge_weight[row == col] += self.diag_lambda * deg_inv  # diagonal enhancement

        elif isinstance(edge_index, SparseTensor):
            if self.add_self_loops:
                edge_index = set_diag(edge_index)

            col, row, _ = edge_index.coo()  # transposed storage
            deg_inv = 1. / sum(edge_index, dim=1).clamp_(1.)  # `sum` is torch_sparse.sum here

            edge_weight = deg_inv[col]
            edge_weight[row == col] += self.diag_lambda * deg_inv  # diagonal enhancement
            edge_index = edge_index.set_value(edge_weight, layout='coo')

        # propagate_type: (x: Tensor, edge_weight: OptTensor)
        out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
                             size=None)
        out = out @ self.weight + x @ self.root_weight

        if self.bias is not None:
            out += self.bias

        return out
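
The edge-weight computation above can be reproduced outside the layer; a hedged sketch on a 3-node toy graph (diag_lambda = 0.2 is an arbitrary choice):

import torch
from torch_geometric.utils import add_self_loops, degree, remove_self_loops

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
num_nodes, diag_lambda = 3, 0.2

edge_index, _ = remove_self_loops(edge_index)
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

row, col = edge_index[0], edge_index[1]
deg_inv = 1. / degree(col, num_nodes=num_nodes).clamp_(1.)

edge_weight = deg_inv[col]
edge_weight[row == col] += diag_lambda * deg_inv  # boost self-loop weights
print(edge_weight)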
Example #3
    def solve_with_SVD_v1(self, adj, lamb=1):
        # the original NCE version of GraRep

        # first construct the row-normalized transition matrix A_tilde and its powers A^1..A^k
        deg = torch_sparse.sum(adj, dim=1).view(-1, 1)
        deg_inv = 1. / deg
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)  # guard against zero-degree nodes
        A_tilde = torch_sparse.mul(adj, deg_inv)
        A = [A_tilde]
        if self.k >= 2:
            for i in range(1, self.k):
                A.append(A[i - 1].matmul(A_tilde))

        # then obtain W via low-rank SVD
        W = []
        C = []
        for k, Ak in enumerate(A, 1):
            print(f'solving SVD with k={k}')
            tau_k = Ak.sum(dim=0).view(1, -1)

            Xk = torch_sparse.mul(Ak, self.num_embeddings / (tau_k * lamb))
            val = torch.log(Xk.storage.value() + 1e-15).clamp_(min=0)  # shifted log, clipped at 0
            Xk = Xk.set_value(val, layout='coo')

            Xk = Xk.to_scipy('coo')
            u, s, vt = svds(Xk, k=self.embedding_dim)  # torch.svd_lowrank does not work due to a bug
            Wk = torch.tensor(u * s ** 0.5)
            Ck = torch.tensor(vt.T * s ** 0.5)
            W.append(Wk)
            C.append(Ck)

        W = torch.cat(W, dim=1)
        C = torch.cat(C, dim=1)  # context embeddings; computed but never returned
        return W
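
Both variants share the same shifted-log transform before the SVD; a toy dense illustration (all values made up) of what each entry becomes:

import torch

# entries become max(0, log(N * A_k[i, j] / (tau_k[j] * lamb)))
Ak = torch.tensor([[0.5, 0.5, 0.0],
                   [0.0, 0.5, 0.5],
                   [0.5, 0.0, 0.5]])
num_embeddings, lamb = 3, 1
tau_k = Ak.sum(dim=0, keepdim=True)

Xk = torch.log(Ak * num_embeddings / (tau_k * lamb) + 1e-15).clamp_(min=0)
print(Xk)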
Example #4
    def norm(self, edge_index):
        adj = edge_index
        deg = sum(adj, dim=1)  # `sum` is torch_sparse.sum here
        deg_inv = deg.pow_(-1)
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)  # zero-degree nodes
        adj = mul(adj, deg_inv.view((-1, 1)))  # row-wise: D^{-1} A
        return adj
Example #5
def gcn_norm(adj_t):
    adj_t = torch_sparse.fill_diag(adj_t, 1)  # add self-loops

    deg = torch_sparse.sum(adj_t, dim=1)
    deg_inv_sqrt = deg.pow_(-0.5)  # D^{-1/2}
    deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0.)  # zero-degree nodes

    adj_t = torch_sparse.mul(adj_t, deg_inv_sqrt.view(-1, 1))  # row-wise mul
    adj_t = torch_sparse.mul(adj_t, deg_inv_sqrt.view(1, -1))  # col-wise mul
    return adj_t
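
A quick usage sketch, assuming gcn_norm above is in scope with torch_sparse imported; on an undirected toy graph the result equals D^{-1/2} (A + I) D^{-1/2}, in contrast with the purely row-wise norm() in Example #4:

import torch
from torch_sparse import SparseTensor

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
adj_t = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3)).fill_value(1.)
print(gcn_norm(adj_t).to_dense())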
Example #6
def gcn_degree_norm(edge_index,
                    edge_weight=None,
                    num_nodes=None,
                    improved=False,
                    add_self_loops=True,
                    dtype=None):

    fill_value = 2. if improved else 1.

    if isinstance(edge_index, SparseTensor):
        adj_t = edge_index
        if not adj_t.has_value():
            adj_t = adj_t.fill_value(1., dtype=dtype)
        if add_self_loops:
            adj_t = fill_diag(adj_t, fill_value)
        deg = sum(adj_t, dim=1)  # `sum` is torch_sparse.sum here
        deg_inv = deg.pow_(-1)  # D^{-1}, not the symmetric D^{-1/2} of GCN
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)
        # adj_t = mul(adj_t, deg_inv.view(-1, 1))  # row-wise alternative
        adj_t = mul(adj_t, deg_inv.view(1, -1))  # column-wise normalization
        return adj_t

    else:
        num_nodes = maybe_num_nodes(edge_index, num_nodes)

        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ),
                                     dtype=dtype,
                                     device=edge_index.device)

        if add_self_loops:
            edge_index, tmp_edge_weight = add_remaining_self_loops(
                edge_index, edge_weight, fill_value, num_nodes)
            assert tmp_edge_weight is not None
            edge_weight = tmp_edge_weight

        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
        deg_inv = deg.pow_(-1)  # one-sided D^{-1} normalization
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)
        edge_weight = deg_inv[row] * edge_weight
        return edge_index, edge_weight
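
A usage sketch for the edge_index branch, assuming the function and its helpers (maybe_num_nodes, scatter_add, add_remaining_self_loops) are in scope; note that despite the gcn_* name this computes a one-sided D^{-1} weighting:

import torch

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
edge_index, edge_weight = gcn_degree_norm(edge_index, num_nodes=3)
print(edge_index)
print(edge_weight)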
Example #7
0
    def forward(self, x, edge_index, edge_attr=None, batch=None):
        if batch is None:
            batch = edge_index.new_zeros(x.size(0))
        num_node = x.size(0)

        k = F.relu(self.lin_2(x))

        A = SparseTensor.from_edge_index(edge_index=edge_index,
                                         edge_attr=edge_attr,
                                         sparse_sizes=(num_node, num_node))
        A_wave = fill_diag(A, 1)  # A + I (self-loops)

        s = A_wave @ k

        score = s.squeeze()
        perm = topk(score, self.ratio, batch)

        A = self.norm(A)

        K_neighbor = A * k.T
        x_neighbor = K_neighbor @ x

        # ----modified: average (rather than sum) over neighbors
        deg = sum(A, dim=1)  # `sum` is torch_sparse.sum here
        deg_inv = deg.pow_(-1)
        deg_inv.masked_fill_(deg_inv == float('inf'), 0.)
        x_neighbor = x_neighbor * deg_inv.view(-1, 1)
        # ----
        x_self = x * k

        x = x_neighbor * (
            1 - self.args.combine_ratio) + x_self * self.args.combine_ratio

        x = x[perm]
        batch = batch[perm]
        edge_index, edge_attr = filter_adj(edge_index,
                                           edge_attr,
                                           perm,
                                           num_nodes=s.size(0))

        return x, edge_index, edge_attr, batch, perm
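
The degree-averaging step marked "----modified" can be checked in isolation; a self-contained sketch with toy tensors (3 nodes, 2 features; all names illustrative):

import torch
import torch_sparse
from torch_sparse import SparseTensor

edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
A = SparseTensor.from_edge_index(edge_index, sparse_sizes=(3, 3)).fill_value(1.)
x_neighbor = A @ torch.randn(3, 2)  # weighted sum over neighbors

deg = torch_sparse.sum(A, dim=1)
deg_inv = deg.pow_(-1)
deg_inv.masked_fill_(deg_inv == float('inf'), 0.)
x_neighbor = x_neighbor * deg_inv.view(-1, 1)  # turn the sum into a mean
print(x_neighbor)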