示例#1
0
 def _transform(self, ranks: GraphSignal, **kwargs):
     """Split ranks by the sensitive-attribute separator and rescale each part to sum to 1."""
     group_mask = to_signal(ranks, self.separator)
     protected_part = ranks * group_mask
     remaining_part = ranks * (1 - group_mask)
     # safe_div yields 0 scaling when a part has zero total mass
     protected_scale = backend.safe_div(1., backend.sum(protected_part))
     remaining_scale = backend.safe_div(1., backend.sum(remaining_part))
     return protected_part * protected_scale + remaining_part * remaining_scale
示例#2
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Max-normalize both signals, then return the scores-weighted average of known scores."""
     known, predicted = self.to_numpy(scores)
     known = backend.safe_div(known, backend.max(known))
     predicted = backend.safe_div(predicted, backend.max(predicted))
     weighted_total = backend.sum(known * predicted)
     weight_mass = backend.sum(predicted)
     return backend.safe_div(weighted_total, weight_mass)
示例#3
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the KL-divergence of scores from known scores after casting both to distributions."""
     known, predicted = self.to_numpy(scores, normalization=True)
     eps = backend.epsilon()
     # shift each signal to strictly positive values so the log is defined,
     # then normalize to a probability distribution
     shifted_known = known - backend.min(known) + eps
     known_dist = backend.safe_div(shifted_known, backend.sum(shifted_known))
     shifted_predicted = predicted - backend.min(predicted) + eps
     predicted_dist = backend.safe_div(shifted_predicted, backend.sum(shifted_predicted))
     return backend.sum(predicted_dist * backend.log(predicted_dist / known_dist))
示例#4
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the pRule: the ratio of average scores between sensitive and non-sensitive groups."""
     sensitive, predicted = self.to_numpy(scores)
     protected_mass = backend.dot(predicted, sensitive)
     remaining_mass = backend.dot(predicted, 1 - sensitive)
     if protected_mass == 0 or remaining_mass == 0:
         return 0
     group_size = backend.sum(sensitive)
     protected_mean = backend.safe_div(protected_mass, group_size)
     remaining_mean = backend.safe_div(remaining_mass, backend.length(sensitive) - group_size)
     # divide the smaller mean by the larger one; this keeps the outcome in [0, 1]
     # and, as the original noted, makes the expression derivable
     if protected_mean <= remaining_mean:
         return protected_mean / remaining_mean
     return remaining_mean / protected_mean
示例#5
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the squared difference between group mean scores, scaled by node count."""
     sensitive, predicted = self.to_numpy(scores)
     node_count = backend.length(sensitive)
     group_size = backend.sum(sensitive)
     protected_mean = backend.safe_div(backend.dot(predicted, sensitive), group_size)
     remaining_mean = backend.safe_div(backend.dot(predicted, 1 - sensitive), node_count - group_size)
     return (protected_mean - remaining_mean) ** 2 * node_count
示例#6
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the ratio of internal edge mass captured by scores over its expected value."""
     if len(self.get_graph(scores)) == 0:
         return 0
     adjacency, predicted = self.to_numpy(scores)
     propagated = backend.conv(predicted, adjacency)
     internal_mass = backend.dot(propagated, predicted)
     # expectation excludes self-loops: (sum s)^2 minus the diagonal contribution sum s^2
     expected_mass = backend.sum(predicted) ** 2 - backend.sum(predicted ** 2)
     return backend.safe_div(internal_mass, expected_mass)
示例#7
0
def _cos_similarity(v, u, scores):
    """Cosine similarity between nodes u and v over their per-group score vectors.

    Args:
        v: First node identifier.
        u: Second node identifier.
        scores: Mapping of group to a {node: score} dict; absent nodes score 0.

    Returns:
        The cosine similarity of the two nodes' score vectors, or 0 when either
        vector is all-zero (via backend.safe_div).
    """
    dot = 0
    l2v = 0
    l2u = 0
    for group_scores in scores.values():
        ui = group_scores.get(u, 0)
        vi = group_scores.get(v, 0)
        l2u += ui * ui
        l2v += vi * vi
        # BUG FIX: must accumulate across groups; the original `dot = ui * vi`
        # discarded all but the last group's product while the norms still
        # accumulated over every group, skewing the similarity
        dot += ui * vi
    return backend.safe_div(dot, np.sqrt(l2u * l2v))
示例#8
0
    def _step(self, M, personalization, ranks, *args, **kwargs):
        """Perform one iteration step, writing the formula's output in-place into ranks."""
        ranks.np = self._formula(M, personalization, ranks, *args, **kwargs)
        # _formula may return a full GraphSignal; unwrap it to its raw backend array
        if isinstance(ranks.np, GraphSignal):
            ranks.np = ranks.np.np

        if isinstance(self.use_quotient, Postprocessor):
            # a Postprocessor instance replaces the default L1 normalization below
            ranks.np = self.use_quotient(ranks)
        elif self.use_quotient:
            # any other truthy value: normalize so the ranks sum to 1 (safe_div guards zero mass)
            ranks.np = backend.safe_div(ranks, backend.sum(ranks))
        if self.converge_to_eigenvectors:
            # feed the current ranks back as the personalization so iteration
            # drifts toward an eigenvector of the iteration operator
            personalization.np = ranks.np
示例#9
0
    def _start(self, M, personalization, ranks, sensitive, *args, **kwargs):
        """Initialize fairness-aware iteration state: per-group residual vectors (dR, dB),
        a reweighted personalization, and redistribution targets (xR, xB), then delegate
        to the parent's _start.
        """
        sensitive = to_signal(ranks, sensitive)
        outR = self.outR  # backend.conv(sensitive.np, M)
        outB = self.outB  # backend.conv(1.-sensitive.np, M)
        # phi: targeted share of mass for the sensitive group, scaled by target_prule
        phi = backend.sum(sensitive.np) / backend.length(
            sensitive.np) * self.target_prule
        dR = backend.repeat(0., len(sensitive.graph))
        dB = backend.repeat(0., len(sensitive.graph))

        # per-node case masks; (1 - case1) acts as logical NOT, products as logical AND
        # NOTE(review): mask assignment below assumes numpy-style boolean indexing — confirm backend
        case1 = outR < phi * (outR + outB)  # sensitive outflow below the phi share
        case2 = (1 - case1) * (outR != 0)   # at/above share with nonzero sensitive outflow
        case3 = (1 - case1) * (1 - case2)   # at/above share with zero sensitive outflow
        dR[case1] = phi - (1 - phi) / outB[case1] * outR[case1]
        dR[case3] = phi
        dB[case2] = (1 - phi) - phi / outR[case2] * outB[case2]
        dB[case3] = 1 - phi

        # reweight personalization: sensitive part normalized and scaled by target_prule,
        # non-sensitive part normalized by its own group mass, then renormalize the sum to 1
        personalization.np = backend.safe_div(sensitive.np * personalization.np, backend.sum(sensitive.np)) * self.target_prule \
                             + backend.safe_div(personalization.np * (1 - sensitive.np), backend.sum(1 - sensitive.np))
        personalization.np = backend.safe_div(personalization.np,
                                              backend.sum(personalization.np))
        L = sensitive.np  # group-membership indicator used for the redistribution split
        if self.redistributor is None or self.redistributor == "uniform":
            # uniform redistribution: every node weighted equally
            original_ranks = 1
        elif self.redistributor == "original":
            # redistribute proportionally to a plain PageRank of the personalization
            original_ranks = PageRank(
                alpha=self.alpha,
                preprocessor=default_preprocessor(assume_immutability=False,
                                                  normalization="col"),
                convergence=self.convergence)(personalization).np
        else:
            # custom redistributor: any callable mapping a signal to a signal
            original_ranks = self.redistributor(personalization).np

        self.dR = dR
        self.dB = dB
        # normalized redistribution targets per group (xR sensitive, xB non-sensitive)
        self.xR = backend.safe_div(original_ranks * L,
                                   backend.sum(original_ranks * L))
        self.xB = backend.safe_div(original_ranks * (1 - L),
                                   backend.sum(original_ranks * (1 - L)))
        super()._start(M, personalization, ranks, *args, **kwargs)
示例#10
0
def eigdegree(M):
    """
    Calculates the entropy-preserving eigenvalue degree to be used in matrix normalization.

    Uses power iteration on M until the dominant eigenvalue estimate converges
    (up to backend epsilon, at most 1000 iterations).

    Args:
        M: the adjacency matrix
    """
    # start from the all-ones vector over M's rows
    v = backend.repeat(1., M.shape[0])
    from pygrank.algorithms import ConvergenceManager
    convergence = ConvergenceManager(tol=backend.epsilon(), max_iters=1000)
    convergence.start()
    eig = 0
    # NOTE(review): divides by dot(v, v) (squared L2 norm), unlike the in-loop
    # normalization which uses dot(v, v)**0.5 — confirm this asymmetry is intended
    v = v / backend.dot(v, v)
    while not convergence.has_converged(eig):
        v = backend.conv(v, M)
        v = backend.safe_div(v, backend.dot(v, v)**0.5)  # renormalize to unit L2 norm
        eig = backend.dot(backend.conv(v, M), v)  # Rayleigh quotient estimate
    # elementwise division: presumably returns a per-node degree-like vector,
    # not a scalar — verify against callers
    return eig / (v * v)
示例#11
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the conductance of the fuzzy node set described by scores (lower is better)."""
     graph = self.get_graph(scores)
     if len(graph) == 0:
         return float('inf')
     adjacency, predicted = self.to_numpy(scores)
     peak = backend.max(predicted)
     if peak > self.max_rank:
         if not self.autofix:
             raise Exception("Normalize scores to be <= " + str(self.max_rank) + " for non-negative conductance")
         # autofix: rescale so the maximum score equals max_rank
         predicted = predicted * (self.max_rank / peak)
     propagated = backend.conv(predicted, adjacency)
     internal_mass = backend.dot(propagated, predicted)
     external_mass = backend.dot(propagated, self.max_rank - predicted)
     if not graph.is_directed():
         # undirected graphs: account for both directions of every edge
         external_mass += backend.dot(predicted, backend.conv(self.max_rank - predicted, adjacency))
         internal_mass *= 2
     if external_mass == 0:
         return float('inf')
     return backend.safe_div(external_mass, internal_mass, default=float('inf'))
示例#12
0
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     """Return the cosine similarity between known and predicted scores."""
     known, predicted = self.to_numpy(scores)
     # product of squared norms; its square root is the cosine denominator
     norm_product = backend.dot(known, known) * backend.dot(predicted, predicted)
     return backend.safe_div(backend.dot(known, predicted), norm_product ** 0.5)