Example no. 1
 def _retrieve_power(self, ranks_power, M):
     # Cache one propagated product per convergence iteration so repeated
     # lookups within the same iteration reuse the stored result.
     if self.__active_dict is not None:
         if self.convergence.iteration not in self.__active_dict:
             self.__active_dict[self.convergence.iteration] = backend.conv(
                 ranks_power,
                 M) if self.krylov_dims is None else ranks_power @ M
         return self.__active_dict[self.convergence.iteration]
     return backend.conv(ranks_power,
                         M) if self.krylov_dims is None else ranks_power @ M
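The method above memoizes one matrix-vector product per convergence iteration. A minimal standalone sketch of the same pattern, using a plain dict and numpy in place of pygrank's backend (all names here are illustrative, not part of the library):

import numpy as np

def retrieve_power(cache, iteration, ranks_power, M):
    # Compute ranks_power @ M once per iteration and serve later
    # calls within the same iteration from the cache.
    if iteration not in cache:
        cache[iteration] = ranks_power @ M
    return cache[iteration]

cache = {}
M = np.array([[0.0, 1.0], [1.0, 0.0]])
r = np.array([1.0, 0.0])
first = retrieve_power(cache, 0, r, M)
again = retrieve_power(cache, 0, r, M)  # served from the cache
assert first is again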
Example no. 2
 def _step(self, M, personalization, ranks, *args, **kwargs):
     if self.convergence.iteration > len(self.params):
         return 0
     param = self.params[self.convergence.iteration - 1]
     if param == 0:
         return ranks
     if param == 1:
         ranks.np = backend.conv(ranks, M).np
         return ranks
     ranks.np = (backend.conv(ranks, M) * param + ranks * (1 - param)).np
     return ranks  # return the updated signal, consistent with the branches above
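Each iteration consumes one coefficient from self.params and blends the propagated signal back into the current one. Stripped of the signal wrapper, the recurrence is a per-step convex mix; a small numpy sketch under that reading (toy matrix and coefficients, not library code):

import numpy as np

def mixed_propagation(M, p, params):
    # Step i: ranks <- param * (ranks @ M) + (1 - param) * ranks
    ranks = p.copy()
    for param in params:
        if param == 0:
            continue  # a zero coefficient leaves ranks unchanged
        ranks = param * (ranks @ M) + (1 - param) * ranks
    return ranks

M = np.array([[0.0, 1.0], [1.0, 0.0]])
p = np.array([1.0, 0.0])
print(mixed_propagation(M, p, [0.5, 0.25]))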
Example no. 3
def arnoldi_iteration(A: BackendGraph, b: BackendPrimitive, n: int):
    """Computes a basis of the (n + 1)-Krylov subspace of A: the space
    spanned by {b, Ab, ..., A^n b}.

    Source: https://en.wikipedia.org/wiki/Arnoldi_iteration

    Arguments:
      A: m × m array
      b: initial vector (length m)
      n: dimension of Krylov subspace, must be >= 1

    Returns:
      Q: m x (n + 1) array, the columns are an orthonormal basis of the
        Krylov subspace.
      h: (n + 1) x n array, A on basis Q. It is upper Hessenberg.
    """
    h = [[0 for _ in range(n)] for _ in range(n + 1)]
    Q = [backend.self_normalize(b)]
    for k in range(1, n):
        v = backend.conv(Q[k - 1], A)
        for j in range(k):
            h[j][k - 1] = backend.dot(Q[j], v)
            v = v - h[j][k - 1] * Q[j]
        h[k][k - 1] = backend.dot(v, v)**0.5
        Q.append(v / h[k][k - 1] if h[k][k - 1] != 0 else v * 0)
    return backend.combine_cols(Q), h
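The iteration can be sanity-checked with plain numpy: the columns of Q should come out orthonormal and satisfy the Arnoldi relation A Q_n = Q_{n+1} H. A self-contained sketch in the textbook column convention (independent of the backend abstraction above):

import numpy as np

def arnoldi(A, b, n):
    # Plain-numpy Arnoldi iteration producing n + 1 basis columns.
    m = A.shape[0]
    h = np.zeros((n + 1, n))
    Q = np.zeros((m, n + 1))
    Q[:, 0] = b / np.linalg.norm(b)
    for k in range(1, n + 1):
        v = A @ Q[:, k - 1]
        for j in range(k):  # orthogonalize against previous columns
            h[j, k - 1] = Q[:, j] @ v
            v = v - h[j, k - 1] * Q[:, j]
        h[k, k - 1] = np.linalg.norm(v)
        Q[:, k] = v / h[k, k - 1] if h[k, k - 1] > 1e-12 else 0
    return Q, h

rng = np.random.default_rng(0)
A = rng.random((5, 5))
Q, h = arnoldi(A, rng.random(5), 3)
assert np.allclose(Q.T @ Q, np.eye(4), atol=1e-8)   # orthonormal basis
assert np.allclose(A @ Q[:, :3], Q @ h, atol=1e-8)  # Arnoldi relation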
Example no. 4
import warnings
from scipy.sparse import diags

def krylov_base(M, personalization, krylov_space_degree):
    warnings.warn(
        "Krylov approximation is not stable yet (results may differ in future versions)"
    )
    # TODO: throw exception for non-symmetric matrix
    personalization = backend.to_primitive(personalization)
    base = [
        personalization / backend.dot(personalization, personalization)**0.5
    ]
    base_norms = []
    alphas = []
    for j in range(0, krylov_space_degree):
        v = base[j]
        w = backend.conv(v, M)
        a = backend.dot(v, w)
        alphas.append(a)
        next_w = w - a * v
        if j > 0:
            next_w -= base[j - 1] * base_norms[j - 1]
        next_w_norm = (backend.sum(next_w**2))**0.5
        base_norms.append(next_w_norm)
        if j != krylov_space_degree - 1:
            base.append(next_w / next_w_norm)
    # Tridiagonal Lanczos matrix: alphas on the diagonal and the first
    # krylov_space_degree - 1 residual norms (betas) on the off-diagonals.
    H = diags([alphas, base_norms[:-1], base_norms[:-1]], [0, -1, 1])
    V = backend.combine_cols(base)  #V = np.column_stack(base)
    return V, H
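For a symmetric M this is the Lanczos three-term recurrence, so the basis and tridiagonal matrix should satisfy V^T M V = H. A pure-numpy sketch of the recurrence and that check (illustrative only; random symmetric matrix):

import numpy as np
from scipy.sparse import diags

def lanczos(M, p, degree):
    # Three-term Lanczos recurrence; M is assumed symmetric.
    v = p / np.linalg.norm(p)
    base, alphas, betas = [v], [], []
    for j in range(degree):
        w = M @ base[j]
        a = base[j] @ w
        alphas.append(a)
        w = w - a * base[j]
        if j > 0:
            w = w - betas[j - 1] * base[j - 1]
        betas.append(np.linalg.norm(w))
        if j != degree - 1:
            base.append(w / betas[j])
    H = diags([alphas, betas[:-1], betas[:-1]], [0, -1, 1]).toarray()
    return np.column_stack(base), H

rng = np.random.default_rng(0)
A = rng.random((6, 6))
A = A + A.T  # symmetrize
V, H = lanczos(A, rng.random(6), 3)
assert np.allclose(V.T @ A @ V, H, atol=1e-6)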
Example no. 5
def eigdegree(M):
    """
    Calculates the entropy-preserving eigenvalue degree to be used in matrix normalization.
    Args:
        M: the adjacency matrix
    """
    v = backend.repeat(1., M.shape[0])
    from pygrank.algorithms import ConvergenceManager
    convergence = ConvergenceManager(tol=backend.epsilon(), max_iters=1000)
    convergence.start()
    eig = 0
    v = v / backend.dot(v, v)**0.5  # normalize the starting vector
    while not convergence.has_converged(eig):
        # power iteration: propagate, renormalize, track the Rayleigh quotient
        v = backend.conv(v, M)
        v = backend.safe_div(v, backend.dot(v, v)**0.5)
        eig = backend.dot(backend.conv(v, M), v)
    return eig / (v * v)
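The loop is ordinary power iteration: propagate, renormalize, and watch the Rayleigh quotient settle. A minimal numpy version of the eigenvalue part, with a simple tolerance in place of ConvergenceManager (illustrative, not the library function):

import numpy as np

def dominant_eigenvalue(M, tol=1e-12, max_iters=1000):
    # Power iteration: v tends to the dominant eigenvector and the
    # Rayleigh quotient v.T M v to the dominant eigenvalue.
    v = np.ones(M.shape[0])
    v = v / np.linalg.norm(v)
    eig = 0.0
    for _ in range(max_iters):
        v = M @ v
        v = v / np.linalg.norm(v)
        new_eig = v @ (M @ v)
        if abs(new_eig - eig) < tol:
            return new_eig
        eig = new_eig
    return eig

M = np.array([[2.0, 1.0], [1.0, 2.0]])
print(dominant_eigenvalue(M))  # ~3, the largest eigenvalue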
Example no. 6
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     if len(self.get_graph(scores)) == 0:
         return 0
     adjacency, scores = self.to_numpy(scores)
     neighbors = backend.conv(scores, adjacency)
     internal_edges = backend.dot(neighbors, scores)
     expected_edges = backend.sum(scores) ** 2 - backend.sum(scores ** 2) # without self-loops
     return backend.safe_div(internal_edges, expected_edges)
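The arithmetic is easy to trace on a toy adjacency matrix: neighbors aggregates each node's scored neighborhood, the numerator is the score mass sitting on internal edges, and the denominator is the mass expected without self-loops. A hand-rolled numpy trace (illustrative values, not the library measure):

import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=float)
scores = np.array([1.0, 1.0, 0.0])  # membership scores of a candidate community

neighbors = scores @ A                                    # scored neighborhood per node
internal_edges = neighbors @ scores                       # edge mass inside the community
expected_edges = scores.sum() ** 2 - (scores ** 2).sum()  # without self-loops
print(internal_edges / expected_edges)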
Example no. 7
 def normalization(self, M):
     import scipy.sparse
     sensitive = self.sensitive
     phi = self.phi
     outR = backend.conv(sensitive.np, M)
     outB = backend.conv(1. - sensitive.np, M)
     case1 = outR < phi * (outR + outB)
     case2 = ~case1 & (outR != 0)  # boolean masks, so the assignments below index element-wise
     case3 = ~(case1 | case2)
     d = backend.repeat(0., backend.length(outR))  # float zeros to avoid integer truncation
     d[case1] = (1 - phi) / outB[case1]
     d[case2] = phi / outR[case2]
     d[case3] = 1
     Q = scipy.sparse.spdiags(d, 0, *M.shape)
     M = M + Q * M
     self.outR = outR
     self.outB = outB
     return M
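The three masks split nodes by how much of their outgoing rank mass reaches the sensitive group relative to the target fraction phi. A toy numpy trace of the case logic (illustrative values only):

import numpy as np

phi = 0.5
outR = np.array([0.2, 0.8, 0.0])  # mass flowing to the sensitive group
outB = np.array([0.8, 0.2, 1.0])  # mass flowing to everyone else

case1 = outR < phi * (outR + outB)  # too little sensitive mass
case2 = ~case1 & (outR != 0)        # enough sensitive mass, nonzero
case3 = ~(case1 | case2)            # no sensitive mass at all

d = np.zeros(len(outR))
d[case1] = (1 - phi) / outB[case1]
d[case2] = phi / outR[case2]
d[case3] = 1.0
print(d)  # per-node scaling placed on the diagonal of Q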
Example no. 8
 def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
     graph = self.get_graph(scores)
     if len(graph) == 0:
         return float('inf')
     adjacency, scores = self.to_numpy(scores)
     if backend.max(scores) > self.max_rank:
         if self.autofix:
             scores = scores * (self.max_rank / backend.max(scores))
         else:
             raise Exception("Normalize scores to be <= " + str(self.max_rank) + " for non-negative conductance")
     neighbors = backend.conv(scores, adjacency)
     internal_edges = backend.dot(neighbors, scores)
     external_edges = backend.dot(neighbors, self.max_rank-scores)
     if not graph.is_directed():
         external_edges += backend.dot(scores, backend.conv(self.max_rank-scores, adjacency))
         internal_edges *= 2
     if external_edges == 0:
         return float('inf')
     return backend.safe_div(external_edges, internal_edges, default=float('inf'))
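The two dot products separate edge mass inside the community from mass crossing its boundary, and for undirected graphs both directions of each boundary edge are counted. A small numpy mirror of the same arithmetic with binary scores (illustrative, not the library class):

import numpy as np

A = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
scores = np.array([1.0, 1.0, 1.0, 0.0])  # nodes 0-2 form the community
max_rank = 1.0

neighbors = scores @ A
internal_edges = neighbors @ scores               # mass on internal edges
external_edges = neighbors @ (max_rank - scores)  # mass crossing the boundary
external_edges += scores @ ((max_rank - scores) @ A)  # undirected: both directions
internal_edges *= 2
print(external_edges / internal_edges)  # smaller means a better-separated community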
Example no. 9
def krylov_error_bound(V,
                       H,
                       M,
                       personalization,
                       measure=measures.Mabs,
                       max_powers=1):
    personalization = personalization / backend.dot(personalization,
                                                    personalization)**0.5
    krylov_dims = V.shape[1]
    krylov_result = backend.eye(int(krylov_dims))
    errors = list()
    for power in range(max_powers + 1):
        errors.append(
            measure(personalization)(krylov2original(V, krylov_result,
                                                     int(krylov_dims))))
        if power < max_powers:
            krylov_result = krylov_result @ H
            personalization = backend.conv(personalization, M)
    return max(errors)
Example no. 10
 def _formula(self, M, personalization, ranks, *args, **kwargs):
     ret = backend.conv(ranks*self.sqrt_degrees_left, M) * self.sqrt_degrees \
            + personalization * self.personalization_skew
     return ret
Example no. 11
 def _formula(self, M, personalization, ranks, *args, **kwargs):
     # TODO: return self.alpha * (ranks * M + backend.sum(ranks[self.is_dangling]) * personalization) + (1 - self.alpha) * personalization
     return backend.conv(
         ranks, M) * self.alpha + personalization * (1 - self.alpha)
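This is the classic damped PageRank update; iterated to a fixed point it solves r = alpha * r M + (1 - alpha) * p. A compact numpy power-iteration sketch under that reading (toy row-stochastic matrix; the helper name is hypothetical):

import numpy as np

def pagerank(M, p, alpha=0.85, tol=1e-9, max_iters=1000):
    # Fixed-point iteration for r = alpha * (r @ M) + (1 - alpha) * p.
    ranks = p.copy()
    for _ in range(max_iters):
        next_ranks = alpha * (ranks @ M) + (1 - alpha) * p
        if np.abs(next_ranks - ranks).sum() < tol:
            return next_ranks
        ranks = next_ranks
    return ranks

M = np.array([[0.0, 1.0, 0.0],
              [0.5, 0.0, 0.5],
              [0.0, 1.0, 0.0]])  # row-normalized path graph
p = np.ones(3) / 3
print(pagerank(M, p))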
Example no. 12
 def _formula(self, M, personalization, ranks, sensitive, *args, **kwargs):
     deltaR = backend.sum(ranks * self.dR)
     deltaB = backend.sum(ranks * self.dB)
     return (backend.conv(ranks, M) + deltaR * self.xR + deltaB *
             self.xB) * self.alpha + personalization * (1 - self.alpha)
Example no. 13
 def _formula(self, M, personalization, ranks, *args, **kwargs):
     a = self.alpha * self.t / self.convergence.iteration
     return personalization + a * (backend.conv(ranks, M) *
                                   personalization) - ranks
Example no. 14
 def _formula(self, M, personalization, ranks, *args, **kwargs):
     ret = (backend.conv(ranks, M) * self.degrees + personalization *
            self.absorption) / (self.absorption + self.degrees)
     return ret
Example no. 15
    def _tune(self, graph=None, personalization=None, *args, **kwargs):
        #graph_dropout = kwargs.get("graph_dropout", 0)
        #kwargs["graph_dropout"] = 0
        previous_backend = backend.backend_name()
        personalization = to_signal(graph, personalization)
        graph = personalization.graph
        if self.tuning_backend is not None and self.tuning_backend != previous_backend:
            backend.load_backend(self.tuning_backend)
        backend_personalization = to_signal(
            personalization, backend.to_array(personalization.np))
        #training, validation = split(backend_personalization, 0.8)
        #training2, validation2 = split(backend_personalization, 0.6)
        #measure_weights = [1, 1, 1, 1, 1]
        #propagated = [training.np, validation.np, backend_personalization.np, training2.np, validation2.np]

        measure_values = [None] * (self.num_parameters + self.autoregression)
        M = self.ranker_generator(measure_values).preprocessor(graph)

        #for _ in range(10):
        #    backend_personalization.np = backend.conv(backend_personalization.np, M)
        training, validation = split(backend_personalization, 0.8)
        training1, training2 = split(training, 0.5)

        propagated = [training1.np, training2.np]
        measures = [
            self.measure(backend_personalization, training1),
            self.measure(backend_personalization, training2)
        ]
        #measures = [self.measure(validation, training), self.measure(training, validation)]

        if self.basis == "krylov":
            for i in range(len(measure_values)):
                measure_values[i] = [
                    measure(p) for p, measure in zip(propagated, measures)
                ]
                propagated = [backend.conv(p, M) for p in propagated]
        else:
            basis = [
                arnoldi_iteration(M, p, len(measure_values))[0]
                for p in propagated
            ]
            for i in range(len(measure_values)):
                measure_values[i] = [
                    float(measure(base[:, i]))
                    for base, measure in zip(basis, measures)
                ]
        measure_values = backend.to_primitive(measure_values)
        mean_value = backend.mean(measure_values, axis=0)
        measure_values = measure_values - mean_value
        best_parameters = measure_values
        measure_weights = [1] * measure_values.shape[1]
        if self.autoregression != 0:
            #vals2 = -measure_values-mean_value
            #measure_values = np.concatenate([measure_values, vals2-np.mean(vals2, axis=0)], axis=1)
            window = backend.repeat(1. / self.autoregression,
                                    self.autoregression)
            beta1 = 0.9
            beta2 = 0.999
            beta1t = 1
            beta2t = 1
            rms = window * 0
            momentum = window * 0
            error = float('inf')
            while True:
                beta1t *= beta1
                beta2t *= beta2
                prev_error = error
                parameters = backend.copy(measure_values)
                for i in range(len(measure_values) - len(window) - 2, -1, -1):
                    parameters[i, :] = backend.dot(
                        window,
                        measure_values[(i + 1):(i + len(window) + 1), :])
                errors = (parameters - measure_values
                          ) * measure_weights / backend.sum(measure_weights)
                for j in range(len(window)):
                    gradient = 0
                    for i in range(len(measure_values) - len(window) - 1):
                        gradient += backend.dot(measure_values[i + j + 1, :],
                                                errors[i, :])
                    momentum[j] = beta1 * momentum[j] + (
                        1 - beta1) * gradient  #*np.sign(window[j])
                    rms[j] = beta2 * rms[j] + (1 - beta2) * gradient * gradient
                    window[j] -= 0.01 * momentum[j] / (1 - beta1t) / (
                        (rms[j] / (1 - beta2t))**0.5 + 1.E-8)
                    #window[j] -= 0.01*gradient*np.sign(window[j])
                error = backend.mean(backend.abs(errors))
                if error == 0 or abs(error - prev_error) / error < 1.E-6:
                    best_parameters = parameters
                    break
        best_parameters = backend.mean(best_parameters[:self.num_parameters, :]
                                       * backend.to_primitive(measure_weights),
                                       axis=1) + backend.mean(mean_value)

        if self.tunable_offset is not None:
            div = backend.max(best_parameters)
            if div != 0:
                best_parameters /= div
            measure = self.tunable_offset(validation, training)
            base = basis[0] if self.basis != "krylov" else None
            best_offset = optimize(
                lambda params: -measure.best_direction() * measure(
                    self._run(training, [(best_parameters[i] + params[
                        2]) * params[0]**i + params[1] for i in range(
                            len(best_parameters))], base, *args, **kwargs)),
                #lambda params: - measure.evaluate(self._run(training, best_parameters + params[0], *args, **kwargs)),
                max_vals=[1, 0, 0],
                min_vals=[0, 0, 0],
                deviation_tol=0.005,
                parameter_tol=1,
                partitions=5,
                divide_range=2)
            #best_parameters += best_offset[0]
            best_parameters = [
                (best_parameters[i] + best_offset[2]) * best_offset[0]**i +
                best_offset[1] for i in range(len(best_parameters))
            ]

        best_parameters = backend.to_primitive(best_parameters)
        if backend.sum(backend.abs(best_parameters)) != 0:
            best_parameters /= backend.mean(backend.abs(best_parameters))
        if self.tuning_backend is not None and self.tuning_backend != previous_backend:
            best_parameters = [
                float(param) for param in best_parameters
            ]  # convert parameters to backend-independent list
            backend.load_backend(previous_backend)
        #kwargs["graph_dropout"] = graph_dropout
        if self.basis != "krylov":
            return Tautology(), self._run(
                personalization, best_parameters, *args,
                **kwargs)  # TODO: make this unnecessary
        return self.ranker_generator(best_parameters), personalization
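The inner while loop of _tune is a hand-rolled Adam optimizer over the autoregressive window weights (note the beta1/beta2 moment accumulators and the bias-corrected update). Isolated from the tuner, the update rule looks like this generic numpy sketch (not the tuner itself):

import numpy as np

def adam_step(w, grad, momentum, rms, beta1t, beta2t,
              lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    # One Adam update with bias-corrected first/second moment estimates.
    momentum[:] = beta1 * momentum + (1 - beta1) * grad
    rms[:] = beta2 * rms + (1 - beta2) * grad * grad
    w -= lr * (momentum / (1 - beta1t)) / ((rms / (1 - beta2t)) ** 0.5 + eps)
    return w

# Minimize ||w - target||^2 with Adam.
w = np.zeros(3)
target = np.array([1.0, -2.0, 0.5])
momentum, rms = np.zeros(3), np.zeros(3)
beta1t = beta2t = 1.0
for _ in range(2000):
    beta1t *= 0.9
    beta2t *= 0.999
    grad = 2 * (w - target)
    w = adam_step(w, grad, momentum, rms, beta1t, beta2t)
print(np.round(w, 3))  # approaches target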