def arnoldi_iteration(A: BackendGraph, b: BackendPrimitive, n: int):
    """Computes a basis of the (n + 1)-Krylov subspace of A: the space spanned by {b, Ab, ..., A^n b}.
    Source: https://en.wikipedia.org/wiki/Arnoldi_iteration

    Arguments:
        A: m x m array
        b: initial vector (length m)
        n: dimension of Krylov subspace, must be >= 1

    Returns:
        Q: m x (n + 1) array, the columns are an orthonormal basis of the Krylov subspace.
        h: (n + 1) x n array, A on basis Q. It is upper Hessenberg.
    """
    h = [[0 for _ in range(n)] for _ in range(n + 1)]
    Q = [backend.self_normalize(b)]
    for k in range(1, n):
        v = backend.conv(Q[k - 1], A)
        for j in range(k):
            h[j][k - 1] = backend.dot(Q[j], v)
            v = v - h[j][k - 1] * Q[j]
        h[k][k - 1] = backend.dot(v, v) ** 0.5
        Q.append(v / h[k][k - 1] if h[k][k - 1] != 0 else v * 0)
    return backend.combine_cols(Q), h
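# Illustrative, self-contained numpy sketch of the classical Arnoldi recurrence that the
# backend-based implementation above adapts. It is not part of the library and the helper
# name is hypothetical; it mainly makes the defining relation A @ Q[:, :n] ≈ Q @ h easy to
# verify on a small dense matrix.
def _arnoldi_numpy_sketch(A, b, n):
    import numpy as np
    m = A.shape[0]
    Q = np.zeros((m, n + 1))
    h = np.zeros((n + 1, n))
    Q[:, 0] = b / np.linalg.norm(b)
    for k in range(n):
        v = A @ Q[:, k]
        for j in range(k + 1):
            h[j, k] = Q[:, j] @ v       # project onto previously found basis vectors
            v = v - h[j, k] * Q[:, j]   # orthogonalize (modified Gram-Schmidt)
        h[k + 1, k] = np.linalg.norm(v)
        if h[k + 1, k] < 1.E-12:        # invariant subspace reached; stop early
            break
        Q[:, k + 1] = v / h[k + 1, k]
    return Q, h
# Example check on a small random matrix: Q, h = _arnoldi_numpy_sketch(A, np.ones(A.shape[0]), 3)
# should satisfy np.allclose(A @ Q[:, :3], Q @ h), with the columns of Q orthonormal.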
def krylov_base(M, personalization, krylov_space_degree):
    warnings.warn("Krylov approximation is not stable yet (results may differ in future versions)")
    # TODO: throw exception for non-symmetric matrix
    personalization = backend.to_primitive(personalization)
    base = [personalization / backend.dot(personalization, personalization) ** 0.5]
    base_norms = []
    alphas = []
    for j in range(0, krylov_space_degree):
        v = base[j]
        w = backend.conv(v, M)
        a = backend.dot(v, w)
        alphas.append(a)
        next_w = w - a * v
        if j > 0:
            next_w -= base[j - 1] * base_norms[j - 1]
        next_w_norm = (backend.sum(next_w ** 2)) ** 0.5
        base_norms.append(next_w_norm)
        if j != krylov_space_degree - 1:
            base.append(next_w / next_w_norm)
    H = diags([alphas, base_norms[1:], base_norms[1:]], [0, -1, 1])
    V = backend.combine_cols(base)
    #V = np.column_stack(base)
    return V, H
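# Usage sketch (illustrative): for a symmetric normalized adjacency matrix M and a
# personalization vector p, the pair returned above approximately satisfies V^T M V = H,
# with H tridiagonal (Lanczos recurrence). A graph filter f(M) p can then be approximated
# in the small space as ||p|| * V @ f(H) @ e_1, which is what krylov2original-style
# reconstruction relies on. The call below is hypothetical, `some_filter_of` is a
# placeholder, and `diags` is assumed to be scipy.sparse.diags imported at module level.
#
#     V, H = krylov_base(M, p, krylov_space_degree=10)
#     approximation = krylov2original(V, some_filter_of(H), 10)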
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    known_scores, scores = self.to_numpy(scores)
    #thresh = backend.min(scores[known_scores!=0])
    #scores = 1/(1+np.exp(-scores/thresh+1))
    eps = backend.epsilon()
    ret = -backend.dot(known_scores, backend.log(scores + eps)) \
          - backend.dot(1 - known_scores, backend.log(1 - scores + eps))
    return ret
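# The quantity above is the (unnormalized) binary cross-entropy between known scores y and
# evaluated scores p: CE = -sum_v [ y_v * log(p_v + eps) + (1 - y_v) * log(1 - p_v + eps) ],
# where eps = backend.epsilon() guards against log(0). For example, a node with y_v = 1 and
# p_v = 0.9 contributes about -log(0.9) ≈ 0.105 to the total.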
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    sensitive, scores = self.to_numpy(scores)
    p1 = backend.dot(scores, sensitive)
    p2 = backend.dot(scores, 1 - sensitive)
    if p1 == 0 or p2 == 0:
        return 0
    s = backend.sum(sensitive)
    p1 = backend.safe_div(p1, s)
    p2 = backend.safe_div(p2, backend.length(sensitive) - s)
    if p1 <= p2:  # this branch keeps the implementation differentiable
        return p1 / p2
    return p2 / p1
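# In the above, p1 and p2 are the average scores of the sensitive and non-sensitive groups
# respectively, so the returned value is the pRule ratio min(p1, p2) / max(p1, p2); e.g.,
# group averages of 0.6 and 0.8 yield 0.75, and perfectly balanced averages yield 1.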
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    sensitive, scores = self.to_numpy(scores)
    p1 = backend.dot(scores, sensitive)
    p2 = backend.dot(scores, 1 - sensitive)
    s = backend.sum(sensitive)
    n = backend.length(sensitive)
    p1 = backend.safe_div(p1, s)
    p2 = backend.safe_div(p2, n - s)
    #if p1 <= p2*self.target_pRule:
    #    p2 *= self.target_pRule
    #elif p2 <= p1*self.target_pRule:
    #    p1 *= self.target_pRule
    #else:
    #    return 0
    return (p1 - p2) ** 2 * n
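# Here the measure is the squared difference of the two group averages scaled by the number
# of nodes, (p1 - p2)^2 * n, i.e. a smooth fairness penalty that is 0 when the sensitive and
# non-sensitive groups receive equal average scores.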
def eigdegree(M):
    """
    Calculates the entropy-preserving eigenvalue degree to be used in matrix normalization.
    Args:
        M: the adjacency matrix
    """
    v = backend.repeat(1., M.shape[0])
    from pygrank.algorithms import ConvergenceManager
    convergence = ConvergenceManager(tol=backend.epsilon(), max_iters=1000)
    convergence.start()
    eig = 0
    v = v / backend.dot(v, v)
    while not convergence.has_converged(eig):
        v = backend.conv(v, M)
        v = backend.safe_div(v, backend.dot(v, v) ** 0.5)
        eig = backend.dot(backend.conv(v, M), v)
    return eig / (v * v)
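# For reference, a minimal numpy sketch of the same power-iteration idea used above
# (illustrative only; fixed iteration count instead of a ConvergenceManager, helper name
# hypothetical, symmetric non-negative M assumed):
def _dominant_eigenvalue_numpy_sketch(M, iterations=1000):
    import numpy as np
    v = np.ones(M.shape[0])
    for _ in range(iterations):
        v = M @ v                   # repeated multiplication amplifies the dominant eigenvector
        v = v / np.linalg.norm(v)   # renormalize to avoid overflow
    return v @ (M @ v)              # Rayleigh quotient estimates the dominant eigenvalue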
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    if len(self.get_graph(scores)) == 0:
        return 0
    adjacency, scores = self.to_numpy(scores)
    neighbors = backend.conv(scores, adjacency)
    internal_edges = backend.dot(neighbors, scores)
    expected_edges = backend.sum(scores) ** 2 - backend.sum(scores ** 2)  # without self-loops
    return backend.safe_div(internal_edges, expected_edges)
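# Reading of the quadratic forms above: internal_edges = s^T A s accumulates edge weights
# between high-scoring nodes, while expected_edges = (sum s)^2 - sum(s^2) counts all ordered
# pairs of distinct nodes weighted by their scores; their ratio is a fuzzy link density of
# the community induced by the scores.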
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    graph = self.get_graph(scores)
    if len(graph) == 0:
        return float('inf')
    adjacency, scores = self.to_numpy(scores)
    if backend.max(scores) > self.max_rank:
        if self.autofix:
            scores = scores * (self.max_rank / backend.max(scores))
        else:
            raise Exception("Normalize scores to be <= " + str(self.max_rank) + " for non-negative conductance")
    neighbors = backend.conv(scores, adjacency)
    internal_edges = backend.dot(neighbors, scores)
    external_edges = backend.dot(neighbors, self.max_rank - scores)
    if not graph.is_directed():
        external_edges += backend.dot(scores, backend.conv(self.max_rank - scores, adjacency))
        internal_edges *= 2
    if external_edges == 0:
        return float('inf')
    return backend.safe_div(external_edges, internal_edges, default=float('inf'))
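# Reading of the computation above: for scores s in [0, max_rank], internal_edges = s^T A s
# measures volume kept inside the fuzzy community defined by the scores, while the
# external_edges terms measure volume crossing its boundary (both edge directions are
# accumulated for undirected graphs, with internal_edges doubled accordingly). The returned
# external / internal ratio is the conductance-style score, with float('inf') for degenerate cases.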
def krylov_error_bound(V, H, M, personalization, measure=measures.Mabs, max_powers=1):
    personalization = personalization / backend.dot(personalization, personalization) ** 0.5
    krylov_dims = V.shape[1]
    krylov_result = backend.eye(int(krylov_dims))
    errors = list()
    for power in range(max_powers + 1):
        errors.append(measure(personalization)(krylov2original(V, krylov_result, int(krylov_dims))))
        if power < max_powers:
            krylov_result = krylov_result @ H
            personalization = backend.conv(personalization, M)
    return max(errors)
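# Hypothetical usage sketch: after V, H = krylov_base(M, p, k), a call such as
#     error = krylov_error_bound(V, H, M, p, measure=measures.Mabs, max_powers=1)
# compares the Krylov-space reconstruction of M^t p (t = 0 .. max_powers) against the exact
# powers under the given measure and reports the worst case, which can guide the choice of
# the subspace degree k.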
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    known_scores, scores = self.to_numpy(scores)
    return backend.dot(known_scores, scores)
def evaluate(self, scores: GraphSignalData) -> BackendPrimitive:
    known_scores, scores = self.to_numpy(scores)
    divide = backend.dot(known_scores, known_scores) * backend.dot(scores, scores)
    return backend.safe_div(backend.dot(known_scores, scores), divide ** 0.5)
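# The value above is the cosine similarity <known, scores> / (||known|| * ||scores||), which
# lies in [0, 1] for non-negative score vectors and equals 1 only when the two are proportional.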
def _tune(self, graph=None, personalization=None, *args, **kwargs):
    #graph_dropout = kwargs.get("graph_dropout", 0)
    #kwargs["graph_dropout"] = 0
    previous_backend = backend.backend_name()
    personalization = to_signal(graph, personalization)
    graph = personalization.graph
    if self.tuning_backend is not None and self.tuning_backend != previous_backend:
        backend.load_backend(self.tuning_backend)
    backend_personalization = to_signal(personalization, backend.to_array(personalization.np))
    #training, validation = split(backend_personalization, 0.8)
    #training2, validation2 = split(backend_personalization, 0.6)
    #measure_weights = [1, 1, 1, 1, 1]
    #propagated = [training.np, validation.np, backend_personalization.np, training2.np, validation2.np]
    measure_values = [None] * (self.num_parameters + self.autoregression)
    M = self.ranker_generator(measure_values).preprocessor(graph)
    #for _ in range(10):
    #    backend_personalization.np = backend.conv(backend_personalization.np, M)
    training, validation = split(backend_personalization, 0.8)
    training1, training2 = split(training, 0.5)
    propagated = [training1.np, training2.np]
    measures = [self.measure(backend_personalization, training1),
                self.measure(backend_personalization, training2)]
    #measures = [self.measure(validation, training), self.measure(training, validation)]
    if self.basis == "krylov":
        for i in range(len(measure_values)):
            measure_values[i] = [measure(p) for p, measure in zip(propagated, measures)]
            propagated = [backend.conv(p, M) for p in propagated]
    else:
        basis = [arnoldi_iteration(M, p, len(measure_values))[0] for p in propagated]
        for i in range(len(measure_values)):
            measure_values[i] = [float(measure(base[:, i])) for base, measure in zip(basis, measures)]
    measure_values = backend.to_primitive(measure_values)
    mean_value = backend.mean(measure_values, axis=0)
    measure_values = measure_values - mean_value
    best_parameters = measure_values
    measure_weights = [1] * measure_values.shape[1]
    if self.autoregression != 0:
        #vals2 = -measure_values-mean_value
        #measure_values = np.concatenate([measure_values, vals2-np.mean(vals2, axis=0)], axis=1)
        window = backend.repeat(1. / self.autoregression, self.autoregression)
        beta1 = 0.9
        beta2 = 0.999
        beta1t = 1
        beta2t = 1
        rms = window * 0
        momentum = window * 0
        error = float('inf')
        while True:
            beta1t *= beta1
            beta2t *= beta2
            prev_error = error
            parameters = backend.copy(measure_values)
            for i in range(len(measure_values) - len(window) - 2, -1, -1):
                parameters[i, :] = backend.dot(window, measure_values[(i + 1):(i + len(window) + 1), :])
            errors = (parameters - measure_values) * measure_weights / backend.sum(measure_weights)
            for j in range(len(window)):
                gradient = 0
                for i in range(len(measure_values) - len(window) - 1):
                    gradient += backend.dot(measure_values[i + j + 1, :], errors[i, :])
                momentum[j] = beta1 * momentum[j] + (1 - beta1) * gradient  #*np.sign(window[j])
                rms[j] = beta2 * rms[j] + (1 - beta2) * gradient * gradient
                window[j] -= 0.01 * momentum[j] / (1 - beta1t) / ((rms[j] / (1 - beta2t)) ** 0.5 + 1.E-8)
                #window[j] -= 0.01*gradient*np.sign(window[j])
            error = backend.mean(backend.abs(errors))
            if error == 0 or abs(error - prev_error) / error < 1.E-6:
                best_parameters = parameters
                break
    best_parameters = backend.mean(best_parameters[:self.num_parameters, :]
                                   * backend.to_primitive(measure_weights), axis=1) + backend.mean(mean_value)
    if self.tunable_offset is not None:
        div = backend.max(best_parameters)
        if div != 0:
            best_parameters /= div
        measure = self.tunable_offset(validation, training)
        base = basis[0] if self.basis != "krylov" else None
        best_offset = optimize(
            lambda params: -measure.best_direction() * measure(
                self._run(training,
                          [(best_parameters[i] + params[2]) * params[0] ** i + params[1]
                           for i in range(len(best_parameters))],
                          base, *args, **kwargs)),
            #lambda params: - measure.evaluate(self._run(training, best_parameters + params[0], *args, **kwargs)),
            max_vals=[1, 0, 0],
            min_vals=[0, 0, 0],
            deviation_tol=0.005,
            parameter_tol=1,
            partitions=5,
            divide_range=2)
        #best_parameters += best_offset[0]
        best_parameters = [(best_parameters[i] + best_offset[2]) * best_offset[0] ** i + best_offset[1]
                           for i in range(len(best_parameters))]
    best_parameters = backend.to_primitive(best_parameters)
    if backend.sum(backend.abs(best_parameters)) != 0:
        best_parameters /= backend.mean(backend.abs(best_parameters))
    if self.tuning_backend is not None and self.tuning_backend != previous_backend:
        best_parameters = [float(param) for param in best_parameters]  # convert parameters to backend-independent list
        backend.load_backend(previous_backend)
    #kwargs["graph_dropout"] = graph_dropout
    if self.basis != "krylov":
        return Tautology(), self._run(personalization, best_parameters, *args, **kwargs)  # TODO: make this unnecessary
    return self.ranker_generator(best_parameters), personalization
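# Flow of the returned pair (sketch, inferred from the code above): the surrounding tuner is
# expected to run the returned ranker on the returned personalization. When basis != "krylov",
# the tuned parameters have already been applied through self._run, so a Tautology ranker is
# returned alongside the precomputed signal; otherwise a ranker parameterized by
# best_parameters is returned together with the original personalization.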