def cycle(length, directed=False):
    """
    Produces a cycle of length `length`.

    Parameters
    ----------
    length: int
        Number of vertices in the cycle
    directed: bool, False
        Is the cycle directed?

    Returns
    -------
    a Graph object
    """

    graph = Graph()
    edges = []
    for i in range(length - 1):
        edges.append((i, i + 1))
        if not directed:
            edges.append((i + 1, i))
    edges.append((length - 1, 0))
    if not directed:
        edges.append((0, length - 1))
    graph.add_edges(edges)
    return graph
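
# A minimal usage sketch (illustrative only, not part of the library API):
# building undirected and directed cycles with the function above. The helper
# name `_demo_cycle` is hypothetical and only calls `cycle` as defined here.
def _demo_cycle():
    undirected = cycle(5)                 # 5-cycle with edges in both directions
    directed = cycle(5, directed=True)    # 5-cycle with edges in one direction only
    return undirected, directed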
def approx_stationary(edges, logspace=False, iterations=None, lim=1e-8,
                      initial_state=None):
    """
    Approximate stationary distribution computed by sparse matrix
    multiplications. Produces correct results and uses little memory, but is
    likely not the most CPU-efficient implementation in general (e.g. an
    eigenvector calculator may be better). Essentially raises the transition
    probability matrix to a large power.

    Parameters
    ----------
    edges: list of tuples
        Transition probabilities of the form
        [(source, target, transition_probability), ...]
    logspace: bool, False
        Carry out the calculation in logspace
    iterations: int, None
        Maximum number of iterations
    lim: float, 1e-8
        The approximation stops when successive iterations have a
        KL-divergence less than lim
    initial_state: None
        A distribution over the states of the process. If None, the uniform
        distribution is used.
    """

    g = Graph()
    g.add_edges(edges)
    cache = Cache(g)
    gen = stationary_generator(cache, logspace=logspace,
                               initial_state=initial_state)

    previous_ranks = None
    for i, ranks in enumerate(gen):
        # After a burn-in of 200 iterations, check successive iterates
        # for convergence.
        if i > 200:
            if i % 10:
                s = kl_divergence(ranks, previous_ranks)
                if s < lim:
                    break
        if iterations:
            if i == iterations:
                break
        previous_ranks = ranks

    # Reverse the enumeration
    d = dict()
    for m, r in enumerate(ranks):
        state = cache.inv_enum[m]
        d[state] = r
    return d
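
# A hedged usage sketch (illustrative only): approximating the stationary
# distribution of a small two-state Markov chain. The edge format follows the
# docstring above; the transition probabilities are made up for demonstration,
# and the helper name `_demo_approx_stationary` is hypothetical.
def _demo_approx_stationary():
    edges = [(0, 0, 0.5), (0, 1, 0.5),
             (1, 0, 0.25), (1, 1, 0.75)]
    d = approx_stationary(edges, lim=1e-10)
    # For this chain the exact stationary distribution is (1/3, 2/3),
    # so d[0] and d[1] should be close to those values.
    return d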
# m = [[1, 2], [2, 1]]
# m = [[2, 1], [1, 2]]
# m = [[2, 2], [2, 1]]
m = [[2, 2], [1, 1]]
print(N, m, mu)

graph = cycle(N)
fitness_landscape = linear_fitness_landscape(m)
incentive = replicator(fitness_landscape)
edge_dict = multivariate_graph_transitions(N, graph, incentive, num_types=2,
                                           mu=mu)
edges = [(v1, v2, t) for ((v1, v2), t) in edge_dict.items()]
g = Graph(edges)

print("There are %s configurations and %s transitions" % (
    len(set([x[0] for x in edge_dict.keys()])), len(edge_dict)))
print("Local Maxima:", len(find_extrema_yen(g, extrema="max")))
print("Local Minima:", len(find_extrema_yen(g, extrema="min")))
print("Total States:", 2**N)
exit()

print("Computing stationary")
s = stationary_distribution(edges, lim=1e-8, iterations=1000)
print("Local Maxima:", len(find_extrema_stationary(s, g, extrema="max")))
print("Local Minima:", len(find_extrema_stationary(s, g, extrema="min")))

# Print stationary distribution top 20
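# One possible way to carry out the step described in the comment above
# (a sketch, under the assumption that `s` maps each configuration to its
# stationary probability, as the functions above return): sort by probability
# and print the 20 most likely configurations.
for state, prob in sorted(s.items(), key=lambda item: item[1],
                          reverse=True)[:20]:
    print(state, prob)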