def test_incentive_process_k(lim=1e-14):
    """
    Compare stationary distribution computations to known analytic form for
    neutral landscape for the Moran process.
    """
    for k in [1, 2, 10]:
        for n, N in [(2, 20), (2, 50), (3, 10), (3, 20)]:
            mu = (n - 1.) / n * 1. / (N + 1)
            m = numpy.ones((n, n))  # neutral landscape
            fitness_landscape = linear_fitness_landscape(m)
            incentive = replicator(fitness_landscape)

            # Neutral landscape is the default
            edges = incentive_process.k_fold_incentive_transitions(
                N, incentive, num_types=n, mu=mu, k=k)
            stationary_1 = stationary_distribution(edges, lim=lim)

            # Check that the stationary distribution satisfies balance
            # conditions
            check_detailed_balance(edges, stationary_1)
            check_global_balance(edges, stationary_1)
            check_eigenvalue(edges, stationary_1)

            # Also check edge_func calculation
            edges = incentive_process.multivariate_transitions(
                N, incentive, num_types=n, mu=mu)
            states = states_from_edges(edges)
            edge_func = power_transitions(edges, k)
            stationary_2 = stationary_distribution(
                edge_func, states=states, lim=lim)

            for key in stationary_1.keys():
                assert_almost_equal(
                    stationary_1[key], stationary_2[key], places=5)
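# Side note on the k-fold test above: the stationary distribution of a chain
# with transition matrix P is also stationary for the k-step chain P**k. The
# sketch below checks this with plain numpy on a toy 3-state chain; it uses
# none of the package's functions and the matrix is made up for illustration.
def _sketch_power_invariance():
    import numpy as np

    P = np.array([[0.5, 0.3, 0.2],
                  [0.1, 0.6, 0.3],
                  [0.2, 0.2, 0.6]])  # a toy row-stochastic matrix

    def stationary_from_matrix(M):
        # Left eigenvector for eigenvalue 1, normalized to a distribution.
        vals, vecs = np.linalg.eig(M.T)
        v = np.real(vecs[:, np.argmax(np.real(vals))])
        return v / v.sum()

    pi = stationary_from_matrix(P)
    for k in [1, 2, 10]:
        pi_k = stationary_from_matrix(np.linalg.matrix_power(P, k))
        assert np.allclose(pi, pi_k)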
def test_extrema_moran_5(lim=1e-16):
    """
    Test for extrema of the stationary distribution.
    """
    n = 3
    N = 60
    mu = (3. / 2) * 1. / N
    m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    maxes = set([(20, 20, 20), (0, 0, 60), (0, 60, 0), (60, 0, 0),
                 (30, 0, 30), (0, 30, 30), (30, 30, 0)])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = fermi(fitness_landscape, beta=0.1)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary_distribution(edges, lim=lim)
    s2 = expected_divergence(edges, q_d=0)
    flow = inflow_outflow(edges)

    # These sets should all correspond
    assert_equal(find_local_maxima(s), set(maxes))
    assert_equal(find_local_minima(s2), set(maxes))
    assert_equal(find_local_minima(flow), set(maxes))
    # The minima are pathological
    assert_equal(find_local_minima(s),
                 set([(3, 3, 54), (3, 54, 3), (54, 3, 3)]))
    assert_equal(find_local_maxima(s2),
                 set([(4, 52, 4), (4, 4, 52), (52, 4, 4)]))
    assert_equal(find_local_maxima(flow),
                 set([(1, 58, 1), (1, 1, 58), (58, 1, 1)]))
def test_incentive_process(lim=1e-14):
    """
    Compare stationary distribution computations to known analytic form for
    neutral landscape for the Moran process.
    """
    for n, N in [(2, 10), (2, 40), (3, 10), (3, 20), (4, 10)]:
        mu = (n - 1.) / n * 1. / (N + 1)
        alpha = N * mu / (n - 1. - n * mu)

        # Neutral landscape is the default
        edges = incentive_process.compute_edges(
            N, num_types=n, incentive_func=replicator, mu=mu)
        for logspace in [False, True]:
            stationary_1 = incentive_process.neutral_stationary(
                N, alpha, n, logspace=logspace)
            for exact in [False, True]:
                stationary_2 = stationary_distribution(
                    edges, lim=lim, logspace=logspace, exact=exact)
                for key in stationary_1.keys():
                    assert_almost_equal(
                        stationary_1[key], stationary_2[key], places=4)

        # Check that the stationary distribution satisfies balance conditions
        check_detailed_balance(edges, stationary_1)
        check_global_balance(edges, stationary_1)
        check_eigenvalue(edges, stationary_1)

        # Test entropy rate bounds
        er = entropy_rate(edges, stationary_1)
        h = (2. * n - 1) / n * numpy.log(n)
        assert_less_equal(er, h)
        assert_greater_equal(er, 0)
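# For reference, the entropy rate checked above is the standard Markov-chain
# quantity H = -sum_i pi_i sum_j P_ij log P_ij. The helper below computes it
# directly from a transition matrix and its stationary vector; the two-state
# matrix is a toy example, not one produced by the package.
def _sketch_entropy_rate():
    import numpy as np

    def entropy_rate_from_matrix(P, pi):
        # H = -sum_i pi_i sum_j P_ij log P_ij, with 0 log 0 taken as 0.
        logs = np.log(np.where(P > 0, P, 1.0))
        return float(-np.sum(pi[:, None] * P * logs))

    P = np.array([[0.9, 0.1],
                  [0.2, 0.8]])
    pi = np.array([2. / 3, 1. / 3])  # solves pi P = pi for this toy chain
    h = entropy_rate_from_matrix(P, pi)
    assert 0 <= h <= np.log(2)  # bounded by the log of the number of states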
def two_dim_transitions_figure(N, m, mu=0.01, incentive_func=replicator):
    """
    Plot transition entropies and stationary distributions.
    """
    n = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape)
    if not mu:
        mu = 1. / N
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary_distribution(edges, exact=True)
    d = edges_to_edge_dict(edges)

    # Set up plots
    gs = gridspec.GridSpec(3, 1)

    ax1 = pyplot.subplot(gs[0, 0])
    ax1.set_title("Transition Probabilities")
    ups, downs, _ = two_dim_transitions(edges)
    xs = range(0, N + 1)
    ax1.plot(xs, ups)
    ax1.plot(xs, downs)

    ax2 = pyplot.subplot(gs[1, 0])
    ax2.set_title("Relative Entropy")
    divs1 = expected_divergence(edges)
    divs2 = expected_divergence(edges, q_d=0)
    plot_dictionary(divs1, ax=ax2)
    plot_dictionary(divs2, ax=ax2)

    ax3 = pyplot.subplot(gs[2, 0])
    ax3.set_title("Stationary Distribution")
    plot_dictionary(s, ax=ax3)
    ax3.set_xlabel("Number of A individuals (i)")
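# A usage sketch for the figure function above, with the two-type Hawk-Dove
# matrix [[1, 2], [2, 1]] used elsewhere in this codebase. It assumes the
# module-level pyplot import that the function itself relies on.
def _example_two_dim_transitions_figure():
    two_dim_transitions_figure(40, [[1, 2], [2, 1]], mu=0.01)
    pyplot.show()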
def two_dim_wright_fisher_figure(N, m, mu=0.01, incentive_func=replicator):
    """
    Plot relative entropies and stationary distribution for the
    Wright-Fisher process.
    """
    n = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape)
    if not mu:
        mu = 1. / N

    edge_func = wright_fisher.multivariate_transitions(
        N, incentive, mu=mu, num_types=n)
    states = list(simplex_generator(N, d=n - 1))
    s = stationary_distribution(edge_func, states=states, iterations=4 * N)
    s0 = expected_divergence(edge_func, states=states, q_d=0)
    s1 = expected_divergence(edge_func, states=states, q_d=1)

    # Set up plots
    gs = gridspec.GridSpec(2, 1)

    ax2 = pyplot.subplot(gs[0, 0])
    ax2.set_title("Relative Entropy")
    plot_dictionary(s0, ax=ax2)
    plot_dictionary(s1, ax=ax2)

    ax3 = pyplot.subplot(gs[1, 0])
    ax3.set_title("Stationary Distribution")
    plot_dictionary(s, ax=ax3)
    ax3.set_xlabel("Number of A individuals (i)")
def bomze_figures(N=60, beta=1, process="incentive", directory=None):
    """
    Makes plots of the stationary distribution and expected divergence
    for each of the plots in Bomze's classification.
    """
    if not directory:
        directory = "bomze_paper_figures_%s" % process
    ensure_directory(directory)
    for i, m in enumerate(bomze_matrices()):
        mu = 3. / 2 * 1. / N
        fitness_landscape = linear_fitness_landscape(m)
        incentive = fermi(fitness_landscape, beta=beta)
        edges = incentive_process.multivariate_transitions(
            N, incentive, num_types=3, mu=mu)
        d1 = stationary_distribution(edges)

        filename = os.path.join(directory, "%s_%s_stationary.eps" % (i, N))
        figure, tax = ternary.figure(scale=N)
        tax.heatmap(d1)
        tax.savefig(filename=filename)
        pyplot.close(figure)

        for q_d in [0., 1.]:
            d2 = expected_divergence(edges, q_d=q_d)
            filename = os.path.join(directory, "%s_%s_%s.eps" % (i, N, q_d))
            figure, tax = ternary.figure(scale=N)
            tax.heatmap(d2)
            tax.savefig(filename=filename)
            pyplot.close(figure)
def test_stationary_2():
    """
    Test the stationary distribution computations for a simple Markov process.
    """
    edges = [
        (0, 0, 1. / 3), (0, 1, 1. / 3), (0, 2, 1. / 3),
        (1, 0, 1. / 4), (1, 1, 1. / 2), (1, 2, 1. / 4),
        (2, 0, 1. / 6), (2, 1, 1. / 3), (2, 2, 1. / 2),
    ]
    exact_stationary = {0: 6. / 25, 1: 10. / 25, 2: 9. / 25}

    for logspace in [True, False]:
        s = stationary_distribution(edges, logspace=logspace)

        # Check that the stationary distribution satisfies balance conditions
        # (detailed balance does not hold for this chain, so that check is
        # skipped)
        # check_detailed_balance(edges, s)
        check_global_balance(edges, s)
        check_eigenvalue(edges, s)

        # Check that the approximation converged to the exact distribution
        for key in s.keys():
            assert_almost_equal(exact_stationary[key], s[key])
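# The exact values above can be recovered with plain linear algebra: solve
# pi P = pi together with sum(pi) = 1. This sketch is independent of the
# package code and only restates the chain defined in test_stationary_2.
def _sketch_exact_stationary_2():
    import numpy as np

    P = np.array([[1. / 3, 1. / 3, 1. / 3],
                  [1. / 4, 1. / 2, 1. / 4],
                  [1. / 6, 1. / 3, 1. / 2]])
    # Replace one balance equation with the normalization constraint.
    A = np.vstack([(P.T - np.eye(3))[:-1], np.ones(3)])
    pi = np.linalg.solve(A, np.array([0., 0., 1.]))
    assert np.allclose(pi, [6. / 25, 10. / 25, 9. / 25])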
def graphical_abstract_figures(N=60, q=1, beta=0.1):
    """
    Three dimensional process examples.
    """
    a = 0
    b = 1
    m = [[a, b, b], [b, a, b], [b, b, a]]
    mu = (3. / 2) * 1. / N
    fitness_landscape = linear_fitness_landscape(m)
    incentive = fermi(fitness_landscape, beta=beta, q=q)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=3, mu=mu)

    d = stationary_distribution(edges, iterations=None)
    figure, tax = ternary.figure(scale=N)
    tax.heatmap(d, scale=N)
    tax.savefig(filename="ga_stationary.eps", dpi=600)

    d = expected_divergence(edges, q_d=0)
    figure, tax = ternary.figure(scale=N)
    tax.heatmap(d, scale=N)
    tax.savefig(filename="ga_d_0.eps", dpi=600)

    d = expected_divergence(edges, q_d=1)
    figure, tax = ternary.figure(scale=N)
    tax.heatmap(d, scale=N)
    tax.savefig(filename="ga_d_1.eps", dpi=600)
def four_dim_figures(N=30, beta=1., q=1.):
    """
    Four dimensional example. Three dimensional slices are plotted
    for illustration.
    """
    m = [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [0, 0, 0, 1]]
    num_types = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    mu = 4. / 3 * 1. / N
    incentive = fermi(fitness_landscape, beta=beta, q=q)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=num_types, mu=mu)

    d1 = expected_divergence(edges, q_d=0, boundary=True)
    d2 = stationary_distribution(edges)

    # We need to slice the 4-dim dictionary into three-dim slices for plotting.
    for slice_index in range(4):
        for d in [d1, d2]:
            slice_dict = slice_dictionary(d, N, slice_index=slice_index)
            figure, tax = ternary.figure(scale=N)
            tax.heatmap(slice_dict, style="d")
    pyplot.show()
def test_wright_fisher(N=20, lim=1e-10, n=2):
    """Test the Wright-Fisher process for two and three types."""
    for n in [2, 3]:
        mu = (n - 1.) / n * 1. / (N + 1)
        m = numpy.ones((n, n))  # neutral landscape
        fitness_landscape = linear_fitness_landscape(m)
        incentive = replicator(fitness_landscape)

        # Wright-Fisher
        for low_memory in [True, False]:
            edge_func = wright_fisher.multivariate_transitions(
                N, incentive, mu=mu, num_types=n, low_memory=low_memory)
            states = list(simplex_generator(N, d=n - 1))
            for logspace in [False, True]:
                s = stationary_distribution(
                    edge_func, states=states, iterations=200, lim=lim,
                    logspace=logspace)
                wf_edges = edge_func_to_edges(edge_func, states)

                er = entropy_rate(wf_edges, s)
                assert_greater_equal(er, 0)

                # Check that the stationary distribution satisfies balance
                # conditions
                check_detailed_balance(wf_edges, s, places=2)
                check_global_balance(wf_edges, s, places=4)
                check_eigenvalue(wf_edges, s, places=2)
def test_extrema_wf(lim=1e-10):
    """
    For small mu, the Wright-Fisher process is minimal in the center.
    Test that this happens.
    """
    for n, N, mins in [(2, 40, [(20, 20)]), (3, 30, [(10, 10, 10)])]:
        mu = 1. / N ** 3
        m = numpy.ones((n, n))  # neutral landscape
        fitness_landscape = linear_fitness_landscape(m)
        incentive = replicator(fitness_landscape)
        edge_func = wright_fisher.multivariate_transitions(
            N, incentive, mu=mu, num_types=n)
        states = list(simplex_generator(N, d=n - 1))
        s = stationary_distribution(
            edge_func, states=states, iterations=4 * N, lim=lim)
        s2 = expected_divergence(edge_func, states=states, q_d=0)

        assert_equal(find_local_minima(s), set(mins))

        er = entropy_rate(edge_func, s, states=states)
        assert_greater_equal(er, 0)
def compute_entropy_rate(N=30, n=2, m=None, incentive_func=None, beta=1.,
                         mu=None, exact=False, lim=1e-13, logspace=False):
    """
    Compute the entropy rate and stationary distribution of an incentive
    process with the given parameters.
    """
    if m is None:
        m = np.ones((n, n))
    if not incentive_func:
        incentive_func = incentives.fermi
    if not mu:
        # mu = (n - 1.) / n * 1. / (N + 1)
        mu = 1. / N
    fitness_landscape = incentives.linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape, beta=beta, q=1)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary.stationary_distribution(
        edges, exact=exact, lim=lim, logspace=logspace)
    e = stationary.entropy_rate(edges, s)
    return e, s
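# A usage sketch for compute_entropy_rate; it assumes the module-level imports
# used by the function above (np, incentives, incentive_process, stationary)
# are in place. The Hawk-Dove matrix [[1, 2], [2, 1]] matches the examples
# elsewhere in this codebase; the printed values are illustrative only.
def _example_compute_entropy_rate():
    e, s = compute_entropy_rate(N=30, n=2, m=[[1, 2], [2, 1]], beta=1.)
    print("Entropy rate:", e)
    # Five most probable states under the stationary distribution:
    print(sorted(s.items(), key=lambda kv: kv[1], reverse=True)[:5])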
def cycle_stationary_example(N, m, mu, incentive_func=replicator):
    graph = cycle(N)
    fitness_landscape = linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape)
    edge_dict = multivariate_graph_transitions(
        N, graph, incentive, num_types=2, mu=mu)
    print("There are %s configurations and %s transitions" % (
        len(set([x[0] for x in edge_dict.keys()])), len(edge_dict)))

    # Compute stationary distribution
    edges = [(v1, v2, t) for ((v1, v2), t) in edge_dict.items()]
    s = stationary_distribution(edges, lim=1e-8)
    return s
def test_extrema_moran(lim=1e-16):
    """
    Test for extrema of the stationary distribution.
    """
    n = 2
    for N, maxes, mins in [(60, [(30, 30)], [(60, 0), (0, 60)]),
                           (100, [(50, 50)], [(100, 0), (0, 100)])]:
        mu = 1. / N
        edges = incentive_process.compute_edges(
            N, num_types=n, incentive_func=replicator, mu=mu)
        s = stationary_distribution(edges, lim=lim)
        assert_equal(find_local_maxima(s), set(maxes))
        assert_equal(find_local_minima(s), set(mins))
def test_extrema_moran_2(lim=1e-16):
    """
    Test for extrema of the stationary distribution.
    """
    n = 2
    N = 100
    mu = 1. / 1000
    m = [[1, 2], [3, 1]]
    maxes = set([(33, 67), (100, 0), (0, 100)])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = replicator(fitness_landscape)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary_distribution(edges, lim=lim)
    s2 = expected_divergence(edges, q_d=0)
    assert_equal(find_local_maxima(s), set(maxes))
    assert_equal(find_local_minima(s2), set(maxes))
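# Where does the interior maximum (33, 67) in test_extrema_moran_2 come from?
# For m = [[1, 2], [3, 1]] the replicator dynamic has an interior equilibrium
# where both types earn equal payoff:
#     x * 1 + (1 - x) * 2 == x * 3 + (1 - x) * 1   =>   x = 1/3,
# so with N = 100 the stationary maximum sits near (N/3, 2N/3) = (33, 67).
# This is standard replicator arithmetic, not a call into the package.
def _sketch_interior_equilibrium():
    x = 1. / 3
    payoff_type_1 = x * 1 + (1 - x) * 2
    payoff_type_2 = x * 3 + (1 - x) * 1
    assert abs(payoff_type_1 - payoff_type_2) < 1e-12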
def tournament_stationary_3(N, mu=None):
    """
    Example for a tournament selection matrix.
    """
    if not mu:
        mu = 3. / 2 * 1. / N
    m = [[1, 1, 1], [0, 1, 1], [0, 0, 1]]
    num_types = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = replicator(fitness_landscape)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=num_types, mu=mu)
    s = stationary_distribution(edges)
    ternary.heatmap(s, scale=N, scientific=True)
    d = expected_divergence(edges, q_d=0)
    ternary.heatmap(d, scale=N, scientific=True)
    pyplot.show()
def test_stationary(t1=0.4, t2=0.6):
    """
    Test the stationary distribution computations for a simple Markov process.
    """
    edges = [(0, 1, t1), (0, 0, 1. - t1), (1, 0, t2), (1, 1, 1. - t2)]
    s_0 = 1. / (1. + t1 / t2)
    exact_stationary = {0: s_0, 1: 1 - s_0}

    for logspace in [True, False]:
        s = stationary_distribution(edges, logspace=logspace)

        # Check that the stationary distribution satisfies balance conditions
        check_detailed_balance(edges, s)
        check_global_balance(edges, s)
        check_eigenvalue(edges, s)

        # Check that the approximation converged to the exact distribution
        for key in s.keys():
            assert_almost_equal(exact_stationary[key], s[key])
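# The closed form s_0 = 1 / (1 + t1 / t2) used above follows from detailed
# balance: the flow 0 -> 1 equals the flow 1 -> 0, i.e. pi_0 * t1 = pi_1 * t2,
# and pi_0 + pi_1 = 1 gives pi_0 = t2 / (t1 + t2). A one-line check:
def _sketch_two_state_stationary(t1=0.4, t2=0.6):
    assert abs(t2 / (t1 + t2) - 1. / (1. + t1 / t2)) < 1e-15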
def rps_figures(N=60, q=1, beta=1.):
    """
    Three rock-paper-scissors examples.
    """
    m = [[0, -1, 1], [1, 0, -1], [-1, 1, 0]]
    num_types = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    for i, mu in enumerate([1. / math.sqrt(N), 1. / N, 1. / N ** (3. / 2)]):
        # Approximate calculation
        mu = 3 / 2. * mu
        incentive = fermi(fitness_landscape, beta=beta, q=q)
        edges = incentive_process.multivariate_transitions(
            N, incentive, num_types=num_types, mu=mu)
        d = stationary_distribution(edges, lim=1e-10)
        figure, tax = ternary.figure()
        tax.heatmap(d, scale=N)
        tax.savefig(filename="rsp_mu_" + str(i) + ".eps", dpi=600)
def wright_fisher(N, game_matrix=None, mu=None, incentive_func=replicator,
                  logspace=False):
    """
    A convenience function for the Wright-Fisher process with mutation.
    Computes the transition probabilities and the stationary distribution.
    The number of types is determined from the dimensions of the game_matrix.

    Parameters
    ----------
    N: int
        The population size
    game_matrix: list of lists or numpy matrix, None
        The game matrix of the process, e.g. [[1, 2], [2, 1]] for the two-type
        Hawk-Dove game. If not specified, the 2-type neutral landscape is used.
    mu: float, None
        The mutation rate; if None then `mu` is set to 1 / N
    incentive_func: function, replicator
        A function defining the process, e.g. the Moran process, logit, Fermi.
        Incentive functions are in stationary.processes.incentives
    logspace: bool, False
        Compute in log-space or not

    Returns
    -------
    edge_func, s, er: the transition function, the stationary distribution,
    and the entropy rate.
    """
    if game_matrix is None:
        game_matrix = [[1, 1], [1, 1]]
    if not mu:
        mu = 1. / N
    num_types = len(game_matrix[0])
    fitness_landscape = linear_fitness_landscape(game_matrix)
    incentive = incentive_func(fitness_landscape)
    edge_func = wright_fisher.multivariate_transitions(
        N, incentive, mu=mu, num_types=num_types)
    states = list(simplex_generator(N, d=num_types - 1))
    s = stationary_distribution(
        edge_func, states=states, iterations=4 * N, logspace=logspace)
    er = entropy_rate(edge_func, s)
    return edge_func, s, er
def test_stationary_3():
    """
    Test the stationary distribution computations for a simple Markov process.
    """
    edges = [
        (0, 0, 0), (0, 1, 1), (0, 2, 0), (0, 3, 0),
        (1, 0, 1. / 3), (1, 1, 0), (1, 2, 2. / 3), (1, 3, 0),
        (2, 0, 0), (2, 1, 2. / 3), (2, 2, 0), (2, 3, 1. / 3),
        (3, 0, 0), (3, 1, 0), (3, 2, 1), (3, 3, 0),
    ]
    exact_stationary = {0: 1. / 8, 1: 3. / 8, 2: 3. / 8, 3: 1. / 8}

    for logspace in [True, False]:
        s = stationary_distribution(edges, logspace=logspace)

        # Check that the stationary distribution satisfies balance conditions
        check_detailed_balance(edges, s)
        check_global_balance(edges, s)
        check_eigenvalue(edges, s)

        # Check that the approximation converged to the exact distribution
        for key in s.keys():
            assert_almost_equal(exact_stationary[key], s[key])
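# The exact values in test_stationary_3 are binomial: the chain is an Ehrenfest
# urn on 3 balls (from state k it steps down with probability k/3 and up with
# probability (3 - k)/3), whose stationary distribution is Binomial(3, 1/2),
# i.e. (1, 3, 3, 1) / 8. A small check of that observation:
def _sketch_ehrenfest_stationary():
    coeffs = [1, 3, 3, 1]  # binomial coefficients C(3, k)
    exact = dict((k, c / 8.) for k, c in enumerate(coeffs))
    assert exact == {0: 1. / 8, 1: 3. / 8, 2: 3. / 8, 3: 1. / 8}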
def test_extrema_moran_4(lim=1e-16):
    """
    Test for extrema of the stationary distribution.
    """
    n = 3
    N = 60
    mu = 3. / (2 * N)
    m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    maxes = set([(20, 20, 20)])
    mins = set([(0, 0, 60), (0, 60, 0), (60, 0, 0)])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = logit(fitness_landscape, beta=0.1)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary_distribution(edges, lim=lim)
    s2 = expected_divergence(edges, q_d=0)
    assert_equal(find_local_maxima(s), set(maxes))
    assert_equal(find_local_minima(s), set(mins))
    assert_equal(find_local_minima(s2), set(maxes))
    assert_equal(find_local_maxima(s2), set(mins))
def test_extrema_moran_3(lim=1e-12):
    """
    Test for extrema of the stationary distribution.
    """
    n = 2
    N = 100
    mu = 6. / 25
    m = [[1, 0], [0, 1]]
    maxes = set([(38, 62), (62, 38)])
    mins = set([(50, 50), (100, 0), (0, 100)])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = replicator(fitness_landscape)
    edges = incentive_process.multivariate_transitions(
        N, incentive, num_types=n, mu=mu)
    s = stationary_distribution(edges, lim=lim)
    flow = inflow_outflow(edges)
    for q_d in [0, 1]:
        s2 = expected_divergence(edges, q_d=1)
        assert_equal(find_local_maxima(s), set(maxes))
        assert_equal(find_local_minima(s), set(mins))
        assert_equal(find_local_minima(s2),
                     set([(50, 50), (40, 60), (60, 40)]))
        assert_equal(find_local_maxima(flow), set(mins))
import math

import ternary

from stationary.processes.incentive_process import (
    linear_fitness_landscape, replicator)
from stationary.utils import expected_divergence
from stationary import stationary_distribution

# Note: even_death and variable_population_transitions come from the package's
# variable-population-size process module (import not shown in this excerpt).

if __name__ == '__main__':
    N = 40
    mu = 3. / 2. * 1. / N
    m = [[1, 2], [2, 1]]
    fitness_landscape = linear_fitness_landscape(m, normalize=False)
    incentive = replicator(fitness_landscape)
    death_probabilities = even_death(N)

    edges = variable_population_transitions(
        N, fitness_landscape, death_probabilities, incentive=incentive, mu=mu)
    s = stationary_distribution(edges, iterations=10000)

    # Print out the states with the highest stationary probabilities
    vs = [(v, k) for (k, v) in s.items()]
    vs.sort(reverse=True)
    print(vs[:10])

    # Plot the stationary distribution and expected divergence
    figure, tax = ternary.figure(scale=N)
    tax.heatmap(s)

    d2 = expected_divergence(edges, q_d=0)
    d = dict()
    for k, v in d2.items():
        d[k] = math.sqrt(v)
graph = cycle(N)
fitness_landscape = linear_fitness_landscape(m)
incentive = replicator(fitness_landscape)
edge_dict = multivariate_graph_transitions(
    N, graph, incentive, num_types=2, mu=mu)
edges = [(v1, v2, t) for ((v1, v2), t) in edge_dict.items()]
g = Graph(edges)
print("There are %s configurations and %s transitions" % (
    len(set([x[0] for x in edge_dict.keys()])), len(edge_dict)))

print("Local Maxima:", len(find_extrema_yen(g, extrema="max")))
print("Local Minima:", len(find_extrema_yen(g, extrema="min")))
print("Total States:", 2 ** N)
# Note: this early exit skips the stationary distribution computation below.
exit()

print("Computing stationary")
s = stationary_distribution(edges, lim=1e-8, iterations=1000)
print("Local Maxima:", len(find_extrema_stationary(s, g, extrema="max")))
print("Local Minima:", len(find_extrema_stationary(s, g, extrema="min")))

# Print stationary distribution top 20
print("Stationary")
for k, v in sorted(s.items(), key=itemgetter(1), reverse=True)[:20]:
    print(k, v)
print(len([v for v in s.values() if v > 0.001]),
      sum([v for v in s.values() if v > 0.001]))

# Consolidate states
s, inverse_mapper = consolidate_stationary(s, N)

# Print stationary distribution top 20
print("Consolidated Stationary")
for k, v in sorted(s.items(), key=itemgetter(1), reverse=True)[:20]:
    print(k, v)