def test_incentive_process(lim=1e-14):
    """
    Compare stationary distribution computations to known analytic form for
    neutral landscape for the Moran process.
    """

    cases = [(2, 10), (2, 40), (3, 10), (3, 20), (4, 10)]
    for num_types, pop_size in cases:
        # Mutation rate and the corresponding analytic parameter alpha.
        mu = (num_types - 1.) / num_types * 1. / (pop_size + 1)
        alpha = pop_size * mu / (num_types - 1. - num_types * mu)

        # Neutral landscape is the default
        edges = incentive_process.compute_edges(
            pop_size, num_types=num_types, incentive_func=replicator, mu=mu)

        for logspace in (False, True):
            analytic = incentive_process.neutral_stationary(
                pop_size, alpha, num_types, logspace=logspace)
            for exact in (False, True):
                computed = stationary_distribution(
                    edges, lim=lim, logspace=logspace, exact=exact)
                # Every state in the analytic distribution must match the
                # numerically computed one to 4 decimal places.
                for state in analytic:
                    assert_almost_equal(
                        analytic[state], computed[state], places=4)

        # Check that the stationary distribution satisfies balance conditions.
        # NOTE: `analytic` here is the last one computed (logspace=True),
        # matching the original behavior of using the leaked loop variable.
        check_detailed_balance(edges, analytic)
        check_global_balance(edges, analytic)
        check_eigenvalue(edges, analytic)

        # Test Entropy Rate bounds: 0 <= er <= (2n-1)/n * log(n)
        rate = entropy_rate(edges, analytic)
        upper_bound = (2. * num_types - 1) / num_types * numpy.log(num_types)
        assert_less_equal(rate, upper_bound)
        assert_greater_equal(rate, 0)
def test_extrema_moran(lim=1e-16):
    """
    Test for extrema of the stationary distribution.
    """
    num_types = 2
    cases = [
        (60, [(30, 30)], [(60, 0), (0, 60)]),
        (100, [(50, 50)], [(100, 0), (0, 100)]),
    ]
    for pop_size, expected_maxes, expected_mins in cases:
        mu = 1. / pop_size
        edges = incentive_process.compute_edges(
            pop_size, num_types=num_types, incentive_func=replicator, mu=mu)

        # Peak should be at the even split; minima at the monomorphic corners.
        stationary = stationary_distribution(edges, lim=lim)
        assert_equal(find_local_maxima(stationary), set(expected_maxes))
        assert_equal(find_local_minima(stationary), set(expected_mins))
# Example #3
def compute_everything(N, m, mu, initial_state, num_trajectories=100, trajectory_length=100, incentive_func=replicator, beta=1.):
    """
    Sample trajectories of the incentive process and compute, per trajectory,
    the yen, the fitness flux, and the self-information.

    Returns a tuple (yens, fluxes, self_infos), one list per quantity, each
    of length `num_trajectories`.
    """
    # Get the edges of the Markov process.
    edges = incentive_process.compute_edges(N=N, m=m, mu=mu, beta=beta,
                                            incentive_func=incentive_func)
    transition_dict = edges_to_dictionary(edges)

    # Generate some trajectories from the given initial state.
    trajectories = list(generate_trajectories(
        edges, initial_state, iterations=num_trajectories,
        max_iterations=trajectory_length))

    # Compute yens along the trajectories.
    yens = [compute_yen(path, transition_dict) for path in trajectories]

    # Compute the fitness flux. Some incentive functions do not accept a
    # beta keyword, so fall back to calling without it (EAFP).
    fitness_landscape = linear_fitness_landscape(m)
    try:
        incentive = incentive_func(fitness_landscape, beta=beta)
    except TypeError:
        incentive = incentive_func(fitness_landscape)
    fluxes = [compute_fitness_flux(path, incentive) for path in trajectories]

    # Compute the self-informations.
    self_infos = [compute_self_info(path, edges) for path in trajectories]

    return (yens, fluxes, self_infos)