Example #1
def two_dim_wright_fisher_figure(N, m, mu=0.01, incentive_func=replicator):
    """
    Plot relative entropies and stationary distribution for the Wright-Fisher
    process.
    """

    n = len(m[0])
    fitness_landscape = linear_fitness_landscape(m)
    incentive = incentive_func(fitness_landscape)
    if not mu:
        mu = 1. / N

    edge_func = wright_fisher.multivariate_transitions(N,
                                                       incentive,
                                                       mu=mu,
                                                       num_types=n)
    states = list(simplex_generator(N, d=n - 1))
    s = stationary_distribution(edge_func, states=states, iterations=4 * N)
    s0 = expected_divergence(edge_func, states=states, q_d=0)
    s1 = expected_divergence(edge_func, states=states, q_d=1)

    # Set up plots
    gs = gridspec.GridSpec(2, 1)

    ax2 = pyplot.subplot(gs[0, 0])
    ax2.set_title("Relative Entropy")
    plot_dictionary(s0, ax=ax2)
    plot_dictionary(s1, ax=ax2)

    ax3 = pyplot.subplot(gs[1, 0])
    ax3.set_title("Stationary Distribution")
    plot_dictionary(s, ax=ax3)
    ax3.set_xlabel("Number of A individuals (i)")
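A minimal usage sketch for the function above (not part of the original example): it assumes the names the function body relies on (pyplot and the stationary-process helpers) are already imported, and borrows the two-type Hawk-Dove matrix from the convenience-function docstring further below.

# Hypothetical usage; assumes pyplot and the helpers called above are in scope.
hawk_dove = [[1, 2], [2, 1]]   # two-type Hawk-Dove game matrix
two_dim_wright_fisher_figure(N=30, m=hawk_dove, mu=0.05)
pyplot.show()                  # render the two stacked subplots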
Example #2
def test_wright_fisher(N=20, lim=1e-10):
    """Test the Wright-Fisher process in two and three dimensions."""
    for n in [2, 3]:
        mu = (n - 1.) / n * 1. / (N + 1)
        m = numpy.ones((n, n))  # neutral landscape
        fitness_landscape = linear_fitness_landscape(m)
        incentive = replicator(fitness_landscape)

        # Wright-Fisher
        for low_memory in [True, False]:
            edge_func = wright_fisher.multivariate_transitions(
                N, incentive, mu=mu, num_types=n, low_memory=low_memory)
            states = list(simplex_generator(N, d=n - 1))
            for logspace in [False, True]:
                s = stationary_distribution(edge_func,
                                            states=states,
                                            iterations=200,
                                            lim=lim,
                                            logspace=logspace)
                wf_edges = edge_func_to_edges(edge_func, states)

                er = entropy_rate(wf_edges, s)
                assert_greater_equal(er, 0)

                # Check that the stationary distribution satisfies the balance
                # conditions
                check_detailed_balance(wf_edges, s, places=2)
                check_global_balance(wf_edges, s, places=4)
                check_eigenvalue(wf_edges, s, places=2)
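For reference, global balance says that the stationary probability flowing into each state equals the probability of that state: the sum over x of s(x) * T(x, y) equals s(y). The rough spot-check below is a sketch only; it assumes wf_edges is a list of (source, target, probability) triples and s maps states to probabilities, which is the shape the test above implies rather than a documented API.

# Hedged sketch, not the library's check_global_balance.
def global_balance_residual(wf_edges, s):
    inflow = {state: 0. for state in s}
    for source, target, prob in wf_edges:
        inflow[target] += s[source] * prob
    # For an exact stationary distribution every residual would be zero.
    return max(abs(inflow[state] - s[state]) for state in s)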
Example #3
def test_extrema_wf(lim=1e-10):
    """
    For small mu, the Wright-Fisher process is minimal in the center.
    Test that this happens.
    """

    for n, N, mins in [(2, 40, [(20, 20)]), (3, 30, [(10, 10, 10)])]:
        mu = 1. / N**3
        m = numpy.ones((n, n))  # neutral landscape
        fitness_landscape = linear_fitness_landscape(m)
        incentive = replicator(fitness_landscape)

        edge_func = wright_fisher.multivariate_transitions(N,
                                                           incentive,
                                                           mu=mu,
                                                           num_types=n)
        states = list(simplex_generator(N, d=n - 1))
        s = stationary_distribution(edge_func,
                                    states=states,
                                    iterations=4 * N,
                                    lim=lim)
        s2 = expected_divergence(edge_func, states=states, q_d=0)

        assert_equal(find_local_minima(s), set(mins))

        er = entropy_rate(edge_func, s, states=states)
        assert_greater_equal(er, 0)
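The assertion above relies on the library's find_local_minima; a naive stand-in that captures the idea (a state is a local minimum if its stationary probability does not exceed that of any neighbor reachable by moving one individual between types) could look like this hypothetical sketch.

# Hypothetical stand-in for find_local_minima, for illustration only.
def naive_local_minima(s):
    minima = set()
    for state, prob in s.items():
        neighbors = []
        for i in range(len(state)):
            for j in range(len(state)):
                if i != j and state[i] > 0:
                    neighbor = list(state)
                    neighbor[i] -= 1   # move one individual from type i...
                    neighbor[j] += 1   # ...to type j
                    neighbors.append(tuple(neighbor))
        if all(prob <= s[n] for n in neighbors if n in s):
            minima.add(state)
    return minima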
Example #4
def test_stationary_generator():
    d = 1
    N = 1
    states = set(simplex_generator(N, d))
    expected = set([(0, 1), (1, 0)])
    assert_equal(states, expected)

    N = 2
    states = set(simplex_generator(N, d))
    expected = set([(0, 2), (1, 1), (2, 0)])
    assert_equal(states, expected)

    N = 3
    states = set(simplex_generator(N, d))
    expected = set([(0, 3), (1, 2), (2, 1), (3, 0)])
    assert_equal(states, expected)

    d = 2
    N = 1
    states = set(simplex_generator(N, d))
    expected = set([(1, 0, 0), (0, 1, 0), (0, 0, 1)])
    assert_equal(states, expected)

    N = 2
    states = set(simplex_generator(N, d))
    expected = set([(1, 1, 0), (0, 1, 1), (1, 0, 1), (0, 2, 0), (0, 0, 2),
                    (2, 0, 0)])
    assert_equal(states, expected)

    for d in range(1, 5):
        for N in range(1, 20):
            states = set(simplex_generator(N, d))
            size = comb(N + d, d, exact=True)
            assert_equal(len(states), size)
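The final loop checks the stars-and-bars count: there are comb(N + d, d) ways to write N as an ordered sum of d + 1 non-negative integers. A recursive sketch of such a generator (an illustration, not necessarily the library's simplex_generator) is:

# Illustrative only; the library's implementation may differ.
def simplex_states(N, d):
    """Yield all (d + 1)-tuples of non-negative integers summing to N."""
    if d == 0:
        yield (N,)
        return
    for i in range(N + 1):
        for rest in simplex_states(N - i, d - 1):
            yield (i,) + rest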
Example #5
def wright_fisher(N, game_matrix=None, mu=None, incentive_func=replicator,
                  logspace=False):
    """
    A convenience function for the Wright-Fisher process with mutation.
    Computes the transition probabilities and the stationary distribution.

    The number of types is determined from the dimensions of the game_matrix.

    Parameters
    ----------
    N: int
        The population size
    game_matrix: list of lists or numpy matrix, None
        The game matrix of the process, e.g. [[1, 2], [2, 1]] for the two-type
        Hawk-Dove game. If not specified, the 2-type neutral landscape is used.
    mu: float, None
        The mutation rate, if None then `mu` is set to 1 / N
    incentive_func: function, replicator
        A function defining the process incentive, e.g. replicator, logit, or
        Fermi. Incentive functions are in stationary.processes.incentives
    logspace: bool, False
        Compute in log-space or not

    Returns
    -------
    edge_func, s, er: the transition function, the stationary distribution, and
    the entropy rate.
    """

    if game_matrix is None:
        game_matrix = [[1, 1], [1, 1]]
    if mu is None:
        mu = 1. / N
    num_types = len(game_matrix[0])

    fitness_landscape = linear_fitness_landscape(game_matrix)
    incentive = incentive_func(fitness_landscape)
    edge_func = wright_fisher.multivariate_transitions(N, incentive, mu=mu,
                                                       num_types=num_types)
    states = list(simplex_generator(N, d=num_types-1))
    s = stationary_distribution(edge_func, states=states, iterations=4*N,
                                logspace=logspace)
    er = entropy_rate(edge_func, s, states=states)
    return edge_func, s, er
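A minimal call sketch using only the parameters documented above (the Hawk-Dove matrix comes from the docstring); it assumes the imports the function body depends on resolve in the surrounding module.

# Hypothetical usage of the convenience function above.
edge_func, s, er = wright_fisher(N=20, game_matrix=[[1, 2], [2, 1]], mu=0.05)
print(er)                   # entropy rate of the process
print(max(s, key=s.get))    # most probable population state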
Example #6
def exact_stationary(edges, initial_state=None, logspace=False):
    """
    Computes the stationary distribution of a reversible process on the simplex
    exactly. No check for reversibility is performed.

    Parameters
    ----------

    edges: list or dictionary
        The edges or edge_dict of the process
    initial_state: tuple, None
        The initial state. If not given, a suitable state is created.
    logspace: bool, False
        Carry out the calculation in logspace

    Returns
    -------
    dictionary, the stationary distribution
    """

    # Convert edges to edge_dict if necessary
    if isinstance(edges, list):
        edges = edges_to_edge_dict(edges)
    # Compute population parameters from the edge_dict
    state = list(edges.keys())[0][0]
    N = sum(state)
    num_players = len(state)
    # Get an initial state
    if not initial_state:
        initial_state = [N//num_players]*(num_players)
        initial_state[-1] = N - (num_players-1) * (N//num_players)
    initial_state = tuple(initial_state)

    # Use the exact form of the stationary distribution.
    d = dict()
    for state in simplex_generator(N, num_players-1):
        # Take a path from initial to state.
        seq = [initial_state]
        e = list(seq[-1])
        for i in range(0, num_players):
            while e[i] < state[i]:
                for j in range(0, num_players):
                    if e[j] > state[j]:
                        break
                e[j] = e[j] - 1
                e[i] = e[i] + 1
                seq.append(tuple(e))
            while e[i] > state[i]:
                for j in range(0, num_players):
                    if e[j] < state[j]:
                        break
                e[j] = e[j] + 1
                e[i] = e[i] - 1
                seq.append(tuple(e))
        if logspace:
            s = 0.
        else:
            s = 1.
        for index in range(len(seq) - 1):
            e, f = seq[index], seq[index + 1]
            if logspace:
                s += log(edges[(e, f)]) - log(edges[(f, e)])
            else:
                s *= edges[(e, f)] / edges[(f, e)]
        d[state] = s
    if logspace:
        s0 = logsumexp(list(d.values()))
        for key, v in d.items():
            d[key] = exp(v - s0)
    else:
        s0 = 1. / sum(d.values())
        for key, v in d.items():
            d[key] = s0 * v
    return d
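A small sanity check: for a two-state chain on the N = 1 simplex with transition probability 0.25 from (0, 1) to (1, 0) and 0.75 back, detailed balance gives the stationary distribution (0.75, 0.25). Passing an edge_dict directly (keys are (source, target) state pairs, values are transition probabilities, as the function body expects) avoids assuming anything about the list edge format.

# Hypothetical sanity check for exact_stationary.
edge_dict = {
    ((0, 1), (1, 0)): 0.25, ((1, 0), (0, 1)): 0.75,
    ((0, 1), (0, 1)): 0.75, ((1, 0), (1, 0)): 0.25,
}
print(exact_stationary(edge_dict))   # expect approximately {(0, 1): 0.75, (1, 0): 0.25}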