def wright_fisher_transitions(N, fitness_landscape=None, incentive=None, mutations=None, w=None):
    """Compute Wright-Fisher transition probabilities for the Markov process.

    Returns an (N+1) x (N+1) numpy array d where d[a][j] is the probability
    of the population moving from a individuals of the first type to j in
    one generation (states a = 0..N).

    Parameters:
        N: population size.
        fitness_landscape: callable [a, b] -> fitness vector; used only when
            no incentive is given.
        incentive: callable [a, b] -> (unnormalized) reproduction incentive.
        mutations: callable (N, a) -> (mu, nu); defaults to boundary_mutations().
        w: selection strength; defaults to 1 (strong selection).
    """
    if mutations is None:
        mutations = boundary_mutations()
    # Compare to None so that w=0 (fully neutral selection) is honored;
    # the former truthiness test silently replaced w=0 with 1.
    if w is None:
        w = 1.
    # Fixed: previously a dead `d = dict()` was created and immediately
    # discarded; the dense array is the one actually used.
    d = numpy.zeros((N + 1, N + 1))
    # Boundary states can only be left via mutation.
    mu, nu = mutations(N, 0)
    d[0][1] = mu
    d[0][0] = 1. - mu
    mu, nu = mutations(N, N)
    d[N][N - 1] = nu
    d[N][N] = 1. - nu
    for a in range(1, N):
        b = N - a
        if incentive:
            i = normalize(incentive([a, b]))
        else:
            i = normalize(multiply_vectors([a, b], fitness_landscape([a, b])))
        # Interpolate toward the neutral process by selection strength w.
        i[0] = 1. - w + w * i[0]
        i[1] = 1. - w + w * i[1]
        mu, nu = mutations(N, a)
        # Per-offspring probability of sampling each type, after mutation.
        up = i[0] * (1. - mu) + i[1] * nu
        down = i[0] * mu + i[1] * (1. - nu)
        # Binomial sampling of N offspring; comb(N, j) == 1 for j in {0, N},
        # so those cases skip the (exact, integer) binomial coefficient.
        for j in range(0, N + 1):
            if (j == 0) or (j == N):
                d[a][j] = exp(j * log(up) + (N - j) * log(down))
            else:
                d[a][j] = comb(N, j, exact=True) * exp(j * log(up) + (N - j) * log(down))
    return d
def moran_transitions(N, fitness_landscape=None, incentive=None, mutations=None, w=None):
    """Compute Moran-process transition probabilities.

    The transition matrix is tri-diagonal (i.e. sparse), so only the
    non-zero entries are returned, in a dict keyed by tuples
    (from_state, to_state) over states a = 0..N.

    Parameters:
        N: population size (a counts individuals of the first type).
        fitness_landscape: callable [a, b] -> fitness vector; used only when
            no incentive is given.
        incentive: callable [a, b] -> (unnormalized) reproduction incentive.
        mutations: callable (N, a) -> (mu, nu); defaults to boundary_mutations().
        w: selection strength; defaults to 1 (strong selection).
    """
    if mutations is None:
        mutations = boundary_mutations()
    # Compare to None so that w=0 (fully neutral selection) is honored;
    # the former truthiness test silently replaced w=0 with 1.
    if w is None:
        w = 1.
    d = dict()
    # Boundary states can only be left via mutation.
    mu, nu = mutations(N, 0)
    d[(0, 1)] = mu
    d[(0, 0)] = 1. - mu
    mu, nu = mutations(N, N)
    d[(N, N - 1)] = nu
    d[(N, N)] = 1. - nu
    for a in range(1, N):
        b = N - a
        if incentive:
            i = normalize(incentive([a, b]))
        else:
            i = normalize(multiply_vectors([a, b], fitness_landscape([a, b])))
        # Interpolate toward the neutral process by selection strength w.
        i[0] = 1. - w + w * i[0]
        i[1] = 1. - w + w * i[1]
        mu, nu = mutations(N, a)
        # Birth of one type (with mutation) times death of the other.
        up = (i[0] * (1. - mu) + i[1] * nu) * float(b) / (a + b)
        down = (i[0] * mu + i[1] * (1. - nu)) * float(a) / (a + b)
        d[(a, a + 1)] = up
        d[(a, a - 1)] = down
        d[(a, a)] = 1. - up - down
    return d
def moran_transitions(N,
                      fitness_landscape=None,
                      incentive=None,
                      mutations=None,
                      w=None):
    """Transition probabilities of the Moran process, as a sparse dict.

    The chain is tri-diagonal, so only the entries keyed (a, a - 1),
    (a, a) and (a, a + 1) are stored, as (from_state, to_state) tuples.
    """
    if not mutations:
        mutations = boundary_mutations()
    if not w:
        w = 1.
    transitions = dict()
    # Lower boundary: leaving state 0 requires a forward mutation.
    mu, nu = mutations(N, 0)
    transitions[(0, 1)], transitions[(0, 0)] = mu, 1. - mu
    # Upper boundary: leaving state N requires a backward mutation.
    mu, nu = mutations(N, N)
    transitions[(N, N - 1)], transitions[(N, N)] = nu, 1. - nu
    for a in range(1, N):
        b = N - a
        if incentive:
            dist = normalize(incentive([a, b]))
        else:
            dist = normalize(multiply_vectors([a, b], fitness_landscape([a, b])))
        # Blend with the neutral process according to selection strength w.
        dist[0] = 1. - w + w * dist[0]
        dist[1] = 1. - w + w * dist[1]
        mu, nu = mutations(N, a)
        birth_up = (dist[0] * (1. - mu) + dist[1] * nu) * float(b) / (a + b)
        birth_down = (dist[0] * mu + dist[1] * (1. - nu)) * float(a) / (a + b)
        transitions[(a, a + 1)] = birth_up
        transitions[(a, a - 1)] = birth_down
        transitions[(a, a)] = 1. - birth_up - birth_down
    return transitions
# Example #4
# 0
def entropy_test(N=50):
    """Plot the Shannon entropy of a family of normalized 3-vectors.

    For each i in [0, N//2) builds the vector
    [(N-i)^2, (N-i)^2, i*(N-2i)], normalizes it, prints the maximum
    entropy observed, and plots entropy against i.

    Parameters:
        N: population size controlling the sweep length; defaults to 50.
    """
    from mpsim.math_helpers import shannon_entropy, normalize
    ts = []
    # Floor division: `range` needs an int (N / 2 is a float on Python 3).
    for i in range(0, N // 2):
        t = [(N - i) * (N - i), (N - i) * (N - i), i * (N - 2 * i)]
        ts.append(normalize(t))
    es = [shannon_entropy(t) for t in ts]
    # print() call replaces the Python-2-only print statement.
    print(max(es))
    pyplot.plot(range(0, N // 2), es)
    pyplot.show()
# Example #5
# 0
def entropy_test(N=50):
    """Plot the Shannon entropy of a family of normalized 3-vectors.

    For each i in [0, N//2) builds the vector
    [(N-i)^2, (N-i)^2, i*(N-2i)], normalizes it, prints the maximum
    entropy observed, and plots entropy against i.

    Parameters:
        N: population size controlling the sweep length; defaults to 50.
    """
    from mpsim.math_helpers import shannon_entropy, normalize
    # Floor division: `range` needs an int (N / 2 is a float on Python 3).
    half = N // 2
    ts = [normalize([(N - i) * (N - i), (N - i) * (N - i), i * (N - 2 * i)])
          for i in range(half)]
    es = [shannon_entropy(t) for t in ts]
    # print() call replaces the Python-2-only print statement.
    print(max(es))
    pyplot.plot(range(half), es)
    pyplot.show()
def wright_fisher_transitions(N,
                              fitness_landscape=None,
                              incentive=None,
                              mutations=None,
                              w=None):
    """Compute Wright-Fisher transition probabilities for the Markov process.

    Returns an (N+1) x (N+1) numpy array d where d[a][j] is the probability
    of moving from a individuals of the first type to j in one generation
    (states a = 0..N).

    Parameters:
        N: population size.
        fitness_landscape: callable [a, b] -> fitness vector; used only when
            no incentive is given.
        incentive: callable [a, b] -> (unnormalized) reproduction incentive.
        mutations: callable (N, a) -> (mu, nu); defaults to boundary_mutations().
        w: selection strength; defaults to 1 (strong selection).
    """
    if mutations is None:
        mutations = boundary_mutations()
    # Compare to None so that w=0 (fully neutral selection) is honored;
    # the former truthiness test silently replaced w=0 with 1.
    if w is None:
        w = 1.
    # Fixed: a dead `d = dict()` used to be created and immediately
    # discarded; the dense numpy array is the structure actually used.
    d = numpy.zeros((N + 1, N + 1))
    # Boundary states can only be left via mutation.
    mu, nu = mutations(N, 0)
    d[0][1] = mu
    d[0][0] = 1. - mu
    mu, nu = mutations(N, N)
    d[N][N - 1] = nu
    d[N][N] = 1. - nu
    for a in range(1, N):
        b = N - a
        if incentive:
            i = normalize(incentive([a, b]))
        else:
            i = normalize(multiply_vectors([a, b], fitness_landscape([a, b])))
        # Interpolate toward the neutral process by selection strength w.
        i[0] = 1. - w + w * i[0]
        i[1] = 1. - w + w * i[1]
        mu, nu = mutations(N, a)
        # Per-offspring probability of sampling each type, after mutation.
        up = i[0] * (1. - mu) + i[1] * nu
        down = i[0] * mu + i[1] * (1. - nu)
        # Binomial sampling of N offspring; comb(N, j) == 1 for j in {0, N},
        # so those cases skip the (exact, integer) binomial coefficient.
        for j in range(0, N + 1):
            if (j == 0) or (j == N):
                d[a][j] = exp(j * log(up) + (N - j) * log(down))
            else:
                d[a][j] = comb(N, j, exact=True) * exp(j * log(up) +
                                                       (N - j) * log(down))
    return d