Example #1
def eval_f(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        missing: support for scipy.linalg.expm

        i.e., this function can't be differentiated with algopy

    """

    a, b, v = transform_params(Y)

    Q = algopy.zeros((4, 4), dtype=Y)
    Q[0, 0] = 0
    Q[0, 1] = a
    Q[0, 2] = b
    Q[0, 3] = b
    Q[1, 0] = a
    Q[1, 1] = 0
    Q[1, 2] = b
    Q[1, 3] = b
    Q[2, 0] = b
    Q[2, 1] = b
    Q[2, 2] = 0
    Q[2, 3] = a
    Q[3, 0] = b
    Q[3, 1] = b
    Q[3, 2] = a
    Q[3, 3] = 0

    Q = Q * v
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    P = algopy.expm(Q)
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
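
Because every operation in eval_f goes through algopy, its gradient can be evaluated in forward mode with algopy's UTPM driver, assuming transform_params is itself algopy-compatible. A minimal sketch, where Y0 is assumed to be a feasible parameter vector and g_data is the module-level data array used above:

import algopy
import numpy

def eval_grad_f(Y0):
    # Propagate first-order Taylor coefficients through eval_f
    # and extract the gradient d(eval_f)/dY.
    Y = algopy.UTPM.init_jacobian(numpy.asarray(Y0, dtype=float))
    return algopy.UTPM.extract_jacobian(eval_f(Y))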
Example #2
def create_transition_matrix_numeric(mu, d, v):
    """
    Use numerical integration.
    This is not so compatible with algopy because the integration goes through Fortran.
    Note that d = 2*h - 1 following Kimura 1957.
    The rate mu is a catch-all scaling factor.
    The finite distribution v is assumed to be a stochastic vector.
    @param mu: scales the rate matrix
    @param d: dominance (as opposed to recessiveness) of preferred states.
    @param v: numpy array defining a distribution over states
    @return: transition matrix
    """

    # Construct the numpy matrix whose entries
    # are differences of log equilibrium probabilities.
    # Everything in this code block is pure numpy.
    F = numpy.log(v)
    e = numpy.ones_like(F)
    S = numpy.outer(e, F) - numpy.outer(F, e)

    # Create the rate matrix Q and return its matrix exponential.
    # Things in this code block may use algopy if mu and d
    # are bundled with truncated Taylor information.
    D = d * numpy.sign(S)
    pre_Q = numpy.vectorize(numeric_fixation)(0.5*S, D)
    pre_Q = mu * pre_Q
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    P = algopy.expm(Q)
    return P
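
The diagonal correction in the last block is the standard rate-matrix convention: subtracting the row sums on the diagonal makes every row of Q sum to zero, so expm(Q) has rows that sum to one. A quick pure-numpy check of that convention on a made-up pre_Q:

import numpy
from scipy.linalg import expm

pre_Q = numpy.array([[0.0, 0.3, 0.7],
                     [0.2, 0.0, 0.8],
                     [0.5, 0.5, 0.0]])
Q = pre_Q - numpy.diag(pre_Q.sum(axis=1))
P = expm(Q)
assert numpy.allclose(Q.sum(axis=1), 0.0)   # valid rate matrix
assert numpy.allclose(P.sum(axis=1), 1.0)   # valid transition matrix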
Example #3
def eval_f_unconstrained_kb(
        theta,
        subs_counts, log_counts, v,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        ):
    """
    No dominance/recessivity constraint.
    @param theta: length eight unconstrained vector of free variables
    """
    # unpack theta
    log_mu = theta[0]
    log_kappa = theta[1]
    log_omega = theta[2]
    d = theta[3]
    log_kb = theta[4]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[5]
    log_nt_weights[1] = theta[6]
    log_nt_weights[2] = theta[7]
    log_nt_weights[3] = 0
    #
    # construct the transition matrix
    Q = get_Q_unconstrained_kb(
            ts, tv, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_kappa, log_omega, d, log_kb, log_nt_weights)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    neg_log_likelihood = -get_log_likelihood(P, v, subs_counts)
    print(neg_log_likelihood)
    return neg_log_likelihood
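
In context, a function like this is typically handed to a quasi-Newton optimizer together with an algopy-generated gradient. A hypothetical usage sketch, assuming args is the tuple (subs_counts, log_counts, v, h, ts, tv, syn, nonsyn, compo, asym_compo) and that a zero vector is a feasible starting point for the length-eight theta:

import algopy
import numpy
import scipy.optimize

def eval_grad(f, theta, args):
    # Forward-mode gradient of f at theta via algopy's UTPM driver.
    theta = algopy.UTPM.init_jacobian(theta)
    return algopy.UTPM.extract_jacobian(f(theta, *args))

result = scipy.optimize.minimize(
        eval_f_unconstrained_kb, numpy.zeros(8), args=args,
        jac=lambda t, *a: eval_grad(eval_f_unconstrained_kb, t, a),
        method='BFGS')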
Example #4
def eval_f(
        theta,
        subs_counts, log_counts, v,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        ):
    """
    @param theta: length six unconstrained vector of free variables
    """

    # unpack theta
    log_mu = theta[0]
    log_kappa = theta[1]
    log_omega = theta[2]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[3]
    log_nt_weights[1] = theta[4]
    log_nt_weights[2] = theta[5]
    log_nt_weights[3] = 0

    # construct the transition matrix
    Q = get_Q(
            ts, tv, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_kappa, log_omega, log_nt_weights)
    P = algopy.expm(Q)
    
    # return the neg log likelihood
    return -get_log_likelihood(P, v, subs_counts)
Example #5
def eval_f(
        theta,
        subs_counts, log_counts, v,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        ):
    """
    The function formerly known as minimize-me.
    @param theta: length six unconstrained vector of free variables
    """
    # unpack theta
    log_mu = theta[0]
    log_kappa = theta[1]
    log_omega = theta[2]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[3]
    log_nt_weights[1] = theta[4]
    log_nt_weights[2] = theta[5]
    log_nt_weights[3] = 0
    #
    # construct the transition matrix
    Q = get_Q(
            ts, tv, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_kappa, log_omega, log_nt_weights)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    return -get_log_likelihood(P, v, subs_counts)
Example #7
def get_log_likelihood(pre_Q_prefix, pre_Q_suffix, v, subs_counts):
    """
    The stationary distribution of P is empirically derived.
    It is proportional to the codon counts by construction.
    @param pre_Q_prefix: component of the Hadamard decomposition of pre_Q
    @param pre_Q_suffix: component of the Hadamard decomposition of pre_Q
    @param v: stationary distribution proportional to observed codon counts
    @param subs_counts: observed substitution counts
    """
    Q = get_Q(pre_Q_prefix, pre_Q_suffix)
    #
    P = algopy.expm(Q)
    #
    # This untested eigh approach is way too slow because of the algopy eigh.
    """
    Da = numpy.diag(numpy.sqrt(v))
    Db = numpy.diag(numpy.reciprocal(numpy.sqrt(v)))
    Q_symmetrized = algopy.dot(Da, algopy.dot(Q, Db))
    w, V = algopy.eigh(Q_symmetrized)
    W_exp = algopy.diag(algopy.exp(w))
    P_symmetrized = algopy.dot(V, algopy.dot(W_exp, V.T))
    P = algopy.dot(Db, algopy.dot(P_symmetrized, Da))
    """
    #
    log_score_matrix = algopy.log(algopy.dot(algopy.diag(v), P))
    log_likelihood = algopy.sum(log_score_matrix * subs_counts)
    return log_likelihood
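
The identity behind the commented eigh block can be checked in pure numpy for a reversible rate matrix: with Da = diag(sqrt(v)) and Db = diag(1/sqrt(v)), the matrix Da Q Db is symmetric, and Db expm(Da Q Db) Da recovers expm(Q). A small self-contained check on a made-up reversible Q:

import numpy
from scipy.linalg import expm

v = numpy.array([0.1, 0.2, 0.3, 0.4])
R = numpy.array([[0.0, 1.0, 2.0, 1.0],
                 [1.0, 0.0, 1.0, 3.0],
                 [2.0, 1.0, 0.0, 1.0],
                 [1.0, 3.0, 1.0, 0.0]])
Q = R * v                          # detailed balance: v[i]*Q[i,j] == v[j]*Q[j,i]
Q -= numpy.diag(Q.sum(axis=1))     # rows sum to zero
Da = numpy.diag(numpy.sqrt(v))
Db = numpy.diag(1.0 / numpy.sqrt(v))
w, V = numpy.linalg.eigh(Da.dot(Q).dot(Db))
P = Db.dot(V).dot(numpy.diag(numpy.exp(w))).dot(V.T).dot(Da)
assert numpy.allclose(P, expm(Q))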
Example #8
def eval_f_unconstrained_kb(
        theta,
        subs_counts, log_counts, v,
        h,
        gtr, syn, nonsyn, compo, asym_compo,
        ):
    # unpack theta
    log_mu = theta[0]
    log_g = algopy.zeros(6, dtype=theta)
    log_g[0] = theta[1]
    log_g[1] = theta[2]
    log_g[2] = theta[3]
    log_g[3] = theta[4]
    log_g[4] = theta[5]
    log_g[5] = 0
    log_omega = theta[6]
    d = theta[7]
    log_kb = theta[8]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[9]
    log_nt_weights[1] = theta[10]
    log_nt_weights[2] = theta[11]
    log_nt_weights[3] = 0
    #
    # construct the transition matrix
    Q = get_Q_unconstrained_kb(
            gtr, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_g, log_omega, d, log_kb, log_nt_weights)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    return -get_log_likelihood(P, v, subs_counts)
Example #9
def eval_f(
        theta,
        #subs_counts, 
        patterns, pattern_weights,
        log_counts, v,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        ):
    """
    @param theta: length six unconstrained vector of free variables
    """

    # unpack theta
    log_mu = theta[0]
    log_kappa = theta[1]
    log_omega = theta[2]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[3]
    log_nt_weights[1] = theta[4]
    log_nt_weights[2] = theta[5]
    log_nt_weights[3] = 0

    # construct the transition matrix
    Q = get_Q(
            ts, tv, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_kappa, log_omega, log_nt_weights)
    P = algopy.expm(Q)
    
    # return the neg log likelihood
    """
    log_likelihood = get_log_likelihood(P, v, subs_counts)
    #A = subs_counts
    #B = algopy.log(P.T * v)
    #log_likelihoods = slow_part(A, B)
    #return algopy.sum(log_likelihoods)
    """
    npatterns = patterns.shape[0]
    nstates = patterns.shape[1]
    ov = (0, 1)
    v_to_children = {1 : [0]}
    de_to_P = {(1, 0) : P}
    root_prior = v
    log_likelihood = alignll.fast_fels(
            ov, v_to_children, de_to_P, root_prior,
            patterns, pattern_weights,
            )
    neg_ll = -log_likelihood
    print(neg_ll)
    return neg_ll
Example #10
def get_conditional_log_likelihood(pre_Q_prefix, pre_Q_suffix, subs_counts):
    """
    @param pre_Q_prefix: component of the Hadamard decomposition of pre_Q
    @param pre_Q_suffix: component of the Hadamard decomposition of pre_Q
    @param subs_counts: observed substitution counts
    """
    # NOTE: this is not the usual log likelihood,
    # because it is conditional on the initial sequence,
    # and it is not assumed to be at stationarity with respect to the
    # pair of diverged sequences.
    # It is kind of a hack.
    Q = get_Q(pre_Q_prefix, pre_Q_suffix)
    P = algopy.expm(Q)
    log_likelihood = algopy.sum(algopy.log(P) * subs_counts)
    return log_likelihood
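
The contrast with the stationary score of Example #7, stated in plain numpy with hypothetical helper names: the conditional version scores log(P) directly, while the stationary version also weights each ancestral row i by its stationary probability v[i].

import numpy

def conditional_log_likelihood(P, subs_counts):
    return numpy.sum(numpy.log(P) * subs_counts)

def stationary_log_likelihood(P, v, subs_counts):
    # v[:, newaxis] * P is the same as dot(diag(v), P) in Example #7.
    return numpy.sum(numpy.log(v[:, numpy.newaxis] * P) * subs_counts)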
Example #12
    def get_neg_ll(cls,
            patterns, pattern_weights,
            stationary_distn,
            ts, tv, syn, nonsyn,
            theta,
            ):
        """
        This model has only a single omega parameter.
        @param theta: vector of free variables with sensitivities
        """

        # unpack theta
        log_mus = theta[0:3]
        log_kappa = theta[3]
        log_omega = theta[4]

        # construct the transition matrices
        transition_matrices = []
        for i in range(3):
            mu = algopy.exp(log_mus[i])
            kappa = algopy.exp(log_kappa)
            omega = algopy.exp(log_omega)
            pre_Q = codon1994.get_pre_Q(
                    ts, tv, syn, nonsyn,
                    stationary_distn,
                    kappa, omega)
            Q = markovutil.pre_Q_to_Q(pre_Q, stationary_distn, mu)
            P = algopy.expm(Q)
            transition_matrices.append(P)

        # return the neg log likelihood
        ov = range(4)
        v_to_children = {3 : [0, 1, 2]}
        de_to_P = {
                (3, 0) : transition_matrices[0],
                (3, 1) : transition_matrices[1],
                (3, 2) : transition_matrices[2],
                }
        root_prior = stationary_distn
        log_likelihood = alignll.fast_fels(
        #log_likelihood = alignll.fels(
                ov, v_to_children, de_to_P, root_prior,
                patterns, pattern_weights,
                )
        neg_ll = -log_likelihood
        print(neg_ll)
        return neg_ll
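
The tree encoding handed to alignll.fast_fels above describes a star tree: vertex 3 is the root with children 0, 1, and 2, and each directed edge carries its own transition matrix. For a fully observed pattern, Felsenstein pruning reduces to a single sum over root states. A pure-numpy restatement of that quantity (a sketch, not alignll's actual API), assuming each pattern row lists the observed states of leaves 0, 1, 2 with no missing data:

import numpy

def star_tree_log_likelihood(Ps, root_prior, patterns, pattern_weights):
    total = 0.0
    for (s0, s1, s2), w in zip(patterns, pattern_weights):
        # Likelihood of one site: sum over the unobserved root state.
        site = numpy.sum(
                root_prior * Ps[0][:, s0] * Ps[1][:, s1] * Ps[2][:, s2])
        total += w * numpy.log(site)
    return total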
Example #13
def get_branch_ll(subs_counts, pre_Q, distn, branch_length):
    """
    This log likelihood calculation function is compatible with algopy.
    @param subs_counts: substitution counts
    @param pre_Q: rates with arbitrary scaling and arbitrary diagonals
    @param distn: initial distribution
    @param branch_length: expected number of changes
    @return: log likelihood
    """
    Q = pre_Q_to_Q(pre_Q, distn, branch_length)
    P = algopy.expm(Q)

    # Scale the rows of the transition matrix by the initial distribution.
    # This scaled matrix will be symmetric if the process is reversible.
    P_scaled = (P.T * distn).T

    # Use the transition matrix and the substitution counts
    # to compute the log likelihood.
    return algopy.sum(algopy.log(P_scaled) * subs_counts)
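
The symmetry remark in the comment is easy to confirm in pure numpy: for a reversible process, (P.T * distn).T equals dot(diag(distn), P), which is symmetric. A made-up two-state example:

import numpy
from scipy.linalg import expm

distn = numpy.array([0.4, 0.6])
R = numpy.array([[0.0, 1.0],
                 [1.0, 0.0]])      # symmetric exchangeabilities
pre_Q = R * distn                  # reversible rates
Q = pre_Q - numpy.diag(pre_Q.sum(axis=1))
P = expm(Q)
P_scaled = (P.T * distn).T         # same as dot(diag(distn), P)
assert numpy.allclose(P_scaled, P_scaled.T)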
Example #14
def neg_log_likelihood(
        ov, v_to_children, root_prior, 
        patterns, pat_mults,
        des,
        log_blens,
        ):
    blens = algopy.exp(log_blens)
    Q = get_jc_rate_matrix()
    de_to_P = dict((de, algopy.expm(b*Q)) for de, b in zip(des, blens))
    log_likelihood = alignll.fels(
            ov, v_to_children, de_to_P, root_prior,
            patterns, pat_mults,
            )
    neg_ll = -log_likelihood
    #print 'branch lengths:'
    #print blens
    #print 'neg log likelihood:'
    #print neg_ll
    #print
    return neg_ll
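
get_jc_rate_matrix is not shown in this example; a plausible definition, assuming the usual convention that the Jukes-Cantor rate matrix is normalized to one expected change per unit of branch length:

import numpy

def get_jc_rate_matrix():
    # Off-diagonal rates 1/3 and diagonal -1: rows sum to zero and the
    # expected rate under the uniform distribution is exactly 1.
    Q = numpy.full((4, 4), 1.0 / 3.0)
    numpy.fill_diagonal(Q, -1.0)
    return Q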
Example #15
def get_branch_mix(probs, pre_Qs, eq_distns, branch_length):
    """
    This log likelihood calculation function is compatible with algopy.
    Note that the word 'mix' in the function name
    does not refer to a mix of branch lengths,
    but rather to a mixture of unscaled parameterized rate matrices.
    @param probs: discrete distribution of mixture probabilities
    @param pre_Qs: rates with arbitrary scaling and arbitrary diagonals
    @param eq_distns: equilibrium distributions
    @param branch_length: expected number of changes
    @return: transition matrices
    """

    # Subtract diagonals to give the unscaled rate matrices.
    # Also compute the expected rates of the unscaled rate matrices.
    # Use an unnecessarily explicit-looking calculation,
    # because the entries inside the probs list
    # and the entries inside the observed expected rates list
    # each have Taylor information,
    # but the lists themselves are not Taylor-aware.
    # The code could be reorganized later so that we are using
    # more explicitly Taylor-aware lists.
    unscaled_Qs = []
    r = 0
    for p, pre_Q, eq_distn in zip(probs, pre_Qs, eq_distns):
        unscaled_Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
        unscaled_Qs.append(unscaled_Q)
        observed_r = -algopy.dot(algopy.diag(unscaled_Q), eq_distn)
        r = r + p * observed_r

    # Compute the correctly scaled rate matrices
    # so that the expected rate of the mixture is equal
    # to the branch length that has been passed as an argument
    # to this function.
    Qs = []
    for unscaled_Q in unscaled_Qs:
        Q = (branch_length / r) * unscaled_Q
        Qs.append(Q)

    # Return the appropriately time-scaled transition matrices.
    return [algopy.expm(Q) for Q in Qs]
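
A pure-numpy check of the scaling convention, with two hypothetical two-state pre-rate matrices and a 50/50 mixture: after scaling by branch_length / r, the expected rate of the mixture equals the requested branch length.

import numpy

probs = [0.5, 0.5]
pre_Qs = [numpy.array([[0.0, 1.0], [2.0, 0.0]]),
          numpy.array([[0.0, 3.0], [1.0, 0.0]])]
eq_distns = [numpy.array([2.0, 1.0]) / 3.0,
             numpy.array([1.0, 3.0]) / 4.0]
branch_length = 0.1

r = 0.0
unscaled_Qs = []
for p, pre_Q, eq in zip(probs, pre_Qs, eq_distns):
    unscaled_Q = pre_Q - numpy.diag(pre_Q.sum(axis=1))
    unscaled_Qs.append(unscaled_Q)
    r += p * -numpy.dot(numpy.diag(unscaled_Q), eq)

mix_rate = sum(
        p * -numpy.dot(numpy.diag((branch_length / r) * Q), eq)
        for p, Q, eq in zip(probs, unscaled_Qs, eq_distns))
assert numpy.isclose(mix_rate, branch_length)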
Example #16
def eval_f(
        theta,
        subs_counts, log_counts, v,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        ):
    """
    The function formerly known as minimize-me.
    @param theta: length six unconstrained vector of free variables
    """
    #
    # construct the rate matrix and the transition matrix
    Q = get_Q_slsqp(
            ts, tv, syn, nonsyn, compo, asym_compo,
            h,
            log_counts, v,
            theta)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    neg_log_likelihood = -get_log_likelihood(P, v, subs_counts)
    return neg_log_likelihood
Example #17
def create_transition_matrix_explicit(Y, v):
    """
    Use hypergeometric functions.
    Note that d = 2*h - 1 following Kimura 1957.
    The rate mu is a catch-all scaling factor.
    The finite distribution v is assumed to be a stochastic vector.
    @param Y: vector of parameters to optimize
    @param v: numpy array defining a distribution over states
    @return: transition matrix
    """

    n = len(v)
    mu, d = transform_params(Y)

    # Construct the numpy matrix whose entries
    # are differences of log equilibrium probabilities.
    # Everything in this code block is pure numpy.
    F = numpy.log(v)
    e = numpy.ones_like(F)
    S = numpy.outer(e, F) - numpy.outer(F, e)

    # Create the rate matrix Q and return its matrix exponential.
    # Things in this code block may use algopy if mu and d
    # are bundled with truncated Taylor information.
    D = d * numpy.sign(S)

    #FIXME: I would like to further vectorize this block,
    # and also it may currently give subtly wrong results
    # because denom_piecewise may not vectorize correctly.
    pre_Q = algopy.zeros((n, n), dtype=Y)
    for i in range(n):
        for j in range(n):
            pre_Q[i, j] = 1. / denom_piecewise(0.5 * S[i, j], D[i, j])

    pre_Q = mu * pre_Q
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    P = algopy.expm(Q)
    return P
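
The vectorization requested by the FIXME would mirror the numpy.vectorize pattern already used by the numeric variant in Example #2. A sketch of what the double loop could become, valid only when Y carries no Taylor information (plain float evaluation) and only once denom_piecewise is confirmed to be vectorization-safe:

    # Hypothetical vectorized replacement for the nested i, j loop,
    # reusing the function's own S, D, and mu.
    pre_Q = 1.0 / numpy.vectorize(denom_piecewise)(0.5 * S, D)
    pre_Q = mu * pre_Q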
Example #18
def eval_f_unconstrained_kb(
    theta,
    subs_counts,
    log_counts,
    v,
    h,
    gtr,
    syn,
    nonsyn,
    compo,
    asym_compo,
):
    # unpack theta
    log_mu = theta[0]
    log_g = algopy.zeros(6, dtype=theta)
    log_g[0] = theta[1]
    log_g[1] = theta[2]
    log_g[2] = theta[3]
    log_g[3] = theta[4]
    log_g[4] = theta[5]
    log_g[5] = 0
    log_omega = theta[6]
    d = theta[7]
    log_kb = theta[8]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[9]
    log_nt_weights[1] = theta[10]
    log_nt_weights[2] = theta[11]
    log_nt_weights[3] = 0
    #
    # construct the transition matrix
    Q = get_Q_unconstrained_kb(gtr, syn, nonsyn, compo, asym_compo, h,
                               log_counts, log_mu, log_g, log_omega, d, log_kb,
                               log_nt_weights)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    return -get_log_likelihood(P, v, subs_counts)
Example #21
def eval_f(
        theta,
        subs_counts, log_counts, v,
        h,
        gtr, syn, nonsyn, compo, asym_compo,
        ):
    """
    The function formerly known as minimize-me.
    @param theta: length ten unconstrained vector of free variables
    """
    # unpack theta
    log_mu = theta[0]
    log_g = algopy.zeros(6, dtype=theta)
    log_g[0] = theta[1]
    log_g[1] = theta[2]
    log_g[2] = theta[3]
    log_g[3] = theta[4]
    log_g[4] = theta[5]
    log_g[5] = 0
    log_omega = theta[6]
    log_nt_weights = algopy.zeros(4, dtype=theta)
    log_nt_weights[0] = theta[7]
    log_nt_weights[1] = theta[8]
    log_nt_weights[2] = theta[9]
    log_nt_weights[3] = 0
    #
    # construct the transition matrix
    Q = get_Q(
            gtr, syn, nonsyn, compo, asym_compo,
            h,
            log_counts,
            log_mu, log_g, log_omega, log_nt_weights)
    P = algopy.expm(Q)
    #
    # return the neg log likelihood
    return -get_log_likelihood(P, v, subs_counts)