Code example #1
 def natural_to_encoded(cls, natural_theta):
     """
     The first parameter is a proportion.
     The fourth parameter is unconstrained.
     """
     encoded = algopy.zeros_like(natural_theta)
     encoded[0] = algopy.special.logit(natural_theta[0])
     encoded[1] = algopy.log(natural_theta[1])
     encoded[2] = algopy.log(natural_theta[2])
     encoded[3] = natural_theta[3]
     encoded[4:] = algopy.log(natural_theta[4:])
     return encoded
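
The encoding maps each constrained parameter onto the whole real line, which is convenient for unconstrained gradient-based optimizers. For reference, a minimal sketch of the inverse transform; the name encoded_to_natural and the explicit logistic expression are illustrative assumptions, not taken from the original project.

import algopy

def encoded_to_natural(encoded_theta):
    # hypothetical inverse of natural_to_encoded above
    natural = algopy.zeros_like(encoded_theta)
    natural[0] = 1.0 / (1.0 + algopy.exp(-encoded_theta[0]))  # inverse logit
    natural[1] = algopy.exp(encoded_theta[1])
    natural[2] = algopy.exp(encoded_theta[2])
    natural[3] = encoded_theta[3]  # the unconstrained parameter passes through
    natural[4:] = algopy.exp(encoded_theta[4:])
    return natural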
Code example #2
 def natural_to_encoded(cls, natural_theta):
     """
     The second parameter is a proportion.
     """
     encoded = algopy.zeros_like(natural_theta)
     encoded[0] = algopy.special.logit(natural_theta[0])
     encoded[1:] = algopy.log(natural_theta[1:])
     return encoded
Code example #3
def get_two_taxon_neg_ll(
        model,
        subs_counts,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        theta,
        ):
    """
    Get the negative log likelihood.
    This function takes its free parameters on the natural scale,
    not the log scale.
    It is mostly for computing the hessian;
    otherwise the log-scale version would probably be better.
    The first param group is the model implementation.
    The second param group is the data.
    The third param group consists of data summaries.
    The fourth param group consists of design matrices related to genetic code.
    The fifth param group consists of the free parameters of the model.
    """
    branch_length = theta[0]
    model_theta = theta[1:]
    model_log_theta = algopy.log(model_theta)
    distn = model.get_distn(
            log_counts, codon_distn,
            ts, tv, syn, nonsyn, compo, asym_compo,
            model_log_theta,
            )
    pre_Q = model.get_pre_Q(
            log_counts, codon_distn,
            ts, tv, syn, nonsyn, compo, asym_compo,
            model_log_theta,
            )
    neg_ll = -markovutil.get_branch_ll(
            subs_counts, pre_Q, distn, branch_length)
    return neg_ll
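
Because everything except theta is fixed during optimization, the function is typically curried down to a function of theta alone. A hypothetical usage sketch, assuming the model, data, and design-matrix arguments are already in scope and theta_guess is a made-up starting point:

import functools
import scipy.optimize

f = functools.partial(get_two_taxon_neg_ll,
        model, subs_counts, log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo)
result = scipy.optimize.minimize(f, theta_guess, method='Nelder-Mead')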
Code example #4
File: matrixexponential.py Project: shoyer/algopy
def eval_f(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        missing: support for scipy.linalg.expm

        i.e., this function can't be differentiated with algopy

    """

    a, b, v = transform_params(Y)

    Q = algopy.zeros((4, 4), dtype=Y)
    Q[0, 0] = 0
    Q[0, 1] = a
    Q[0, 2] = b
    Q[0, 3] = b
    Q[1, 0] = a
    Q[1, 1] = 0
    Q[1, 2] = b
    Q[1, 3] = b
    Q[2, 0] = b
    Q[2, 1] = b
    Q[2, 2] = 0
    Q[2, 3] = a
    Q[3, 0] = b
    Q[3, 1] = b
    Q[3, 2] = a
    Q[3, 3] = 0

    Q = Q * v
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    P = algopy.expm(Q)
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
Code example #5
def eval_f_eigh(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        replaced scipy.linalg.expm by a symmetric eigenvalue decomposition

        this function **can** be differentiated with algopy

    """
    a, b, v = transform_params(Y)

    Q = algopy.zeros((4,4), dtype=Y)
    Q[0,0] = 0;    Q[0,1] = a;    Q[0,2] = b;    Q[0,3] = b;
    Q[1,0] = a;    Q[1,1] = 0;    Q[1,2] = b;    Q[1,3] = b;
    Q[2,0] = b;    Q[2,1] = b;    Q[2,2] = 0;    Q[2,3] = a;
    Q[3,0] = b;    Q[3,1] = b;    Q[3,2] = a;    Q[3,3] = 0;

    Q = algopy.dot(Q, algopy.diag(v))
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    va = algopy.diag(algopy.sqrt(v))
    vb = algopy.diag(1./algopy.sqrt(v))
    W, U = algopy.eigh(algopy.dot(algopy.dot(va, Q), vb))
    M = algopy.dot(U, algopy.dot(algopy.diag(algopy.exp(W)), U.T))
    P = algopy.dot(vb, algopy.dot(M, va))
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
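
Since eval_f_eigh is differentiable with algopy, its gradient can be extracted through the usual UTPM driver pattern. A minimal sketch, assuming eval_f_eigh and g_data are in scope; the length of the starting vector is a guess, since it must match transform_params, which is not shown here:

import numpy
import algopy

Y0 = numpy.zeros(5)  # hypothetical length; must match transform_params
Y = algopy.UTPM.init_jacobian(Y0)
grad = algopy.UTPM.extract_jacobian(eval_f_eigh(Y))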
Code example #6
File: markovutil.py Project: argriffing/slowedml
def get_branch_mix_ll(subs_counts, probs, pre_Qs, distn, branch_length):
    """
    This log likelihood calculation function is compatible with algopy.
    Note that the word 'mix' in the function name
    does not refer to a mix of branch lengths,
    but rather to a mixture of unscaled parameterized rate matrices.
    @param subs_counts: substitution counts
    @param probs: discrete distribution of mixture probabilities
    @param pre_Qs: rates with arbitrary scaling and arbitrary diagonals
    @param distn: initial distribution common to both component processes
    @param branch_length: expected number of changes
    @return: log likelihood
    """

    # Get the appropriately time-scaled transition matrices.
    Ps = get_branch_mix(probs, pre_Qs, [distn, distn], branch_length)

    # The probability associated with each count is
    # a convex combination of the probabilities computed with site classes.
    P_mix = probs[0] * Ps[0] + probs[1] * Ps[1]

    # Scale the rows of the transition matrix by the initial distribution.
    # This scaled matrix will be symmetric if the process is reversible.
    P_mix_scaled = (P_mix.T * distn).T

    # Use the probability transition matrix and the substitution counts
    # to compute the log likelihood.
    return algopy.sum(algopy.log(P_mix_scaled) * subs_counts)
Code example #7
File: p53em.py Project: argriffing/nxblink
def unpack_params(log_params):
    """
    Unpack the parameters.

    This function also enforces the simplex constraint
    on the mutational nucleotide distribution,
    and it computes a penalty corresponding to violation of this constraint.

    """
    # undo the log transformation of the parameters
    params = exp(log_params)

    # unpack the parameters
    kappa, omega = params[0:2]
    A, C, G, T = params[2:6]
    blink_on, blink_off = params[6:8]
    edge_rates = params[8:]

    # normalize the nucleotide probability distribution and compute a penalty
    nt_prob_sum = A + C + G + T
    A = A / nt_prob_sum
    C = C / nt_prob_sum
    G = G / nt_prob_sum
    T = T / nt_prob_sum
    penalty = square(log(nt_prob_sum))

    # return unpacked parameters and the penalty
    unpacked = (kappa, omega, A, C, G, T, blink_on, blink_off, edge_rates)
    return unpacked, penalty
Code example #8
def get_log_likelihood(pre_Q_prefix, pre_Q_suffix, v, subs_counts):
    """
    The stationary distribution of P is empirically derived.
    It is proportional to the codon counts by construction.
    @param pre_Q_prefix: component of hadamard decomposition of pre_Q
    @param pre_Q_suffix: component of hadamard decomposition of pre_Q
    @param v: stationary distribution proportional to observed codon counts
    @param subs_counts: observed substitution counts
    """
    Q = get_Q(pre_Q_prefix, pre_Q_suffix)
    #
    P = algopy.expm(Q)
    #
    # This untested eigh approach is way too slow because of the algopy eigh.
    """
    Da = numpy.diag(numpy.sqrt(v))
    Db = numpy.diag(numpy.reciprocal(numpy.sqrt(v)))
    Q_symmetrized = algopy.dot(Da, algopy.dot(Q, Db))
    w, V = algopy.eigh(Q_symmetrized)
    W_exp = algopy.diag(algopy.exp(w))
    P_symmetrized = algopy.dot(V, algopy.dot(W_exp, V.T))
    P = algopy.dot(Db, algopy.dot(P_symmetrized, Da))
    """
    #
    log_score_matrix = algopy.log(algopy.dot(algopy.diag(v), P))
    log_likelihood = algopy.sum(log_score_matrix * subs_counts)
    return log_likelihood
Code example #9
File: numericml.py Project: argriffing/em-dec-2013
def infer_parameter_values(p_guess, mu_guess, data, mask):
    k = p_guess.shape[1]

    # Pack the guess.
    packed_guess = np.concatenate((log(p_guess.flatten()), logit(mu_guess)))

    # Define the objective function, some of its derivatives, and a guess.
    f = partial(penalized_packed_neg_ll, k, data, mask)
    g = partial(eval_grad, f)
    h = partial(eval_hess, f)
    #hessp = partial(eval_hessp, f)

    #cg = algopy.CGraph()
    #x = algopy.Function(list(range(1, len(packed_guess)+1)))
    #y = f(x)
    #cg.trace_off()
    #cg.independentFunctionList = [x]
    #cg.dependentFunctionList = [y]
    #hessp = cg.hess_vec

    # Search for the maximum likelihood parameter values.
    # The direct hessp evaluation turns out to be slower, for some reason,
    # than directly calculating the hessian and then multiplying.
    res = scipy.optimize.minimize(
            f, packed_guess, method='trust-ncg', jac=g,
            hess=h,
            #hessp=hessp,
            )
    xopt = res.x

    # unpack the optimal parameters
    p_opt, mu_opt, penalty_opt = unpack_params(xopt, k)

    return p_opt, mu_opt, h(xopt)
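
The helpers eval_grad and eval_hess are not shown in this snippet. Minimal sketches consistent with the standard algopy UTPM driver pattern (the originals may differ):

import algopy

def eval_grad(f, theta):
    # forward-mode gradient via the UTPM jacobian driver
    theta = algopy.UTPM.init_jacobian(theta)
    return algopy.UTPM.extract_jacobian(f(theta))

def eval_hess(f, theta):
    # forward-mode hessian via the UTPM hessian driver
    theta = algopy.UTPM.init_hessian(theta)
    return algopy.UTPM.extract_hessian(len(theta), f(theta))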
Code example #10
File: sitell.py Project: argriffing/slowedml
def fels(ov, v_to_children, pattern, de_to_P, root_prior):
    """
    The P matrices and the root prior may be algopy objects.
    @param ov: ordered vertices with child vertices before parent vertices
    @param v_to_children: map from a vertex to a sequence of child vertices
    @param pattern: an array that maps vertex to state, or to -1 if internal
    @param de_to_P: map from a directed edge to a transition matrix
    @param root_prior: equilibrium distribution at the root
    @return: log likelihood
    """
    nvertices = len(ov)
    nstates = len(root_prior)
    states = range(nstates)
    root = ov[-1]

    # Initialize the map from vertices to subtree likelihoods.
    likelihoods = algopy.ones(
            (nvertices, nstates),
            dtype=de_to_P.values()[0],
            )

    # Compute the subtree likelihoods using dynamic programming.
    for v in ov:
        for pstate in range(nstates):
            for c in v_to_children.get(v, []):
                P = de_to_P[v, c]
                likelihoods[v, pstate] *= algopy.dot(P[pstate], likelihoods[c])
        state = pattern[v]
        if state >= 0:
            for s in range(nstates):
                if s != state:
                    likelihoods[v, s] = 0

    # Get the log likelihood by summing over equilibrium states at the root.
    return algopy.log(algopy.dot(root_prior, likelihoods[root]))
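
A hypothetical toy call: a two-leaf tree rooted at vertex 2, with leaves 0 and 1 observed in states 0 and 1. (Note that the snippet indexes dict.values(), a Python 2 idiom; under Python 3 the dtype line would need list(de_to_P.values())[0].)

import numpy as np

ov = [0, 1, 2]
v_to_children = {2: [0, 1]}
pattern = np.array([0, 1, -1])
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
de_to_P = {(2, 0): P, (2, 1): P}
root_prior = np.array([0.5, 0.5])
ll = fels(ov, v_to_children, pattern, de_to_P, root_prior)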
Code example #11
def eval_f(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        missing: support for scipy.linalg.expm

        i.e., this function can't be differentiated with algopy

    """

    a, b, v = transform_params(Y)

    Q = algopy.zeros((4,4), dtype=Y)
    Q[0,0] = 0;    Q[0,1] = a;    Q[0,2] = b;    Q[0,3] = b;
    Q[1,0] = a;    Q[1,1] = 0;    Q[1,2] = b;    Q[1,3] = b;
    Q[2,0] = b;    Q[2,1] = b;    Q[2,2] = 0;    Q[2,3] = a;
    Q[3,0] = b;    Q[3,1] = b;    Q[3,2] = a;    Q[3,3] = 0;

    Q = Q * v
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    #P = linalg.expm(Q)
    # XXX can I get rid of the 4 on the following line?
    P = algopy_expm(Q, 4)
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
Code example #12
File: markov_algo.py Project: phubaba/stats
def loglikilihood(r, params1, params2):

    running_sum = 0
    p_1_tm1 = .5
    p_2_tm1 = .5
    g_1_tm1 = 1
    g_2_tm1 = 1
    h_1_tm1 = .25 ** 2
    h_2_tm1 = .25 ** 2
    p_1_tm1_agg_1 = .5
    p_1_tm1_agg_2 = .5

    pm1 = params1
    pm2 = params2
    p_1, p_2, h_1, h_2 = [], [], [], []
    for t in xrange(1, len(r)):
        r_t = r[t]
        P_t = get_P_t(pm1.d, pm1.e, r_t)
        Q_t = get_Q_t(pm2.d, pm2.e, r_t)
        p_1_t = get_p_1_t(P_t, Q_t, g_1_tm1, p_1_tm1, g_2_tm1)
        p_2_t = get_p_2_t(P_t, Q_t, g_1_tm1, p_1_tm1, g_2_tm1)
        p_1_tm1_agg_1 = get_p_1_tm1_agg_1(pm1.d, pm1.e, pm1.lamb, pm1.gamma, h_1_tm1, p_1_tm1, pm2.d, pm2.e, pm2.lamb, pm2.gamma, h_2_tm1, p_2_tm1)
        p_1_tm1_agg_2 = get_p_1_tm1_agg_2(pm1.d, pm1.e, pm1.lamb, pm1.gamma, h_1_tm1, p_1_tm1, pm2.d, pm2.e, pm2.lamb, pm2.gamma, h_2_tm1, p_2_tm1)
        h_tm1_agg_1 = get_h_tm1_agg_i(p_1_tm1_agg_1, h_1_tm1, h_2_tm1, pm1.lamb, pm1.gamma, pm2.lamb, pm2.gamma)
        h_tm1_agg_2 = get_h_tm1_agg_i(p_1_tm1_agg_2, h_1_tm1, h_2_tm1, pm1.lamb, pm1.gamma, pm2.lamb, pm2.gamma)
        delta_t_agg_1 = get_delta_t_agg_i(p_1_tm1_agg_1, r_t, pm1.lamb, pm1.gamma, pm2.lamb, pm2.gamma, h_1_tm1, h_2_tm1)
        delta_t_agg_2 = get_delta_t_agg_i(p_1_tm1_agg_2, r_t, pm1.lamb, pm1.gamma, pm2.lamb, pm2.gamma, h_1_tm1, h_2_tm1)
        h_1_t = get_h_i_t(pm1.omega, pm1.alpha, pm1.beta, pm1.b, pm1.c, pm1.mu, pm1.v, h_tm1_agg_1, delta_t_agg_1)
        h_2_t = get_h_i_t(pm2.omega, pm2.alpha, pm2.beta, pm2.b, pm2.c, pm2.mu, pm2.v, h_tm1_agg_2, delta_t_agg_2)

        f_1_t = get_f(r_t, h_1_tm1, pm1.lamb, pm1.gamma)
        f_2_t = get_f(r_t, h_2_tm1, pm2.lamb, pm2.gamma)
        running_sum += algopy.log(f_1_t)
        running_sum += algopy.log(f_2_t)

        p_1_tm1 = p_1_t
        p_2_tm1 = p_2_t
        g_1_tm1 = f_1_t
        g_2_tm1 = f_2_t
        h_1_tm1 = h_1_t
        h_2_tm1 = h_2_t
        p_1.append(p_1_t)
        p_2.append(p_2_t)
        h_1.append(h_1_t)
        h_2.append(h_2_t)

    return running_sum, (p_1, p_2, h_1, h_2)
Code example #13
def demo_small_tree():
    nvertices = 3
    nleaves = 2
    nedges = 2
    v = np.exp(np.random.randn(2))
    v1, v2 = v.tolist()

    # define the shape of the tree
    B = np.array([
        [1, 0, -1],
        [0, 1, -1],
        ], dtype=float)

    # construct the centered covariance matrix using matrix algebra
    L = centered_tree_covariance(B, nleaves, v)

    # construct the centered covariance matrix using direct methods
    C = np.array([
        [v1, 0],
        [0, v2],
        ], dtype=float)
    C = doubly_centered(C)
    assert_allclose(L, C)

    # sample centered data
    vsqrt = np.sqrt(v)
    xs = []
    nsamples = 1000
    for i in range(nsamples):
        x = np.zeros(nleaves)
        x[0] = np.random.normal(0, vsqrt[0])
        x[1] = np.random.normal(0, vsqrt[1])
        x -= x.mean()
        xs.append(x)

    # check the log likelihood using matrix algebra
    print('average log likelihoods using matrix algebra')
    ll_average_matrix = log_likelihoods(L, xs).mean()
    print(ll_average_matrix)
    print()

    # check the log likelihood using felsenstein pruning
    print('average log likelihoods using felsenstein pruning')
    lls = []
    for x in xs:
        ll = scipy.stats.norm.logpdf(x[1] - x[0], loc=0, scale=sqrt(v1 + v2))
        pruning_adjustment = 0.5 * log(nleaves)
        lls.append(pruning_adjustment + ll)
    ll_average_pruning = np.mean(lls)
    print(ll_average_pruning)
    print()

    d = ll_average_pruning - ll_average_matrix
    print('difference of log likelihoods:')
    print(d)
    print()
    print('exp of difference of log likelihoods:')
    print(exp(d))
    print()
Code example #14
 def natural_to_encoded(cls, natural_theta):
     """
     The first parameter is a proportion.
     """
     encoded = algopy.zeros_like(natural_theta)
     encoded[0] = algopy.special.logit(natural_theta[0])
     encoded[1:] = algopy.log(natural_theta[1:])
     return encoded
Code example #15
File: codon_model.py Project: eteq/algopy
def get_log_likelihood(P, v, subs_counts):
    """
    The stationary distribution of P is empirically derived.
    It is proportional to the codon counts by construction.
    @param P: a transition matrix using codon counts and free parameters
    @param v: stationary distribution proportional to observed codon counts
    @param subs_counts: observed substitution counts
    """
    return algopy.sum(subs_counts * algopy.log(P.T * v))
Code example #16
def get_log_likelihood(P, v, subs_counts):
    """
    The stationary distribution of P is empirically derived.
    It is proportional to the codon counts by construction.
    @param P: a transition matrix using codon counts and free parameters
    @param v: stationary distribution proportional to observed codon counts
    @param subs_counts: observed substitution counts
    """
    return algopy.sum(algopy.log(P.T * v) * subs_counts)
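
In both variants the sum runs over every matrix entry, and entry (i, j) of P.T * v equals v[j] * P[j, i]. An explicit double-loop sketch, with hypothetical names, may make the indexing clearer:

import numpy as np

def get_log_likelihood_loop(P, v, subs_counts):
    # v[j] * P[j, i] is the joint probability of initial state j
    # and final state i under the empirical stationary distribution
    n = len(v)
    total = 0.0
    for i in range(n):
        for j in range(n):
            total += subs_counts[i, j] * np.log(v[j] * P[j, i])
    return total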
Code example #17
File: em.py Project: argriffing/nxblink
def get_ll_root(summary, distn, blink_on, blink_off):
    """

    Parameters
    ----------
    summary : Summary object
        Summary of blinking process trajectories.
    distn : dense possibly exotic array
        Primary state distribution.
    blink_on : float, or exotic float-like with derivatives information
        blink rate on
    blink_off : float, or exotic float-like with derivatives information
        blink rate off

    Returns
    -------
    ll : float, or exotic float-like with derivatives information
        log likelihood contribution from root state

    """
    # construct the blink distribution with the right data type
    blink_distn = algopy.zeros(2, dtype=distn)
    blink_distn[0] = blink_off / (blink_on + blink_off)
    blink_distn[1] = blink_on / (blink_on + blink_off)

    # initialize expected log likelihood using the right data type
    ll = algopy.zeros(1, dtype=distn)[0]

    # root primary state contribution to expected log likelihood
    obs = algopy.zeros_like(distn)
    for state, count in summary.root_pri_to_count.items():
        if count:
            ll = ll + count * log(distn[state])

    # root blink state contribution to expected log likelihood
    if summary.root_off_count:
        ll = ll + summary.root_off_count * log(blink_distn[0])
    if summary.root_xon_count:
        ll = ll + summary.root_xon_count * log(blink_distn[1])

    # return expected log likelihood contribution of root
    return ll / summary.nsamples
Code example #18
File: p53em.py Project: argriffing/nxblink
def pack_params(kappa, omega, A, C, G, T, blink_on, blink_off, edge_rates):
    """
    This function is mainly for constructing initial parameter values.

    Returns log params suitable as an initial vector
    for scipy.optimize.minimize methods.

    """
    global_params = [kappa, omega, A, C, G, T, blink_on, blink_off]
    params = np.array(list(global_params) + list(edge_rates))
    log_params = log(params)
    return log_params
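
pack_params is the inverse of unpack_params from code example #7, up to the simplex normalization of the nucleotide distribution. A hypothetical round trip, assuming the module's numpy-style imports are in scope:

x0 = pack_params(2.0, 0.5, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0, [0.1, 0.2])
unpacked, penalty = unpack_params(x0)
# with an exactly normalized A, C, G, T distribution the penalty is zero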
Code example #19
File: codon1994.py Project: argriffing/slowedml
def get_f1x4_codon_distn(compo, nt_distn):
    """
    The f1x4 notation is from e.g. Table (1) of Yang and Nielsen 1998.
    @param compo: a (ncodons, 4) design matrix defining codon compositions
    @param nt_distn: empirical or free nucleotide distribution
    @return: codon distribution
    """
    log_nt_distn = algopy.log(nt_distn)
    M = log_nt_distn * compo
    log_codon_distn = algopy.sum(M, axis=-1)
    codon_kernel = algopy.exp(log_codon_distn)
    codon_distn = codon_kernel / algopy.sum(codon_kernel)
    return codon_distn
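
The log-space product amounts to raising each nucleotide probability to its count in the codon composition. A small self-contained check with made-up numbers:

import numpy as np

nt_distn = np.array([0.1, 0.2, 0.3, 0.4])
compo_row = np.array([2.0, 0.0, 1.0, 0.0])  # e.g. two of the first nucleotide, one of the third
w = np.exp(np.sum(np.log(nt_distn) * compo_row))
assert np.isclose(w, nt_distn[0]**2 * nt_distn[2])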
Code example #20
File: model2s.py Project: argriffing/ctmczoo
def objective(distn, dwell, trans, log_params):
    """
    Get the expected negative log likelihood.

    This is a helper function for the EM.

    """
    params = algopy.exp(log_params)
    ll_distn = algopy.dot(distn, algopy.log(get_distn(params)))
    ll_dwell = -algopy.dot(get_rates_out(params), dwell)
    ll_trans_01 = trans[0, 1] * log_params[0]
    ll_trans_10 = trans[1, 0] * log_params[1]
    ll = ll_distn + ll_dwell + ll_trans_01 + ll_trans_10
    return -ll
Code example #21
 def get_pre_Qs(cls,
         em_probs, em_distns,
         ts, tv, syn, nonsyn, compo, asym_compo,
         natural_theta):
     f1x4mg_natural_theta = natural_theta[1:]
     pre_Q_0 = codon1994models.F1x4MG.get_pre_Q(
         None, None,
         ts, tv, syn, nonsyn, compo, asym_compo,
         f1x4mg_natural_theta)
     pre_Q_1 = yn2008models.FMutSelG_F.get_pre_Q(
         algopy.log(em_distns[1]), em_distns[1],
         ts, tv, syn, nonsyn, compo, asym_compo,
         natural_theta)
     return pre_Q_0, pre_Q_1
Code example #22
File: preferred_nucleotide.py Project: shoyer/algopy
def eval_f_explicit(subs_counts, v, Y):
    """
    Note that Y is last for compatibility with functools.partial.
    It is convenient for usage with numdifftools, although this parameter
    ordering is the opposite of the convention of scipy.optimize.
    @return: negative log likelihood
    @param Y: parameters to jointly estimate
    @param subs_counts: observed data
    @param v: fixed equilibrium probabilities for states
    """
    P = create_transition_matrix_explicit(Y, v)
    vdiag = algopy.diag(v)
    J = algopy.dot(vdiag, P)
    S = algopy.log(J)
    return -algopy.sum(S * subs_counts)
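
A hypothetical usage sketch showing why Y comes last: functools.partial pins the data arguments, and numdifftools differentiates the resulting single-argument function.

import functools
import numdifftools

f = functools.partial(eval_f_explicit, subs_counts, v)
g = numdifftools.Gradient(f)
# g(Y) now approximates the gradient of the negative log likelihood at Y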
Code example #23
def get_conditional_log_likelihood(pre_Q_prefix, pre_Q_suffix, subs_counts):
    """
    @param pre_Q_prefix: component of hadamard decomposition of pre_Q
    @param pre_Q_suffix: component of hadamard decomposition of pre_Q
    @param subs_counts: observed substitution counts
    """
    # NOTE: this is not the usual log likelihood,
    # because it is conditional on the initial sequence,
    # and it is not assumed to be at stationarity with respect to the
    # pair of diverged sequences.
    # It is kind of a hack.
    Q = get_Q(pre_Q_prefix, pre_Q_suffix)
    P = algopy.expm(Q)
    log_likelihood = algopy.sum(algopy.log(P) * subs_counts)
    return log_likelihood
Code example #24
File: codon1994.py Project: argriffing/slowedml
def get_f3x4_codon_distn(full_compo, nt_distns):
    """
    The f3x4 notation is from e.g. Table (1) of Yang and Nielsen 1998.
    Although algopy implements most of the functions of numpy,
    it seems not to have an implementation of the tensordot function.
    @param full_compo: a (ncodons, 3, 4) binary matrix of codon compositions
    @param nt_distns: empirical or free nucleotide distributions
    @return: codon distribution
    """
    log_nt_distns = algopy.log(nt_distns)
    M = log_nt_distns * full_compo
    log_codon_distn = algopy.sum(algopy.sum(M, axis=-1), axis=-1)
    codon_kernel = algopy.exp(log_codon_distn)
    codon_distn = codon_kernel / algopy.sum(codon_kernel)
    return codon_distn
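
In plain numpy, where tensordot or einsum is available, the same reduction can be written in one line. A sketch under assumed shapes, full_compo being (ncodons, 3, 4) and nt_distns being (3, 4):

import numpy as np

def get_f3x4_codon_distn_np(full_compo, nt_distns):
    # sum log nucleotide probabilities weighted by codon composition
    log_codon_distn = np.einsum('cpn,pn->c', full_compo, np.log(nt_distns))
    codon_kernel = np.exp(log_codon_distn)
    return codon_kernel / codon_kernel.sum()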
Code example #25
def clever_cross_entropy_trees(B, nleaves, va, vb):
    """
    Try being a little more clever.

    @param B: augmented incidence matrix
    @param nleaves: number of leaves
    @param va: augmented reference point edge variances
    @param vb: augmented test point edge variances
    """

    # deduce some quantities assuming an unrooted bifurcating tree
    ninternal = nleaves - 2
    nvertices = nleaves + ninternal
    nedges = nvertices - 1

    # define an index for taking schur complements
    n = nvertices
    k = nleaves + 1

    # Construct the full Laplacian matrix plus J/n.
    # Take a block of the diagonal, corresponding to the inverse
    # of a schur complement.
    Wa = diag(reciprocal(va))
    La_plus = dot(B.T, dot(Wa, B))
    print(La_plus)
    print(scipy.linalg.eigh(La_plus))
    Laa = La_plus[:k, :k]
    Lab = La_plus[:k, k:]
    Lba = La_plus[k:, :k]
    Lbb = La_plus[k:, k:]
    L_schur_plus = Laa - dot(Lab, dot(inv(Lbb), Lba))
    assert_allclose(inv(L_schur_plus), inv(La_plus)[:k, :k])
    A = inv(La_plus)[:k, :k]
    print(scipy.linalg.eigh(A))

    # Construct the Schur complement of the test point matrix.
    Wb = diag(reciprocal(vb))
    L_plus = dot(B.T, dot(Wb, B))
    Laa = L_plus[:k, :k]
    Lab = L_plus[:k, k:]
    Lba = L_plus[k:, :k]
    Lbb = L_plus[k:, k:]
    L_schur_plus = Laa - dot(Lab, dot(inv(Lbb), Lba))
    B_inv = L_schur_plus
    #return 0.5 * ((n-1) * LOG2PI + trace(dot(B_inv, A)) - log(det(B_inv)))
    return 0.5 * (n * LOG2PI + trace(dot(B_inv, A) - 1) - log(det(B_inv)))
Code example #26
def custom_pruning(v, x):
    """
    Do Felsenstein REML pruning using a hardcoded tree.
    Branch lengths are variances.
    Return the log likelihood.
    @param v: branch lengths
    @param x: data vector
    @return: ll
    """
    nleaves = x.shape[0]
    ll01, delta01, x01 = prune_cherry(v[0], v[1], x[0], x[1])
    ll23, delta23, x23 = prune_cherry(v[2], v[3], x[2], x[3])
    v45 = v[4] + delta01 + delta23
    sigma45 = sqrt(v45)
    ll45 = scipy.stats.norm.logpdf(x23 - x01, loc=0, scale=sigma45)
    pruning_adjustment = 0.5 * log(nleaves)
    return pruning_adjustment + ll01 + ll23 + ll45
Code example #27
def unpack_params(params):
    """
    Return the unpacked parameters and a normalization penalty.

    There are four contexts.
    The middle two contexts are constrained to have the same probability.
    
    This is for the numerical likelihood maximization.
    k is the number of contexts.

    """
    k = 2
    nprobs = k * 2

    # de-concatenate the packed parameters
    packed_probs = params[:nprobs]
    packed_mu = params[nprobs:nprobs+2]

    # reshape the transformed probabilities
    reshaped_packed_probs = packed_probs.reshape((2, k))

    # force the 0,1 probability to equal the 1,0 probability
    # penalize if P[0, 1] is different than P[1, 0].
    ancestral_diff = reshaped_packed_probs[0, 1] - reshaped_packed_probs[1, 0]
    ancestral_sum = reshaped_packed_probs[0, 1] + reshaped_packed_probs[1, 0]
    ancestral_mean = 0.5 * ancestral_sum
    reshaped_packed_probs[0, 1] = ancestral_mean
    reshaped_packed_probs[1, 0] = ancestral_mean
    ancestral_penalty = square(ancestral_diff)

    # transform the probabilities and compute a normalization penalty
    unnormal_probs = exp(reshaped_packed_probs)
    denom = unnormal_probs.sum()
    simplex_penalty = square(log(denom))

    # compute the normalized transition probabilities
    p = unnormal_probs / denom

    # compute the total penalty
    penalty = ancestral_penalty + simplex_penalty

    # unpack mu
    mu = expit(packed_mu)

    return p, mu, penalty
Code example #28
def get_two_taxon_neg_ll(
        model,
        em_probs, em_distns,
        subs_counts,
        ts, tv, syn, nonsyn, compo, asym_compo,
        natural_theta,
        ):
    """
    Get the negative log likelihood.
    This function takes its free parameters on the natural scale,
    not the log scale.
    It is mostly for computing the hessian;
    otherwise the log-scale version would probably be better.
    The first param group is the model implementation.
    The second param group is expectation-maximization stuff.
    The third param group is the data.
    The next param group consists of design matrices related to genetic code.
    The next param group consists of the free parameters of the model.
    """

    # unpack some parameters
    branch_length = natural_theta[0]
    natural_model_theta = natural_theta[1:]

    # compute the appropriately scaled transition matrices
    pre_Qs = model.get_pre_Qs(
            em_probs, em_distns,
            ts, tv, syn, nonsyn, compo, asym_compo,
            natural_model_theta)
    eq_distns = model.get_distns(
            em_probs, em_distns,
            ts, tv, syn, nonsyn, compo, asym_compo,
            natural_model_theta)
    Ps = markovutil.get_branch_mix(em_probs, pre_Qs, eq_distns, branch_length)

    # compute the mixture transition matrix
    P_mix = algopy.zeros_like(Ps[0])
    P_mix += em_probs[0] * (Ps[0].T * eq_distns[0]).T
    P_mix += em_probs[1] * (Ps[1].T * eq_distns[1]).T

    # compute the neg log likelihood
    neg_ll = -algopy.sum(algopy.log(P_mix) * subs_counts)
    print(neg_ll)
    return neg_ll
Code example #29
File: markovutil.py Project: argriffing/slowedml
def get_branch_ll(subs_counts, pre_Q, distn, branch_length):
    """
    This log likelihood calculation function is compatible with algopy.
    @param subs_counts: substitution counts
    @param pre_Q: rates with arbitrary scaling and arbitrary diagonals
    @param distn: initial distribution
    @param branch_length: expected number of changes
    @return: log likelihood
    """
    Q = pre_Q_to_Q(pre_Q, distn, branch_length)
    P = algopy.expm(Q)

    # Scale the rows of the transition matrix by the initial distribution.
    # This scaled matrix will be symmetric if the process is reversible.
    P_scaled = (P.T * distn).T

    # Use the transition matrix and the substitution counts
    # to compute the log likelihood.
    return algopy.sum(algopy.log(P_scaled) * subs_counts)
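
The helper pre_Q_to_Q is not shown here. A plain-numpy sketch of plausible behavior, assuming it discards the arbitrary diagonal, rebuilds it from the off-diagonal row sums, and rescales so the expected number of changes under distn matches branch_length; the project's actual helper may differ:

import numpy as np

def pre_Q_to_Q_np(pre_Q, distn, branch_length):
    # hypothetical reconstruction, not the original code
    off_diag = pre_Q - np.diag(np.diag(pre_Q))
    rates_out = off_diag.sum(axis=1)
    Q_unscaled = off_diag - np.diag(rates_out)
    expected_rate = np.dot(distn, rates_out)
    return Q_unscaled * (branch_length / expected_rate)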
Code example #30
File: fmutsel.py Project: argriffing/slowedml
def get_pre_Q(
        log_counts,
        h,
        ts, tv, syn, nonsyn, compo, asym_compo,
        nt_distn, kappa, omega,
        ):

    # compute the selection differences
    F = get_selection_F(log_counts, compo, algopy.log(nt_distn))
    S = get_selection_S(F)

    # compute the term that corresponds to conditional fixation rate of codons
    codon_fixation = h(S)

    # compute the mutation and fixation components
    A = (kappa * ts + tv) * (omega * nonsyn + syn)
    B = algopy.dot(asym_compo, nt_distn) * codon_fixation

    # construct the pre rate matrix
    pre_Q = A * B
    return pre_Q
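
The argument h maps selection differences S to conditional fixation terms. One standard choice in mutation-selection models is the Halpern-Bruno style function h(S) = S / (1 - exp(-S)); a plain-numpy sketch, noting that the project's actual h may differ:

import numpy as np

def genic_fixation(S):
    # h(S) = S / (1 - exp(-S)); the singularity at S = 0 is removable,
    # with limiting value 1
    with np.errstate(divide='ignore', invalid='ignore'):
        out = S / (1.0 - np.exp(-S))
    return np.where(S == 0, 1.0, out)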
Code example #31
File: sitell.py Project: argriffing/slowedml
def brute(ov, v_to_children, pattern, de_to_P, root_prior):
    """
    This function is only for testing and documentation.
    The P matrices and the root prior may be algopy objects.
    @param ov: ordered vertices with child vertices before parent vertices
    @param v_to_children: map from a vertex to a sequence of child vertices
    @param pattern: an array that maps vertex to state, or to -1 if internal
    @param de_to_P: map from a directed edge to a transition matrix
    @param root_prior: equilibrium distribution at the root
    @return: log likelihood
    """
    nvertices = len(pattern)
    nstates = len(root_prior)
    root = ov[-1]
    v_unknowns = [v for v, state in enumerate(pattern) if state == -1]
    n_unknowns = len(v_unknowns)

    # Construct the set of directed edges on the tree.
    des = set((p, c) for p, cs in v_to_children.items() for c in cs)

    # Compute the likelihood by directly summing over all possibilities.
    likelihood = 0
    for assignment in itertools.product(range(nstates), repeat=n_unknowns):

        # Fill in the state assignments for all vertices.
        augmented_pattern = np.array(pattern)
        for v, state in zip(v_unknowns, assignment):
            augmented_pattern[v] = state

        # Add to the log likelihood.
        edge_prob = 1.0
        for p, c in des:
            p_state = augmented_pattern[p]
            c_state = augmented_pattern[c]
            edge_prob *= de_to_P[p, c][p_state, c_state]
        likelihood += root_prior[augmented_pattern[root]] * edge_prob

    # Return the log likelihood.
    return algopy.log(likelihood)
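
Because brute enumerates every assignment of states to the internal vertices, it should agree with the pruning implementation of code example #10 on small inputs. A hypothetical sanity check, reusing the toy inputs sketched after that example:

import numpy as np

ll_brute = brute(ov, v_to_children, pattern, de_to_P, root_prior)
ll_pruning = fels(ov, v_to_children, pattern, de_to_P, root_prior)
assert np.allclose(ll_brute, ll_pruning)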
Code example #32
def infer_parameter_values(p_guess, mu_guess, data, mask):
    k = p_guess.shape[1]

    # Pack the guess.
    packed_guess = np.concatenate((log(p_guess.flatten()), logit(mu_guess)))

    # Define the objective function, some of its derivatives, and a guess.
    f = partial(penalized_packed_neg_ll, k, data, mask)
    g = partial(eval_grad, f)
    h = partial(eval_hess, f)

    # Search for the maximum likelihood parameter values.
    # The direct hessp evaluation turns out to be slower, for some reason,
    # than directly calculating the hessian and then multiplying.
    res = scipy.optimize.minimize(
            f, packed_guess, method='trust-ncg', jac=g, hess=h)
    xopt = res.x

    # unpack the optimal parameters
    p_opt, mu_opt, penalty_opt = unpack_params(xopt)

    return p_opt, mu_opt, h(xopt)
Code example #33
File: matrixexponential.py Project: shoyer/algopy
def eval_f_eigh(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        replaced scipy.linalg.expm by a symmetric eigenvalue decomposition

        this function **can** be differentiated with algopy

    """
    a, b, v = transform_params(Y)

    Q = algopy.zeros((4, 4), dtype=Y)
    Q[0, 0] = 0
    Q[0, 1] = a
    Q[0, 2] = b
    Q[0, 3] = b
    Q[1, 0] = a
    Q[1, 1] = 0
    Q[1, 2] = b
    Q[1, 3] = b
    Q[2, 0] = b
    Q[2, 1] = b
    Q[2, 2] = 0
    Q[2, 3] = a
    Q[3, 0] = b
    Q[3, 1] = b
    Q[3, 2] = a
    Q[3, 3] = 0

    Q = algopy.dot(Q, algopy.diag(v))
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    va = algopy.diag(algopy.sqrt(v))
    vb = algopy.diag(1. / algopy.sqrt(v))
    W, U = algopy.eigh(algopy.dot(algopy.dot(va, Q), vb))
    M = algopy.dot(U, algopy.dot(algopy.diag(algopy.exp(W)), U.T))
    P = algopy.dot(vb, algopy.dot(M, va))
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
Code example #34
File: algopy_helper.py Project: codeants2012/NLP.py
 def obj(self, x, **kwargs):
     return algopy.log(1 + x[0]**2) - x[1]