Example #1
def kimura_1957_54_denominator_analytic_b(c, d):
    """
    The numerator of (5.4) goes to 1 in the scaling limit.
    So we only care about the denominator for phylogenetic purposes.
    In this function we reformulate the denominator.
    """
    # Mathematica notation:
    # Exp[-c*(1+d)^2 / (2*d)] / (2*d) * (
    #       (1+d)*Hypergeometric1F1[1/2, 3/2, c*(1+d)^2 / (2*d)] -
    #       (1-d)*Hypergeometric1F1[1/2, 3/2, c*(1-d)^2 / (2*d)] )
    #
    # precompute a common factor
    c2d = c / (2.*d)
    #
    # compute the asymmetric part
    asym_part = algopy.exp(-c)
    #
    # compute the part that is symmetric in the sense that f(c,d) = f(-c,-d).
    sym_a = 1. / (2.*d)
    sym_b = algopy.exp(-c2d*(d*d + 1.))
    hyper_a = (1. + d) * algopy.special.hyp1f1(0.5, 1.5, c2d*(1+d)**2)
    hyper_b = (1. - d) * algopy.special.hyp1f1(0.5, 1.5, c2d*(1-d)**2)
    sym_part = sym_a * sym_b * (hyper_a - hyper_b)
    #
    # return the approximate value of the function
    return asym_part * sym_part
Example #2
def kimura_1957_54_denominator_analytic_b(c, d):
    """
    The numerator of (5.4) goes to 1 in the scaling limit.
    So we only care about the denominator for phylogenetic purposes.
    In this function we reformulate the denominator.
    """
    # Mathematica notation:
    # Exp[-c*(1+d)^2 / (2*d)] / (2*d) * (
    #       (1+d)*Hypergeometric1F1[1/2, 3/2, c*(1+d)^2 / (2*d)] -
    #       (1-d)*Hypergeometric1F1[1/2, 3/2, c*(1-d)^2 / (2*d)] )
    #
    # precompute a common factor
    c2d = c / (2. * d)
    #
    # compute the asymmetric part
    asym_part = algopy.exp(-c)
    #
    # compute the part that is symmetric in the sense that f(c,d) = f(-c,-d).
    sym_a = 1. / (2. * d)
    sym_b = algopy.exp(-c2d * (d * d + 1.))
    hyper_a = (1. + d) * algopy.special.hyp1f1(0.5, 1.5, c2d * (1 + d)**2)
    hyper_b = (1. - d) * algopy.special.hyp1f1(0.5, 1.5, c2d * (1 - d)**2)
    sym_part = sym_a * sym_b * (hyper_a - hyper_b)
    #
    # return the approximate value of the function
    return asym_part * sym_part
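A minimal usage sketch (not part of the example above, and assuming an algopy build that provides algopy.special.hyp1f1 as used there): packing (c, d) into one vector lets algopy's forward mode return the gradient in one pass.

import numpy
import algopy

def eval_denom(theta):
    # theta packs the two model parameters as [c, d]
    return kimura_1957_54_denominator_analytic_b(theta[0], theta[1])

theta = numpy.array([1.5, 0.3])
u = algopy.UTPM.init_jacobian(theta)
gradient = algopy.UTPM.extract_jacobian(eval_denom(u))
print('gradient with respect to (c, d):', gradient)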
Example #3
def DerivativeExpression_jac2(sympy_X, weights):
    assert isinstance(weights, dict)

    X = sympy_X
    W1 = weights['dense_1'][0]
    b1 = weights['dense_1'][1]

    W2 = weights['dense_2'][0]
    b2 = weights['dense_2'][1]

    W3 = weights['dense_3'][0]
    b3 = weights['dense_3'][1]

    fa = np.dot(X, W1) + b1

    # active_fa = np.array([1 / (1 - algopy.exp(-_)) for _ in fa])
    active_fa = 1 / (1 + algopy.exp(-fa))
    fb = np.dot(active_fa, W2) + b2
    # active_fb = np.array([1 / (1 - algopy.exp(-_)) for _ in fb])
    active_fb = 1 / (1 + algopy.exp(-fb))
    fc = np.dot(active_fb, W3) + b3

    # print("the derivation of ", input[derivate_target_index])
    # print(diff(fb, X[derivate_target_index]))
    return fc[0]
Example #4
def get_Q_unconstrained_kb(
        ts, tv, syn, nonsyn, compo, asym_compo,
        h,
        log_counts,
        log_mu, log_kappa, log_omega, d, log_kb, log_nt_weights):
    """
    This adds yet another parameter.
    """
    #FIXME: constructing this each time seems wasteful
    codon_neighbor_mask = ts + tv
    #FIXME: this is being hacked to use fixed-order quadrature
    #FIXME: and to disregard the h parameter
    mu = algopy.exp(log_mu)
    kappa = algopy.exp(log_kappa)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    H = get_fixation_unconstrained_kb_fquad(
            S, d, log_kb, g_quad_x, g_quad_w, codon_neighbor_mask)
    #H = get_fixation_unconstrained_kb_fquad_cython(
    #        S, d, log_kb, codon_neighbor_mask)
    pre_Q = mu * (kappa * ts + tv) * (omega * nonsyn + syn) * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * H
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #5
def get_Q(
        ts, tv, syn, nonsyn, compo, asym_compo,
        h,
        log_counts,
        log_mu, log_kappa, log_omega, log_nt_weights):
    """
    Notation is from Yang and Nielsen 2008.
    The first group of args consists of precomputed ndarrays.
    The second group is only the fixation function.
    The third group consists of empirically (non-free) estimated parameters.
    The fourth group depends only on free parameters.
    Speed matters.
    @param ts: indicator for transition
    @param tv: indicator for transversion
    @param syn: indicator for synonymous codons
    @param nonsyn: indicator for nonsynonymous codons
    @param compo: site independent nucleotide composition per codon
    @param asym_compo: tensor from get_asym_compo function
    @param h: fixation function
    @param log_counts: empirically counted codons in the data set
    @param log_mu: free param for scaling
    @param log_kappa: free param for transition transversion rate distinction
    @param log_omega: free param for syn nonsyn rate distinction
    @param log_nt_weights: mostly free param array for mutation equilibrium
    @return: rate matrix
    """
    mu = algopy.exp(log_mu)
    kappa = algopy.exp(log_kappa)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q = mu * (kappa * ts + tv) * (omega * nonsyn + syn) * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * h(S)
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #6
def get_Q(
        ts, tv, syn, nonsyn, compo, asym_compo,
        h,
        log_counts,
        log_mu, log_kappa, log_omega, log_nt_weights):
    """
    Notation is from Yang and Nielsen 2008.
    The first group of args consists of precomputed ndarrays.
    The second group is only the fixation function.
    The third group consists of empirically (non-free) estimated parameters.
    The fourth group depends only on free parameters.
    @param ts: indicator for transition
    @param tv: indicator for transversion
    @param syn: indicator for synonymous codons
    @param nonsyn: indicator for nonsynonymous codons
    @param compo: site independent nucleotide composition per codon
    @param asym_compo: tensor from get_asym_compo function
    @param h: fixation function
    @param log_counts: empirically counted codons in the data set
    @param log_mu: free param for scaling
    @param log_kappa: free param for transition transversion rate distinction
    @param log_omega: free param for syn nonsyn rate distinction
    @param log_nt_weights: mostly free param array for mutation equilibrium
    @return: rate matrix
    """
    mu = algopy.exp(log_mu)
    kappa = algopy.exp(log_kappa)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q = mu * (kappa * ts + tv) * (omega * nonsyn + syn) * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * h(S)
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #7
def IV_algopy(x, Vd):
    """
    IV curve implemented using algopy instead of numpy
    """
    nobs = x.shape[1]
    out = zeros((3, nobs), dtype=x)
    Ee, Tc, Rs, Rsh, Isat1_0, Isat2, Isc0, alpha_Isc, Eg = x
    Vt = Tc * KB / QE
    Isc = Ee * Isc0 * (1.0 + (Tc - T0) * alpha_Isc)
    Isat1 = (
        Isat1_0 * (Tc ** 3.0 / T0 ** 3.0) *
        exp(Eg * QE / KB * (1.0 / T0 - 1.0 / Tc))
    )
    Vd_sc = Isc * Rs  # at short circuit Vc = 0 
    Id1_sc = Isat1 * (exp(Vd_sc / Vt) - 1.0)
    Id2_sc = Isat2 * (exp(Vd_sc / 2.0 / Vt) - 1.0)
    Ish_sc = Vd_sc / Rsh
    Iph = Isc + Id1_sc + Id2_sc + Ish_sc
    Id1 = Isat1 * (exp(Vd / Vt) - 1.0)
    Id2 = Isat2 * (exp(Vd / 2.0 / Vt) - 1.0)
    Ish = Vd / Rsh
    Ic = Iph - Id1 - Id2 - Ish
    Vc = Vd - Ic * Rs
    out[0] = Ic
    out[1] = Vc
    out[2] = Ic * Vc
    return out
Example #8
def get_Q(
        gtr, syn, nonsyn, compo, asym_compo,
        h,
        log_counts,
        log_mu, log_g, log_omega, log_nt_weights):
    """
    Most of the notation is from Yang and Nielsen 2008.
    The first group of args consists of precomputed ndarrays.
    The second group is only the fixation function.
    The third group consists of empirically (non-free) estimated parameters.
    The fourth group depends only on free parameters.
    @param gtr: ndim-3 ndarray indicating the nucleotide exchange type
    @param syn: indicator for synonymous codons
    @param nonsyn: indicator for nonsynonymous codons
    @param compo: site independent nucleotide composition per codon
    @param asym_compo: tensor from get_asym_compo function
    @param h: fixation function
    @param log_counts: empirically counted codons in the data set
    @param log_mu: free param for scaling
    @param log_g: logs of six exchangeabilities
    @param log_omega: free param for syn nonsyn rate distinction
    @param log_nt_weights: mostly free param array for mutation equilibrium
    @return: rate matrix
    """
    mu = algopy.exp(log_mu)
    g = algopy.exp(log_g)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q = mu * algopy.dot(gtr, g) * (omega * nonsyn + syn) * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * h(S)
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #9
def ackley(x):
    a, b, c = 20.0, -0.2, 2.0 * numpy.pi
    len_recip = 1.0 / len(x)
    sum_sqrs, sum_cos = 0.0, 0.0
    for i in x:
        sum_cos += algopy.cos(c * i)
        sum_sqrs += i * i
    return (-a * algopy.exp(b * algopy.sqrt(len_recip * sum_sqrs)) -
            algopy.exp(len_recip * sum_cos) + a + numpy.e)
Example #10
def denom_not_genic(c, d):
    c2d = c / (2. * d)
    asym_part = algopy.exp(-c)
    sym_a = 1. / (2. * d)
    sym_b = algopy.exp(-c2d * (d * d + 1.))
    hyper_a = (1. + d) * algopy.special.dpm_hyp1f1(0.5, 1.5, c2d * (1 + d)**2)
    hyper_b = (1. - d) * algopy.special.dpm_hyp1f1(0.5, 1.5, c2d * (1 - d)**2)
    sym_part = sym_a * sym_b * (hyper_a - hyper_b)
    return asym_part * sym_part
Example #11
def denom_not_genic(c, d):
    c2d = c / (2.*d)
    asym_part = algopy.exp(-c)
    sym_a = 1. / (2.*d)
    sym_b = algopy.exp(-c2d*(d*d + 1.))
    hyper_a = (1. + d) * algopy.special.dpm_hyp1f1(0.5, 1.5, c2d*(1+d)**2)
    hyper_b = (1. - d) * algopy.special.dpm_hyp1f1(0.5, 1.5, c2d*(1-d)**2)
    sym_part = sym_a * sym_b * (hyper_a - hyper_b)
    return asym_part * sym_part
Example #12
def ackley(x):
    a, b, c = 20.0, -0.2, 2.0 * numpy.pi
    len_recip = 1.0 / len(x)
    sum_sqrs, sum_cos = 0.0, 0.0
    for i in x:
        sum_cos += algopy.cos(c * i)
        sum_sqrs += i * i
    return (-a * algopy.exp(b * algopy.sqrt(len_recip * sum_sqrs)) -
            algopy.exp(len_recip * sum_cos) + a + numpy.e)
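This is the ackley function in the style of the algopy minimization examples; a small driver in that style (eval_grad is a helper defined here, not part of the example) evaluates the gradient at an arbitrary point.

import numpy
import algopy

def eval_grad(f, theta):
    # forward-mode gradient of a scalar function
    theta = algopy.UTPM.init_jacobian(theta)
    return algopy.UTPM.extract_jacobian(f(theta))

x0 = numpy.array([1.0, -1.0, 0.5])
print('ackley gradient at x0:', eval_grad(ackley, x0))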
Example #13
def get_neg_ll(vY, mX, vBeta):
    """
    @param vY: predefined numpy array
    @param mX: predefined numpy array
    @param vBeta: parameters of the likelihood function
    """
    #FIXME: algopy could benefit from the addition of a logsumexp function...
    alpha = algopy.dot(mX, vBeta)
    return algopy.sum(vY * algopy.log1p(algopy.exp(-alpha)) +
                      (1 - vY) * algopy.log1p(algopy.exp(alpha)))
Example #14
def get_neg_ll(vY, mX, vBeta):
    """
    @param vY: predefined numpy array
    @param mX: predefined numpy array
    @param vBeta: parameters of the likelihood function
    """
    #FIXME: algopy could benefit from the addition of a logsumexp function...
    alpha = algopy.dot(mX, vBeta)
    return algopy.sum(
            vY*algopy.log1p(algopy.exp(-alpha)) +
            (1-vY)*algopy.log1p(algopy.exp(alpha)))
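For context, a hedged sketch of fitting this logistic-regression likelihood, with algopy supplying the exact gradient to scipy's optimizer; the random vY and mX below are placeholders for real data.

import numpy
import algopy
from scipy import optimize

def neg_ll(vBeta, vY, mX):
    # reorder arguments so the free parameters come first
    return get_neg_ll(vY, mX, vBeta)

def neg_ll_grad(vBeta, vY, mX):
    b = algopy.UTPM.init_jacobian(vBeta)
    return algopy.UTPM.extract_jacobian(neg_ll(b, vY, mX))

rng = numpy.random.RandomState(0)
mX = rng.randn(50, 3)
vY = (rng.rand(50) < 0.5).astype(float)
result = optimize.minimize(neg_ll, numpy.zeros(3), args=(vY, mX),
                           jac=neg_ll_grad, method='BFGS')
print(result.x)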
Example #15
def denom_hyp2f0_b(c, d):
    prefix = algopy.exp(-c)
    a1 = algopy.exp(c)
    a2 = 1. / (2. * c)
    a3 = 1. / (1. + d)
    a4 = algopy.special.hyp2f0(1.0, 0.5, (2. * d) / (c * (1. + d)**2))
    b1 = algopy.exp(-c)
    b2 = 1. / (2. * c)
    b3 = 1. / (1. - d)
    b4 = algopy.special.hyp2f0(1.0, 0.5, (2. * d) / (c * (1. - d)**2))
    return prefix * (a1 * a2 * a3 * a4 - b1 * b2 * b3 * b4)
Example #16
def get_Q_unconstrained(gtr, syn, nonsyn, compo, asym_compo, h, log_counts,
                        log_mu, log_g, log_omega, d, log_nt_weights):
    mu = algopy.exp(log_mu)
    g = algopy.exp(log_g)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q = mu * algopy.dot(gtr, g) * (omega * nonsyn + syn) * algopy.exp(
        algopy.dot(asym_compo, log_nt_weights)) * h(S, d)
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #17
def encoded_to_natural(cls, encoded_theta):
    """
    The first parameter is a proportion.
    The fourth parameter is unconstrained.
    """
    natural = algopy.zeros_like(encoded_theta)
    natural[0] = algopy.special.expit(encoded_theta[0])
    natural[1] = algopy.exp(encoded_theta[1])
    natural[2] = algopy.exp(encoded_theta[2])
    natural[3] = encoded_theta[3]
    natural[4:] = algopy.exp(encoded_theta[4:])
    return natural
Example #18
def denom_hyp1f1_b(c, d):
    """
    This uses only algopy.special.hyp1f1(1.0, 1.5, x).
    """
    prefix = algopy.exp(-c)
    a1 = algopy.exp(c)
    a2 = (1. + d) / (2. * d)
    a3 = algopy.special.hyp1f1(1.0, 1.5, -c*(1+d)**2 / (2. * d))
    b1 = algopy.exp(-c)
    b2 = (1. - d) / (2. * d)
    b3 = algopy.special.hyp1f1(1.0, 1.5, -c*(1-d)**2 / (2. * d))
    return prefix * (a1 * a2 * a3 - b1 * b2 * b3)
Example #19
def get_pre_Q(cls,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        theta,
        ):
    cls.check_theta(theta)
    kappa = algopy.exp(theta[0])
    omega = algopy.exp(theta[1])
    nt_distn = markovutil.log_ratios_to_distn(theta[2:5])
    pre_Q = codon1994.get_MG_pre_Q(
            ts, tv, syn, nonsyn, asym_compo,
            nt_distn, kappa, omega)
    return pre_Q
Example #20
def denom_hyperu_b(c, d):
    """
    This uses only algopy.special.hyperu(1.0, 1.5, x).
    It is only meaningful when c and d have different signs,
    but for now do not enforce this.
    """
    prefix = algopy.exp(-c)
    a1 = algopy.exp(-c)
    a2 = (1. - d) / (4. * d)
    a3 = algopy.special.hyperu(1.0, 1.5, -c*(1-d)**2 / (2. * d))
    b1 = algopy.exp(c)
    b2 = (1. + d) / (4. * d)
    b3 = algopy.special.hyperu(1.0, 1.5, -c*(1+d)**2 / (2. * d))
    return prefix * (a1 * a2 * a3 - b1 * b2 * b3)
Example #21
def get_Q_unconstrained_kb(
        gtr, syn, nonsyn, compo, asym_compo,
        h,
        log_counts,
        log_mu, log_g, log_omega, d, log_kb, log_nt_weights):
    mu = algopy.exp(log_mu)
    g = algopy.exp(log_g)
    omega = algopy.exp(log_omega)
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q = mu * algopy.dot(gtr, g) * (omega * nonsyn + syn) * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * h(S, d, log_kb)
    Q = pre_Q - algopy.diag(algopy.sum(pre_Q, axis=1))
    return Q
Example #22
def unpack_params(log_params):
    """
    Unpack the parameters.

    This function also enforces the simplex constraint
    on the mutational nucleotide distribution,
    and it computes a penalty corresponding to violation of this constraint.

    """
    # undo the log transformation of the parameters
    params = exp(log_params)

    # unpack the parameters
    kappa, omega = params[0:2]
    A, C, G, T = params[2:6]
    blink_on, blink_off = params[6:8]
    edge_rates = params[8:]

    # normalize the nucleotide probability distribution and compute a penalty
    nt_prob_sum = A + C + G + T
    A = A / nt_prob_sum
    C = C / nt_prob_sum
    G = G / nt_prob_sum
    T = T / nt_prob_sum
    penalty = square(log(nt_prob_sum))

    # return unpacked parameters and the penalty
    unpacked = (kappa, omega, A, C, G, T, blink_on, blink_off, edge_rates)
    return unpacked, penalty
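A hedged sketch of the inverse packing (the helper name and argument order are illustrative, not from the source). It also shows why the penalty works: exp keeps every unpacked value positive, and square(log(nt_prob_sum)) is zero exactly when the packed nucleotide values already sum to one.

import numpy as np

def pack_params(kappa, omega, nt_probs, blink_on, blink_off, edge_rates):
    # log-transform so the optimizer works in unconstrained coordinates
    params = np.concatenate([[kappa, omega], nt_probs,
                             [blink_on, blink_off], edge_rates])
    return np.log(params)

log_params = pack_params(2.0, 0.1, [0.25, 0.25, 0.25, 0.25], 0.5, 0.5, [1.0])
unpacked, penalty = unpack_params(log_params)   # penalty == 0.0 here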
Example #23
def get_Q_slsqp(
        ts, tv, syn, nonsyn, compo, asym_compo,
        h,
        log_counts, v,
        theta):
    #FIXME: hardcoded for selection without recessivity parameters
    #
    # unpack theta
    branch_length = theta[0]
    kappa = theta[1]
    omega = theta[2]
    """
    nt_probs = algopy.zeros(4, dtype=theta)
    nt_probs[0] = theta[3]
    nt_probs[1] = theta[4]
    nt_probs[2] = theta[5]
    nt_probs[3] = 1.0 - algopy.sum(nt_probs)
    print nt_probs
    log_nt_weights = algopy.log(nt_probs)
    """
    log_nt_weights = theta[-4:]
    #
    F = get_selection_F(log_counts, compo, log_nt_weights)
    S = get_selection_S(F)
    pre_Q_exch = (kappa * ts + tv) * (omega * nonsyn + syn)
    pre_Q = pre_Q_exch * algopy.exp(
            algopy.dot(asym_compo, log_nt_weights)) * h(S)
    rates = algopy.sum(pre_Q, axis=1)
    Q = pre_Q - algopy.diag(rates)
    Q *= branch_length / algopy.dot(rates, v)
    return Q
Example #24
def preferred_dominant_fixation(S):
    """
    Preferred alleles are purely dominant.
    """
    a = algopy.exp(algopy.special.botched_clip(0, np.inf, S))
    b = algopy.special.hyp1f1(0.5, 1.5, abs(S))
    return a / b
Example #25
def denom_near_genic(c, d):
    a0 = 1. / (2.*c)
    b01 = 1. / (1.+d)
    b02 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2.*d)/(c*(1.+d)**2))
    b11 = algopy.exp(-2.*c) / (1.-d)
    b12 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2.*d)/(c*(1.-d)**2))
    return a0 * (b01 * b02 - b11 * b12)
Example #26
def unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    This should be compatible with algopy.
    But it may be very slow.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    @return: an ndarray of fixation probabilities with Taylor information
    """
    x = g_quad_x
    w = g_quad_w
    nstates = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for i in range(nstates):
        for j in range(nstates):
            if not adjacency[i, j]:
                continue
            tmp_a = - S[i, j] * x
            tmp_b = algopy.exp(tmp_a * (D[i, j] * (1-x) + 1))
            tmp_c = algopy.dot(tmp_b, w)
            H[i, j] = algopy.reciprocal(tmp_c)
    return H
Example #27
def unrolled_unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    This should be compatible with algopy.
    But it may be very slow.
    The unrolling is with respect to a dot product.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    @return: an ndarray of fixation probabilities with Taylor information
    """
    nknots = len(g_quad_x)
    nstates = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for i in range(nstates):
        for j in range(nstates):
            if not adjacency[i, j]:
                continue
            for x, w in zip(g_quad_x, g_quad_w):
                tmp_a = - S[i, j] * x
                tmp_b = algopy.exp(tmp_a * (D[i, j] * (1-x) + 1))
                H[i, j] += tmp_b * w
            H[i, j] = algopy.reciprocal(H[i, j])
    return H
Example #28
def get_h_i_t(w_i, alpha_i, beta_i, b_i, c_i, mu_i, v_i,
              h_tm1_agg_i, delta_t_agg_i):
    if mu_i > 0:
        sqrthtpowmu = algopy.sqrt(h_tm1_agg_i) ** mu_i
        return (w_i
                + alpha_i * sqrthtpowmu * get_f_i(delta_t_agg_i, b_i, c_i) ** v_i
                + beta_i * sqrthtpowmu) ** (2. / mu_i)
    else:
        sqrtht = algopy.sqrt(h_tm1_agg_i)
        return algopy.exp(
                w_i
                + alpha_i * get_f_i(delta_t_agg_i, b_i, c_i) ** v_i
                + beta_i * algopy.log(sqrtht)) ** 2.
Example #29
def get_two_taxon_neg_ll_log_theta(
        model,
        subs_counts,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        theta,
        ):
    """
    Get the negative log likelihood.
    This function uses the logarithms of the model parameters.
    The first param group is the model implementation.
    The second param group is the data.
    The third param group consists of data summaries.
    The fourth param group consists of design matrices related to genetic code.
    The fifth param group consists of free parameters of the model.
    """
    branch_length = algopy.exp(theta[0])
    model_theta = theta[1:]
    distn = model.get_distn(
            log_counts, codon_distn,
            ts, tv, syn, nonsyn, compo, asym_compo,
            model_theta,
            )
    pre_Q = model.get_pre_Q(
            log_counts, codon_distn,
            ts, tv, syn, nonsyn, compo, asym_compo,
            model_theta,
            )
    neg_ll = -markovutil.get_branch_ll(
            subs_counts, pre_Q, distn, branch_length)
    return neg_ll
Example #30
def denom_near_genic(c, d):
    a0 = 1. / (2. * c)
    b01 = 1. / (1. + d)
    b02 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2. * d) / (c * (1. + d)**2))
    b11 = algopy.exp(-2. * c) / (1. - d)
    b12 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2. * d) / (c * (1. - d)**2))
    return a0 * (b01 * b02 - b11 * b12)
Example #31
def get_pre_Q(cls,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        theta,
        ):
    cls.check_theta(theta)
    kappa = algopy.exp(theta[0])
    omega = algopy.exp(theta[1])
    nt_distn = markovutil.log_ratios_to_distn(theta[2:5])
    pre_Q = fmutsel.get_pre_Q(
            log_counts,
            fmutsel.preferred_recessive_fixation,
            ts, tv, syn, nonsyn, compo, asym_compo,
            nt_distn, kappa, omega,
            )
    return pre_Q
Example #32
def d_f(x):
    """function"""
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]


# forward AD without building a computational graph
x = UTPM.init_jacobian([3, 5, 7])
y = d_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# Step 1/2 - trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = d_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# Step 2/2 - use the graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
# the Jacobian: square matrix of first-order partial derivatives of f at x
print('Jacobian =', cg.jacobian([3., 5, 7]))
# the Hessian: square matrix of second-order partial derivatives; useful for
# classifying a critical point as a local min, max, or saddle
print('Hessian =', cg.hessian([3., 5., 7.]))
print('Hessian vector product =', cg.hess_vec([3., 5., 7.], [4, 5, 6]))
Example #33
def get_neg_ll(cls,
        patterns, pattern_weights,
        ts, tv, syn, nonsyn, full_compo,
        theta,
        ):

    # pick the nt distn parameters from the end of the theta vector
    log_nt_distns = algopy.zeros((3, 4), dtype=theta)
    log_nt_distns_block = algopy.reshape(theta[-9:], (3, 3))
    log_nt_distns[:, :-1] = log_nt_distns_block
    reduced_theta = theta[:-9]
    unnormalized_nt_distns = algopy.exp(log_nt_distns)

    # normalize each of the three nucleotide distributions
    row_sums = algopy.sum(unnormalized_nt_distns, axis=1)
    nt_distns = (unnormalized_nt_distns.T / row_sums).T

    # get the implied codon distribution
    stationary_distn = codon1994.get_f3x4_codon_distn(
            full_compo,
            nt_distns,
            )

    return A.get_neg_ll(
            patterns, pattern_weights,
            stationary_distn,
            ts, tv, syn, nonsyn,
            reduced_theta,
            )
Example #34
def preferred_recessive_fixation(S):
    """
    Preferred alleles are purely recessive.
    """
    a = algopy.exp(algopy.special.botched_clip(-np.inf, 0, S))
    b = algopy.special.hyp1f1(0.5, 1.5, -abs(S))
    return a / b
Example #35
def encoded_to_natural(cls, encoded_theta):
    """
    The second variable is a proportion.
    """
    natural = algopy.zeros_like(encoded_theta)
    natural[0] = algopy.special.expit(encoded_theta[0])
    natural[1:] = algopy.exp(encoded_theta[1:])
    return natural
Example #36
def kimura_1957_54_denominator_analytic(c, d):
    """
    See the corresponding description for the numerator.
    This function computes the normalizing constant.
    """
    # Mathematica notation:
    # Integrate[Exp[-2*c*d*x*(1-x) - 2*c*x], {x, 0, 1}]
    #
    # precompute some intermediate quantities
    # FIXME: algopy has no csqrt but then again also does not support complex.
    #sqrt_c = cmath.sqrt(c)
    #sqrt_d = cmath.sqrt(d)
    sqrt_c = algopy.sqrt(c)
    sqrt_d = algopy.sqrt(d)
    sqrt_2 = math.sqrt(2)
    exp_2c = algopy.exp(2 * c)
    #
    # compute the numerator
    dawsn_num_a = algopy.special.dawsn((sqrt_c * (d - 1)) / (sqrt_2 * sqrt_d))
    dawsn_num_b = algopy.special.dawsn((sqrt_c * (d + 1)) / (sqrt_2 * sqrt_d))
    num = dawsn_num_a + exp_2c * dawsn_num_b
    #
    # compute the denominator
    den = sqrt_2 * sqrt_c * sqrt_d * exp_2c
    #
    return num / den
Example #37
def kimura_1957_54_denominator_analytic(c, d):
    """
    See the corresponding description for the numerator.
    This function computes the normalizing constant.
    """
    # Mathematica notation:
    # Integrate[Exp[-2*c*d*x*(1-x) - 2*c*x], {x, 0, 1}]
    #
    # precompute some intermediate quantities
    # FIXME: algopy has no csqrt but then again also does not support complex.
    #sqrt_c = cmath.sqrt(c)
    #sqrt_d = cmath.sqrt(d)
    sqrt_c = algopy.sqrt(c)
    sqrt_d = algopy.sqrt(d)
    sqrt_2 = math.sqrt(2)
    exp_2c = algopy.exp(2*c)
    #
    # compute the numerator
    dawsn_num_a = algopy.special.dawsn(
            (sqrt_c * (d-1)) / (sqrt_2 * sqrt_d))
    dawsn_num_b = algopy.special.dawsn(
            (sqrt_c * (d+1)) / (sqrt_2 * sqrt_d))
    num = dawsn_num_a + exp_2c * dawsn_num_b
    #
    # compute the denominator
    den = sqrt_2 * sqrt_c * sqrt_d * exp_2c
    #
    return num / den
Example #38
def kimura_1957_54_numerator_analytic(p, c, d):
    """
    From Kimura 1957, equation (5.4).
    This is the diffusion approximation of the fixation probability
    of an allele that has reached frequency p in the population,
    with scaled selection c = Ns
    and dominance/recessivity parameter d = 2h - 1.
    @param p: initial allele frequency in the population
    @param c: population-scaled selection coefficient
    @param d: transformed dominance/recessivity parameter
    @return: diffusion estimate of fixation probability
    """
    # Mathematica notation:
    # Integrate[Exp[-2*c*d*x*(1-x) - 2*c*x], {x, 0, p}]
    #
    # precompute some intermediate quantities
    sqrt_c = algopy.sqrt(c)
    sqrt_d = algopy.sqrt(d)
    sqrt_2 = math.sqrt(2)
    sqrt_pi = math.sqrt(math.pi)
    #
    # compute the numerator
    erfi_num_a = algopy.special.erfi(
            (sqrt_c * (1 + d)) / (sqrt_2 * sqrt_d))
    erfi_num_b = algopy.special.erfi(
            (sqrt_c * (1 + d - 2*d*p)) / (sqrt_2 * sqrt_d))
    # the difference (not the sum) makes the integral vanish at p = 0
    num = (sqrt_pi / sqrt_2) * (erfi_num_a - erfi_num_b)
    #
    # compute the denominator
    exp_den_a = algopy.exp((c*((1+d)**2)) / (2*d))
    den = 2*algopy.sqrt(c)*algopy.sqrt(d)*exp_den_a
    #
    return num / den
Example #39
def eval_f_eigh(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        replaced scipy.linalg.expm by a symmetric eigenvalue decomposition

        this function **can** be differentiated with algopy

    """
    a, b, v = transform_params(Y)

    Q = algopy.zeros((4,4), dtype=Y)
    Q[0,0] = 0;    Q[0,1] = a;    Q[0,2] = b;    Q[0,3] = b;
    Q[1,0] = a;    Q[1,1] = 0;    Q[1,2] = b;    Q[1,3] = b;
    Q[2,0] = b;    Q[2,1] = b;    Q[2,2] = 0;    Q[2,3] = a;
    Q[3,0] = b;    Q[3,1] = b;    Q[3,2] = a;    Q[3,3] = 0;

    Q = algopy.dot(Q, algopy.diag(v))
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    va = algopy.diag(algopy.sqrt(v))
    vb = algopy.diag(1./algopy.sqrt(v))
    W, U = algopy.eigh(algopy.dot(algopy.dot(va, Q), vb))
    M = algopy.dot(U, algopy.dot(algopy.diag(algopy.exp(W)), U.T))
    P = algopy.dot(vb, algopy.dot(M, va))
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
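The reason algopy.eigh suffices above: when Q is reversible with stationary distribution v, the similarity transform diag(sqrt(v)) Q diag(1/sqrt(v)) is symmetric, so expm(Q) can be assembled from a symmetric eigendecomposition. A standalone numpy/scipy check of that identity with a made-up reversible Q (not from the source):

import numpy as np
from scipy.linalg import expm

v = np.array([0.1, 0.2, 0.3, 0.4])    # stationary distribution
S = np.array([[0., 1., 2., 3.],
              [1., 0., 4., 5.],
              [2., 4., 0., 6.],
              [3., 5., 6., 0.]])      # symmetric exchangeabilities
Q = S * v                             # reversible: v_i Q_ij == v_j Q_ji
Q -= np.diag(Q.sum(axis=1))
va = np.diag(np.sqrt(v))
vb = np.diag(1. / np.sqrt(v))
W, U = np.linalg.eigh(va.dot(Q).dot(vb))
P = vb.dot(U.dot(np.diag(np.exp(W))).dot(U.T)).dot(va)
print(np.allclose(P, expm(Q)))        # True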
Example #40
def get_two_taxon_neg_ll_encoded_theta(
        model,
        subs_counts,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        encoded_theta,
        ):
    """
    Get the negative log likelihood.
    This function uses an encoding of the model parameters.
    The first param group is the model implementation.
    The second param group is the data.
    The third param group consists of data summaries.
    The fourth param group consists of design matrices related to genetic code.
    The fifth param group consists of free parameters of the model.
    """
    branch_length = algopy.exp(encoded_theta[0])
    encoded_model_theta = encoded_theta[1:]
    natural_model_theta = model.encoded_to_natural(encoded_model_theta)
    natural_theta = algopy.zeros_like(encoded_theta)
    natural_theta[0] = branch_length
    natural_theta[1:] = natural_model_theta
    return get_two_taxon_neg_ll(
        model,
        subs_counts,
        log_counts, codon_distn,
        ts, tv, syn, nonsyn, compo, asym_compo,
        natural_theta,
        )
Example #41
def kimura_1957_54_numerator_analytic(p, c, d):
    """
    From Kimura 1957, equation (5.4).
    This is the diffusion approximation of the fixation probability
    of an allele that has reached frequency p in the population,
    with scaled selection c = Ns
    and dominance/recessivity parameter d = 2h - 1.
    @param p: initial allele frequency in the population
    @param c: population-scaled selection coefficient
    @param d: transformed dominance/recessivity parameter
    @return: diffusion estimate of fixation probability
    """
    # Mathematica notation:
    # Integrate[Exp[-2*c*d*x*(1-x) - 2*c*x], {x, 0, p}]
    #
    # precompute some intermediate quantities
    sqrt_c = algopy.sqrt(c)
    sqrt_d = algopy.sqrt(d)
    sqrt_2 = math.sqrt(2)
    sqrt_pi = math.sqrt(math.pi)
    #
    # compute the numerator
    erfi_num_a = algopy.special.erfi((sqrt_c * (1 + d)) / (sqrt_2 * sqrt_d))
    erfi_num_b = algopy.special.erfi(
        (sqrt_c * (1 + d - 2 * d * p)) / (sqrt_2 * sqrt_d))
    # the difference (not the sum) makes the integral vanish at p = 0
    num = (sqrt_pi / sqrt_2) * (erfi_num_a - erfi_num_b)
    #
    # compute the denominator
    exp_den_a = algopy.exp((c * ((1 + d)**2)) / (2 * d))
    den = 2 * algopy.sqrt(c) * algopy.sqrt(d) * exp_den_a
    #
    return num / den
Example #42
def get_Q_prefix_gtr(gtr, syn, nonsyn, log_mu, log_gtr_exch, log_omega):
    """
    Compute a chunk of a Hadamard decomposition of the pre-Q matrix.
    By Hadamard decomposition I mean the factoring of a matrix
    into the entrywise product of two matrices.
    By pre-Q matrix I mean the rate matrix before the row sums
    have been subtracted from the diagonal.
    Notation is from Yang and Nielsen 2008.
    The first group of args consists of precomputed ndarrays.
    The second group depends only on free parameters.
    Note that this function does not depend on mutation process
    stationary distribution parameters,
    and it does not depend on recessivity parameters.
    """
    mu = algopy.exp(log_mu)
    gtr_exch = algopy.exp(log_gtr_exch)
    omega = algopy.exp(log_omega)
    return mu * algopy.dot(gtr, gtr_exch) * (omega * nonsyn + syn)
Example #43
def transform_params(Y):
    X = algopy.exp(Y)
    tsrate, tvrate = X[0], X[1]
    v_unnormalized = algopy.zeros(4, dtype=X)
    v_unnormalized[0] = X[2]
    v_unnormalized[1] = X[3]
    v_unnormalized[2] = X[4]
    v_unnormalized[3] = 1.0
    v = v_unnormalized / algopy.sum(v_unnormalized)
    return tsrate, tvrate, v
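transform_params keeps the optimization unconstrained: exponentiation makes both rates positive, and v sums to one by construction. A quick sanity check with plain floats (algopy's elementary functions fall back to numpy on ordinary arrays):

import numpy
Y = numpy.log([0.5, 2.0, 1.0, 1.0, 1.0])
tsrate, tvrate, v = transform_params(Y)
print(tsrate, tvrate, v.sum())    # 0.5 2.0 1.0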
Example #44
def unpack_distribution(nstates, d4_reduction, d4_nstates, X):
    log_v = algopy.zeros(nstates, dtype=X)
    for i_full, i_reduced in enumerate(d4_reduction):
        if i_reduced == d4_nstates - 1:
            log_v[i_full] = 0.0
        else:
            log_v[i_full] = X[i_reduced]
    v = algopy.exp(log_v)
    v = v / algopy.sum(v)
    return v
Example #45
def get_neg_ll(y, X, theta):
    alpha = theta[-1]
    beta = theta[:-1]
    a = alpha * algopy.exp(algopy.dot(X, beta))
    ll = algopy.sum(
            -y * algopy.log1p(1 / a)
            - algopy.log1p(a) / alpha
            + algopy.special.gammaln(y + 1 / alpha)
            - algopy.special.gammaln(y + 1)
            - algopy.special.gammaln(1 / alpha))
    neg_ll = -ll
    return neg_ll
Example #46
def denom_near_genic_combo(c, d):
    a0 = 1. / (2.*c)
    b01 = 1. / (1.+d)
    z1 = (2.*d)/(c*(1.+d)**2)
    z2 = (2.*d)/(c*(1.-d)**2)
    z1_recip = (c*(1.+d)**2) / (2.*d)
    #FIXME: unfinished
    b02 = _hyp2f0_combo(1.0, 0.5, (2.*d)/(c*(1.+d)**2))
    b11 = algopy.exp(-2.*c) / (1.-d)
    b12 = _hyp2f0_combo(1.0, 0.5, (2.*d)/(c*(1.-d)**2))
    return a0 * (b01 * b02 - b11 * b12)
Example #47
def denom_near_genic(c, d):
    #if not c:
        #return numpy.nan
    #if d in (-1, 1):
        #return numpy.nan
    a0 = 1. / (2.*c)
    b01 = 1. / (1.+d)
    b02 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2.*d)/(c*(1.+d)**2))
    b11 = algopy.exp(-2.*c) / (1.-d)
    b12 = algopy.special.dpm_hyp2f0(1.0, 0.5, (2.*d)/(c*(1.-d)**2))
    return a0 * (b01 * b02 - b11 * b12)
Example #48
def get_fixation_unconstrained_kb(S, d, log_kb):
    """
    This uses the Kacser and Burns effect instead of the sign function.
    """
    soft_sign_S = algopy.tanh(algopy.exp(log_kb) * S)
    D = d * soft_sign_S
    H = algopy.zeros_like(S)
    for i in range(H.shape[0]):
        for j in range(H.shape[1]):
            H[i, j] = 1. / kimrecessive.denom_piecewise(0.5 * S[i, j], D[i, j])
    return H
Example #49
def denom_fixed_quad(c, d, x, w):
    """
    This function is compatible with algopy.
    The x and w params should be precomputed with a=0, b=1.
    @param c: large positive means mutant is more fit
    @param d: large positive means mutant is dominant as opposed to recessive
    @param x: quadrature points in the interval [0, 1]
    @param w: corresponding nonneg quadrature weights summing to 1
    """
    neg_two_c_x = -2*c*x
    y = algopy.exp(neg_two_c_x*(d*(1-x) + 1))
    return algopy.dot(y, w)
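The x and w arguments are meant to be precomputed once; a sketch of that setup using numpy's Gauss-Legendre rule, affinely mapped from [-1, 1] to [0, 1] so the weights sum to one as the docstring requires:

import numpy as np

def precompute_quadrature(npoints=20):
    x_raw, w_raw = np.polynomial.legendre.leggauss(npoints)
    x = 0.5 * (x_raw + 1.0)    # nodes mapped into [0, 1]
    w = 0.5 * w_raw            # raw weights sum to 2, so these sum to 1
    return x, w

x, w = precompute_quadrature()
print(denom_fixed_quad(1.5, 0.3, x, w))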
Example #50
def get_fixation_unconstrained_kb_fquad(
        S, d, log_kb, x, w, codon_neighbor_mask):
    """
    This uses the Kacser and Burns effect instead of the sign function.
    """
    #TODO: possibly use a mirror symmetry to double the speed
    soft_sign_S = algopy.tanh(algopy.exp(log_kb)*S)
    D = d * soft_sign_S
    H = algopy.zeros_like(S)
    for i in range(H.shape[0]):
        for j in range(H.shape[1]):
            if codon_neighbor_mask[i, j]:
                H[i, j] = 1. / kimrecessive.denom_fixed_quad(
                        0.5*S[i, j], D[i, j], x, w)
    return H
Example #51
def eval_f_eigh(Y):
    """ some reformulations to make eval_f_orig
        compatible with algopy

        replaced scipy.linalg.expm by a symmetric eigenvalue decomposition

        this function **can** be differentiated with algopy

    """
    a, b, v = transform_params(Y)

    Q = algopy.zeros((4, 4), dtype=Y)
    Q[0, 0] = 0
    Q[0, 1] = a
    Q[0, 2] = b
    Q[0, 3] = b
    Q[1, 0] = a
    Q[1, 1] = 0
    Q[1, 2] = b
    Q[1, 3] = b
    Q[2, 0] = b
    Q[2, 1] = b
    Q[2, 2] = 0
    Q[2, 3] = a
    Q[3, 0] = b
    Q[3, 1] = b
    Q[3, 2] = a
    Q[3, 3] = 0

    Q = algopy.dot(Q, algopy.diag(v))
    Q -= algopy.diag(algopy.sum(Q, axis=1))
    va = algopy.diag(algopy.sqrt(v))
    vb = algopy.diag(1. / algopy.sqrt(v))
    W, U = algopy.eigh(algopy.dot(algopy.dot(va, Q), vb))
    M = algopy.dot(U, algopy.dot(algopy.diag(algopy.exp(W)), U.T))
    P = algopy.dot(vb, algopy.dot(M, va))
    S = algopy.log(algopy.dot(algopy.diag(v), P))
    return -algopy.sum(S * g_data)
Example #52
def eval_f(x):
    """ some function """
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]
Example #53
def denom_genic_b(c):
    return (1. - algopy.exp(-2*c)) / (2*c)
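As c approaches 0 this ratio tends to 1, but the expression itself is 0/0 there; a hedged sketch of a small-c-safe scalar variant (the helper name and cutoff are illustrative):

import numpy as np

def denom_genic_b_stable(c, eps=1e-8):
    # series: (1 - exp(-2c)) / (2c) = 1 - c + (2/3)*c**2 - ...
    if abs(c) < eps:
        return 1. - c
    # expm1 avoids cancellation in 1 - exp(-2c) for small |c|
    return -np.expm1(-2. * c) / (2. * c)

print(denom_genic_b_stable(0.0))    # 1.0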
Example #54
def denom_knudsen(c):
    """
    This is +gwF = 1/2.
    """
    return algopy.exp(-c)