def unrolled_unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    Compute fixation probabilities by explicit quadrature summation.

    This should be compatible with algopy.
    But it may be very slow.
    The unrolling is with respect to a dot product: instead of a single
    algopy.dot over the quadrature knots, the weighted sum is accumulated
    one knot at a time.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    return: an ndarray of fixation probabilities with Taylor information
    """
    # NOTE: the original assigned nknots = len(g_quad_x) but never used it;
    # the zip over (g_quad_x, g_quad_w) below already bounds the inner loop.
    nstates = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for i in range(nstates):
        for j in range(nstates):
            # Skip pairs that are not adjacent in the design matrix.
            if not adjacency[i, j]:
                continue
            # Accumulate the quadrature approximation of the denominator
            # integral, one knot/weight pair at a time.
            for x, w in zip(g_quad_x, g_quad_w):
                tmp_a = - S[i, j] * x
                tmp_b = algopy.exp(tmp_a * (D[i, j] * (1-x) + 1))
                H[i, j] += tmp_b * w
            # The fixation probability is the reciprocal of the integral.
            H[i, j] = algopy.reciprocal(H[i, j])
    return H
def unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    Compute fixation probabilities using a quadrature dot product.

    This should be compatible with algopy.
    But it may be very slow.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    return: an ndarray of fixation probabilities with Taylor information
    """
    roots = g_quad_x
    weights = g_quad_w
    n = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for a in range(n):
        for b in range(n):
            # Only adjacent state pairs get a nonzero entry.
            if adjacency[a, b]:
                exponent = - S[a, b] * roots
                integrand = algopy.exp(exponent * (D[a, b] * (1-roots) + 1))
                denom = algopy.dot(integrand, weights)
                H[a, b] = algopy.reciprocal(denom)
    return H
def get_fixation_dominant_disease(S):
    """
    Compute fixation probabilities for the dominant disease model.

    The dominance parameter passed to the denominator is the negated
    sign of the selection difference.
    @param S: an ndarray of selection differences
    return: an ndarray of fixation probabilities
    """
    signs = algopy.sign(S)
    H = algopy.zeros_like(S)
    nrows = H.shape[0]
    ncols = H.shape[1]
    for a in range(nrows):
        for b in range(ncols):
            denom = kimrecessive.denom_piecewise(
                    0.5*S[a, b], -signs[a, b])
            H[a, b] = 1. / denom
    return H
def get_fixation_unconstrained(S, d):
    """
    Compute fixation probabilities with an unconstrained dominance parameter.

    @param S: an ndarray of selection differences
    @param d: a parameter that controls dominance vs. recessivity
    return: an ndarray of fixation probabilities
    """
    # NOTE(review): a second, essentially identical definition of this
    # function appears later in this file and will shadow this one at
    # import time -- confirm which definition is intended to survive.
    sign_S = algopy.sign(S)
    # The dominance matrix carries the sign of each selection difference.
    D = d * sign_S
    H = algopy.zeros_like(S)
    for i in range(H.shape[0]):
        for j in range(H.shape[1]):
            # Fixation probability is the reciprocal of the Kimura
            # denominator integral.
            H[i, j] = 1. / kimrecessive.denom_piecewise(0.5 * S[i, j], D[i, j])
    return H
def get_fixation_unconstrained(S, d):
    """
    Compute fixation probabilities with an unconstrained dominance parameter.

    @param S: an ndarray of selection differences
    @param d: a parameter that controls dominance vs. recessivity
    return: an ndarray of fixation probabilities
    """
    H = algopy.zeros_like(S)
    D = d * algopy.sign(S)
    nrows = H.shape[0]
    ncols = H.shape[1]
    for a in range(nrows):
        for b in range(ncols):
            denom = kimrecessive.denom_piecewise(
                    0.5*S[a, b], D[a, b])
            H[a, b] = 1. / denom
    return H
def algopy_unconstrained_recessivity_fixation(
        kimura_d,
        S,
        ):
    """
    This is only compatible with algopy and is not compatible with numpy.

    It takes ridiculous measures to compute higher order derivatives:
    the Taylor-coefficient buffers backing the algopy objects are exposed
    as flat 2d views and filled in place by a compiled extension.
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    return: an ndarray of fixation probabilities with Taylor information
    """
    nstates = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    # ncoeffs is the leading dimension of the algopy data buffer;
    # presumably the number of Taylor coefficients -- TODO confirm.
    ncoeffs = S.data.shape[0]
    shp = (ncoeffs, -1)
    # These reshapes create VIEWS of the underlying .data buffers, so
    # writes through the reshaped arrays mutate S, D, and H in place.
    S_data_reshaped = S.data.reshape(shp)
    D_data_reshaped = D.data.reshape(shp)
    H_data_reshaped = H.data.reshape(shp)
    # Scratch buffers for the extension call, same shape as H.
    tmp_a = algopy.zeros_like(H)
    tmp_b = algopy.zeros_like(H)
    tmp_c = algopy.zeros_like(H)
    tmp_a_data_reshaped = tmp_a.data.reshape(shp)
    tmp_b_data_reshaped = tmp_b.data.reshape(shp)
    tmp_c_data_reshaped = tmp_c.data.reshape(shp)
    # The compiled kernel fills H_data_reshaped in place; H is returned
    # because its buffer aliases that view.
    pykimuracore.kimura_algopy(
            g_quad_x,
            g_quad_w,
            S_data_reshaped,
            D_data_reshaped,
            tmp_a_data_reshaped,
            tmp_b_data_reshaped,
            tmp_c_data_reshaped,
            H_data_reshaped,
            )
    return H
def get_fixation_unconstrained_fquad(S, d, x, w, codon_neighbor_mask):
    """
    In this function name, fquad means "fixed quadrature."

    The S ndarray with ndim=2 depends on free parameters, and the d
    parameter is itself a free parameter, so both may be algopy objects
    carrying Taylor information.  The quadrature roots x and weights w
    are plain precomputed ndim=1 ndarrays with no Taylor information.
    @param S: array of selection differences
    @param d: parameter that controls dominance vs. recessivity
    @param x: precomputed roots for quadrature
    @param w: precomputed weights for quadrature
    @param codon_neighbor_mask: only compute entries neighboring pairs
    """
    #TODO: possibly use a mirror symmetry to double the speed
    # NOTE(review): a near-identical definition of this function appears
    # later in this file -- confirm which one is intended to survive.
    D = d * algopy.sign(S)
    H = algopy.zeros_like(S)
    for a in range(H.shape[0]):
        for b in range(H.shape[1]):
            if not codon_neighbor_mask[a, b]:
                continue
            denom = kimrecessive.denom_fixed_quad(
                    0.5*S[a, b], D[a, b], x, w)
            H[a, b] = 1. / denom
    return H
def get_fixation_unconstrained_fquad(S, d, x, w, codon_neighbor_mask):
    """
    In this function name, fquad means "fixed quadrature."

    The S ndarray with ndim=2 depends on free parameters.
    The d parameter is itself a free parameter.
    So both of those things are algopy objects carrying Taylor information.
    On the other hand, x and w are precomputed ndim=1 ndarrays
    which are not carrying around extra Taylor information.
    @param S: array of selection differences
    @param d: parameter that controls dominance vs. recessivity
    @param x: precomputed roots for quadrature
    @param w: precomputed weights for quadrature
    @param codon_neighbor_mask: only compute entries of neighboring codon pairs
    """
    #TODO: possibly use a mirror symmetry to double the speed
    D = d * algopy.sign(S)
    H = algopy.zeros_like(S)
    nrows = H.shape[0]
    ncols = H.shape[1]
    for row in range(nrows):
        for col in range(ncols):
            if codon_neighbor_mask[row, col]:
                H[row, col] = 1. / kimrecessive.denom_fixed_quad(
                        0.5*S[row, col], D[row, col], x, w)
    return H