Example #1
from numpy import diag, reciprocal, dot
from numpy.linalg import inv

def centered_tree_covariance(B, nleaves, v):
    """
    @param B: unweighted incidence matrix whose rows are edges
    @param nleaves: number of leaves
    @param v: vector of edge variances
    @return: pseudoinverse of the Schur complement Laplacian on the leaves
    """
    #TODO: track the block multiplication through the Schur complement
    # Build the edge-weighted graph Laplacian, weighting each edge
    # by the reciprocal of its variance.
    W = diag(reciprocal(v))
    L = dot(B.T, dot(W, B))
    # Partition the Laplacian into leaf (a) and internal (b) blocks.
    Laa = L[:nleaves, :nleaves]
    Lab = L[:nleaves, nleaves:]
    Lba = L[nleaves:, :nleaves]
    Lbb = L[nleaves:, nleaves:]
    L_schur = Laa - dot(Lab, dot(inv(Lbb), Lba))
    # The Schur complement Laplacian is singular (its rows sum to zero),
    # so obtain its pseudoinverse via the augmented()/restored() helpers,
    # assumed defined elsewhere in the module to add and then strip the
    # rank-completing J/n term before and after inversion.
    L_schur_pinv = restored(inv(augmented(L_schur)))
    return L_schur_pinv
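
A minimal usage sketch, assuming the augmented()/restored() helpers are importable, on a made-up three-leaf star tree (leaves 0-2, internal vertex 3):

import numpy as np

# Hypothetical incidence matrix: one signed row per edge (leaf, internal).
B_star = np.array([
    [1.0, 0.0, 0.0, -1.0],
    [0.0, 1.0, 0.0, -1.0],
    [0.0, 0.0, 1.0, -1.0],
])
v_star = np.ones(3)  # unit variance on every edge
Sigma = centered_tree_covariance(B_star, 3, v_star)
# Sigma is the 3x3 leaf covariance; like any Laplacian pseudoinverse,
# its rows and columns sum to (numerically) zero.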
Example #2
import algopy

def unrolled_unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    This should be compatible with algopy, but it may be very slow.
    The quadrature dot product is unrolled into an explicit loop over knots.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    @return: an ndarray of fixation probabilities with Taylor information
    """
    # g_quad_x and g_quad_w are module-level quadrature nodes and weights
    # on the unit interval (see the sketch after this function).
    nstates = S.shape[0]
    # Fold the sign of each selection difference into the dominance-like
    # parameter d.
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for i in range(nstates):
        for j in range(nstates):
            if not adjacency[i, j]:
                continue
            # Accumulate the quadrature approximation of the fixation
            # integral over the unit interval.
            for x, w in zip(g_quad_x, g_quad_w):
                tmp_a = - S[i, j] * x
                tmp_b = algopy.exp(tmp_a * (D[i, j] * (1-x) + 1))
                H[i, j] += tmp_b * w
            # The fixation probability is the reciprocal of the sum.
            H[i, j] = algopy.reciprocal(H[i, j])
    return H
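
The g_quad_x and g_quad_w globals are not defined in the snippet; one plausible construction, assuming Gauss-Legendre quadrature mapped from [-1, 1] to the unit interval, is:

import numpy as np

# Hypothetical module-level quadrature rule with 20 knots.
_x, _w = np.polynomial.legendre.leggauss(20)
g_quad_x = 0.5 * (_x + 1.0)  # nodes mapped to (0, 1)
g_quad_w = 0.5 * _w          # weights scaled by the interval length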
Example #3
import algopy

def unconstrained_recessivity_fixation(
        adjacency,
        kimura_d,
        S,
        ):
    """
    This is a vectorized variant of the unrolled function above: the
    quadrature sum becomes a single dot product over the knots.
    This should be compatible with algopy, but it may be very slow.
    @param adjacency: a binary design matrix to reduce unnecessary computation
    @param kimura_d: a parameter that might carry Taylor information
    @param S: an ndarray of selection differences with Taylor information
    @return: an ndarray of fixation probabilities with Taylor information
    """
    # Module-level quadrature nodes and weights on the unit interval.
    x = g_quad_x
    w = g_quad_w
    nstates = S.shape[0]
    D = algopy.sign(S) * kimura_d
    H = algopy.zeros_like(S)
    for i in range(nstates):
        for j in range(nstates):
            if not adjacency[i, j]:
                continue
            # Evaluate the integrand at every quadrature node at once,
            # then contract with the weights.
            tmp_a = - S[i, j] * x
            tmp_b = algopy.exp(tmp_a * (D[i, j] * (1-x) + 1))
            tmp_c = algopy.dot(tmp_b, w)
            H[i, j] = algopy.reciprocal(tmp_c)
    return H
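
A quick consistency check between the unrolled and vectorized versions, assuming the quadrature globals above and that algopy's elementwise functions fall through to plain numpy arrays (the values below are made up):

import numpy as np

nstates = 4
rng = np.random.default_rng(0)
S = rng.normal(size=(nstates, nstates))
adjacency = 1 - np.eye(nstates, dtype=int)  # all off-diagonal pairs
H_slow = unrolled_unconstrained_recessivity_fixation(adjacency, 0.5, S)
H_fast = unconstrained_recessivity_fixation(adjacency, 0.5, S)
assert np.allclose(H_slow, H_fast)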
Example #4
from numpy import diag, reciprocal, dot, trace, log, pi
from numpy.linalg import inv, det
from numpy.testing import assert_allclose

# Module-level constant assumed by the return value below.
LOG2PI = log(2 * pi)

def clever_cross_entropy_trees(B, nleaves, va, vb):
    """
    Compute a cross entropy between two Gaussian tree models,
    trying to be a little more clever about the linear algebra.

    @param B: augmented incidence matrix
    @param nleaves: number of leaves
    @param va: augmented reference point edge variances
    @param vb: augmented test point edge variances
    @return: the cross entropy
    """

    # deduce some quantities assuming an unrooted bifurcating tree
    ninternal = nleaves - 2
    nvertices = nleaves + ninternal

    # define an index for taking Schur complements
    n = nvertices
    k = nleaves + 1

    # Construct the full Laplacian matrix plus J/n.
    # A diagonal block of its inverse equals the inverse
    # of a Schur complement (verified by the assertion below).
    Wa = diag(reciprocal(va))
    La_plus = dot(B.T, dot(Wa, B))
    Laa = La_plus[:k, :k]
    Lab = La_plus[:k, k:]
    Lba = La_plus[k:, :k]
    Lbb = La_plus[k:, k:]
    L_schur_plus = Laa - dot(Lab, dot(inv(Lbb), Lba))
    assert_allclose(inv(L_schur_plus), inv(La_plus)[:k, :k])
    A = inv(La_plus)[:k, :k]

    # Construct the Schur complement of the test point matrix.
    Wb = diag(reciprocal(vb))
    L_plus = dot(B.T, dot(Wb, B))
    Laa = L_plus[:k, :k]
    Lab = L_plus[:k, k:]
    Lba = L_plus[k:, :k]
    Lbb = L_plus[k:, k:]
    L_schur_plus = Laa - dot(Lab, dot(inv(Lbb), Lba))
    B_inv = L_schur_plus
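    # For reference, the cross entropy between zero-mean Gaussians
    # p = N(0, A) and q = N(0, Q) in d dimensions is
    #   H(p, q) = (1/2) * (d*log(2*pi) + log(det(Q)) + trace(inv(Q) @ A)),
    # and the return below appears to be this formula with B_inv playing
    # the role of inv(Q), adjusted for the J/n augmentation.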
    #return 0.5 * ((n-1) * LOG2PI + trace(dot(B_inv, A)) - log(det(B_inv)))
    return 0.5 * (n * LOG2PI + trace(dot(B_inv, A) - 1) - log(det(B_inv)))
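
A minimal call sketch, assuming the J/n augmentation is implemented by appending an all-ones row to the incidence matrix with variance n (all values below are made up for illustration):

import numpy as np

# Unrooted bifurcating tree with 4 leaves (vertices 0-3) and
# 2 internal vertices (4, 5); one signed row of B per edge.
nleaves = 4
n = 6  # number of vertices
edges = [(0, 4), (1, 4), (2, 5), (3, 5), (4, 5)]
B = np.zeros((len(edges) + 1, n))
for r, (i, j) in enumerate(edges):
    B[r, i], B[r, j] = 1.0, -1.0
B[-1] = 1.0  # augmentation row: contributes J/n when its variance is n
va = np.concatenate([np.ones(len(edges)), [n]])        # reference variances
vb = np.concatenate([2.0 * np.ones(len(edges)), [n]])  # test variances
print(clever_cross_entropy_trees(B, nleaves, va, vb))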