Example 1
def abeff_trianglebound(N, y, t, cond):
    sigma1 = 0.5 * (1 + y)
    sum1, sum2, sum3, sum5 = [0.0 for _ in range(4)]
    b1 = 1
    a1 = mp.power(N, -0.4)
    xN = 4 * mp.pi() * N * N - mp.pi() * t / 4.0
    xNp1 = 4 * mp.pi() * (N + 1) * (N + 1) - mp.pi() * t / 4.0
    delta = mp.pi() * y / (2 * (xN - 6 - (14 + 2 * y) / mp.pi())) + 2 * y * (
        7 + y) * mp.log(abs(1 + y + 1j * xNp1) / (4 * mp.pi)) / (xN * xN)
    expdelta = mp.exp(delta)
    for n in range(1, 30 * N + 1):
        nf = float(n)
        denom = mp.power(nf, sigma1 + (t / 4.0) * mp.log(N * N))
        common1 = mp.exp((t / 4.0) * mp.power(mp.log(nf), 2))
        common2 = common1 * mp.power(nf / N, y) * expdelta * mp.exp(
            t * y * mp.log(n) / (2 * (xN - 6)))
        bn, bn2, bn3, bn5 = [
            common1 * abs(cond[n][2 * i - 1]) for i in range(1, 5)
        ]
        an, an2, an3, an5 = [
            common2 * abs(cond[n][2 * i]) for i in range(1, 5)
        ]
        sum1 += (bn + an) / denom
        sum2 += (bn2 + an2) / denom
        sum3 += (bn3 + an3) / denom
        sum5 += (bn5 + an5) / denom
    return [N, expdelta] + [2 - j for j in [sum1, sum2, sum3, sum5]]
Example 2
def abtoyx_e3(z, t):
    x = z.real
    xdash = x + mp.pi() * t / 4.0
    y = z.imag
    sigma1, sigma2 = 0.5 * (1 + y), 0.5 * (1 - y)
    s1, s2 = sigma1 + 0.5j * xdash, sigma2 + 0.5j * xdash
    N = int(mp.sqrt(0.25 * x / mp.pi()))
    sum1_L, sum1_R = 0.0, 0.0
    factor2 = 1 - 1 / mp.power(2.0, s1 + (t / 4.0) * mp.log(N * N / 2.0))
    factor3 = 1 - 1 / mp.power(3.0, s1 + (t / 4.0) * mp.log(N * N / 2.0))
    factorN = mp.power(N, -0.4)
    for n in range(1, N + 1):
        n = float(n)
        sum1_L += mp.power(n, -1 * s1 - (t / 4.0) * mp.log(N * N / n))
        sum1_R += mp.power(n, -1 * s2 - (t / 4.0) * mp.log(N * N / n))
    sum1_R = sum1_R * factorN
    sum12_L, sum12_R = sum1_L * factor2, sum1_R * factor2
    sum123_L, sum123_R = sum12_L * factor3, sum12_R * factor3
    absum1_L, absum1_R, absum12_L, absum12_R, absum123_L, absum123_R = abs(
        sum1_L), abs(sum1_R), abs(sum12_L), abs(sum12_R), abs(sum123_L), abs(
            sum123_R)
    abdiff1, abdiff12, abdiff123 = absum1_L - absum1_R, absum12_L - absum12_R, absum123_L - absum123_R
    return [
        sum1_L, sum1_R, absum1_L, absum1_R, abdiff1, sum12_L, sum12_R,
        absum12_L, absum12_R, abdiff12, sum123_L, sum123_R, absum123_L,
        absum123_R, abdiff123
    ]
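A minimal usage sketch (the point z = 20000 + 0.2i and t = 0.2 are illustrative): only mpmath is needed, and entries 4, 9 and 14 of the returned list are the |left sum| - |right sum| differences.

from mpmath import mp

mp.dps = 30
out = abtoyx_e3(mp.mpc(20000, 0.2), 0.2)
print(out[4], out[9], out[14])   # abdiff1, abdiff12, abdiff123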
Example 3
def FreeFermions(eigvec, subsystem, FermiVector):
	r=range(FermiVector)
	Cij=mp.matrix([[mp.fsum([eigvec[i,k]*eigvec[j,k] for k in r]) for i in subsystem] for j in subsystem])
	C_eigval=mp.eigsy(Cij, eigvals_only=True)
	EH_eigval=mp.matrix([mp.log(mp.fdiv(mp.fsub(mp.mpf(1.0),x),x)) for x in C_eigval])
	S=mp.re(mp.fsum([mp.log(mp.mpf(1.0)+mp.exp(-x))+mp.fdiv(x,mp.exp(x)+mp.mpf(1.0)) for x in EH_eigval]))
	return(S)
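A minimal self-contained sketch (the hopping-chain Hamiltonian below is an illustrative construction, not from the original project): diagonalize a nearest-neighbour chain with mp.eigsy and pass its eigenvectors to FreeFermions to get the entanglement entropy of half the chain.

from mpmath import mp

mp.dps = 30
L_sites = 8
H = mp.zeros(L_sites, L_sites)            # tight-binding chain: H[i, i+1] = H[i+1, i] = -1
for i in range(L_sites - 1):
    H[i, i + 1] = H[i + 1, i] = mp.mpf(-1)
E, eigvec = mp.eigsy(H)                   # columns of eigvec are the single-particle modes
subsystem = list(range(L_sites // 2))     # first half of the chain
FermiVector = L_sites // 2                # half filling (assumes mp.eigsy returns ascending eigenvalues)
print(FreeFermions(eigvec, subsystem, FermiVector))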
Example 4
def abeff_lemmabound(N, y, t, cond):
    sigma1 = 0.5 * (1 + y)
    sum1, sum2, sum3, sum5 = [0.0 for _ in range(4)]
    b1 = 1
    a1 = mp.power(N, -0.4)
    xN = 4 * mp.pi() * N * N - mp.pi() * t / 4.0
    xNp1 = 4 * mp.pi() * (N + 1) * (N + 1) - mp.pi() * t / 4.0
    delta = mp.pi() * y / (2 * (xN - 6 - (14 + 2 * y) / mp.pi())) + 2 * y * (
        7 + y) * mp.log(abs(1 + y + 1j * xNp1) / (4 * mp.pi)) / (xN * xN)
    expdelta = mp.exp(delta)
    for n in range(2, 30 * N + 1):
        nf = float(n)
        denom = mp.power(nf, sigma1 + (t / 4.0) * mp.log(N * N))
        #print([cond[n][i] for i in range(1,9)])
        common1 = mp.exp((t / 4.0) * mp.power(mp.log(nf), 2))
        common2 = common1 * mp.power(nf / N, y)
        common3 = expdelta * (mp.exp(t * y * mp.log(n) / (2 * (xN - 6))) - 1)
        bn, bn2, bn3, bn5 = [common1 * cond[n][2 * i - 1] for i in range(1, 5)]
        an, an2, an3, an5 = [common2 * cond[n][2 * i] for i in range(1, 5)]
        en, en2, en3, en5 = an * common3, an2 * common3, an3 * common3, an5 * common3
        sum1 += (en + max(
            (1 - a1) * abs(bn + an) / (1 + a1), abs(bn - an))) / denom
        sum2 += (en2 + max(
            (1 - a1) * abs(bn2 + an2) / (1 + a1), abs(bn2 - an2))) / denom
        sum3 += (en3 + max(
            (1 - a1) * abs(bn3 + an3) / (1 + a1), abs(bn3 - an3))) / denom
        sum5 += (en5 + max(
            (1 - a1) * abs(bn5 + an5) / (1 + a1), abs(bn5 - an5))) / denom
    return [N, expdelta] + [1 - a1 - j for j in [sum1, sum2, sum3, sum5]]
Example 5
    def cumulants(self, gamma):
        """
        Compute 

        .. math::
           
           \Lambda(\gamma) = \log\left(\int_{\mathbb{R}^n} 
                             e^{\gamma \|z\|^2_2} F(dz)\right)

        as well as its first two derivatives with respect to $\gamma$,
        where $F$ is the empirical distribution of `self.sample`.

        """
        norm_squared = self.sample
        M = norm_squared.mean()
        M0 = float(np.mean([mp.exp(gamma * (ns - M)) for ns in norm_squared]))
        M0 *= mp.exp(gamma * M)
        M1 = np.mean([
            np.exp(float(gamma * ns + np.log(ns) - mp.log(M0)))
            for ns in norm_squared
        ])
        M2 = np.mean([
            np.exp(float(gamma * ns + 2 * np.log(ns) - mp.log(M0)))
            for ns in norm_squared
        ])
        return M0, M1, (M2 - M1**2)
Example 6
def D_transform(j0,j1,j2):
    
    d = mp.mpf(10**(-mp.dps+1))
    
    z0,z1,z2,z3 = 1/(j0*j1*j2),j1*j0/j2,j2*j0/j1,j1*j2/j0
    c0,c1,c2,c3 = z0+z1+z2+z3,z0+z1-z2-z3,z0+z2-z1-z3,z0+z3-z1-z2 
    
    t1 = mp.sqrt(c2+0j)*mp.sqrt(c3+0j)/(mp.sqrt(c0+0j)*mp.sqrt(c1+0j))
    t2 = mp.sqrt(c1+0j)*mp.sqrt(c3+0j)/(mp.sqrt(c0+0j)*mp.sqrt(c2+0j))
    t3 = mp.sqrt(c1+0j)*mp.sqrt(c2+0j)/(mp.sqrt(c0+0j)*mp.sqrt(c3+0j))
    
    if abs(1+t1)<d:
        t1 = -1+d
    if abs(1+t2)<d:
        t2 = -1+d
    if abs(1+t3)<d:
        t3 = -1+d
        
    j1p = d + mp.sqrt((1-t1)/(1+t1))
    j2p = d + mp.sqrt((1-t2)/(1+t2))
    jD  = d + mp.sqrt((1-t3)/(1+t3))
    
    dF = mp.log(z0/(j1p*j2p*jD+1/(j1p*j2p*jD))+0j)+mp.log(jD+1/jD)
    
    return j1p,j2p,dF
Example 7
def _solve_expx_x_logx(tau, tol, max_steps=10):
    '''Solves the equation

    log(pi/tau) = pi/2 * exp(x) - x - log(x)

    approximately using Newton's method. The approximate solution is guaranteed
    to overestimate.
    '''
    x = mp.log(2 / mp.pi * mp.log(mp.pi / tau))

    # x = mp.log(tau/mp.pi) -  mp.lambertw(-tau/2, -1))
    # x = mp.mpf(1)/2 \
    #    - mp.log(mp.sqrt(mp.pi/tau)) \
    #    - mp.lambertw(-mp.sqrt(mp.exp(1)*mp.pi*tau)/4, -1)

    def f0(x):
        return mp.pi / 2 * mp.exp(x) - x - mp.log(x * mp.pi / tau)

    def f1(x):
        return mp.pi / 2 * mp.exp(x) - 1 - mp.mpf(1) / x

    f0x = f0(x)
    success = False
    # At least one step is performed. This is required for the guarantee of
    # overestimation.
    for _ in range(max_steps):
        x -= f0x / f1(x)
        f0x = f0(x)
        if abs(f0x) < tol:
            success = True
            break

    assert success
    return x
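A minimal usage sketch (tau and tol are illustrative): solve for a given tau and check the residual of the equation quoted in the docstring.

from mpmath import mp

mp.dps = 50
tau = mp.mpf('1e-30')
x = _solve_expx_x_logx(tau, tol=mp.mpf('1e-40'))
residual = mp.pi / 2 * mp.exp(x) - x - mp.log(x * mp.pi / tau)
print(x, residual)   # |residual| < tol after convergence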
Example 8
def Nt(t, T):
    '''Estimates the number of H_t zeroes expected to be found up to height T'''
    t, T = mp.mpf(t), mp.mpf(T)
    Tsmall = T / (4 * mp.pi())
    N0 = Tsmall * mp.log(Tsmall) - Tsmall
    extra = (t / 16.0) * mp.log(Tsmall)
    return (N0 + extra).real
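A minimal usage sketch (the height is illustrative): compare the expected zero count up to T = 10000 without and with the t-dependent correction.

from mpmath import mp

mp.dps = 30
print(Nt(0, 10000))      # main term (T/(4*pi)) * (log(T/(4*pi)) - 1) only
print(Nt('0.4', 10000))  # adds the (t/16) * log(T/(4*pi)) correction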
Example 9
def kill_vertical(E, j_matrix, x, y):
    y0, y1 = y, y + 1
    j0 = j_matrix[y0 * L + x, y1 * L + x]
    j_matrix[y0 * L + x, y1 * L + x], j_matrix[y1 * L + x, y0 * L + x] = 1, 1
    dF = mp.log(j0 + 1 / j0) - mp.log(2)
    E += dF
    return E, j_matrix
Example 10
def Ht_Effective(z, t):
    """
    This uses the effective approximation of H_t from Terry's blog
    :param z: point at which H_t is computed
    :param t: the "time" parameter
    :return: H_t as a sum of two terms that are analogous to A and B, but now also with an effective error bound (returned as a fraction of |H_t|)
    """
    z, t = mp.mpc(z), mp.mpc(t)
    sigma = (1 - z.imag) / 2.0
    T = (z.real) / 2.0
    Tdash = T + t * mp.pi() / 8.0
    s1 = sigma + 1j * T
    s2 = 1 - sigma + 1j * T
    N = int((mp.sqrt(Tdash / (2 * mp.pi()))).real)

    alph1 = alpha1(s1)
    alph2 = alpha1(s2).conjugate()
    A0_expo = (t / 4.0) * alph1 * alph1
    B0_expo = (t / 4.0) * alph2 * alph2
    H01_est1 = H01(s1)
    H01_est2 = H01(s2).conjugate()

    #begin main estimate block
    A0 = mp.exp(A0_expo) * H01_est1
    B0 = mp.exp(B0_expo) * H01_est2
    A_sum = 0.0
    B_sum = 0.0
    for n in range(1, N + 1):
        A_sum += 1 / mp.power(n, s1 + (t / 2.0) * alph1 -
                              (t / 4.0) * mp.log(n))
        B_sum += 1 / mp.power(
            n, 1 - s1 + (t / 2.0) * alph2 - (t / 4.0) * mp.log(n))
    A = A0 * A_sum
    B = B0 * B_sum
    H = (A + B) / 8.0
    #end main estimate block

    #begin error block
    A0_err_expo = (t / 4.0) * (abs(alph1)**2)  #A0_expo.real may also work
    B0_err_expo = (t / 4.0) * (abs(alph2)**2)  #B0_expo.real may also work
    epserr_1 = mp.exp(A0_err_expo) * abs(H01_est1) * abs(eps_err(s1, t)) / (
        (T - 3.33) * 8.0)
    epserr_2 = mp.exp(B0_err_expo) * abs(H01_est2) * abs(eps_err(s2, t)) / (
        (T - 3.33) * 8.0)
    epserr = epserr_1 + epserr_2

    C0 = mp.sqrt(mp.pi()) * mp.exp(-1 * (t / 64.0) * (mp.pi()**2)) * mp.power(
        Tdash, 1.5) * mp.exp(-1 * mp.pi() * T / 4.0)
    C = C0 * vwf_err(s1, t) / 8.0
    toterr = epserr + C
    #print(epserr_1, epserr_2, C0, vwf_err(s1, t), C, toterr.real)
    #end error block

    if z.imag == 0: return (H.real, toterr.real / abs(H.real))
    else: return (H, toterr.real / abs(H))
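A usage sketch; the helpers alpha1, H01, eps_err and vwf_err referenced above belong to the same module and are not shown here, so this only illustrates the call and the (value, relative error bound) pair returned for a real evaluation point.

from mpmath import mp

mp.dps = 30
H, rel_err = Ht_Effective(mp.mpc(10000, 0), 0.4)   # real z, so the real part of H_t is returned
print(H, rel_err)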
Example 11
def FreeFermions(subsystem, C):
    C = mp.matrix([[C[x, y] for x in subsystem] for y in subsystem])
    C_eigval = mp.eigh(C, eigvals_only=True)
    EH_eigval = mp.matrix(
        [mp.log(mp.fdiv(mp.fsub(mp.mpf(1.0), x), x)) for x in C_eigval])
    S = mp.re(
        mp.fsum([
            mp.log(mp.mpf(1.0) + mp.exp(-x)) + mp.fdiv(x,
                                                       mp.exp(x) + mp.mpf(1.0))
            for x in EH_eigval
        ]))
    return (S)
Example 12
def abtoybound(N, y, t, cond):
    sigma1, sigma2 = 0.5 * (1 + y), 0.5 * (1 - y)
    sum1_L, sum1_R, sum12_L, sum12_R, sum123_L, sum123_R, sum1235_L, sum1235_R = [
        0.0 for _ in range(8)
    ]
    ddxsum1_L, ddxsum1_R, ddxsum12_L, ddxsum12_R, ddxsum123_L, ddxsum123_R, ddxsum1235_L, ddxsum1235_R = [
        0.0 for _ in range(8)
    ]
    factorN = 1 / mp.power(N, 0.4)
    for n in range(1, 30 * N + 1):
        nf = float(n)
        denom1 = mp.power(nf, sigma1 + (t / 4.0) * mp.log(N * N / nf))
        denom2 = mp.power(nf, sigma2 + (t / 4.0) * mp.log(N * N / nf))
        term1_L = abs(cond[n][1] / denom1)
        term1_R = abs(cond[n][2] / denom2)
        sum1_L += term1_L
        sum1_R += term1_R
        ddxsum1_L += mp.log(n) * term1_L
        ddxsum1_R += mp.log(n) * term1_R
        term12_L = abs(cond[n][3] / denom1)
        term12_R = abs(cond[n][4] / denom2)
        sum12_L += term12_L
        sum12_R += term12_R
        ddxsum12_L += mp.log(n) * term12_L
        ddxsum12_R += mp.log(n) * term12_R
        term123_L = abs(cond[n][5] / denom1)
        term123_R = abs(cond[n][6] / denom2)
        sum123_L += term123_L
        sum123_R += term123_R
        ddxsum123_L += mp.log(n) * term123_L
        ddxsum123_R += mp.log(n) * term123_R
        term1235_L = abs(cond[n][7] / denom1)
        term1235_R = abs(cond[n][8] / denom2)
        sum1235_L += term1235_L
        sum1235_R += term1235_R
        ddxsum1235_L += mp.log(n) * term1235_L
        ddxsum1235_R += mp.log(n) * term1235_R
    sum1_L, sum12_L, sum123_L, sum1235_L = sum1_L - 1, sum12_L - 1, sum123_L - 1, sum1235_L - 1
    sum1_R, sum12_R, sum123_R, sum1235_R = sum1_R * factorN, sum12_R * factorN, sum123_R * factorN, sum1235_R * factorN
    ddxsum1_L, ddxsum12_L, ddxsum123_L, ddxsum1235_L = 0.5 * ddxsum1_L, 0.5 * ddxsum12_L, 0.5 * ddxsum123_L, 0.5 * ddxsum1235_L
    ddxsum1_R, ddxsum12_R, ddxsum123_R, ddxsum1235_R = 0.5 * ddxsum1_R * factorN, 0.5 * ddxsum12_R * factorN, 0.5 * ddxsum123_R * factorN, 0.5 * ddxsum1235_R * factorN

    abdiff1, ddxsum1 = 1 - sum1_L - sum1_R, ddxsum1_L + ddxsum1_R
    abdiff12, ddxsum12 = 1 - sum12_L - sum12_R, ddxsum12_L + ddxsum12_R
    abdiff123, ddxsum123 = 1 - sum123_L - sum123_R, ddxsum123_L + ddxsum123_R
    abdiff1235, ddxsum1235 = 1 - sum1235_L - sum1235_R, ddxsum1235_L + ddxsum1235_R
    return [
        sum1_L, sum1_R, abdiff1, ddxsum1_L, ddxsum1_R, ddxsum1, sum12_L,
        sum12_R, abdiff12, ddxsum12_L, ddxsum12_R, ddxsum12, sum123_L,
        sum123_R, abdiff123, ddxsum123_L, ddxsum123_R, ddxsum123, sum1235_L,
        sum1235_R, abdiff1235, ddxsum1235_L, ddxsum1235_R, ddxsum1235
    ]
Example 13
def FreeFermions(subsystem,
                 C_t):  # implements the free-fermion technique of Peschel
    C = mp.matrix([[C_t[x, y] for x in subsystem] for y in subsystem])
    C_eigval = mp.eigh(C, eigvals_only=True)
    EH_eigval = mp.matrix(
        [mp.log(mp.fdiv(mp.fsub(mp.mpf(1.0), x), x)) for x in C_eigval])
    S = mp.re(
        mp.fsum([
            mp.log(mp.mpf(1.0) + mp.exp(-x)) + mp.fdiv(x,
                                                       mp.exp(x) + mp.mpf(1.0))
            for x in EH_eigval
        ]))
    return (S)
Example 14
def calc_lmsr_marginal_price(token_count, token_index, net_outcome_tokens_sold,
                             funding):
    mp.dps = 100
    mp.pretty = True
    b = mpf(funding) / mp.log(len(net_outcome_tokens_sold))
    result = b * mp.log(
        sum(
            mp.exp(share_count / b + token_count / b)
            for share_count in net_outcome_tokens_sold) - sum(
                mp.exp(share_count / b)
                for index, share_count in enumerate(net_outcome_tokens_sold) if
                index != token_index)) - net_outcome_tokens_sold[token_index]

    return result
Example 15
def ln_I(k, n, t, rho, X, W):
    N = len(X)
    y0 = find_y0(k, n, t, rho)
    fy0 = f(y0, k, n, t, rho)
    fppy0 = d2fdy2(y0, k, n, t, rho)
    #X,W = scipy.special.he_roots(N)
    r = lambda y: f(y, k, n, t, rho) - fy0 + ((y - y0)**2) * (abs(fppy0) / 2.)
    ln_ans = mp.log(
        sum([
            W[i] * mp.exp(r(mp.sqrt(1. / abs(fppy0)) * X[i] + y0))
            for i in range(N)
        ]))
    ln_ans = ln_ans + fy0 + 0.5 * mp.log(1 - rho) - 0.5 * mp.log(
        2 * mp.pi * rho * abs(fppy0))
    return (ln_ans)
Example 16
def kill_horizontal(E, j_matrix, x, y):
    if x > 0:
        j1 = j_matrix[y * L + x, y * L + x + 1]
        j2 = j_matrix[y * L + x - 1, y * L + x]
        jp, dF = L_transform(j1, j2)
        j_matrix[y * L + x + 1, y * L + x], j_matrix[y * L + x,
                                                     y * L + x + 1] = 1, 1
        j_matrix[y * L + x - 1, y * L + x], j_matrix[y * L + x,
                                                     y * L + x - 1] = jp, jp
        E += dF - mp.log(2)
    if x == 0:
        j0 = j_matrix[0, 1]
        j_matrix[0, 1], j_matrix[1, 0] = 1, 1
        E += mp.log(j0 + 1 / j0) - mp.log(2)
    return E, j_matrix
Example 17
def z_Zolotarev(N, x, m):
    r"""
    Function to evaluate the Zolotarev polynomial (eq 1, [McNamara93]_).
    
    :param N:    Order of the Zolotarev polynomial
    :param x:    The argument at which one would like to evaluate the Zolotarev polynomial
    :param m:    m is the elliptic parameter (not the modulus k and not the nome q)
                  
    :rtype:      Returns a float, the value of Zolotarev polynomial at x
    """
    M = -ellipk(m) / N
    x3 = ellipfun('sn', u=-M, m=m)
    xbar = x3 * mp.sqrt(
        (x**2 - 1) / (x**2 - x3**2))  # rearranged eq 21, [Levy70]_
    u = ellipf(mp.asin(xbar),
               m)  # rearranged eq 20, [Levy70]_, asn(x) = F(asin(x)|m)
    f = mp.cosh((N / 2) * mp.log(z_eta(M + u, m) / z_eta(M - u, m)))
    if f.imag / f.real > 1e-10:
        print("imaginary part of the Zolotarev function is not negligible!")
        print("f_imaginary = ", f.imag)
    else:
        if (x > 0):  # no idea why I am doing this ... anyhow, it seems to work
            f = -f.real
        else:
            f = f.real
    return f
Example 18
def test_svd_test_case():
    # a test case from Golub and Reinsch
    #  (see wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).)

    eps = mp.exp(0.8 * mp.log(mp.eps))

    a = [[22, 10,  2,   3,  7],
         [14,  7, 10,   0,  8],
         [-1, 13, -1, -11,  3],
         [-3, -2, 13,  -2,  4],
         [ 9,  8,  1,  -2,  4],
         [ 9,  1, -7,   5, -1],
         [ 2, -6,  6,   5,  1],
         [ 4,  5,  0,  -2,  2]]

    a = mp.matrix(a)
    b = mp.matrix([mp.sqrt(1248), 20, mp.sqrt(384), 0, 0])

    S = mp.svd_r(a, compute_uv = False)
    S -= b
    assert mp.mnorm(S) < eps

    S = mp.svd_c(a, compute_uv = False)
    S -= b
    assert mp.mnorm(S) < eps
Example 19
def run_eig(A, verbose = 0):
    if verbose > 1:
        print("original matrix (eig):\n", A)

    n = A.rows

    E, EL, ER = mp.eig(A, left = True, right = True)

    if verbose > 1:
        print("E:\n", E)
        print("EL:\n", EL)
        print("ER:\n", ER)

    eps = mp.exp(0.8 * mp.log(mp.eps))

    err0 = 0
    for i in xrange(n):
        B = A * ER[:,i] - E[i] * ER[:,i]
        err0 = max(err0, mp.mnorm(B))

        B = EL[i,:] * A - EL[i,:] * E[i]
        err0 = max(err0, mp.mnorm(B))

    err0 /= n * n

    if verbose > 0:
        print("difference (E):", err0)

    assert err0 < eps
Example 20
File: util.py Project: fritzr/pyfp
def floor_power(x, n=2):
    """
    Return the value sign(x) * n^k such that n^k is the largest value <= |x|
    for integer k.
    """
    x = abs(x)
    return mp.power(n, mp.floor(mp.log(x, n)))
Example 21
File: util.py Project: fritzr/pyfp
def next_power(x, n=2):
    """
    Return the value sign(x) * n^k such that n^k is the smallest value > |x|
    for integer k.
    """
    x = abs(x)
    return mp.power(n, mp.floor(mp.log(x, n)) + 1)
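A quick usage sketch for the two helpers above, assuming mp is mpmath (or its mp context) imported in util.py; non-power inputs are chosen to avoid the floating-point edge case at exact powers of n.

from mpmath import mp

print(floor_power(10))    # 8  = 2**3, the largest power of 2 <= 10
print(next_power(10))     # 16 = 2**4, the smallest power of 2 > 10
print(floor_power(0.3))   # 0.25 = 2**-2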
Example 22
def entropy_pf(Lph,Tph,meas,G,prec=15,q=2,dps=200):
    
    # setting digit precision
    mp.dps = dps
    
    # =============================================================================
    # Evaluation of the entropy using partition function
    # Lph,Tph -- physical size (number of qubits) and time (circuit depth)
    # G -- (large) parameter controlling the gap between relevant and irrelevant states
    # prec -- number of digits in the answer
    # q -- qudit dimension
    # =============================================================================
    
    # -- define effective inverse temperature
    beta = mp.log((q**2+1)/q)
    # -- dimension of the effective lattice
    L,T = int(Lph/2)+(Lph+1)%2,Tph+1
    # -- corresponding couplings for the numerator and denominator 
    J_matrix1,J_matrix2 = generate_lattice_couplings(L,T,meas,G,beta)
    # -- including the temperature
    J_matrix1 = -beta*mp.matrix(J_matrix1.tolist())
    J_matrix2 = -beta*mp.matrix(J_matrix2.tolist())
    # -- evaluation of the entropy
    entropy = -log_partition_function(J_matrix1,L,T).real\
              +log_partition_function(J_matrix2,L,T).real
    
    return mp.nstr(entropy,prec)
Example 23
def run_eigsy(A, verbose = False):
    if verbose:
        print("original matrix:\n", str(A))

    D, Q = mp.eigsy(A)
    B = Q * mp.diag(D) * Q.transpose()
    C = A - B
    E = Q * Q.transpose() - mp.eye(A.rows)

    if verbose:
        print("eigenvalues:\n", D)
        print("eigenvectors:\n", Q)

    NC = mp.mnorm(C)
    NE = mp.mnorm(E)

    if verbose:
        print("difference:", NC, "\n", C, "\n")
        print("difference:", NE, "\n", E, "\n")

    eps = mp.exp( 0.8 * mp.log(mp.eps))

    assert NC < eps
    assert NE < eps

    return NC
Example 24
    def log_likelihood(self, x, S=10):
        # define the posterior q(z|x) / encode x into q(z|x)
        qz = self.posterior(x)
        # define the prior p(z)
        pz = self.prior(batch_size=x.size(0))

        # sample S samples from the posterior per data point x
        z = qz.rsample([S])  # [S, batchsize, latentdim]

        # define the observation model p(x|z) = B(x | g(z))
        px = self.observation_model(z)

        # Calculating Monte Carlo Estimate of log likelihood
        sum_log_lik = px.log_prob(x).sum(-1) + pz.log_prob(z).sum(
            -1) - qz.log_prob(z).sum(-1)
        log_lik = torch.zeros(x.shape[0])
        for i in range(x.shape[0]):
            tmp = mp.log(
                sum([mp.exp(t)
                     for t in sum_log_lik[:, i].detach().numpy()]) / S)
            log_lik[i] = float(tmp)

        ave_log_lik = log_lik.mean()
        n_in_ave = x.shape[0]

        return {
            "log_like": log_lik,
            "average_log_like": ave_log_lik,
            "n": n_in_ave
        }
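The mpmath loop above computes a log-mean-exp of the S per-sample log weights in arbitrary precision; as an alternative sketch (not part of the original model), the same reduction can be done directly in torch with logsumexp:

import torch

S = 10
sum_log_lik = torch.randn(S, 4)   # stand-in for the [S, batch] tensor built above
log_lik_alt = torch.logsumexp(sum_log_lik, dim=0) - torch.log(torch.tensor(float(S)))
print(log_lik_alt)                # one log-likelihood estimate per batch element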
Example 25
def run_hessenberg(A, verbose=0):
    if verbose > 1:
        print("original matrix (hessenberg):\n", A)

    n = A.rows

    Q, H = mp.hessenberg(A)

    if verbose > 1:
        print("Q:\n", Q)
        print("H:\n", H)

    B = Q * H * Q.transpose_conj()

    eps = mp.exp(0.8 * mp.log(mp.eps))

    err0 = 0
    for x in xrange(n):
        for y in xrange(n):
            err0 += abs(A[y, x] - B[y, x])
    err0 /= n * n

    err1 = 0
    for x in xrange(n):
        for y in xrange(x + 2, n):
            err1 += abs(H[y, x])

    if verbose > 0:
        print("difference (H):", err0, err1)

    if verbose > 1:
        print("B:\n", B)

    assert err0 < eps
    assert err1 == 0
Example 26
def test_levin_2():
    # [2] A. Sidi - "Practical Extrapolation Methods" p.373
    mp.dps = 17
    z = mp.mpf(10)
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        L = mp.levin(method="sidi", variant="t")
        n = 0
        while 1:
            s = (-1)**n * mp.fac(n) * z**(-n)
            v, e = L.step(s)
            n += 1
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.9 * mp.log(eps))
    exact = mp.quad(lambda x: mp.exp(-x) / (1 + x / z), [0, mp.inf])
    # there is also a symbolic expression for the integral:
    #   exact = z * mp.exp(z) * mp.expint(1,z)
    err = abs(v - exact)
    assert err < eps
    w = mp.nsum(lambda n: (-1)**n * mp.fac(n) * z**(-n), [0, mp.inf],
                method="sidi",
                levin_variant="t")
    assert err < eps
Example 27
def Ht_AFE_B(z, t):
    """
    This is the much more accurate approx functional eqn posted by Terry at
    https://terrytao.wordpress.com/2018/02/02/polymath15-second-thread-generalising-the-riemann-siegel-approximate-functional-equation/#comment-492182
    :param z: point at which H_t is computed
    :param t: the "time" parameter
    :return: the B part in Ht
    """
    z, t = mp.mpc(z), mp.mpc(t)
    s = (1 + 1j * z.real - z.imag) / 2
    tau = mp.sqrt(s.imag / (2 * mp.pi()))
    M = int(tau)

    B_pre = (1 / 16.0) * s * (s - 1) * mp.power(mp.pi(), 0.5 *
                                                (s - 1)) * mp.gamma(0.5 *
                                                                    (1 - s))
    B_sum = 0.0
    for m in range(1, M + 1):
        if t.real > 0:
            B_sum += mp.exp(
                (t / 16.0) * mp.power(mp.log(
                    (5 - s) / (2 * mp.pi() * m * m)), 2)) / mp.power(m, 1 - s)
        else:
            B_sum += 1 / mp.power(m, 1 - s)

    return B_pre * B_sum
Example 28
def w(sigma, t, T0dash):
    wterm1 = 1 + (sigma**2) / (T0dash**2)
    wterm2 = 1 + ((1 - sigma)**2) / (T0dash**2)
    wterm3 = (sigma - 1) * mp.log(wterm1) / 4.0 + nonnegative(
        (T0dash / 2.0) * mp.atan(sigma / T0dash) -
        sigma / 2.0) + 1 / (12.0 * (T0dash - 0.33))
    return mp.sqrt(wterm1) * mp.sqrt(wterm2) * mp.exp(wterm3)
Example 29
def run_hessenberg(A, verbose = 0):
    if verbose > 1:
        print("original matrix (hessenberg):\n", A)

    n = A.rows

    Q, H = mp.hessenberg(A)

    if verbose > 1:
        print("Q:\n",Q)
        print("H:\n",H)

    B = Q * H * Q.transpose_conj()

    eps = mp.exp(0.8 * mp.log(mp.eps))

    err0 = 0
    for x in xrange(n):
        for y in xrange(n):
            err0 += abs(A[y,x] - B[y,x])
    err0 /= n * n

    err1 = 0
    for x in xrange(n):
        for y in xrange(x + 2, n):
            err1 += abs(H[y,x])

    if verbose > 0:
        print("difference (H):", err0, err1)

    if verbose > 1:
        print("B:\n", B)

    assert err0 < eps
    assert err1 == 0
Example 30
def Ht_AFE_A(z, t):
    """
    This is the much more accurate approx functional eqn posted by Terry at
    https://terrytao.wordpress.com/2018/02/02/polymath15-second-thread-generalising-the-riemann-siegel-approximate-functional-equation/#comment-492182
    :param z: point at which H_t is computed
    :param t: the "time" parameter
    :return: the A part in Ht
    """
    z, t = mp.mpc(z), mp.mpc(t)
    s = (1 + 1j * z.real - z.imag) / 2
    tau = mp.sqrt(s.imag / (2 * mp.pi()))
    N = int(tau)

    A_pre = (1/16) * s * (s-1) \
            * mp.power(mp.pi(), -1*s/2) * mp.gamma(s/2)
    A_sum = 0.0
    for n in range(1, N + 1):
        if t.real > 0:
            A_sum += mp.exp(
                (t / 16) * mp.power(mp.log(
                    (s + 4) / (2 * mp.pi() * n * n)), 2)) / mp.power(n, s)
        else:
            A_sum += 1 / mp.power(n, s)

    return A_pre * A_sum
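A usage sketch (evaluation point illustrative), assuming the A and B parts are meant to be summed to approximate H_t(z), as in the approximate functional equation referenced in the docstrings.

from mpmath import mp

mp.dps = 30
z, t = mp.mpc(1000, 0), mp.mpc(0.4)
approx_Ht = Ht_AFE_A(z, t) + Ht_AFE_B(z, t)
print(approx_Ht)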
Example 31
def test_levin_3():
    mp.dps = 17
    z = mp.mpf(2)
    eps = mp.mpf(mp.eps)
    with mp.extraprec(
            7 * mp.prec
    ):  # we need a copious amount of precision to sum this highly divergent series
        L = mp.levin(method="levin", variant="t")
        n, s = 0, 0
        while 1:
            s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4**n))
            n += 1
            v, e = L.step_psum(s)
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.8 * mp.log(eps))
    exact = mp.quad(lambda x: mp.exp(-x * x / 2 - z * x**4),
                    [0, mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
    # there is also a symbolic expression for the integral:
    #   exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi))
    err = abs(v - exact)
    assert err < eps
    w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) /
                (mp.fac(n) * mp.fac(2 * n) * (4**n)), [0, mp.inf],
                method="levin",
                levin_variant="t",
                workprec=8 * mp.prec,
                steps=[2] + [1 for x in xrange(1000)])
    err = abs(v - w)
    assert err < eps
Example 32
def run_eigsy(A, verbose=False):
    if verbose:
        print("original matrix:\n", str(A))

    D, Q = mp.eigsy(A)
    B = Q * mp.diag(D) * Q.transpose()
    C = A - B
    E = Q * Q.transpose() - mp.eye(A.rows)

    if verbose:
        print("eigenvalues:\n", D)
        print("eigenvectors:\n", Q)

    NC = mp.mnorm(C)
    NE = mp.mnorm(E)

    if verbose:
        print("difference:", NC, "\n", C, "\n")
        print("difference:", NE, "\n", E, "\n")

    eps = mp.exp(0.8 * mp.log(mp.eps))

    assert NC < eps
    assert NE < eps

    return NC
Example 33
def run_eig(A, verbose=0):
    if verbose > 1:
        print("original matrix (eig):\n", A)

    n = A.rows

    E, EL, ER = mp.eig(A, left=True, right=True)

    if verbose > 1:
        print("E:\n", E)
        print("EL:\n", EL)
        print("ER:\n", ER)

    eps = mp.exp(0.8 * mp.log(mp.eps))

    err0 = 0
    for i in xrange(n):
        B = A * ER[:, i] - E[i] * ER[:, i]
        err0 = max(err0, mp.mnorm(B))

        B = EL[i, :] * A - EL[i, :] * E[i]
        err0 = max(err0, mp.mnorm(B))

    err0 /= n * n

    if verbose > 0:
        print("difference (E):", err0)

    assert err0 < eps
Example 34
def test_levin_nsum():
  mp.dps = 17

  with mp.extraprec(mp.prec):
      z = mp.mpf(10) ** (-10)
      a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
      assert abs(a - mp.euler) < 1e-10

  eps = mp.exp(0.8 * mp.log(mp.eps))

  a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
  assert abs(a - mp.log(2)) < eps

  z = 2 + 1j
  f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
  v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
  exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
  assert abs(exact - v) < eps
Example 35
    def cumulants(self, gamma):
        """
        Compute 

        .. math::
           
           \Lambda(\gamma) = \log\left(\int_{\mathbb{R}^n} 
                             e^{\gamma \|z\|^2_2} F(dz)\right)

        as well as its first two derivatives with respect to $\gamma$,
        where $F$ is the empirical distribution of `self.sample`.

        """
        norm_squared = self.sample
        M = norm_squared.mean()
        M0 = float(np.mean([mp.exp(gamma*(ns-M)) for ns in norm_squared]))
        M0 *= mp.exp(gamma*M)
        M1 = np.mean([np.exp(float(gamma*ns+np.log(ns)-mp.log(M0))) for ns in norm_squared])
        M2 = np.mean([np.exp(float(gamma*ns+2*np.log(ns)-mp.log(M0))) for ns in norm_squared])
        return M0, M1, (M2-M1**2)
Example 36
def test_cohen_alt_0():
    mp.dps = 17
    AC = mp.cohen_alt()
    S, s, n = [], 0, 1
    while 1:
        s += -((-1) ** n) * mp.one / (n * n)
        n += 1
        S.append(s)
        v, e = AC.update_psum(S)
        if e < mp.eps:
            break
        if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.9 * mp.log(mp.eps))
    err = abs(v - mp.pi ** 2 / 12)
    assert err < eps
Example 37
def z_Zolotarev(N, x, m):
    """Function to evaluate the Zolotarev polynomial (eq 1, [McNamara93]_)."""
    M = -ellipk(m) / N
    x3 = ellipfun('sn', u= -M, m=m)  
    xbar = x3 * mp.sqrt((x ** 2 - 1) / (x ** 2 - x3 ** 2)) # rearranged eq 21, [Levy70]_
    u = ellipf(mp.asin(xbar), m) # rearranged eq 20, [Levy70]_, asn(x) = F(asin(x)|m)     
    f = mp.cosh((N / 2) * mp.log(z_eta(M + u, m) / z_eta(M - u, m)))
    if (f.imag / f.real > 1e-10):
        print "imaginary part of the Zolotarev function is not negligible!"
        print "f_imaginary = ", f.imag
    else:
        if (x > 0): # no idea why I am doing this ... anyhow, it seems working
            f = -f.real  
        else:
            f = f.real        
    return f
Example 38
def entropy(NMZ, NM, NZ, NZW, M, K, J, alpha, phi):
    '''
    compute perplexity as a function of entropy of the model
    '''
    AK = K * alpha
    N = 0
    ent = 0
    for m, d in enumerate(DTM):
        #print "m:", m
        #print "d", d
        theta = NMZ[m, :] / (M + AK)
        #print theta
        ent -= mp.log(np.inner(dryrun[:,m],theta))
        #print "ent:", ent
        N += M
    return mp.exp(ent/N)
Example 39
def run_schur(A, verbose = 0):
    if verbose > 1:
        print("original matrix (schur):\n", A)

    n = A.rows

    Q, R = mp.schur(A)

    if verbose > 1:
        print("Q:\n", Q)
        print("R:\n", R)

    B = Q * R * Q.transpose_conj()
    C = Q * Q.transpose_conj()

    eps = mp.exp(0.8 * mp.log(mp.eps))

    err0 = 0
    for x in xrange(n):
        for y in xrange(n):
            err0 += abs(A[y,x] - B[y,x])
    err0 /= n * n

    err1 = 0
    for x in xrange(n):
        for y in xrange(n):
            if x == y:
                C[y,x] -= 1
            err1 += abs(C[y,x])
    err1 /= n * n

    err2 = 0
    for x in xrange(n):
        for y in xrange(x + 1, n):
            err2 += abs(R[y,x])

    if verbose > 0:
        print("difference (S):", err0, err1, err2)

    if verbose > 1:
        print("B:\n", B)

    assert err0 < eps
    assert err1 < eps
    assert err2 == 0
Example 40
def test_levin_0():
    mp.dps = 17
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        L = mp.levin(method = "levin", variant = "u")
        S, s, n = [], 0, 1
        while 1:
            s += mp.one / (n * n)
            n += 1
            S.append(s)
            v, e = L.update_psum(S)
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.9 * mp.log(eps))
    err = abs(v - mp.pi ** 2 / 6)
    assert err < eps
    w = mp.nsum(lambda n: 1/(n * n), [1, mp.inf], method = "levin", levin_variant = "u")
    err = abs(v - w)
    assert err < eps
Example 41
def test_levin_1():
    mp.dps = 17
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        L = mp.levin(method = "levin", variant = "v")
        A, n = [], 1
        while 1:
            s = mp.mpf(n) ** (2 + 3j)
            n += 1
            A.append(s)
            v, e = L.update(A)
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.9 * mp.log(eps))
    err = abs(v - mp.zeta(-2-3j))
    assert err < eps
    w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
    err = abs(v - w)
    assert err < eps
Example 42
def run_svd_r(A, full_matrices = False, verbose = True):

    m, n = A.rows, A.cols

    eps = mp.exp(0.8 * mp.log(mp.eps))

    if verbose:
        print("original matrix:\n", str(A))
        print("full", full_matrices)

    U, S0, V = mp.svd_r(A, full_matrices = full_matrices)

    S = mp.zeros(U.cols, V.rows)
    for j in xrange(min(m, n)):
        S[j,j] = S0[j]

    if verbose:
        print("U:\n", str(U))
        print("S:\n", str(S0))
        print("V:\n", str(V))

    C = U * S * V - A
    err = mp.mnorm(C)
    if verbose:
        print("C\n", str(C), "\n", err)
    assert err < eps

    D = V * V.transpose() - mp.eye(V.rows)
    err = mp.mnorm(D)
    if verbose:
        print("D:\n", str(D), "\n", err)
    assert err < eps

    E = U.transpose() * U - mp.eye(U.cols)
    err = mp.mnorm(E)
    if verbose:
        print("E:\n", str(E), "\n", err)
    assert err < eps
Example 43
def test_levin_3():
    mp.dps = 17
    z=mp.mpf(2)
    eps = mp.mpf(mp.eps)
    with mp.extraprec(7*mp.prec):  # we need a copious amount of precision to sum this highly divergent series
        L = mp.levin(method = "levin", variant = "t")
        n, s = 0, 0
        while 1:
            s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
            n += 1
            v, e = L.step_psum(s)
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.8 * mp.log(eps))
    exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
    # there is also a symbolic expression for the integral:
    #   exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi))
    err = abs(v - exact)
    assert err < eps
    w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
    err = abs(v - w)
    assert err < eps
Example 44
def test_levin_2():
    # [2] A. Sidi - "Practical Extrapolation Methods" p.373
    mp.dps = 17
    z=mp.mpf(10)
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        L = mp.levin(method = "sidi", variant = "t")
        n = 0
        while 1:
            s = (-1)**n * mp.fac(n) * z ** (-n)
            v, e = L.step(s)
            n += 1
            if e < eps:
                break
            if n > 1000: raise RuntimeError("iteration limit exceeded")
    eps = mp.exp(0.9 * mp.log(eps))
    exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
    # there is also a symbolic expression for the integral:
    #   exact = z * mp.exp(z) * mp.expint(1,z)
    err = abs(v - exact)
    assert err < eps
    w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
    assert err < eps
Example 45
def z_Zolotarev(N, x, m):
    r"""
    Function to evaluate the Zolotarev polynomial (eq 1, [McNamara93]_).
    
    :param N:    Order of the Zolotarev polynomial
    :param x:    The argument at which one would like to evaluate the Zolotarev polynomial
    :param m:    m is the elliptic parameter (not the modulus k and not the nome q)
                  
    :rtype:      Returns a float, the value of Zolotarev polynomial at x
    """
    M = -ellipk(m) / N
    x3 = ellipfun('sn', u= -M, m=m)  
    xbar = x3 * mp.sqrt((x ** 2 - 1) / (x ** 2 - x3 ** 2)) # rearranged eq 21, [Levy70]_
    u = ellipf(mp.asin(xbar), m) # rearranged eq 20, [Levy70]_, asn(x) = F(asin(x)|m)     
    f = mp.cosh((N / 2) * mp.log(z_eta(M + u, m) / z_eta(M - u, m)))
    if (f.imag / f.real > 1e-10):
        print "imaginary part of the Zolotarev function is not negligible!"
        print "f_imaginary = ", f.imag
    else:
        if (x > 0): # no idea why I am doing this ... anyhow, it seems working
            f = -f.real  
        else:
            f = f.real        
    return f
############
# RG58 or Jans numbers
############

import math
import numpy as np
from mpmath import mp

mu0  = 4 * math.pi * 1e-7      # vacuum permeability, H/m (used by the expressions below)
eps0 = 8.8541878128e-12        # vacuum permittivity, F/m

a		= 0.4675*10**-3;                # mm
b		= 1.475*10**-3;                 # mm
c		= 1.8 *10**-3;                  # mm
sigma	= 5.8*10**7; 					# 1/Ohm*m
epsPE   = 1.9; 


f   = np.logspace(5, 8, 100); 
omega = 2*math.pi*f; 

Lp = mu0/(2*math.pi)*mp.log(b/a); 			    # ln(ra/ri)
Cp = 2*math.pi*eps0*epsPE/(mp.log(b/a)); 	    # ln(ra/ri)


etac	= np.sqrt(1j*omega*mu0/sigma); 
gammac	= np.sqrt(1j*omega*mu0*sigma);

# shortcuts
i0 = lambda x: mp.besseli(0,x)
i1 = lambda x: mp.besseli(1,x)
k0 = lambda x: mp.besselk(0,x)
k1 = lambda x: mp.besselk(1,x)

# Zap	= etac/(2*math.pi*a)*(sp.iv(0,gammac*a) / sp.iv(1,gammac*a)); 
ZapA = etac/(2*math.pi*a)
# NOTE: This file implements a slightly different version of li_criter.
#       It finds the Taylor expansion coefficients of log(xi(z/(z-1))), instead of its derivative,
#       which is what appears in the original li_criterion
#
# the following code is from
# http://fredrikj.net/blog/2013/03/testing-lis-criterion/
# It uses mpmath to calculate taylor expansion of xi function
#
# It will produce the 1st 21 coefficients for Li-criter
# [-0.69315, 0.023096, 0.046173, 0.069213, 0.092198, 0.11511, 0.13793, 0.16064, 0.18322, 0.20566,
#  0.22793, 0.25003,  0.27194,  0.29363,  0.31511,  0.33634, 0.35732, 0.37803, 0.39847, 0.41862, 0.43846]
#
# More information about mpmath can be found at: mpmath.org
# http://mpmath.org/

from mpmath import mp

mp.dps = 5
mp.pretty = True
xi = lambda s: (s - 1) * mp.pi ** (-0.5 * s) * mp.gamma(1 + 0.5 * s) * mp.zeta(s)

# calculate 1st 21 coefficients of taylor expansion of log(xi(z/(z-1))
tmp = mp.taylor(lambda z: mp.log(xi(z / (z - 1))), 0, 20)
print(tmp)