Example no. 1
def Propagate_stationary(M, p, dt, ddt=1):
    syst1 = time.time()

    E, EL, ER = mp.eig(M.T, left=True, right=True)
    E, EL, ER = mp.eig_sort(E, EL, ER)
    times = range(ddt, dt + ddt, ddt)
    UR = ER**-1
    R = [0] * (len(E) - 1)
    R.append(1)
    # print(M.T)
    # print(E)
    intermediate_populations = []
    print(time.time() - syst1)
    # print(np.exp(time*np.diag(e)))
    # E = np.real(np.dot(np.dot(U, np.diag(R)), Uinv))
    for i in range(len(times)):
        # print(mp.diag(R[i]))
        A = ER * mp.diag(R) * UR * p
        intermediate_populations.append(
            np.array((A.T).apply(mp.re).tolist()[0], dtype=float))
    # print(intermediate_populations)
    print(time.time() - syst1)
    # intermediate_populations = [np.array(((ER*mp.diag(R)*EL*p).T).apply(mp.re).tolist()[0], dtype=float) for t in times]
    # print(intermediate_populations)
    return intermediate_populations
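A minimal usage sketch (values are purely illustrative; it assumes Propagate_stationary is defined as above in the same module together with its imports, i.e. time, numpy as np and from mpmath import mp). The trailing 1 in R selects the last eigenvalue after sorting, which for a rate matrix with rows summing to zero is the zero eigenvalue, so the returned populations are the stationary distribution regardless of dt:

M = mp.matrix([[-1.0, 1.0],
               [2.0, -2.0]])       # illustrative rate matrix, rows sum to zero
p0 = mp.matrix([1.0, 0.0])         # initial populations (column vector)
pops = Propagate_stationary(M, p0, dt=10, ddt=5)
print(pops[-1])                    # ~[0.667, 0.333], the stationary distribution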
Example no. 2
def approx_P_sqrt_T(A, tolerance=1e-4):
    """
    Compute P_sqrt_T such that iv_P_norm(A, P_sqrt_T) is only slightly above max(abs(eig(A))) (at most by roughly a factor of 1/(1 - tolerance)).

    Will raise an AssertionError if max(abs(eig(A))) >= 1 - tolerance.

    @param tolerance: relative factor for numerical tolerance. Decrease with caution. Increasing the tolerance will increase robustness.
    """
    A = iv_matrix_mid_as_mp(A)
    eigv_A, _= mp.eig(A)
    max_eigv_A = max([abs(i) for i in eigv_A])
    assert max_eigv_A < (1 - tolerance)
    A = iv_matrix_to_numpy_ndarray(A)
    # eigenvalue scaling of A (without this, we could only guarantee iv_P_norm(M, P_sqrt_T) < 1)
    A = A / float(max_eigv_A) * (1 - tolerance)
    Q = numpy.eye(len(A))
    # scipy solves AXA^H - X + Q = 0 for X.
    # We need the solution of A.T P A - P = -Q, so A must be transposed!
    P = scipy.linalg.solve_discrete_lyapunov(a=A.T, q=Q)
    # check validity of the solution
    # Note that there is no guaranteed tolerance bound. For practical reasons we choose the given tolerance.
    assert numpy.allclose(numpy.matmul(numpy.matmul(A.T, P), A) - P, -Q, atol=tolerance), "Discrete lyapunov solution inaccurate. Try increasing the tolerance."
    # numpy.linalg.cholesky returns lower-triangular L such that L*L.T = P
    # Here, L.T is called P_sqrt_T.
    P_sqrt_T = numpy.linalg.cholesky(P).T
    return numpy_ndarray_to_mp_matrix(P_sqrt_T)
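The iv_* helpers are project-specific interval-arithmetic converters, but the core of the routine is the scipy call plus a Cholesky factorization. A self-contained sketch of that core (illustrative Schur-stable A, without the eigenvalue scaling):

import numpy
import scipy.linalg

A = numpy.array([[0.5, 0.4],
                 [0.0, 0.3]])                 # spectral radius 0.5 < 1 (illustrative)
Q = numpy.eye(len(A))
# scipy solves a X a^H - X + q = 0, so pass A.T to get A^T P A - P = -Q
P = scipy.linalg.solve_discrete_lyapunov(a=A.T, q=Q)
P_sqrt_T = numpy.linalg.cholesky(P).T         # P = L L^T, here P_sqrt_T = L^T
# In the weighted norm ||x||_P = ||P_sqrt_T x||_2, multiplication by A is a contraction:
print(numpy.linalg.norm(P_sqrt_T @ A @ numpy.linalg.inv(P_sqrt_T), 2))  # < 1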
Example no. 3
def Propagate_trunc2(M, p, dt, ddt=1):

    E, EL, ER = mp.eig(M.T, left=True, right=True)
    E, EL, ER = mp.eig_sort(E, EL, ER)
    times = np.arange(0, dt, ddt) + dt
    N = int(dt / ddt)
    if len(p) == 1:
        intermediate_populations = [p for i in range(N)]
        return intermediate_populations
    else:
        R = [[0 for i in range(len(p))] for j in range(N)]
        for i in range(N):
            R[i][-2] = mp.exp(E[-2] * times[i])
            R[i][-1] = 1
        # print(M)
        # print(ER)
        print(E)
        # print(R)
        # print(EL)
        # print(p)
        # print(ER*mp.diag(R[0])*EL*p)
        intermediate_populations = []
        for i in range(N):
            # print(mp.diag(R[i]))
            A = ER * mp.diag(R[i]) * EL * p
            intermediate_populations.append(
                np.array((A.T).apply(mp.re).tolist()[0], dtype=float))
        print(intermediate_populations)
        return intermediate_populations
Example no. 4
def test_P_synthesis(self):
     """
     Combined test:
     For A with eigenvalues inside the unit disk,
     generate P_sqrt_T such that P_norm(A, P_sqrt_T) < 1.
     """
     for c in [0.001234, 1, -2, 42.123]:
         eigenvalue = 0.99
         # rotation matrix with eigenvalue magnitude 0.99,
         # transformed with factor c (c=1: invariant set is a circle, otherwise: invariant set is elliptical)
         A = iv.matrix([[0., -c * eigenvalue], [eigenvalue / c, 0]])
         # Note that A must be well-conditioned, otherwise this test will fail
         # compute eigenvalues of A
         eigv_A, _ = mp.eig(iv_matrix_mid_as_mp(A))
         self.assertAlmostEqual(eigenvalue, max([abs(i) for i in eigv_A]),
                                3)
         P_sqrt_T = approx_P_sqrt_T(A)
         P_norm_iv = iv_P_norm(A, P_sqrt_T)
         self.check_P_norm(A, P_sqrt_T)
         # P_norm_iv must be at least 0.99, but should not be much larger than that
         self.assertLess(P_norm_iv, 1)
         self.assertGreater(P_norm_iv.b, 0.99)
         self.check_P_norm_expm(P_sqrt_T,
                                M1=mp.randmatrix(2),
                                A=A,
                                M2=mp.randmatrix(2),
                                tau=0.01)
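The helpers used in this test (iv_P_norm, check_P_norm, check_P_norm_expm, ...) belong to the surrounding project, but the construction of the test matrix can be checked with plain mpmath. A small illustrative sketch for one value of c:

from mpmath import mp

eigenvalue, c = 0.99, -2.0
A = mp.matrix([[0.0, -c * eigenvalue],
               [eigenvalue / c, 0.0]])
eigv, _ = mp.eig(A)
# the eigenvalues are +/- 0.99j, so the spectral radius is 0.99 < 1
print([abs(v) for v in eigv])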
Example no. 5
def eig_all_mpVer(theta):

    Y, Z = mp.eig(theta)
    Y = mp2np(Y)
    Z = mp2np(Z)
    piv = np.argsort(Y)[::-1]
    Y = np.sqrt(np.abs(Y[piv]))
    Z = np.conj(Z[:, piv].T)
    return Y, Z
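mp2np is a converter from the surrounding project. A plausible hypothetical stand-in is sketched below, together with an illustrative call in the same module (for a diagonal matrix the returned Y are the square roots of the eigenvalue magnitudes, sorted in descending order):

import numpy as np
from mpmath import mp

def mp2np(x):
    # hypothetical stand-in (not part of mpmath): convert the eigenvalue list
    # or the mpmath eigenvector matrix returned by mp.eig to a numpy array
    if isinstance(x, list):
        return np.array([complex(v) for v in x])
    return np.array(x.tolist(), dtype=complex)

theta = mp.matrix([[4.0, 0.0],
                   [0.0, 9.0]])
Y, Z = eig_all_mpVer(theta)
print(Y)        # ~[3., 2.]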
Example no. 6
def QSdiagonalise(mat):
    if QSMODE == MODE_NORM:
        w, v = np.linalg.eig(mat)
        P = np.transpose(np.matrix(v, dtype=np.complex128))
        return np.dot(P, np.dot(mat, np.linalg.inv(P)))
    else:
        w, v = mpmath.eig(mat)
        P = mpmath.matrix(v).T
        return P * mat * P**-1
Example no. 8
def steady_state_eig(_matrix):
    values, vectors = eig(_matrix)
    real_values = []
    for num in values:
        real_values.append(num.real)
    largest_eig_values = two_largest(real_values)  # want the largest numbers because they should all be negative
    _index = real_values.index(max(real_values))
    steady_state_raw = vectors[:, _index]
    steady_state = steady_state_raw / fsum(steady_state_raw)
    return steady_state, largest_eig_values
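eig, fsum and two_largest come from the surrounding module, so the function is not directly runnable here. The same idea, taking the eigenvector of the least-negative eigenvalue of a rate matrix and normalizing it to sum to one, can be sketched directly with mpmath (illustrative 2-state rate matrix):

from mpmath import mp

K = mp.matrix([[-1.0, 2.0],
               [1.0, -2.0]])        # rate matrix, columns sum to zero
E, ER = mp.eig(K)
idx = max(range(len(E)), key=lambda i: mp.re(E[i]))   # least-negative eigenvalue (~0)
col = [ER[i, idx] for i in range(K.rows)]
total = mp.fsum(col)
print([mp.chop(x / total) for x in col])              # ~[2/3, 1/3]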
Example no. 9
def multivariate_normalization(data, variables_size):
    mpmath.mp.dps = 20
    for j in range(0, len(data[0])):
        means = []
        ts_s = []
        for u in range(0, variables_size):
            means.append(numpy.mean(data[u][j]))
            ts_s.append(data[u][j])
        covariance_matrix = mpmath.matrix(numpy.cov(ts_s))
        w, v = mpmath.eig(covariance_matrix)
        diagonal = mpmath.diag(w)

        try:
            result = mpmath.sqrtm(diagonal)
        except ZeroDivisionError as error:
            print("j: ", j)
            print("ts:", ts_s)
            print("not invertible sqrtm")
            print("covariance_matrix:", covariance_matrix)
            sys.exit()

        B = v * result
        try:
            inverse_B = B**-1
        except ZeroDivisionError as error:
            # Not invertible. Skip this one.
            # Non invertable cases Uwave
            print("j: ", j)
            print("ts:", ts_s)
            print("not invertible")
            print("covariance_matrix:", covariance_matrix)
            sys.exit()
        except Exception as exception:
            # Not invertible. Skip this one.
            print("j: ", j)
            print("ts:", ts_s)
            print("not invertible")
            print("covariance_matrix:", covariance_matrix)
            sys.exit()
        for i in range(0, len(data[0][j])):
            atributes_together = []
            for u in range(0, variables_size):
                atributes_together.append(data[u][j][i])
            atributes_together = mpmath.matrix(atributes_together)
            result = atributes_together - mpmath.matrix(means)
            result = inverse_B * result
            for u in range(0, variables_size):
                if type(result[u]) is mpmath.mpc:
                    data[u][j][i] = result[u].real
                else:
                    data[u][j][i] = result[u]
    return data
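A minimal usage sketch with synthetic data (purely illustrative, same-module assumption as above): data[u][j] holds the j-th series of variable u, and the series of each group are whitened in place using the inverse square root of their covariance matrix.

import numpy
import mpmath

data = [[[1.0, 2.0, 3.0, 4.0, 5.0]],      # variable 0, one series of five samples
        [[2.0, 1.0, 4.0, 3.0, 5.0]]]      # variable 1, same shape
normalized = multivariate_normalization(data, variables_size=2)
print(normalized[0][0])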
Example no. 10
def eigenvalues(mat, sort=True):
    if mode == mode_python:
        e, _ = np.linalg.eig(mat)
        if sort:
            idx = np.argsort(e)
            return e[idx]
        else:
            return e
    else:
        e, v = mpmath.eig(mat)
        if sort:
            e, v = mpmath.eig_sort(e, v)
        return mpmath.matrix(e)
Example no. 11
def diagonalise(mat, sort=True):
    if mode == mode_python:
        e, v = np.linalg.eig(mat)
        if sort:
            idx = e.argsort()[::-1]
            v = v[:, idx]
        P = np.transpose(np.matrix(v, dtype=np.complex128))
        return np.dot(P, np.dot(mat, np.linalg.inv(P)))
    else:
        e, vl, vr = mpmath.eig(mat, True, True)
        if sort:
            e, vl, vr = mpmath.eig_sort(e, vl, vr)
        return vr**-1 * mat * vr
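A quick check of the mpmath branch (the mode/mode_python switch is module state of the surrounding project): for an illustrative matrix, the similarity transform vr**-1 * mat * vr should be close to a diagonal matrix of the sorted eigenvalues.

import mpmath

mat = mpmath.matrix([[2.0, 1.0],
                     [1.0, 3.0]])
e, vl, vr = mpmath.eig(mat, True, True)
e, vl, vr = mpmath.eig_sort(e, vl, vr)
D = vr**-1 * mat * vr
print(mpmath.chop(D))     # off-diagonal entries ~0, diagonal ~eigenvalues of mat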
Example no. 12
def test_mev():
    output("""\
    reim:{$[0>type x;1 0*x;2=count x;x;'`]};
    mc:{((x[0]*y 0)-x[1]*y 1;(x[0]*y 1)+x[1]*y 0)};
    mmc:{((.qml.mm[x 0]y 0)-.qml.mm[x 1]y 1;(.qml.mm[x 0]y 1)+.qml.mm[x 1]y 0)};
    mev_:{[b;x]
        if[2<>count wv:.qml.mev x;'`length];
        if[not all over prec>=abs
            mmc[flip vc;flip(flip')(reim'')flip x]-
            flip(w:reim'[wv 0])mc'vc:(flip')(reim'')(v:wv 1);'`check];
        / Normalize sign; LAPACK already normalized to real
        v*:1-2*0>{x a?max a:abs x}each vc[;0];
        (?'[prec>=abs w[;1];w[;0];w];?'[b;v;0n])};""")

    for A in eigenvalue_subjects:
        if A.rows <= 3:
            V = []
            for w, n, r in A.eigenvects():
                w = sp.simplify(sp.expand_complex(w))
                if len(r) == 1:
                    r = r[0]
                    r = sp.simplify(sp.expand_complex(r))
                    r = r.normalized() / sp.sign(max(r, key=abs))
                    r = sp.simplify(sp.expand_complex(r))
                else:
                    r = None
                V.extend([(w, r)] * n)
            V.sort(key=lambda wr: (-abs(wr[0]), -sp.im(wr[0])))
        else:
            Am = mp.matrix(A)
            # extra precision for complex pairs to be equal in sort
            with mp.extradps(mp.mp.dps):
                W, R = mp.eig(Am)
            V = []
            for w, r in zip(W, (R.column(i) for i in range(R.cols))):
                w = mp.chop(w)
                with mp.extradps(mp.mp.dps):
                    _, S, _ = mp.svd(Am - w * mp.eye(A.rows))
                if sum(x == 0 for x in mp.chop(S)) == 1:
                    # nullity 1, so normalized eigenvector is unique
                    r /= mp.norm(r) * mp.sign(max(r, key=abs))
                    r = mp.chop(r)
                else:
                    r = None
                V.append((w, r))
            V.sort(key=lambda wr: (-abs(wr[0]), -wr[0].imag))
        W, R = zip(*V)
        test("mev_[%sb" % "".join("0" if r is None else "1" for r in R),
             A, (W, [r if r is None else list(r) for r in R]),
             complex_pair=True)
Example no. 14
def eigen(M):
    # print(M)
    # M = mp.matrix(M.tolist())
    # eigenValues, eigenVectors = mp.eig(M)
    # eigenValues = np.diag(np.array(eigenMatrix).astype('float128'))
    E, EL, ER = mp.eig(M, left=True, right=True)
    E, EL, ER = mp.eig_sort(E, EL, ER)
    # print(ER*mp.diag(E)*EL)
    eigenVectors = ER
    eigenValues = E
    # eigenVectors = np.array(ER.apply(mp.re).tolist(), dtype=float)
    # eigenValues = np.array([mp.re(x) for x in E], dtype=float)
    # idx = eigenValues.argsort()[::-1]
    # eigenValues = eigenValues
    if len(eigenVectors.shape) == 1:
        eigenVectors = [eigenVectors]
    # print(eigenValues)
    # print(eigenVectors)
    return eigenValues, eigenVectors
Example no. 15
def Propagate(M, p, dt, ddt=1):
    E, EL, ER = mp.eig(M.T, left=True, right=True)
    UR = ER**-1
    # E, EL, ER = mp.eig_sort(E, EL, ER)   # time_series = np.arange(0, dt, ddt) + dt
    # print(mp.nstr(EL*ER, n=3))
    times = range(ddt, dt + ddt, ddt)
    # print(E)
    intermediate_populations = []
    # print(np.exp(time*np.diag(e)))
    # E = np.real(np.dot(np.dot(U, np.diag(R)), Uinv))
    for i in range(len(times)):
        R = [mp.exp(E[j] * times[i]) for j in range(len(E))]
        # R = [mp.exp(E[j]*0) for j in range(len(E))]
        A = ER * mp.diag(R) * UR * p
        # print(R)
        intermediate_populations.append(
            np.array((A.T).apply(mp.re).tolist()[0], dtype=float))
    # print(p)
    # print(intermediate_populations[-1])
    # intermediate_populations = [np.array(((ER*mp.diag(R)*EL*p).T).apply(mp.re).tolist()[0], dtype=float) for t in times]
    # print(intermediate_populations)
    return intermediate_populations
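A small consistency check (illustrative 2-state rate matrix; assumes numpy as np and from mpmath import mp as above): for diagonalizable M the spectral form ER * diag(exp(E*t)) * ER**-1 used here equals the matrix exponential, so the last entry returned by Propagate can be compared against a direct mp.expm propagation.

M = mp.matrix([[-1.0, 1.0],
               [2.0, -2.0]])            # illustrative rate matrix, rows sum to zero
p0 = mp.matrix([1.0, 0.0])
pops = Propagate(M, p0, dt=4, ddt=2)    # populations at t = 2 and t = 4
direct = mp.expm(M.T * 4) * p0          # the same propagation via expm
print(pops[-1])
print(direct.T)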
Example no. 16
def gauss(N, dps=15):
    '''Calculate 1D Gaussian quadrature points at arbitrary precision.
 
    Re-implementation of 'gauss.m', from Trefethen's Spectral Methods in MATLAB'''
    with mp.workdps(dps):
        beta = mpmath.matrix(N - 1, 1)
        for i in range(1, N):
            beta[i - 1] = 0.5 / (1 - (mpmath.mpf(2 * i)**-2))**0.5
        T = mpmath.matrix(N, N)
        for i in range(N - 1):
            T[i + 1, i] = beta[i]
            T[i, i + 1] = beta[i]
        (eigval, eigvec) = mpmath.eig(T)
        # Sort eigenvalues
        zl = sorted(zip(eigval, range(len(eigval))))
        x = np.array([ee[0] for ee in zl])
        w = np.array([2 * (eigvec[0, ee[1]]**2) for ee in zl])
    if (dps <= 15):
        x = x.astype('double')
        w = w.astype('double')
    return (x, w)
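A brief illustrative check (assuming mpmath, its mp context and numpy as np are imported as above): the nodes returned for N = 3 should agree with the Gauss-Legendre nodes from numpy.

x, w = gauss(3)
x_np, w_np = np.polynomial.legendre.leggauss(3)
print(x)       # ~[-0.7745967, 0.0, 0.7745967]
print(x_np)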
Example no. 17
def mc_compute_stationary_mpmath(P,
                                 precision=17,
                                 irreducible=False,
                                 ltol=0,
                                 utol=None):
    """
    Computes the stationary distributions of Markov matrix P.

    Parameters
    ----------
    P : array_like(float, ndim=2)
        A discrete Markov transition matrix.

    precision : scalar(int), optional(default: 17)
        Decimal precision in floating-point arithmetic with mpmath.
        mpmath.mp.dps is set to *precision*.

    irreducible : bool, optional(default: False)
        Set True if P is known a priori to be irreducible
        (for any i, j, (P^k)_{ij} > 0 for some k).
        If True, the eigenvector for the maximum eigenvalue is returned.

    ltol, utol: scalar(float), optional(default: ltol=0, utol=None)
        Lower and upper tolerance levels.
        Find eigenvectors for eigenvalues in [1-ltol, 1+utol]
        (where [1-ltol, 1+utol] = [1-ltol, +inf) when utol=None).

    Returns
    -------
    vecs : list of numpy.arrays of mpmath.ctx_mp_python.mpf
        A list of the eigenvectors whose eigenvalues lie in [1-ltol, 1+utol].

    Notes
    -----
    mpmath 0.18 or above is required.

    References
    ----------

        http://mpmath.org/doc/current

    """
    LTOL = ltol  # Lower tolerance level
    if utol is None:  # Upper tolerance level
        UTOL = 'inf'
    else:
        UTOL = utol

    with mp.workdps(precision):  # Temporarily change the working precision
        E, EL = mp.eig(mp.matrix(P), left=True, right=False)
        # E  : a list of length n containing the eigenvalues of A
        # EL : a matrix whose rows contain the left eigenvectors of A
        # See: github.com/fredrik-johansson/mpmath/blob/master/mpmath/matrices/eigen.py
        E, EL = mp.eig_sort(E, EL)  # Sorted in a descending order

        if irreducible:
            num_eigval_one = 1
        else:
            num_eigval_one = sum(
                mp.mpf(1) - mp.mpf(LTOL) <= val <= mp.mpf(1) + mp.mpf(UTOL)
                for val in E)

        vecs = [
            np.array((EL[i, :] / sum(EL[i, :])).tolist()[0])
            for i in range(EL.rows - num_eigval_one, EL.rows)
        ]

    return vecs
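A minimal usage sketch (illustrative two-state chain whose unique stationary distribution is [2/3, 1/3]; assumes mp and np are imported as used by the function above):

P = [[0.9, 0.1],
     [0.2, 0.8]]
vecs = mc_compute_stationary_mpmath(P, irreducible=True)
print(vecs[0])      # ~[0.6667, 0.3333]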
Example no. 18
def calc_ellipsoid_axes(coords, uvals, cell, probability=0.5, longest=True):
    """
    This method calculates the principal axes of an ellipsoid as list of two
    fractional coordinate triples.
    Many thanks to R. W. Grosse-Kunstleve and P. D. Adams
    for their great publication on the handling of atomic anisotropic displacement
    parameters:
    R. W. Grosse-Kunstleve, P. D. Adams, J Appl Crystallogr 2002, 35, 477–480.

    F = ... * exp ( -2π²[ h²(a*)²U11 + k²(b*)²U22 + ... + 2hka*b*U12 ] )

    SHELXL atom:
    Name type  x      y      z    occ     U11 U22 U33 U23 U13 U12
    F3    4    0.210835   0.104067   0.437922  21.00000   0.07243   0.03058 =
       0.03216  -0.01057  -0.01708   0.03014
    >>> import mpmath as mpm
    >>> cell = (10.5086, 20.9035, 20.5072, 90, 94.13, 90)
    >>> coords = [0.210835,   0.104067,   0.437922]
    >>> uvals = [0.07243, 0.03058, 0.03216, -0.01057, -0.01708, 0.03014]
    >>> l = calc_ellipsoid_axes(coords, uvals, cell, longest=True)
    >>> print(mpm.nstr(l))
    [[0.24765096, 0.11383281, 0.43064756], [0.17401904, 0.09430119, 0.44519644]]
    >>> calc_ellipsoid_axes(coords, uvals, cell, longest=False)
    [[[0.24765096, 0.11383281, 0.43064756], [0.218406, 0.09626142, 0.43746127], [0.21924358, 0.10514684, 0.44886868]], [[0.17401904, 0.09430119, 0.44519644], [0.203264, 0.11187258, 0.43838273], [0.20242642, 0.10298716, 0.42697532]]]
    >>> cell = (10.5086, 20.9035, 20.5072, 90, 94.13, 90)
    >>> coords = [0.210835,   0.104067,   0.437922]
    >>> uvals = [0.07243, -0.03058, 0.03216, -0.01057, -0.01708, 0.03014]
    >>> calc_ellipsoid_axes(coords, uvals, cell, longest=True)
    <BLANKLINE>
    Ellipsoid is non positive definite!
    <BLANKLINE>
    False

    >>> uvals = [0.07243, 0.03058, 0.03216, -0.01057, -0.01708]
    >>> calc_ellipsoid_axes(coords, uvals, cell, longest=False)
    Traceback (most recent call last):
    ...
    Exception: 6 Uij values have to be supplied!

    >>> cell = (10.5086, 20.9035, 90, 94.13, 90)
    >>> coords = [0.210835,   0.104067,   0.437922]
    >>> uvals = [0.07243, 0.03058, 0.03216, -0.01057, -0.01708, 0.03014]
    >>> calc_ellipsoid_axes(coords, uvals, cell, longest=True)
    Traceback (most recent call last):
    ...
    Exception: cell needs six parameters!

    :param coords: coordinates of the respective atom in fractional coordinates
    :type coords: list
    :param uvals: Uij values of the respective ellipsoid on a fractional
                  basis, as in CIF and SHELXL format
    :type uvals: list
    :param cell: unit cell of the structure: a, b, c, alpha, beta, gamma
    :type cell:  list
    :param probability: thermal probability of the ellipsoid
    :type probability: float or int
    :param longest: the length is not always what matters. Set to False to
                    get all three coordinates of the ellipsoid axes.
    :type longest: boolean

    """
    from misc import A
    probability += 1
    # Uij is symmetric:
    if len(uvals) != 6:
        raise Exception('6 Uij values have to be supplied!')
    if len(cell) != 6:
        raise Exception('cell needs six parameters!')
    # orthogonalization matrix that transforms the fractional coordinates
    # with respect to a crystallographic basis system to coordinates
    # with respect to a Cartesian basis:
    A = A(cell).orthogonal_matrix
    Ucart = ufrac_to_ucart(A, cell, uvals)
    # print(Ucart)
    # E => eigenvalues, Q => eigenvectors:
    E, Q = mpm.eig(Ucart)
    # calculate vectors of ellipsoid axes  
    try:
        sqrt(E[0])
        sqrt(E[1])
        sqrt(E[2])
    except ValueError:
        print('\nEllipsoid is non positive definite!\n')
        return False
    v1 = mpm.matrix([Q[0, 0], Q[1, 0], Q[2, 0]])
    v2 = mpm.matrix([Q[0, 1], Q[1, 1], Q[2, 1]])
    v3 = mpm.matrix([Q[0, 2], Q[1, 2], Q[2, 2]])
    v1i = v1 * (-1)
    v2i = v2 * (-1)
    v3i = v3 * (-1)
    # multiply probability (usually 50%)
    e1 = sqrt(E[0]) * probability
    e2 = sqrt(E[1]) * probability
    e3 = sqrt(E[2]) * probability
    # scale axis vectors to eigenvalues 
    v1, v2, v3, v1i, v2i, v3i = v1 * e1, v2 * e2, v3 * e3, v1i * e1, v2i * e2, v3i * e3
    # find out which vector is the longest:
    length = mpm.norm(v1)
    v = 0
    if mpm.norm(v2) > length:
        length = mpm.norm(v2)
        v = 1
    if mpm.norm(v3) > length:
        length = mpm.norm(v3)
        v = 2
    # move vectors back to atomic position
    atom = A * mpm.matrix(coords)
    v1, v1i = v1 + atom, v1i + atom
    v2, v2i = v2 + atom, v2i + atom
    v3, v3i = v3 + atom, v3i + atom
    # go back into fractional coordinates:
    a1 = cart_to_frac(v1, cell)
    a2 = cart_to_frac(v2, cell)
    a3 = cart_to_frac(v3, cell)
    a1i = cart_to_frac(v1i, cell)
    a2i = cart_to_frac(v2i, cell)
    a3i = cart_to_frac(v3i, cell)
    allvec = [[a1, a2, a3], [a1i, a2i, a3i]]
    if longest:
        # only the longest vector
        return [allvec[0][v], allvec[1][v]]
    else:
        # all vectors:
        return allvec
Example no. 20
def approx_P_norm_expm(P_sqrt_T, M1, A, M2, tau):
    A = iv_matrix_mid_to_numpy_ndarray(A)
    M2 = iv_matrix_mid_to_numpy_ndarray(M2)
    # coerce tau to maximum
    tau = float(mp.mpf(abs(iv.mpf(tau)).b))
    max_norm = 0
    for t in numpy.linspace(-tau, tau, 100):
        matrix = numpy.matmul(M1, numpy.matmul(scipy.linalg.expm(A*t) - numpy.eye(len(A)), M2))
        max_norm = max(max_norm, approx_P_norm(M=matrix, P_sqrt_T=P_sqrt_T))
    return max_norm

if __name__ == "__main__":
    random.seed(1234565567)
    print("Example: random matrix")
    for i in range(1):
        c = 2
        A = mp.randmatrix(20) - 0.5
        eigv_A, _= mp.eig(iv_matrix_mid_as_mp(A))
        A = 0.5 * A / max([abs(i) for i in eigv_A])


        eigv_A, _= mp.eig(iv_matrix_mid_as_mp(A))
        #print('eigenvalues(A) = ', eigv_A)
        print('spectral radius(A) = ', max([abs(i) for i in eigv_A]))
        print('interval spectral_norm(A) = ', iv_spectral_norm(A))
        P_sqrt_T = approx_P_sqrt_T(A)
        print('interval P_norm(A) = ',iv_P_norm(A, P_sqrt_T))
        print('interval P_norm(...expm(...)) = ', iv_P_norm_expm(P_sqrt_T, M1=mp.eye(len(A)), A=A, M2=mp.eye(len(A)), tau=0.01))
        print('sampled P_norm(...expm(...)) = ', approx_P_norm_expm(P_sqrt_T, M1=mp.eye(len(A)), A=A, M2=mp.eye(len(A)), tau=0.01))
        print('')