Example #1
 def test_return_statdist(self):
     C = np.loadtxt(testpath + 'C_1_lag.dat')
     # dense
     T, mu = apicall(C, reversible=True, method='dense', return_statdist=True)
     mu_manual = stationary_distribution(T)
     np.testing.assert_allclose(mu, mu_manual)
     # sparse
     T, mu = apicall(C, reversible=True, method='sparse', return_statdist=True)
     mu_manual = stationary_distribution(T)
     np.testing.assert_allclose(mu, mu_manual)
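
For reference, the direct call pattern this test checks against (a minimal sketch using the same deeptime import path that appears in the examples below):

import numpy as np
from deeptime.markov.tools.analysis import stationary_distribution

# two-state transition matrix; pi solves pi @ P = pi
P = np.array([[0.9, 0.1],
              [0.2, 0.8]])
pi = stationary_distribution(P)
print(pi)                       # approx. [0.6667, 0.3333]
print(np.allclose(pi @ P, pi))  # True: pi is the left eigenvector for eigenvalue 1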
Example #2
def coarsegrain(P, n):
    """
    Coarse-grains transition matrix P to n sets using PCCA

    Coarse-grains transition matrix P such that the dominant eigenvalues are preserved, using:

    .. math::

        \tilde{P} = (M^T M)^{-1} M^T P M

    See [2]_ for the derivation of this form from the coarse-graining method first derived in [1]_.

    References
    ----------
    [1] S. Kube and M. Weber
        A coarse graining method for the identification of transition rates between molecular conformations.
        J. Chem. Phys. 126, 024103 (2007)
    [2] F. Noe, H. Wu, J.-H. Prinz and N. Plattner:
        Projected and hidden Markov models for calculating kinetics and metastable states of complex molecules
        J. Chem. Phys. 139, 184114 (2013)
    """
    M = pcca(P, n)
    # coarse-grained transition matrix
    W = np.linalg.inv(np.dot(M.T, M))
    A = np.dot(np.dot(M.T, P), M)
    P_coarse = np.dot(W, A)

    # symmetrize and renormalize to eliminate numerical errors
    from deeptime.markov.tools.analysis import stationary_distribution
    pi_coarse = np.dot(M.T, stationary_distribution(P))
    X = np.dot(np.diag(pi_coarse), P_coarse)
    P_coarse = X / X.sum(axis=1)[:, None]

    return P_coarse
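
The projection step above can be checked by hand with a crisp membership matrix instead of calling pcca. A minimal NumPy sketch; the 4-state matrix and memberships are made up for illustration:

import numpy as np

# toy 4-state chain with two metastable pairs {0, 1} and {2, 3}
P = np.array([[0.9, 0.1, 0.0, 0.0],
              [0.1, 0.8, 0.1, 0.0],
              [0.0, 0.1, 0.8, 0.1],
              [0.0, 0.0, 0.1, 0.9]])
# crisp memberships: column k is the indicator vector of metastable set k
M = np.array([[1.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0],
              [0.0, 1.0]])
# same projection as in coarsegrain: (M^T M)^{-1} M^T P M
P_coarse = np.linalg.inv(M.T @ M) @ (M.T @ P @ M)
print(P_coarse)              # [[0.95, 0.05], [0.05, 0.95]]
print(P_coarse.sum(axis=1))  # rows sum to 1 for crisp memberships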
Example #3
def is_reversible(T, mu=None, tol=1e-15):
    r"""
    Checks whether T is reversible in terms of the given stationary distribution.
    If no distribution is given, it will be calculated from T.

    Performs the following check:
    :math:`\pi_i P_{ij} = \pi_j P_{ji}`

    Parameters
    ----------
    T : scipy.sparse matrix
        Transition matrix
    mu : numpy.ndarray vector
        stationary distribution
    tol : float
        tolerance to check with

    Returns
    -------
    Truth value : bool
        True, if T is reversible with respect to mu
        False, otherwise
    """
    if not is_transition_matrix(T, tol):
        raise ValueError("given matrix is not a valid transition matrix.")

    T = T.tocsr()

    if mu is None:
        from deeptime.markov.tools.analysis import stationary_distribution
        mu = stationary_distribution(T)

    Mu = diags(mu, 0)
    prod = Mu * T

    return allclose_sparse(prod, prod.transpose(), rtol=tol)
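
The detailed-balance condition itself is easy to verify by hand; a minimal dense sketch for a two-state chain (any two-state chain is reversible):

import numpy as np

# two-state chain with P[0, 1] = a and P[1, 0] = b, so pi = (b, a) / (a + b)
a, b = 0.2, 0.1
P = np.array([[1 - a, a],
              [b, 1 - b]])
pi = np.array([b, a]) / (a + b)
# pi_i P_ij == pi_j P_ji  <=>  diag(pi) @ P is symmetric
X = pi[:, None] * P
print(np.allclose(X, X.T))  # True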
Example #4
    def setUp(self):
        """Store state of the rng"""
        self.state = np.random.mtrand.get_state()
        """Reseed the rng to enforce 'deterministic' behavior"""
        np.random.mtrand.seed(42)
        """Meta-stable birth-death chain"""
        b = 2
        q = np.zeros(7)
        p = np.zeros(7)
        q[1:] = 0.5
        p[0:-1] = 0.5
        q[2] = 1.0 - 10**(-b)
        q[4] = 10**(-b)
        p[2] = 10**(-b)
        p[4] = 1.0 - 10**(-b)

        bdc = BirthDeathChain(q, p)
        self.dtraj = bdc.msm.simulate(10000, start=0)
        self.tau = 1
        """Estimate MSM"""
        self.C_MSM = count_matrix(self.dtraj, self.tau, sliding=True)
        self.lcc_MSM = largest_connected_set(self.C_MSM)
        self.Ccc_MSM = largest_connected_submatrix(self.C_MSM,
                                                   lcc=self.lcc_MSM)
        self.mle_rev_max_err = 1E-8
        self.P_MSM = transition_matrix(self.Ccc_MSM,
                                       reversible=True,
                                       maxerr=self.mle_rev_max_err)
        self.mu_MSM = stationary_distribution(self.P_MSM)
        self.k = 3
        self.ts = timescales(self.P_MSM, k=self.k, tau=self.tau)
Example #5
File: msm.py Project: markovmodel/PyEMMA
 def pi(self, value):
     if value is None and self.P is not None:
         from deeptime.markov.tools.analysis import stationary_distribution
         value = stationary_distribution(self.P)
     elif value is not None:
         # check sum is one
         _np.testing.assert_allclose(_np.sum(value), 1, atol=1e-14)
     self._pi = value
Example #6
def test_simulate_stats(msm):
    # test statistics of starting state
    N = 5000
    trajs = [msm.simulate(1, seed=i + 1) for i in range(N)]
    ss = np.concatenate(trajs).astype(int)
    pi = stationary_distribution(msm.transition_matrix)
    piest = count_states(ss) / float(N)
    np.testing.assert_allclose(piest, pi, atol=0.025)
    assert_(msm.stationary)
Example #7
def from_data(dtrajs, n_hidden_states, reversible):
    r""" Makes an initial guess :class:`HMM <HiddenMarkovModel>` with Gaussian output model.

    To this end, a Gaussian mixture model is estimated using `scikit-learn <https://scikit-learn.org/>`_.

    Parameters
    ----------
    dtrajs : array_like or list of array_like
        Trajectories which are used for making the initial guess.
    n_hidden_states : int
        Number of hidden states.
    reversible : bool
        Whether the hidden transition matrix is estimated so that it is reversible.

    Returns
    -------
    hmm_init : HiddenMarkovModel
        An initial guess for the HMM

    See Also
    --------
    deeptime.markov.hmm.GaussianOutputModel : The type of output model this heuristic uses.
    deeptime.markov.hmm.init.discrete.metastable_from_data
    deeptime.markov.hmm.init.discrete.metastable_from_msm
    """
    from deeptime.markov.hmm import HiddenMarkovModel, GaussianOutputModel
    from sklearn.mixture import GaussianMixture
    import deeptime.markov.tools.estimation as msmest
    import deeptime.markov.tools.analysis as msmana
    from deeptime.util.types import ensure_timeseries_data

    dtrajs = ensure_timeseries_data(dtrajs)
    collected_observations = np.concatenate(dtrajs)
    gmm = GaussianMixture(n_components=n_hidden_states)
    gmm.fit(collected_observations[:, None])
    output_model = GaussianOutputModel(n_hidden_states, means=gmm.means_[:, 0], sigmas=np.sqrt(gmm.covariances_[:, 0]))

    # Compute fractional state memberships.
    Nij = np.zeros((n_hidden_states, n_hidden_states))
    for o_t in dtrajs:
        # length of trajectory
        T = o_t.shape[0]
        # output probability
        pobs = output_model.to_state_probability_trajectory(o_t)
        # normalize
        pobs /= pobs.sum(axis=1)[:, None]
        # Accumulate fractional transition counts from this trajectory.
        for t in range(T - 1):
            Nij += np.outer(pobs[t, :], pobs[t + 1, :])

    # Compute transition matrix maximum likelihood estimate.
    transition_matrix = msmest.transition_matrix(Nij, reversible=reversible)
    initial_distribution = msmana.stationary_distribution(transition_matrix)
    return HiddenMarkovModel(transition_model=transition_matrix, output_model=output_model,
                             initial_distribution=initial_distribution)
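
A hedged usage sketch: the two-Gaussian observation trajectory below is made up, and the estimator mentioned in the final comment is only one possible way to refine the initial guess.

import numpy as np

# synthetic 1d observations drawn from two well-separated Gaussians
traj = np.concatenate([np.random.randn(500) - 3.0,
                       np.random.randn(500) + 3.0])
hmm_init = from_data(traj, n_hidden_states=2, reversible=True)
# hmm_init can then seed a maximum-likelihood HMM estimation, e.g. something like
# deeptime.markov.hmm.MaximumLikelihoodHMM(hmm_init, lagtime=1).fit(traj)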
Example #8
    def test_multiple_components(self):
        L = np.array([[0, 1, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0],
                      [1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1],
                      [0, 0, 0, 1, 0, 1, 0], [0, 0, 0, 1, 1, 0, 1],
                      [0, 0, 0, 1, 0, 1, 0]])

        transition_matrix = L / np.sum(L, 1).reshape(-1, 1)
        pi = np.zeros((transition_matrix.shape[0], ))
        for cs in connected_sets(transition_matrix):
            P_sub = transition_matrix[cs, :][:, cs]
            pi[cs] = stationary_distribution(P_sub)
        chi = pcca(transition_matrix, 2, pi=pi)
        expected = np.array([[0., 1.], [0., 1.], [0., 1.], [1., 0.], [1., 0.],
                             [1., 0.], [1., 0.]])
        np.testing.assert_equal(chi, expected)
Example #9
def is_reversible(P):
    """ Returns if P is reversible on its weakly connected sets """
    import deeptime.markov.tools.analysis as msmana
    # treat each weakly connected set separately
    sets = compute_connected_sets(P, directed=False)
    for s in sets:
        Ps = P[s, :][:, s]
        if not msmana.is_transition_matrix(Ps):
            return False  # isn't even a transition matrix!
        pi = msmana.stationary_distribution(Ps)
        X = pi[:, None] * Ps
        if not np.allclose(X, X.T):
            return False
    # survived.
    return True
Example #10
def enforce_reversible_on_closed(P):
    """ Enforces transition matrix P to be reversible on its closed sets. """
    import deeptime.markov.tools.analysis as msmana
    Prev = P.copy()
    # treat each closed set separately
    sets = closed_sets(P)
    for s in sets:
        indices = np.ix_(s, s)
        # compute stationary probability
        pi_s = msmana.stationary_distribution(P[indices])
        # symmetrize
        X_s = pi_s[:, None] * P[indices]
        X_s = 0.5 * (X_s + X_s.T)
        # normalize
        Prev[indices] = X_s / X_s.sum(axis=1)[:, None]
    return Prev
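
The symmetrization trick preserves the stationary distribution: because pi is stationary, the row sums of the symmetrized flux matrix X are again pi, so the renormalized matrix is reversible with respect to the same pi. A small sketch on a non-reversible 3-state cycle (toy numbers):

import numpy as np

# doubly stochastic 3-state cycle: stationary distribution is uniform, but not reversible
P = np.array([[0.8, 0.15, 0.05],
              [0.05, 0.8, 0.15],
              [0.15, 0.05, 0.8]])
pi = np.full(3, 1.0 / 3.0)
X = pi[:, None] * P          # flux matrix pi_i P_ij
X = 0.5 * (X + X.T)          # symmetrize to enforce detailed balance
P_rev = X / X.sum(axis=1)[:, None]
print(np.allclose(pi[:, None] * P_rev, (pi[:, None] * P_rev).T))  # True: reversible
print(np.allclose(pi @ P_rev, pi))                                # True: pi unchanged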
Example #11
 def _transition_matrix_samples(self, msm, given_pi):
     Psamples = [s.transition_matrix for s in msm.samples]
     # shape
     assert np.array_equal(np.shape(Psamples), (self.nsamples, self.n_states, self.n_states))
     # consistency
     import deeptime.markov.tools.analysis as msmana
     for P in Psamples:
         assert msmana.is_transition_matrix(P)
         try:
             assert msmana.is_reversible(P)
         except AssertionError:
             # re-do calculation msmtools just performed to get details
             from deeptime.markov.tools.analysis import stationary_distribution
             mu = stationary_distribution(P)
             X = mu[:, np.newaxis] * P
             np.testing.assert_allclose(X, np.transpose(X), atol=1e-12,
                                        err_msg="P not reversible, given_pi={}".format(given_pi))
Example #12
def test_birth_death_chain(fixed_seed, sparse):
    """Meta-stable birth-death chain"""
    b = 2
    q = np.zeros(7)
    p = np.zeros(7)
    q[1:] = 0.5
    p[0:-1] = 0.5
    q[2] = 1.0 - 10**(-b)
    q[4] = 10**(-b)
    p[2] = 10**(-b)
    p[4] = 1.0 - 10**(-b)

    bdc = deeptime.data.birth_death_chain(q, p)
    dtraj = bdc.msm.simulate(10000, start=0)
    tau = 1

    reference_count_matrix = msmest.count_matrix(dtraj, tau, sliding=True)
    reference_largest_connected_component = msmest.largest_connected_set(
        reference_count_matrix)
    reference_lcs = msmest.largest_connected_submatrix(
        reference_count_matrix, lcc=reference_largest_connected_component)
    reference_msm = msmest.transition_matrix(reference_lcs,
                                             reversible=True,
                                             maxerr=1e-8)
    reference_statdist = msmana.stationary_distribution(reference_msm)
    k = 3
    reference_timescales = msmana.timescales(reference_msm, k=k, tau=tau)

    msm = estimate_markov_model(dtraj, tau, sparse=sparse)
    assert_equal(tau, msm.count_model.lagtime)
    assert_array_equal(reference_largest_connected_component,
                       msm.count_model.connected_sets()[0])
    assert_(scipy.sparse.issparse(msm.count_model.count_matrix) == sparse)
    assert_(scipy.sparse.issparse(msm.transition_matrix) == sparse)
    if sparse:
        count_matrix = msm.count_model.count_matrix.toarray()
        transition_matrix = msm.transition_matrix.toarray()
    else:
        count_matrix = msm.count_model.count_matrix
        transition_matrix = msm.transition_matrix
    assert_array_almost_equal(reference_lcs.toarray(), count_matrix)
    assert_array_almost_equal(reference_count_matrix.toarray(), count_matrix)
    assert_array_almost_equal(reference_msm.toarray(), transition_matrix)
    assert_array_almost_equal(reference_statdist, msm.stationary_distribution)
    assert_array_almost_equal(reference_timescales[1:], msm.timescales(k - 1))
Example #13
 def _transition_matrix_samples(self, msm, given_pi):
     Psamples = msm.sample_f('transition_matrix')
     # shape
     assert np.array_equal(np.shape(Psamples),
                           (self.nsamples, self.nstates, self.nstates))
     # consistency
     for P in Psamples:
         assert is_transition_matrix(P)
         try:
             assert is_reversible(P)
         except AssertionError:
             mu = stationary_distribution(P)
             X = mu[:, np.newaxis] * P
             np.testing.assert_allclose(
                 X,
                 np.transpose(X),
                 atol=1e-12,
                 err_msg="P not reversible, given_pi={}".format(given_pi))
Example #14
    def __init__(self, C, P0=None, seed: int = -1):
        from deeptime.markov.tools.estimation import transition_matrix as tmatrix
        from deeptime.markov.tools.analysis import stationary_distribution
        from .._mle_bindings import RevSampler32, RevSampler64, RevSampler128

        if C.dtype not in (np.float32, np.float64, np.longdouble):
            dtype = np.float64
        else:
            dtype = C.dtype

        self.C = C.astype(dtype)
        """Set up initial state of the chain"""
        if P0 is None:
            # only do a few iterations to get close to the MLE and suppress not converged warning
            P0 = tmatrix(self.C,
                         reversible=True,
                         maxiter=100,
                         warn_not_converged=False)
            assert P0.dtype == self.C.dtype
        else:
            P0 = P0.astype(self.C.dtype)
        pi0 = stationary_distribution(P0).astype(self.C.dtype)
        V0 = pi0[:, np.newaxis] * P0

        self.V = V0
        self.c = self.C.sum(axis=1)
        """Check for valid input"""
        self.check_input()
        """Get nonzero indices"""
        self.I, self.J = np.where((self.C + self.C.T) > 0.0)
        """Init Vsampler"""
        if self.C.dtype == np.float32:
            self._sampler = RevSampler32(seed)
        elif self.C.dtype == np.float64:
            self._sampler = RevSampler64(seed)
        elif self.C.dtype == np.longdouble:
            self._sampler = RevSampler128(seed)
        else:
            raise ValueError(f"Unknown dtype {self.C.dtype}")
Example #15
    def test_pcca_coarsegrain(self):
        # fine-grained transition matrix
        P = np.array([[0.9, 0.1, 0.0, 0.0, 0.0], [0.1, 0.89, 0.01, 0.0, 0.0],
                      [0.0, 0.1, 0.8, 0.1, 0.0], [0.0, 0.0, 0.01, 0.79, 0.2],
                      [0.0, 0.0, 0.0, 0.2, 0.8]])
        from deeptime.markov.tools.analysis import stationary_distribution
        pi = stationary_distribution(P)
        Pi = np.diag(pi)
        m = 3
        # Susanna+Marcus' expression ------------
        M = pcca(P, m)
        pi_c = np.dot(M.T, pi)
        Pi_c_inv = np.diag(1.0 / pi_c)
        # restriction and interpolation operators
        R = M.T
        I = np.dot(np.dot(Pi, M), Pi_c_inv)
        # result
        ms1 = np.linalg.inv(np.dot(R, I)).T
        ms2 = np.dot(np.dot(I.T, P), R.T)
        Pc_ref = np.dot(ms1, ms2)
        # ---------------------------------------

        Pc = coarsegrain(P, 3)
        # test against Marcus+Susanna's expression
        assert np.max(np.abs(Pc - Pc_ref)) < 1e-10
        # test mass conservation
        assert np.allclose(Pc.sum(axis=1), np.ones(m))

        p = PCCA(P, m)
        # test against Marcus+Susanna's expression
        assert np.max(
            np.abs(p.coarse_grained_transition_matrix - Pc_ref)) < 1e-10
        # test against the present coarse-grained stationary dist
        assert np.max(
            np.abs(p.coarse_grained_stationary_probability - pi_c)) < 1e-10
        # test mass conservation
        assert np.allclose(p.coarse_grained_transition_matrix.sum(axis=1),
                           np.ones(m))
Example #16
    def setUp(self):
        P = np.array([[0.8, 0.15, 0.05, 0.0,
                       0.0], [0.1, 0.75, 0.05, 0.05, 0.05],
                      [0.05, 0.1, 0.8, 0.0, 0.05], [0.0, 0.2, 0.0, 0.8, 0.0],
                      [0.0, 0.02, 0.02, 0.0, 0.96]])
        P = csr_matrix(P)
        A = [0]
        B = [4]
        mu = stationary_distribution(P)
        qminus = committor(P, A, B, forward=False, mu=mu)
        qplus = committor(P, A, B, forward=True, mu=mu)
        self.A = A
        self.B = B
        self.F = flux_matrix(P, mu, qminus, qplus, netflux=True)

        self.paths = [
            np.array([0, 1, 4]),
            np.array([0, 2, 4]),
            np.array([0, 1, 2, 4])
        ]
        self.capacities = [
            0.0072033898305084252, 0.0030871670702178975,
            0.00051452784503631509
        ]
Example #17
def pcca(P, m, stationary_distribution=None):
    """PCCA+ spectral clustering method with optimized memberships.

    Implementation according to :footcite:`roblitz2013fuzzy`.
    Clusters the first m eigenvectors of a transition matrix in order to cluster the states.
    This function does not assume that the transition matrix is fully connected. Disconnected sets
    will automatically define the first metastable states, with perfect membership assignments.

    Parameters
    ----------
    P : ndarray (n,n)
        Transition matrix.
    m : int
        Number of clusters to group to.
    stationary_distribution : ndarray(n,), optional, default=None
        Stationary distribution over the full state space, can be given if already computed.

    References
    ----------
    .. footbibliography::
    """
    if m <= 0 or m > P.shape[0]:
        raise ValueError(
            "Number of metastable sets must be larger than 0 and can be at most as large as the number "
            "of states.")
    assert 0 < m <= P.shape[0]
    from scipy.sparse import issparse
    if issparse(P):
        warnings.warn(
            'PCCA is only implemented for dense matrices, '
            'converting sparse transition matrix to dense ndarray.',
            stacklevel=2)
        P = P.toarray()

    # stationary distribution
    if stationary_distribution is None:
        from deeptime.markov.tools.analysis import stationary_distribution
        pi = stationary_distribution(P)
    else:
        pi = stationary_distribution

    # memberships
    from .tools.analysis.dense._pcca import pcca as _algorithm_impl
    M = _algorithm_impl(P, m, pi)

    # coarse-grained stationary distribution
    pi_coarse = np.dot(M.T, pi)

    # HMM output matrix
    B = np.linalg.multi_dot([np.diag(1.0 / pi_coarse), M.T, np.diag(pi)])
    # renormalize B to make it row-stochastic
    B /= B.sum(axis=1)[:, None]

    # coarse-grained transition matrix
    W = np.linalg.inv(np.dot(M.T, M))
    A = np.dot(np.dot(M.T, P), M)
    P_coarse = np.dot(W, A)

    # symmetrize and renormalize to eliminate numerical errors
    X = np.dot(np.diag(pi_coarse), P_coarse)
    # and normalize
    P_coarse = X / X.sum(axis=1)[:, None]

    return PCCAModel(P_coarse, pi_coarse, M, B)
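
The HMM output-matrix step can be illustrated in isolation with a crisp membership matrix and a made-up stationary vector (a minimal NumPy sketch, not calling deeptime):

import numpy as np

M = np.array([[1., 0.], [1., 0.], [0., 1.], [0., 1.]])  # crisp memberships, 4 states -> 2 sets
pi = np.array([0.3, 0.2, 0.2, 0.3])                     # fine-grained stationary distribution
pi_coarse = M.T @ pi                                    # [0.5, 0.5]
# B[k, i] = probability of observing fine state i while in coarse set k
B = np.diag(1.0 / pi_coarse) @ M.T @ np.diag(pi)
B /= B.sum(axis=1)[:, None]
print(B)              # [[0.6, 0.4, 0., 0.], [0., 0., 0.4, 0.6]]
print(B.sum(axis=1))  # rows sum to 1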
Example #18
    def test_mle_trev_given_pi(self):
        C = np.loadtxt(testpath + 'C_1_lag.dat')
        pi = np.loadtxt(testpath + 'pi.dat')

        T_impl_algo_dense_type_dense = impl_dense(C, pi)
        T_impl_algo_sparse_type_sparse = impl_sparse(
            scipy.sparse.csr_matrix(C), pi).toarray()
        T_Frank = impl_dense_Frank(C, pi)
        T_api_algo_dense_type_dense = apicall(C,
                                              reversible=True,
                                              mu=pi,
                                              method='dense')
        T_api_algo_sparse_type_dense = apicall(C,
                                               reversible=True,
                                               mu=pi,
                                               method='sparse')
        T_api_algo_dense_type_sparse = apicall(scipy.sparse.csr_matrix(C),
                                               reversible=True,
                                               mu=pi,
                                               method='dense').toarray()
        T_api_algo_sparse_type_sparse = apicall(scipy.sparse.csr_matrix(C),
                                                reversible=True,
                                                mu=pi,
                                                method='sparse').toarray()
        T_api_algo_auto_type_dense = apicall(C,
                                             reversible=True,
                                             mu=pi,
                                             method='auto')
        T_api_algo_auto_type_sparse = apicall(scipy.sparse.csr_matrix(C),
                                              reversible=True,
                                              mu=pi,
                                              method='auto').toarray()

        assert_allclose(T_impl_algo_dense_type_dense, T_Frank)
        assert_allclose(T_impl_algo_sparse_type_sparse, T_Frank)
        assert_allclose(T_api_algo_dense_type_dense, T_Frank)
        assert_allclose(T_api_algo_sparse_type_dense, T_Frank)
        assert_allclose(T_api_algo_dense_type_sparse, T_Frank)
        assert_allclose(T_api_algo_sparse_type_sparse, T_Frank)
        assert_allclose(T_api_algo_auto_type_dense, T_Frank)
        assert_allclose(T_api_algo_auto_type_sparse, T_Frank)

        assert is_transition_matrix(T_Frank)
        assert is_transition_matrix(T_impl_algo_dense_type_dense)
        assert is_transition_matrix(T_impl_algo_sparse_type_sparse)
        assert is_transition_matrix(T_api_algo_dense_type_dense)
        assert is_transition_matrix(T_api_algo_sparse_type_dense)
        assert is_transition_matrix(T_api_algo_dense_type_sparse)
        assert is_transition_matrix(T_api_algo_sparse_type_sparse)
        assert is_transition_matrix(T_api_algo_auto_type_dense)
        assert is_transition_matrix(T_api_algo_auto_type_sparse)

        assert_allclose(stationary_distribution(T_Frank), pi)
        assert_allclose(stationary_distribution(T_impl_algo_dense_type_dense),
                        pi)
        assert_allclose(
            stationary_distribution(T_impl_algo_sparse_type_sparse), pi)
        assert_allclose(stationary_distribution(T_api_algo_dense_type_dense),
                        pi)
        assert_allclose(stationary_distribution(T_api_algo_sparse_type_dense),
                        pi)
        assert_allclose(stationary_distribution(T_api_algo_dense_type_sparse),
                        pi)
        assert_allclose(stationary_distribution(T_api_algo_sparse_type_sparse),
                        pi)
        assert_allclose(stationary_distribution(T_api_algo_auto_type_dense),
                        pi)
        assert_allclose(stationary_distribution(T_api_algo_auto_type_sparse),
                        pi)
Example #19
def _pcca_connected(P, n, pi=None):
    r"""PCCA+ spectral clustering method with optimized memberships [1]_

    Clusters the first n_cluster eigenvectors of a transition matrix in order to cluster the states.
    This function assumes that the transition matrix is fully connected.

    Parameters
    ----------
    P : ndarray (n,n)
        Transition matrix.
    n : int
        Number of clusters to group to.
    pi: ndarray(n,), optional, default=None
        Stationary distribution if available.

    Returns
    -------
    chi : ndarray (n x m)
        A matrix containing the probability or membership of each state to be assigned to each cluster.
        The rows sum to 1.

    References
    ----------
    [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
        application to Markov state models and data classification.
        Adv Data Anal Classif 7, 147-179 (2013).
    """

    # test connectivity
    from deeptime.markov.tools.estimation import connected_sets

    labels = connected_sets(P)
    n_components = len(
        labels
    )  # (n_components, labels) = connected_components(P, connection='strong')
    if n_components > 1:
        raise ValueError(
            "Transition matrix is disconnected. Cannot use pcca_connected.")

    if pi is None:
        from deeptime.markov.tools.analysis import stationary_distribution
        pi = stationary_distribution(P)
    else:
        if pi.shape[0] != P.shape[0]:
            raise ValueError(
                f"Stationary distribution must span entire state space but got {pi.shape[0]} states "
                f"instead of {P.shape[0]}.")
        pi /= pi.sum()  # make sure it is normalized

    from deeptime.markov.tools.analysis import is_reversible

    if not is_reversible(P, mu=pi):
        raise ValueError(
            "Transition matrix does not fulfill detailed balance. "
            "Make sure to call pcca with a reversible transition matrix estimate"
        )
    # TODO: Susanna mentioned that she has a potential fix for nonreversible matrices by replacing each complex conjugate
    #      pair by the real and imaginary components of one of the two vectors. We could use this but would then need to
    #      orthonormalize all eigenvectors e.g. using Gram-Schmidt orthonormalization. Currently there is no theoretical
    #      foundation for this, so I'll skip it for now.

    # right eigenvectors, ordered
    from deeptime.markov.tools.analysis import eigenvectors

    evecs = eigenvectors(P, n)

    # orthonormalize
    for i in range(n):
        evecs[:, i] /= math.sqrt(np.dot(evecs[:, i] * pi, evecs[:, i]))
    # make first eigenvector positive
    evecs[:, 0] = np.abs(evecs[:, 0])

    # Is there a significant complex component?
    if not np.all(np.isreal(evecs)):
        warnings.warn(
            "The given transition matrix has complex eigenvectors, so it doesn't exactly fulfill detailed balance. "
            "Forcing eigenvectors to be real and continuing. Be aware that this is not theoretically solid."
        )
    evecs = np.real(evecs)

    # create initial solution using PCCA+. This could have negative memberships
    chi, rot_matrix = _pcca_connected_isa(evecs, n)

    # optimize the rotation matrix with PCCA++.
    rot_matrix = _opt_soft(evecs, rot_matrix, n)

    # These memberships should be nonnegative
    memberships = np.dot(evecs[:, :], rot_matrix)

    # We might still have numerical errors. Force memberships to be in [0,1]
    memberships = np.clip(memberships, 0., 1.)

    for i in range(0, np.shape(memberships)[0]):
        memberships[i] /= np.sum(memberships[i])

    return memberships
Example #20
    def setUp(self):
        # 5-state toy system
        self.P = np.array([[0.8, 0.15, 0.05, 0.0, 0.0],
                           [0.1, 0.75, 0.05, 0.05, 0.05],
                           [0.05, 0.1, 0.8, 0.0, 0.05],
                           [0.0, 0.2, 0.0, 0.8, 0.0],
                           [0.0, 0.02, 0.02, 0.0, 0.96]])
        self.A = [0]
        self.B = [4]
        self.I = [1, 2, 3]

        # REFERENCE SOLUTION FOR PATH DECOMP
        self.ref_committor = np.array(
            [0., 0.35714286, 0.42857143, 0.35714286, 1.])
        self.ref_backwardcommittor = np.array(
            [1., 0.65384615, 0.53125, 0.65384615, 0.])
        self.ref_grossflux = np.array(
            [[0., 0.00771792, 0.00308717, 0., 0.],
             [0., 0., 0.00308717, 0.00257264, 0.00720339],
             [0., 0.00257264, 0., 0., 0.00360169],
             [0., 0.00257264, 0., 0., 0.], [0., 0., 0., 0., 0.]])
        self.ref_netflux = np.array([[
            0.00000000e+00, 7.71791768e-03, 3.08716707e-03, 0.00000000e+00,
            0.00000000e+00
        ],
                                     [
                                         0.00000000e+00, 0.00000000e+00,
                                         5.14527845e-04, 0.00000000e+00,
                                         7.20338983e-03
                                     ],
                                     [
                                         0.00000000e+00, 0.00000000e+00,
                                         0.00000000e+00, 0.00000000e+00,
                                         3.60169492e-03
                                     ],
                                     [
                                         0.00000000e+00, 4.33680869e-19,
                                         0.00000000e+00, 0.00000000e+00,
                                         0.00000000e+00
                                     ],
                                     [
                                         0.00000000e+00, 0.00000000e+00,
                                         0.00000000e+00, 0.00000000e+00,
                                         0.00000000e+00
                                     ]])
        self.ref_totalflux = 0.0108050847458
        self.ref_kAB = 0.0272727272727
        self.ref_mfptAB = 36.6666666667

        self.ref_paths = [[0, 1, 4], [0, 2, 4], [0, 1, 2, 4]]
        self.ref_pathfluxes = np.array(
            [0.00720338983051, 0.00308716707022, 0.000514527845036])

        self.ref_paths_95percent = [[0, 1, 4], [0, 2, 4]]
        self.ref_pathfluxes_95percent = np.array(
            [0.00720338983051, 0.00308716707022])
        self.ref_majorflux_95percent = np.array(
            [[0., 0.00720339, 0.00308717, 0., 0.],
             [0., 0., 0., 0., 0.00720339], [0., 0., 0., 0., 0.00308717],
             [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]])

        # Testing:
        self.tpt1 = compute_reactive_flux(self.P, self.A, self.B)

        # 16-state toy system
        P2_nonrev = np.array([[
            0.5, 0.2, 0.0, 0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
            0.0, 0.0, 0.0
        ],
                              [
                                  0.2, 0.5, 0.1, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0,
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.1, 0.5, 0.2, 0.0, 0.0, 0.2, 0.0, 0.0,
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.1, 0.5, 0.0, 0.0, 0.0, 0.4, 0.0,
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.3, 0.0, 0.0, 0.0, 0.5, 0.1, 0.0, 0.0, 0.1,
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.1, 0.0, 0.0, 0.2, 0.5, 0.1, 0.0, 0.0,
                                  0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.1, 0.0, 0.0, 0.1, 0.5, 0.2, 0.0,
                                  0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.3, 0.5, 0.0,
                                  0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.5,
                                  0.1, 0.0, 0.0, 0.3, 0.0, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.2,
                                  0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0,
                                  0.1, 0.5, 0.1, 0.0, 0.0, 0.2, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0,
                                  0.0, 0.2, 0.5, 0.0, 0.0, 0.0, 0.2
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3,
                                  0.0, 0.0, 0.0, 0.5, 0.2, 0.0, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                  0.1, 0.0, 0.0, 0.3, 0.5, 0.1, 0.0
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                  0.0, 0.2, 0.0, 0.0, 0.1, 0.5, 0.2
                              ],
                              [
                                  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                                  0.0, 0.0, 0.3, 0.0, 0.0, 0.2, 0.5
                              ]])
        pstat2_nonrev = msmana.stationary_distribution(P2_nonrev)
        # make reversible
        C = np.dot(np.diag(pstat2_nonrev), P2_nonrev)
        Csym = C + C.T
        self.P2 = Csym / np.sum(Csym, axis=1)[:, np.newaxis]
        pstat2 = msmana.stationary_distribution(self.P2)
        self.A2 = [0, 4]
        self.B2 = [11, 15]
        self.coarsesets2 = [
            [2, 3, 6, 7],
            [10, 11, 14, 15],
            [0, 1, 4, 5],
            [8, 9, 12, 13],
        ]

        # REFERENCE SOLUTION CG
        self.ref2_tpt_sets = [
            set([0, 4]),
            set([2, 3, 6, 7]),
            set([10, 14]),
            set([1, 5]),
            set([8, 9, 12, 13]),
            set([11, 15])
        ]
        self.ref2_cgA = [0]
        self.ref2_cgI = [1, 2, 3, 4]
        self.ref2_cgB = [5]
        self.ref2_cgpstat = np.array([
            0.15995388, 0.18360442, 0.12990937, 0.11002342, 0.31928127,
            0.09722765
        ])
        self.ref2_cgcommittor = np.array(
            [0., 0.56060272, 0.73052426, 0.19770537, 0.36514272, 1.])
        self.ref2_cgbackwardcommittor = np.array(
            [1., 0.43939728, 0.26947574, 0.80229463, 0.63485728, 0.])
        self.ref2_cggrossflux = np.array(
            [[0., 0., 0., 0.00427986, 0.00282259, 0.],
             [0., 0, 0.00234578, 0.00104307, 0., 0.00201899],
             [0., 0.00113892, 0, 0., 0.00142583, 0.00508346],
             [0., 0.00426892, 0., 0, 0.00190226, 0.],
             [0., 0., 0.00530243, 0.00084825, 0, 0.], [0., 0., 0., 0., 0.,
                                                       0.]])
        self.ref2_cgnetflux = np.array(
            [[0., 0., 0., 0.00427986, 0.00282259, 0.],
             [0., 0., 0.00120686, 0., 0., 0.00201899],
             [0., 0., 0., 0., 0., 0.00508346],
             [0., 0.00322585, 0., 0., 0.00105401, 0.],
             [0., 0., 0.0038766, 0., 0., 0.], [0., 0., 0., 0., 0., 0.]])

        # Testing
        self.tpt2 = compute_reactive_flux(self.P2, self.A2, self.B2)
Example #21
def plot_markov_model(P,
                      pos=None,
                      state_sizes=None,
                      state_scale=1.0,
                      state_colors='#ff5500',
                      state_labels='auto',
                      minflux=1e-6,
                      arrow_scale=1.0,
                      arrow_curvature=1.0,
                      arrow_labels='weights',
                      arrow_label_format='%2.e',
                      max_width=12,
                      max_height=12,
                      figpadding=0.2,
                      show_frame=False,
                      ax=None,
                      **textkwargs):
    r"""Network representation of MSM transition matrix

    This visualization is not optimized for large matrices. It is meant to be
    used for the visualization of small models with up to 10-20 states, e.g.
    obtained by an HMM coarse-graining. If used with a large network, the
    automatic node positioning will be very slow and may still look ugly.

    Parameters
    ----------
    P : ndarray(n,n) or MSM object with attribute 'transition_matrix'
        Transition matrix or MSM object
    pos : ndarray(n,2), optional, default=None
        User-defined positions to draw the states on. If not given, will try
        to place them automatically.
    state_sizes : ndarray(n), optional, default=None
        User-defined areas of the discs drawn for each state. If not given,
        the stationary probability of P will be used.
    state_colors : string, ndarray(n), or list, optional, default='#ff5500' (orange)
        string :
            a Hex code for a single color used for all states
        array :
            n values in [0,1] which will result in a grayscale plot
        list :
            of len = nstates, with a color for each state. The list can mix strings, RGB values and
            hex codes, e.g. :py:obj:`state_colors` = ['g', 'red', [.23, .34, .35], '#ff5500'] is
            possible.
    state_labels : list of strings, optional, default is 'auto'
        A list with a label for each state, to be displayed at the center
        of each node/state. If left to 'auto', the labels are automatically set to the state
        indices.
    minflux : float, optional, default=1e-6
        The minimal flux (p_i * p_ij) for a transition to be drawn
    arrow_scale : float, optional, default=1.0
        Relative arrow scale. Set to a value different from 1 to increase
        or decrease the arrow width.
    arrow_curvature : float, optional, default=1.0
        Relative arrow curvature. Set to a value different from 1 to make
        arrows more or less curved.
    arrow_labels : 'weights', None or a ndarray(n,n) with label strings. Optional, default='weights'
        Strings to be placed upon arrows. If None, no labels will be used.
        If 'weights', the elements of P will be used. If a matrix of strings is
        given by the user these will be used.
    arrow_label_format : str, optional, default='%2.e'
        The numeric format to print the arrow labels
    max_width : float, optional, default=12
        The maximum figure width
    max_height : float, optional, default=12
        The maximum figure height
    figpadding : float, optional, default=0.2
        The relative figure size used for the padding
    show_frame : bool, optional, default=False
        Draw a frame around the network.
    ax : matplotlib Axes object, optional, default=None
        The axes to plot to. When set to None a new Axes (and Figure) object will be used.
    textkwargs : optional argument for the text of the state and arrow labels.
        See http://matplotlib.org/api/text_api.html#matplotlib.text.Text for more info. The
        parameter 'size' refers to the size of the state and arrow labels and overwrites the
        matplotlib default. The parameter 'arrow_label_size' is only used for the arrow labels;
        please note that 'arrow_label_size' is not part of matplotlib.text.Text's set of parameters
        and will raise an exception when passed to matplotlib.text.Text directly.

    Returns
    -------
    fig, pos : matplotlib.Figure, ndarray(n,2)
        A Figure object containing the plot and the positions of states.
        Can be used later to plot a different network representation (e.g. the flux).

    Examples
    --------
    >>> import numpy as np
    >>> P = np.array([[0.8,  0.15, 0.05,  0.0,  0.0],
    ...              [0.1,  0.75, 0.05, 0.05, 0.05],
    ...              [0.05,  0.1,  0.8,  0.0,  0.05],
    ...              [0.0,  0.2, 0.0,  0.8,  0.0],
    ...              [0.0,  0.02, 0.02, 0.0,  0.96]])
    >>> plot_markov_model(P) # doctest:+ELLIPSIS
    (<...Figure..., array...)

    """
    from deeptime.markov.tools.analysis import stationary_distribution
    if isinstance(P, _np.ndarray):
        P = P.copy()
    else:
        # MSM object? then get transition matrix first
        P = P.transition_matrix.copy()
    if state_sizes is None:
        state_sizes = stationary_distribution(P)
    if minflux > 0:
        F = _np.dot(_np.diag(stationary_distribution(P)), P)
        I, J = _np.where(F < minflux)
        P[I, J] = 0.0
    plot = NetworkPlot(P, pos=pos, ax=ax)
    fig = plot.plot_network(state_sizes=state_sizes,
                            state_scale=state_scale,
                            state_colors=state_colors,
                            state_labels=state_labels,
                            arrow_scale=arrow_scale,
                            arrow_curvature=arrow_curvature,
                            arrow_labels=arrow_labels,
                            arrow_label_format=arrow_label_format,
                            max_width=max_width,
                            max_height=max_height,
                            figpadding=figpadding,
                            xticks=False,
                            yticks=False,
                            show_frame=show_frame,
                            **textkwargs)
    return fig, plot.pos
Example #22
def compute_reactive_flux(
        transition_matrix: np.ndarray,
        source_states: Iterable[int],
        target_states: Iterable[int],
        stationary_distribution=None,
        qminus=None,
        qplus=None,
        transition_matrix_tolerance: Optional[float] = None) -> ReactiveFlux:
    r""" Computes the A->B reactive flux using transition path theory (TPT).

    Parameters
    ----------
    transition_matrix : (M, M) ndarray or scipy.sparse matrix
        The transition matrix.
    source_states : array_like
        List of integer state labels for set A
    target_states : array_like
        List of integer state labels for set B
    stationary_distribution : (M,) ndarray, optional, default=None
        Stationary vector. If None, it is computed from the transition matrix internally.
    qminus : (M,) ndarray (optional)
        Backward committor for A->B reaction
    qplus : (M,) ndarray (optional)
        Forward committor for A-> B reaction
    transition_matrix_tolerance : float, optional, default=None
        Tolerance with which is checked whether the input is actually a transition matrix. If None (default),
        no check is performed.

    Returns
    -------
    tpt: deeptime.markov.tools.flux.ReactiveFlux object
        A python object containing the reactive A->B flux network
        and several additional quantities, such as stationary probability,
        committors and set definitions.

    Notes
    -----
    The central objects used in transition path theory are the forward and backward committor functions.

    TPT (originally introduced in :footcite:`weinan2006towards`) for continuous systems has a
    discrete version outlined in :footcite:`metzner2009transition`. Here, we use the transition
    matrix formulation described in :footcite:`noe2009constructing`.

    See also
    --------
    ReactiveFlux

    References
    ----------
    .. footbibliography::
    """
    import deeptime.markov.tools.analysis as msmana

    source_states = ensure_array(source_states, dtype=int)
    target_states = ensure_array(target_states, dtype=int)

    if len(source_states) == 0 or len(target_states) == 0:
        raise ValueError('set A or B is empty')

    n_states = transition_matrix.shape[0]
    if len(source_states) > n_states or len(target_states) > n_states \
            or max(source_states) > n_states or max(target_states) > n_states:
        raise ValueError(
            'set A or B defines more states than the given transition matrix.')

    if transition_matrix_tolerance is not None and \
            not msmana.is_transition_matrix(transition_matrix, tol=transition_matrix_tolerance):
        raise ValueError('given matrix T is not a transition matrix')

    # we can compute the following properties from either dense or sparse T
    # stationary dist
    if stationary_distribution is None:
        stationary_distribution = msmana.stationary_distribution(
            transition_matrix)
    # forward committor
    if qplus is None:
        qplus = msmana.committor(transition_matrix,
                                 source_states,
                                 target_states,
                                 forward=True)
    # backward committor
    if qminus is None:
        if msmana.is_reversible(transition_matrix, mu=stationary_distribution):
            qminus = 1.0 - qplus
        else:
            qminus = msmana.committor(transition_matrix,
                                      source_states,
                                      target_states,
                                      forward=False,
                                      mu=stationary_distribution)
    # gross flux
    grossflux = tptapi.flux_matrix(transition_matrix,
                                   stationary_distribution,
                                   qminus,
                                   qplus,
                                   netflux=False)
    # net flux
    netflux = to_netflux(grossflux)

    # construct flux object
    return ReactiveFlux(source_states,
                        target_states,
                        net_flux=netflux,
                        stationary_distribution=stationary_distribution,
                        qminus=qminus,
                        qplus=qplus,
                        gross_flux=grossflux)
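
A short usage sketch on the 5-state matrix from the test fixtures above, going through the MarkovStateModel.reactive_flux convenience call and the attribute names used in the coarse-graining test below:

import numpy as np
from deeptime.markov.msm import MarkovStateModel

# 5-state toy matrix, A = {0}, B = {4}
P = np.array([[0.8, 0.15, 0.05, 0.0, 0.0],
              [0.1, 0.75, 0.05, 0.05, 0.05],
              [0.05, 0.1, 0.8, 0.0, 0.05],
              [0.0, 0.2, 0.0, 0.8, 0.0],
              [0.0, 0.02, 0.02, 0.0, 0.96]])
flux = MarkovStateModel(P).reactive_flux([0], [4])
print(flux.forward_committor)  # approx. [0., 0.357, 0.429, 0.357, 1.]
print(flux.net_flux)           # net A -> B flux matrix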
Example #23
def test_coarse_grain(sparse_mode):
    # 16-state toy system
    P_nonrev = np.array([[
        0.5, 0.2, 0.0, 0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0
    ],
                         [
                             0.2, 0.5, 0.1, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.1, 0.5, 0.2, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.1, 0.5, 0.0, 0.0, 0.0, 0.4, 0.0, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.3, 0.0, 0.0, 0.0, 0.5, 0.1, 0.0, 0.0, 0.1, 0.0,
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.1, 0.0, 0.0, 0.2, 0.5, 0.1, 0.0, 0.0, 0.1,
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.1, 0.0, 0.0, 0.1, 0.5, 0.2, 0.0, 0.0,
                             0.1, 0.0, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.3, 0.5, 0.0, 0.0,
                             0.0, 0.1, 0.0, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.5, 0.1,
                             0.0, 0.0, 0.3, 0.0, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.2, 0.5,
                             0.1, 0.0, 0.0, 0.1, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.1,
                             0.5, 0.1, 0.0, 0.0, 0.2, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0,
                             0.2, 0.5, 0.0, 0.0, 0.0, 0.2
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.0,
                             0.0, 0.0, 0.5, 0.2, 0.0, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1,
                             0.0, 0.0, 0.3, 0.5, 0.1, 0.0
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.2, 0.0, 0.0, 0.1, 0.5, 0.2
                         ],
                         [
                             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                             0.0, 0.3, 0.0, 0.0, 0.2, 0.5
                         ]])
    pstat2_nonrev = stationary_distribution(P_nonrev)
    # make reversible
    C = np.dot(np.diag(pstat2_nonrev), P_nonrev)
    Csym = C + C.T
    P = Csym / np.sum(Csym, axis=1)[:, np.newaxis]
    if sparse_mode:
        P = sparse.csr_matrix(P)
    msm = MarkovStateModel(P)
    tpt = msm.reactive_flux([0, 4], [11, 15])
    coarse_sets = [
        [2, 3, 6, 7],
        [10, 11, 14, 15],
        [0, 1, 4, 5],
        [8, 9, 12, 13],
    ]
    tpt_sets, cgRF = tpt.coarse_grain(coarse_sets)
    assert_equal(
        tpt_sets,
        [{0, 4}, {2, 3, 6, 7}, {10, 14}, {1, 5}, {8, 9, 12, 13}, {11, 15}])
    assert_equal(cgRF.source_states, [0])
    assert_equal(cgRF.intermediate_states, [1, 2, 3, 4])
    assert_equal(cgRF.target_states, [5])
    assert_array_almost_equal(
        cgRF.stationary_distribution,
        np.array([
            0.15995388, 0.18360442, 0.12990937, 0.11002342, 0.31928127,
            0.09722765
        ]))
    assert_array_almost_equal(
        cgRF.forward_committor,
        np.array([0., 0.56060272, 0.73052426, 0.19770537, 0.36514272, 1.]))
    assert_array_almost_equal(
        cgRF.backward_committor,
        np.array([1., 0.43939728, 0.26947574, 0.80229463, 0.63485728, 0.]))
    assert_array_almost_equal(
        _to_dense(cgRF.net_flux),
        np.array([[0., 0., 0., 0.00427986, 0.00282259, 0.],
                  [0., 0., 0.00120686, 0., 0., 0.00201899],
                  [0., 0., 0., 0., 0., 0.00508346],
                  [0., 0.00322585, 0., 0., 0.00105401, 0.],
                  [0., 0., 0.0038766, 0., 0., 0.], [0., 0., 0., 0., 0., 0.]]))
    assert_array_almost_equal(
        _to_dense(cgRF.gross_flux),
        np.array([[0., 0., 0., 0.00427986, 0.00282259, 0.],
                  [0., 0, 0.00234578, 0.00104307, 0., 0.00201899],
                  [0., 0.00113892, 0, 0., 0.00142583, 0.00508346],
                  [0., 0.00426892, 0., 0, 0.00190226, 0.],
                  [0., 0., 0.00530243, 0.00084825, 0, 0.],
                  [0., 0., 0., 0., 0., 0.]]),
        decimal=6)
Example #24
def test_statdist(mode, stationary_vector_data):
    P = stationary_vector_data.transition_matrix
    mu = stationary_vector_data.stationary_distribution
    mun = stationary_distribution(P, mode=mode)
    assert_allclose(mu, mun)
Example #25
def test_statdist(scenario):
    _, bdc = scenario
    P = bdc.transition_matrix
    mu = bdc.stationary_distribution
    mun = stationary_distribution(P)
    assert_allclose(mu, mun)
Example #26
 def test_statdist(self):
     P = self.bdc.transition_matrix
     mu = self.bdc.stationary_distribution
     mun = stationary_distribution(P)
     assert_allclose(mu, mun)
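
Taken together, the birth-death fixtures above reduce to the following round-trip check (a sketch reusing the chain parameters and the birth_death_chain attributes exactly as they appear in the tests above):

import numpy as np
from deeptime.data import birth_death_chain
from deeptime.markov.tools.analysis import stationary_distribution

# metastable birth-death chain, same parameters as in the fixtures above
b = 2
q = np.zeros(7)
p = np.zeros(7)
q[1:] = 0.5
p[0:-1] = 0.5
q[2] = 1.0 - 10 ** (-b)
q[4] = 10 ** (-b)
p[2] = 10 ** (-b)
p[4] = 1.0 - 10 ** (-b)

bdc = birth_death_chain(q, p)
P = bdc.transition_matrix
# numerically computed stationary distribution matches the chain's analytical one
np.testing.assert_allclose(stationary_distribution(P), bdc.stationary_distribution)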