Example #1
    def test_rdl_decomposition(self):
        P=self.bdc.transition_matrix()
        mu=self.bdc.stationary_distribution()

        """Non-reversible"""

        """k=None"""
        Rn, Dn, Ln=rdl_decomposition(P)        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(P, Rn), np.dot(Rn, Dn)))
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(Ln, P), np.dot(Dn, Ln)))
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.dim)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))

        """k is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, k=self.k)        
        Xn=np.dot(Ln, Rn)               
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(P, Rn), np.dot(Rn, Dn)))
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(Ln, P), np.dot(Dn, Ln)))
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))

        """Reversible"""

        """k=None"""
        Rn, Dn, Ln=rdl_decomposition(P, norm='reversible')        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(P, Rn), np.dot(Rn, Dn)))
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(Ln, P), np.dot(Dn, Ln)))
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.dim)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))   
        """Reversibility"""
        self.assertTrue(np.allclose(Ln.transpose(), mu[:,np.newaxis]*Rn))

        """k is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, norm='reversible', k=self.k)        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(P, Rn), np.dot(Rn, Dn)))
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(np.dot(Ln, P), np.dot(Dn, Ln)))
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))   
        """Reversibility"""
        self.assertTrue(np.allclose(Ln.transpose(), mu[:,np.newaxis]*Rn))
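A minimal numpy-only sketch (no library import) of the factorization these assertions describe: right eigenvectors as columns of R, eigenvalues on the diagonal of D, left eigenvectors as rows of L, with L R = I and the first left eigenvector normalized to a probability vector.

import numpy as np

# Small row-stochastic transition matrix (3 states).
P = np.array([[0.9, 0.1, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.8]])

w, R = np.linalg.eig(P)                    # columns of R are right eigenvectors
order = np.argsort(np.abs(w))[::-1]        # put the Perron eigenvalue 1 first
w, R = w[order], R[:, order]

D = np.diag(w)
L = np.linalg.inv(R)                       # rows of L are left eigenvectors, L R = I

# Rescale so that the first left eigenvector sums to one (stationary vector)
# while keeping L R = I.
s = L[0, :].sum()
L[0, :] /= s
R[:, 0] *= s

assert np.allclose(P @ R, R @ D)           # right eigenvectors
assert np.allclose(L @ P, D @ L)           # left eigenvectors
assert np.allclose(L @ R, np.eye(3))       # biorthonormality
assert np.allclose(L[0, :].sum(), 1.0)     # probability vector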
Example #2
 def test_rdl_decomposition(self):
     P = self.bdc.transition_matrix()
     mu = self.bdc.stationary_distribution()
     """Non-reversible"""
     """k=None"""
     Rn, Dn, Ln = rdl_decomposition(P)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(np.dot(P, Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(np.dot(Ln, P), np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.dim))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """k is not None"""
     Rn, Dn, Ln = rdl_decomposition(P, k=self.k)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(np.dot(P, Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(np.dot(Ln, P), np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversible"""
     """k=None"""
     Rn, Dn, Ln = rdl_decomposition(P, norm='reversible')
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(np.dot(P, Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(np.dot(Ln, P), np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.dim))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversibility"""
     assert_allclose(Ln.transpose(), mu[:, np.newaxis] * Rn)
     """k is not None"""
     Rn, Dn, Ln = rdl_decomposition(P, norm='reversible', k=self.k)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(np.dot(P, Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(np.dot(Ln, P), np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversibility"""
     assert_allclose(Ln.transpose(), mu[:, np.newaxis] * Rn)
Example #3
    def setUp(self):
        p=np.zeros(10)
        q=np.zeros(10)
        p[0:-1]=0.5
        q[1:]=0.5
        p[4]=0.01
        q[6]=0.1

        self.bdc=BirthDeathChain(q, p)
        
        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix()

        """Test matrix-vector product against spectral decomposition"""        
        R, D, L=rdl_decomposition(self.T)
        self.L=L
        self.R=R
        self.ts=timescales(self.T)
        self.times=np.array([1, 5, 10, 20, 100])

        ev=np.diagonal(D)
        self.ev_t=ev[np.newaxis,:]**self.times[:,np.newaxis]

        self.k=4

        """Observable"""
        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        self.obs=obs1

        """Initial distribution"""
        w0=np.zeros(10)
        w0[0:4]=0.25
        self.p0=w0     
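BirthDeathChain is a test helper whose code is not shown here. As an illustration only (an assumption about its convention, not its actual implementation), the tridiagonal transition matrix of a birth-death chain with "up" probabilities p and "down" probabilities q could be built like this:

import numpy as np

def birth_death_transition_matrix(q, p):
    """Assumed convention: q[i] steps i -> i-1, p[i] steps i -> i+1,
    and the remaining probability mass stays on the diagonal."""
    n = len(p)
    P = np.zeros((n, n))
    for i in range(n):
        if i > 0:
            P[i, i - 1] = q[i]
        if i < n - 1:
            P[i, i + 1] = p[i]
        P[i, i] = 1.0 - p[i] - q[i]
    return P

p = np.zeros(10); p[0:-1] = 0.5; p[4] = 0.01
q = np.zeros(10); q[1:] = 0.5; q[6] = 0.1
T = birth_death_transition_matrix(q, p)
assert np.allclose(T.sum(axis=1), 1.0)     # rows are probability vectors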
Example #4
    def setUp(self):
        p = np.zeros(10)
        q = np.zeros(10)
        p[0:-1] = 0.5
        q[1:] = 0.5
        p[4] = 0.01
        q[6] = 0.1

        self.bdc = BirthDeathChain(q, p)

        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix()
        """Test matrix-vector product against spectral decomposition"""
        R, D, L = rdl_decomposition(self.T)
        self.L = L
        self.R = R
        self.ts = timescales(self.T)
        self.times = np.array([1, 5, 10, 20, 100])

        ev = np.diagonal(D)
        self.ev_t = ev[np.newaxis, :]**self.times[:, np.newaxis]

        self.k = 4
        """Observable"""
        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        self.obs = obs1
        """Initial distribution"""
        w0 = np.zeros(10)
        w0[0:4] = 0.25
        self.p0 = w0
Example #5
    def setUp(self):
        p = np.zeros(10)
        q = np.zeros(10)
        p[0:-1] = 0.5
        q[1:] = 0.5
        p[4] = 0.01
        q[6] = 0.1

        self.bdc = BirthDeathChain(q, p)

        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix()
        R, D, L = rdl_decomposition(self.T, norm='reversible')
        self.L = L
        self.R = R
        self.ts = timescales(self.T)
        self.times = np.array([1, 5, 10, 20, 100])

        ev = np.diagonal(D)
        self.ev_t = ev[np.newaxis, :]**self.times[:, np.newaxis]

        self.k = 4

        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        obs2 = np.zeros(10)
        obs2[8] = 1
        obs2[9] = 1

        self.obs1 = obs1
        self.obs2 = obs2
        self.one_vec = np.ones(10)
Example #6
    def setUp(self):
        p=np.zeros(10)
        q=np.zeros(10)
        p[0:-1]=0.5
        q[1:]=0.5
        p[4]=0.01
        q[6]=0.1

        self.bdc=BirthDeathChain(q, p)
        
        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix()
        R, D, L=rdl_decomposition(self.T, norm='reversible')
        self.L=L
        self.R=R
        self.ts=timescales(self.T)
        self.times=np.array([1, 5, 10, 20, 100])

        ev=np.diagonal(D)
        self.ev_t=ev[np.newaxis,:]**self.times[:,np.newaxis]

        self.k=4

        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        obs2 = np.zeros(10)
        obs2[8] = 1
        obs2[9] = 1

        self.obs1=obs1
        self.obs2=obs2
        self.one_vec=np.ones(10)
Example #7
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
    r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.
    
    The time-correlation at time=k is computed by the matrix-vector expression: 
    cor(k) = obs1' diag(pi) P^k obs2
    
    
    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    pi : ndarray, shape=(n)
        Stationary distribution vector
    obs1 : ndarray, shape=(n)
        Vector representing observable 1 on discrete states
    obs2 : ndarray, shape=(n)
        Vector representing observable 2 on discrete states. If not given,
        the autocorrelation of obs1 will be computed
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    correlations : ndarray, shape(n_t)
        Correlation values at the given times

    """
    n_t = len(times)
    times = np.sort(
        times)  # sort it to use caching of previously computed correlations
    f = np.zeros(n_t)

    # Use a spectral decomposition if the maximum time exceeds the matrix size;
    # otherwise fall back to iterated matrix-vector products.
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)

    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2,
                                                       times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = \
                time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
                                                        times[i], start_values, True)
    return f
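As a sanity check on the docstring formula, cor(k) = obs1' diag(pi) P^k obs2 can be evaluated with explicit matrix powers. A small self-contained sketch (not the cached implementation above); the 2-state matrix, its stationary vector and the observable are made-up illustration values:

import numpy as np

def correlation_bruteforce(P, pi, obs1, obs2=None, times=(1,)):
    # cor(k) = obs1' diag(pi) P^k obs2, via np.linalg.matrix_power (small cases only)
    if obs2 is None:
        obs2 = obs1
    return np.array([obs1 @ (pi[:, None] * np.linalg.matrix_power(P, int(k))) @ obs2
                     for k in times])

P = np.array([[0.9, 0.1], [0.2, 0.8]])
pi = np.array([2.0 / 3.0, 1.0 / 3.0])      # stationary distribution: pi P = pi
obs = np.array([1.0, 0.0])                 # indicator of state 0
print(correlation_bruteforce(P, pi, obs, times=[1, 5, 10]))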
Example #8
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
    r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.
    
    The time-correlation at time=k is computed by the matrix-vector expression: 
    cor(k) = obs1' diag(pi) P^k obs2
    
    
    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    pi : ndarray, shape=(n)
        Stationary distribution vector
    obs1 : ndarray, shape=(n)
        Vector representing observable 1 on discrete states
    obs2 : ndarray, shape=(n)
        Vector representing observable 2 on discrete states. If not given,
        the autocorrelation of obs1 will be computed
    times : array-like, shape(n_t)
        Vector of time points at which the (auto)correlation will be evaluated

    Returns
    -------
    correlations : ndarray, shape(n_t)
        Correlation values at the given times

    """
    n_t = len(times)
    times = np.sort(times)  # sort it to use caching of previously computed correlations
    f = np.zeros(n_t)

    # Use a spectral decomposition if the maximum time exceeds the matrix size;
    # otherwise fall back to iterated matrix-vector products.
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)

    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2, times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = \
                time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
                                                        times[i], start_values, True)
    return f
Example #9
def fingerprint(P, obs1, obs2=None, p0=None, tau=1, k=None, ncv=None):
    r"""Dynamical fingerprint for equilibrium or relaxation experiment

    The dynamical fingerprint is given by the implied time-scale
    spectrum together with the corresponding amplitudes.

    Parameters
    ----------
    P : (M, M) scipy.sparse matrix
        Transition matrix
    obs1 : (M,) ndarray
        Observable, represented as vector on state space
    obs2 : (M,) ndarray (optional)
        Second observable, for cross-correlations
    p0 : (M,) ndarray (optional)
        Initial distribution for a relaxation experiment
    tau : int (optional)
        Lag time of given transition matrix, for correct time-scales
    k : int (optional)
        Number of time-scales and amplitudes to compute
    ncv : int (optional)
        The number of Lanczos vectors generated, `ncv` must be greater than k;
        it is recommended that ncv > 2*k       


    Returns
    -------
    timescales : (N,) ndarray
        Time-scales of the transition matrix
    amplitudes : (N,) ndarray
        Amplitudes for the given observable(s)
        
    """
    if obs2 is None: 
        obs2=obs1
    R, D, L=rdl_decomposition(P, k=k, ncv=ncv)
    """Stationary vector"""
    mu=L[0, :]
    """Extract diagonal"""
    w=np.diagonal(D)  
    """Compute time-scales"""
    timescales = timescales_from_eigenvalues(w, tau)      
    if p0 is None:
        """Use stationary distribution - we can not use only left
        eigenvectors since the system might be non-reversible"""
        amplitudes=np.dot(mu*obs1, R)*np.dot(L, obs2)
    else:
        """Use initial distribution"""
        amplitudes=np.dot(p0*obs1, R)*np.dot(L, obs2)
    return timescales, amplitudes
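The returned pair (timescales, amplitudes) defines a multi-exponential curve. A minimal sketch of how a fingerprint is typically turned back into a signal, assuming the timescales are in the same time unit as tau and that an infinite timescale carries the stationary (constant) contribution; the numbers below are hypothetical:

import numpy as np

def signal_from_fingerprint(timescales, amplitudes, times):
    """signal(t) = sum_i amplitudes[i] * exp(-t / timescales[i]);
    an infinite timescale contributes a constant term."""
    times = np.asarray(times, dtype=float)
    decays = np.exp(-times[:, None] / np.asarray(timescales)[None, :])
    return decays @ np.asarray(amplitudes)

ts = np.array([np.inf, 10.0, 2.5])         # hypothetical timescales (units of tau)
amp = np.array([0.30, 0.15, 0.05])         # hypothetical amplitudes
print(signal_from_fingerprint(ts, amp, times=[0, 1, 5, 20]))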
Example #10
def time_relaxations_direct(P, p0, obs, times=[1]):
    r"""Compute time-relaxations of obs with respect of given initial distribution.
    
    relaxation(k) = p0 P^k obs
    
    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    p0 : ndarray, shape=(n)
        initial distribution
    obs : ndarray, shape=(n)
        Vector representing observable on discrete states. 
    times : array-like, shape(n_t)
        Vector of time points at which the relaxation will be evaluated

    Returns
    -------
    relaxations : ndarray, shape(n_t)
        Relaxation values at the given times
    """
    n_t = len(times)
    times = np.sort(times)

    # Use a spectral decomposition if the maximum time exceeds the matrix size;
    # otherwise fall back to iterated matrix-vector products.
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)

    f = np.empty(n_t, dtype=D.dtype if use_diagonalization else np.float64)

    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_relaxation_direct_by_diagonalization(
                P, p0, obs, times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(
                P, p0, obs, times[i], start_values, True)
    return f
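The quantity relaxation(k) = p0 P^k obs from the docstring can also be computed naively with matrix powers, which is convenient for testing on small matrices. A minimal sketch with made-up inputs:

import numpy as np

def relaxation_bruteforce(P, p0, obs, times=(1,)):
    # relaxation(k) = p0 P^k obs, via explicit matrix powers (small cases only)
    return np.array([p0 @ np.linalg.matrix_power(P, int(k)) @ obs for k in times])

P = np.array([[0.9, 0.1], [0.2, 0.8]])
p0 = np.array([1.0, 0.0])                  # start entirely in state 0
obs = np.array([0.0, 1.0])                 # indicator of state 1
print(relaxation_bruteforce(P, p0, obs, times=[1, 5, 10, 50]))  # relaxes towards pi[1]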
Example #11
def time_relaxations_direct(P, p0, obs, times=[1]):
    r"""Compute time-relaxations of obs with respect to a given initial distribution.
    
    relaxation(k) = p0 P^k obs
    
    Parameters
    ----------
    P : ndarray, shape=(n, n) or scipy.sparse matrix
        Transition matrix
    p0 : ndarray, shape=(n)
        initial distribution
    obs : ndarray, shape=(n)
        Vector representing observable on discrete states. 
    times : array-like, shape(n_t)
        Vector of time points at which the relaxation will be evaluated

    Returns
    -------
    relaxations : ndarray, shape(n_t)
        Relaxation values at the given times
    """
    n_t = len(times)
    times = np.sort(times)
    
    # Use a spectral decomposition if the maximum time exceeds the matrix size;
    # otherwise fall back to iterated matrix-vector products.
    use_diagonalization = False
    if times[-1] > P.shape[0]:
        use_diagonalization = True
        R, D, L = rdl_decomposition(P)
        # discard imaginary part, if all elements i=0
        if not np.any(np.iscomplex(R)):
            R = np.real(R)
        if not np.any(np.iscomplex(D)):
            D = np.real(D)
        if not np.any(np.iscomplex(L)):
            L = np.real(L)
        rdl = (R, D, L)
    
    f = np.empty(n_t, dtype=D.dtype if use_diagonalization else np.float64)
    
    if use_diagonalization:
        for i in range(n_t):
            f[i] = time_relaxation_direct_by_diagonalization(
                                        P, p0, obs, times[i], rdl)
    else:
        start_values = None
        for i in range(n_t):
            f[i], start_values = time_relaxation_direct_by_mtx_vec_prod(
                                        P, p0, obs, times[i], start_values, True)
    return f
Example #12
def ec_geometric_series(p0, T, n):
    r"""Compute expected transition counts for Markov chain after n
    steps.

    Expected counts are computed according to

    .. math::

        E[C_{ij}^{(n)}] = \sum_{k=0}^{n-1} (p_0^t T^{k})_{i} \, p_{ij}

    The sum is computed using the eigenvalue decomposition of T and
    applying the expression for a finite geometric series to each of
    the eigenvalues.

    For small n the computation of the eigenvalue decomposition can be
    much more expensive than a direct computation. In this case it is
    beneficial to compute the expected counts using successively
    computed matrix vector products p_1^t=p_0^t T, ... as increments.
    
    Parameters
    ----------
    p0 : (M,) ndarray
        Starting (probability) vector of the chain.
    T : (M, M) ndarray
        Transition matrix of the chain.
    n : int
        Number of steps to take from initial state.
        
    Returns
    -------
    EC : (M, M) ndarray
        Expected value for transition counts after n steps.
    
    """
    if (n <= 0):
        EC = np.zeros(T.shape)
        return EC
    else:
        R, D, L = rdl_decomposition(T)
        w = np.diagonal(D)
        L = np.transpose(L)

        D_sum = np.diag(geometric_series(w, n - 1))
        T_sum = np.dot(np.dot(R, D_sum), np.conjugate(np.transpose(L)))
        p_sum = np.dot(p0, T_sum)
        EC = p_sum[:, np.newaxis] * T
        """Truncate imginary part - which is zero, but we want real
        return values"""
        EC = EC.real
        return EC
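The geometric-series shortcut can be cross-checked against the naive summation from the docstring. A small brute-force sketch (no eigenvalue decomposition), useful as a reference when n is small:

import numpy as np

def expected_counts_bruteforce(p0, T, n):
    """E[C^{(n)}]_{ij} = sum_{k=0}^{n-1} (p0^t T^k)_i * T_{ij}, by direct iteration."""
    p_sum = np.zeros_like(p0, dtype=float)
    p = np.array(p0, dtype=float)
    for _ in range(n):
        p_sum += p
        p = p @ T
    return p_sum[:, np.newaxis] * T

T = np.array([[0.9, 0.1], [0.2, 0.8]])
p0 = np.array([0.5, 0.5])
print(expected_counts_bruteforce(p0, T, n=10))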
Example #13
def ec_geometric_series(p0, T, n):  
    r"""Compute expected transition counts for Markov chain after n
    steps.

    Expected counts are computed according to

    .. math::

        E[C_{ij}^{(n)}] = \sum_{k=0}^{n-1} (p_0^t T^{k})_{i} \, p_{ij}

    The sum is computed using the eigenvalue decomposition of T and
    applying the expression for a finite geometric series to each of
    the eigenvalues.

    For small n the computation of the eigenvalue decomposition can be
    much more expensive than a direct computation. In this case it is
    beneficial to compute the expected counts using successively
    computed matrix vector products p_1^t=p_0^t T, ... as increments.
    
    Parameters
    ----------
    p0 : (M,) ndarray
        Starting (probability) vector of the chain.
    T : (M, M) ndarray
        Transition matrix of the chain.
    n : int
        Number of steps to take from initial state.
        
    Returns
    -------
    EC : (M, M) ndarray
        Expected value for transition counts after n steps.
    
    """
    if(n<=0):
        EC=np.zeros(T.shape)
        return EC
    else:
        R, D, L=rdl_decomposition(T)
        w=np.diagonal(D)
        L=np.transpose(L)

        D_sum=np.diag(geometric_series(w, n-1))
        T_sum=np.dot(np.dot(R, D_sum), np.conjugate(np.transpose(L)))
        p_sum=np.dot(p0, T_sum)
        EC=p_sum[:,np.newaxis]*T
        """Truncate imginary part - which is zero, but we want real
        return values"""
        EC=EC.real
        return EC
Example #14
def pcca(T, n):
    # eigenvalues,left_eigenvectors,right_eigenvectors = decomposition.rdl_decomposition(T, n)
    R, D, L=decomposition.rdl_decomposition(T, n)
    eigenvalues=numpy.diagonal(D)
    left_eigenvectors=numpy.transpose(L)
    right_eigenvectors=R
    # TODO: complex warning maybe?
    right_eigenvectors = numpy.real(right_eigenvectors)

    # create initial solution that works using the old method

    (c_f, indic, chi, rot_matrix) = pcca_impl.cluster_by_isa(right_eigenvectors, n)

    rot_matrix = pcca_impl.opt_soft(right_eigenvectors, rot_matrix, n)

    memberships = numpy.dot(right_eigenvectors[:,:], rot_matrix)

    return memberships
Example #15
def relaxation_decomp(P, p0, obs, times=[1], k=None, ncv=None):
    r"""Relaxation experiment.

    The relaxation experiment describes the time-evolution
    of an expectation value starting in a non-equilibrium
    situation.

    Parameters
    ----------
    P : (M, M) ndarray
        Transition matrix
    p0 : (M,) ndarray
        Initial distribution for a relaxation experiment
    obs : (M,) ndarray
        Observable, represented as vector on state space
    times : list of int (optional)
        List of times at which to compute expectation
    k : int (optional)
        Number of eigenvalues and amplitudes to use for computation
    ncv : int (optional)
        The number of Lanczos vectors generated, `ncv` must be greater than k;
        it is recommended that ncv > 2*k       

    Returns
    -------
    res : ndarray
        Array of expectation value at given times
        
    """
    R, D, L=rdl_decomposition(P, k=k, ncv=ncv)    
    """Extract eigenvalues"""
    ev=np.diagonal(D)  
    """Amplitudes"""
    amplitudes=np.dot(p0, R)*np.dot(L, obs)
    """Propgate eigenvalues"""
    times=np.asarray(times)
    ev_t=ev[np.newaxis,:]**times[:,np.newaxis]
    """Compute result"""
    res=np.dot(ev_t, amplitudes)
    """Truncate imgainary part - is zero anyways"""
    res=res.real
    return res       
Example #16
def correlation_decomp(P, obs1, obs2=None, times=[1], k=None, ncv=None):
    r"""Time-correlation for equilibrium experiment - via decomposition.
    
    Parameters
    ----------
    P : (M, M) ndarray
        Transition matrix
    obs1 : (M,) ndarray
        Observable, represented as vector on state space
    obs2 : (M,) ndarray (optional)
        Second observable, for cross-correlations
    times : list of int (optional)
        List of times (in tau) at which to compute correlation
    k : int (optional)
        Number of eigenvalues and amplitudes to use for computation
    ncv : int (optional)
        The number of Lanczos vectors generated, `ncv` must be greater than k;
        it is recommended that ncv > 2*k       

    Returns
    -------
    correlations : ndarray
        Correlation values at given times
        
    """
    if obs2 is None: 
        obs2=obs1
    R, D, L=rdl_decomposition(P, k=k, ncv=ncv)    
    """Stationary vector"""
    mu=L[0,:]
    """Extract eigenvalues"""
    ev=np.diagonal(D)  
    """Amplitudes"""
    amplitudes=np.dot(mu*obs1, R)*np.dot(L, obs2)
    """Propgate eigenvalues"""
    times=np.asarray(times)
    ev_t=ev[np.newaxis,:]**times[:,np.newaxis]
    """Compute result"""
    res=np.dot(ev_t, amplitudes)
    """Truncate imgainary part - should be zero anyways"""
    res=res.real
    return res       
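The spectral expansion used here, cor(t) = sum_i (mu*obs1 . r_i)(l_i . obs2) lambda_i^t, agrees with the direct evaluation obs1' diag(mu) P^t obs2. A numpy-only consistency check on a 2-state chain (eigenvectors normalized so that L R = I and the first left eigenvector sums to one; illustration values only):

import numpy as np

P = np.array([[0.9, 0.1], [0.2, 0.8]])
obs1 = np.array([1.0, 0.0])
obs2 = np.array([0.0, 1.0])
t = 7

w, R = np.linalg.eig(P)                    # right eigenvectors as columns
order = np.argsort(np.abs(w))[::-1]
w, R = w[order], R[:, order]
L = np.linalg.inv(R)                       # left eigenvectors as rows
s = L[0].sum()
L[0] /= s                                  # first left eigenvector = stationary vector
R[:, 0] *= s
mu = L[0]

amplitudes = np.dot(mu * obs1, R) * np.dot(L, obs2)
corr_spectral = np.dot(amplitudes, w ** t)
corr_direct = obs1 @ (mu[:, None] * np.linalg.matrix_power(P, t)) @ obs2
assert np.allclose(corr_spectral, corr_direct)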
Example #17
    def setUp(self):
        self.k=4

        p=np.zeros(10)
        q=np.zeros(10)
        p[0:-1]=0.5
        q[1:]=0.5
        p[4]=0.01
        q[6]=0.1

        self.bdc=BirthDeathChain(q, p)
        
        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix_sparse()
        R, D, L=rdl_decomposition(self.T, k=self.k)
        self.L=L
        self.R=R
        self.ts=timescales(self.T, k=self.k)
        self.times=np.array([1, 5, 10, 20])

        ev=np.diagonal(D)
        self.ev_t=ev[np.newaxis,:]**self.times[:,np.newaxis]

        self.tau=7.5

        """Observables"""
        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        obs2 = np.zeros(10)
        obs2[8] = 1
        obs2[9] = 1

        self.obs1=obs1
        self.obs2=obs2

        """Initial vector for relaxation"""
        w0=np.zeros(10)
        w0[0:4]=0.25
        self.p0=w0     
Example #18
    def setUp(self):
        self.k = 4

        p = np.zeros(10)
        q = np.zeros(10)
        p[0:-1] = 0.5
        q[1:] = 0.5
        p[4] = 0.01
        q[6] = 0.1

        self.bdc = BirthDeathChain(q, p)

        self.mu = self.bdc.stationary_distribution()
        self.T = self.bdc.transition_matrix_sparse()
        R, D, L = rdl_decomposition(self.T, k=self.k)
        self.L = L
        self.R = R
        self.ts = timescales(self.T, k=self.k)
        self.times = np.array([1, 5, 10, 20])

        ev = np.diagonal(D)
        self.ev_t = ev[np.newaxis, :]**self.times[:, np.newaxis]

        self.tau = 7.5
        """Observables"""
        obs1 = np.zeros(10)
        obs1[0] = 1
        obs1[1] = 1
        obs2 = np.zeros(10)
        obs2[8] = 1
        obs2[9] = 1

        self.obs1 = obs1
        self.obs2 = obs2
        """Initial vector for relaxation"""
        w0 = np.zeros(10)
        w0[0:4] = 0.25
        self.p0 = w0
Example #19
    def test_rdl_decomposition(self):
        P=self.bdc.transition_matrix_sparse()
        mu=self.bdc.stationary_distribution()

        """Non-reversible"""

        """k=None"""
        with self.assertRaises(ValueError):
            Rn, Dn, Ln=rdl_decomposition(P)        

        """k is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, k=self.k)        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(P.dot(Rn), np.dot(Rn, Dn)))    
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(P.transpose().dot(Ln.transpose()).transpose(), np.dot(Dn, Ln)))               
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))

        """k is not None, ncv is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, k=self.k, ncv=self.ncv)        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(P.dot(Rn), np.dot(Rn, Dn)))    
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(P.transpose().dot(Ln.transpose()).transpose(), np.dot(Dn, Ln)))               
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))

        """Reversible"""

        """k=None"""
        with self.assertRaises(ValueError):
            Rn, Dn, Ln=rdl_decomposition(P, norm='reversible')        

        """k is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, k=self.k, norm='reversible')        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(P.dot(Rn), np.dot(Rn, Dn)))    
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(P.transpose().dot(Ln.transpose()).transpose(), np.dot(Dn, Ln)))               
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))
        """Reversibility"""
        self.assertTrue(np.allclose(Ln.transpose(), mu[:,np.newaxis]*Rn))

        """k is not None ncv is not None"""
        Rn, Dn, Ln=rdl_decomposition(P, k=self.k, norm='reversible', ncv=self.ncv)        
        Xn=np.dot(Ln, Rn)
        """Right-eigenvectors"""
        self.assertTrue(np.allclose(P.dot(Rn), np.dot(Rn, Dn)))    
        """Left-eigenvectors"""
        self.assertTrue(np.allclose(P.transpose().dot(Ln.transpose()).transpose(), np.dot(Dn, Ln)))               
        """Orthonormality"""
        self.assertTrue(np.allclose(Xn, np.eye(self.k)))
        """Probability vector"""
        self.assertTrue(np.allclose(np.sum(Ln[0,:]), 1.0))
        """Reversibility"""
        self.assertTrue(np.allclose(Ln.transpose(), mu[:,np.newaxis]*Rn))
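The ValueError for k=None reflects a limitation of sparse eigensolvers: ARPACK computes only a partial spectrum, so the full decomposition of a sparse matrix cannot be requested. A brief scipy illustration (assuming scipy.sparse.linalg.eigs, which requires k to be smaller than the matrix dimension):

import numpy as np
import scipy.sparse
import scipy.sparse.linalg

n = 10
A = np.random.rand(n, n)
P = scipy.sparse.csr_matrix(A / A.sum(axis=1, keepdims=True))  # row-stochastic

w, v = scipy.sparse.linalg.eigs(P, k=4)    # partial spectrum: fine

try:
    scipy.sparse.linalg.eigs(P, k=n)       # (nearly) full spectrum: not possible
except ValueError as err:
    print("ValueError:", err)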
Example #20
 def test_rdl_decomposition(self):
     P = self.bdc.transition_matrix_sparse()
     mu = self.bdc.stationary_distribution()
     """Non-reversible"""
     """k=None"""
     with self.assertRaises(ValueError):
         Rn, Dn, Ln = rdl_decomposition(P)
     """k is not None"""
     Rn, Dn, Ln = rdl_decomposition(P, k=self.k)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(P.dot(Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(P.transpose().dot(Ln.transpose()).transpose(),
                     np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """k is not None, ncv is not None"""
     Rn, Dn, Ln = rdl_decomposition(P, k=self.k, ncv=self.ncv)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(P.dot(Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(P.transpose().dot(Ln.transpose()).transpose(),
                     np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversible"""
     """k=None"""
     with self.assertRaises(ValueError):
         Rn, Dn, Ln = rdl_decomposition(P, norm='reversible')
     """k is not None"""
     Rn, Dn, Ln = rdl_decomposition(P, k=self.k, norm='reversible')
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(P.dot(Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(P.transpose().dot(Ln.transpose()).transpose(),
                     np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversibility"""
     assert_allclose(Ln.transpose(), mu[:, np.newaxis] * Rn)
     """k is not None ncv is not None"""
     Rn, Dn, Ln = rdl_decomposition(P,
                                    k=self.k,
                                    norm='reversible',
                                    ncv=self.ncv)
     Xn = np.dot(Ln, Rn)
     """Right-eigenvectors"""
     assert_allclose(P.dot(Rn), np.dot(Rn, Dn))
     """Left-eigenvectors"""
     assert_allclose(P.transpose().dot(Ln.transpose()).transpose(),
                     np.dot(Dn, Ln))
     """Orthonormality"""
     assert_allclose(Xn, np.eye(self.k))
     """Probability vector"""
     assert_allclose(np.sum(Ln[0, :]), 1.0)
     """Reversibility"""
     assert_allclose(Ln.transpose(), mu[:, np.newaxis] * Rn)