Example #1
0
def update_resnrm(a, u, y0, resnrm):    
    au = a.dot(u)
    b = u.T.dot(au)
    yr = u.T.dot(y0)
    z = la.solve_lyapunov(b, yr.dot(yr.T))
    resnrm.append(la.norm((au - u.dot(b)).dot(z)))
    return
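A minimal call sketch (not from the source project), assuming scipy.linalg is imported as la, as in the snippet: a is a stable matrix, u an orthonormal basis, and y0 a start vector; one residual norm gets appended to resnrm.

# Hedged usage sketch for update_resnrm (assumed imports: numpy, scipy.linalg as la)
import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(0)
n, k = 20, 5
a = -np.eye(n) + 0.1 * rng.standard_normal((n, n))          # comfortably stable
u, _ = la.qr(rng.standard_normal((n, k)), mode='economic')  # orthonormal basis
y0 = rng.standard_normal((n, 1))

resnrm = []
update_resnrm(a, u, y0, resnrm)
print(resnrm)  # a single residual norm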
Example #2
0
def solve_algric(A=None, R=None, W=None, X0=None,
                 nwtnstps=20, nwtntol=1e-12,
                 verbose=False):
    """ solve algebraic Riccati Equation via Newton iteration

    for dense matrices.

    The considered Ric Eqn is `A.T X + X A + X R X = W`
    """
    XN = X0
    nwtns, nwtnres = 0, 999
    while nwtns < nwtnstps and nwtnres > nwtntol:
        nwtns += 1
        fmat = A + np.dot(R, XN)
        cw = W + np.dot(XN, np.dot(R, XN))
        XNN = spla.solve_lyapunov(fmat.T, cw)
        nwtnres = np.linalg.norm(XN - XNN)
        XN = XNN
        if verbose:
            print('Newton step {0}: norm of update {1}'.format(nwtns, nwtnres))
    if verbose:
        ricres = np.dot(A.T, XN) + np.dot(XN, A) + \
            np.dot(XN, np.dot(R, XN)) - W
        print('Ric Residual: {0}'.format(np.linalg.norm(ricres)))
    return XN
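A minimal usage sketch (not from the source project): with R = -B B.T and W = -Q the equation above is the standard continuous-time algebraic Riccati equation, so the Newton result can be cross-checked against scipy.linalg.solve_continuous_are. Assumes numpy as np and scipy.linalg as spla, as in the snippet.

# Hedged usage sketch: solve A.T X + X A + X R X = W with R = -B B.T, W = -Q
import numpy as np
import scipy.linalg as spla

A = np.array([[0., 1.], [-2., -3.]])   # Hurwitz, so X0 = 0 is an admissible start
B = np.array([[0.], [1.]])
Q = np.eye(2)

X = solve_algric(A=A, R=-B.dot(B.T), W=-Q, X0=np.zeros((2, 2)))
X_ref = spla.solve_continuous_are(A, B, Q, np.eye(1))   # same CARE, direct solver
print(np.allclose(X, X_ref))  # expected: True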
Example #3
0
def _H2P(A, B, analog):
    """Computes the positive-definite P matrix for determining the H2-norm."""
    if analog:
        P = solve_lyapunov(A, -np.dot(B, B.T))  # AP + PA^T = -BB^T
    else:
        # Note: discretization is not performed for the user
        P = solve_discrete_lyapunov(A, np.dot(B, B.T))  # APA^T - P = -BB^T
    return P
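A short sketch (not part of the library above) of how such a P is typically used: in the analog case the squared H2 norm of (A, B, C) is trace(C P C.T), with P the controllability Gramian computed exactly as in the analog branch.

# Hedged sketch: H2 norm from the Gramian P (analog case)
import numpy as np
from scipy.linalg import solve_lyapunov

A = np.array([[-1., 0.], [1., -2.]])
B = np.array([[1.], [0.]])
C = np.array([[0., 1.]])

P = solve_lyapunov(A, -np.dot(B, B.T))       # A P + P A.T = -B B.T
h2 = np.sqrt(np.trace(C.dot(P).dot(C.T)))    # H2 norm of the system (A, B, C)
print(h2)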
Example #4
0
 def calc_P(self, A, B, Q):
     P = sla.solve_lyapunov(self.A.T, -self.Q)
     return P
Example #5
0
    def __init__(self, system):
        self.basis = system.unc.basis
        self.args = system.args

        self.P = sla.solve_lyapunov(self.args.A.T, self.args.Q_lyap)
Example #6
0
		# record best fit (matrix distance between model and empirical FC)
		best_dist = 1e10

		# objective FC matrices (empirical)
		FC0_obj = FC_emp[i_sub,i_cond,0,:,:]
		FCtau_obj = FC_emp[i_sub,i_cond,i_tau,:,:]

		stop_opt = False
		i_opt = 0
		while not stop_opt:

			# calculate Jacobian of dynamical system
			J = -np.eye(N)/tau_x[i_sub,:].mean() + EC

			# calculate FC0 and FCtau for model
			FC0 = spl.solve_lyapunov(J,-Sigma)
			FCtau = np.dot(FC0,spl.expm(J.T*tau))

			# matrices of model error
			Delta_FC0 = FC0_obj-FC0
			Delta_FCtau = FCtau_obj-FCtau

			# calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
			dist_FC_tmp = 0.5*(np.sqrt((Delta_FC0**2).sum()/(FC0_obj**2).sum())+np.sqrt((Delta_FCtau**2).sum()/(FCtau_obj**2).sum()))

			# calculate Pearson correlation between model and empirical data for FC0 and FC_tau
			Pearson_FC_tmp = 0.5*(stt.pearsonr(FC0.reshape(-1),FC0_obj.reshape(-1))[0]+stt.pearsonr(FCtau.reshape(-1),FCtau_obj.reshape(-1))[0])

			# record best model parameters
			if dist_FC_tmp<best_dist:
				best_dist = dist_FC_tmp
Example #7
0
def SOC(num_neurons, p, R):
    C = 1.5  # numerical constant
    gamma = 3
    n = 10  # learning rate
    first_quadrant = np.arange(num_neurons // 2)
    second_quadrant = first_quadrant + num_neurons // 2

    W = np.zeros((num_neurons, num_neurons))
    w0 = R / np.sqrt(p * (1 - p) * (1 + gamma**2) / 2)
    for i in np.arange(num_neurons):
        for j in np.arange(num_neurons):
            if i != j:
                if np.random.rand() < p and j <= num_neurons / 2:
                    W[i, j] = w0 / np.sqrt(num_neurons)
                elif np.random.rand() < p and j > num_neurons / 2:
                    W[i, j] = -gamma * w0 / np.sqrt(num_neurons)

    W_adj = np.zeros((num_neurons, num_neurons))
    W_adj[W.nonzero()] = 1
    A = np.random.rand(num_neurons, num_neurons)
    A[A < 0.35] = -1
    A[A >= 0.35] = 0
    A[A < 0] = 1
    A = A - np.diag(np.diag(A))
    W_adj = W_adj + A - W_adj * A

    alpha = max(np.real(npLA.eig(W)[0]))
    s = max(C * alpha, alpha + 0.2)
    Q = spLA.solve_lyapunov(np.transpose(W - s * np.eye(num_neurons)),
                            -2 * np.eye(num_neurons))
    i = 0
    #    plt.figure();
    #    plt.isinteractive = False;
    step = 0
    #    e = 1 / np. trace(Q);
    while alpha >= 0.2:
        # solve Lyapunov equation
        step = step + 1
        if np.mod(step, 500) == 0:
            print "number of steps: ", step, "; alpha:", alpha

        P = spLA.solve_lyapunov(W - s * np.eye(num_neurons),
                                -2 * np.eye(num_neurons))
        deltaW = n * np.dot(Q, P) / np.trace(np.dot(Q, P))
        deltaW[:, first_quadrant] = 0
        W = W - deltaW

        # clipping positive inhibition neurons to zero
        W_i = W[:, second_quadrant]
        W_i[W_i > 0] = 0
        W = np.hstack((W[:, first_quadrant], W_i))

        # impose relative inhibitory strength constraint
        W_EE = W[np.ix_(first_quadrant, first_quadrant)]
        W_EI = W[np.ix_(first_quadrant, second_quadrant)]
        W_IE = W[np.ix_(second_quadrant, first_quadrant)]
        W_II = W[np.ix_(second_quadrant, second_quadrant)]
        W_EI = -gamma * W_EI * np.mean(W_EE) / np.mean(W_EI)
        W_II = -gamma * W_II * np.mean(W_IE) / np.mean(W_II)
        W = np.vstack((np.hstack((W_EE, W_EI)), np.hstack((W_IE, W_II))))

        # impose density constraints
        W[W_adj == 0] = 0

        #        i = i + 1;
        #        if np.mod(i,20) == 0:
        #            plt.clf();
        #            plt.axis([-5, 5, -5 ,5])
        #            plt.plot(npLA.eig(W)[0].real,npLA.eig(W)[0].imag,'ro');
        #            plt.ion();
        #            plt.draw()
        #            plt.pause(0.01);

        alpha = max(np.real(npLA.eig(W)[0]))
        s = max(C * alpha, alpha + 0.2)
        Q = spLA.solve_lyapunov(np.transpose(W - s * np.eye(num_neurons)),
                                -2 * np.eye(num_neurons))
#        e = 1 / np.trace(Q);
    a_l = npLA.eig(Q)[1][:, np.argmax(npLA.eig(Q)[0])]
    a_r = npLA.eig(Q)[1][:, np.argsort(npLA.eig(Q)[0])[-2]]
    return W, a_l, a_r, alpha
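A self-contained sketch (not from the source project) of the shifted Lyapunov solve used inside SOC: shifting W by s > alpha makes W - s*I Hurwitz, so the resulting Q is a symmetric positive-definite Gramian. Assumes numpy.linalg as npLA and scipy.linalg as spLA, as in the snippet.

# Hedged sketch of the shifted solve: (W - s*I).T Q + Q (W - s*I) = -2*I
import numpy as np
import numpy.linalg as npLA
import scipy.linalg as spLA

np.random.seed(0)
W = np.random.randn(10, 10) / np.sqrt(10)
alpha = max(np.real(npLA.eig(W)[0]))               # spectral abscissa of W
s = max(1.5 * alpha, alpha + 0.2)                  # same shift rule as in SOC
Q = spLA.solve_lyapunov((W - s * np.eye(10)).T, -2 * np.eye(10))
print(np.all(npLA.eigvalsh((Q + Q.T) / 2) > 0))    # True: Q is positive definite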
Example #8
0
control, in_vecs, c_matrix = state_controllability(A, B)

# Question: Why does A.transpose give the same eigenvectors as the book
# and not plain A??
# Answer: The eig function in the numpy library only calculates
# the right eigenvectors. The SciPy library provides an eig function
# in which you can specify whether you want left or right eigenvectors.
# To determine controllability you need to calculate the input pole vectors,
# which depend on the left eigenvectors.

# Calculate eigenvectors and pole vectors
val, vec = LA.eig(A, None, 1, 0, 0, 0)

n = lin.matrix_rank(c_matrix)

P = LA.solve_lyapunov(A, -B * B.T)


# Display results
print('\nThe transfer function realization is:')
print('G(s) = ')
print(np.poly1d(G.num[0], variable='s'))
print("----------------")
print(np.poly1d(G.den, variable='s'))

print('\n1) Eigenvalues are: p1 = ', val[0], 'and p2 = ', val[1])
print('   with eigenvectors: q1 = ', vec[:, 0], 'and q2 = ', vec[:, 1])
print('   Input pole vectors are: up1 = ', in_vecs[0], 'and up2 = ', in_vecs[1])

print('\n2) The controllability matrix has rank', n, 'and is given as:')
print(c_matrix)
Example #9
0
def l1_norm(sys, rtol=1e-6, max_length=2**18):
    """Returns the L1-norm of a linear system within a relative tolerance.

    The L1-norm of a (BIBO stable) linear system is the integral of the
    absolute value of its impulse response. For unstable systems this will be
    infinite. The L1-norm is important because it bounds the worst-case
    output of the system for arbitrary inputs within [-1, 1]. In fact,
    this worst-case output is achieved by reversing the input which alternates
    between -1 and 1 during the intervals where the impulse response is
    negative or positive, respectively (in the limit as T -> infinity).

    Algorithm adapted from [1]_ following the methods of [2]_. This works by
    iteratively refining lower and upper bounds using progressively longer
    simulations and smaller timesteps. The lower bound is given by the
    absolute values of the discretized response. The upper bound is given by
    refining the time-step intervals where zero-crossings may have occurred.

    References:
        [1] http://www.mathworks.com/matlabcentral/fileexchange/41587-system-l1-norm/content/l1norm.m  # noqa: E501
            J.F. Whidborne (April 28, 1995).

        [2] Rutland, Neil K., and Paul G. Lane. "Computing the 1-norm of the
            impulse response of linear time-invariant systems."
            Systems & control letters 26.3 (1995): 211-221.
    """
    sys = LinearSystem(sys)
    if not sys.analog:
        raise ValueError("system (%s) must be analog" % sys)

    # Setup state-space system and check stability/conditioning
    # we will subtract out D and add it back in at the end
    A, B, C, D = sys.ss
    alpha = np.max(eig(A)[0].real)  # eq (28)
    if alpha >= 0:
        raise ValueError("system (%s) has unstable eigenvalue: %s" % (
            sys, alpha))

    # Compute a suitable lower-bound for the L1-norm
    # using the steady state response, which is equivalent to the
    # L1-norm without an absolute value (i.e. just an integral)
    G0 = sys.dcgain - sys.D  # -C.inv(A).B

    # Compute a suitable upper-bound for the L1-norm
    # Note this should be tighter than 2*sum(abs(hankel(sys)))
    def _normtail(sig, A, x, C):
        # observability gramian when A is perturbed by sig
        W = solve_lyapunov(A.T + sig*np.eye(len(A)), -C.T.dot(C))
        return np.sqrt(x.dot(W).dot(x.T) / 2 / sig)  # eq (39)

    xtol = -alpha * 1e-4
    _, fopt, _, _ = fminbound(
        _normtail, 0, -alpha, (A, B.T, C), xtol=xtol, full_output=True)

    # Setup parameters for iterative optimization
    L, U = abs(G0), fopt
    N = 2**4
    T = -1 / alpha

    while N <= max_length and .5 * (U - L) / L >= rtol:  # eq (25)

        # Step 1. Improve the lower bound by simulating more.
        dt = T / N
        dsys = cont2discrete((A, B, C, 0), dt=dt)
        Phi = dsys.A

        y = impulse(dsys, dt=None, length=N)
        abs_y = abs(y[1:])
        L_impulse = np.sum(abs_y)

        # bound the missing response from t > T from below
        L_tail = abs(G0 - np.sum(y))  # eq (33)
        L = max(L, L_impulse + L_tail)

        # Step 2. Improve the upper bound using refined interval method.
        x = _state_impulse(Phi, x0=B, k=N, delay=0)  # eq (38)
        abs_e = np.squeeze(abs(C.dot(x.T)))
        x = x[:-1]  # compensate for computing where thresholds crossed

        # find intervals that could have zero-crossings and adjust their
        # upper bounds (the lower bound is exact for the other intervals)
        CTC = C.T.dot(C)
        W = solve_lyapunov(A.T, Phi.T.dot(CTC).dot(Phi) - CTC)  # eq (36)
        AWA = A.T.dot(W.dot(A))
        thresh = np.squeeze(  # eq (41)
            np.sqrt(dt * np.sum(x.dot(AWA) * x, axis=1)))
        cross = np.maximum(abs_e[:-1], abs_e[1:]) <= thresh  # eq (20)
        abs_y[cross] = np.sqrt(  # eq (22, 37)
            dt * np.sum(x[cross].dot(W) * x[cross], axis=1))

        # bound the missing response from t > T from above
        _, U_tail, _, _ = fminbound(
            _normtail, 0, -alpha, (A, x[-1], C), xtol=xtol, full_output=True)
        U_impulse = np.sum(abs_y)
        U = max(min(U, U_impulse + U_tail), L)

        N *= 2
        if U_impulse - L_impulse < U_tail - L_tail:  # eq (26)
            T *= 2

    return (U + L) / 2 + D, .5 * (U - L) / L
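A self-contained sanity check (independent of the function above) of the quantity being bounded: for a system whose impulse response never changes sign, the integral of |h(t)| equals the DC gain, so the L1-norm of 1/(s+1) is 1.

# Hedged check: L1-norm of 1/(s+1) equals its DC gain of 1
import numpy as np
from scipy.signal import impulse

t = np.linspace(0, 20, 20001)
t, h = impulse(([1.], [1., 1.]), T=t)       # h(t) = exp(-t)
l1 = np.sum(np.abs(h)) * (t[1] - t[0])      # crude quadrature of |h|
print(np.isclose(l1, 1.0, atol=1e-2))       # True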
Example #10
0
    def fit(self, X, y=None, SC_mask=None, method='lyapunov', norm_fc=None, 
            epsilon_EC=0.0005, epsilon_Sigma=0.05, min_val_EC=0.0, max_val_EC=0.4,
            n_opt=10000, regul_EC=0, regul_Sigma=0,
            true_model=None, verbose=0):
        """
        Estimation of MOU parameters (connectivity C, noise covariance Sigma,
        and time constant tau_x) with Lyapunov optimization as in: Gilson et al.
        Plos Computational Biology (2016).
        PARAMETERS:
            X: the timeseries data of the system to estimate, shape: T time points x P variables.
            y: needed to comply with BaseEstimator (not used here)
            SC_mask: mask of known non-zero values for connectivity matrix, for example 
            estimated by DTI
            method: 'lyapunov', 'moments' or 'bayes'
            norm_fc: normalization factor for FC. Normalization is needed to avoid high connectivity values that make
            the network activity explode. FC is normalized as FC *= 0.5/norm_fc. norm_fc can be specified to be, for example,
            the average over all entries of FC for all subjects or sessions in a given group. If not specified, the normalization factor is
            the mean of the 0-lag covariance matrix of the provided time series (ts_emp).
            epsilon_EC : learning rate for connectivity (this should be about n_nodes times smaller than epsilon_Sigma).
            epsilon_Sigma : learning rate for Sigma (this should be about n_nodes times larger than epsilon_EC).
            min_val_EC : minimum value to bound the connectivity estimate. This should be zero or slightly negative (a limit that is
            too negative can lead to an inhibition-dominated system). If the empirical covariance has many negative entries, a slightly
            negative limit can improve the estimation accuracy.
            max_val_EC : maximum value to bound the connectivity estimate. This is useful to avoid large weights that make the system unstable.
            If the estimated connectivity saturates toward this value (it usually doesn't happen), it can be increased.
            n_opt : maximum number of optimization steps. If the final number of iterations reaches this maximum, the algorithm has not converged.
            regul_EC : regularization parameter for connectivity (try a value of 0.5)
            regul_Sigma : regularization parameter for Sigma (try a value of 0.001)
            true_model: a tuple (true_C, true_S) with the true connectivity matrix and noise covariance (for calculating the estimation error iteratively)
            verbose: verbosity level; 0: no output; 1: prints regularization parameters,
            the estimated tau_x, the lag tau used for the lagged covariance, and the evolution of the iterative
            optimization with final values of the data covariance (Functional Connectivity) fit;
            2: creates diagnostic graphics with the cost function over iterations
            (the distance between true and estimated connectivity is also shown
            if true_C is not None) and the fit of the data covariance matrix (fits of connectivity
            and Sigma are also shown if true_C and true_S are not None).
        RETURN:
            C: estimated connectivity matrix, shape [P, P] with null-diagonal
            Sigma: estimated noise covariance, shape [P, P]
            tau_x: estimated time constant (scalar)
            d_fit: a dictionary with diagnostics of the fit; keys are: iterations, distance and correlation
        """
        # TODO: raise a warning if the algorithm does not converge
        # TODO: look into regularization
        # TODO: get rid of comparison with ground truth
        # TODO: move SC_mask to __init__()
        # TODO: make better graphics (deal with axes separation, etc.)
        # FIXME: tau_x in Matt's original script is calculated as the mean tau_x over sessions for each subject: why? Is this important?
        # TODO: check consistent N between object init and time series X passed to fit()
    
        if not(true_model is None):  # if true model is known
            true_C = true_model[0]  # used later to calculate error iteratively
            true_S = true_model[1]

        n_T = X.shape[0]  # number of time samples
        N = X.shape[1] # number of ROIs
        d_fit = dict()  # a dictionary to store the diagnostics of fit
        
        # mask for existing connections for EC and Sigma
        mask_diag = np.eye(N,dtype=bool)
        if SC_mask is None:
            mask_EC = np.logical_not(mask_diag) # all possible connections except self
        else:
            mask_EC = SC_mask
        mask_Sigma = np.eye(N,dtype=bool) # independent noise
        #mask_Sigma = np.ones([N,N],dtype=bool) # coloured noise
            
        if method=='lyapunov':
            
            if verbose>0:
                print('regularization:', regul_EC, ';', regul_Sigma)
            
            n_tau = 3 # number of time shifts for FC_emp
            v_tau = np.arange(n_tau)
            i_tau_opt = 1 # time shift for optimization
            
            min_val_Sigma_diag = 0. # minimal value for Sigma
          
            # FC matrix 
            ts_emp = X - np.outer(np.ones(n_T), X.mean(0))
            FC_emp = np.zeros([n_tau,N,N])
            for i_tau in range(n_tau):
                FC_emp[i_tau,:,:] = np.tensordot(ts_emp[0:n_T-n_tau,:],ts_emp[i_tau:n_T-n_tau+i_tau,:],axes=(0,0)) / float(n_T-n_tau-1)
        
            # normalize covariances (to ensure the system does not explode)
            if norm_fc is None:
                norm_fc = FC_emp[0,:,:].mean()
            FC_emp *= 0.5/norm_fc
            if verbose>0:
                print('max FC value (most of the distribution should be between 0 and 1):', FC_emp.max())
        
            # autocovariance time constant
            log_ac = np.log(np.maximum(FC_emp.diagonal(axis1=1,axis2=2),1e-10))
            lin_reg = np.polyfit(np.repeat(v_tau,N),log_ac.reshape(-1),1)
            tau_x = -1./lin_reg[0]
            if verbose>0:
                print('inverse of negative slope (time constant):', tau_x)
            
            # optimization
            if verbose>0:
                print('*opt*')
                print('i tau opt:', i_tau_opt)
            tau = v_tau[i_tau_opt]
            
            # objective FC matrices (empirical)
            FC0_obj = FC_emp[0,:,:]
            FCtau_obj = FC_emp[i_tau_opt,:,:]
            
            coef_0 = np.sqrt(np.sum(FCtau_obj**2)) / (np.sqrt(np.sum(FC0_obj**2))+np.sqrt(np.sum(FCtau_obj**2)))
            coef_tau = 1. - coef_0
            
            # initial network parameters
            EC = np.zeros([N,N])
            Sigma = np.eye(N)  # initial noise
            
            # best distance between model and empirical data
            best_dist = 1e10
            best_Pearson = 0.
            
            # record model parameters and outputs
            dist_FC_hist = np.zeros([n_opt])*np.nan # FC error = matrix distance
            Pearson_FC_hist = np.zeros([n_opt])*np.nan # Pearson corr model/objective
            dist_EC_hist = np.zeros([n_opt])*np.nan # EC error = matrix distance
            Pearson_EC_hist = np.zeros([n_opt])*np.nan # Pearson corr model/true EC
            
            stop_opt = False
            i_opt = 0
            while not stop_opt:
                # calculate Jacobian of dynamical system
                J = -np.eye(N)/tau_x + EC
                		
                # calculate FC0 and FCtau for model
                FC0 = spl.solve_lyapunov(J,-Sigma)
                FCtau = np.dot(FC0,spl.expm(J.T*tau))
                
                # calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
                err_FC0 = np.sqrt(np.sum((FC0-FC0_obj)**2))/np.sqrt(np.sum(FC0_obj**2))
                err_FCtau = np.sqrt(np.sum((FCtau-FCtau_obj)**2))/np.sqrt(np.sum(FCtau_obj**2))
                dist_FC_hist[i_opt] = 0.5*(err_FC0+err_FCtau)
                if not(true_model is None):
                    dist_EC_hist[i_opt] = np.sqrt(np.sum((EC-true_C)**2))/np.sqrt(np.sum(true_C**2))
                	
                # calculate Pearson corr between model and empirical data for FC0 and FC_tau
                Pearson_FC_hist[i_opt] = 0.5*(stt.pearsonr(FC0.reshape(-1),FC0_obj.reshape(-1))[0]+stt.pearsonr(FCtau.reshape(-1),FCtau_obj.reshape(-1))[0])
                if not(true_model is None):
                    Pearson_EC_hist[i_opt] = stt.pearsonr(EC.reshape(-1), true_C.reshape(-1))[0]
                
                # best fit given by best Pearson correlation coefficient for both FC0 and FCtau (better than matrix distance)
                if dist_FC_hist[i_opt]<best_dist:
                    best_dist = dist_FC_hist[i_opt]
                    best_Pearson = Pearson_FC_hist[i_opt]
                    i_best = i_opt
                    EC_best = np.array(EC)
                    Sigma_best = np.array(Sigma)
                    FC0_best = np.array(FC0)
                    FCtau_best = np.array(FCtau)
                else:
                    stop_opt = i_opt>100
                
                # Jacobian update with weighted FC updates depending on respective error
                Delta_FC0 = (FC0_obj-FC0)*coef_0
                Delta_FCtau = (FCtau_obj-FCtau)*coef_tau
                Delta_J = np.dot(np.linalg.pinv(FC0),Delta_FC0+np.dot(Delta_FCtau,spl.expm(-J.T*tau))).T/tau
                # update connectivity and noise
                EC[mask_EC] += epsilon_EC * (Delta_J - regul_EC*EC)[mask_EC]
                EC[mask_EC] = np.clip(EC[mask_EC],min_val_EC,max_val_EC)
                
                Sigma[mask_Sigma] += epsilon_Sigma * (-np.dot(J,Delta_FC0)-np.dot(Delta_FC0,J.T) - regul_Sigma)[mask_Sigma]
                Sigma[mask_diag] = np.maximum(Sigma[mask_diag],min_val_Sigma_diag)
                
                # check whether to end optimization: error no longer improving or max iterations reached
                if stop_opt or i_opt==n_opt-1:
                    stop_opt = True
                    d_fit['iterations'] = i_opt
                    d_fit['distance'] = best_dist
                    d_fit['correlation'] = best_Pearson
                    if verbose>0:
                        print('stop at step', i_opt, 'with best dist', best_dist, ';best FC Pearson:', best_Pearson)
                else:
                    if (i_opt)%20==0 and verbose>0:
                        print('opt step:', i_opt)
                        print('current dist FC:', dist_FC_hist[i_opt], '; current Pearson FC:', Pearson_FC_hist[i_opt])
                    i_opt += 1
                
            if verbose>1:
                # plots
                
                mask_nodiag = np.logical_not(np.eye(N,dtype=bool))
                mask_nodiag_and_not_EC = np.logical_and(mask_nodiag,np.logical_not(mask_EC))
                mask_nodiag_and_EC = np.logical_and(mask_nodiag,mask_EC)
                
                pp.figure()
                gs = GridSpec(2, 3)
                    
                if not(true_model is None):
                    pp.subplot(gs[0,2])
                    pp.scatter(true_C, EC_best, marker='x')
                    pp.xlabel('original EC')
                    pp.ylabel('estimated EC')
                    pp.text(pp.xlim()[0]+.05, pp.ylim()[1]-.05,
                             r'$\rho$: ' + str(stt.pearsonr(true_C[mask_EC], EC_best[mask_EC])[0]))
                    pp.subplot(gs[1,2])
                    pp.scatter(true_S, Sigma_best,marker='x')
                    pp.xlabel('original Sigma')
                    pp.ylabel('estimated Sigma')
                    pp.text(pp.xlim()[0]+.05, pp.ylim()[1]-.05,
                             r'$\rho_{diag}$: ' + str(stt.pearsonr(true_S.diagonal(), Sigma_best.diagonal())[0])
                             + r'$\rho_{off-diag}$: ' + str(stt.pearsonr(true_S[mask_nodiag], Sigma_best[mask_nodiag])[0])
                             )
                
                    
                pp.subplot(gs[0,0:2])
                pp.plot(range(n_opt),dist_FC_hist, label='distance FC')
                pp.plot(range(n_opt),Pearson_FC_hist, label=r'$\rho$ FC')
                if not(true_model is None):
                    pp.plot(range(n_opt),dist_EC_hist, label='distance EC')
                    pp.plot(range(n_opt),Pearson_EC_hist, label=r'$\rho$ EC')
                pp.legend()
                pp.xlabel('optimization step')
                pp.ylabel('FC error')
                
                    
                pp.subplot(gs[1,0])
                pp.scatter(FC0_obj[mask_nodiag_and_not_EC], FC0_best[mask_nodiag_and_not_EC], marker='x', color='k', label='not(SC)')
                pp.scatter(FC0_obj[mask_nodiag_and_EC], FC0_best[mask_nodiag_and_EC], marker='.', color='b', label='SC')
                pp.scatter(FC0_obj.diagonal(), FC0_best.diagonal(), marker= '.', color='c', label='diagonal')
                pp.legend()
                pp.xlabel('FC0 emp')
                pp.ylabel('FC0 model')
                
                
                pp.subplot(gs[1,1])
                pp.scatter(FCtau_obj[mask_nodiag_and_not_EC], FCtau_best[mask_nodiag_and_not_EC], marker='x', color='k', label='not(SC)')
                pp.scatter(FCtau_obj[mask_nodiag_and_EC], FCtau_best[mask_nodiag_and_EC], marker='.', color='b', label='SC')
                pp.scatter(FCtau_obj.diagonal(), FCtau_best.diagonal(), marker= '.', color='c', label='diagonal')
                pp.xlabel('FCtau emp')
                pp.ylabel('FCtau model')

        elif method=='moments':
            n_tau = 2
            ts_emp = X - np.outer(np.ones(n_T), X.mean(0))  # subtract mean
            # empirical covariance (0 and 1 lagged)
            Q_emp = np.zeros([n_tau, self.n_nodes, self.n_nodes])
            for i_tau in range(n_tau):
                Q_emp[i_tau, :, :] = np.tensordot(ts_emp[0:n_T-n_tau,:],ts_emp[i_tau:n_T-n_tau+i_tau,:],axes=(0,0)) / float(n_T-n_tau-1)
            # Jacobian estimate
            J = spl.logm(np.dot(np.linalg.inv(Q_emp[0, :, :]), Q_emp[1, :, :])).T  # WARNING: tau is 1 here (if a different one is used C gets divided by tau)
            if np.any(np.iscomplex(J)):
                J = np.real(J)
                print("Warning: complex values in J; casting to real!")
            # Sigma estimate
            Sigma_best = -np.dot(J, Q_emp[0, :, :])-np.dot(Q_emp[0, :, :], J.T)
            # theoretical covariance
            Q0 = spl.solve_lyapunov(J, -Sigma_best)
            # theoretical 1-lagged covariance
            Qtau = np.dot(Q0, spl.expm(J.T))  # WARNING: tau is 1 here (if a different one is used J.T gets multiplied by tau)
            # average correlation between empirical and theoretical
            d_fit['correlation'] = 0.5 * (stt.pearsonr(Q0.flatten(), Q_emp[0, :, :].flatten())[0] +
                                         stt.pearsonr(Qtau.flatten(), Q_emp[1, :, :].flatten())[0])
            tau_x = -J.diagonal().copy()
            EC_best = np.zeros([N, N])
            EC_best[mask_EC] = J[mask_EC]
            
        elif method=='bayes':
            X = X.T  # here shape is [ROIs, timepoints] to be consistent with Singh paper
            N = X.shape[1]  # number of time steps
            X -= np.outer(X.mean(axis=1), np.ones(N))  # center the time series
            T1 = [np.dot(X[:, i+1:i+2], X[:, i+1:i+2].T) for i in range(N-1)]
            T1 = np.sum(T1, axis=0)
            T2 = [np.dot(X[:, i+1:i+2], X[:, i:i+1].T) for i in range(N-1)]
            T2 = np.sum(T2, axis=0)
            T3 = [np.dot(X[:, i:i+1], X[:, i:i+1].T) for i in range(N-1)]
            T3 = np.sum(T3, axis=0)
        #    T4 = np.dot(X[:, 0:1], X[:, 0:1].T)  # this is actually not used
            LAM_best = np.dot(T2, np.linalg.inv(T3))
            # Kappa_best can be useful for generating samples using (called Sigma in Singh paper)
            # x_(n+1) = dot(LAM, x_n) + dot(sqrt(Kappa_best), Xi_n)
            Kappa_best = (T1 - np.dot(np.dot(T2, np.linalg.inv(T3)), T2.T)) / N
            # J is -lambda in Singh paper
            # WARNING: tau is 1 here (if a different one is used J.T gets multiplied by tau)
            J = spl.logm(LAM_best)
            if not np.all(np.isclose(spl.expm(J), LAM_best, rtol=1e-01)):
                print("Warning: logarithm!")
            if np.any(np.iscomplex(J)):
                J = np.real(J)
                print("Warning: complex values in J; casting to real!")
            # TODO: implement bayes I for Q0 (called c in Singh paper)
            Q0 = T3 / N  # this here is bayes II solution (equivalent to sample covariance)
            Sigma_best = -np.dot(J, Q0)-np.dot(Q0, J.T)
            tau_x = J.diagonal()
            np.fill_diagonal(J, 0)
            EC_best = J
            
        else:
            raise ValueError("method should be 'lyapunov', 'moments' or 'bayes'")

        self.C = EC_best
        self.Sigma = Sigma_best
        self.tau_x = tau_x
        self.d_fit = d_fit

        return self
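A self-contained sketch (not part of the estimator) of the model relation the Lyapunov fit relies on: for a multivariate Ornstein-Uhlenbeck process with Jacobian J and noise covariance Sigma, the zero-lag covariance solves J Q0 + Q0 J.T = -Sigma, which is what spl.solve_lyapunov(J, -Sigma) returns. Assumes numpy as np and scipy.linalg as spl, as in the snippet; the tolerance only reflects sampling noise.

# Hedged sketch: empirical OU covariance vs. the Lyapunov prediction
import numpy as np
import scipy.linalg as spl

rng = np.random.default_rng(0)
N, dt, T = 3, 0.01, 200000
J = -np.eye(N) + 0.1 * rng.random((N, N))        # stable Jacobian
Sigma = np.diag(0.5 + rng.random(N))             # independent noise

x = np.zeros(N)
xs = np.empty((T, N))
noise_scale = np.sqrt(np.diag(Sigma) * dt)
for i in range(T):                               # Euler-Maruyama simulation
    x = x + dt * J.dot(x) + noise_scale * rng.standard_normal(N)
    xs[i] = x

Q0_theory = spl.solve_lyapunov(J, -Sigma)        # model FC0
Q0_emp = np.cov(xs[1000:].T)                     # empirical FC0
print(np.allclose(Q0_theory, Q0_emp, atol=0.1))  # typically True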
Example #11
0
File: lib.py Project: bchaud/isn
 def obsv(a, c=None):
     n, _ = np.shape(a)
     c = np.eye(n) if c is None else c
     return solve_lyapunov(a.T, -c.T.dot(c))
Example #12
0
File: lib.py Project: bchaud/isn
 def ctrl(a, b=None):
     n, _ = np.shape(a)
     b = np.eye(n) if b is None else b
     return solve_lyapunov(a, -b.dot(b.T))
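A quick sketch (not from the source project) tying the two helpers above together, assuming both are available as module-level functions: the controllability Gramian of (a, b) solves the same Lyapunov equation as the observability Gramian of (a.T, b.T), so the two calls agree.

# Hedged duality check for the obsv/ctrl helpers above
import numpy as np

a = np.array([[-1., 1.], [0., -2.]])
b = np.array([[1.], [1.]])
print(np.allclose(ctrl(a, b), obsv(a.T, b.T)))  # True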
Example #13
0
# in which you can specify whether you want left or right eigenvectors.
# To determine controllability you need to calculate the input pole vectors,
# which depend on the left eigenvectors.

# Calculate eigenvectors and pole vectors
val, vec = LA.eig(A, None, 1, 0, 0, 0)
U1 = np.dot(B.T, vec[:, 0].T)
U2 = np.dot(B.T, vec[:, 1].T)

# Set-up controllability Matrix
Con = np.zeros([2, 2])
Con[:, 0] = B.T
Con[:, 1] = (A * B).T
n = lin.matrix_rank(Con)

P = LA.solve_lyapunov(A, -B * B.T)

# Display results
print('\nThe transfer function realization is:')
print('G(s) = ')
print(np.poly1d(G.num[0], variable='s'))
print("----------------")
print(np.poly1d(G.den, variable='s'))

print('\n1) Eigenvalues are: p1 = ', val[0], 'and p2 = ', val[1])
print('   with eigenvectors: q1 = ', vec[:, 0], 'and q2 = ', vec[:, 1])
print('   Input pole vectors are: up1 = ', U1, 'and up2 = ', U2)

print('\n2) The controllability matrix has rank', n, 'and is given as:')
print('  ', Con[0, :], '\n  ', Con[1, :])
Example #14
0
# best distance between model and empirical data
best_dist = 1e10
best_Pearson = 0.

# record model parameters and outputs
dist_FC_hist = np.zeros([n_opt]) * np.nan  # FC error = matrix distance
Pearson_FC_hist = np.zeros([n_opt]) * np.nan  # Pearson corr model/objective

stop_opt = False
i_opt = 0
while not stop_opt:
    # calculate Jacobian of dynamical system
    J = -np.eye(N) / tau_x + EC

    # calculate FC0 and FCtau for model
    FC0 = spl.solve_lyapunov(J, -Sigma)
    FCtau = np.dot(FC0, spl.expm(J.T * tau))

    # calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
    err_FC0 = np.sqrt(np.sum((FC0 - FC0_obj)**2)) / np.sqrt(np.sum(FC0_obj**2))
    err_FCtau = np.sqrt(np.sum(
        (FCtau - FCtau_obj)**2)) / np.sqrt(np.sum(FCtau_obj**2))
    dist_FC_hist[i_opt] = 0.5 * (err_FC0 + err_FCtau)

    # calculate Pearson corr between model and empirical data for FC0 and FC_tau
    Pearson_FC_hist[i_opt] = 0.5 * (
        stt.pearsonr(FC0.reshape(-1), FC0_obj.reshape(-1))[0] +
        stt.pearsonr(FCtau.reshape(-1), FCtau_obj.reshape(-1))[0])

    # best fit given by best Pearson correlation coefficient for both FC0 and FCtau (better than matrix distance)
    if dist_FC_hist[i_opt] < best_dist:
Example #15
0
def minimize(dynMat,
             outMat=None,
             structMat=None,
             covMat=None,
             rankPar=10.,
             **optDict):
    """
        Minimizes   -log(det(X)) + rankPar * ||Z||_*   (nuclear norm of Z)
        subject to  dynMat * X + X * dynMat' + Z = 0
                    structMat .* X - covMat = 0

        Outputs:
            outDict with keys:
            X, Z, Y1, Y2
    """
    #-----------------------------------------------------------------
    # Initialization and Pre-processing..............................
    assert dynMat.ndim == 2
    assert covMat is not None, "I need some statistics to run this optimization problem...."
    assert covMat.ndim == 2

    norm = np.linalg.norm  # Makes life easier later
    minResPrimal = 1.0e05

    # Converting all input arrays into matrices
    # IMPORTANT: * NOW REPRESENTS MATRIX MULTIPLICATION AND NOT ELEMENTWISE MULTIPLICATION
    warn(
        "Be careful with array multiplication. All arrays are now matrices, so A*B is matrix multiplication, not elementwise multiplication."
    )

    # Dimensionality of the state
    nState = dynMat.shape[0]

    if outMat is None:
        warn("No outMat supplied, using identity")
        outMat = np.identity(nState, dtype=dynMat.dtype)
        # Take the state as the output if not specified
    # Dimensionality of output
    nOut = outMat.shape[0]

    if structMat is None:
        warn(
            "No structMat supplied, using non-zero entries of covMat to define one.."
        )
        # Treat all zero entries of covMat as being unknown.
        # It's not ideal, but hey, if you're too lazy to supply a structMat....
        structMat = covMat.copy().astype(bool).astype(int)

    dynMat = np.asmatrix(dynMat)
    outMat = np.asmatrix(outMat)
    covMat = np.asmatrix(covMat)
    structMat = np.asmatrix(structMat)

    # Default values for iterative solution of the optimization problem
    # Documentation on the iterations to be added soon
    stepSize = optDict.get("stepSize", 10.)
    tolPrimal = optDict.get("tolPrimal", 1.0e-06)
    tolDual = optDict.get("tolDual", 1.0e-06)
    iterMax = int(optDict.get("iterMax", 1.0e05))
    printIter = int(optDict.get("printIter", 100))
    if isinstance(stepSize, np.ndarray): stepSize = stepSize.flatten()[0]

    # Initializing X, Z, Y1, and Y2 for the iterations
    # Documentation coming soon

    Z0 = optDict.get("Z0", None)
    X0 = optDict.get("X0", None)
    Y10 = optDict.get("Y10", None)
    Y20 = optDict.get("Y20", None)
    if Z0 is None:
        print("No Z0 supplied, using identity...")
        Z0 = np.identity(nState, dtype=dynMat.dtype)
    if X0 is None:
        print("No X0 supplied, solving Lyapunov equation with Z0")
        X0 = solve_lyapunov(dynMat, -Z0)
    X0 = np.asmatrix(X0)
    Z0 = np.asmatrix(Z0)

    if Y10 is None:
        print("No Y10 supplied, solving adjoing Lyapunov equation")
        Y10 = solve_lyapunov(dynMat.H, X0)
        if not hasattr(Y10, "H"):
            print(
                "Y10 from solve_lyapunov does not have attribute 'H', so it's an ndarray and not a matrix. I've recast it into matrix, but have a look at why this is happening...."
            )
        Y10 = np.asmatrix(Y10)
        Y10 = rankPar * Y10 / norm(Y10,
                                   2)  # Norm returns largest singular value
    else:
        Y10 = np.asmatrix(Y10)

    # Y2 relates to imposing the constraint due to observed covariance stats,
    #   so its size should relate to the dimensionality of the output (not state)
    if Y20 is None:
        Y20 = np.identity(outMat.shape[0], dtype=dynMat.dtype)
    Y20 = np.asmatrix(Y20)
    X = X0
    Z = Z0
    Y1 = Y10
    Y2 = Y20

    stepSize0 = stepSize

    #---------------------------------------------------------------------------------
    #---------------------------------------------------------------------------------
    # AMA method for optimization:
    print(
        "Write decorators for timing and logging (have a look at your bookmarks on Chrome)"
    )
    warn(
        "This code currently uses only method AMA. ADMM from the original matlab function hasn't been implemented."
    )
    # pdb.set_trace()

    # Ignoring some lines from original MATLAB code because I don't see why they're needed (yet)
    evalsLadjY = np.real( np.linalg.eigvals( dynMat.H * Y1 + Y1*dynMat + \
                        outMat.H * np.multiply( structMat, Y2) * outMat ) )
    logDetLadjY = np.sum(np.log(evalsLadjY + 0.j))
    dualY = logDetLadjY - np.trace(covMat * Y2) + nState

    #print("Before starting iterations, evalsLadjY, logDetLadjY, dualY, trace(covMat*Y2) are",evalsLadjY, logDetLadjY, dualY, np.trace(covMat*Y2))

    Istate = np.identity(nState, dtype=dynMat.dtype)  # Identity
    beta = 0.7  # Backtracking parameter
    stepSize1 = stepSize0  # Initial step size
    failIters = []  # Log failed iteration numbers

    print("")
    print("Starting iterations for AMA......")
    print(
        "stepSize_BB  stepSize  tolPrimal   resPrimal  tolDual   abs(dualGap)    iter"
    )

    funPrimalArr = np.zeros(iterMax)
    funDualArr = np.zeros(iterMax)
    resPrimalArr = np.zeros(iterMax)
    dualGapArr = np.zeros(iterMax)

    # Will include time later

    ##=======================================================================================
    # Function to dump data
    def dumpToFile(dumpFileName, dumpX, dumpOptDict):
        if not (dumpFileName.endswith('.h5')
                or dumpFileName.endswith('.hdf5')):
            dumpFileName = dumpFileName + '.hdf5'

        with h5py.File(dumpFileName, "w") as outFile:
            dumpData = outFile.create_dataset("X",
                                              data=dumpX,
                                              compression='gzip')
            for key in dumpOptDict.keys():
                if isinstance(dumpOptDict[key], np.ndarray):
                    outFile.create_dataset(key,
                                           data=dumpOptDict[key],
                                           compression='gzip')
                else:
                    dumpData.attrs[key] = dumpOptDict[key]

    ##=======================================================================================

    for AMAstep in range(iterMax):
        dynMat = np.asmatrix(dynMat)
        outMat = np.asmatrix(outMat)
        structMat = np.asmatrix(structMat)
        Istate = np.asmatrix(Istate)

        # Minimization step for X^{k+1}
        Xnew = np.linalg.solve(
            dynMat.H * Y1 + Y1 * dynMat +
            outMat.H * np.multiply(structMat, Y2) * outMat, Istate)

        # Xnew = np.linalg.inv( dynMat.H * Y1 + Y1 * dynMat +  outMat.H * np.multiply( structMat , Y2) * outMat)
        Xnew = np.asmatrix(Xnew)

        # To get the inverse of a matrix A, apparently np.linalg.solve(A, Identity) is better than np.linalg.inv(A)
        # I'm not sure yet if I need to call this Xnew instead of just X, I'll get back to this later
        Xnew = (Xnew + Xnew.H) / 2.

        eigX = np.real(np.linalg.eigvals(Xnew))
        logDetX = np.sum(np.log(eigX + 0.j))

        # Gradient of the dual function
        gradD1 = dynMat * Xnew + Xnew * dynMat.H
        gradD2 = np.multiply(structMat, outMat * Xnew * outMat.H) - covMat
        #print("|Xnew|,|gradD1|, |gradD2|:",det(Xnew),det(gradD1), det(gradD2))
        #print()

        # Define the primal residual as difference in new statistics from observed (constraints)
        resPrimalNew2 = gradD2
        stepSize = stepSize1

        #if AMAstep == 0:
        #print()
        #print("evalsX, logDetX, |gradD1|, |gradD2| are",eigX, logDetX, det(gradD1), det(gradD2))
        for innerIter in range(100):
            a = rankPar / stepSize
            aTol = 1.0e-01
            if False:
                if a > aTol: a = aTol

            assert isinstance(dynMat, np.matrix) and isinstance(Xnew, np.matrix) \
                and isinstance(Y1, np.matrix)
            # Minimization step for Z^{k+1}
            Wnew = -(dynMat * Xnew + Xnew * dynMat.H + (1. / stepSize) * Y1)

            U, svals, V = np.linalg.svd(Wnew)
            warn("numpy's SVD factors as M=USV, not USV*")
            # The above line differs from the original matlab code because numpy's svd returns the singular values as an array directly, while Matlab returns a diagonal matrix
            if isinstance(svals, np.matrix):
                warn(
                    "The array of singular values is a matrix and not a regular array"
                )
                svals = svals.A
            assert isinstance(U, np.matrix) and isinstance(V, np.matrix)
            svals = np.abs(svals)

            # Singular value thresholding
            if True:
                svalsNew = ((1. - a / np.abs(svals)) *
                            svals) * (np.abs(svals) > a).astype(int)
                svalsNew = np.abs(svalsNew)
                # So what happened there is, for svals>a, sNew = (1 - a/|s|) * s = s-aA
                #   That doesn't make much sense to me, but we'll see how it goes
                #   Singular values should always be positive, so I don't see why all of that extra stuff is needed...
            else:
                svals = np.abs(svals)
                svalsNew = svals.copy()
                svalsNew[svals <= a] = 0.
                svalsNew[svals > a] = svals[svals > a] - a

            #print()
            #print("|Wnew|, a,rankPar, stepSize, svalsNew:", det(Wnew), a,rankPar,stepSize,svalsNew)

            # Update Z
            Znew = U * np.asmatrix(np.diag(svalsNew)) * V
            Znew = (Znew + Znew.H) / 2.

            # Update Y
            resPrimalNew1 = gradD1 + Znew

            # Lagrange multiplier update
            Y1new = Y1 + stepSize * resPrimalNew1
            Y2new = Y2 + stepSize * resPrimalNew2
            Y1new = (Y1new + Y1new.H) / 2.  # Keep them Hermitian
            Y2new = (Y2new + Y2new.H) / 2.

            #print("|Znew|,|Y1new|,|Y2new|:", det(Znew), det(Y1new), det(Y2new))
            assert isinstance(Y1new, np.matrix) and isinstance(Y2new, np.matrix)

            # Eigenvalues of X^{-1} (why though?)
            evalsLadjYnew = np.real(
                np.linalg.eigvals(dynMat.H * Y1new + Y1new * dynMat +
                                  outMat.H * np.multiply(structMat, Y2new) *
                                  outMat))
            logDetLadjYnew = np.sum(np.log(evalsLadjYnew + 0.j))
            dualYnew = logDetLadjYnew - np.trace(covMat * Y2new) + nState
            #print("evals:",evalsLadjYnew)
            #print("logdetLadjYnew, dualYnew:", logDetLadjYnew, dualYnew)
            if np.amin(evalsLadjYnew) < 0.:
                stepSize = stepSize * beta  # Backtrack the step
                #print("Reducing step size coz evals<0.....")
            elif ( dualYnew < dualY + \
                            np.trace( gradD1 *(Y1new - Y1)) + \
                            np.trace( gradD2 *(Y2new - Y2)) - \
                            (0.5/stepSize) * norm(Y1new - Y1, ord='fro')**2 - \
                            (0.5/stepSize) * norm(Y2new - Y2, ord='fro')**2 ):
                # Note: Frobenius norm essentially flattens the array and calculates a vector 2-norm
                stepSize = stepSize * beta
                #print("Reducing step size coz dualYnew < .....")
            else:
                #print("Not reducing the step size...")
                break
                # So this is where we're breaking out of the inner loop
                # I suppose what's happening here is we're checking if the primal residual is acceptable

        # Primal residual
        resPrimalNew1 = dynMat * Xnew + Xnew * dynMat.H + Znew
        resPrimalNew2 = np.multiply(structMat,
                                    outMat * Xnew * outMat.H) - covMat
        resPrimal = np.sqrt(
            norm(resPrimalNew1, ord='fro')**2 +
            norm(resPrimalNew2, ord='fro')**2)
        #print("resPrimal:",resPrimal)

        # Calculating the duality gap
        dualGap = -logDetX + rankPar * np.sum(svalsNew) - dualYnew
        #print("dualGap:",dualGap)

        # Print progress for every 100 outer iterations
        #if AMAstep%100 == 0:
        if resPrimal < 0.9 * minResPrimal:
            minResPrimal = resPrimal
            #if AMAstep%printIter == 0:
            print("%7.2g  %8.3g  %6.2g  %10.4g  %6.2g  %7.2g  %6.4g  %d" %(stepSize1, \
                    stepSize, tolPrimal, resPrimal, tolDual, (dualGap), np.sum(svalsNew), AMAstep) )
            sys.stdout.flush()
        #pdb.set_trace()

        # We start BB stepsize selection now, apparently, whatever that is..
        Xnew1 = np.linalg.solve( \
                dynMat.H * Y1new + Y1new * dynMat + outMat.H * np.multiply( structMat, Y2new) * outMat, \
                Istate)
        Xnew1 = np.asmatrix(Xnew1)
        Xnew1 = (Xnew1 + Xnew1.H) / 2.
        gradD1new = dynMat * Xnew1 + Xnew1 * dynMat.H
        gradD2new = np.multiply(structMat, outMat * Xnew1 * outMat.H) - covMat
        gradD1new = np.asmatrix(gradD1new)
        gradD2new = np.asmatrix(gradD2new)
        Y1new = np.asmatrix(Y1new)
        Y2new = np.asmatrix(Y2new)
        stepSize1 = np.real(  ( norm(Y1new - Y1, ord='fro')**2 + norm(Y2new - Y2, ord='fro')**2 )/ \
                ( np.trace( (Y1new - Y1) * (gradD1 - gradD1new) ) + np.trace( (Y2new - Y2) * (gradD2 - gradD2new) )   ) )
        #print("|Xnew1|, |gradD1new|, |gradD2new|, stepSize1:",det(Xnew1), det(gradD1new), det(gradD2new), stepSize1)

        if (stepSize1 < 0.) or not np.isfinite(stepSize1):
            stepSize1 = stepSize0
            failIters.append(AMAstep)
        #sys.exit()
        # This is the end of the iteration... Apparently
        funPrimalArr[AMAstep] = -np.log(
            np.linalg.det(Xnew + 0.j)) + rankPar * norm(Znew, ord='nuc')
        funDualArr[AMAstep] = np.real(dualYnew)
        resPrimalArr[AMAstep] = resPrimal
        dualGapArr[AMAstep] = np.abs(dualGap)
        # Again, I'll have to include execution time info later

        # Stopping criteria
        if (np.abs(dualGap) < tolDual) and (resPrimal < tolPrimal):
            print("AMA converged to assigned accuracy!")
            print("AMA steps: %d" % AMAstep)
            print("%12.2g  %10.2g  %10.2g  %10.2g  %10.2g  %10.2g  %d" %(stepSize1, \
                    stepSize, tolPrimal, resPrimal, tolDual, np.abs(dualGap), AMAstep) )
            break

        Y1 = Y1new
        Y2 = Y2new
        dualY = dualYnew

    # Assigning an output dictionary
    outDict = {}
    if AMAstep == iterMax - 1:
        outDict['flag'] = 0
    else:
        outDict['flag'] = 1

    outDict['X'] = Xnew
    outDict['Z'] = Znew
    outDict['Y1'] = Y1new
    outDict['Y2'] = Y2new
    outDict['steps'] = AMAstep
    outDict['funPrimalArr'] = funPrimalArr[:AMAstep]
    outDict['funDualArr'] = funDualArr[:AMAstep]
    outDict['dualGapArr'] = dualGapArr[:AMAstep]

    return outDict
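A small sketch (separate from the routine above) of the feasibility property used when initializing X0 from Z0: for a stable dynMat, solving the Lyapunov equation with -Z0 gives an X that satisfies the constraint dynMat * X + X * dynMat' + Z0 = 0 exactly.

# Hedged sketch of the constraint behind the X0 initialization
import numpy as np
from scipy.linalg import solve_lyapunov

dynMat = np.array([[-1., 0.5], [0., -2.]])
Z0 = np.eye(2)
X0 = solve_lyapunov(dynMat, -Z0)
print(np.allclose(dynMat @ X0 + X0 @ dynMat.T + Z0, 0.))  # True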
Example #16
0
 def solveQ(self):
     return solve_lyapunov((transpose(self.W) - self.I), -2 * self.I)
Example #17
0
 def check_continuous_case(self, a, q):
     x = solve_lyapunov(a, q)
     assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
Example #18
0
def optimize(FC0_obj,FC1_obj,tau_x,mask_EC,mask_Sigma):

    N = FC0_obj.shape[0]
    
    # optimization rates (to avoid explosion of activity, Sigma is tuned quicker)
    epsilon_EC = 0.0005
    epsilon_Sigma = 0.05
    
    min_val_EC = 0. # minimal value for tuned EC elements
    max_val_EC = 1. # maximal value for tuned EC elements
    min_val_Sigma = 0.01 # minimal value for tuned Sigma elements
    
    # initial EC
    EC = np.zeros([N,N]) # initial connectivity
    Sigma = np.eye(N)  # initial noise

    # record best fit (matrix distance between model and empirical FC)
    best_dist = 1e10
    
    # scaling coefs for FC0 and FC1
    a0 = np.linalg.norm(FC1_obj) / (np.linalg.norm(FC0_obj) + np.linalg.norm(FC1_obj))
    a1 = 1. - a0

    stop_opt = False
    i_opt = 0
    while not stop_opt:

        # calculate Jacobian of dynamical system
        J = -np.eye(N)/tau_x + EC

        # calculate FC0 and FC1 for model
        FC0 = spl.solve_lyapunov(J,-Sigma)
        FC1 = np.dot(FC0,spl.expm(J.T))

        # matrices of model error
        Delta_FC0 = FC0_obj-FC0
        Delta_FC1 = FC1_obj-FC1

        # calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
        dist_FC_tmp = 0.5*(np.linalg.norm(Delta_FC0)/np.linalg.norm(FC0_obj)+np.linalg.norm(Delta_FC1)/np.linalg.norm(FC1_obj))

        # calculate Pearson correlation between model and empirical data for FC0 and FC_tau
        Pearson_FC_tmp = 0.5*(stt.pearsonr(FC0.reshape(-1),FC0_obj.reshape(-1))[0]+stt.pearsonr(FC1.reshape(-1),FC1_obj.reshape(-1))[0])

        # record best model parameters
        if dist_FC_tmp<best_dist:
            best_dist = dist_FC_tmp
            best_Pearson = Pearson_FC_tmp
            i_best = i_opt
            J_mod_best = np.array(J)
            Sigma_mod_best = np.array(Sigma)
        else:
            stop_opt = i_opt>50

        # Jacobian update
        Delta_J = np.dot(np.linalg.pinv(FC0),a0*Delta_FC0+np.dot(a1*Delta_FC1,spl.expm(-J.T))).T

        # update EC (recurrent connectivity)
        EC[mask_EC] += epsilon_EC * Delta_J[mask_EC]
        EC[mask_EC] = np.clip(EC[mask_EC],min_val_EC,max_val_EC)

        # update Sigma (input variances)
        Delta_Sigma = -np.dot(J,Delta_FC0)-np.dot(Delta_FC0,J.T)
        Sigma[mask_Sigma] += epsilon_Sigma * Delta_Sigma[mask_Sigma]
        Sigma[mask_Sigma] = np.maximum(Sigma[mask_Sigma],min_val_Sigma)

        # check for stop
        if not stop_opt:
            i_opt += 1
        else:
            print('stop at step',i_best,'with best FC dist:',best_dist,'; best FC Pearson:',best_Pearson)

    return (J_mod_best,Sigma_mod_best)
    
Example #19
0
 def _normtail(sig, A, x, C):
     # observability gramian when A is perturbed by sig
     W = solve_lyapunov(A.T + sig*np.eye(len(A)), -C.T.dot(C))
     return np.sqrt(x.dot(W).dot(x.T) / 2 / sig)  # eq (39)
Example #20
0
def matt_mou(ts_emp, mask_EC=None, verbose=0, true_C=None, true_S=None):
    # optimization steps and rate
    n_opt = 10000
    epsilon_EC = 0.0005
    epsilon_Sigma = 0.1
    regul_EC = 0  # 0.5
    regul_Sigma = 0  # 0.001

    print('regularization:', regul_EC, ';', regul_Sigma)

    N = 50  # number of ROIs

    n_tau = 3  # number of time shifts for FC_emp
    v_tau = np.arange(n_tau)
    i_tau_opt = 1  # time shift for optimization

    min_val_EC = 0.  # minimal value for EC
    max_val_EC = 0.2  # maximal value for EC
    min_val_Sigma_diag = 0.  # minimal value for Sigma

    # FC matrix (ts_emp matrix with structure time x ROI index)
    n_T = ts_emp.shape[0]
    ts_emp -= np.outer(np.ones(n_T), ts_emp.mean(0))
    FC_emp = np.zeros([n_tau, N, N])
    for i_tau in range(n_tau):
        FC_emp[i_tau, :, :] = np.tensordot(
            ts_emp[0:n_T - n_tau, :],
            ts_emp[i_tau:n_T - n_tau + i_tau, :],
            axes=(0, 0)) / float(n_T - n_tau - 1)

    # autocovariance time constant
    log_ac = np.log(np.maximum(FC_emp.diagonal(axis1=1, axis2=2), 1e-10))
    lin_reg = np.polyfit(np.repeat(v_tau, N), log_ac.reshape(-1), 1)
    tau_x = -1. / lin_reg[0]
    print('inverse of negative slope (time constant):', tau_x)

    # mask for existing connections for EC and Sigma
    mask_diag = np.eye(N, dtype=bool)
    if mask_EC is None:
        mask_EC = np.logical_not(
            mask_diag)  # all possible connections except self
    mask_Sigma = np.eye(N, dtype=bool)  # independent noise
    #    mask_Sigma = np.ones([N,N],dtype=bool) # coloured noise

    # optimization
    print('*opt*')
    print('i tau opt:', i_tau_opt)
    tau = v_tau[i_tau_opt]

    # objective FC matrices (empirical)
    FC0_obj = FC_emp[0, :, :]
    FCtau_obj = FC_emp[i_tau_opt, :, :]

    coef_0 = np.sqrt(np.sum(FCtau_obj**2)) / (np.sqrt(np.sum(FC0_obj**2)) +
                                              np.sqrt(np.sum(FCtau_obj**2)))
    coef_tau = 1. - coef_0

    # initial network parameters
    EC = np.zeros([N, N])
    Sigma = np.eye(N)  # initial noise

    # best distance between model and empirical data
    best_dist = 1e10
    best_Pearson = 0.

    # record model parameters and outputs
    dist_FC_hist = np.zeros([n_opt]) * np.nan  # FC error = matrix distance
    Pearson_FC_hist = np.zeros([n_opt
                                ]) * np.nan  # Pearson corr model/objective
    dist_C_hist = np.zeros([n_opt]) * np.nan  # FC error = matrix distance
    Pearson_C_hist = np.zeros([n_opt]) * np.nan  # Pearson corr model/objective

    stop_opt = False
    i_opt = 0
    while not stop_opt:
        # calculate Jacobian of dynamical system
        J = -np.eye(N) / tau_x + EC

        # calculate FC0 and FCtau for model
        FC0 = spl.solve_lyapunov(J, -Sigma)
        FCtau = np.dot(FC0, spl.expm(J.T * tau))

        # calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
        err_FC0 = np.sqrt(np.sum(
            (FC0 - FC0_obj)**2)) / np.sqrt(np.sum(FC0_obj**2))
        err_FCtau = np.sqrt(np.sum(
            (FCtau - FCtau_obj)**2)) / np.sqrt(np.sum(FCtau_obj**2))
        dist_FC_hist[i_opt] = 0.5 * (err_FC0 + err_FCtau)

        # calculate Pearson corr between model and empirical data for FC0 and FC_tau
        Pearson_FC_hist[i_opt] = 0.5 * (
            stt.pearsonr(FC0.reshape(-1), FC0_obj.reshape(-1))[0] +
            stt.pearsonr(FCtau.reshape(-1), FCtau_obj.reshape(-1))[0])

        # calculate the error between true and estimated connectivity C if the true is provided (for debugging)
        if not (true_C is None):
            dist_C_hist[i_opt] = np.sqrt(
                ((true_C - EC)**2).sum() / (true_C**2).sum())
            Pearson_C_hist[i_opt] = stt.pearsonr(true_C.flatten(),
                                                 EC.flatten())[0]

        # best fit given by best Pearson correlation coefficient for both FC0 and FCtau (better than matrix distance)
        if dist_FC_hist[i_opt] < best_dist:
            best_dist = dist_FC_hist[i_opt]
            best_Pearson = Pearson_FC_hist[i_opt]
            i_best = i_opt
            EC_best = np.array(EC)
            Sigma_best = np.array(Sigma)
            FC0_best = np.array(FC0)
            FCtau_best = np.array(FCtau)
        else:
            stop_opt = i_opt > 100

        # Jacobian update with weighted FC updates depending on respective error
        Delta_FC0 = (FC0_obj - FC0) * coef_0
        Delta_FCtau = (FCtau_obj - FCtau) * coef_tau
        Delta_J = np.dot(np.linalg.pinv(FC0), Delta_FC0 +
                         np.dot(Delta_FCtau, spl.expm(-J.T * tau))).T / tau

        # update connectivity and noise
        EC[mask_EC] += epsilon_EC * (Delta_J - regul_EC * EC)[mask_EC]
        EC[mask_EC] = np.clip(EC[mask_EC], min_val_EC, max_val_EC)

        Sigma[mask_Sigma] += epsilon_Sigma * (-np.dot(J, Delta_FC0) - np.dot(
            Delta_FC0, J.T) - regul_Sigma)[mask_Sigma]
        Sigma[mask_diag] = np.maximum(Sigma[mask_diag], min_val_Sigma_diag)

        # check whether to end optimization: error no longer improving or max iterations reached
        if stop_opt or i_opt == n_opt - 1:
            stop_opt = True
            print('stop at step', i_opt, 'with best dist', best_dist,
                  ';best FC Pearson:', best_Pearson)
        else:
            if (i_opt) % 20 == 0:
                print('opt step:', i_opt)
                print('current dist FC:', dist_FC_hist[i_opt],
                      '; current Pearson FC:', Pearson_FC_hist[i_opt])
            i_opt += 1

    if verbose == 2:
        plt.figure()
        plt.subplot(121)
        plt.plot(dist_FC_hist, label=r'$Q_{0/\tau}$ error')
        if not (true_C is None):
            plt.plot(dist_C_hist, label='C error')
            plt.legend()
        plt.ylabel('matrix error')
        plt.xlabel('iterations')
        plt.subplot(122)
        plt.plot(Pearson_FC_hist, label=r'$Q_{0/\tau}$ similarity')
        if not (true_C is None):
            plt.plot(Pearson_C_hist, label='C similarity')
            plt.legend()
        plt.ylabel('pearson correlation')
        plt.xlabel('iterations')

    return EC_best, Sigma_best
Example #21
0
we will create the dynamics of a spring-mass-damper system
m \ddot{x} = F - k x - c \dot{x}


"""
GLOBALS.k = 100.
GLOBALS.c = 5.
GLOBALS.m = 1.

k, c, m = GLOBALS.k, GLOBALS.c, GLOBALS.m

A = np.array([[0., 1.], [-k / m, -c / m]])
b = np.array([0., 1. / m])

Q = -np.eye(2)
P = splin.solve_lyapunov(A.transpose(), Q)
#P = np.dot(X,X.transpose())
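# A hedged check (not part of the original script): P defines a Lyapunov
# function V(x) = x' P x whose derivative along trajectories of xdot = A x is
# x' (A' P + P A) x = -||x||^2, by construction of the solve above.
x_test = np.array([1.0, -0.5])   # arbitrary test state (hypothetical)
dVdt = x_test @ (A.T @ P + P @ A) @ x_test
print(np.isclose(dVdt, -(x_test @ x_test)))  # True, since A' P + P A = -I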


def UVtrajectory(t, u0, v0):
    X0 = np.array([u0, v0])
    Phi_t = splin.expm(t * A)
    return np.dot(Phi_t, X0)


if __name__ == '__main__':

    plt.close('all')

    if False:
        xs = np.linspace(-2, 2, 100)
Example #22
0
 def grammian2(self):
     A, B, C = self.state_space()
     Q = -B @ B.T
     W = solve_lyapunov(A.toarray(), Q.toarray())
     return W
Example #23
0
 def check_continuous_case(self, a, q):
     x = solve_lyapunov(a, q)
     assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
Example #24
0
def compute_lyapunov_solution(b):
    rhs = np.zeros(b.shape)
    rhs[0, 0] = 1.
    z_i = la.solve_lyapunov(b, rhs)
    return z_i
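A quick verification sketch (not from the source project), assuming scipy.linalg is imported as la as in the snippet: the returned z satisfies b z + z b.T = rhs, with rhs zero everywhere except rhs[0, 0] = 1.

# Hedged residual check for a small stable b
import numpy as np
import scipy.linalg as la

b = np.array([[-1., 0.3], [0., -2.]])
z = compute_lyapunov_solution(b)
rhs = np.zeros_like(b)
rhs[0, 0] = 1.
print(np.allclose(b @ z + z @ b.T, rhs))  # True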