Example #1
    def _disc_dQ_lyapunov(
        self,
        dt: np.ndarray,
        QQ: np.ndarray,
        dA: np.ndarray,
        dQQ: np.ndarray,
        Ad: np.ndarray,
        dAd: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Discretization partial derivative of the diffusion matrix with the Lyapunov equation

        Args:
            dt: Sampling time
            QQ: Diffusion matrix
            dA: Jacobian state matrix
            dQQ: Jacobian diffusion matrix
            Ad: Discrete state matrix
            dAd: Jacobian discrete state matrix

        Returns:
            2-element tuple containing
                - **QQd**: Process noise covariance matrix
                - **dQQd**: Jacobian process noise covariance matrix
        """

        N = dA.shape[0]

        QQd = self._disc_Q_lyapunov(QQ, Ad)
        eq = (-dA @ QQd - QQd @ dA.swapaxes(1, 2) - dQQ + dAd @ QQ @ Ad.T +
              Ad @ dQQ @ Ad.T + Ad @ QQ @ dAd.swapaxes(1, 2))
        dQQd = np.asarray(
            [solve_continuous_lyapunov(self.A, eq[i, :, :]) for i in range(N)])

        return QQd, dQQd
Example #2
    def model_covariance(self, tau=0.0):
        """
        Calculates theoretical (lagged) covariances of the model given the
        parameters (forward step). Notice that this is not the empirical
        covariance matrix as estimated from simulated time series.

        Parameters
        ----------
        tau : scalar
            The time lag for which to calculate the covariance; it can be
            positive or negative.

        Returns
        -------
        FC : ndarray of rank-2
            The (lagged) covariance matrix.
        """

        # Calculate the zero-lag covariance Q0 by solving the Lyapunov equation
        Q0 = spl.solve_continuous_lyapunov(self.J.T, -self.Sigma)
        # Calculate the effect of the lag (still valid for tau = 0.0)
        if tau >= 0.0:
            return np.dot(Q0, spl.expm(tau * self.J))
        else:
            return np.dot(spl.expm(-tau * self.J.T), Q0)
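The same recipe works outside the class; a minimal standalone sketch (J and Sigma below are illustrative), solving the Lyapunov equation for the zero-lag covariance and then applying the matrix exponential for the lag:

import numpy as np
import scipy.linalg as spl

rng = np.random.default_rng(0)
J = -np.eye(3) + 0.1 * rng.standard_normal((3, 3))  # stable toy Jacobian
Sigma = np.eye(3)                                   # input covariance

# Zero-lag covariance: J.T @ Q0 + Q0 @ J = -Sigma
Q0 = spl.solve_continuous_lyapunov(J.T, -Sigma)
assert np.allclose(Q0, Q0.T)

# Lagged covariance for tau >= 0, as in model_covariance above
tau = 0.5
FC_tau = Q0 @ spl.expm(tau * J)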
Example #3
def computeB(Fp, Fpp, Fppp, Qp, Qpp, V, W, D, C):
    FppC = symetric_tensor(np.tensordot(Fpp, C, 2))
    FpppD = symetric_tensor(np.tensordot(Fppp, D, 3))
    QpV = symetric_tensor(np.tensordot(Qp, V, 1))
    QppW = symetric_tensor(np.tensordot(Qpp, W, 2))

    b = -(FppC + FpppD / 3 + QpV + QppW / 2)
    return (solve_continuous_lyapunov(Fp, b))
Example #4
def efficient_solver(wm, gm, k, d0, g0, ep, g2, n, recalib=True, checks=False):
    ##initialisation of some data arrays for speed##
    r = np.zeros((4), dtype=np.complex128)
    ##finding x0##
    a = 0.25 * g2**2 * (gm**2 + 4 * wm**2)
    b = -((g0 * g2 * (gm**2 + 4 * wm**2)) / 2**0.5)
    c = (1 / 2) * g0**2 * (gm**2 + 4 * wm**2) + (1 / 2) * d0 * g2 * (gm**2 +
                                                                     4 * wm**2)
    d = -((d0 * g0 * (gm**2 + 4 * wm**2)) / 2**0.5)
    e = 2 * ep**2 * g2 * wm + (1 / 4) * d0**2 * (gm**2 + 4 * wm**2) + (
        1 / 16) * k**2 * (gm**2 + 4 * wm**2)
    f = (-2**0.5) * ep**2 * g0 * wm
    roots_x0 = np.roots(np.array([a, b, c, d, e, f],
                                 dtype=np.complex128))  #numpy roots function
    r[2] = roots_x0[-1]  #the last root returned is always the real one; this is x0
    x = r[2]  #plain alias of r[2], no copy is made
    ##finding p0, x1, p1, and putting it all into a numpy array##
    r[3] = 0.5 * gm * wm**-1 * x  #p0
    ##
    ##effective detuning, frequency and coupling (needed below even when recalib is False)##
    d_eff = wm + 2**0.5 * g0 * x - g2 * x**2
    w_eff = wm + (wm**2 + 0.25 * k**2)**-1 * 2 * g2 * ep**2
    g_eff = 2 * g2 * x - 2**0.5 * g0
    if recalib:
        d0 = wm
    ##
    r[0] = (2**-0.5 * -2 * d_eff * ep * (d_eff**2 + k**2 * 0.25)**-1)  #x1 (Q0)
    r[1] = (2**-0.5 * -1 * k * ep * (d_eff**2 + k**2 * 0.25)**-1)  # p1 (P1)
    ##solving covariance matrix
    H = np.array([[hb * d_eff, 0, hb * r[0] * g_eff, 0],
                  [0, hb * d_eff, hb * r[1] * g_eff, 0],
                  [hb * r[0] * g_eff, hb * g_eff * r[1], hb * w_eff, 0],
                  [0, 0, 0, hb * wm]])
    gamma = 0.5 * np.array([[k, -k * i, 0, 0], [k * i, k, 0, 0],
                            [0, 0, gm * (2 * n + 1), -1 * i * gm],
                            [0, 0, gm * i, gm * (2 * n + 1)]])
    gamma_A = 0.5 * (gamma - np.transpose(gamma))
    gamma_S = 0.5 * (gamma + np.transpose(gamma))
    A = -1 * i / hb * np.dot(W, H) + np.dot(W, gamma_A)
    C = -1 * np.dot(W, np.dot(gamma_S, W))
    cov = linalg.solve_continuous_lyapunov(A, C)
    if checks:
        rs_test = 2 * cov + W
        print('RS-Matrix-eigenvalues:')
        print(np.linalg.eig(rs_test)[0])
        print('B-Matrix-eigenvalues:')
        print(np.linalg.eig(np.transpose(A))[0])
    return np.real(r), np.real(cov)
Example #5
    def _disc_Q_lyapunov(self, QQ: np.ndarray, Ad: np.ndarray) -> np.ndarray:
        """Discretization diffusion matrix by Lyapunov equation

        Args:
            QQ: Diffusion matrix
            Ad: Discrete state matrix

        Returns:
            Process noise covariance matrix
        """

        return solve_continuous_lyapunov(self.A, -QQ + Ad @ QQ @ Ad.T)
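The one-liner above rests on the identity Qd = int_0^dt expm(A s) QQ expm(A.T s) ds: the integrand has derivative A expm(A s) QQ expm(A.T s) + expm(A s) QQ expm(A.T s) A.T, so integrating from 0 to dt gives A Qd + Qd A.T = Ad QQ Ad.T - QQ, exactly the equation passed to the solver. A quadrature cross-check, sketched with toy matrices:

import numpy as np
from scipy.linalg import expm, solve_continuous_lyapunov

A = np.array([[-1.0, 0.5], [0.0, -2.0]])
Q = np.array([[1.0, 0.2], [0.2, 0.5]])
dt = 0.1
Ad = expm(A * dt)

# closed form, exactly as in _disc_Q_lyapunov
Qd = solve_continuous_lyapunov(A, -Q + Ad @ Q @ Ad.T)

# midpoint-rule quadrature of the defining integral
n = 2000
s = (np.arange(n) + 0.5) * (dt / n)
Qd_num = sum(expm(A * si) @ Q @ expm(A.T * si) for si in s) * (dt / n)
assert np.allclose(Qd, Qd_num, atol=1e-8)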
Example #6
def solve_lyap_dense(A, E, B, trans=False, options=None):
    """Compute the solution of a Lyapunov equation.

    See :func:`pymor.algorithms.lyapunov.solve_lyap_dense` for a
    general description.

    This function uses `scipy.linalg.solve_continuous_lyapunov`, which
    is a dense solver for Lyapunov equations with E=I.

    .. note::
        If E is not `None`, the problem will be reduced to a standard
        continuous-time algebraic Lyapunov equation by inverting E.

    Parameters
    ----------
    A
        The operator A as a 2D |NumPy array|.
    E
        The operator E as a 2D |NumPy array| or `None`.
    B
        The operator B as a 2D |NumPy array|.
    trans
        Whether the first operator in the Lyapunov equation is
        transposed.
    options
        The solver options to use (see
        :func:`lyap_dense_solver_options`).

    Returns
    -------
    X
        Lyapunov equation solution as a |NumPy array|.
    """

    _solve_lyap_dense_check_args(A, E, B, trans)
    options = _parse_options(options, lyap_dense_solver_options(), 'scipy',
                             None, False)

    if options['type'] == 'scipy':
        if E is not None:
            A = solve(E, A) if not trans else solve(E.T, A.T).T
            B = solve(E, B) if not trans else solve(E.T, B.T).T
        if trans:
            A = A.T
            B = B.T
        X = solve_continuous_lyapunov(A, -B.dot(B.T))
    else:
        raise ValueError(
            f"Unexpected Lyapunov equation solver ({options['type']}).")

    return X
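Stripped of the pymor plumbing, the scipy branch solves the standard equation A X + X A.T + B B.T = 0; a quick standalone residual check with illustrative matrices:

import numpy as np
from scipy.linalg import solve_continuous_lyapunov

A = np.array([[-2.0, 1.0], [0.0, -1.0]])
B = np.array([[1.0], [1.0]])

X = solve_continuous_lyapunov(A, -B @ B.T)
# the residual of A X + X A.T + B B.T should vanish to machine precision
assert np.allclose(A @ X + X @ A.T + B @ B.T, 0.0, atol=1e-12)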
Example #7
    def build_ctle(affine_linearizable_dynamics, K, Q):
        """Build AffineQuadCLF from affine and linearizable dynamics with gain matrix by solving continuous-time Lyapunov equation (CTLE).

        CTLE is A' * P + P * A = -Q, where A = F - G * K is the closed-loop matrix.

        Inputs:
        Affine and linearizable dynamics, affine_linearizable_dynamics: AffineDynamics, LinearizableDynamics
        Positive-definite state cost matrix, Q: numpy array
        Gain matrix, K: numpy array
        """

        A = affine_linearizable_dynamics.closed_loop_linear_system(K)
        P = solve_continuous_lyapunov(A.T, -Q)
        return AffineQuadCLF(affine_linearizable_dynamics, P)
Example #8
def disc_diffusion_lyap(A: np.ndarray, Q: np.ndarray, Ad: np.ndarray) -> np.ndarray:
    """Discretize the diffusion matrix by solving the Lyapunov equation

    Args:
        A: State matrix
        Q: Diffusion matrix
        Ad: Discrete state matrix

    Returns:
        Process noise covariance matrix
    """
    if not Q.any():
        return Q
    return solve_continuous_lyapunov(A, -Q + Ad @ Q @ Ad.T)
Example #9
File: scipy.py Project: pymor/pymor
def solve_lyap_dense(A, E, B, trans=False, options=None):
    """Compute the solution of a Lyapunov equation.

    See :func:`pymor.algorithms.lyapunov.solve_lyap_dense` for a
    general description.

    This function uses `scipy.linalg.solve_continuous_lyapunov`, which
    is a dense solver for Lyapunov equations with E=I.

    .. note::
        If E is not `None`, the problem will be reduced to a standard
        continuous-time algebraic Lyapunov equation by inverting E.

    Parameters
    ----------
    A
        The operator A as a 2D |NumPy array|.
    E
        The operator E as a 2D |NumPy array| or `None`.
    B
        The operator B as a 2D |NumPy array|.
    trans
        Whether the first operator in the Lyapunov equation is
        transposed.
    options
        The solver options to use (see
        :func:`lyap_dense_solver_options`).

    Returns
    -------
    X
        Lyapunov equation solution as a |NumPy array|.
    """

    _solve_lyap_dense_check_args(A, E, B, trans)
    options = _parse_options(options, lyap_dense_solver_options(), 'scipy', None, False)

    if options['type'] == 'scipy':
        if E is not None:
            A = solve(E, A) if not trans else solve(E.T, A.T).T
            B = solve(E, B) if not trans else solve(E.T, B.T).T
        if trans:
            A = A.T
            B = B.T
        X = solve_continuous_lyapunov(A, -B.dot(B.T))
    else:
        raise ValueError(f"Unexpected Lyapunov equation solver ({options['type']}).")

    return X
Example #10
    def controllability_gramian(self):
        """Controllability gramian matrix."""

        from scipy import linalg

        B = self.B.evaluate()
        Q = -B @ B.T

        # Find Wc given A @ Wc + Wc @ A.T = Q
        # Wc > 0 if (A, B) is controllable
        Wc = linalg.solve_continuous_lyapunov(self.A.evaluate(), Q)

        # Wc should be symmetric positive semi-definite
        Wc = (Wc + Wc.T) / 2

        return Matrix(Wc)
Example #11
    def observability_gramian(self):
        """Observability gramian matrix."""

        from scipy import linalg

        C = self.C.evaluate()
        Q = -C.T @ C

        # Find Wo given A.T @ Wo + Wo @ A = Q
        # Wo > 0 if (C, A) is observable
        Wo = linalg.solve_continuous_lyapunov(self.A.evaluate().T, Q)

        # Wo should be symmetric positive semi-definite
        Wo = (Wo + Wo.T) / 2

        return Matrix(Wo)
Example #12
    def solve_cov(self, r):
        self.gamma = 0.5 * np.array(
            [[self.k, -self.k * i, 0, 0], [self.k * i, self.k, 0, 0],
             [0, 0, self.gm * (2 * self.n + 1), -1 * i * self.gm],
             [0, 0, self.gm * i, self.gm * (2 * self.n + 1)]])
        self.gamma_A = 0.5 * (self.gamma - np.transpose(self.gamma))
        self.gamma_S = 0.5 * (self.gamma + np.transpose(self.gamma))
        self.H = np.array([[hb * self.deff, 0, hb * self.r[0] * self.g_eff, 0],
                           [0, hb * self.deff, hb * self.r[1] * self.g_eff, 0],
                           [hb * self.r[0] * self.g_eff,
                            hb * self.g_eff * self.r[1], hb * self.w_eff, 0],
                           [0, 0, 0, hb * self.wm]])
        self.A = -1 * i / hb * np.dot(W, self.H) + np.dot(W, self.gamma_A)
        self.C = -1 * np.dot(W, np.dot(self.gamma_S, W))
        self.cov = linalg.solve_continuous_lyapunov(self.A, self.C)
        return self.cov
Example #13
    def createDiscreteTimeSys(self, Ts):
        """
        Builds the discrete time state-space system in canonical form, using numerator and
        denominator coefficients of the companion form

        INPUT:  Sampling time Ts for the discretization

        OUTPUT: matrix A,C,V,Q: state matrix, output matrix,
                state variance matrix (solution of lyapunov equation), 
                and measurement variance matrix
        """

        # numerator and denominator coefficients of the PSD
        num_coeff, den_coeff = self.psd()

        F, state_dim = self.state_matrix()  # State matrix
        A = self.state_transition(Ts)  # State transition matrix = expm(F*Ts)
        G = np.eye(1, state_dim, k=state_dim - 1).T  # Input matrix

        # output matrix
        C = np.zeros((1, state_dim))[0]
        C[0:np.max(num_coeff.shape)] = num_coeff

        # state variance as solution of the lyapunov equation
        V = solve_continuous_lyapunov(F, -np.matmul(G, G.conj().T))

        # discretization of the noise matrix
        Q = np.zeros(state_dim)
        Ns = 10000
        t = Ts / Ns
        if state_dim == 1:
            for n in np.arange(t, Ts + t, step=t):
                Q = Q + t * np.exp(F * n) * np.dot(G,
                                                   G.conj().T) * np.exp(
                                                       F * n).conj().T
        else:
            for n in np.arange(t, Ts + t, step=t):
                # reuse the cached state transition instead of recomputing expm(F*n)
                Fn = self.state_transition(n)
                Q = Q + t * Fn @ (G @ G.conj().T) @ Fn.conj().T

        return A, C, V, Q
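The loop above approximates Q = int_0^Ts expm(F s) G G.T expm(F.T s) ds with Ns rectangle steps. A closed-form alternative, not used by this code, is Van Loan's block-matrix method; a minimal sketch for a dense F:

import numpy as np
from scipy.linalg import expm

def disc_noise_van_loan(F, G, Ts):
    """Qd = int_0^Ts expm(F s) @ G @ G.T @ expm(F.T s) ds via Van Loan (1978)."""
    n = F.shape[0]
    M = np.zeros((2 * n, 2 * n))
    M[:n, :n] = -F
    M[:n, n:] = G @ G.T
    M[n:, n:] = F.T
    Phi = expm(M * Ts)
    Ad = Phi[n:, n:].T        # equals expm(F * Ts)
    Qd = Ad @ Phi[:n, n:]
    return Ad, Qd

A single expm of a 2n x 2n block matrix replaces the Ns-step loop and returns the discrete state matrix Ad for free.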
Example #14
def controllability_gramian_continuous(mat_a, mat_b):
    """
    Calculates the controllabilty gramian of a continuous time LTI system.
    
    Parameters
    ----------
    mat_a : ndarray
        The state matrix.
    mat_b : ndarray
        The input matrix.
        
    Returns
    -------
    mat_wc : ndarray
        The controllability gramian.
    """
    mat_q = -mat_b @ mat_b.T
    mat_wc = linalg.solve_continuous_lyapunov(mat_a, mat_q)
    return mat_wc
Example #15
def observability_gramian_continuous(mat_a, mat_c):
    """
    Calculates the observability gramian of a continuous time LTI system.
    
    Parameters
    ----------
    mat_a : ndarray
        The state matrix.
    mat_c : ndarray
        The output matrix.
        
    Returns
    -------
    mat_wo : ndarray
        The observability gramian.
    """
    mat_q = -mat_c.T @ mat_c
    mat_wo = linalg.solve_continuous_lyapunov(mat_a.T, mat_q)
    return mat_wo
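A short usage sketch for the two helpers above (the system below is an illustrative stable, controllable and observable pair, so both Gramians come out positive definite):

import numpy as np
from scipy import linalg

A = np.array([[0.0, 1.0], [-2.0, -3.0]])  # eigenvalues -1 and -2
B = np.array([[0.0], [1.0]])
C = np.array([[1.0, 0.0]])

Wc = controllability_gramian_continuous(A, B)
Wo = observability_gramian_continuous(A, C)

# symmetrize before the eigenvalue test to absorb round-off
assert np.all(np.linalg.eigvalsh((Wc + Wc.T) / 2) > 0)
assert np.all(np.linalg.eigvalsh((Wo + Wo.T) / 2) > 0)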
Example #16
    def calc_Q0_from_param(self, J, Sigma):
        """
        Calculates the zero-lag covariance matrix from a Jacobian J and an
        input covariance matrix Sigma

        Parameters
        ----------
        J : ndarray of rank 2
            The Jacobian matrix. Shape [n x n]
        Sigma : ndarray of rank 2
            The input covariance matrix. Shape [n x n]

        Returns
        -------
        Q0 : ndarray of rank 2
            The theoretical zero-lag covariance matrix.
            Shape [n, n]
        """
        Q0 = spl.solve_continuous_lyapunov(J.T, -Sigma)
        return Q0
Example #17
    def build_ctle(feedback_linearizable_output, K, Q):
        """Build a quadratic CLF from a FeedbackLinearizableOutput with auxiliary control gain matrix, by solving the continuous-time Lyapunov equation (CTLE).

        CTLE is

        A_cl' P + P A_cl = -Q

        for specified Q.

        Outputs a QuadraticControlLyapunovFunction.

        Inputs:
        Auxiliary control gain matrix, K: numpy array (k, p)
        Positive definite matrix for CTLE, Q: numpy array (p, p)
        """

        A = feedback_linearizable_output.closed_loop_dynamics(K)
        P = solve_continuous_lyapunov(A.T, -Q)
        alpha = min(eigvals(Q)) / max(eigvals(P))
        return QuadraticControlLyapunovFunction(feedback_linearizable_output,
                                                P, alpha)
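A standalone sketch of the same CTLE recipe, with a toy closed-loop matrix standing in for the FeedbackLinearizableOutput machinery (F, G and K below are illustrative):

import numpy as np
from numpy.linalg import eigvals
from scipy.linalg import solve_continuous_lyapunov

F = np.array([[0.0, 1.0], [0.0, 0.0]])  # double integrator
G = np.array([[0.0], [1.0]])
K = np.array([[1.0, 2.0]])              # stabilizing gain
A_cl = F - G @ K                        # closed-loop eigenvalues -1, -1

Q = np.eye(2)
P = solve_continuous_lyapunov(A_cl.T, -Q)  # A_cl' P + P A_cl = -Q
alpha = min(eigvals(Q).real) / max(eigvals(P).real)
# V(x) = x' P x then satisfies Vdot <= -alpha * V along closed-loop trajectories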
Example #18
def lna_covariance(rfp, gfp, atc, iptg, param, lamb, size):
    '''
    This function estimates the covariance matrix of the rfp and gfp measurements
    taken at a specific point using the LNA approximation.

    This covariance matrix is not scaled by the system size by default; the user
    must do this to the result, i.e. cov/size.

    This function computes the covariance of a single-celled observation. If many
    single-celled observations are taken at the same input point, the covariance
    matrix entries will be smaller for the mean of the observations than for the
    individual values (law of large numbers). The covariance matrix for a given
    single-celled observation (as computed here) can be scaled by the number of
    observations taken to get the covariance matrix for the mean.
    '''
    #define the steady-state functions for each species
    rfp_func = lambda gfp, atc: param[0] + param[1] / (1 + (
        (gfp / param[2]) * (1 / (1 + (atc / param[3])**2.0)))**2.0)
    gfp_func = lambda rfp, iptg: param[4] + param[5] / (1 + (
        (rfp / param[6]) * (1 / (1 + (iptg / param[7])**2.0)))**2.0)
    #define the derivatives of the steady state-functions w.r.t. the opposite species
    rfp_d_gfp = lambda gfp,atc: -2*param[1]*gfp*((1/param[2])*(1/(1+(atc/param[3])**2.0)))**2.0 \
                                    /(1+(gfp**2)*((1/param[2])*(1/(1+(atc/param[3])**2.0)))**2.0)
    gfp_d_rfp = lambda rfp,iptg: -2*param[5]*rfp*((1/param[6])*(1/(1+(iptg/param[7])**2.0)))**2.0 \
                                    /(1+(rfp**2)*((1/param[6])*(1/(1+(iptg/param[7])**2.0)))**2.0)

    #compute the A (damping) matrix
    A = np.array([[-lamb, lamb * rfp_d_gfp(gfp, atc)],
                  [lamb * gfp_d_rfp(rfp, iptg), -lamb]])
    #compute the B (fluctuation) matrix; each diagonal entry sums the production
    #and degradation rates, so the gfp row mirrors the rfp row with gfp_func
    B = np.array([[lamb * (rfp_func(gfp, atc) + rfp), 0],
                  [0, lamb * (gfp_func(rfp, iptg) + gfp)]])

    #solve the sylvester/lyapunov equation (this may be ill-conditioned near where
    #a branch disappears, i.e. a bifurcation point)
    C = ln.solve_continuous_lyapunov(A, -B)

    return C
Example #19
def disc_d_diffusion_lyap(
    A: np.ndarray, Q: np.ndarray, Ad: np.ndarray, dA: np.ndarray, dQ: np.ndarray, dAd: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    """Discretize diffusion matrix and its derivative with the Lyapunov equation

    Args:
        A: State matrix
        Q: Diffusion matrix
        Ad: Discrete state matrix
        dA: Derivative state matrix
        dQ: Derivative diffusion matrix
        dAd: Derivative discrete state matrix

    Returns:
        2-element tuple containing
            - **Qd**: Process noise covariance matrix
            - **dQd**: Derivative process noise covariance matrix
    """
    if not Q.any():
        # structurally zero diffusion: the discrete diffusion and its derivatives vanish
        return Q, np.zeros_like(dA)

    nj, nx, _ = dA.shape
    Qd = disc_diffusion_lyap(A, Q, Ad)
    dQd = np.zeros((nj, nx, nx))
    tmp = (
        -dA @ Qd
        - Qd @ dA.swapaxes(1, 2)
        - dQ
        + dAd @ Q @ Ad.T
        + Ad @ dQ @ Ad.T
        + Ad @ Q @ dAd.swapaxes(1, 2)
    )
    for n in range(nj):
        if tmp[n].any():
            dQd[n] = solve_continuous_lyapunov(A, tmp[n])

    return Qd, dQd
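A finite-difference sanity check of the propagated derivative, assuming disc_diffusion_lyap and disc_d_diffusion_lyap above are in scope; scipy's expm_frechet supplies the matching derivative of the discrete state matrix:

import numpy as np
from scipy.linalg import expm, expm_frechet

dt, eps = 0.1, 1e-6
A = np.array([[-1.0, 0.3], [0.0, -2.0]])
Q = np.array([[1.0, 0.0], [0.0, 0.5]])
dA = np.array([[[0.1, 0.0], [0.0, 0.2]]])  # dA/dtheta, shape (1, nx, nx)
dQ = np.array([[[0.0, 0.0], [0.0, 1.0]]])  # dQ/dtheta

Ad, dAd0 = expm_frechet(A * dt, dA[0] * dt)  # Ad and its directional derivative
Qd, dQd = disc_d_diffusion_lyap(A, Q, Ad, dA, dQ, np.array([dAd0]))

# forward finite difference in the parameter theta
Qd_eps = disc_diffusion_lyap(A + eps * dA[0], Q + eps * dQ[0],
                             expm((A + eps * dA[0]) * dt))
assert np.allclose((Qd_eps - Qd) / eps, dQd[0], atol=1e-4)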
Example #20
    def fit_moments(self, Q0_obj, Q1_obj, mask_C=None):
        """
        Estimation of MOU parameters (connectivity C, noise covariance Sigma,
        and time constant tau_x) with moments method.

        Parameters
        ----------
        Q0_obj : ndarray of rank 2
            The zero-lag covariance matrix of the time series to fit.
        Q1_obj : ndarray of rank 2
            The 1-lag covariance matrix of the time series to fit.
        mask_C : boolean ndarray of rank-2 (optional)
            Mask of known non-zero values for connectivity matrix, for example
            estimated by DTI.

        Returns
        -------
        J : ndarray of rank 2
            The estimated Jacobian. Shape [n_nodes, n_nodes]
        Sigma : ndarray of rank 2
            Estimated noise covariance. Shape [n_nodes, n_nodes]
        d_fit : dictionary
            A dictionary with diagnostics of the fit. Keys are: ['iterations',
            'distance', 'correlation'].
        """
        # Jacobian estimate
        inv_Q0 = np.linalg.inv(Q0_obj)
        J = spl.logm(np.dot(inv_Q0, Q1_obj))
        # Sigma estimate from the Lyapunov relation J.T @ Q0 + Q0 @ J = -Sigma
        Sigma = -np.dot(J.conjugate().T, Q0_obj) - np.dot(Q0_obj, J)

        # masks for existing positions
        mask_diag = np.eye(self.n_nodes, dtype=bool)
        if mask_C is None:
            # Allow all possible connections to be tuned except self-connections (on diagonal)
            mask_C = np.logical_not(mask_diag)

        # cast to real matrices
        if np.any(np.iscomplex(J)):
            print("Warning: complex values in J; casting to real!")
        J_best = np.real(J)
        J_best[np.logical_not(np.logical_or(mask_C, mask_diag))] = 0
        if np.any(np.iscomplex(Sigma)):
            print("Warning: complex values in Sigma; casting to real!")
        Sigma_best = np.real(Sigma)

        # model theoretical covariances with real J and Sigma
        Q0 = spl.solve_continuous_lyapunov(J_best.T, -Sigma_best)
        Q1 = np.dot(Q0, spl.expm(J_best))

        # Calculate error between model and empirical data for Q0 and FC_tau (matrix distance)
        dist_Q0 = np.linalg.norm(Q0 - Q0_obj) / np.linalg.norm(Q0_obj)
        dist_Qtau = np.linalg.norm(Q1 - Q1_obj) / np.linalg.norm(Q1_obj)
        self.d_fit['distance'] = 0.5 * (dist_Q0 + dist_Qtau)

        # Average correlation between empirical and theoretical
        Pearson_Q0 = stt.pearsonr(Q0.reshape(-1), Q0_obj.reshape(-1))[0]
        Pearson_Qtau = stt.pearsonr(Q1.reshape(-1), Q1_obj.reshape(-1))[0]
        self.d_fit['correlation'] = 0.5 * (Pearson_Q0 + Pearson_Qtau)

        # Save the results and return
        self.J = J_best  # matrix
        self.Sigma = Sigma_best  # matrix

        return self
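The moments inversion rests on two identities of the underlying model: Q1 = Q0 expm(J) and J.T Q0 + Q0 J = -Sigma. A synthetic round-trip sketch (toy J and Sigma) that recovers both:

import numpy as np
import scipy.linalg as spl

rng = np.random.default_rng(1)
n = 4
J_true = -np.eye(n) + 0.1 * rng.standard_normal((n, n))  # stable Jacobian
Sigma_true = np.eye(n)

# forward model: theoretical zero-lag and one-lag covariances
Q0 = spl.solve_continuous_lyapunov(J_true.T, -Sigma_true)
Q1 = Q0 @ spl.expm(J_true)

# moments inversion, as in fit_moments
J_est = spl.logm(np.linalg.inv(Q0) @ Q1)
Sigma_est = -(J_est.conj().T @ Q0 + Q0 @ J_est)
assert np.allclose(J_est.real, J_true, atol=1e-6)
assert np.allclose(Sigma_est.real, Sigma_true, atol=1e-6)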
Example #21
    def fit_LO(self,
               Q_obj,
               i_tau_opt=1,
               mask_C=None,
               mask_Sigma=None,
               epsilon_C=0.0001,
               epsilon_Sigma=0.01,
               regul_C=0.0,
               regul_Sigma=0.0,
               min_val_C=0.0,
               max_val_C=1.0,
               min_val_Sigma_diag=0.0,
               max_iter=10000,
               min_iter=10,
               **kwargs):
        """
        Estimation of MOU parameters (connectivity C, noise covariance Sigma,
        and time constant tau_x) with Lyapunov optimization as in: Gilson et al.
        Plos Computational Biology (2016).

        Parameters
        ----------
        Q_obj : ndarray
            The covariance matrix
        i_tau_opt : integer (optional)
            Index of the time lag used for the optimization (must be a
            positive integer).
        mask_C : boolean ndarray of rank-2 (optional)
            Mask of known non-zero values for connectivity matrix, for example
            estimated by DTI. By default, all connections are allowed (except
            self-connections)
        mask_Sigma : boolean ndarray of rank-2 (optional)
            Mask of known non-zero values for the input covariance matrix
            (by default diagonal).
        epsilon_C : float (optional)
            Learning rate for connectivity (this should be about n_nodes times
            smaller than epsilon_Sigma).
        epsilon_Sigma : float (optional)
            Learning rate for Sigma (this should be about n_nodes times larger
            than epsilon_C).
        regul_C : float (optional)
            Regularization parameter for connectivity. Try first a value of 0.5.
        regul_Sigma : float (optional)
            Regularization parameter for Sigma. Try first a value of 0.001.
        min_val_C : float (optional)
            Minimum value to bound connectivity estimate. This should be zero
            or slightly negative (too negative limit can bring to an inhibition
            dominated system). If the empirical covariance has many negative
            entries then a slightly negative limit can improve the estimation
            accuracy.
        max_val_C : float (optional)
            Maximum value to bound connectivity estimate. This is useful to
            avoid large weight that make the system unstable. If the estimated
            connectivity saturates toward this value (it usually doesn't happen)
            it can be increased.
        max_iter : integer (optional)
            Maximum number of optimization steps. If the final number of
            iterations reaches this maximum, the algorithm has not converged.
        min_iter : integer (optional)
            Minimum number of optimization steps before testing for the end of
            the optimization (increase of model error).

        Returns
        -------
        J : ndarray of rank 2
            The estimated Jacobian. Shape [n_nodes, n_nodes]
        Sigma : ndarray of rank 2
            Estimated noise covariance. Shape [n_nodes, n_nodes]
        d_fit : dictionary
            A dictionary with diagnostics of the fit. Keys are: ['iterations',
            'distance', 'correlation'].
        """
        # TODO: make better graphics (deal with axes separation, etc.)

        if (not type(i_tau_opt) == int) or (i_tau_opt <= 0):
            raise ValueError('i_tau_opt must be a positive integer')

        # Objective FC matrices (empirical)
        Q0_obj = Q_obj[0]
        Qtau_obj = Q_obj[i_tau_opt]

        # Autocovariance time constant (exponential decay)
        log_ac = np.log(np.maximum(Q_obj.diagonal(axis1=1, axis2=2), 1e-10))
        v_tau = np.arange(Q_obj.shape[0], dtype=float)
        lin_reg = np.polyfit(np.repeat(v_tau, self.n_nodes),
                             log_ac.reshape(-1), 1)
        tau_obj = -1.0 / lin_reg[0]

        # coefficients to balance the model error between Q0 and Qtau
        norm_Q0_obj = np.linalg.norm(Q0_obj)
        norm_Qtau_obj = np.linalg.norm(Qtau_obj)

        # mask for existing connections for EC and Sigma
        mask_diag = np.eye(self.n_nodes, dtype=bool)
        if mask_C is None:
            # Allow all possible connections to be tuned except self-connections (on diagonal)
            mask_C = np.logical_not(mask_diag)
        if mask_Sigma is None:
            # Independent noise (no cross-covariances for Sigma)
            mask_Sigma = np.eye(self.n_nodes, dtype=bool)

        # Initialise network and noise. Give initial parameters
        C = np.zeros([self.n_nodes, self.n_nodes], dtype=float)
        tau_x = np.copy(tau_obj)
        Sigma = np.eye(self.n_nodes, dtype=float)

        # Best distance between model and empirical data
        best_dist = 1e10
        best_Pearson = 0.0

        # Arrays to record model parameters and outputs
        # model error = matrix distance between FC matrices
        dist_Q_hist = np.zeros([max_iter], dtype=float)
        # Pearson correlation between model and objective FC matrices
        Pearson_Q_hist = np.zeros([max_iter], dtype=float)

        # identity matrix
        id_mat = np.eye(self.n_nodes, dtype=float)

        # run the optimization process
        stop_opt = False
        i_iter = 0
        while not stop_opt:

            # calculate Jacobian of dynamical system
            J = -id_mat / tau_x + C

            # Calculate Q0 and Qtau for model
            Q0 = spl.solve_continuous_lyapunov(J.T, -Sigma)
            Qtau = np.dot(Q0, spl.expm(J * i_tau_opt))

            # difference matrices between model and objectives
            Delta_Q0 = Q0_obj - Q0
            Delta_Qtau = Qtau_obj - Qtau

            # Calculate error between model and empirical data for Q0 and FC_tau (matrix distance)
            dist_Q0 = np.linalg.norm(Delta_Q0) / norm_Q0_obj
            dist_Qtau = np.linalg.norm(Delta_Qtau) / norm_Qtau_obj
            dist_Q_hist[i_iter] = 0.5 * (dist_Q0 + dist_Qtau)

            # Calculate corr between model and empirical data for Q0 and FC_tau
            Pearson_Q0 = stt.pearsonr(Q0.reshape(-1), Q0_obj.reshape(-1))[0]
            Pearson_Qtau = stt.pearsonr(Qtau.reshape(-1),
                                        Qtau_obj.reshape(-1))[0]
            Pearson_Q_hist[i_iter] = 0.5 * (Pearson_Q0 + Pearson_Qtau)

            # Best fit given by best Pearson correlation coefficient
            # for both Q0 and Qtau (better than matrix distance)
            if dist_Q_hist[i_iter] < best_dist:
                best_dist = dist_Q_hist[i_iter]
                best_Pearson = Pearson_Q_hist[i_iter]
                J_best = np.copy(J)
                Sigma_best = np.copy(Sigma)
            else:
                # wait at least min_iter optimization steps before stopping
                stop_opt = i_iter > min_iter

            # Jacobian update with weighted FC updates depending on respective error
            Delta_J = np.dot( np.linalg.pinv(Q0), Delta_Q0 ) + np.dot( Delta_Q0, np.linalg.pinv(Q0) ) \
                    + np.dot( np.linalg.pinv(Qtau), Delta_Qtau ) + np.dot( Delta_Qtau, np.linalg.pinv(Qtau) )

            # Update effective connectivity matrix (regularization is L2)
            C[mask_C] += epsilon_C * (Delta_J - regul_C * C)[mask_C]
            C[mask_C] = np.clip(C[mask_C], min_val_C, max_val_C)

            # Update noise matrix Sigma (regularization is L2)
            Delta_Sigma = -np.dot(J.T, Delta_Q0) - np.dot(Delta_Q0, J)
            Sigma[mask_Sigma] += epsilon_Sigma * (
                Delta_Sigma - regul_Sigma * Sigma)[mask_Sigma]
            Sigma[mask_diag] = np.maximum(Sigma[mask_diag], min_val_Sigma_diag)

            # Check if max allowed number of iterations have been reached
            if i_iter >= max_iter - 1:
                stop_opt = True
                print(
                    "Optimization did not converge. Maximum number of iterations arrived."
                )
            # Check if iteration has finished or still continues
            if stop_opt:
                self.d_fit['iterations'] = i_iter + 1
                self.d_fit['distance'] = best_dist
                self.d_fit['correlation'] = best_Pearson
                self.d_fit['distance history'] = dist_Q_hist
                self.d_fit['correlation history'] = Pearson_Q_hist
            else:
                i_iter += 1

        # Save the results and return
        self.J = J_best  # matrix
        self.Sigma = Sigma_best  # matrix

        return self
Example #22
def computeW(Fp, Q):
    return (solve_continuous_lyapunov(Fp, -Q))
Example #23
    def initializeGainMatrix(self):
        """ Calculate the LQR gain matrix and matrices for adaptive controller.

        """

        # ---------------|LQR Controller|-------------------------
        # Use the results of linearization to create a state-space model

        n_p = 12  # number of states
        m = 4  # number of integral error terms

        # ----------------- Your Code Here ----------------- #
        # Compute the continuous A, B, Bc, C, D and
        # discretized A_d, B_d, Bc_d, C_d, D_d, for the computation of LQR gain
        Ap = np.zeros((12, 12))
        Ap[0:6, 6:] = np.eye(6)
        Ap[6, 4] = self.g
        Ap[7, 3] = -self.g
        Bp = np.zeros((12, 4))
        Bp[8, 0] = 1 / self.m
        Bp[9, 1] = 1 / self.Ix
        Bp[10, 2] = 1 / self.Iy
        Bp[11, 3] = 1 / self.Iz
        Cp = np.zeros((4, 12))
        Cp[:3, 0:3] = np.eye(3)
        Cp[3, 5] = 1

        Aupper = np.hstack((Ap, np.zeros((12, 4))))
        Alower = np.hstack((Cp, np.zeros((4, 4))))
        At = np.vstack((Aupper, Alower))
        Bt = np.vstack((Bp, np.zeros((4, 4))))
        Bc = np.vstack((np.zeros((12, 4)), -np.eye(4)))
        BtBc = np.hstack((Bt, Bc))
        Ct = np.hstack((Cp, np.zeros((4, 4))))
        Dt = np.zeros((4, 8))

        delT = 0.01
        CT_sys = signal.StateSpace(At, BtBc, Ct, Dt)
        DT_sys = CT_sys.to_discrete(delT)
        A_dt = DT_sys.A
        B_dt = DT_sys.B
        A_d = A_dt
        B_td = B_dt[:, :4]
        B_d = B_td
        Bc_d = B_dt[:, 4:]
        B = Bt
        A = At
        # ----------------- Your Code Ends Here ----------------- #

        # Record the matrix for later use
        self.B = B  # continuous version of B
        self.A_d = A_d  # discrete version of A
        self.B_d = B_d  # discrete version of B
        self.Bc_d = Bc_d  # discrete version of Bc

        # -----------------    Example code     ----------------- #
        max_pos = 15.0
        max_ang = 0.2 * self.pi
        max_vel = 6.0
        max_rate = 0.015 * self.pi
        max_eyI = 3.

        max_states = np.array([
            0.1 * max_pos, 0.1 * max_pos, max_pos, max_ang, max_ang, max_ang,
            0.5 * max_vel, 0.5 * max_vel, max_vel, max_rate, max_rate,
            max_rate, 0.1 * max_eyI, 0.1 * max_eyI, 1 * max_eyI, 0.1 * max_eyI
        ])

        max_inputs = np.array(
            [0.2 * self.U1_max, self.U1_max, self.U1_max, self.U1_max])

        Q = np.diag(1 / max_states**2)
        R = np.diag(1 / max_inputs**2)
        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable values for Q and R (state and control weights)
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.

        # ----------------- Your Code Ends Here ----------------- #

        # solve for LQR gains
        [K, _, _] = dlqr(A_d, B_d, Q, R)
        self.Kbl = -K

        [K_CT, _, _] = lqr(A, B, Q, R)
        Kbl_CT = -K_CT

        # initialize adaptive controller gain to baseline LQR controller gain
        self.K_ad = self.Kbl.T

        # -----------------    Example code     ----------------- #
        self.Gamma = 3e-3 * np.eye(16)

        Q_lyap = np.copy(Q)
        Q_lyap[0:3, 0:3] *= 30
        Q_lyap[6:9, 6:9] *= 150
        Q_lyap[14, 14] *= 2e-3
        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable value for Gamma matrix and Q_lyap
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.

        # ----------------- Your Code Ends Here ----------------- #

        A_m = A + self.B @ Kbl_CT
        self.P = solve_continuous_lyapunov(A_m.T, -Q_lyap)
Example #24
    def initializeGainMatrix(self):
        """ Calculate the LQR gain matrix and matrices for adaptive controller.

        """

        # ---------------|LQR Controller|-------------------------
        # Use the results of linearization to create a state-space model

        delT = self.timestep*1e-3
        g = self.g
        m = self.m
        Ix = self.Ix
        Iy = self.Iy
        Iz = self.Iz
        n_p = 12  # number of states
        n_i = 4   # number of integral error terms

        # ----------------- Your Code Here ----------------- #
        # Compute the continuous A, B, Bc, C, D and
        # discretized A_d, B_d, Bc_d, C_d, D_d, for the computation of LQR gain
        A = np.zeros((16, 16))
        A[0,6] = 1; A[1,7] = 1; A[2,8] = 1; A[3,9] = 1; A[4,10] = 1; A[5,11] = 1
        A[6,4] = g; A[7,3] = -g
        A[12,0] = 1
        A[13,1] = 1
        A[14,2] = 1
        A[15,5] = 1

        B = np.zeros((16, 4))
        B[8,0] = 1 / m; B[9,1] = 1 / Ix; B[10,2] = 1 / Iy; B[11,3] = 1 / Iz

        Bc = np.zeros((16,4))
        Bc[12,0] = -1; Bc[13,1] = -1; Bc[14,2] = -1; Bc[15,3] = -1

        concentrate_B = np.concatenate((B, Bc), axis=1)

        C = np.zeros((4,16))
        C[0,0] = 1; C[1,1] = 1; C[2,2] = 1; C[3,5] = 1

        D = np.zeros((4,8))



        sys_Con = signal.StateSpace(A,concentrate_B,C,D)
        sys_Dis = sys_Con.to_discrete(delT)
        A_d = sys_Dis.A
        B_d = sys_Dis.B[:,0:4]
        Bc_d = sys_Dis.B[:,4:8]
        C_d = sys_Dis.C
        D_d = sys_Dis.D
        # ----------------- Your Code Ends Here ----------------- #

        # Record the matrix for later use
        self.B = B  # continuous version of B
        self.A_d = A_d  # discrete version of A
        self.B_d = B_d  # discrete version of B
        self.Bc_d = Bc_d  # discrete version of Bc

        # -----------------    Example code     ----------------- #
        # max_pos = 15.0
        # max_ang = 0.2 * self.pi
        # max_vel = 6.0
        # max_rate = 0.015 * self.pi
        # max_eyI = 3.

        # max_states = np.array([0.1 * max_pos, 0.1 * max_pos, max_pos,
        #                     max_ang, max_ang, max_ang,
        #                     0.5 * max_vel, 0.5 * max_vel, max_vel,
        #                     max_rate, max_rate, max_rate,
        #                     0.1 * max_eyI, 0.1 * max_eyI, 1 * max_eyI, 0.1 * max_eyI])

        # max_inputs = np.array([0.2 * self.U1_max, self.U1_max, self.U1_max, self.U1_max])

        # Q = np.diag(1/max_states**2)
        # R = np.diag(1/max_inputs**2)
        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable values for Q and R (state and control weights)
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.
        max_pos = 15.0
        max_ang = 0.2 * self.pi
        max_vel = 6.0
        max_rate = 0.015 * self.pi
        max_eyI = 3.0

        max_states = np.array([0.1 * max_pos, 0.1 * max_pos, max_pos,
                            max_ang, max_ang, max_ang,
                            0.5 * max_vel, 0.5 * max_vel, max_vel,
                            max_rate, max_rate, max_rate,
                            0.1 * max_eyI, 0.1 * max_eyI, 1 * max_eyI, 0.1 * max_eyI])

        max_inputs = np.array([0.2 * self.U1_max, self.U1_max, self.U1_max, self.U1_max])

        Q = np.diag(1/max_states**2)
        R = np.diag(1/max_inputs**2)

        # ----------------- Your Code Ends Here ----------------- #

        # solve for LQR gains
        [K, _, _] = dlqr(A_d, B_d, Q, R)
        self.Kbl = -K

        [K_CT, _, _] = lqr(A, B, Q, R)
        Kbl_CT = -K_CT

        # initialize adaptive controller gain to baseline LQR controller gain
        self.K_ad = self.Kbl.T

        # -----------------    Example code     ----------------- #
        # self.Gamma = 3e-3 * np.eye(16)

        # Q_lyap = np.copy(Q)
        # Q_lyap[0:3,0:3] *= 30
        # Q_lyap[6:9,6:9] *= 150
        # Q_lyap[14,14] *= 2e-3
        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable value for Gamma matrix and Q_lyap
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.
        self.Gamma = 3e-3 * np.eye(16)

        Q_lyap = np.copy(Q)
        Q_lyap[0:3,0:3] *= 30
        Q_lyap[6:9,6:9] *= 150
        Q_lyap[14,14] *= 2e-3

        # ----------------- Your Code Ends Here ----------------- #

        A_m = A + self.B @ Kbl_CT
        self.P = solve_continuous_lyapunov(A_m.T, -Q_lyap)
Example #25
def weight_stability(t,
                     n,
                     eta,
                     init_params,
                     jacobians,
                     ntk,
                     init_preds,
                     Y,
                     continuous=False,
                     without_sgd=True,
                     l2_reg_coef=0.0,
                     large_model_regime=False,
                     model=None,
                     dataset=None,
                     return_change_vectors=True,
                     **kwargs):
    """
    :param without_sgd: if without_sgd = True, then only ||w1-w2|| will be returned,
                        otherwise (w1-w2)^T H Sigma^{-1} (w1-w2).
    """
    if l2_reg_coef > 0:
        ntk = ntk + l2_reg_coef * torch.eye(
            ntk.shape[0], dtype=torch.float, device=ntk.device)

    ntk_inv = torch.inverse(ntk)
    old_weights = get_weights_at_time_t(t=t,
                                        eta=eta,
                                        init_params=init_params,
                                        jacobians=jacobians,
                                        ntk=ntk,
                                        ntk_inv=ntk_inv,
                                        init_preds=init_preds,
                                        Y=Y,
                                        continuous=continuous,
                                        large_model_regime=large_model_regime,
                                        model=model,
                                        dataset=dataset,
                                        **kwargs)

    steady_state_inv_cov = None
    if not without_sgd:
        if large_model_regime:
            raise ValueError("SGD formula works only for small models")

        # compute the SGD noise covariance matrix at the end
        assert (model is not None) and (dataset is not None)
        with utils.SetTemporaryParams(model=model, params=old_weights):
            sgd_cov = get_sgd_covariance_full(model=model,
                                              dataset=dataset,
                                              cpu=False,
                                              **kwargs)
            # add small amount of isotropic Gaussian noise to make sgd_cov invertible
            sgd_cov += 1e-10 * torch.eye(
                sgd_cov.shape[0], device=sgd_cov.device, dtype=torch.float)

        # now we compute H Sigma^{-1}
        jacobians_cat = [
            v.view((v.shape[0], -1)) for k, v in jacobians.items()
        ]
        jacobians_cat = torch.cat(jacobians_cat,
                                  dim=1)  # (n_samples * n_outputs, n_params)
        H = torch.mm(jacobians_cat.T, jacobians_cat) + l2_reg_coef * torch.eye(
            jacobians_cat.shape[1], device=ntk.device, dtype=torch.float)
        # steady_state_inv_cov = torch.mm(H, torch.inverse(sgd_cov))
        with utils.Timing(description="Solving the Lyapunov equation"):
            steady_state_cov = solve_continuous_lyapunov(
                a=utils.to_numpy(H), q=utils.to_numpy(sgd_cov))
        steady_state_cov = torch.tensor(steady_state_cov,
                                        dtype=torch.float,
                                        device=ntk.device)
        # add small amount of isotropic Gaussian noise to make steady_state_cov invertible
        steady_state_cov += 1e-10 * torch.eye(steady_state_cov.shape[0],
                                              device=steady_state_cov.device,
                                              dtype=torch.float)
        steady_state_inv_cov = torch.inverse(steady_state_cov)

    change_vectors = []
    change_quantities = []
    n_outputs = init_preds.shape[-1]
    for sample_idx in tqdm(range(n)):
        example_indices = [i for i in range(n) if i != sample_idx]
        example_output_indices = []
        for i in example_indices:
            example_output_indices.extend(
                range(i * n_outputs, (i + 1) * n_outputs))

        new_ntk = ntk.clone()[example_output_indices]
        new_ntk = new_ntk[:, example_output_indices]

        new_ntk_inv = misc.update_ntk_inv(ntk=ntk,
                                          ntk_inv=ntk_inv,
                                          keep_indices=example_output_indices)

        new_init_preds = init_preds[example_indices]
        new_Y = Y[example_indices]

        if not large_model_regime:
            new_jacobians = dict()
            for k, v in jacobians.items():
                new_jacobians[k] = v[example_output_indices]
        else:
            new_jacobians = None

        new_dataset = Subset(dataset, example_indices)

        new_weights = get_weights_at_time_t(
            t=t,
            eta=eta * n / (n - 1),
            init_params=init_params,
            jacobians=new_jacobians,
            ntk=new_ntk,
            ntk_inv=new_ntk_inv,
            init_preds=new_init_preds,
            Y=new_Y,
            continuous=continuous,
            large_model_regime=large_model_regime,
            model=model,
            dataset=new_dataset,
            **kwargs)

        total_change = 0.0

        param_changes = dict()
        for k in old_weights.keys():
            param_changes[k] = (new_weights[k] -
                                old_weights[k]).cpu()  # to save GPU memory

        if return_change_vectors:
            change_vectors.append(param_changes)

        if without_sgd:
            for k in old_weights.keys():
                total_change += torch.sum(param_changes[k]**2)
        else:
            param_changes = [v.flatten() for k, v in param_changes.items()]
            param_changes = torch.cat(param_changes, dim=0)

            total_change = torch.mm(
                param_changes.view((1, -1)),
                torch.mm(steady_state_inv_cov.cpu(), param_changes.view(-1,
                                                                        1)))

        change_quantities.append(total_change)

    return change_vectors, change_quantities
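The Lyapunov solve above computes a stationary covariance: for linearized dynamics dw = -H w dt driven by noise with covariance rate C, the steady state S satisfies H S + S H.T = C. A minimal sketch with stand-in H and C:

import numpy as np
from scipy.linalg import solve_continuous_lyapunov

rng = np.random.default_rng(2)
n = 5
M = rng.standard_normal((n, n))
H = M @ M.T + n * np.eye(n)   # stand-in positive-definite "Hessian"
C = np.eye(n)                 # stand-in gradient-noise covariance

S = solve_continuous_lyapunov(H, C)   # solves H @ S + S @ H.T = C
assert np.allclose(H @ S + S @ H.T, C, atol=1e-9)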
Example #26
def measure_group_stability(model, measure, prefix='n_groups_', **kwargs):
    Nf = model.results['n'][-1].copy()

    # alive=(Nf>10**-5 *np.mean(Nf) )
    alive = (Nf > measure['n_death'] * 1.1)

    if not np.sum(alive):
        return

    if not prefix + 'members' in measure:
        measure_groups(model, measure, prefix=prefix, **kwargs)
    r = model.data['growth'].matrix.copy()
    Aij = model.find_data(role='interactions').matrix.copy()
    if 'selfint' in model.data:
        D = model.data['selfint'].matrix.copy()
    else:
        D = np.ones(r.shape)
    if np.max(D) == 0:
        D[:] = 1
    Aij = (Aij.T / D).T
    r /= D

    mwhere = deepcopy(measure[prefix + 'members'])
    S = np.array([len(i) for i in mwhere])

    Smean = np.mean(S)

    conn, mu, sigma, sigrow, gamma, Amean, Astd, Asym = group_interactions(
        Smean, Aij, mwhere, return_all=1)

    from scipy.linalg import inv, solve_continuous_lyapunov
    Alive = Aij[np.ix_(alive, alive)]
    Slive = np.sum(alive)
    Nlive = Nf[alive]
    Dlive = D[alive]
    B = (Alive - np.eye(Slive))
    DN = np.diag(Nlive)
    diag = np.diag(Nlive * Dlive)

    if 0 and not model.find_data(role='threshold'):
        J = np.dot(diag, B)
    else:
        #FUNCTIONAL RESPONSE
        # t=model.find_data(role='threshold')
        # q=model.find_data(role='exponent')
        # if not len(q):
        #     q=1
        ref = model.get_dx(0, model.get_labeled_result(idx=-1))['n'].matrix
        refx = model.get_labeled_result(idx=-1)
        dN = np.diag(0.01 * Nf)

        def adding(d, v):
            d2 = deepcopy(d)
            d2['n'] = d['n'] + v
            return d2

        J = np.array([
            (model.get_dx(0, adding(refx, dN[i]))['n'].matrix - ref) /
            (np.ones(len(Nf)) * dN[i, i]) for i in range(len(Nf))
        ]).T
        J[np.isnan(J)] = 0
        J = J[np.ix_(alive, alive)]

    # Jrel=np.dot(np.dot(np.diag(1./Nlive),B),np.diag(Nlive))
    # Jrel = np.dot(J,np.diag(1./Nlive))
    Jrel = np.dot(np.dot(np.diag(1. / Nlive**2 / Dlive), J), np.diag(Nlive))
    # code_debugger()
    invJ = inv(J)
    Press = -invJ  #*Nlive

    PressRel = -inv(Jrel)

    # code_debugger()
    measure[prefix + 'Pressrel'] = PressRel
    measure[prefix + 'Press'] = Press

    walive = list(np.where(alive)[0])
    livemwhere = [[walive.index(i) for i in m if i in walive] for m in mwhere]
    Pgamma, Pmean, Pstd, Psym = group_interactions(Smean,
                                                   Press,
                                                   livemwhere,
                                                   return_all=1,
                                                   remove_diag=1)[-4:]
    measure[prefix + 'Pressmean'] = Pmean
    measure[prefix + 'Pressstd'] = Pstd
    measure[prefix + 'Presssym'] = Pgamma
    Vgamma, Vmean, Vstd, Vsym = group_interactions(Smean,
                                                   PressRel,
                                                   livemwhere,
                                                   return_all=1,
                                                   remove_diag=1)[-4:]
    measure[prefix + 'Vmean'] = Vmean
    measure[prefix + 'Vstd'] = Vstd
    measure[prefix + 'Vsym'] = Vgamma
    Vmean_self = Vmean.copy()
    # np.fill_diagonal(Vmean_self,0)
    Vmean_self += np.diag([
        getmean(np.diag(PressRel)[m]) / len(m) if len(m) else 0
        for m in livemwhere
    ])  #V adjusted so that diagonal reflects self-control
    measure[prefix + 'Vmean_self'] = Vmean_self

    def make_vec(i, S):
        vec = np.zeros(S)
        vec[i] = 1
        return vec

    Var = np.array([
        solve_continuous_lyapunov(J, -diag * make_vec(m, Slive))
        for m in livemwhere
    ])
    for iv, V in enumerate(Var):
        measure[prefix + 'Var_{}'.format(iv)] = np.array(
            [[np.sum(V[np.ix_(m1, m2)]) for m2 in livemwhere]
             for m1 in livemwhere])

    Var_rel = np.array([
        solve_continuous_lyapunov(Jrel, -diag * make_vec(m, Slive))
        for m in livemwhere
    ])
    for iv, V in enumerate(Var_rel):
        measure[prefix + 'Varrel_{}'.format(iv)] = np.array(
            [[np.sum(V[np.ix_(m1, m2)]) for m2 in livemwhere]
             for m1 in livemwhere])

    conn, mu, sigma, sigrow, gamma, Amean, Astd, Asym = group_interactions(
        Smean, Aij, mwhere, return_all=1)

    def repl(A):
        slive = [len(i) for i in livemwhere]
        return np.concatenate([np.ones(si) * a for a, si in zip(A, slive)])

    feedback = 1 - np.array([
        np.dot(repl(Amean[i]), np.dot(Press, repl(Amean[:, i])))
        for i in range(len(livemwhere))
    ])
Example #27
control, in_vecs, c_matrix = state_controllability(A, B)

# Question: Why does A.transpose give the same eigenvectors as the book
# and not plain A??
# Answer: The eig function in the numpy library only calculates the
# right-hand eigenvectors. The scipy library provides an eig function in
# which you can specify whether you want the left or right eigenvectors.
# To determine controllability you need to calculate the input pole
# vectors, which depend on the left eigenvectors.

# Calculate eigenvalues and left eigenvectors for the pole vectors
val, vec = LA.eig(A, left=True, right=False)

n = lin.matrix_rank(c_matrix)

P = LA.solve_continuous_lyapunov(A, -B * B.T)


# Display results
G_tf = G.to_tf()
print('\nThe transfer function realization is:')
print('G(s) = ')
print(np.poly1d(G_tf.num[0], variable='s'))
print("----------------")
print(np.poly1d(G_tf.den, variable='s'))

print('\n1) Eigenvalues are: p1 = ', val[0], 'and p2 = ', val[1])
print('   with eigenvectors: q1 = ', vec[:, 0], 'and q2 = ', vec[:, 1])
print('   Input pole vectors are: up1 = ', in_vecs[0], 'and up2 = ', in_vecs[1])

print('\n2) The controllability matrix has rank', n, 'and is given as:')
Example #28
    def initializeGainMatrix(self):
        """ Calculate the LQR gain matrix and matrices for adaptive controller.

        """

        # ---------------|LQR Controller|-------------------------
        # Use the results of linearization to create a state-space model

        n_p = 12  # number of states
        m = 4  # number of integral error terms
        delT = 1e-3 * self.timestep

        # ----------------- Your Code Here ----------------- #
        # Compute the continuous A, B, Bc, C, D and
        # discretized A_d, B_d, Bc_d, C_d, D_d, for the computation of LQR gain

        A = np.zeros((16, 16))
        B = np.zeros((16, 4))
        C = np.zeros((4, 16))
        D = np.zeros((4, 4))

        for i in range(0, 6):
            A[i][i + 6] = 1
        A[6][4] = self.g
        A[7][3] = -self.g
        B[8][0] = 1 / self.m
        B[9][1] = 1 / self.Ix
        B[10][2] = 1 / self.Iy
        B[11][3] = 1 / self.Iz

        Bc = np.zeros((12, 4))
        Bc = np.vstack((Bc, -np.eye(4)))
        Bcon = np.concatenate((B, Bc), axis=-1)
        Dcon = np.zeros((4, 8))

        for i in range(0, 4):
            C[i][i] = 1

        # for i in range(12,16):
        for i in range(0, 4):
            A[i + 12][i] = 1

        CT = signal.StateSpace(A, Bcon, C, Dcon)
        DT = CT.to_discrete(delT)

        # DT = signal.StateSpace(A, B, C, D, dt=delT)

        A_d = DT.A
        B_d = DT.B[:, :4]
        Bc_d = DT.B[:, 4:]
        C_d = DT.C
        D_d = DT.D[:, :4]

        # ----------------- Your Code Ends Here ----------------- #

        # Record the matrix for later use
        self.B = B  # continuous version of B
        self.A_d = A_d  # discrete version of A
        self.B_d = B_d  # discrete version of B
        self.Bc_d = Bc_d  # discrete version of Bc

        # -----------------    Example code     ----------------- #
        max_pos = 15.0
        max_ang = 0.2 * self.pi
        max_vel = 6.0
        max_rate = 0.015 * self.pi
        max_eyI = 3.

        max_states = np.array([
            0.1 * max_pos, 0.1 * max_pos, max_pos, max_ang, max_ang, max_ang,
            0.5 * max_vel, 0.5 * max_vel, max_vel, max_rate, max_rate,
            max_rate, 0.1 * max_eyI, 0.1 * max_eyI, 1 * max_eyI, 0.1 * max_eyI
        ])

        max_inputs = np.array(
            [0.2 * self.U1_max, self.U1_max, self.U1_max, self.U1_max])

        # Q = np.diag(1/max_states**2)*22
        Q = np.diag(1 / max_states**2)

        # R = np.diag(1/max_inputs**2)*80
        R = np.diag(1 / max_inputs**2) * 0.5

        # Q = np.eye(16)*0.5
        # R = np.eye(4)*100

        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable values for Q and R (state and control weights)
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.

        # ----------------- Your Code Ends Here ----------------- #

        # solve for LQR gains
        [K, _, _] = dlqr(A_d, B_d, Q, R)
        self.Kbl = -0.8 * K

        [K_CT, _, _] = lqr(A, B, Q, R)

        Kbl_CT = -K_CT
        # print("Kbl_CT size\n")
        # print(np.shape(Kbl_CT))

        # initialize adaptive controller gain to baseline LQR controller gain
        self.K_ad = self.Kbl.T

        # -----------------    Example code     ----------------- #

        # self.Gamma = 3e-3 * np.eye(16)

        # Q_lyap = np.copy(Q)
        # Q_lyap[0:3,0:3] *= 30
        # Q_lyap[6:9,6:9] *= 150
        # Q_lyap[14,14] *= 2e-3

        # -----------------  Example code Ends ----------------- #
        # ----------------- Your Code Here ----------------- #
        # Come up with reasonable value for Gamma matrix and Q_lyap
        # The example code above is a good starting point; feel free to use it or write your own.
        # Tune them to get better performance.

        self.Gamma = 3e-3 * np.eye(16)

        Q_lyap = np.copy(Q)
        Q_lyap[0:3, 0:3] *= 30
        Q_lyap[6:9, 6:9] *= 150
        Q_lyap[14, 14] *= 2e-3

        # ----------------- Your Code Ends Here ----------------- #

        A_m = A + self.B @ Kbl_CT
        print(np.shape(A_m))

        self.P = solve_continuous_lyapunov(A_m.T, -Q_lyap)
Example #29
    def check_continuous_case(self, a, q):
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(
            np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
Example #30
def solve_lyap(A, E, B, trans=False, options=None):
    """Find a factor of the solution of a Lyapunov equation.

    Returns factor :math:`Z` such that :math:`Z Z^T` is approximately
    the solution :math:`X` of a Lyapunov equation (if E is `None`).

    .. math::
        A X + X A^T + B B^T = 0

    or generalized Lyapunov equation

    .. math::
        A X E^T + E X A^T + B B^T = 0.

    If trans is `True`, then it solves (if E is `None`)

    .. math::
        A^T X + X A + B^T B = 0

    or

    .. math::
        A^T X E + E^T X A + B^T B = 0.

    This uses the `scipy.linalg.solve_continuous_lyapunov` method.
    It is only applicable to the standard Lyapunov equation (E = I).
    Furthermore, it can only solve medium-sized dense problems and
    assumes access to the matrix data of all operators.

    Parameters
    ----------
    A
        The |Operator| A.
    E
        The |Operator| E or `None`.
    B
        The |Operator| B.
    trans
        If the dual equation needs to be solved.
    options
        The |solver_options| to use (see :func:`lyap_solver_options`).

    Returns
    -------
    Z
        Low-rank factor of the Lyapunov equation solution, |VectorArray|
        from `A.source`.
    """
    _solve_lyap_check_args(A, E, B, trans)
    options = _parse_options(options, lyap_solver_options(), 'scipy', None, False)
    assert options['type'] == 'scipy'

    if E is not None:
        raise NotImplementedError
    import scipy.linalg as spla
    A_mat = to_matrix(A, format='dense')
    B_mat = to_matrix(B, format='dense')
    if not trans:
        X = spla.solve_continuous_lyapunov(A_mat, -B_mat.dot(B_mat.T))
    else:
        X = spla.solve_continuous_lyapunov(A_mat.T, -B_mat.T.dot(B_mat))

    Z = chol(X, copy=False)
    Z = A.source.from_numpy(np.array(Z).T)

    return Z
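chol here is pymor's factorization helper; a standalone stand-in that tolerates a positive semidefinite X (where plain Cholesky can fail) is an eigendecomposition with clipped eigenvalues, sketched with illustrative A and B:

import numpy as np
from scipy.linalg import solve_continuous_lyapunov

A = np.array([[-1.0, 1.0], [0.0, -2.0]])
B = np.array([[1.0], [0.5]])
X = solve_continuous_lyapunov(A, -B @ B.T)

w, V = np.linalg.eigh((X + X.T) / 2)    # symmetrize, then decompose
Z = V * np.sqrt(np.clip(w, 0.0, None))  # columns scaled by sqrt-eigenvalues
assert np.allclose(Z @ Z.T, X, atol=1e-10)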
Example #31
control, in_vecs, c_matrix = state_controllability(A, B)

# Question: Why does A.transpose give the same eigenvectors as the book
# and not plain A??
# Answer: The eig function in the numpy library only calculates the
# right-hand eigenvectors. The scipy library provides an eig function in
# which you can specify whether you want the left or right eigenvectors.
# To determine controllability you need to calculate the input pole
# vectors, which depend on the left eigenvectors.

# Calculate eigenvalues and left eigenvectors for the pole vectors
val, vec = LA.eig(A, left=True, right=False)

n = lin.matrix_rank(c_matrix)

P = LA.solve_continuous_lyapunov(A, -B * B.T)

# Display results
G_tf = G.to_tf()
print('\nThe transfer function realization is:')
print('G(s) = ')
print(np.poly1d(G_tf.num[0], variable='s'))
print("----------------")
print(np.poly1d(G_tf.den, variable='s'))

print('\n1) Eigenvalues are: p1 = ', val[0], 'and p2 = ', val[1])
print('   with eigenvectors: q1 = ', vec[:, 0], 'and q2 = ', vec[:, 1])
print('   Input pole vectors are: up1 = ', in_vecs[0], 'and up2 = ',
      in_vecs[1])

print('\n2) The controllability matrix has rank', n, 'and is given as:')
Example #32
m = 1
g = 9.8
L0 = 1
delta = 0.05
Lmax = 1.5
Lmin = 0.5
p = PendulumParams(m, g, L0, delta, Lmax, Lmin)

f_ode = lambda x: RHS(x, p)
Dfx = nd.Jacobian(f_ode)
E = np.array([0, 0])
A = Dfx(E)

I = np.identity(2)
P = solve_continuous_lyapunov(A, -10*I)


muA = weighted_mu(A, P)
d = compute_d(A, P)

pointsx = np.empty(shape=[1, 0])
pointsy = np.empty(shape=[1, 0])
pointsW = np.empty(shape=[1, 0])
pointsWdot = np.empty(shape=[1, 0])

x1 = Symbol('x1')
x2 = Symbol('x2')
t = Symbol('t')
x = [x1, x2]
Example #33
    def check_continuous_case(self, a, q):
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal(
            np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
Example #34
def optimize(FC0_obj, FC1_obj, tau_x, mask_EC, mask_Sigma):

    N = FC0_obj.shape[0]

    # optimization rates (to avoid explosion of activity, Sigma is tuned more quickly)
    epsilon_EC = 0.0005
    epsilon_Sigma = 0.05

    # minimal value for tuned EC elements
    min_val_EC = 0.
    # maximal value for tuned EC elements
    max_val_EC = 1.
    # minimal value for tuned Sigma elements
    min_val_Sigma = 0.01

    # initial EC
    EC = np.zeros([N, N])  # initial connectivity
    Sigma = np.eye(N)  # initial noise

    # record best fit (matrix distance between model and empirical FC)
    best_dist = 1e10

    # scaling coefficients for FC0 and FC1 (to compensate that matrix FC1 has elements of smaller magnitude than FC0)
    a0 = np.linalg.norm(FC1_obj) / (np.linalg.norm(FC0_obj) +
                                    np.linalg.norm(FC1_obj))
    a1 = 1. - a0

    stop_opt = False
    i_opt = 0
    while not stop_opt:

        # calculate Jacobian of dynamical system
        J = -np.eye(N) / tau_x + EC

        # calculate FC0 and FC1 for model
        FC0 = spl.solve_continuous_lyapunov(
            J, -Sigma)  # named solve_lyapunov in older versions of scipy
        FC1 = np.dot(FC0, spl.expm(J.T))

        # matrices of model error
        Delta_FC0 = FC0_obj - FC0
        Delta_FC1 = FC1_obj - FC1

        # calculate error between model and empirical data for FC0 and FC_tau (matrix distance)
        dist_FC_tmp = 0.5 * (
            np.linalg.norm(Delta_FC0) / np.linalg.norm(FC0_obj) +
            np.linalg.norm(Delta_FC1) / np.linalg.norm(FC1_obj))

        # calculate Pearson correlation between model and empirical data for FC0 and FC_tau
        Pearson_FC_tmp = 0.5 * (
            stt.pearsonr(FC0.reshape(-1), FC0_obj.reshape(-1))[0] +
            stt.pearsonr(FC1.reshape(-1), FC1_obj.reshape(-1))[0])

        # record best model parameters
        if dist_FC_tmp < best_dist:
            best_dist = dist_FC_tmp
            best_Pearson = Pearson_FC_tmp
            i_best = i_opt
            J_mod_best = np.array(J)
            Sigma_mod_best = np.array(Sigma)
        else:
            stop_opt = i_opt > 50

        # Jacobian update
        Delta_J = np.dot(
            np.linalg.pinv(FC0),
            a0 * Delta_FC0 + np.dot(a1 * Delta_FC1, spl.expm(-J.T))).T

        # update EC (recurrent connectivity)
        EC[mask_EC] += epsilon_EC * Delta_J[mask_EC]
        EC[mask_EC] = np.clip(EC[mask_EC], min_val_EC, max_val_EC)

        # update Sigma (input variances)
        Delta_Sigma = -np.dot(J, Delta_FC0) - np.dot(Delta_FC0, J.T)
        Sigma[mask_Sigma] += epsilon_Sigma * Delta_Sigma[mask_Sigma]
        Sigma[mask_Sigma] = np.maximum(Sigma[mask_Sigma], min_val_Sigma)

        # check for stop
        if not stop_opt:
            i_opt += 1
        else:
            print('stop at step', i_best, 'with best FC dist:', best_dist,
                  '; best FC Pearson:', best_Pearson)

    return (J_mod_best, Sigma_mod_best)
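A hedged usage sketch for optimize: generate the objective matrices from a known network with the same forward model the loop uses, then recover it. It assumes optimize and its module imports (np, spl, stt) are in scope; all values below are illustrative.

import numpy as np
import scipy.linalg as spl

N, tau_x = 10, 1.0
rng = np.random.default_rng(3)

EC_true = 0.1 * (rng.random((N, N)) < 0.2)   # sparse ground-truth weights
np.fill_diagonal(EC_true, 0.0)
Sigma_true = np.eye(N)

J_true = -np.eye(N) / tau_x + EC_true
FC0_obj = spl.solve_continuous_lyapunov(J_true, -Sigma_true)
FC1_obj = FC0_obj @ spl.expm(J_true.T)

mask_EC = ~np.eye(N, dtype=bool)     # tune all off-diagonal connections
mask_Sigma = np.eye(N, dtype=bool)   # tune only noise variances

J_fit, Sigma_fit = optimize(FC0_obj, FC1_obj, tau_x, mask_EC, mask_Sigma)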