Example #1
    def test_tb05ad_balance(self):
        """Test balancing in tb05ad.

        Tests for the cause of the problem reported in issue #11
        balancing permutations were not correctly applied to the
        C and D matrix.
        """

        # find a good test case. Some sparsity,
        # some zero eigenvalues, some non-zero eigenvalues,
        # and proof that the 1st step, with dgebal, does some
        # permutation and some scaling
        crit = False
        n = 8
        while not crit:
            A = np.random.randn(n, n)
            A[np.random.uniform(size=(n, n)) > 0.35] = 0.0

            Aeig = eig(A)[0]
            neig0 = np.sum(np.abs(Aeig) == 0)
            As, T = matrix_balance(A)
            nperm = np.sum(np.diag(T) == 0)
            nscale = n - np.sum(T == 1.0)
            crit = nperm < n and nperm >= n//2 and \
                neig0 > 1 and neig0 <= 3 and nscale > 0

        # print("number of permutations", nperm, "eigenvalues=0", neig0)
        B = np.random.randn(8, 4)
        C = np.random.randn(3, 8)

        # do a run
        jomega = 1.0
        At, Bt, Ct, rcond, g_jw, ev, hinvb, info = transform.tb05ad(8,
                                                                    4,
                                                                    3,
                                                                    jomega,
                                                                    A,
                                                                    B,
                                                                    C,
                                                                    job='AG')

        # remove information on Q, stored below the first subdiagonal of At
        At = np.triu(At, k=-1)

        # now after the balancing in DGEBAL, and conversion to
        # upper Hessenberg form:
        # At = Q^T * (P^-1 * A * P ) * Q
        # with Q orthogonal
        # Ct = C * P * Q
        # Bt = Q^T * P^-1 * B
        # so test with Ct * At * Bt  ==  C * A * B
        # and verify that eigenvalues of both A matrices are close
        assert_almost_equal(np.dot(np.dot(Ct, At), Bt),
                            np.dot(np.dot(C, A), B))
        # uses a sort, there is no guarantee on the order of eigenvalues
        eigAt = eig(At)[0]
        idxAt = np.argsort(eigAt)
        eigA = eig(A)[0]
        idxA = np.argsort(eigA)
        assert_almost_equal(eigA[idxA], eigAt[idxAt])
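
The assertions above rest on matrix_balance returning a similarity transformation. A minimal standalone sketch of that property, using the matrix referenced in the comments of later examples:

import numpy as np
from scipy.linalg import eig, matrix_balance

# matrix_balance returns (As, T) with A = T @ As @ inv(T), so the balanced
# matrix is similar to A and must share its eigenvalues.
A = np.array([[1.0, 2.0, 0.0],
              [9.0, 1.0, 0.01],
              [1.0, 2.0, 10.0 * np.pi]])
As, T = matrix_balance(A)
assert np.allclose(A, T @ As @ np.linalg.inv(T))
assert np.allclose(np.sort(eig(A)[0]), np.sort(eig(As)[0]))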
Example #2
def _minimal_realization_state(A, B, C, tol=1e-6):
    """
    Low-level function to perform the state removel if any for minimal
    realizations. No consistency check is performed.
    """

    # Empty matrices, don't bother
    if A.size == 0:
        return A, B, C

    # scale the system matrix with possible permutations
    A, T = matrix_balance(A)
    # T always has powers of 2 nonzero elements
    B, C = solve(T, B), C @ T

    n = A.shape[0]
    # Make sure that we still have states left, otherwise done
    if n == 0:
        return A, B, C

    # Now obtain the c'ble and o'ble staircase forms
    Ac, Bc, Cc, _ = staircase(A, B, C)
    Ao, Bo, Co, _ = staircase(A, B, C, form='o', invert=True)
    # And compute the distance to rank deficiency.
    kc, *_ = cancellation_distance(Ac, Bc)
    ko, *_ = cancellation_distance(Ao.T, Co.T)

    # If both distances are above tol then the realization is already minimal
    if min(kc, ko) > tol:
        return A, B, C
    else:
        # Here, we improve the cancellation distance computations by first
        # scaling the system and then forming the staircase forms.

        # If the uncontrollability distance is smaller, handle it first
        # (the order is an arbitrary choice)
        if kc <= tol:
            # Start removing and check if the distance gets bigger.
            # The observability form removes from the top left, the
            # controllability form from the bottom right.
            while kc <= tol:
                Ac, Bc, Cc = (Ac[:-1, :-1], Bc[:-1, :], Cc[:, :-1])
                if Ac.size == 0:
                    A, B, C = [array([], dtype=float)] * 3
                    break
                else:
                    kc, *_ = cancellation_distance(Ac, Bc)
            # Return the resulting matrices
            A, B, C = Ac, Bc, Cc
            # Same with the o'ble modes, but the loop above might already
            # have removed an unobservable mode, so get the distance again
            ko, *_ = cancellation_distance(A.T, C.T)

        # Any unobservable modes left?
        if ko <= tol:
            Ao, Bo, Co, To = staircase(A, B, C, form='o', invert=True)
            while ko <= tol:  # Until cancel dist gets big
                Ao, Bo, Co = Ao[1:, 1:], Bo[1:, :], Co[:, 1:]
                if Ao.size == 0:
                    A, B, C = [array([], dtype=float)] * 3
                    break
                else:
                    ko, *_ = cancellation_distance(Ao, Bo)

            # Return the resulting matrices
            A, B, C = Ao, Bo, Co

    return A, B, C
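
Why the balancing step at the top is safe: it is a similarity transform, so the input-output map is untouched. A small sketch, assuming only scipy, that checks C (sI - A)⁻¹ B is invariant under the same (A, solve(T, B), C @ T) substitution used above:

import numpy as np
from scipy.linalg import matrix_balance, solve

A = np.array([[-1.0, 1e3], [1e-3, -2.0]])
B = np.array([[1.0], [0.5]])
C = np.array([[1.0, 2.0]])
As, T = matrix_balance(A)          # As = inv(T) @ A @ T
Bs, Cs = solve(T, B), C @ T
s = 1j * 3.0                       # arbitrary test frequency
G = C @ np.linalg.solve(s * np.eye(2) - A, B)
Gs = Cs @ np.linalg.solve(s * np.eye(2) - As, Bs)
assert np.allclose(G, Gs)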
Example #3
def haroldlcm(*args, compute_multipliers=True, cleanup_threshold=1e-9):
    """
    Takes n-many 1D numpy arrays and computes the numerical
    least common multiple polynomial. The polynomials are
    assumed to be in decreasing powers, e.g. s^2 + 5 should
    be given as ``[1,0,5]``

    Returns a numpy array holding the polynomial coefficients
    of LCM and a list, of which entries are the polynomial
    multipliers to arrive at the LCM of each input element.

    For the multiplier computation, a variant of [1]_ is used.

    Parameters
    ----------
    args : iterable
        Input arrays. 1-D arrays or array_like sequences of polynomial
        coefficients
    compute_multipliers : bool, optional
        After the computation of the LCM, this switch decides whether the
        multipliers of the given arguments should be computed or skipped.
        A multiplier in this context is ``[1,3]`` for the argument ``[1,2]``
        if the LCM turns out to be ``[1,5,6]``.
    cleanup_threshold : float
        The computed polynomials might contain some numerical noise and after
        finishing everything this value is used to clean up the tiny entries.
        Set this value to zero to turn off this behavior. The default value
        is :math:`10^{-9}`.

    Returns
    -------
    lcmpoly : ndarray
        Resulting 1D polynomial coefficient array for the LCM.
    mults : list
        The multipliers given as a list of 1D arrays, for each given argument.

    Notes
    -----
    If complex-valued arrays are given, only real parts are taken into account.

    Examples
    --------
    >>> a, b = haroldlcm([1, 3, 0, -4], [1, -4, -3, 18], [1, -4, 3], [1, -2, -8])
    >>> a
    array([   1.,   -7.,    3.,   59.,  -68., -132.,  144.])
    >>> b
    [array([  1., -10.,  33., -36.]),
     array([  1.,  -3.,  -6.,   8.]),
     array([  1.,  -3., -12.,  20.,  48.]),
     array([  1.,  -5.,   1.,  21., -18.])]
    >>> np.convolve([1, 3, 0, -4], b[0])  # or haroldpolymul() for poly mult
    array([   1.,   -7.,    3.,   59.,  -68., -132.,  144.])

    References
    ----------
    .. [1] Karcanias, Mitrouli, "System theoretic based characterisation and
        computation of the least common multiple of a set of polynomials",
        2004, :doi:`10.1016/j.laa.2003.11.009`

    """
    # Regularize the arguments
    args = [np.array(a).squeeze().real for a in args]
    # Add dimension if any scalar arrays such as np.array(1)
    args = [a if a.ndim > 0 else np.atleast_1d(a) for a in args]
    if not all([x.ndim == 1 for x in args]):
        raise ValueError('Input arrays must be 1D.')
    if not all([x.size > 0 for x in args]):
        raise ValueError('Empty arrays are not allowed.')

    # All scalars
    if all([x.size == 1 for x in args]):
        if compute_multipliers:
            return np.array([1.]), [np.array([1.]) for _ in range(len(args))]
        else:
            return np.array([1.])

    # Remove constant polynomials if any, but return their multipliers!
    poppedargs = [x for x in args if x.size > 1]
    # Get the index number of the ones that are popped
    p_ind, l_ind = [], []
    for ind, x in enumerate(args):
        (p_ind if x.size == 1 else l_ind).append(ind)

    # If there is more than one nonconstant polynomial to consider
    if len(poppedargs) > 1:
        a = block_diag(*(map(haroldcompanion, poppedargs)))
        b = np.concatenate([e_i(x.size - 1, -1) for x in poppedargs])
        n = a.shape[0]

        # Balance A
        As, (sca, _) = matrix_balance(a, permute=False, separate=True)
        Bs = b * np.reciprocal(sca)[:, None]

        # Computing the full c'bility matrix is redundant; we just need to
        # see where the rank drop occurs (if any!). Due to the matrix
        # powers, entries grow quickly!
        C = Bs
        for _ in range(n - 1):
            C = np.hstack([C, As @ C[:, [-1]]])
            if matrix_rank(C) != C.shape[1]:
                break
        else:
            # No break
            C = np.hstack([C, As @ C[:, [-1]]])

        cols = C.shape[1]
        _, s, v = haroldsvd(C)
        temp = s @ v
        lcmpoly = solve(temp[:cols - 1, :-1], -temp[:cols - 1, -1])
        # Add monic coefficient and flip
        lcmpoly = np.append(lcmpoly, 1)[::-1]
    else:
        lcmpoly = np.trim_zeros(poppedargs[0], 'f')
        lcmpoly = lcmpoly / lcmpoly[0]

    if compute_multipliers:
        n_lcm = lcmpoly.size - 1
        if len(poppedargs) > 1:
            c = block_diag(*[e_i(x.size - 1, 0).T for x in poppedargs]) * sca
            b_lcm, _, _, _ = lstsq(C[:c.shape[1], :-1], Bs)
            c_lcm = c @ C[:c.shape[1], :-1]

            # adj(sI-A) formulas with A being a companion matrix. Use a 3D
            # array where x,y,z = adj(sI-A)[x,y] and z is the coefficient array
            adjA = np.zeros([n_lcm, n_lcm, n_lcm])
            # fill in the adjoint
            for x in range(n_lcm):
                # Diagonal terms
                adjA[x, x, :n_lcm - x] = lcmpoly[:n_lcm - x]
                for y in range(n_lcm):
                    if y < x:  # Upper Triangular terms
                        adjA[x, y, x - y:] = adjA[x, x, :n_lcm - (x - y)]
                    elif y > x:  # Lower Triangular terms
                        adjA[x, y, n_lcm-y:n_lcm+1-y+x] = \
                                                    -lcmpoly[-x-1:n_lcm+1]
            # C*adj(sI-A)*B
            mults = c_lcm @ np.sum(adjA * b_lcm, axis=1)
        else:
            mults = np.zeros((1, n_lcm))
            mults[0, -1] = 1.

        if len(p_ind) > 0:
            temp = np.zeros((len(args), lcmpoly.size), dtype=float)
            temp[p_ind] = lcmpoly
            temp[l_ind, 1:] = mults
            mults = temp

        lcmpoly[abs(lcmpoly) < cleanup_threshold] = 0.
        mults[abs(mults) < cleanup_threshold] = 0.
        mults = [np.trim_zeros(z, 'f') for z in mults]
        return lcmpoly, mults
    else:
        return lcmpoly
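
A plain-numpy cross-check of the docstring example: each input polynomial must divide the returned LCM with a numerically zero remainder. The coefficient values are taken from the docstring above.

import numpy as np

lcm = np.array([1., -7., 3., 59., -68., -132., 144.])
inputs = [[1, 3, 0, -4], [1, -4, -3, 18], [1, -4, 3], [1, -2, -8]]
for poly in inputs:
    _, rem = np.polydiv(lcm, np.asarray(poly, dtype=float))
    # exact divisibility up to floating-point noise
    assert np.allclose(rem, 0.0, atol=1e-8)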
Example #4
    def expm(self, A, max_mat_mult=100, balance=True):
        ''' max_mat_mult can be used to bound the computational cost of the
            matrix exponential:
            6: at most Pade order 13 is used, but with no scaling
            5: at most Pade order 9 is used
            4: at most Pade order 7 is used
            3: at most Pade order 5 is used
            2: at most Pade order 3 is used
        '''
        if (np.any(np.isnan(A))):
            print("Matrix A contains nan")

        # Compute expm(A)*v
        if balance:
            A_bal, D = matrix_balance(A, permute=False)
            Dinv = np.copy(D)
            for i in range(D.shape[0]):
                Dinv[i, i] = 1.0 / D[i, i]
#            assert(np.max(np.abs(A_bal-(Dinv@A@D)))==0.0)
        else:
            A_bal = A

        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200
        h = _ExpmPadeHelper(A_bal, use_exact_onenorm=use_exact_onenorm)
        structure = None

        # Compute the number of mat-mat multiplications needed in theory
        self.mat_mult_in_theory = self.compute_mat_mult(A_bal)
        self.mat_mult = min(self.mat_mult_in_theory, max_mat_mult)
        self.mat_norm = np.linalg.norm(A_bal, 1)

        if self.mat_mult <= 0:
            U, V = self.pade1(A_bal)
            X = _solve_P_Q(U, V, structure=structure)

        if self.mat_mult == 1:
            U, V = self.pade2(A_bal)
            X = _solve_P_Q(U, V, structure=structure)

        if self.mat_mult == 2:
            U, V = h.pade3()
            #            U_, V_ = self.pade3(A_bal)
            #            assert(np.max(np.abs(U-U_))==0.0)
            #            assert(np.max(np.abs(V-V_))==0.0)
            X = _solve_P_Q(U, V, structure=structure)

        # Try Pade order 5.
        if self.mat_mult == 3:
            U, V = h.pade5()
            X = _solve_P_Q(U, V, structure=structure)

        # Try Pade order 7.
        if self.mat_mult == 4:
            U, V = h.pade7()
            X = _solve_P_Q(U, V, structure=structure)

        # Try Pade order 9.
        if self.mat_mult == 5:
            U, V = h.pade9()
            X = _solve_P_Q(U, V, structure=structure)

        if self.mat_mult > 5:
            s = self.mat_mult - 6
            U, V = h.pade13_scaled(s)
            X = _solve_P_Q(U, V)
            # X = r_13(A)^(2^s) by repeated squaring.
            for i in range(s):
                X = X.dot(X)

        if (balance):
            X = D @ X @ Dinv
#        assert(np.max(np.abs(expm(A) - X)) == 0.0)
        return X
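
The final unbalancing line relies on a standard identity: if A_bal = inv(D) @ A @ D, then expm(A) = D @ expm(A_bal) @ inv(D). A minimal sketch with scipy's own expm:

import numpy as np
from scipy.linalg import expm, matrix_balance

A = np.array([[0.0, 1e4], [1e-4, 0.0]])
A_bal, D = matrix_balance(A, permute=False)   # A_bal = inv(D) @ A @ D
X = D @ expm(A_bal) @ np.linalg.inv(D)
assert np.allclose(X, expm(A))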
Example #5
def _undiscretize(T, dt, method, prewarp_at, q):

    m, n = T.NumberOfInputs, T.NumberOfStates

    # Error message for log based zoh, foh
    logmsg = ('The matrix logarithm returned a complex array, probably due'
              ' to poles on the negative real axis, and a continuous-time '
              'model cannot be obtained via this method without '
              'perturbations.')

    if method == 'zoh':

        if np.any(np.abs(eigvals(T.a)) < np.sqrt(np.spacing(norm(T.a, 1)))):
            raise ValueError('The system has poles near 0, "zoh" method'
                             ' cannot be applied.')
        M = block([[T.a, T.b], [zeros((m, n)), eye(m)]])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)

        eM = logm(Ms) * (sca[:, None] * np.reciprocal(sca)) * (1 / dt)
        if np.any(eM.imag):
            raise ValueError(logmsg)

        Ac, Bc, Cc, Dc = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif method == 'foh':
        """
        We use the explicit formulas for Φ, Γ₁, Γ₂

                      [ Φ  Γ₁  Γ₂]
                 logm [ 0  I   I ]
                      [ 0  0   I ]

        Here a direct logarithm won't work since we don't have Γ terms. However
        discrete time matrices are given by

                Ad = exp(Ac*t) = Φ
                Bd = Ac⁻²(Φ - I)²Bc/t                      (*)
                Cd = Cc
                Dd = Dc + Cc[Ac⁻²(Φ - I) - Ac⁻¹]Bc/t

        since Ac⁻¹(Φ-I) = ∫exp(Ac)dt and Ac⁻¹ commutes with Φ, Bd*t = Φ²*Bc,
        the solution follows.
        """
        if np.any(np.abs(eigvals(T.a)) < np.sqrt(np.spacing(norm(T.a, 1)))):
            raise ValueError('The system has poles near 0, "foh" method'
                             ' cannot be applied.')

        M = block([
            [T.a, dt * T.b, zeros((n, m))],  # Notice dt factor from (*)
            [zeros((m, n)), eye(m), eye(m)],
            [zeros((m, n + m)), eye(m)]
        ])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # Look out for the initial dt factor of T.b
        eM = logm(Ms) / dt * (sca[:, None] * np.reciprocal(sca))
        if np.any(eM.imag):
            raise ValueError(logmsg)

        Ac = eM[:n, :n]
        Bc0, Bc1 = eM[:n, n:n + m], -eM[:n, n + m:]

        # Now we have Bc0 = Ac⁻¹(Φ - I)Bc, logm once again to get Bc
        M = block([[T.a, Bc0], [zeros((m, n)), eye(m)]])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        eM = logm(Ms) / dt * (sca[:, None] * np.reciprocal(sca))
        if np.any(eM.imag):
            raise ValueError(logmsg)

        # Now back-substitute
        Bc, Cc, Dc = eM[:n, n:], T.c, T.d - T.c @ Bc1

    elif method in ('bilinear', 'tustin', 'trapezoidal'):
        if prewarp_at == 0.:
            q = np.array([[-2 / dt, 2 / np.sqrt(dt)], [2 / np.sqrt(dt), -1]])
        else:
            if 1 / (2 * dt) <= prewarp_at:
                raise ValueError('Prewarping frequency is beyond the Nyquist'
                                 ' rate. It has to satisfy 0 < w < 1/(2*Δt),'
                                 ' with Δt being the sampling period in '
                                 'seconds. Δt={0} is given, hence the maximum'
                                 ' allowed is {1} Hz.'.format(
                                     dt, 1 / (2 * dt)))
            prew_rps = 2 * np.pi * prewarp_at
            sq2tan = np.sqrt(2 * np.tan(prew_rps * dt / 2) / prew_rps)
            q = np.array([[-2 / sq2tan**2, 1 / sq2tan], [1 / sq2tan, -1]])

        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('forward euler', 'forward difference',
                    'forward rectangular', '>>'):
        q = np.array([[-1 / dt, 1 / np.sqrt(dt)], [1 / np.sqrt(dt), 0]])
        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('backward euler', 'backward difference',
                    'backward rectangular', '<<'):
        # nonproper via lft, compute explicitly.
        with catch_warnings(record=True) as war:
            simplefilter("always")
            try:
                iAd = solve(T.a, eye(n))
            except LinAlgError:
                raise ValueError('The state matrix has eigenvalues at zero '
                                 'and this conversion method can\'t be used.')

        if len(war) > 0:
            warn(
                'The state matrix has eigenvalues too close to imaginary'
                ' axis. This conversion method might give inaccurate '
                'results',
                _rcond_warn,
                stacklevel=2)

        Ac = np.eye(n) - iAd
        Ac /= dt
        Bc = 1 / np.sqrt(dt) * (iAd @ T.b)
        Cc = 1 / np.sqrt(dt) * (T.c @ iAd)
        Dc = T.d - T.c @ iAd @ T.b

    elif method == 'lft':
        if q is None:
            raise ValueError('"lft" method requires a 2x2 interconnection '
                             'matrix "q" between s and z indeterminates.')
        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    return Ac, Bc, Cc, Dc
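
A round-trip sketch of the 'zoh' branch with hypothetical system data: discretizing via expm of the [[A, B], [0, 0]] block and then taking logm of [[Ad, Bd], [0, I]] divided by dt recovers the continuous matrices, which is exactly the identity the code above inverts:

import numpy as np
from scipy.linalg import expm, logm

n, m, dt = 2, 1, 0.1
Ac = np.array([[-1.0, 0.5], [0.0, -2.0]])
Bc = np.array([[1.0], [0.5]])
M = np.block([[Ac, Bc], [np.zeros((m, n + m))]])
eMd = expm(M * dt)                 # equals [[Ad, Bd], [0, I]]
L = logm(eMd) / dt
assert np.allclose(L[:n, :n], Ac) and np.allclose(L[:n, n:n + m], Bc)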
Example #6
def _discretize(T, dt, method, prewarp_at, q):

    m, n = T.shape[1], T.NumberOfStates

    if method == 'zoh':
        """

                 [A | B]   [ exp(A) | ∫exp(A)dt * B ]   [ Ad | Bd ]
            expm [- - -] = [------------------------] = [---------]
                 [0 | 0]   [   0    |       I       ]   [ C  | D  ]

        """

        M = block([[T.a, T.b], [zeros((m, m + n))]])
        # Don't permute, destroys the zero structure
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # inverted scale after exponentiation via broadcast+elwise mult.
        eM = expm(Ms * dt) * (sca[:, None] * np.reciprocal(sca))
        Ad, Bd, Cd, Dd = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif method == 'foh':
        """
        This conversion is done via the expm() identity

                  [ A*t   B*t   0 ]   [ Φ  Γ₁  Γ₂]
             expm [  0     0    I ] = [ 0  I   I ]
                  [  0     0    0 ]   [ 0  0   I ]

        where Φ, Γ₁, Γ₂ satisfy

            Φ(B*t) = (A*t)Γ₁ + B*t = (A*t)²Γ₂ + (A*t + I)B*t.

        See Franklin, Powell "Digital Control of Dynamic Systems" 3rd ed.
        section 6.3.2
        """
        M = block([[block_diag(block([T.a, T.b]) * dt, eye(m))],
                   [zeros((m, n + 2 * m))]])
        # Don't permute, destroys the zeros/identity structure
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # inverted scale after exponentiation via broadcast+elwise mult.
        eM = expm(Ms) * (sca[:, None] * np.reciprocal(sca))

        Ad = eM[:n, :n]
        Bd0, Bd1 = eM[:n, n:n + m], eM[:n, n + m:]
        Bd = Bd0 + Ad @ Bd1 - Bd1
        Cd = T.c
        Dd = T.d + T.c @ Bd1

    elif method in ('bilinear', 'tustin', 'trapezoidal'):
        if prewarp_at == 0.:
            q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), dt / 2]])
        else:
            if 1 / (2 * dt) < prewarp_at:
                raise ValueError('Prewarping frequency is beyond the Nyquist'
                                 ' rate. It has to satisfy 0 < w < 1/(2*Δt),'
                                 ' with Δt being the sampling period in '
                                 'seconds. Δt={0} is given, hence the maximum'
                                 ' allowed is {1} Hz.'.format(
                                     dt, 1 / (2 * dt)))
            prew_rps = 2 * np.pi * prewarp_at
            sq2tan = np.sqrt(2 * np.tan(prew_rps * dt / 2) / prew_rps)
            q = np.array([[1, sq2tan], [sq2tan, sq2tan**2 / 2]])

        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('forward euler', 'forward difference',
                    'forward rectangular', '>>'):
        q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), 0]])
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('backward euler', 'backward difference',
                    'backward rectangular', '<<'):
        q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), dt]])
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method == 'lft':
        if q is None:
            raise ValueError('"lft" method requires a 2x2 interconnection '
                             'matrix "q" between s and z indeterminates.')
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    return Ad, Bd, Cd, Dd, dt
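
A sanity sketch (hypothetical data) checking the 'zoh' block-matrix identity above against scipy.signal.cont2discrete, which implements the same discretization:

import numpy as np
from scipy.linalg import expm
from scipy.signal import cont2discrete

A = np.array([[-1.0, 0.0], [1.0, -3.0]])
B = np.array([[1.0], [0.0]])
n, m, dt = 2, 1, 0.05
eM = expm(np.block([[A, B], [np.zeros((m, n + m))]]) * dt)
Ad_ref, Bd_ref, *_ = cont2discrete((A, B, np.eye(n), np.zeros((n, m))),
                                   dt, method='zoh')
assert np.allclose(eM[:n, :n], Ad_ref) and np.allclose(eM[:n, n:], Bd_ref)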
Example #7
File: oma.py  Project: oiseth/koma
def cva_weights(R, balancing_H0=None, balance=False):
    """
    Computes the weights for CVA.

    Arguments
    ---------------------------
    R : double
        n_channels-by-n_channels-by-n_lags large array, each slice in third dimension 
        corresponds to the cross-correlation matrix for a given time lag 
    balancing_H0 : double, optional
        matrix whose balancing transformation (from matrix_balance) is
        applied as a similarity to R+ and R-
    balance : boolean, optional
        if True, R+ and R- are balanced directly before factorization

    Returns
    ---------------------------
    W1 : double
        inverse of the lower triangular Cholesky factor of R+ (see Hermans
        and van der Auweraer)
    W2 : double
        inverse of the lower triangular Cholesky factor of R-
    L1 : double
        lower triangular Cholesky factor of R+
    L2 : double
        lower triangular Cholesky factor of R-

    References
    --------------------------------
    Hermans and van Der Auweraer :cite:`Hermans1999`

    """

    i = int(np.shape(R)[2] / 2) - 1
    l = np.shape(R)[0]

    Rp = np.zeros([i * l, i * l])
    Rm = np.zeros([i * l, i * l])

    for row in range(0, i):
        for col in range(0, i):
            # R+
            if row < col:  #above diagonal
                Rp[(row * l):(row * l + l),
                   (col * l):(col * l + l)] = R[:, :, np.abs(row - col)].T
            elif row >= col:  #below or at diagonal
                Rp[(row * l):(row * l + l),
                   (col * l):(col * l + l)] = R[:, :, np.abs(row - col)]

            # R-
            if row <= col:
                Rm[(row * l):(row * l + l),
                   (col * l):(col * l + l)] = R[:, :, np.abs(row - col)]
            elif row > col:
                Rm[(row * l):(row * l + l),
                   (col * l):(col * l + l)] = R[:, :, np.abs(row - col)].T

    if balancing_H0 is not None:
        __, T = matrix_balance(balancing_H0)
        Rp = np.linalg.inv(T) @ Rp @ T
        Rm = np.linalg.inv(T) @ Rm @ T

    if balance:
        Rp, __ = matrix_balance(Rp)
        Rm, __ = matrix_balance(Rm)

    L1 = gaxpy_chol(Rp)
    L2 = gaxpy_chol(Rm)
    W1 = np.linalg.inv(L1)
    W2 = np.linalg.inv(L2)

    return W1, W2, L1, L2
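
A minimal sketch of the weighting step, assuming R+ is symmetric positive definite and using numpy's Cholesky as a stand-in for gaxpy_chol: the weight W1 = inv(L1) whitens R+.

import numpy as np

Rp = np.array([[4.0, 1.0], [1.0, 3.0]])
L1 = np.linalg.cholesky(Rp)    # lower-triangular factor, Rp = L1 @ L1.T
W1 = np.linalg.inv(L1)         # the CVA weight is the inverse factor
assert np.allclose(W1 @ Rp @ W1.T, np.eye(2))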
Example #8
def _discretize(T, dt, method, prewarp_at, q):

    m, n = T.shape[1], T.NumberOfStates

    if method == 'zoh':
        """

                 [A | B]   [ exp(A) | ∫exp(A)dt * B ]   [ Ad | Bd ]
            expm [- - -] = [------------------------] = [---------]
                 [0 | 0]   [   0    |       I       ]   [ C  | D  ]

        """

        M = block([[T.a, T.b], [zeros((m, m+n))]])
        # Don't permute, destroys the zero structure
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # inverted scale after exponentiation via broadcast+elwise mult.
        eM = expm(Ms*dt) * (sca[:, None] * np.reciprocal(sca))
        Ad, Bd, Cd, Dd = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif method == 'foh':
        """
        This conversion is done via the expm() identity

                  [ A*t   B*t   0 ]   [ Φ  Γ₁  Γ₂]
             expm [  0     0    I ] = [ 0  I   I ]
                  [  0     0    0 ]   [ 0  0   I ]

        where Φ, Γ₁, Γ₂ satisfy

            Φ(B*t) = (A*t)Γ₁ + B*t = (A*t)²Γ₂ + (A*t + I)B*t.

        See Franklin, Powell "Digital Control of Dynamic Systems" 3rd ed.
        section 6.3.2
        """
        M = block([[block_diag(block([T.a, T.b])*dt, eye(m))],
                   [zeros((m, n+2*m))]])
        # Don't permute, destroys the zeros/identity structure
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # inverted scale after exponentiation via broadcast+elwise mult.
        eM = expm(Ms) * (sca[:, None] * np.reciprocal(sca))

        Ad = eM[:n, :n]
        Bd0, Bd1 = eM[:n, n:n+m], eM[:n, n+m:]
        Bd = Bd0 + Ad @ Bd1 - Bd1
        Cd = T.c
        Dd = T.d + T.c @ Bd1

    elif method in ('bilinear', 'tustin', 'trapezoidal'):
        if prewarp_at == 0.:
            q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), dt/2]])
        else:
            if 1/(2*dt) < prewarp_at:
                raise ValueError('Prewarping frequency is beyond the Nyquist'
                                 ' rate. It has to satisfy 0 < w < 1/(2*Δt),'
                                 ' with Δt being the sampling period in '
                                 'seconds. Δt={0} is given, hence the maximum'
                                 ' allowed is {1} Hz.'.format(dt, 1/(2*dt))
                                 )
            prew_rps = 2 * np.pi * prewarp_at
            sq2tan = np.sqrt(2*np.tan(prew_rps * dt / 2)/prew_rps)
            q = np.array([[1, sq2tan], [sq2tan, sq2tan**2/2]])

        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('forward euler', 'forward difference',
                    'forward rectangular', '>>'):
        q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), 0]])
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('backward euler', 'backward difference',
                    'backward rectangular', '<<'):
        q = np.array([[1, np.sqrt(dt)], [np.sqrt(dt), dt]])
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method == 'lft':
        if q is None:
            raise ValueError('"lft" method requires a 2x2 interconnection '
                             'matrix "q" between s and z indeterminates.')
        Ad, Bd, Cd, Dd = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    return Ad, Bd, Cd, Dd, dt
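
The 'foh' branch follows the Franklin-Powell triangular-hold formulas, and scipy.signal.cont2discrete (method='foh', scipy >= 1.3) implements the same reference, so a cross-check sketch with hypothetical data is possible:

import numpy as np
from scipy.linalg import block_diag, expm
from scipy.signal import cont2discrete

A = np.array([[-2.0, 1.0], [0.0, -1.0]])
B = np.array([[0.0], [1.0]])
C = np.eye(2)
D = np.zeros((2, 1))
n, m, dt = 2, 1, 0.1
M = np.block([[block_diag(np.hstack([A, B]) * dt, np.eye(m))],
              [np.zeros((m, n + 2 * m))]])
eM = expm(M)
Ad, Bd0, Bd1 = eM[:n, :n], eM[:n, n:n + m], eM[:n, n + m:]
Bd, Dd = Bd0 + Ad @ Bd1 - Bd1, D + C @ Bd1
Ad_ref, Bd_ref, _, Dd_ref, _ = cont2discrete((A, B, C, D), dt, method='foh')
assert np.allclose(Ad, Ad_ref) and np.allclose(Bd, Bd_ref) and np.allclose(Dd, Dd_ref)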
Example #9
def _undiscretize(T, dt, method, prewarp_at, q):

    m, n = T.NumberOfInputs, T.NumberOfStates

    # Error message for log based zoh, foh
    logmsg = ('The matrix logarithm returned a complex array, probably due'
              ' to poles on the negative real axis, and a continuous-time '
              'model cannot be obtained via this method without '
              'perturbations.')

    if method == 'zoh':

        if np.any(np.abs(eigvals(T.a)) < np.sqrt(np.spacing(norm(T.a, 1)))):
            raise ValueError('The system has poles near 0, "zoh" method'
                             ' cannot be applied.')
        M = block([[T.a, T.b], [zeros((m, n)), eye(m)]])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)

        eM = logm(Ms) * (sca[:, None] * np.reciprocal(sca)) * (1/dt)
        if np.any(eM.imag):
            raise ValueError(logmsg)

        Ac, Bc, Cc, Dc = eM[:n, :n], eM[:n, n:], T.c, T.d

    elif method == 'foh':
        """
        We use the explicit formulas for Φ, Γ₁, Γ₂

                      [ Φ  Γ₁  Γ₂]
                 logm [ 0  I   I ]
                      [ 0  0   I ]

        Here a direct logarithm won't work since we don't have Γ terms. However
        discrete time matrices are given by

                Ad = exp(Ac*t) = Φ
                Bd = Ac⁻²(Φ - I)²Bc/t                      (*)
                Cd = Cc
                Dd = Dc + Cc[Ac⁻²(Φ - I) - Ac⁻¹]Bc/t

        since Ac⁻¹(Φ-I) = ∫exp(Ac)dt and Ac⁻¹ commutes with Φ, Bd*t = Φ²*Bc,
        the solution follows.
        """
        if np.any(np.abs(eigvals(T.a)) < np.sqrt(np.spacing(norm(T.a, 1)))):
            raise ValueError('The system has poles near 0, "foh" method'
                             ' cannot be applied.')

        M = block([[T.a, dt*T.b, zeros((n, m))],  # Notice dt factor from (*)
                   [zeros((m, n)), eye(m), eye(m)],
                   [zeros((m, n+m)), eye(m)]])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        # Look out for the initial dt factor of T.b
        eM = logm(Ms)/dt * (sca[:, None] * np.reciprocal(sca))
        if np.any(eM.imag):
            raise ValueError(logmsg)

        Ac = eM[:n, :n]
        Bc0, Bc1 = eM[:n, n:n+m], -eM[:n, n+m:]

        # Now we have Bc0 = Ac⁻¹(Φ - I)Bc, logm once again to get Bc
        M = block([[T.a, Bc0],
                   [zeros((m, n)), eye(m)]])
        Ms, (sca, _) = matrix_balance(M, permute=0, separate=1)
        eM = logm(Ms)/dt * (sca[:, None] * np.reciprocal(sca))
        if np.any(eM.imag):
            raise ValueError(logmsg)

        # Now back-substitute
        Bc, Cc, Dc = eM[:n, n:], T.c, T.d - T.c @ Bc1

    elif method in ('bilinear', 'tustin', 'trapezoidal'):
        if prewarp_at == 0.:
            q = np.array([[-2/dt, 2/np.sqrt(dt)], [2/np.sqrt(dt), -1]])
        else:
            if 1/(2*dt) <= prewarp_at:
                raise ValueError('Prewarping frequency is beyond the Nyquist'
                                 ' rate. It has to satisfy 0 < w < 1/(2*Δt),'
                                 ' with Δt being the sampling period in '
                                 'seconds. Δt={0} is given, hence the maximum'
                                 ' allowed is {1} Hz.'.format(dt, 1/(2*dt)))
            prew_rps = 2 * np.pi * prewarp_at
            sq2tan = np.sqrt(2*np.tan(prew_rps * dt / 2)/prew_rps)
            q = np.array([[-2/sq2tan**2, 1/sq2tan], [1/sq2tan, -1]])

        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('forward euler', 'forward difference',
                    'forward rectangular', '>>'):
        q = np.array([[-1/dt, 1/np.sqrt(dt)], [1/np.sqrt(dt), 0]])
        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    elif method in ('backward euler', 'backward difference',
                    'backward rectangular', '<<'):
        # nonproper via lft, compute explicitly.
        with catch_warnings(record=True) as war:
            simplefilter("always")
            try:
                iAd = solve(T.a, eye(n))
            except LinAlgError:
                raise ValueError('The state matrix has eigenvalues at zero '
                                 'and this conversion method can\'t be used.')

        if len(war) > 0:
            warn('The state matrix has eigenvalues too close to imaginary'
                 ' axis. This conversion method might give inaccurate '
                 'results', _rcond_warn, stacklevel=2)

        Ac = np.eye(n) - iAd
        Ac /= dt
        Bc = 1/np.sqrt(dt) * (iAd @ T.b)
        Cc = 1/np.sqrt(dt) * (T.c @ iAd)
        Dc = T.d - T.c @ iAd @ T.b

    elif method == 'lft':
        if q is None:
            raise ValueError('"lft" method requires a 2x2 interconnection '
                             'matrix "q" between s and z indeterminates.')
        Ac, Bc, Cc, Dc = _simple_lft_connect(q, T.a, T.b, T.c, T.d)

    return Ac, Bc, Cc, Dc
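
What the prewarping branch arranges, numerically: with the substitution s = w0/tan(w0·Δt/2) · (z - 1)/(z + 1), which is the factor the q matrix encodes through sq2tan, the point z = exp(j·w0·Δt) maps exactly to s = j·w0, so the responses agree at the prewarp frequency. A short sketch:

import numpy as np

dt, f0 = 0.1, 1.0                  # sampling period [s], prewarp frequency [Hz]
w0 = 2 * np.pi * f0                # rad/s; must satisfy f0 < 1/(2*dt)
z = np.exp(1j * w0 * dt)
s = w0 / np.tan(w0 * dt / 2) * (z - 1) / (z + 1)
assert np.allclose(s, 1j * w0)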
Example #10
def _compute_tfinal_and_dt(sys, is_step=True):
    """
    Helper function to estimate a final time and a sampling period for
    time domain simulations. It is essentially geared towards impulse response
    but is also used for step responses.

    For discrete-time models, obviously dt is inherent and only tfinal is
    computed.

    Parameters
    ----------
    sys : {State, Transfer}
        The system to be investigated
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Evaluating only the fastest mode for dt and the slowest for tfinal often
    leads to unnecessarily bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal too large even though the fast
    mode hardly contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.

    Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
    can be used such that only the modes that have significant effect on the
    time response are taken. But the sensitivity of the eigenvalues complicates
    the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work
    with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
    for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
    the response is dependent on the size of the eigenshapes rather than the
    eigenvalues themselves.

    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    min_points = 100  # min number of points
    min_points_z = 20  # min number of points
    max_points = 10000  # max number of points
    max_points_z = 75000  # max number of points for discrete models
    default_tfinal = 5  # Default simulation horizon
    total_cycles = 5  # number of cycles for oscillating modes
    pts_per_cycle = 25  # Number of points divide a period of oscillation
    log_decay_percent = np.log(100)  # Factor of reduction for real pole decays

    # if a static model is given, don't bother with checks
    if sys._isgain:
        if sys._isdiscrete:
            return sys._dt * min_points_z, sys._dt
        else:
            return default_tfinal, default_tfinal / min_points

    if sys._isdiscrete:
        # System already has sampling fixed, hence we can't fall into the same
        # trap mentioned above. Just get nonintegrating slow modes together
        # with the damping.
        dt = sys._dt
        tfinal = default_tfinal
        p = eigvals(sys.a)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u]) / dt))
            tfinal = max(tfinal, t_emp)

        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals - treated as oscillatory modes
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs(
                (np.log(p_nr) / dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators (poles at z ≈ 1)
        m_int = (np.abs(p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w) / dt).min()
            tfinal = max(tfinal, t_emp)

        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p) / dt).real).min()
            tfinal = max(tfinal, t_emp)

        if p_int.size > 0:
            tfinal = tfinal * 5

        # Make tfinal an integer multiple of dt
        num_samples = tfinal // dt
        if num_samples > max_points_z:
            tfinal = dt * max_points_z
        else:
            tfinal = dt * num_samples

        return tfinal, dt

    # Improve conditioning via balancing and zeroing tiny entries
    # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
    b, (sca, perm) = matrix_balance(sys.a, separate=True)
    p, l, r = eig(b, left=True, right=True)
    # Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
    # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
    eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
    eig_sens = minimum(1e12, eig_sens)
    # Tolerances
    p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
    # Incorporate balancing to outer factors
    l[perm, :] *= reciprocal(sca)[:, None]
    r[perm, :] *= sca[:, None]
    w, v = sys.c @ r, l.T.conj() @ sys.b

    origin = False
    # Computing the "size" of the response of each simple mode
    wn = np.abs(p)
    if np.any(wn == 0.):
        origin = True

    dc = zeros_like(p, dtype=float)
    # well-conditioned nonzero poles, np.abs just in case
    ok = np.abs(eig_sens) <= 1 / sqrt_eps
    # the averaged t→∞ response of each simple λ on each i/o channel
    # See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
    # R/L eigenvector dependent)
    dc[ok] = norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0) * eig_sens[ok]
    dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
    dc[wn == 0.] = 0.
    # double the oscillating mode magnitude for the conjugate
    dc[p.imag != 0.] *= 2

    # Now get rid of noncontributing integrators and simple modes if any
    relevance = (dc > 0.1 * dc.max()) | ~ok
    psub = p[relevance]
    wnsub = wn[relevance]

    tfinal, dt = [], []
    ints = wnsub == 0.
    iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)

    # Pure imaginary?
    if np.any(iw):
        tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
        dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
    # The rest ~ts = log(%ss value) / exp(Re(λ)t)
    texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
    tfinal += texp_mode.tolist()
    dt += minimum(texp_mode / 50,
                  (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()

    # All integrators?
    if len(tfinal) == 0:
        return default_tfinal * 5, default_tfinal * 5 / min_points

    tfinal = np.max(tfinal) * (5 if origin else 1)
    dt = np.min(dt)

    dt = tfinal / max_points if tfinal // dt > max_points else dt
    tfinal = dt * min_points if tfinal // dt < min_points else tfinal

    return tfinal, dt
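
The eig_sens quantity above is the reciprocal of <w, v> for each eigenvalue. A sketch of the first-order perturbation fact it encodes, using a deliberately nonnormal matrix whose left and right eigenvectors are far from parallel:

import numpy as np
from scipy.linalg import eig

A = np.array([[-1.0, 100.0], [0.0, -2.0]])
p, l, r = eig(A, left=True, right=True)
dA = 1e-9 * np.random.default_rng(0).standard_normal(A.shape)
p_new = eig(A + dA, right=False)
for k in range(2):
    w, v = l[:, k], r[:, k]
    # dλ ≈ (w* dA v) / (w* v); a small <w, v> means a sensitive eigenvalue
    pred = p[k] + (w.conj() @ dA @ v) / (w.conj() @ v)
    assert np.min(np.abs(p_new - pred)) < 1e-9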
Example #11
def _compute_tfinal_and_dt(sys, is_step=True):
    """
    Helper function to estimate a final time and a sampling period for
    time domain simulations. It is essentially geared towards impulse response
    but is also used for step responses.

    For discrete-time models, obviously dt is inherent and only tfinal is
    computed.

    Parameters
    ----------
    sys : {State, Transfer}
        The system to be investigated
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Evaluating only the fastest mode for dt and the slowest for tfinal often
    leads to unnecessarily bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal too large even though the fast
    mode hardly contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.

    Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
    can be used such that only the modes that have significant effect on the
    time response are taken. But the sensitivity of the eigenvalues complicates
    the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work
    with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
    for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
    the response is dependent on the size of the eigenshapes rather than the
    eigenvalues themselves.

    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    min_points = 100  # min number of points
    min_points_z = 20  # min number of points
    max_points = 10000  # max number of points
    max_points_z = 75000  # max number of points for discrete models
    default_tfinal = 5  # Default simulation horizon
    total_cycles = 5  # number of cycles for oscillating modes
    pts_per_cycle = 25  # Number of points divide a period of oscillation
    log_decay_percent = np.log(100)  # Factor of reduction for real pole decays

    # if a static model is given, don't bother with checks
    if sys._isgain:
        if sys._isdiscrete:
            return sys._dt*min_points_z, sys._dt
        else:
            return default_tfinal, default_tfinal / min_points

    if sys._isdiscrete:
        # System already has sampling fixed, hence we can't fall into the same
        # trap mentioned above. Just get nonintegrating slow modes together
        # with the damping.
        dt = sys._dt
        tfinal = default_tfinal
        p = eigvals(sys.a)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
            tfinal = max(tfinal, t_emp)

        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals - treated as oscillatory modes
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators (poles at z ≈ 1)
        m_int = (np.abs(p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
            tfinal = max(tfinal, t_emp)

        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
            tfinal = max(tfinal, t_emp)

        if p_int.size > 0:
            tfinal = tfinal * 5

        # Make tfinal an integer multiple of dt
        num_samples = tfinal // dt
        if num_samples > max_points_z:
            tfinal = dt * max_points_z
        else:
            tfinal = dt * num_samples

        return tfinal, dt

    # Improve conditioning via balancing and zeroing tiny entries
    # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
    b, (sca, perm) = matrix_balance(sys.a, separate=True)
    p, l, r = eig(b, left=True, right=True)
    # Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
    # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
    eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
    eig_sens = minimum(1e12, eig_sens)
    # Tolerances
    p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
    # Incorporate balancing to outer factors
    l[perm, :] *= reciprocal(sca)[:, None]
    r[perm, :] *= sca[:, None]
    w, v = sys.c @ r, l.T.conj() @ sys.b

    origin = False
    # Computing the "size" of the response of each simple mode
    wn = np.abs(p)
    if np.any(wn == 0.):
        origin = True

    dc = zeros_like(p, dtype=float)
    # well-conditioned nonzero poles, np.abs just in case
    ok = np.abs(eig_sens) <= 1/sqrt_eps
    # the averaged t→∞ response of each simple λ on each i/o channel
    # See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
    # R/L eigenvector dependent)
    dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
    dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
    dc[wn == 0.] = 0.
    # double the oscillating mode magnitude for the conjugate
    dc[p.imag != 0.] *= 2

    # Now get rid of noncontributing integrators and simple modes if any
    relevance = (dc > 0.1*dc.max()) | ~ok
    psub = p[relevance]
    wnsub = wn[relevance]

    tfinal, dt = [], []
    ints = wnsub == 0.
    iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)

    # Pure imaginary?
    if np.any(iw):
        tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
        dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
    # The rest ~ts = log(%ss value) / exp(Re(λ)t)
    texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
    tfinal += texp_mode.tolist()
    dt += minimum(texp_mode / 50,
                  (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()

    # All integrators?
    if len(tfinal) == 0:
        return default_tfinal*5, default_tfinal*5/min_points

    tfinal = np.max(tfinal)*(5 if origin else 1)
    dt = np.min(dt)

    dt = tfinal / max_points if tfinal // dt > max_points else dt
    tfinal = dt * min_points if tfinal // dt < min_points else tfinal

    return tfinal, dt
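
The horizon rule used throughout, log_decay_percent = log(100), picks t so that a real decaying mode has shrunk to 1 % of its initial size. A one-line numeric check:

import numpy as np

lam = -0.5 + 2.0j                          # a hypothetical stable mode
t_settle = np.log(100) / abs(lam.real)
assert np.isclose(abs(np.exp(lam * t_settle)), 0.01)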
Example #12
def _ideal_tfinal_and_dt(sys, is_step=True):
    """helper function to compute ideal simulation duration tfinal and dt, the
    time increment. Usually called by _default_time_vector, whose job it is to
    choose a realistic time vector. Considers both poles and zeros.

    For discrete-time models, dt is inherent and only tfinal is computed.

    Parameters
    ----------
    sys : StateSpace or TransferFunction
        The system whose time response is to be computed
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Evaluating only the fastest mode for dt and the slowest for tfinal often
    leads to unnecessarily bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal too large even though the fast
    mode hardly contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.

    Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
    can be used such that only the modes that have significant effect on the
    time response are taken. But the sensitivity of the eigenvalues complicates
    the matter since dlambda = <w, dA*v> with <w,v> = 1. Hence we can only work
    with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
    for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
    the response is dependent on the size of the eigenshapes rather than the
    eigenvalues themselves.

    By Ilhan Polat, with modifications by Sawyer Fuller to integrate into
    python-control 2020.08.17
    """

    sqrt_eps = np.sqrt(np.spacing(1.))
    default_tfinal = 5  # Default simulation horizon
    default_dt = 0.1
    total_cycles = 5  # number of cycles for oscillating modes
    pts_per_cycle = 25  # Number of points divide a period of oscillation
    log_decay_percent = np.log(100)  # Factor of reduction for real pole decays

    if sys.is_static_gain():
        tfinal = default_tfinal
        dt = sys.dt if isdtime(sys, strict=True) else default_dt
    elif isdtime(sys, strict=True):
        dt = sys.dt
        A = _convertToStateSpace(sys).A
        tfinal = default_tfinal
        p = eigvals(A)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u]) / dt))
            tfinal = max(tfinal, t_emp)

        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals - treated as oscillatory modes
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs(
                (np.log(p_nr) / dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators (poles at z ≈ 1)
        m_int = (np.abs(p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w) / dt).min()
            tfinal = max(tfinal, t_emp)

        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p) / dt).real).min()
            tfinal = max(tfinal, t_emp)

        if p_int.size > 0:
            tfinal = tfinal * 5
    else:  # cont time
        sys_ss = _convertToStateSpace(sys)
        # Improve conditioning via balancing and zeroing tiny entries
        # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
        b, (sca, perm) = matrix_balance(sys_ss.A, separate=True)
        p, l, r = eig(b, left=True, right=True)
        # Reciprocal of inner product <w,v> for each eigval, (bound the ~infs by 1e12)
        # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
        eig_sens = np.reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
        eig_sens = minimum(1e12, eig_sens)
        # Tolerances
        p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
        # Incorporate balancing to outer factors
        l[perm, :] *= np.reciprocal(sca)[:, None]
        r[perm, :] *= sca[:, None]
        w, v = sys_ss.C.dot(r), l.T.conj().dot(sys_ss.B)

        origin = False
        # Computing the "size" of the response of each simple mode
        wn = np.abs(p)
        if np.any(wn == 0.):
            origin = True

        dc = np.zeros_like(p, dtype=float)
        # well-conditioned nonzero poles, np.abs just in case
        ok = np.abs(eig_sens) <= 1 / sqrt_eps
        # the averaged t->inf response of each simple eigval on each i/o channel
        # See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
        # R/L eigenvector dependent)
        dc[ok] = norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0) * eig_sens[ok]
        dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
        dc[wn == 0.] = 0.
        # double the oscillating mode magnitude for the conjugate
        dc[p.imag != 0.] *= 2

        # Now get rid of noncontributing integrators and simple modes if any
        relevance = (dc > 0.1 * dc.max()) | ~ok
        psub = p[relevance]
        wnsub = wn[relevance]

        tfinal, dt = [], []
        ints = wnsub == 0.
        iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)

        # Pure imaginary?
        if np.any(iw):
            tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
            dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
        # The rest ~ts = log(%ss value) / exp(Re(eigval)t)
        texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
        tfinal += texp_mode.tolist()
        dt += minimum(
            texp_mode / 50,
            (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()

        # All integrators?
        if len(tfinal) == 0:
            return default_tfinal * 5, default_dt * 5

        tfinal = np.max(tfinal) * (5 if origin else 1)
        dt = np.min(dt)

    return tfinal, dt
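
The discrete-time branch applies the same 1 % rule after mapping each pole back to a continuous rate via log(p)/dt. A numeric sketch with a hypothetical real pole:

import numpy as np

p, dt = 0.95, 0.1
tfinal = np.log(100) / abs((np.log(p) / dt).real)
# after tfinal/dt samples the mode |p|**k has decayed to 1 %
assert np.isclose(abs(p) ** (tfinal / dt), 0.01)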
Example #13
def haroldlcm(*args, compute_multipliers=True, cleanup_threshold=1e-9):
    """
    Takes n-many 1D numpy arrays and computes the numerical
    least common multiple polynomial. The polynomials are
    assumed to be in decreasing powers, e.g. s^2 + 5 should
    be given as ``[1,0,5]``

    Returns a numpy array holding the polynomial coefficients
    of LCM and a list, of which entries are the polynomial
    multipliers to arrive at the LCM of each input element.

    For the multiplier computation, a variant of [1]_ is used.

    Parameters
    ----------
    args : iterable
        Input arrays. 1-D arrays or array_like sequences of polynomial
        coefficients
    compute_multipliers : bool, optional
        After the computation of the LCM, this switch decides whether the
        multipliers of the given arguments should be computed or skipped.
        A multiplier in this context is ``[1,3]`` for the argument ``[1,2]``
        if the LCM turns out to be ``[1,5,6]``.
    cleanup_threshold : float
        The computed polynomials might contain some numerical noise and after
        finishing everything this value is used to clean up the tiny entries.
        Set this value to zero to turn off this behavior. The default value
        is :math:`10^{-9}`.

    Returns
    -------
    lcmpoly : ndarray
        Resulting 1D polynomial coefficient array for the LCM.
    mults : list
        The multipliers given as a list of 1D arrays, for each given argument.

    Notes
    -----
    If complex-valued arrays are given, only real parts are taken into account.

    Examples
    --------
    >>> a, b = haroldlcm([1, 3, 0, -4], [1, -4, -3, 18], [1, -4, 3], [1, -2, -8])
    >>> a
    array([   1.,   -7.,    3.,   59.,  -68., -132.,  144.])
    >>> b
    [array([  1., -10.,  33., -36.]),
     array([  1.,  -3.,  -6.,   8.]),
     array([  1.,  -3., -12.,  20.,  48.]),
     array([  1.,  -5.,   1.,  21., -18.])]
    >>> np.convolve([1, 3, 0, -4], b[0])  # or haroldpolymul() for poly mult
    array([   1.,   -7.,    3.,   59.,  -68., -132.,  144.])

    References
    ----------
    .. [1] Karcanias, Mitrouli, "System theoretic based characterisation and
        computation of the least common multiple of a set of polynomials",
        2004, :doi:`10.1016/j.laa.2003.11.009`

    """
    # Regularize the arguments
    args = [np.array(a).squeeze().real for a in args]
    # Add dimension if any scalar arrays such as np.array(1)
    args = [a if a.ndim > 0 else np.atleast_1d(a) for a in args]
    if not all([x.ndim == 1 for x in args]):
        raise ValueError('Input arrays must be 1D.')
    if not all([x.size > 0 for x in args]):
        raise ValueError('Empty arrays are not allowed.')

    # All scalars
    if all([x.size == 1 for x in args]):
        if compute_multipliers:
            return np.array([1.]), [np.array([1.]) for _ in range(len(args))]
        else:
            return np.array([1.])

    # Remove constant polynomials if any, but return their multipliers!
    poppedargs = [x for x in args if x.size > 1]
    # Get the index number of the ones that are popped
    p_ind, l_ind = [], []
    for ind, x in enumerate(args):
        (p_ind if x.size == 1 else l_ind).append(ind)

    # If there is more than one nonconstant polynomial to consider
    if len(poppedargs) > 1:
        a = block_diag(*(map(haroldcompanion, poppedargs)))
        b = np.concatenate([e_i(x.size-1, -1) for x in poppedargs])
        n = a.shape[0]

        # Balance A
        As, (sca, _) = matrix_balance(a, permute=False, separate=True)
        Bs = b*np.reciprocal(sca)[:, None]

        # Computing the full c'bility matrix is redundant; we just need to
        # see where the rank drop occurs (if any!). Due to the matrix
        # powers, entries grow quickly!
        C = Bs
        for _ in range(n-1):
            C = np.hstack([C, As @ C[:, [-1]]])
            if matrix_rank(C) != C.shape[1]:
                break
        else:
            # No break
            C = np.hstack([C, As @ C[:, [-1]]])

        cols = C.shape[1]
        _, s, v = haroldsvd(C)
        temp = s @ v
        lcmpoly = solve(temp[:cols-1, :-1], -temp[:cols-1, -1])
        # Add monic coefficient and flip
        lcmpoly = np.append(lcmpoly, 1)[::-1]
    else:
        lcmpoly = np.trim_zeros(poppedargs[0], 'f')
        lcmpoly = lcmpoly/lcmpoly[0]

    if compute_multipliers:
        n_lcm = lcmpoly.size - 1
        if len(poppedargs) > 1:
            c = block_diag(*[e_i(x.size-1, 0).T for x in poppedargs]) * sca
            b_lcm, _, _, _ = lstsq(C[:c.shape[1], :-1], Bs)
            c_lcm = c @ C[:c.shape[1], :-1]

            # adj(sI-A) formulas with A being a companion matrix. Use a 3D
            # array where x,y,z = adj(sI-A)[x,y] and z is the coefficient array
            adjA = np.zeros([n_lcm, n_lcm, n_lcm])
            # fill in the adjoint
            for x in range(n_lcm):
                # Diagonal terms
                adjA[x, x, :n_lcm-x] = lcmpoly[:n_lcm-x]
                for y in range(n_lcm):
                    if y < x:  # Upper Triangular terms
                        adjA[x, y, x-y:] = adjA[x, x, :n_lcm-(x-y)]
                    elif y > x:  # Lower Triangular terms
                        adjA[x, y, n_lcm-y:n_lcm+1-y+x] = \
                                                    -lcmpoly[-x-1:n_lcm+1]
            # C*adj(sI-A)*B
            mults = c_lcm @ np.sum(adjA * b_lcm, axis=1)
        else:
            mults = np.zeros((1, n_lcm))
            mults[0, -1] = 1.

        if len(p_ind) > 0:
            temp = np.zeros((len(args), lcmpoly.size), dtype=float)
            temp[p_ind] = lcmpoly
            temp[l_ind, 1:] = mults
            mults = temp

        lcmpoly[abs(lcmpoly) < cleanup_threshold] = 0.
        mults[abs(mults) < cleanup_threshold] = 0.
        mults = [np.trim_zeros(z, 'f') for z in mults]
        return lcmpoly, mults
    else:
        return lcmpoly
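
Background for the block_diag(haroldcompanion(...)) construction: a companion matrix realizes a polynomial as its characteristic polynomial, which is why stacking companion blocks and testing controllability locates the LCM. A sketch with scipy's companion (haroldcompanion may use a transposed layout, but the characteristic-polynomial property is the same):

import numpy as np
from scipy.linalg import companion

p = [1., -7., 3., 59., -68., -132., 144.]   # the LCM from the docstring
A = companion(p)
assert np.allclose(np.poly(A), p)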
Example #14
def _minimal_realization_state(A, B, C, tol=1e-6):
    """
    Low-level function to perform the state removel if any for minimal
    realizations. No consistency check is performed.
    """

    # Empty matrices, don't bother
    if A.size == 0:
        return A, B, C

    # scale the system matrix with possible permutations
    A, T = matrix_balance(A)
    # T always has powers of 2 nonzero elements
    B, C = solve(T, B), C @ T

    n = A.shape[0]
    # Make sure that we still have states left, otherwise done
    if n == 0:
        return A, B, C

    # Now obtain the c'ble and o'ble staircase forms
    Ac, Bc, Cc, _ = staircase(A, B, C)
    Ao, Bo, Co, _ = staircase(A, B, C, form='o', invert=True)
    # And compute the distance to rank deficiency.
    kc, *_ = cancellation_distance(Ac, Bc)
    ko, *_ = cancellation_distance(Ao.T, Co.T)

    # If both distances are above tol then the realization is already minimal
    if min(kc, ko) > tol:
        return A, B, C
    else:
        # Here, we improve the cancellation distance computations by first
        # scaling the system and then forming the staircase forms.

        # If the uncontrollability distance is smaller, handle it first
        # (the order is an arbitrary choice)
        if kc <= tol:
            # Start removing and check if the distance gets bigger.
            # The observability form removes from the top left, the
            # controllability form from the bottom right.
            while kc <= tol:
                Ac, Bc, Cc = (Ac[:-1, :-1], Bc[:-1, :], Cc[:, :-1])
                if Ac.size == 0:
                    A, B, C = [array([], dtype=float)]*3
                    break
                else:
                    kc, *_ = cancellation_distance(Ac, Bc)
            # Return the resulting matrices
            A, B, C = Ac, Bc, Cc
            # Same with the o'ble modes, but the loop above might already
            # have removed an unobservable mode, so get the distance again
            ko, *_ = cancellation_distance(A.T, C.T)

        # Any unobservable modes left?
        if ko <= tol:
            Ao, Bo, Co, To = staircase(A, B, C, form='o', invert=True)
            while ko <= tol:  # Until cancel dist gets big
                Ao, Bo, Co = Ao[1:, 1:], Bo[1:, :], Co[:, 1:]
                if Ao.size == 0:
                    A, B, C = [array([], dtype=float)]*3
                    break
                else:
                    ko, *_ = cancellation_distance(Ao, Bo)

            # Return the resulting matrices
            A, B, C = Ao, Bo, Co

    return A, B, C
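
A toy illustration of what the removal loops accomplish (hypothetical data): a state decoupled from both the input and the output can be dropped without changing any Markov parameter C A^k B, i.e., without changing the i/o behavior:

import numpy as np

A = np.array([[-1.0, 0.0], [0.0, -5.0]])
B = np.array([[1.0], [0.0]])          # second state is never driven...
C = np.array([[2.0, 0.0]])            # ...and never observed
Am, Bm, Cm = A[:1, :1], B[:1, :], C[:, :1]
for k in range(4):
    assert np.allclose(C @ np.linalg.matrix_power(A, k) @ B,
                       Cm @ np.linalg.matrix_power(Am, k) @ Bm)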