Code example #1
File: librom.py Project: rocky420/sharpy
        def test_balreal_direct_py(self):

            Nx, Nu, Ny = 6, 4, 2
            ss = libss.random_ss(Nx, Nu, Ny, dt=0.1, stable=True)

            ### direct balancing
            hsv, T, Ti = balreal_direct_py(ss.A,
                                           ss.B,
                                           ss.C,
                                           DLTI=True,
                                           full_outputs=False)
            ssb = copy.deepcopy(ss)
            ssb.project(Ti, T)

            # compare freq. resp.
            kv = np.array([0., .5, 3., 5.67])
            Y = ss.freqresp(kv)
            Yb = ssb.freqresp(kv)
            er_max = np.max(np.abs(Yb - Y))
            assert er_max / np.max(np.abs(Y)) < 1e-10, 'error too large'

            # test full_outputs option
            hsv, U, Vh, Qc, Qo = balreal_direct_py(ss.A,
                                                   ss.B,
                                                   ss.C,
                                                   DLTI=True,
                                                   full_outputs=True)

            # rebuild T and Ti from the SVD factors of M = Qo.T Qc
            sinv = hsv**(-0.5)
            T2 = libsp.dot(Qc, Vh.T * sinv)
            Ti2 = np.dot((U * sinv).T, Qo.T)
            assert np.linalg.norm(T2 - T) < 1e-13, 'error too large'
            assert np.linalg.norm(Ti2 - Ti) < 1e-13, 'error too large'

            ssb2 = copy.deepcopy(ss)
            ssb2.project(Ti2, T2)
            Yb2 = ssb2.freqresp(kv)
            er_max = np.max(np.abs(Yb2 - Y))
            assert er_max / np.max(np.abs(Y)) < 1e-10, 'error too large'
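The full_outputs branch above rebuilds the projection matrices from the square-root factors. Why this works: with M = Qo.T @ Qc = U * diag(hsv) * Vh, the products T = Qc Vh.T hsv**-0.5 and Ti = hsv**-0.5 U.T Qo.T satisfy Ti @ T = I, so they form a valid projection pair. A minimal standalone check of that identity with plain numpy and random illustrative factors (not the sharpy API):

# Standalone check of the square-root reconstruction used in the test above
# (illustrative names only; plain numpy, not the sharpy API).
import numpy as np

rng = np.random.default_rng(0)
Qc = rng.standard_normal((8, 5))   # controllability factor, Wc = Qc @ Qc.T
Qo = rng.standard_normal((8, 5))   # observability factor,  Wo = Qo @ Qo.T

U, hsv, Vh = np.linalg.svd(Qo.T @ Qc, full_matrices=False)
sinv = hsv**(-0.5)
T = Qc @ (Vh.T * sinv)             # same reconstruction as in the test
Ti = (U * sinv).T @ Qo.T
assert np.allclose(Ti @ T, np.eye(len(hsv)))   # Ti is a left inverse of T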
Code example #2
File: librom.py Project: wong-hl/sharpy
def balfreq(SS, DictBalFreq):
    """
    Method for frequency limited balancing.

    The observability and controllability Gramians over the frequencies kv
    are solved in factorised form. Balanced modes are then obtained with a
    square-root method.

    Details:

        * Observability and controllability Gramians are solved in factorised form
          through explicit integration. The number of integration points determines
          both the accuracy and the maximum size of the balanced model.

        * Stability over all (Nb) balanced states is achieved if:

            a. one of the Gramians is integrated over the full Nyquist range
            b. enough integration points are used.


    Input:

    - DictBalFreq: dictionary specifying integration method with keys:

        - ``frequency``: defines the limit frequency for balancing. The balanced
          model will be accurate in the range ``[0,F]``, where ``F`` is the value of
          this key. Note that the units of ``F`` must be consistent with the units
          specified in the ``self.ScalingFacts`` dictionary.

        - ``method_low``: ``['gauss','trapz']`` specifies whether to use Gauss
          quadrature or the trapezoidal rule in the low-frequency range ``[0,F]``.

        - ``options_low``: options to use for integration in the low-frequencies.
          These depend on the integration scheme (See below).

        - ``method_high``: method to use for integration in the range ``[F, F_N]``,
          where ``F_N`` is the Nyquist frequency. See ``method_low``.

        - ``options_high``: options to use for integration in the high-frequencies.

        - ``check_stability``: if True, the balanced model is truncated to
          eliminate unstable modes, if any are found. Note that a very accurate
          balanced model can still be obtained even if high-order modes are
          unstable. Note that this option is overridden if ""

        - ``get_frequency_response``: if True, the function also returns the
          frequency response evaluated at the low-frequency range integration
          points. This option also allows the balanced model to be tuned
          automatically.


    Future options:
        - Ncpu: for parallel run


    The following integration schemes are available:
        - ``trapz``: performs integration over equally spaced points using
          trapezoidal rule. It accepts options dictionaries with keys:

             - ``points``: number of integration points to use (including
               domain boundary)

        - ``gauss``: performs Gauss-Lobatto quadrature. The domain can be
          partitioned into ``Npart`` sub-domains, in each of which a Gauss-Lobatto
          quadrature of order ``Ord`` is applied. A total of ``Npart*Ord`` points is
          required. It accepts options dictionaries of the form:

             - ``partitions``: number of partitions

             - ``order``: quadrature order.


    Examples:

        The following dictionary

        >>>   DictBalFreq={'frequency': 1.2,
        >>>                'method_low': 'trapz',
        >>>                'options_low': {'points': 12},
        >>>                'method_high': 'gauss',
        >>>                'options_high': {'partitions': 2, 'order': 8},
        >>>                'check_stability': True }


        balances the state-space model in the frequency range [0, 1.2]
        using:

            a. trapezoidal integration of the Gramians over 12 equally-spaced
               points in the low-frequency range [0, 1.2], and

            b. two Gauss-Lobatto quadratures of order 8 for the controllability
               Gramian in the high-frequency range.


        A total of 28 integration points is required, which results in a
        balanced model with number of states

        >>>    min{ 2*28* number_inputs, 2*28* number_outputs }


        The model is finally truncated so as to retain only the first Ns stable
        modes.
    """

    ### check input dictionary
    if 'frequency' not in DictBalFreq:
        raise NameError('Solution dictionary must include the "frequency" key')

    if 'method_low' not in DictBalFreq:
        warnings.warn('Setting default options for low-frequency integration')
        DictBalFreq['method_low'] = 'trapz'
        DictBalFreq['options_low'] = {'points': 12}

    if 'method_high' not in DictBalFreq:
        warnings.warn('Setting default options for high-frequency integration')
        DictBalFreq['method_high'] = 'gauss'
        DictBalFreq['options_high'] = {'partitions': 2, 'order': 8}

    if 'check_stability' not in DictBalFreq:
        DictBalFreq['check_stability'] = True

    if 'output_modes' not in DictBalFreq:
        DictBalFreq['output_modes'] = True

    if 'get_frequency_response' not in DictBalFreq:
        DictBalFreq['get_frequency_response'] = False

    ### get integration points and weights

    # Nyquist frequency
    kn = np.pi / SS.dt

    Opt = DictBalFreq['options_low']
    if DictBalFreq['method_low'] == 'trapz':
        kv_low, wv_low = get_trapz_weights(0., DictBalFreq['frequency'],
                                           Opt['points'], False)
    elif DictBalFreq['method_low'] == 'gauss':
        kv_low, wv_low = get_gauss_weights(0., DictBalFreq['frequency'],
                                           Opt['partitions'], Opt['order'])
    else:
        raise NameError('Invalid value %s for key "method_low"' %
                        DictBalFreq['method_low'])

    Opt = DictBalFreq['options_high']
    if DictBalFreq['method_high'] == 'trapz':
        if Opt['points'] == 0:
            warnings.warn('You have chosen no points in high frequency range!')
            kv_high, wv_high = [], []
        else:
            kv_high, wv_high = get_trapz_weights(DictBalFreq['frequency'], kn,
                                                 Opt['points'], True)
    elif DictBalFreq['method_high'] == 'gauss':
        if Opt['order'] * Opt['partitions'] == 0:
            warnings.warn('You have chosen no points in high frequency range!')
            kv_high, wv_high = [], []
        else:
            kv_high, wv_high = get_gauss_weights(DictBalFreq['frequency'], kn,
                                                 Opt['partitions'],
                                                 Opt['order'])
    else:
        raise NameError('Invalid value %s for key "method_high"' %
                        DictBalFreq['method_high'])

    ### -------------------------------------------------- loop frequencies

    ### merge vectors
    Nk_low = len(kv_low)
    kvdt = np.concatenate((kv_low, kv_high)) * SS.dt
    wv = np.concatenate((wv_low, wv_high)) * SS.dt
    zv = np.cos(kvdt) + 1.j * np.sin(kvdt)

    Eye = libsp.eye_as(SS.A)
    Zc = np.zeros((SS.states, 2 * SS.inputs * len(kvdt)), )
    Zo = np.zeros((SS.states, 2 * SS.outputs * Nk_low), )

    if DictBalFreq['get_frequency_response']:
        Yfreq = np.empty((
            SS.outputs,
            SS.inputs,
            Nk_low,
        ), dtype=np.complex_)
        kv = kv_low

    for kk in range(len(kvdt)):

        zval = zv[kk]
        Intfact = wv[kk]  # integration factor

        Qctrl = Intfact * libsp.solve(zval * Eye - SS.A, SS.B)
        kkvec = range(2 * kk * SS.inputs, 2 * (kk + 1) * SS.inputs)
        Zc[:, kkvec[:SS.inputs]] = Qctrl.real
        Zc[:, kkvec[SS.inputs:]] = Qctrl.imag

        ### ----- frequency response
        if DictBalFreq['get_frequency_response'] and kk < Nk_low:
            Yfreq[:, :, kk] = (1. / Intfact) * \
                              libsp.dot(SS.C, Qctrl, type_out=np.ndarray) + SS.D

        ### ----- observability
        if kk >= Nk_low:
            continue

        Qobs = Intfact * libsp.solve(np.conj(zval) * Eye - SS.A.T, SS.C.T)

        kkvec = range(2 * kk * SS.outputs, 2 * (kk + 1) * SS.outputs)
        Zo[:, kkvec[:SS.outputs]] = Intfact * Qobs.real
        Zo[:, kkvec[SS.outputs:]] = Intfact * Qobs.imag

    # delete full matrices
    Kernel = None
    Qctrl = None
    Qobs = None

    # low-rank square-root method (LRSQM, optimised)
    U, hsv, Vh = scalg.svd(np.dot(Zo.T, Zc), full_matrices=False)
    sinv = hsv**(-0.5)
    T = np.dot(Zc, Vh.T * sinv)
    Ti = np.dot((U * sinv).T, Zo.T)
    # Zc,Zo=None,None

    ### build frequency balanced model
    Ab = libsp.dot(Ti, libsp.dot(SS.A, T))
    Bb = libsp.dot(Ti, SS.B)
    Cb = libsp.dot(SS.C, T)
    SSb = libss.ss(Ab, Bb, Cb, SS.D, dt=SS.dt)

    ### Eliminate unstable modes - if any:
    if DictBalFreq['check_stability']:
        for nn in range(1, len(hsv) + 1):
            eigs_trunc = scalg.eigvals(SSb.A[:nn, :nn])
            eigs_trunc_max = np.max(np.abs(eigs_trunc))
            if eigs_trunc_max > 1. - 1e-16:
                SSb.truncate(nn - 1)
                hsv = hsv[:nn - 1]
                T = T[:, :nn - 1]
                Ti = Ti[:nn - 1, :]
                break

    outs = (SSb, hsv)
    if DictBalFreq['output_modes']:
        outs += (T, Ti, Zc, Zo, U, Vh)
    return outs
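A minimal usage sketch for balfreq, putting the docstring's example dictionary together with the random_ss constructor used in the other examples on this page. The import paths below are assumptions for illustration; only the balfreq(SS, DictBalFreq) call and the (SSb, hsv) return follow directly from the code above:

# Hypothetical usage sketch for balfreq (import paths are assumptions).
import sharpy.linear.src.libss as libss    # assumed module location
import sharpy.linear.src.librom as librom  # assumed module location

ss = libss.random_ss(10, 2, 3, dt=0.1, stable=True)   # small DLTI test system

DictBalFreq = {'frequency': 1.2,
               'method_low': 'trapz',
               'options_low': {'points': 12},
               'method_high': 'gauss',
               'options_high': {'partitions': 2, 'order': 8},
               'check_stability': True,
               'output_modes': False}   # return only (SSb, hsv)

SSb, hsv = librom.balfreq(ss, DictBalFreq)
print(SSb.states, hsv)   # size of the balanced model and its Hankel singular values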
Code example #3
File: librom.py Project: wong-hl/sharpy
def balreal_iter(A,
                 B,
                 C,
                 lowrank=True,
                 tolSmith=1e-10,
                 tolSVD=1e-6,
                 kmin=None,
                 tolAbs=False,
                 Print=False,
                 outFacts=False):
    """
    Find balanced realisation of DLTI system.

    Notes:

        Lyapunov equations are solved using the iterative squared Smith
        algorithm, in its low- or full-rank version. These implementations
        follow the low_rank_smith and smith_iter functions respectively but,
        for computational efficiency, the iterations are rewritten here so as
        to solve for the observability and controllability Gramians
        simultaneously.


    - Exploiting sparsity:

        This algorithm is not ideal for exploiting sparsity. However, the
        following strategy is implemented:

            - if the A matrix is provided in sparse format, the powers of A are
              calculated exploiting sparsity as long as the number of non-zero
              elements stays below 15% of the size of A. Above this threshold,
              the cost of the matrix multiplication rises dramatically, and A is
              hence converted to a dense numpy array.
    """

    ### Solve Lyapunov equations
    # Notation reminder:
    # scipy: A X A.T - X = -Q
    # contr: A W A.T - W = - B B.T
    # obser: A.T W A - W = - C.T C
    # low-rank smith: A.T X A - X = -Q Q.T

    # matrices size
    N = A.shape[0]
    rC = B.shape[1]
    rO = C.shape[0]

    if lowrank:  # low-rank square-Smith iteration (with SVD)

        # initialise smith iteration
        DeltaNorm = 1e6  # error
        DeltaNormNext = DeltaNorm**2  # error expected at next iter

        kk = 0
        Qck = B
        Qok = C.T

        if Print:
            print('Iter\tMaxZ\t|\trank_c\trank_o\tA size')
        while DeltaNorm > tolSmith and DeltaNormNext > 1e-3 * tolSmith:

            ###### controllability
            ### compute Ak^2 * Qck
            # (future: use block Arnoldi)
            Qcright = libsp.dot(A, Qck)
            MaxZhere = np.max(np.abs(Qcright))

            ### enlarge Z matrices
            Qck = np.concatenate((Qck, Qcright), axis=1)
            Qcright = None
            rC = Qck.shape[1]

            if kmin is None or kmin < rC:
                ### "cheap" SVD truncation
                Uc, svc = scalg.svd(Qck,
                                    full_matrices=False,
                                    overwrite_a=True,
                                    lapack_driver='gesdd')[:2]
                # import scipy.linalg.interpolative as sli
                # Ucnew,svcnew,temp=sli.svd(Qck,tolSVD)
                if tolAbs:
                    rcmax = np.sum(svc > tolSVD)
                else:
                    rcmax = np.sum(svc > tolSVD * svc[0])
                if kmin is not None:
                    rC = max(rcmax, kmin)
                else:
                    rC = rcmax
                Qck = Uc[:, :rC] * svc[:rC]
                # free memory
                Uc = None
                Qcright = None

            ###### observability
            ### compute Ak^2 * Qok
            # (future: use block Arnoldi)
            Qoright = np.transpose(libsp.dot(Qok.T, A))
            DeltaNorm = max(MaxZhere, np.max(np.abs(Qoright)))

            ### enlarge Z matrices
            Qok = np.concatenate((Qok, Qoright), axis=1)
            Qoright = None
            rO = Qok.shape[1]

            if kmin is None or kmin < rO:
                ### "cheap" SVD truncation
                Uo, svo = scalg.svd(Qok, full_matrices=False)[:2]

                if tolAbs:
                    romax = np.sum(svo > tolSVD)
                else:
                    romax = np.sum(svo > tolSVD * svo[0])
                if kmin is not None:
                    rO = max(romax, kmin)
                else:
                    rO = romax
                Qok = Uo[:, :rO] * svo[:rO]
                Uo = None

            ##### Prepare next time step
            if Print:
                print('%.3d\t%.2e\t%.5d\t%.5d\t%.5d' %
                      (kk, DeltaNorm, rC, rO, N))
            DeltaNormNext = DeltaNorm**2

            if DeltaNorm > tolSmith and DeltaNormNext > 1e-3 * tolSmith:

                # compute power
                if type(A) is libsp.csc_matrix:
                    A = A.dot(A)
                    # check sparsity
                    if A.size > 0.15 * N**2:
                        A = A.toarray()
                elif type(A) is np.ndarray:
                    A = np.linalg.matrix_power(A, 2)
                else:
                    raise NameError('Type of A not supported')

            ### update
            kk = kk + 1

        A = None

    else:  # full-rank squared Smith iteration (with Cholesky)

        raise NameError('Use balreal_iter_old instead!')

    # find min size (only if iter used)
    cc, co = Qck.shape[1], Qok.shape[1]
    if Print:
        print('cc=%.2d, co=%.2d' % (cc, co))
        print('rank(Zc)=%.4d\trank(Zo)=%.4d' % (rcmax, romax))

    # build M matrix and SVD
    M = libsp.dot(Qok.T, Qck)
    U, s, Vh = scalg.svd(M, full_matrices=False)

    if outFacts:
        return s, Qck, Qok

    else:
        sinv = s**(-0.5)
        T = libsp.dot(Qck, Vh.T * sinv)
        Tinv = np.dot((U * sinv).T, Qok.T)

    if Print:
        print('rank(Zc)=%.4d\trank(Zo)=%.4d' % (rcmax, romax))

    return s, T, Tinv, rcmax, romax
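The docstring refers to the squared Smith iteration for the discrete Lyapunov equations. Below is a minimal standalone sketch of the full-rank variant (the low-rank version above adds the SVD truncation of the factors), checked against scipy; the function name and setup are illustrative only and this is not the sharpy implementation:

# Full-rank squared Smith iteration for A X A.T - X = -Q (illustrative sketch).
# The solution X = sum_k A^k Q (A^k).T is accumulated while the state matrix
# is repeatedly squared, so the number of series terms doubles per iteration.
import numpy as np
import scipy.linalg as scalg

def smith_squared(A, Q, tol=1e-12, maxiter=50):
    X = Q.copy()
    Ak = A.copy()
    for _ in range(maxiter):
        dX = Ak @ X @ Ak.T      # contribution of the next block of terms
        X = X + dX
        if np.max(np.abs(dX)) < tol:
            break
        Ak = Ak @ Ak            # square the state matrix
    return X

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
A *= 0.9 / np.max(np.abs(np.linalg.eigvals(A)))   # enforce a stable DLTI matrix
B = rng.standard_normal((5, 2))

Wc = smith_squared(A, B @ B.T)   # controllability Gramian
assert np.allclose(Wc, scalg.solve_discrete_lyapunov(A, B @ B.T))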
Code example #4
File: test_balancing.py Project: sananazir/sharpy
    def test_balreal_direct_py(self):
        Nx, Nu, Ny = 6, 4, 2
        ss = libss.random_ss(Nx, Nu, Ny, dt=0.1, stable=True)

        ### direct balancing
        hsv, T, Ti = librom.balreal_direct_py(ss.A,
                                              ss.B,
                                              ss.C,
                                              DLTI=True,
                                              full_outputs=False)
        ssb = copy.deepcopy(ss)

        # Note: notation below is correct and consistent with documentation
        # SHARPy historically uses notation different from regular literature notation (i.e. 'swapped')
        ssb.project(Ti, T)

        # Compare freq. resp. - inconclusive!
        # The system is consistently transformed using T and Tinv, so the system dynamics do not change for any
        # valid choice of T and Tinv; the frequency response is therefore the same:
        kv = np.linspace(0.01, 10)
        Y = ss.freqresp(kv)
        Yb = ssb.freqresp(kv)
        er_max = np.max(np.abs(Yb - Y))
        assert er_max / np.max(
            np.abs(Y)) < 1e-10, 'Error too large in frequency response'

        # Compare Gramians:
        Wc = scalg.solve_discrete_lyapunov(ssb.A, np.dot(ssb.B, ssb.B.T))
        Wo = scalg.solve_discrete_lyapunov(ssb.A.T, np.dot(ssb.C.T, ssb.C))

        er_grammians = np.max(np.abs(Wc - Wo))
        # Print Gramians to compare:
        if er_grammians / np.max(np.abs(Wc)) > 1e-10:
            print('Controllability Gramian, Wc:\n', Wc)
            print('Observability Gramian, Wo:\n', Wo)

        er_hankel = np.max(np.abs(np.diag(hsv) - Wc))
        # Print hsv to compare:
        if er_hankel / np.max(np.abs(Wc)) > 1e-10:
            print('Controllability Gramian, Wc:\n', Wc)
            print('Hankel singular values, hsv:\n', hsv)

        assert er_grammians / np.max(np.abs(
            Wc)) < 1e-10, 'Relative error in Wc-Wo is too large -> Wc != Wo'
        assert er_hankel / np.max(np.abs(
            Wc)) < 1e-10, 'Relative error in Wc-HSV is too large -> Wc != HSV'

        # The test below is inconclusive for the direct procedure! T and Tinv are produced from svd(M)
        # This means that going back from svd(M) to T and Tinv will yield the same result for any choice of T and Tinv
        # Unless something else is wrong (e.g. a typo) - so leaving it in.
        # test full_outputs option
        hsv, U, Vh, Qc, Qo = librom.balreal_direct_py(ss.A,
                                                      ss.B,
                                                      ss.C,
                                                      DLTI=True,
                                                      full_outputs=True)

        # rebuild T and Ti from the SVD factors of M = Qo.T Qc
        sinv = hsv**(-0.5)
        T2 = libsp.dot(Qc, Vh.T * sinv)
        Ti2 = np.dot((U * sinv).T, Qo.T)
        assert np.linalg.norm(T2 - T) < 1e-13, 'Error too large'
        assert np.linalg.norm(Ti2 - Ti) < 1e-13, 'Error too large'

        ssb2 = copy.deepcopy(ss)
        ssb2.project(Ti2, T2)
        Yb2 = ssb2.freqresp(kv)
        er_max = np.max(np.abs(Yb2 - Y))
        assert er_max / np.max(np.abs(Y)) < 1e-10, 'Error too large'
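What the Gramian comparison in this test verifies, in standalone form: after square-root balancing of a stable DLTI system, the controllability and observability Gramians of the transformed model coincide and equal diag(hsv). A minimal sketch using scipy/numpy only, with dense Cholesky factors of the Gramians (illustrative, not the sharpy routine):

# Standalone illustration of the balanced-realisation property tested above:
# Wc_balanced = Wo_balanced = diag(hsv). Plain scipy/numpy, illustrative only.
import numpy as np
import scipy.linalg as scalg

rng = np.random.default_rng(1)
A = rng.standard_normal((4, 4))
A *= 0.8 / np.max(np.abs(np.linalg.eigvals(A)))   # stable DLTI state matrix
B = rng.standard_normal((4, 2))
C = rng.standard_normal((2, 4))

Wc = scalg.solve_discrete_lyapunov(A, B @ B.T)     # A Wc A.T - Wc = -B B.T
Wo = scalg.solve_discrete_lyapunov(A.T, C.T @ C)   # A.T Wo A - Wo = -C.T C

# square-root balancing: T = Qc Vh.T s^-1/2, Ti = s^-1/2 U.T Qo.T
Qc = scalg.cholesky(Wc, lower=True)
Qo = scalg.cholesky(Wo, lower=True)
U, hsv, Vh = scalg.svd(Qo.T @ Qc)
T = Qc @ (Vh.T * hsv**(-0.5))
Ti = (U * hsv**(-0.5)).T @ Qo.T

Ab, Bb, Cb = Ti @ A @ T, Ti @ B, C @ T
Wcb = scalg.solve_discrete_lyapunov(Ab, Bb @ Bb.T)
Wob = scalg.solve_discrete_lyapunov(Ab.T, Cb.T @ Cb)
assert np.allclose(Wcb, np.diag(hsv))
assert np.allclose(Wob, np.diag(hsv))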
Code example #5
File: hale_stability.py Project: ngoiz/sharpy_cases
# speed_ref = uvlm.ScalingFacts['speed']
# force_ref = uvlm.ScalingFacts['force']
# time_ref = uvlm.ScalingFacts['time']
# t_uvlm_scale = time.time() - t_uvlm_scale0
# print('...UVLM Scaling complete in %.2f s' % t_uvlm_scale)

# UVLM remove gust input - gusts not required in analysis
uvlm.SS.B = libsp.csc_matrix(
    aeroelastic_system.linuvlm.SS.B[:, :6 * aeroelastic_system.linuvlm.Kzeta])
uvlm.SS.D = libsp.csc_matrix(
    aeroelastic_system.linuvlm.SS.D[:, :6 * aeroelastic_system.linuvlm.Kzeta])
#
# UVLM projection onto modes
print('Projecting UVLM onto modes')
gain_input = libsp.dot(
    gain_struct_2_aero,
    sclalg.block_diag(beam.U[:, :beam.Nmodes], beam.U[:, :beam.Nmodes]))
gain_output = libsp.dot(beam.U[:, :beam.Nmodes].T, gain_aero_2_struct)
uvlm.SS.addGain(gain_input, where='in')
uvlm.SS.addGain(gain_output, where='out')
print('...complete')

# # Krylov MOR
# rom = krylovrom.KrylovReducedOrderModel()
# rom.initialise(data, aeroelastic_system.linuvlm.SS)
# frequency_continuous_k = np.array([0.1j])
# frequency_continuous_w = 2 * u_inf * frequency_continuous_k / ws.c_root
# interpolation_point = np.exp(frequency_continuous_w*aeroelastic_system.linuvlm.SS.dt)
#
# rom.run(algorithm, r, interpolation_point)
#
Code example #6
Fref0=Sol.linuvlm.ScalingFacts['force']
tref0=Sol.linuvlm.ScalingFacts['time']
print('\t\tdone in %.2f sec' %Sol.linuvlm.cpu_summary['nondim'])


# remove gust input
print('removing gust input started...')
t0=time.time()
Sol.linuvlm.SS.B=libsp.csc_matrix(Sol.linuvlm.SS.B[:,:6*Sol.linuvlm.Kzeta])
Sol.linuvlm.SS.D=Sol.linuvlm.SS.D[:,:6*Sol.linuvlm.Kzeta]
print('\t\tdone in %.2f sec' %(time.time()-t0))

# project
print('projection of UVLM eq.s started...')
t0=time.time()
Kin = libsp.dot(Kas, sc.linalg.block_diag(gebm.U[:, :gebm.Nmodes],
                                          gebm.U[:, :gebm.Nmodes]))
Kout = libsp.dot(gebm.U[:, :gebm.Nmodes].T, Ksa)
Sol.linuvlm.SS.addGain(Kin, where='in')
Sol.linuvlm.SS.addGain(Kout, where='out')
print('\t\tdone in %.2f sec' %(time.time()-t0))

Kin,Kout=None,None
Kas,Ksa=None,None
Sol.Kforces=None
Sol.Kdisp=None
Sol.Kvel_disp=None


########################################################################### ROM

cpubal=time.time()