Example no. 1
# imports assumed from the surrounding module (SIPPY-style context)
import numpy as np
import control.matlab as cnt
from past.utils import old_div

def GEN_RLS_id(id_method, y, u, na, nb, nc, nd, nf, theta, max_iterations):

    ylength = y.size
        
    # input/output number
    m = 1; p = 1
    
    # maximum number of non-predictable samples
    nbth = nb + theta
    val = max(na, nbth, nc, nd, nf)
    # whole data
    N = ylength
    
    # Total Order: both LTI and time varying part
    nt = na + nb + nc + nd + nf + 1
    nh = max([na,nc])
    
    ## Iterative Identification Algorithm

    ## Parameters Initialization
    # Confidence Parameter
    Beta = 1e4
    # Covariance matrix of parameter teta
    p_t = Beta*np.eye(nt-1,nt-1)
    P_t = np.zeros((nt-1,nt-1,N))
    for i in range(N):
            P_t[:,:,i] = p_t
    # Gain
    K_t = np.zeros((nt-1,N))
        
    # First estimate
    teta = np.zeros((nt-1,N))
    eta = np.zeros(N)
    # Forgetting factors
    L_t = 1
    l_t = L_t*np.ones(N)
    #
    Yp = np.zeros(N)
    E = np.zeros(N)
    fi = np.zeros((1,nt-1,N))
   
    ## Propagation
    for k in range(N):
        if k > val:
            ## Step 1: Regressor vector
            vecY = y[k-na:k][::-1]                      # Y vector
            vecYp = Yp[k-nf:k][::-1]                    # Yp vector
            #             
            vecU = u[k-nb-theta:k-theta][::-1]          # U vector
            # 
            #vecE = E[k-nh:k][::-1]                     # E vector
            vecE = E[k-nc:k][::-1] 
                       
            # choose input-output model
            if id_method == 'ARMAX':
                fi[:,:,k] = np.hstack((-vecY, vecU, vecE))
            elif id_method == 'ARX':
                fi[:,:,k] = np.hstack((-vecY, vecU))
            elif id_method == 'OE':
                fi[:,:,k] = np.hstack((-vecYp, vecU))
            elif id_method == 'FIR':
                fi[:,:,k] = np.hstack((vecU))
            phi = fi[:,:,k].T
            
            ## Step 2: Gain Update
            # Gain of parameter teta
            K_t[:,k:k+1] = np.dot(np.dot(P_t[:,:,k-1],phi),np.linalg.inv(l_t[k-1] + np.dot(np.dot(phi.T,P_t[:,:,k-1]),phi)))

            ## Step 3: Parameter Update
            teta[:,k] = teta[:,k-1] + np.dot(K_t[:,k:k+1],(y[k] - np.dot(phi.T,teta[:,k-1])))
            
            ## Step 4: A posteriori prediction-error
            Yp[k] = np.dot(phi.T,teta[:,k]) + eta[k]
            E[k] = y[k] - Yp[k]

            ## Step 5. Parameter estimate covariance update
            P_t[:,:,k] = (1/l_t[k-1])*(np.dot(np.eye(nt-1) - np.dot(K_t[:,k:k+1],phi.T),P_t[:,:,k-1]))

            ## Step 6: Forgetting factor update
            l_t[k] = 1.0
        
    # Error Norm
    Vn = old_div((np.linalg.norm(y - Yp, 2) ** 2), (2 * (N-val)))
    
    # Model Output
    y_id = Yp
    
    # Parameters
    THETA = teta[:,-1]
    
    # building TF coefficient vectors
    valH = max(nc, na + nd)
    valG = max(nb + theta, na + nf)   
     
    # G    
    # numG (B)
    if id_method == 'ARMA':
        NUM = 1.0     
    else: 
        NUM = np.zeros(valG)
        ng = nf if id_method == 'OE' else na
        NUM[theta:nb + theta] = THETA[ng:nb+ng]
    # denG (A*F)
    A = cnt.tf(np.hstack((1, np.zeros((na)))), np.hstack((1, THETA[:na])),1)
    
    if id_method == 'OE':
        F = cnt.tf(np.hstack((1, np.zeros((nf)))), np.hstack((1, THETA[:nf])),1)
    else:
        F = cnt.tf(np.hstack((1, np.zeros((nf)))), np.hstack((1, THETA[na+nb+nc+nd:na+nb+nc+nd+nf])),1)
    _, deng = cnt.tfdata(A*F) 
    denG = np.array(deng[0])
    DEN = np.zeros(valG + 1)
    DEN[0:na+nf+1] = denG
    
    # H
    # numH (C)
    if id_method == 'OE':
        NUMH = 1
    else:
        NUMH = np.zeros(valH + 1)
        NUMH[0] = 1.
        NUMH[1:nc + 1] = THETA[na+nb:na+nb+nc]
    # denH (A*D)
    D = cnt.tf(np.hstack((1, np.zeros((nd)))), np.hstack((1, THETA[na+nb+nc:na+nb+nc+nd])),1)
    _, denh = cnt.tfdata(A*D)
    denH = np.array(denh[0])
    DENH = np.zeros(valH + 1)
    DENH[0:na+nd+1] = denH
    
    
    return NUM, DEN, NUMH, DENH, Vn, y_id
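A minimal calling sketch for GEN_RLS_id on synthetic data. The toy system, the orders na=2, nb=1, and the zero delay are illustrative assumptions; numpy as np comes from the imports above.

# hypothetical usage sketch: recover a toy ARX(2,1) process
rng = np.random.default_rng(0)
u_sim = rng.standard_normal(500)
y_sim = np.zeros(500)
for k in range(2, 500):
    # simulated truth: y[k] = 1.2 y[k-1] - 0.5 y[k-2] + 0.8 u[k-1] + noise
    y_sim[k] = (1.2 * y_sim[k - 1] - 0.5 * y_sim[k - 2]
                + 0.8 * u_sim[k - 1] + 0.01 * rng.standard_normal())

NUM, DEN, NUMH, DENH, Vn, y_id = GEN_RLS_id(
    'ARX', y_sim, u_sim, na=2, nb=1, nc=0, nd=0, nf=0,
    theta=0, max_iterations=100)
print(NUM)  # expected to approach [0.8, 0]
print(DEN)  # expected to approach [1, -1.2, 0.5]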
Example no. 2
    # assumed context: numpy as np, control as cnt, harold,
    # matplotlib.pyplot as plt, and SIPPY's system_identification / fsetSIM
    def do_sys_id(self):
        num_poles = 2
        num_zeros = 1
        if not self._use_subspace:
            method = 'ARMAX'
            #sys_id = system_identification(self._y.T, self._u[0], method, IC='BIC', na_ord=[0, 5], nb_ord=[1, 5], nc_ord=[0, 5], delays=[0, 5], ARMAX_max_iterations=300, tsample=self._Ts, centering='MeanVal')
            sys_id = system_identification(self._y.T,
                                           self._u[0],
                                           method,
                                           ARMAX_orders=[num_poles, 1, 1, 0],
                                           ARMAX_max_iterations=300,
                                           tsample=self._Ts,
                                           centering='MeanVal')

            if self._verbose:
                print(sys_id.G)
                print("System poles of discrete G: ", cnt.pole(sys_id.G))

            # Convert to continuous tf
            G = harold.Transfer(sys_id.NUMERATOR,
                                sys_id.DENOMINATOR,
                                dt=self._Ts)
            G_cont = harold.undiscretize(G, method='zoh')
            self._sys_tf = G_cont
            self._A, self._B, self._C, self._D = harold.transfer_to_state(
                G_cont, output='matrices')

            if self._verbose:
                print("Continuous tf:", G_cont)

            # Convert to state space, because ARMAX gives transfer function
            ss_roll = cnt.tf2ss(sys_id.G)
            A = np.asarray(ss_roll.A)
            B = np.asarray(ss_roll.B)
            C = np.asarray(ss_roll.C)
            D = np.asarray(ss_roll.D)
            if self._verbose:
                print(ss_roll)

            # simulate identified system using input from data
            xid, yid = fsetSIM.SS_lsim_process_form(A, B, C, D, self._u)
            y_error = self._y - yid
            self._fitness = 1 - (y_error.var() / self._y.var())**2
            if self._verbose:
                print("Fittness %", self._fitness * 100)

            if self._plot:
                plt.figure(1)
                plt.plot(self._t[0], self._y[0])
                plt.plot(self._t[0], yid[0])
                plt.xlabel("Time")
                plt.title("Time response Y(t)=U*G(t)")
                plt.legend([
                    self._y_name, self._y_name + '_identified: ' +
                    '{:.3f} fitness'.format(self._fitness)
                ])
                plt.grid()
                plt.show()

        else:
            sys_id = system_identification(self._y,
                                           self._u,
                                           self._subspace_method,
                                           SS_fixed_order=num_poles,
                                           SS_p=self._subspace_p,
                                           SS_f=50,
                                           tsample=self._Ts,
                                           SS_A_stability=True,
                                           centering='MeanVal')
            #sys_id = system_identification(self._y, self._u, self._subspace_method, SS_orders=[1,10], SS_p=self._subspace_p, SS_f=50, tsample=self._Ts, SS_A_stability=True, centering='MeanVal')
            if self._verbose:
                print("x0", sys_id.x0)
                print("A", sys_id.A)
                print("B", sys_id.B)
                print("C", sys_id.C)
                print("D", sys_id.D)

            A = sys_id.A
            B = sys_id.B
            C = sys_id.C
            D = sys_id.D

            # Get discrete transfer function from state space
            sys_tf = cnt.ss2tf(A, B, C, D)
            if self._verbose:
                print("TF ***in z domain***", sys_tf)

            # Get numerator and denominator
            (num, den) = cnt.tfdata(sys_tf)

            # Convert to continuous tf
            G = harold.Transfer(num, den, dt=self._Ts)
            if self._verbose:
                print(G)
            G_cont = harold.undiscretize(G, method='zoh')
            self._sys_tf = G_cont
            self._A, self._B, self._C, self._D = harold.transfer_to_state(
                G_cont, output='matrices')
            if self._verbose:
                print("Continuous tf:", G_cont)

            # get zeros
            tmp_tf = cnt.ss2tf(self._A, self._B, self._C, self._D)
            self._zeros = cnt.zero(tmp_tf)

            # simulate identified system using discrete system
            xid, yid = fsetSIM.SS_lsim_process_form(A, B, C, D, self._u,
                                                    sys_id.x0)
            y_error = self._y - yid
            self._fitness = 1 - (y_error.var() / self._y.var())**2
            if self._verbose:
                print("Fittness %", self._fitness * 100)

            if self._plot:
                plt.figure(1)
                plt.plot(self._t[0], self._y[0])
                plt.plot(self._t[0], yid[0])
                plt.xlabel("Time")
                plt.title("Time response Y(t)=U*G(t)")
                plt.legend([
                    self._y_name, self._y_name + '_identified: ' +
                    '{:.3f} fitness'.format(self._fitness)
                ])
                plt.grid()
                plt.show()
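The discrete-to-continuous step used in both branches, shown in isolation with toy coefficients (the harold calls are the same ones the method uses; the numbers are hypothetical):

import harold

G_d = harold.Transfer([0.5], [1.0, -0.8], dt=0.01)  # toy discrete-time model
G_c = harold.undiscretize(G_d, method='zoh')        # back to continuous time
A, B, C, D = harold.transfer_to_state(G_c, output='matrices')
print(G_c)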
Example no. 3
def testSISOtfdata(self, siso):
    """Call tfdata()"""
    tfdata_1 = tfdata(siso.tf2)
    tfdata_2 = tfdata(siso.tf2)
    for i in range(len(tfdata_1)):
        np.testing.assert_array_almost_equal(tfdata_1[i], tfdata_2[i])
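For reference, tfdata in python-control returns nested per-output/per-input lists of coefficient arrays, so the two calls above yield directly comparable structures. A minimal standalone check on a hypothetical system:

import numpy as np
from control import tf, tfdata

sys1 = tf([1.0], [1.0, 2.0, 1.0])  # 1 / (s^2 + 2 s + 1)
num, den = tfdata(sys1)
np.testing.assert_array_almost_equal(num[0][0], [1.0])           # num[output][input]
np.testing.assert_array_almost_equal(den[0][0], [1.0, 2.0, 1.0])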
Example no. 4
# Control model
import control
from control.matlab import tf, tfdata

P = tf([0, 1], [1, 2, 3])
print(P)
[[numP]], [[denP]] = tfdata(P)
print(numP, denP)
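The double-bracket unpacking works because tfdata returns one list per output, each holding one coefficient array per input; the equivalent explicit indexing:

num, den = tfdata(P)
print(num[0][0], den[0][0])  # the same arrays as numP and denP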
Example no. 5
# imports assumed from the surrounding module (SIPPY-style context):
# numpy as np, control.matlab as cnt, old_div from past.utils, and the
# opt_id problem builder defined alongside this function
def GEN_id(id_method, y, u, na, nb, nc, nd, nf, theta, max_iterations, st_m,
           st_c):

    ylength = y.size

    # maximum number of non-predictable samples
    val = max(nb + theta, na, nc, nd, nf)

    # input/output number
    m = 1
    p = 1

    # number of optimization variables
    n_coeff = na + nb + nc + nd + nf

    # Calling the optimization problem
    (solver, w_lb, w_ub, g_lb, g_ub) = opt_id(m, p, na, np.array([nb]),
                                              nc, nd, nf, n_coeff,
                                              np.array([theta]), val,
                                              np.atleast_2d(u), y, id_method,
                                              max_iterations, st_m, st_c)

    # Set first-guess solution
    w_0 = np.zeros((1, n_coeff))
    w_y = np.zeros((1, ylength))
    w_0 = np.hstack([w_0, w_y])
    if id_method in ('BJ', 'GEN', 'ARARX', 'ARARMAX'):
        w_0 = np.hstack([w_0, w_y, w_y])

    # Call the NLP solver
    sol = solver(lbx=w_lb, ubx=w_ub, x0=w_0, lbg=g_lb, ubg=g_ub)

    # model output: info from the solver
    f_opt = sol["f"]  # objective function
    x_opt = sol["x"]  # optimization variables = model coefficients
    iterations = solver.stats()['iter_count']  # iteration number
    y_id = x_opt[-ylength:].full()[:, 0]  # model output
    THETA = np.array(x_opt[:n_coeff])[:, 0]

    # estimated error norm
    Vn = old_div((np.linalg.norm((y_id - y), 2)**2), (2 * ylength))

    # building TF coefficient vectors
    valH = max(nc, na + nd)
    valG = max(nb + theta, na + nf)

    # G
    # numG (B)
    if id_method == 'ARMA':
        NUM = 1.0
    else:
        NUM = np.zeros(valG)
        NUM[theta:nb + theta] = THETA[na:nb + na]
    # denG (A*F)
    A = cnt.tf(np.hstack((1, np.zeros((na)))), np.hstack((1, THETA[:na])), 1)
    F = cnt.tf(np.hstack((1, np.zeros((nf)))),
               np.hstack((1, THETA[na + nb + nc + nd:na + nb + nc + nd + nf])),
               1)
    _, deng = cnt.tfdata(A * F)
    denG = np.array(deng[0])
    DEN = np.zeros(valG + 1)
    DEN[0:na + nf + 1] = denG

    # H
    # numH (C)
    if id_method == 'OE':
        NUMH = 1
    else:
        NUMH = np.zeros(valH + 1)
        NUMH[0] = 1.
        NUMH[1:nc + 1] = THETA[na + nb:na + nb + nc]
    # denH (A*D)
    D = cnt.tf(np.hstack((1, np.zeros((nd)))),
               np.hstack((1, THETA[na + nb + nc:na + nb + nc + nd])), 1)
    _, denh = cnt.tfdata(A * D)
    denH = np.array(denh[0])
    DENH = np.zeros(valH + 1)
    DENH[0:na + nd + 1] = denH

    return NUM, DEN, NUMH, DENH, Vn, y_id
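The denominator assembly used above (and in the other GEN_* functions) relies on cnt.tf multiplication: multiplying the monic A and F transfer functions and reading the product back with cnt.tfdata yields the convolution of their coefficient vectors. A standalone sketch with hypothetical coefficients:

import numpy as np
import control.matlab as cnt

a = [0.5, 0.2]  # A(z): 1 + 0.5 z^-1 + 0.2 z^-2
f = [-0.3]      # F(z): 1 - 0.3 z^-1
A = cnt.tf(np.hstack((1, np.zeros(2))), np.hstack((1, a)), 1)
F = cnt.tf(np.hstack((1, np.zeros(1))), np.hstack((1, f)), 1)
_, deng = cnt.tfdata(A * F)
print(np.array(deng[0]))  # [[1. 0.2 0.05 -0.06]] == conv([1, .5, .2], [1, -.3])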
Example no. 6
# assumed context: numpy as np, control.matlab as cnt, sys, old_div from
# past.utils, and the SIPPY helpers rescale / opt_id from this module
def GEN_MISO_id(id_method, y, u, na, nb, nc, nd, nf, theta, max_iterations,
                st_m, st_c):
    #nb = np.array(nb)
    #theta = np.array(theta)
    u = 1. * np.atleast_2d(u)
    ylength = y.size
    ystd, y = rescale(y)
    [udim, ulength] = u.shape
    eps = np.zeros(y.size)
    Reached_max = False
    # checking dimension
    if nb.size != udim:
        sys.exit(
            "Error! nb must be an array with one entry per input (y x u dimensions)"
        )
    #        return np.array([[1.]]),np.array([[0.]]),np.array([[0.]]),np.inf,Reached_max
    elif theta.size != udim:
        sys.exit("Error! theta matrix must have yxu dimensions")
    #        return np.array([[1.]]),np.array([[0.]]),np.array([[0.]]),np.inf,Reached_max
    else:
        nbth = nb + theta
        Ustd = np.zeros(udim)
        for j in range(udim):
            Ustd[j], u[j] = rescale(u[j])

        # maximum number of non-predictable samples
        val = max(na, np.max(nbth), nc, nd, nf)

        # input/output number
        m = udim
        p = 1

        # number of optimization variables
        n_coeff = na + np.sum(nb[:]) + nc + nd + nf

        # Build the optimization problem
        (solver, w_lb, w_ub, g_lb,
         g_ub) = opt_id(m, p, na, nb, nc, nd, nf, n_coeff, theta, val,
                        np.atleast_2d(u), y, id_method, max_iterations, st_m,
                        st_c)

        # Set first-guess solution
        w_0 = np.zeros((1, n_coeff))
        w_0 = np.hstack([w_0, np.atleast_2d(y)])
        if id_method in ('BJ', 'GEN', 'ARARX', 'ARARMAX'):
            w_0 = np.hstack([w_0, np.atleast_2d(y), np.atleast_2d(y)])

        # Call the NLP solver
        sol = solver(lbx=w_lb, ubx=w_ub, x0=w_0, lbg=g_lb, ubg=g_ub)

        # model output: info from the solver
        f_opt = sol["f"]  # objective function
        x_opt = sol["x"]  # optimization variables = model coefficients
        iterations = solver.stats()['iter_count']  # iteration number
        y_id0 = x_opt[-ylength:].full()[:, 0]  # model output
        THETA = np.array(x_opt[:n_coeff])[:, 0]

        # Check iteration numbers
        if iterations >= max_iterations:
            print("Warning! Reached maximum iterations")
            Reached_max = True

        # estimated error norm
        Vn = old_div((np.linalg.norm((y_id0 - y), 2)**2), (2 * ylength))

        # rescaling Yid
        y_id = y_id0 * ystd

        # building TF coefficient vectors
        valH = max(nc, na + nd)
        valG = max(np.max(nbth), na + nf)
        Nb = np.sum(nb[:])

        # H = (C/(A*D))
        if id_method == 'OE':
            NUMH = np.ones((1, 1))
        else:
            NUMH = np.zeros((1, valH + 1))
            NUMH[0, 0] = 1.
            NUMH[0, 1:nc + 1] = THETA[na + Nb:na + Nb + nc]
        #
        # DENH = np.zeros((1, val + 1))
        # DENH[0, 0] = 1.
        # DENH[0, 1:nd + 1] = THETA[Nb+na+nc:Nb+na+nc+nd]

        A = cnt.tf(np.hstack((1, np.zeros((na)))), np.hstack((1, THETA[:na])),
                   1)
        D = cnt.tf(np.hstack((1, np.zeros((nd)))),
                   np.hstack((1, THETA[na + Nb + nc:na + Nb + nc + nd])), 1)

        _, denh = cnt.tfdata(A * D)
        denH = np.array(denh[0])
        DENH = np.zeros((1, valH + 1))
        DENH[0, 0:na + nd + 1] = denH

        # G = (B/(A*F))
        F = cnt.tf(
            np.hstack((1, np.zeros((nf)))),
            np.hstack((1, THETA[na + Nb + nc + nd:na + Nb + nc + nd + nf])), 1)

        _, deng = cnt.tfdata(A * F)
        denG = np.array(deng[0])
        DEN = np.zeros((udim, valG + 1))
        #DEN = np.zeros((udim, den.shape[1] + 1))
        #DEN = np.zeros((udim, den.shape[1]))
        #DEN[:, 0] = np.ones(udim)

        if id_method == 'ARMA':
            NUM = np.ones((udim, 1))
        else:
            NUM = np.zeros((udim, valG))
        #
        for k in range(udim):
            if id_method != 'ARMA':
                THETA[na + np.sum(nb[0:k]):na +
                      np.sum(nb[0:k + 1]
                             )] = THETA[na + np.sum(nb[0:k]):na +
                                        np.sum(nb[0:k + 1])] * ystd / Ustd[k]
                NUM[k, theta[k]:theta[k] +
                    nb[k]] = THETA[na + np.sum(nb[0:k]):na +
                                   np.sum(nb[0:k + 1])]
            #DEN[k, 1:den.shape[1] + 1] = den
            #DEN[k,:] = den
            DEN[k, 0:na + nf + 1] = denG

        # check_stH = True if any(np.roots(DENH)>=1.0) else False
        # check_stG = True if any(np.roots(DEN)>=1.0) else False
        # if check_stH or check_stG:
        #     IDsys_unst =
        # if st_c is True:
        #     if solver.stats()['return_status'] != 'Maximum_Iterations_Exceeded':

        return DEN, NUM, NUMH, DENH, Vn, y_id, Reached_max
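rescale is a helper from the surrounding SIPPY module; a sketch of its assumed behavior (unit-standard-deviation normalization), which is what the ystd / Ustd[k] back-scaling of the B coefficients above undoes:

def rescale_sketch(sig):
    # assumed behavior of SIPPY's rescale: normalize to unit standard deviation
    s = np.std(sig)
    return s, sig / s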
Example no. 7
# assumed context: numpy as np, control.matlab as cnt, sys, old_div from
# past.utils, and the SIPPY helper rescale from this module
def GEN_RLS_MISO_id(id_method, y, u, na, nb, nc, nd, nf, theta,
                    max_iterations):
    nb = np.array(nb)
    theta = np.array(theta)
    u = 1. * np.atleast_2d(u)
    ylength = y.size
    ystd, y = rescale(y)
    [udim, ulength] = u.shape
    eps = np.zeros(y.size)
    Reached_max = False
    # checking dimension
    if nb.size != udim:
        sys.exit(
            "Error! nb must be an array with one entry per input (y x u dimensions)"
        )
    #        return np.array([[1.]]),np.array([[0.]]),np.array([[0.]]),np.inf,Reached_max
    elif theta.size != udim:
        sys.exit("Error! theta matrix must have yxu dimensions")
    #        return np.array([[1.]]),np.array([[0.]]),np.array([[0.]]),np.inf,Reached_max
    else:
        nbth = nb + theta
        Ustd = np.zeros(udim)
        for j in range(udim):
            Ustd[j], u[j] = rescale(u[j])

        # maximum number of non-predictable samples
        val = max(na, np.max(nbth), nc, nd, nf)
        # whole data
        N = ylength

        # Total Order: both LTI and time varying part
        nt = na + np.sum(nb[:]) + nc + nd + nf + 1
        nh = max([na, nc, nf])

        ## Iterative Identification Algorithm

        ## Parameters Initialization
        # Confidence Parameter
        Beta = 1e4
        # Covariance matrix of parameter teta
        p_t = Beta * np.eye(nt - 1, nt - 1)
        P_t = np.zeros((nt - 1, nt - 1, N))
        for i in range(N):
            P_t[:, :, i] = p_t
        # Gain
        K_t = np.zeros((nt - 1, N))

        # First estimate
        teta = np.zeros((nt - 1, N))
        #eta = np.zeros(N)
        # Forgetting factors
        L_t = 1
        l_t = L_t * np.ones(N)
        #
        Yp = y.copy()
        E = np.zeros(N)
        fi = np.zeros((1, nt - 1, N))

        ## Propagation
        for k in range(N):
            if k > val:
                ## Step 1: Regressor vector
                vecY = y[k - na:k][::-1]  # Y vector
                vecYp = Yp[k - nf:k][::-1]  # Yp vector
                #
                vecU = []
                for nb_i in range(udim):  # U vector
                    vecu = u[nb_i, :][k - nb[nb_i] - theta[nb_i]:k -
                                      theta[nb_i]][::-1]
                    vecU = np.hstack((vecU, vecu))
                #
                #vecE = E[k-nh:k][::-1]                   # E vector
                vecE = E[k - nc:k][::-1]

                # choose input-output model
                if id_method == 'ARMAX':
                    fi[:, :, k] = np.hstack((-vecY, vecU, vecE))
                elif id_method == 'ARX':
                    fi[:, :, k] = np.hstack((-vecY, vecU))
                elif id_method == 'OE':
                    fi[:, :, k] = np.hstack((-vecYp, vecU))
                elif id_method == 'FIR':
                    fi[:, :, k] = np.hstack((vecU))
                phi = fi[:, :, k].T

                ## Step 2: Gain Update
                # Gain of parameter teta
                K_t[:, k:k + 1] = np.dot(
                    np.dot(P_t[:, :, k - 1], phi),
                    np.linalg.inv(l_t[k - 1] +
                                  np.dot(np.dot(phi.T, P_t[:, :,
                                                           k - 1]), phi)))

                ## Step 3: Parameter Update
                teta[:, k] = teta[:, k - 1] + np.dot(
                    K_t[:, k:k + 1], (y[k] - np.dot(phi.T, teta[:, k - 1])))

                ## Step 4: A posteriori prediction-error
                Yp[k] = np.dot(phi.T, teta[:, k])  #+ eta[k]
                E[k] = y[k] - Yp[k]

                ## Step 5. Parameter estimate covariance update
                P_t[:, :, k] = (1 / l_t[k - 1]) * (np.dot(
                    np.eye(nt - 1) - np.dot(K_t[:, k:k + 1], phi.T),
                    P_t[:, :, k - 1]))

                ## Step 6: Forgetting factor update
                l_t[k] = 1.0

        # Error Norm
        Vn = old_div((np.linalg.norm(y - Yp, 2)**2), (2 * (N - val)))

        # Model Output
        y_id = Yp * ystd

        # Parameters
        THETA = teta[:, -1]

        #if iterations >= max_iterations:
        #   print("Warning! Reached maximum iterations")
        #  Reached_max = True

        # building TF coefficient vectors
        valH = max(nc, na + nd)
        valG = max(np.max(nbth), na + nf)
        Nb = np.sum(nb[:])

        # H = (C/(A*D))
        if id_method == 'OE':
            NUMH = np.ones((1, 1))
        else:
            NUMH = np.zeros((1, valH + 1))
            NUMH[0, 0] = 1.
            NUMH[0, 1:nc + 1] = THETA[na + Nb:na + Nb + nc]
        #
        # DENH = np.zeros((1, val + 1))
        # DENH[0, 0] = 1.
        # DENH[0, 1:nd + 1] = THETA[Nb+na+nc:Nb+na+nc+nd]

        A = cnt.tf(np.hstack((1, np.zeros((na)))), np.hstack((1, THETA[:na])),
                   1)
        D = cnt.tf(np.hstack((1, np.zeros((nd)))),
                   np.hstack((1, THETA[na + Nb + nc:na + Nb + nc + nd])), 1)

        _, denh = cnt.tfdata(A * D)
        denH = np.array(denh[0])
        DENH = np.zeros((1, valH + 1))
        DENH[0, 0:na + nd + 1] = denH

        # G = (B/(A*F))
        if id_method == 'OE':
            F = cnt.tf(np.hstack((1, np.zeros((nf)))),
                       np.hstack((1, THETA[:nf])), 1)
        else:
            F = cnt.tf(
                np.hstack((1, np.zeros((nf)))),
                np.hstack(
                    (1, THETA[na + Nb + nc + nd:na + Nb + nc + nd + nf])), 1)

        _, deng = cnt.tfdata(A * F)
        denG = np.array(deng[0])
        DEN = np.zeros((udim, valG + 1))
        #DEN = np.zeros((udim, den.shape[1] + 1))
        #DEN = np.zeros((udim, den.shape[1]))
        #DEN[:, 0] = np.ones(udim)

        if id_method == 'ARMA':
            NUM = np.ones((udim, 1))
        else:
            NUM = np.zeros((udim, valG))
        #
        ng = nf if id_method == 'OE' else na
        for k in range(udim):
            if id_method != 'ARMA':
                THETA[ng + np.sum(nb[0:k]):ng +
                      np.sum(nb[0:k + 1]
                             )] = THETA[ng + np.sum(nb[0:k]):ng +
                                        np.sum(nb[0:k + 1])] * ystd / Ustd[k]
                NUM[k, theta[k]:theta[k] +
                    nb[k]] = THETA[ng + np.sum(nb[0:k]):ng +
                                   np.sum(nb[0:k + 1])]
            #DEN[k, 1:den.shape[1] + 1] = den
            #DEN[k,:] = den
            DEN[k, 0:na + nf + 1] = denG

        return DEN, NUM, NUMH, DENH, Vn, y_id, Reached_max
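The per-input regressor stacking from Step 1 above, in isolation: each input i contributes its nb[i] most recent samples, shifted back by its own delay theta[i] (toy numbers for illustration):

import numpy as np

u = np.arange(20.0).reshape(2, 10)  # two inputs, 10 samples each
nb, theta, k = np.array([2, 1]), np.array([0, 3]), 8
vecU = np.hstack([u[i, k - nb[i] - theta[i]:k - theta[i]][::-1]
                  for i in range(2)])
print(vecU)  # [ 7.  6. 14.]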
Example no. 8
# imports assumed for this fragment
from numpy import pi, zeros, argmin, mean
from scipy.signal import lfilter
from control.matlab import tf, c2d, tfdata

MixerGain = 8

lpfoutput = (lpfoutputI + 1j * lpfoutputQ)
# there are L Nyquist periods in one period of p(t) (for Tropp's analysis
# Tx=Tp or L=N).
L = N

# 1st-order RC low-pass filter
# In this fragment we obtain the impulse response of the RC filter
# implemented in the analog domain.
tau = 1 / (2 * pi * fc)
B = 1
A = [tau, 1]
lpf = tf(B, A)
lpf_d = c2d(lpf, 1 / (float(W)), 'tustin')
[[Bd]], [[Ad]] = tfdata(lpf_d)

original = x
### Emulating MATLAB's impz function.
x = zeros(25)
x[0] = 1
h = lfilter(Bd, Ad, x)

# Finds closest time value to perform sampling
i_tstart = argmin(abs(tspice - Td))

lpfoutput = lpfoutput[i_tstart:-1]
tspice = tspice[i_tstart:-1]
lpfoutput = lpfoutput - mean(lpfoutput)
lpfoutput = lpfoutput / MixerGain
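An alternative to the manual impz emulation above is scipy.signal.dimpulse applied to the same discretized coefficients (the sampling period is assumed to be 1/W, matching the c2d call):

from scipy.signal import dimpulse

t_imp, (h_imp,) = dimpulse((Bd, Ad, 1 / float(W)), n=25)
# h_imp[:, 0] should match the h computed with lfilter above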