Example #1
# transient: Add one period before the start of each realization. Note that
# this is for the signal averaged over periods
T1 = np.r_[npp * Ptr, np.r_[0:(Rest - 1) * npp + 1:npp]]
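# e.g. npp=1024, Ptr=1, Rest=2 gives T1 = [1024, 0, 1024]: npp*Ptr transient
# samples followed by the starting index of each of the Rest realizations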

# select nonlinear functions
nly = None
pnlss1 = Pnlss(degree=[2, 3], structure='full')
w = [0, 1]
tahn1 = Tanhdryfriction(eps=0.01, w=w)
if nlfunc.casefold() == 'pnlss':
    nlx = NLS([pnlss1])
    nly = NLS([pnlss1])
elif nlfunc.casefold() == 'tahn':
    # nlx = NLS([pnlss1, tahn1])  # alternative: polynomial terms in the state eq. as well
    nlx = NLS([tahn1])

model = NLSS(linmodel)
model.add_nl(nlx=nlx, nly=nly)
model.set_signal(sig)
model.transient(T1)
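# Levenberg-Marquardt optimization of all model parameters; here lamb is taken
# to be the initial damping value and nmax the maximum number of iterations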
model.optimize(lamb=100, weight=weight, nmax=25)

#raise SystemExit(0)

# get best model on validation data. Change Transient settings, as there is
# only one realization
nl_errvec = model.extract_model(yval, uval, T1=npp)

models = [linmodel, model]
descrip = [type(mod).__name__ for mod in models]
descrip = tuple(descrip)  # convert to tuple for legend concatenation in figs
# simulation error
Example #2
def identify_nlss(data, linmodel, nlx, nly, nmax=25, info=2):
    Rest = data.yest.shape[2]
    T1 = np.r_[data.npp * data.Ntr,
               np.r_[0:(Rest - 1) * data.npp + 1:data.npp]]
    model = NLSS(linmodel)
    # model._cost_normalize = 1
    model.add_nl(nlx=nlx, nly=nly)
    model.set_signal(data.sig)
    model.transient(T1)
    model.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
    # get best model on validation data. Change Transient settings, as there is
    # only one realization
    nl_errvec = model.extract_model(data.yval,
                                    data.uval,
                                    T1=data.npp * data.Ntr,
                                    info=info)

    return model, nl_errvec
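
A minimal usage sketch, assuming a `data` container with the fields used above
and reusing class names and settings from the other examples (Subspace, NLS,
Tanhdryfriction, weight); the exact values are illustrative only:

linmodel = Subspace(data.sig)           # linear subspace model as starting point
linmodel.estimate(2, 5, weight=weight)  # n=2, r=5, as in the examples above
linmodel.optimize(weight=weight)
nlx = NLS([Tanhdryfriction(eps=0.1, w=[0, 1])])
model, nl_errvec = identify_nlss(data, linmodel, nlx, nly=None)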
Example #3
def identify(data, nlx, nly, nmax=25, info=2, fnsi=False):
    # transient: Add one period before the start of each realization. Note that
    # this is for the signal averaged over periods
    Rest = data.yest.shape[2]
    T1 = np.r_[data.npp * data.Ntr,
               np.r_[0:(Rest - 1) * data.npp + 1:data.npp]]

    linmodel = Subspace(data.sig)
    linmodel._cost_normalize = 1
    linmodel.estimate(2, 5, weight=weight)
    linmodel.optimize(weight=weight, info=info)

    # estimate NLSS
    model = NLSS(linmodel)
    # model._cost_normalize = 1
    model.add_nl(nlx=nlx, nly=nly)
    model.set_signal(data.sig)
    model.transient(T1)
    model.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
    # get best model on validation data. Change Transient settings, as there is
    # only one realization
    nl_errvec = model.extract_model(data.yval,
                                    data.uval,
                                    T1=data.npp * data.Ntr,
                                    info=info)
    models = [linmodel, model]
    descrip = [type(mod).__name__ for mod in models]

    if fnsi:
        # FNSI can only use 1 realization
        sig = deepcopy(data.sig)
        # This is stupid, but unfortunately necessary
        sig.y = sig.y[:, :, 0][:, :, None]
        sig.u = sig.u[:, :, 0][:, :, None]
        sig.R = 1
        sig.average()
        fnsi1 = FNSI()
        fnsi1.set_signal(sig)
        fnsi1.add_nl(nlx=nlx)
        fnsi1.estimate(n=2, r=5, weight=weight)
        fnsi1.transient(T1)
        fnsi2 = deepcopy(fnsi1)
        fnsi2.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
        models = models + [fnsi1, fnsi2]
        descrip = descrip + ['FNSI', 'FNSI optimized']

    descrip = tuple(descrip)  # convert to tuple for legend concatenation
    # simulation error
    val = np.empty((*data.yval.shape, len(models)))
    est = np.empty((*data.ym.shape, len(models)))
    test = np.empty((*data.ytest.shape, len(models)))
    for i, model in enumerate(models):
        test[..., i] = model.simulate(data.utest, T1=data.npp * data.Ntr)[1]
        val[..., i] = model.simulate(data.uval, T1=data.npp * data.Ntr)[1]
        est[..., i] = model.simulate(data.um, T1=T1)[1]

    Pest = data.yest.shape[3]

    # convenience helper functions
    def stack(ydata, ymodel):
        return np.concatenate((ydata[..., None],
                               (ydata[..., None] - ymodel)), axis=2)

    def rms(y):
        return np.sqrt(np.mean(y**2, axis=0))

    est_err = stack(data.ym, est)  # (npp*R,p,nmodels)
    val_err = stack(data.yval, val)
    test_err = stack(data.ytest, test)
    noise = np.abs(np.sqrt(Pest * data.covY.squeeze()))
    print()
    print(f"err for models: signal, {descrip}")
    # print(f'rms error noise:\n{rms(noise)}     \ndb: \n{db(rms(noise))} ')
    # only print error for p = 0. Almost equal to p = 1
    print(f'rms error est (db): \n{db(rms(est_err[:,0]))}')
    print(f'rms error val (db): \n{db(rms(val_err[:,0]))}')
    # print(f'rms error test: \n{rms(test_err)}  \ndb: \n{db(rms(test_err))}')
    return Result(est_err, val_err, test_err, noise, nl_errvec, descrip)
Example #4
    figs['bla'] = (plt.gcf(), plt.gca())
    return figs


nmax = 100
info = 1
"""Check optimization; how good is it to find the true system parameters?
This depends on the number of parameters, as we expect for a nonlinear
optimization problem."""

E = Efull
F = Ffull[:p]
# Many parameters
nlx = [Pnl(degree=[2, 3], structure='full')]
nly = [Pnl(degree=[2, 3], structure='full')]
true_model = NLSS(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)
data2 = simulate(true_model)
res2 = identify(data2, nlx, nly, nmax=nmax, info=info)
figs = plot(res2, data2, p=1)
figs = plot_bla(res2, data2, p=1)

# Few parameters, LM able to estimate system properly
# Diagonal is only for state equation. If n == p, we can use diagonal for
# output, but that is not the intended usage.
nlx = [Pnl(degree=[2, 3], structure='diagonal')]
nly = [Pnl(degree=[2, 3], structure='statesonly')]
true_model = NLSS(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)
data1 = simulate(true_model)  # generate data from true model
# estimate model from data
Example #5

nmax = 100
info = 1
"""Check optimization; how good is it to find the true system parameters?
This depends on the number of parameters, as we expect for a nonlinear
optimization problem."""

# Few parameters, LM able to estimate system properly
# Diagonal is only for state equation. If n == p, we can use diagonal for
# output, but that is not the intended usage.
E = Efull
F = Ffull[:p]
nlx = [Pnl(degree=[2, 3], structure='diagonal')]
nly = [Pnl(degree=[2, 3], structure='statesonly')]
true_model = NLSS(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)
data1 = simulate(true_model)  # generate data from true model
res1 = identify(data1, nlx, nly, nmax=nmax,
                info=info)  # estimate model from data

# Many parameters
nlx = [Pnl(degree=[2, 3], structure='full')]
nly = [Pnl(degree=[2, 3], structure='full')]
true_model = NLSS(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)
data2 = simulate(true_model)
res2 = identify(data2, nlx, nly, nmax=nmax, info=info)

figs = plot(res1, data1, p=1)
figs = plot(res2, data2, p=1)
Example #6
File: nlss.py  Project: pawsen/pyvib
poly2y = Polynomial(exponent=exp2, w=Wy)
poly3y = Polynomial(exponent=exp3, w=Wy)

poly1x = Polynomial_x(exponent=2, w=[0, 1])
poly2x = Polynomial_x(exponent=3, w=[0, 1])
poly3x = Polynomial_x(exponent=4, w=[0, 1])

tahn1 = Tanhdryfriction(eps=0.1, w=Wt)

F = np.array([])
nly = None

nlx = NLS([tahn1])
E = 1e0 * Efull[:, :len(nlx.nls)]

true_model = NLSS(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)

# excitation signal
RMSu = 0.05  # Root mean square value for the input signal
npp = 1024  # Number of samples
R = 4  # Number of phase realizations (one for validation and one for testing)
P = 3  # Number of periods
kind = 'Odd'  # 'Full','Odd','SpecialOdd', or 'RandomOdd': kind of multisine
m = D.shape[1]  # number of inputs
p = C.shape[0]  # number of outputs
fs = 1  # normalized sampling rate
Ntr = 5
if True:
    # get predictable random numbers. https://dilbert.com/strip/2001-10-25
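
A minimal plain-NumPy sketch of a random-phase odd multisine with npp samples
per period, R realizations and P periods; it only illustrates what kind='Odd'
means and is not pyvib's own multisine generator:

import numpy as np

def odd_multisine(npp, R, P, rms=0.05, seed=None):
    """Random-phase multisine exciting only the odd frequency lines."""
    rng = np.random.default_rng(seed)
    lines = np.arange(1, npp // 2, 2)            # odd excited lines up to Nyquist
    t = np.arange(npp)
    u = np.zeros((R, P * npp))
    for r in range(R):
        phases = rng.uniform(0, 2 * np.pi, len(lines))
        period = sum(np.cos(2 * np.pi * k * t / npp + ph)
                     for k, ph in zip(lines, phases))
        period *= rms / np.std(period)           # scale to the requested RMS value
        u[r] = np.tile(period, P)                # repeat the same period P times
    return u

u = odd_multisine(npp=1024, R=4, P=3, rms=0.05)  # shape (R, P*npp)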
Example #7
epsf = f'{eps}'.replace('.', '')

# cont time
a, b, c, d = mkc2ss(M, K, C)
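# (mkc2ss assembles the continuous-time state-space matrices from the
# second-order M, K, C model; the state is ordered as x = [q, qdot])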
fact = 1
# include velocity in output
if len(wd) == 6:
    c = np.vstack((c, np.hstack((np.zeros((3, 3)), np.eye(3)))))
    d = np.vstack((d, np.zeros((3, 3))))
    fact = 2
csys = signal.StateSpace(a, b, c, d)
Ec = np.zeros((2 * ndof, 1))
Fc = np.zeros((fact * ndof, 0))
Ec[ndof + nldof] = -muN
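# Ec has a single column (one state nonlinearity) and puts the friction force
# -muN on the velocity equation of the nonlinear DOF; Fc has zero columns, so
# there is no nonlinearity in the output equation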

cmodel = NLSS(csys.A, csys.B, csys.C, csys.D, Ec, Fc)
cmodel.add_nl(nlx=nlx, nly=nly)


def fex_cont(A, u, t):
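    # A is the forcing amplitude and u a callable giving the excitation signal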
    t = np.atleast_1d(t)
    fex = np.zeros((len(t), ndof))
    fex[:, fdof] = A * u(t)
    return fex


def simulate_cont(sys, A, t):
    nt = len(t)
    y = np.empty((R, nt, sys.outputs))
    x = np.empty((R, nt, len(sys.A)))
    u = np.empty((R, nt))