Example #1
def identify_fnsi(data, nlx, nly, n=6, r=15, nmax=25, optimize=True, info=2):
    fnsi_errvec = []
    # FNSI can only use 1 realization
    sig = deepcopy(data.sig)
    # This is stupid, but unfortunately necessary
    sig.y = sig.y[:, :, 0][:, :, None]
    sig.u = sig.u[:, :, 0][:, :, None]
    sig.R = 1
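    # e.g. with sig.y of (assumed) shape (npp, p, R, P), the two slicing lines
    # above keep realization 0 and restore a singleton realization axis, so
    # sig.y ends up as (npp, p, 1, P); sig.u is handled the same way.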
    sig.average()
    fnsi1 = FNSI()
    fnsi1.set_signal(sig)
    fnsi1.add_nl(nlx=nlx)
    fnsi1.estimate(n=n, r=r, weight=weight, bd_method=bd_method)
    fnsi1.transient(T1=data.npp * data.Ntr)
    if optimize:
        try:
            fnsi1.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
            fnsi_errvec = fnsi1.extract_model(data.yval,
                                              data.uval,
                                              T1=data.npp * data.Ntr,
                                              info=info)
        except ValueError as e:
            print(f'FNSI optimization failed with {e}')
    return fnsi1, fnsi_errvec
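# Note: `weight`, `bd_method` (and `Result` further down) are not defined in
# this snippet; they are presumably module-level names in the original script.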
def identify(data, nlx, nly, nmax=25, info=2, fnsi=False):
    # transient: Add one period before the start of each realization. Note that
    # this is for the signal averaged over periods
    Rest = data.yest.shape[2]
    T1 = np.r_[data.npp * data.Ntr,
               np.r_[0:(Rest - 1) * data.npp + 1:data.npp]]
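    # e.g. with npp = 1024, Ntr = 1 and Rest = 2 (hypothetical values) this
    # gives T1 = [1024, 0, 1024]: the transient length in samples followed by
    # the start index of each realization in the concatenated estimation data.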

    linmodel = Subspace(data.sig)
    linmodel._cost_normalize = 1
    linmodel.estimate(2, 5, weight=weight)
    linmodel.optimize(weight=weight, info=info)

    # estimate NLSS
    model = NLSS(linmodel)
    # model._cost_normalize = 1
    model.add_nl(nlx=nlx, nly=nly)
    model.set_signal(data.sig)
    model.transient(T1)
    model.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
    # get best model on validation data. Change Transient settings, as there is
    # only one realization
    nl_errvec = model.extract_model(data.yval,
                                    data.uval,
                                    T1=data.npp * data.Ntr,
                                    info=info)
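    # nl_errvec presumably holds the validation error for each parameter set
    # saved during optimization; extract_model keeps the best one in `model`
    # (interpretation based on the comment above).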
    models = [linmodel, model]
    descrip = [type(mod).__name__ for mod in models]

    if fnsi:
        # FNSI can only use 1 realization
        sig = deepcopy(data.sig)
        # This is stupid, but unfortunately necessary
        sig.y = sig.y[:, :, 0][:, :, None]
        sig.u = sig.u[:, :, 0][:, :, None]
        sig.R = 1
        sig.average()
        fnsi1 = FNSI()
        fnsi1.set_signal(sig)
        fnsi1.add_nl(nlx=nlx)
        fnsi1.estimate(n=2, r=5, weight=weight)
        fnsi1.transient(T1)
        fnsi2 = deepcopy(fnsi1)
        fnsi2.optimize(lamb=100, weight=weight, nmax=nmax, info=info)
        models = models + [fnsi1, fnsi2]
        descrip = descrip + ['FNSI', 'FNSI optimized']

    descrip = tuple(descrip)  # convert to tuple for legend concatenation
    # simulation error
    val = np.empty((*data.yval.shape, len(models)))
    est = np.empty((*data.ym.shape, len(models)))
    test = np.empty((*data.ytest.shape, len(models)))
    for i, model in enumerate(models):
        test[..., i] = model.simulate(data.utest, T1=data.npp * data.Ntr)[1]
        val[..., i] = model.simulate(data.uval, T1=data.npp * data.Ntr)[1]
        est[..., i] = model.simulate(data.um, T1=T1)[1]

    Pest = data.yest.shape[3]

    # convenience inline functions
    def stack(ydata, ymodel):
        return np.concatenate((ydata[..., None], ydata[..., None] - ymodel),
                              axis=2)

    def rms(y):
        return np.sqrt(np.mean(y**2, axis=0))
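    # stack prepends the measured output to the model residuals along the last
    # axis, so rms(stack(y, yhat)) returns the rms of the signal itself
    # followed by the rms simulation error of each model.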

    est_err = stack(data.ym, est)  # (npp*R, p, 1+nmodels): signal, then residuals
    val_err = stack(data.yval, val)
    test_err = stack(data.ytest, test)
    noise = np.abs(np.sqrt(Pest * data.covY.squeeze()))
    print()
    print(f"err for models: signal, {descrip}")
    # print(f'rms error noise:\n{rms(noise)}     \ndb: \n{db(rms(noise))} ')
    # only print the error for output p = 0; it is nearly identical for p = 1
    print(f'rms error est (db): \n{db(rms(est_err[:,0]))}')
    print(f'rms error val (db): \n{db(rms(val_err[:,0]))}')
    # print(f'rms error test: \n{rms(test_err)}  \ndb: \n{db(rms(test_err))}')
    return Result(est_err, val_err, test_err, noise, nl_errvec, descrip)
Example #3
if p == 1:
    Wy = [1]
    Wt = [1]
elif p == 2:
    Wt = [0, 1]

F = np.array([])
nly = None

# for identification
tahn1 = Tanhdryfriction(eps=0.1, w=Wt)
nlx = [tahn1]
E = 1e0 * Efull[:, :len(nlx)]

true_model = FNSI(A, B, C, D, E, F)
true_model.add_nl(nlx=nlx, nly=nly)

# excitation signal
RMSu = 0.05  # Root mean square value for the input signal
npp = 2048  # Number of samples
R = 3  # Number of phase realizations (one for validation and one for testing)
P = 3  # Number of periods
kind = 'Odd'  # 'Full','Odd','SpecialOdd', or 'RandomOdd': kind of multisine
m = D.shape[1]  # number of inputs
p = C.shape[0]  # number of outputs
fs = 1  # normalized sampling rate
Ntr = 2
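# With these settings each channel contains R*P*npp = 3*3*2048 = 18432 samples;
# as noted above, one of the R = 3 realizations is reserved for validation and
# one for testing, leaving one for estimation.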
if True:
    # get predictable random numbers. https://dilbert.com/strip/2001-10-25
    np.random.seed(0)  # assumed: any fixed seed; the original value is not shown
Example #4
inl = np.array([[0,-1]])
nl_spline = NL_spline(inl, nspl=15)
nl = NL_force()
nl.add(nl_spline)
iu = 0
idof = [0]
nldof = []

ims = 60
nmax = 40
ncur = 6
nlist = np.arange(2, nmax+3, 2)
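# with nmax = 40 this scans the even candidate model orders 2, 4, ..., 42 when
# building the stabilisation diagram below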
dof = 0

# nonlinear identification
fnsi = FNSI(nlin, nl, idof, fmin, fmax)
fnsi.calc_EY()
fnsi.svd_comp(ims)
sd = fnsi.stabilisation_diagram(nlist)
fnsi.id(ncur)
fnsi.nl_coeff(iu, dof)
fsdlin, ax = plot_stab(fnsi, nlist, sca)

# Linear identification at high level
nl = NL_force()
fnsi2 = FNSI(nlin, nl, idof, fmin, fmax)
fnsi2.calc_EY()
fnsi2.svd_comp(ims)
fnsi2.id(ncur)
fnsi2.nl_coeff(iu, dof)
Example #5
n = 2
maxr = 20
dof = 0
iu = 0
xpowers = np.array([[2], [3]])

# subspace model
lin1 = Subspace(sig2)
# models, infodict = linmodel.scan(n, maxr, weight=False)
# ensure we use same dimension as for the fnsi model
lin1.estimate(n, maxr)
lin2 = deepcopy(lin1)
lin2.optimize(weight=False)

# Linear model
fnsi1 = FNSI(sig)
fnsi1.estimate(n, maxr)
fnsi1.nl_coeff(iu)

# initial nonlinear model
fnsi2 = FNSI(sig)
fnsi2.nlterms('state', xpowers)
fnsi2.estimate(n, maxr)
fnsi2.nl_coeff(iu)
fnsi2.transient(T1=npp)

covY = np.ones((round(npp // 2), 1, 1))
# optimized models
fnsi3 = deepcopy(fnsi2)
fnsi4 = deepcopy(fnsi2)  # freq. weighted model
fnsi5 = deepcopy(fnsi2)  # freq. weighted model
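The snippet stops before the copies are optimized. A minimal sketch of how that step might look, reusing only calls that appear elsewhere on this page and assuming `weight` also accepts the covY array defined above (parameter values are illustrative):

fnsi3.optimize(weight=False, nmax=25, info=1)            # unweighted
fnsi4.optimize(weight=covY, nmax=25, info=1)             # frequency-weighted (assumed usage)
fnsi5.optimize(lamb=100, weight=covY, nmax=25, info=1)   # weighted, explicit starting lambda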
Example #6
# dof where nonlinearity is
nldof = 6
# method to estimate BD
bd_method = 'explicit'
#bd_method = 'nr'

# ims: matrix block order. At least n+1
# nmax: max model order for stabilisation diagram
# ncur: model order for estimation
ims = 40
nmax = 20
ncur = 6
nlist = np.arange(2, nmax + 3, 2)

nl = NL_force()
fnsi = FNSI(slin, nl, idof, lin.fmin, lin.fmax)
fnsi.calc_EY()
fnsi.svd_comp(ims)
fnsi.stabilization(nlist)
# Do identification
fnsi.id(ncur, bd_method)
fnsi.calc_modal()
fnsi.nl_coeff(lin.iu, nldof)

# Load nonlinear signal
nlin = load(nonlin=True)
snlin = Signal(nlin.u, nlin.fs, nlin.y)
snlin.cut(nlin.nsper, per)

# Linear identification on nonlinear signal
fnsi_nl1 = FNSI(snlin, nl, idof, nlin.fmin, nlin.fmax)
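The snippet ends right after constructing fnsi_nl1. A plausible continuation, mirroring the identification steps applied to the linear signal above (a hedged sketch, not part of the original example):

fnsi_nl1.calc_EY()
fnsi_nl1.svd_comp(ims)
fnsi_nl1.id(ncur, bd_method)
fnsi_nl1.nl_coeff(lin.iu, nldof)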
Example #7
# idof are selected dofs.
# iu are dofs of force
iu = 0
idof = [0, 1]

# ims: matrix block order. At least n+1
# nmax: max model order for stabilisation diagram
# ncur: model order for estimation
ims = 22
nmax = 20
ncur = 4
nlist = np.arange(2, nmax + 3, 2)
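# with nmax = 20, nlist covers the even candidate model orders 2, 4, ..., 22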

## nonlinear identification at high level
# Calculate stabilization diagram
fnsi = FNSI(snlin, nl, idof, fmin, fmax)
fnsi.calc_EY()
fnsi.svd_comp(ims)
sd = fnsi.stabilization(nlist)
# Do estimation
fnsi.id(ncur)
fnsi.nl_coeff(iu, dof)

## linear identification at high level
nl = NL_force()
fnsi2 = FNSI(snlin, nl, idof, fmin, fmax)
fnsi2.calc_EY()
fnsi2.svd_comp(ims)
fnsi2.id(ncur)
fnsi2.nl_coeff(iu, dof)
Example #8
u = u.transpose(2,0,1)[:,:,None,Ptr:]
y = y.transpose(2,0,1)[:,:,None,Ptr:]
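# The transposes above (presumably) bring the raw (channel, period, sample)
# arrays into pyvib's (npp, channel, R, P) layout with a singleton realization
# axis, while dropping the first Ptr transient periods.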

# start ID
sig = Signal(u,y,fs=fs)
um, ym = sig.average()
sig.lines = lines

nlx = [Polynomial(exponent=ex, w=w) for ex in exponent]
# nlx = None

#n = 4
# bd_method = 'nr'
bd_method = 'opt'
# bd_method = 'explicit'
fnsi1 = FNSI()
fnsi1.set_signal(sig)
fnsi1.add_nl(nlx=nlx)
fnsi1.estimate(n=n, r=r, bd_method=bd_method, weight=False)
fnsi1.transient(T1=N)
fnsi2 = deepcopy(fnsi1)
fnsi2.optimize(lamb=100, weight=False, nmax=10, info=1)

if nlx is not None:
    G, knl = fnsi2.nl_coeff(iu)

    cr = knl.real
    cim = knl.imag
    ratio = np.log10(np.abs(cr.mean(0) / cim.mean(0)))
    print('Ratio of the real and imaginary parts of the nonlinear coefficient (log10)')
    print(f'{ratio}')
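    # A ratio of several orders of magnitude means the identified coefficient
    # is essentially real; a sizeable imaginary part would point to a poorly
    # identified (spurious) nonlinear coefficient.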