Example no. 1
    print("\n α = {0}, r = {1} "
          " =>   mean RMSE after spin-up = {2} \n runtime: {3}"
          .format(alpha, r, np.mean(RMSEs), t1-t0))

    return ensembles, mu_as, P_as, RMSEs, spreads


def iterate_main(param_set, log_name, q):  # q is instance of mp.Queue()
    RMSE_info = []
    for (alpha, r) in param_set:
        _, _, _, RMSEs, _ = main(alpha, r, log_name)
        RMSE_info += [((alpha, r), RMSEs)]
    q.put(RMSE_info)


ensembles, mu_as, P_as, RMSEs, spreads = enkf(0.16, 5.5, 'whatever')

P = P_as[0]
mu_f = np.mean(ens_0, axis=0)   # ens_0: assumed (Ne, n) array, one ensemble member per row
P = (1/(Ne-1)) * sum([np.outer(e - mu_f, e - mu_f)
                      for e in ens_0])
L, U = np.linalg.eigh(P)   # eigenvalues L (ascending) and orthonormal eigenvectors U
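# A minimal vectorized equivalent of the outer-product loop above, assuming
# ens_0 is an (Ne, n) NumPy array with one ensemble member per row.
def ensemble_covariance(ens):
    """Unbiased sample covariance of an (Ne, n) ensemble (members as rows)."""
    dX = ens - ens.mean(axis=0)               # anomalies about the ensemble mean
    return np.dot(dX.T, dX) / (ens.shape[0] - 1)

# P_vec = ensemble_covariance(ens_0)          # should match the explicit loop above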


if __name__ == "__main__":
    alpha_lower = 0
    alpha_upper = 0.5
    alpha_step = 0.02
    r_lower = 1
    r_upper = 6
    r_step = 0.5
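    # Hypothetical sketch: build the (alpha, r) grid from the bounds above and
    # dispatch it to iterate_main through a multiprocessing queue.
    # 'sweep.log' is a placeholder log-file name, not part of the original code.
    import multiprocessing as mp

    alphas = np.arange(alpha_lower, alpha_upper + alpha_step/2, alpha_step)
    rs = np.arange(r_lower, r_upper + r_step/2, r_step)
    param_set = [(a, r) for a in alphas for r in rs]

    q = mp.Queue()
    p = mp.Process(target=iterate_main, args=(param_set, 'sweep.log', q))
    p.start()
    RMSE_info = q.get()   # list of ((alpha, r), RMSEs) pairs, as filled by iterate_main
    p.join()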
Example no. 2
def enkf_test(dt,tf,mux0,P0,YK,Qk,Rk,flag_adapt=False):
	global nameBit

	# measurement influence matrix
	Hk = np.array([ [1.0,0.0] ])

	eqom_use = eqom_enkf
	
	if flag_adapt:
		ENKF = enkf.adaptive_enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)
	else:
		# create nonadaptive EnKF object
		ENKF = enkf.enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)

	nSteps = int(tf/dt)+1
	ts = 0.0

	#initialize EnKF
	ENKF.init(mux0,P0,ts)
	# initialize performance object
	simOut = trials_processing.simOutput()

	xf = np.zeros((nSteps,2))
	Pf = np.zeros((nSteps,2,2))
	Nf = np.zeros(nSteps)
	XK = np.zeros((nSteps,2,ENKF._N))
	tk = np.arange(0.0,tf,dt)

	#get the mean and covariance estimates
	Nf[0] = ENKF.get_N()
	xf[0,:] = np.mean(ENKF.xk,axis=1)
	Pxx = np.zeros((2,2))
	for k in range(ENKF.get_N()):
		# unbiased sample covariance (1/(N-1)), consistent with the update loop below
		Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,k]-xf[0,:],ENKF.xk[:,k]-xf[0,:])
	Pf[0,:,:] = Pxx.copy()

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# sync the ENKF, with continuous-time integration
		# propagate filter
		ENKF.propagateOde(dt)
		#ENKF.propagate(dt)
		# update
		ENKF.update(ym)
		# log
		xf[k,:] = np.mean(ENKF.xk,axis=1)
		Pxx = np.zeros((2,2))
		for kj in range(ENKF.get_N()):
			Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,kj]-xf[k,:],ENKF.xk[:,kj]-xf[k,:])
		Pf[k,:,:] = Pxx.copy()
		Nf[k] = ENKF.get_N()
		# check that the eigenvalues are reasonably bounded
		w = np.linalg.eigvalsh(Pf[k,:,:].copy())
		for jj in range(len(w)):
			if math.fabs(w[jj]) > 1.0e6:
				simOut.fail_singular_covariance(k)
				print("Covariance eigenvalue too large, t = %f" % (ts))
				return(xf,Pf,Nf,XK,simOut)
		if not flag_adapt:
			XK[k,:,:] = ENKF.xk.copy()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))
	simOut.complete(nSteps)

	return(xf,Pf,Nf,XK,simOut)
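
A side note on the covariance bookkeeping in enkf_test above: the member-by-member outer-product loop can also be written with np.cov, which applies the same unbiased 1/(N-1) normalization when the ensemble members are the columns of ENKF.xk. A minimal sketch, assuming xk has shape (2, N):

import numpy as np

def ensemble_mean_cov(xk):
	"""Mean and unbiased covariance of an (n, N) ensemble stored column-wise."""
	xbar = np.mean(xk, axis=1)
	Pxx = np.cov(xk)   # rows are state components, columns are members; 1/(N-1) factor
	return xbar, Pxx

# hypothetical usage inside the filter loop:
# xf[k,:], Pf[k,:,:] = ensemble_mean_cov(ENKF.xk)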
Example no. 3
def main(argin='./',adaptFlag = False):
    # output file
    FOUT = open('python_enkf_test.csv','w')
    FOUT.write('t,x1,x2,ymeas,x1hat,x2hat,P11,P22\n')

    # initialize EKF
    #Qkin = np.array([[20.0]])#continuous-time integration value
    Qkin = np.array([[20.0]])#Euler integration value
    Hkin = np.array([[1.0,0.0]])
    Rkin = np.array([ [0.0001] ])
    if not adaptFlag:
        EnKF = enkf.enkf(2,1,stateDerivativeEKF,Hk=Hkin,Qk=Qkin,Rk=Rkin,Ns=50)
    else:
        EnKF = enkf.adaptive_enkf(2,1,stateDerivativeEKF,Hk=Hkin,Qk=Qkin,Rk=Rkin)

    dt = 0.01
    tfin = 10.0#10.0
    nSteps = int(tfin/dt)
    tsim = 0.0

    xk = np.array([1.0,0.0])
    yk = simMeasurementFunction(xk,tsim)

    # initial covariance
    P0 = np.diag([Rkin[0,0],1.0])
    EnKF.init(xk,P0)

    Enkfx = np.mean(EnKF.xk,axis=1)

    xt = np.zeros((nSteps,2))
    xf = np.zeros((nSteps,2))
    XF = np.zeros((nSteps,2,EnKF._N))
    yt = np.zeros(nSteps)
    Npts = np.zeros(nSteps)
    Pxd = np.zeros((nSteps,2))
    Pxx = np.zeros((2,2))
    for k in range(nSteps):
        # log
        xt[k,:] = xk.copy()
        xf[k,:] = Enkfx.copy()
        if not adaptFlag:
            XF[k,:,:] = EnKF.xk.copy()
        Pxd[k,0] = Pxx[0,0]
        Pxd[k,1] = Pxx[1,1]
        # propagate filter
        EnKF.propagate(dt)
        #EnKF.propagateOde(dt)
        # simulate
        y = sp.odeint(stateDerivative,xk,np.array([tsim,tsim+dt]),args=([],) )
        xk = y[-1,:].copy()

        # update time
        tsim = tsim + dt
        # measurement
        ymeas = simMeasurementFunction(xk,tsim)
        # store measurement
        yt[k] = ymeas[0]
        # update EKF
        EnKF.update(ymeas)
        EnKF.resample()
        # get the mean and covariance estimate out
        Enkfx = np.mean(EnKF.xk,axis=1)

        # store number of points
        Npts[k] = EnKF.get_N()
        Pxx = np.zeros((2,2))
        for kj in range(EnKF.get_N()):
            # use a separate index so the outer time-step counter k is not shadowed;
            # unbiased sample covariance with the 1/(N-1) normalization
            Pxx = Pxx + 1.0/(float(EnKF._N)-1.0)*np.outer(EnKF.xk[:,kj]-Enkfx,EnKF.xk[:,kj]-Enkfx)
        # log to file
        FOUT.write('%f,%f,%f,%f,%f,%f,%f,%f\n' % (tsim,xk[0],xk[1],ymeas[0],Enkfx[0],Enkfx[1],Pxx[0,0],Pxx[1,1]) )

    FOUT.close()
    print('Completed simulation')

    # plot
    tplot = np.arange(0.0,tfin,dt)
    fig = plt.figure()

    ax = []
    for k in range(4):
        if k < 2:
            nam = 'x' + str(k+1)
        else:
            nam = 'e' + str(k-1)
        ax.append( fig.add_subplot(2,2,k+1,ylabel=nam) )
        if k < 2:
            ax[k].plot(tplot,xt[:,k],'b-')
            ax[k].plot(tplot,xf[:,k],'r--')
            if k == 0:
                ax[k].plot(tplot,yt,'y-')
        elif k < 4:
            ax[k].plot(tplot,xt[:,k-2]-xf[:,k-2],'b-')
            ax[k].plot(tplot,3.0*np.sqrt(Pxd[:,k-2]),'r--')
            ax[k].plot(tplot,-3.0*np.sqrt(Pxd[:,k-2]),'r--')
            if k == 2:
                ax[k].plot(tplot,xt[:,k-2]-yt,'y-')

        ax[k].grid()
    fig.show()

    if not adaptFlag:
        fig2 = plt.figure()
        ax = fig2.add_subplot(121,ylabel='x1')
        ax.plot(tplot,xt[:,0],'b-')
        ax.plot(tplot,XF[:,0,:],'d')
        ax.grid()

        ax = fig2.add_subplot(122,ylabel='x2')
        ax.plot(tplot,xt[:,1],'b-')
        ax.plot(tplot,XF[:,1,:],'d')
        ax.grid()        

        fig2.show()
        pass
    else:
        fig2 = plt.figure()
        ax = fig2.add_subplot(111,ylabel='N')
        ax.plot(tplot,Npts)
        ax.grid()

        fig2.show()

    raw_input("Return to exit")

    print("Completed test_enky.py")
    return
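
The CSV written by main has the header t,x1,x2,ymeas,x1hat,x2hat,P11,P22, so a run can be post-processed offline. A small sketch of reloading it and computing RMS errors (assuming the file is in the working directory):

import numpy as np

data = np.genfromtxt('python_enkf_test.csv', delimiter=',', names=True)
rmse_x1 = np.sqrt(np.mean((data['x1'] - data['x1hat'])**2))
rmse_x2 = np.sqrt(np.mean((data['x2'] - data['x2hat'])**2))
print('RMSE: x1 = %f, x2 = %f' % (rmse_x1, rmse_x2))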
Example no. 4
def enkf_test(dt,tf,mux0,P0,YK,Qk,Rk,flag_adapt=False):
	global nameBit

	# measurement influence matrix
	Hk = np.array([ [1.0,0.0] ])

	# all three nameBit cases use the same equations of motion for the EnKF here
	eqom_use = eqom_enkf

	if flag_adapt:
		ENKF = enkf.adaptive_enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)
	else:
		# create nonadaptive EnKF object
		ENKF = enkf.enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)

	nSteps = int(tf/dt)+1
	ts = 0.0

	#initialize EnKF
	ENKF.init(mux0,P0,ts)

	xf = np.zeros((nSteps,2))
	Pf = np.zeros((nSteps,4))
	Nf = np.zeros(nSteps)
	XK = np.zeros((nSteps,2,ENKF._N))
	tk = np.arange(0.0,tf,dt)

	#get the mean and covariance estimates
	Nf[0] = ENKF.get_N()
	xf[0,:] = np.mean(ENKF.xk,axis=1)
	Pxx = np.zeros((2,2))
	for k in range(ENKF.get_N()):
		# unbiased sample covariance (1/(N-1)), consistent with the update loop below
		Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,k]-xf[0,:],ENKF.xk[:,k]-xf[0,:])
	Pf[0,:] = Pxx.reshape((4,))

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# sync the ENKF, with continuous-time integration
		# propagate filter
		ENKF.propagateOde(dt)
		#ENKF.propagate(dt)
		# update
		ENKF.update(ym)
		# resample ??
		#ENKF.resample()
		# log
		xf[k,:] = np.mean(ENKF.xk,axis=1)
		Pxx = np.zeros((2,2))
		for kj in range(ENKF.get_N()):
			Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,kj]-xf[k,:],ENKF.xk[:,kj]-xf[k,:])
		Pf[k,:] = Pxx.reshape((4,))
		Nf[k] = ENKF.get_N()
		if not flag_adapt:
			XK[k,:,:] = ENKF.xk.copy()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))

	return(xf,Pf,Nf,XK)
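
In this variant each covariance is stored as a flattened 4-vector (Pxx.reshape((4,))), so Pf[:,0] and Pf[:,3] carry the diagonal entries P11 and P22. A short post-processing sketch for 3-sigma bounds, in the spirit of the error plots in Example no. 3:

import numpy as np

# xf, Pf, Nf, XK = enkf_test(dt, tf, mux0, P0, YK, Qk, Rk)   # outputs of the function above
sig1 = 3.0*np.sqrt(Pf[:,0])   # 3-sigma bound on the first state component (P11)
sig2 = 3.0*np.sqrt(Pf[:,3])   # 3-sigma bound on the second state component (P22)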