Code Example #1
File: ekf_trials.py  Project: fatadama/estimation
def ekf_test(dt,tf,mux0,P0,YK,Qk,Rk):

	#Qk = np.array([[1.0*dt]])
	#Rk = np.array([[1.0]])

	# create EKF object
	EKF = ekf.ekf(2,0,eqom_ekf,eqom_jacobian_ekf,eqom_gk_ekf,Qk)

	nSteps = int(tf/dt)+1
	ts = 0.0

	# initialize EKF
	EKF.init_P(mux0,P0,ts)
	# initialize performance object
	simOut = trials_processing.simOutput()

	xf = np.zeros((nSteps,2))
	Pf = np.zeros((nSteps,2,2))
	tk = np.arange(0.0,tf,dt)

	xf[0,:] = EKF.xhat.copy()
	Pf[0,:,:] = EKF.Pk.copy()

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# sync the EKF, with continuous-time integration
		EKF.propagateOde(dt)
		#EKF.propagateRK4(dt)
		EKF.update(ts,ym,measurement_ekf,measurement_gradient,Rk)
		# check that the eigenvalues are reasonably bounded
		w = np.linalg.eigvalsh(EKF.Pk.copy())
		for jj in range(len(w)):
			if math.fabs(w[jj]) > 1.0e6:
				simOut.fail_singular_covariance(k)
				print("Covariance eigenvalue too large, t = %f" % (ts))
				return(xf,Pf,simOut)
		# copy
		xf[k,:] = EKF.xhat.copy()
		Pf[k,:,:] = EKF.Pk.copy()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))
	simOut.complete(nSteps)

	return(xf,Pf,simOut)
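
The function above assumes that numpy (as np), math, time, ekf, and trials_processing are imported at module level of ekf_trials.py, along with the dynamics and measurement functions it references (eqom_ekf, eqom_jacobian_ekf, eqom_gk_ekf, measurement_ekf, measurement_gradient). A minimal driver might look like the sketch below; every numeric value is illustrative only, and the synthetic YK merely has the length the loop expects.

# minimal sketch of a driver for ekf_test; all values are illustrative only
import numpy as np
from ekf_trials import ekf_test     # assumes ekf_trials.py is importable

dt = 0.01                           # time step between measurements
tf = 10.0                           # final time
mux0 = np.array([0.0, 0.0])         # initial state estimate
P0 = np.diag([1.0, 1.0])            # initial covariance
Qk = np.array([[1.0e-2*dt]])        # process noise covariance
Rk = np.array([[1.0e-2]])           # measurement noise covariance
YK = np.random.randn(int(tf/dt)+1)  # placeholder measurements of the expected length

xf, Pf, simOut = ekf_test(dt, tf, mux0, P0, YK, Qk, Rk)
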
Code Example #2
File: ukf_trials.py  Project: fatadama/estimation
def ukf_test(dt,tf,mux0,P0,YK,Qk,Rk):
	UKF = ukf.ukf(2,0,1,eqom_ukf,Qk)

	nSteps = int(tf/dt)+1
	ts = 0.0

	# initialize UKF
	UKF.init_P(mux0,P0,ts)
	# initialize performance object
	simOut = trials_processing.simOutput()

	xf = np.zeros((nSteps,2))
	Pf = np.zeros((nSteps,2,2))
	tk = np.arange(0.0,tf,dt)

	xf[0,:] = UKF.xhat.copy()
	Pf[0,:,:] = UKF.Pk.copy()

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# sync the UKF, with continuous-time integration
		try:
			UKF.sync(dt,ym,measurement_ukf,Rk,True)
		except np.linalg.LinAlgError:
			print("Singular covariance at t = %f" % (ts))
			simOut.fail_singular_covariance(k)
			return(xf,Pf,simOut)
		# check that the eigenvalues are reasonably bounded
		w = np.linalg.eigvalsh(UKF.Pk.copy())
		for jj in range(len(w)):
			if math.fabs(w[jj]) > 1.0e6:
				simOut.fail_singular_covariance(k)
				print("Covariance eigenvalue too large, t = %f" % (ts))
				return(xf,Pf,simOut)
		# copy
		#if k < nSteps-1:
		xf[k,:] = UKF.xhat.copy()
		Pf[k,:,:] = UKF.Pk.copy()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))
	simOut.complete(nSteps)

	return(xf,Pf,simOut)
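
The eigenvalue bound on the covariance is checked with the same inline loop in the EKF, UKF, and EnKF drivers. A small helper such as the hypothetical covariance_bounded below (the name and the 1.0e6 threshold simply mirror the inline checks) would let each loop body test the bound in one line, e.g. if not covariance_bounded(UKF.Pk): simOut.fail_singular_covariance(k).

# hypothetical helper mirroring the inline eigenvalue checks above
import math
import numpy as np

def covariance_bounded(Pk, limit=1.0e6):
	# True if every eigenvalue of the symmetric covariance Pk has magnitude below limit
	w = np.linalg.eigvalsh(Pk)
	return all(math.fabs(wj) < limit for wj in w)
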
Code Example #3
File: enkf_trials.py  Project: fatadama/estimation
def enkf_test(dt,tf,mux0,P0,YK,Qk,Rk,flag_adapt=False):
	global nameBit

	# measurement influence matrix
	Hk = np.array([ [1.0,0.0] ])

	eqom_use = eqom_enkf
	
	if flag_adapt:
		ENKF = enkf.adaptive_enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)
	else:
		# create nonadaptive EnKF object
		ENKF = enkf.enkf(2,0,eqom_use,Hk,Qk,Rk,Ns=100)

	nSteps = int(tf/dt)+1
	ts = 0.0

	#initialize EnKF
	ENKF.init(mux0,P0,ts)
	# initialize performance object
	simOut = trials_processing.simOutput()

	xf = np.zeros((nSteps,2))
	Pf = np.zeros((nSteps,2,2))
	Nf = np.zeros(nSteps)
	XK = np.zeros((nSteps,2,ENKF._N))
	tk = np.arange(0.0,tf,dt)

	#get the mean and covariance estimates
	Nf[0] = ENKF.get_N()
	xf[0,:] = np.mean(ENKF.xk,axis=1)
	Pxx = np.zeros((2,2))
	for k in range(ENKF.get_N()):
		# use the same 1/(N-1) normalization as the in-loop covariance estimate
		Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,k]-xf[0,:],ENKF.xk[:,k]-xf[0,:])
	Pf[0,:,:] = Pxx.copy()

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# sync the ENKF, with continuous-time integration
		# propagate filter
		ENKF.propagateOde(dt)
		#ENKF.propagate(dt)
		# update
		ENKF.update(ym)
		# log
		xf[k,:] = np.mean(ENKF.xk,axis=1)
		Pxx = np.zeros((2,2))
		for kj in range(ENKF.get_N()):
			Pxx = Pxx + 1.0/(float(ENKF._N)-1.0)*np.outer(ENKF.xk[:,kj]-xf[k,:],ENKF.xk[:,kj]-xf[k,:])
		Pf[k,:,:] = Pxx.copy()
		Nf[k] = ENKF.get_N()
		# check that the eigenvalues are reasonably bounded
		w = np.linalg.eigvalsh(Pf[k,:,:].copy())
		for jj in range(len(w)):
			if math.fabs(w[jj]) > 1.0e6:
				simOut.fail_singular_covariance(k)
				print("Covariance eigenvalue too large, t = %f" % (ts))
				return(xf,Pf,Nf,XK,simOut)
		if not flag_adapt:
			XK[k,:,:] = ENKF.xk.copy()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))
	simOut.complete(nSteps)

	return(xf,Pf,Nf,XK,simOut)
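
The ensemble mean and covariance computed inside the loop are the usual sample statistics over the columns of ENKF.xk (shape (2, N)); with the 1/(N-1) normalization they match numpy's np.cov. The hypothetical ensemble_stats helper below is an equivalent vectorized form of those sums.

# hypothetical vectorized equivalent of the in-loop ensemble statistics
import numpy as np

def ensemble_stats(xk_ens):
	# xk_ens has shape (n, N): rows are states, columns are ensemble members
	xbar = np.mean(xk_ens, axis=1)   # ensemble mean
	Pbar = np.cov(xk_ens)            # sample covariance, normalized by N-1
	return xbar, Pbar

Inside the loop, xf[k,:], Pf[k,:,:] = ensemble_stats(ENKF.xk) would reproduce the explicit sums.
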
Code Example #4
File: sir_trials.py  Project: fatadama/estimation
def sir_test(dt,tf,mux0,P0,YK,Qk,Rk,Nparticles = 100):
	global mux_sample
	global P_sample
	global Ru
	global Qu
	Ru = Rk.copy()
	Qu = Qk.copy()
	mux_sample = mux0.copy()
	P_sample = P0.copy()

	# number of particles
	Nsu = Nparticles

	# the propagation function could be made to depend on nameBit here; this may or may not be needed
	# create SIR object
	SIR = sir.sir(2,Nsu,eqom_use,processNoise,measurementPdf)

	nSteps = int(tf/dt)+1
	ts = 0.0

	# initialize the particle filter
	SIR.init(initialParticle)
	# initialize performance object
	simOut = trials_processing.simOutput()
	
	# the estimate (weighted mean)
	#xf = np.zeros((nSteps,2))
	#tk = np.arange(0.0,tf,dt)
	px1 = np.zeros((nSteps,SIR.Ns))
	px2 = np.zeros((nSteps,SIR.Ns))
	weights = np.zeros((nSteps,SIR.Ns))

	px1[0,:] = SIR.XK[0,:].copy()
	px2[0,:] = SIR.XK[1,:].copy()
	weights[0,:] = SIR.WI.copy()

	t1 = time.time()
	for k in range(1,nSteps):
		# get the new measurement
		ym = np.array([YK[k]])
		ts = ts + dt
		# call SIR
		SIR.update(dt,ym)
		# store
		px1[k,:] = SIR.XK[0,:].copy()
		px2[k,:] = SIR.XK[1,:].copy()
		weights[k,:] = SIR.WI.copy()
		# resample
		SIR.sample()
	t2 = time.time()
	print("Elapsed time: %f sec" % (t2-t1))
	simOut.complete(nSteps)

	# sort out the most likely particle at each time
	xml = np.zeros((nSteps,2))
	for k in range(nSteps):
		idxk = np.argmax(weights[k,:])
		xml[k,0] = px1[k,idxk]
		xml[k,1] = px2[k,idxk]
	# compute the mean and covariance over time
	mux = np.zeros((nSteps,2))
	Pxx = np.zeros((nSteps,2,2))
	for k in range(nSteps):
		mux[k,0] = np.sum( np.multiply(px1[k,:],weights[k,:]) )
		mux[k,1] = np.sum( np.multiply(px2[k,:],weights[k,:]) )
		Pxk = np.zeros((2,2))
		for j in range(Nsu):
			iv = np.array([ px1[k,j]-mux[k,0],px2[k,j]-mux[k,1] ])
			Pxk = Pxk + weights[k,j]*np.outer(iv,iv)
		Pxx[k,:,:] = Pxk.copy()

	return(mux,Pxx,px1,px2,weights,simOut)
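
The weighted mean and covariance loops at the end of sir_test can also be written in vectorized form. The hypothetical weighted_stats helper below reproduces them for a single time index, assuming (as the loops do) that the particle weights are normalized to sum to one.

# hypothetical vectorized equivalent of the weighted mean/covariance loops above
import numpy as np

def weighted_stats(x1, x2, w):
	# x1, x2: particle coordinates at one time step; w: normalized weights
	xk = np.vstack((x1, x2))        # shape (2, Ns)
	mu = xk.dot(w)                  # weighted mean, shape (2,)
	dx = xk - mu.reshape((2, 1))    # deviations from the weighted mean
	P = (dx*w).dot(dx.T)            # weighted covariance, shape (2, 2)
	return mu, P

For example, mux[k,:], Pxx[k,:,:] = weighted_stats(px1[k,:], px2[k,:], weights[k,:]) gives the same result as the nested loops.
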