Example #1
	def FindBaselineSection(xi,xf,xAvg_,uAvg_):
		"""
		xi: starting index
		xf: ending index (inclusive)
		Finds the baseline for a section; returns the index of the end of the baseline
		(the last index in the baseline).
		If there is no baseline to begin with, the current index is returned.
		"""
		dirn = klm.sign(xf-xi) 					# direction of change
		ip = xi-dirn 								# previous index
		ic = xi 							 	 	# current index
		count = 0
		while(klm.sign(ic-xf)!=dirn): 			  		# find out if we have finished
			# Kalman filter to find baseline (with resets)
			(xf1[ic], uf1[ic]) = klm.KalFilIter(xf1[ip],xAvg_*dirn,xm[ic], uf1[ip],uAvg_,um, 1,1,1)
			# Kalman filtered baseline
			#(xf2[ic], uf2[ic]) = klm.KalFilIter(xf2[ip],xAvg_*dirn,xf1[ic], uf2[ip],uAvg_,uf1[ic], 1,1,1)
			(xf2[ic], uf2[ic]) = (xf1[ic], uf1[ic])
			d_x2x2 = xf2[ic]-xf2[ip]
			
			# increase count in direction of skew
			dCount = klm.sign(d_x2x2-xAvg_*dirn)
			if (dCount==-klm.sign(count)): 	# reset count if skew has changed
				count = dCount
			else:
				count += dCount
			print("~",ic,ip,count,d_x2x2,xm[ic],xf1[ic],xf2[ic])
			if (abs(count)>=countTN):
				return ic-countTN*dirn
			ip = ic
			ic += dirn
		return ip
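
Every snippet on this page calls klm.KalFilIter(x_prev, dx_pred, x_meas, u_prev, u_pred, u_meas, A, B, H), but the klm module itself is not shown. The sketch below is only an assumption of what such a one-dimensional predict/update step could look like (uncertainties treated as standard deviations); the real klm implementation may treat A, B and H differently. klm.sign is assumed to be an ordinary sign function.

import numpy as np

def sign(v):
    """Plain sign function returning -1, 0 or +1 (assumed behaviour of klm.sign)."""
    return (v > 0) - (v < 0)

def KalFilIter(xp, dxp, xm, up, ue, um, A=1, B=1, H=1):
    """Hypothetical scalar Kalman step, NOT the original klm code.
    xp, up : previous filtered value and its uncertainty
    dxp, ue: predicted change per step and its uncertainty
    xm, um : new measurement and its uncertainty
    A, B, H: state-transition, control and measurement factors
    Returns (filtered value, filtered uncertainty).
    """
    x_pred = A * xp + B * dxp                                  # predict the next value
    u_pred = np.sqrt((A * up) ** 2 + ue ** 2)                  # propagate the uncertainty
    K = (H * u_pred ** 2) / (H ** 2 * u_pred ** 2 + um ** 2)   # Kalman gain
    x_filt = x_pred + K * (xm - H * x_pred)                    # blend prediction and measurement
    u_filt = np.sqrt((1.0 - K * H) * u_pred ** 2)              # shrunk uncertainty after the update
    return (x_filt, u_filt)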
Example #2
					(qVal[i]-qValsR[j-1])/(qValsR[j]-qValsR[j-1]))
			qValsR.insert(j,qVals[i])

ppsE_T = [0]*len(ser_T)	 	# expected time of serial arrival
covU_T = [0]*len(ser_T)	 	# expected uncertainty
ardU_t = 0.5				# uncertainty in Arduino times
#ardD_t = (ser_T[-1]-ser_T[0])/(len(ser_T)-1)-1000 	# Arduino drift per second (from post-analysis)
ardD_t = 0
	
covU_T[0] = 50
ppsE_T[0] = ser_T[0]-ppsser_xR[qValsR.index(q_T[0])]
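# The loop below predicts each offset-corrected arrival time: nominally 1000 ms plus the
# Arduino drift, minus the change in the q-dependent serial offset (ppsser_xR) between the
# current and the next fix; the measurement is ser_T minus that same offset.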

for i in range(len(ppsE_T)-1):
	qValiCur = qValsR.index(q_T[i])
	qValiNext = qValsR.index(q_T[1+i])
	ppsE_T[1+i], covU_T[1+i] = klm.KalFilIter(ppsE_T[i], 1000+ardD_t-(ppsser_xR[qValiNext]-ppsser_xR[qValiCur]),
									ser_T[1+i]-ppsser_xR[qValiNext], covU_T[i], ardU_t, ppsser_uR[qValiNext])
	#print(1000+ardD_t-(ppsser_xR[qValiNext]-ppsser_xR[qValiCur]), (ser_T[1+i]-ppsser_xR[qValiNext])-(ser_T[i]-ppsser_xR[qValiCur]))

ppsser_dT = [ser_T[i]-pps_T[i] for i in range(len(ser_T))]

qValsN = [0]*len(qVals)
q_TN = [0]*len(q_T)
qMax = max(qVals)
qMin = min(qVals)
for i in range(len(qVals)):
	qValsN[i] = (qVals[i]-qMin)/(qMax-qMin)
for i in range(len(q_T)):
	q_TN[i] = (q_T[i]-qMin)/(qMax-qMin)

mplt.rcParams.update({'font.size': 15})
fig = plt.figure(figsize=(11,6))
Example #3
ser_Tb[0] = ser_Tf[0]

cResetLast = 0

# Cycle through the data and apply the Kalman filter.
# Reset the Kalman filter when there is a jump in the baseline (detected from a consistent skew in the filtered dt values).
# The baseline takes the average value for the period, so the start and end of each region must be registered.
# Problem: not all points fall into one region -- there are sometimes drifts between regions.

region_i = 0  # start of region
region_f = 0  # end of region
region_b = False  # whether we are in a region
j = 1
while (j < (len(ser_Tf))):
    # Kalman filter to find baseline (with resets)
    (ser_Tf[j], ser_Uf[j]) = klm.KalFilIter(ser_Tf[j - 1], avg_T, ser_Tm[j],
                                            ser_Uf[j - 1], utp, utm, 1, 1, 1)
    # Kalman filtered baseline
    (ser_Tf2[j], ser_Uf2[j]) = klm.KalFilIter(ser_Tf2[j - 1], avg_T, ser_Tf[j],
                                              ser_Uf2[j - 1], utp, ser_Uf[j],
                                              1, 1, 1)
    # just Kalman filter
    (ser_Tf_[j], ser_Uf_[j]) = klm.KalFilIter(ser_Tf_[j - 1], avg_T, ser_Tm[j],
                                              ser_Uf_[j - 1], utp, utm, 1, 1,
                                              1)
    serser_dTf[j - 1] = ser_Tf[j] - ser_Tf[j - 1]
    serser_dTf2[j - 1] = ser_Tf2[j] - ser_Tf2[j - 1]

    # increase count in direction of skew
    dCount = np.sign(serser_dTf2[j - 1] - avg_T)
    if (np.sign(dCount) == -np.sign(count)):  # reset count if skew has changed
        count = 0
Example #4
def KalBaseline(xm,up,um,A=1,B=1,H=1, countTY=5, countTN=10):
	""" Input variables
	returns (xb, ub, ib, xf1, xf2)
	x,u: filter variable and its uncertainty; -f,-m: filtered, measured; _: previous
	A: mixing factor for last measurement contribution; B: mixing factor for dxp; H: convert between measurement, state
	countTN: number of consecutive skewed values before the baseline is ended
	countTY: minimum size of baseline
	"""
	""" Working variables
	-t: temporary
	d-: difference
	"""	
	length = len(xm)
	xf1 = [0]*length 			 	 	 	 	# Kalman filtered value, with resets
	xf2 = [0]*length 			 	 	 	 	# Kalman filtered time 2nd order, with resets
	xb = [0]*length 						 	# stores times for baseline
	
	
	uf1 = [0]*length 						 	# uncertainty in filtered time
	uf2 = [0]*length 						 	# uncertainty in filtered time
	ub = [0]*length 							# uncertainty in baseline time
	
	avg_x = (xm[-1]-xm[0])/(len(xm)-1) 			# average difference
	avg_u = um/(len(xm)-1) 					# uncertainty in average value
#	avg_x = 1000.52
#	avg_u = 0
	print(avg_x)
	
	uf1[0] = um
	uf2[0] = um
	xf1[0] = xm[0]
	xf2[0] = xm[0]
	xb[0] = xf1[0]
	
	ib = [] 							  	 	# stores a list of index pairs [start, end], each giving the range
											# of a flat baseline (from start -> end inclusive)
	
	# Cycle through the data and apply the Kalman filter.
	# Reset the Kalman filter when there is a jump in the baseline (detected from a consistent skew in the filtered values).
	# The baseline takes the average value for the period, so the start and end of each region must be registered.
	# Problem: not all points fall into one region -- there are sometimes drifts between regions.
	
	jCur = 1
	
	def FindBaselineSection(xi,xf,xAvg_,uAvg_):
		"""
		xi: starting index
		xf: ending index (inclusive)
		Finds the baseline for a section; returns the index of the end of the baseline
		(the last index in the baseline).
		If there is no baseline to begin with, the current index is returned.
		"""
		dirn = klm.sign(xf-xi) 					# direction of change
		ip = xi-dirn 								# previous index
		ic = xi 							 	 	# current index
		count = 0
		while(klm.sign(ic-xf)!=dirn): 			  		# find out if we have finished
			# Kalman filter to find baseline (with resets)
			(xf1[ic], uf1[ic]) = klm.KalFilIter(xf1[ip],xAvg_*dirn,xm[ic], uf1[ip],uAvg_,um, 1,1,1)
			# Kalman filtered baseline
			#(xf2[ic], uf2[ic]) = klm.KalFilIter(xf2[ip],xAvg_*dirn,xf1[ic], uf2[ip],uAvg_,uf1[ic], 1,1,1)
			(xf2[ic], uf2[ic]) = (xf1[ic], uf1[ic])
			d_x2x2 = xf2[ic]-xf2[ip]
			
			# increase count in direction of skew
			dCount = klm.sign(d_x2x2-xAvg_*dirn)
			if (dCount==-klm.sign(count)): 	# reset count if skew has changed
				count = dCount
			else:
				count += dCount
			print("~",ic,ip,count,d_x2x2,xm[ic],xf1[ic],xf2[ic])
			if (abs(count)>=countTN):
				return ic-countTN*dirn
			ip = ic
			ic += dirn
		return ip
				
	while(True):
		print("jCur",jCur)
		jPrev = jCur
		xf1[jCur-1] = (xm[jCur]-avg_x+xm[jCur-1])/2 	 	# seed the previous filtered value: average of the last measurement and the current one projected back one step
		xf2[jCur-1] = xf1[jCur-1]
		print("set",jCur-1,xf1[jCur-1])
		jCur = FindBaselineSection(jCur, length-1, avg_x, avg_u)# find index of end of current baseline
		print("FB",jCur,jPrev)
		if (jCur>jPrev+1): 						 	# check whether there is a baseline
			continueOn = True
			ib.append([0,jCur]) 					 	# add the region to list
			if (len(ib)>1): 					 	 	# check whether there is a previous region
				ib[-1][0] = ib[-2][1]+1 				# use previous region to bound the current baseline
			avg_x_ = (xf2[ib[-1][1]]-xf2[ib[-1][0]])/(ib[-1][1]-ib[-1][0])
			d_xf2_ = [xf2[i+1]-xf2[i] for i in range(ib[-1][0],ib[-1][1])]
			avg_u_ = np.std(d_xf2_)
			(avg_x_,avg_u_) = klm.KalFilIter(avg_x,0,avg_x_,avg_u,avg_u,avg_u_,1,1,1)
			print("averages",avg_x,avg_u,";",avg_x_,avg_u_)
			jPrev = FindBaselineSection(jCur-1,ib[-1][0], avg_x_, avg_u) 	# work backwards to find start of region
			print("FB",jPrev)
			if (jCur-jPrev<countTY):
				ib = ib[:-1]
				continueOn = False
			if(continueOn):
				for i_ in range(ib[-1][0],ib[-1][1]+1,1): 	# assign baseline values
					xb[i_] = xf2[i_]
					ub[i_] = uf2[i_]
				avg_x_ = (xb[ib[-1][1]]-xb[ib[-1][0]])/(ib[-1][1]-ib[-1][0])
				d_xb_ = [xb[i+1]-xb[i] for i in range(ib[-1][0],ib[-1][1])]
				avg_u_ = np.std(d_xb_)
				(avg_x,avg_u) = klm.KalFilIter(avg_x,0,avg_x_,avg_u,avg_u,avg_u_,1,1,1)
				print("averages",avg_x,avg_u)
				print("ib:",ib[-1])
			#jCur += 1 							 	# add one if we have the end of a baseline
		jCur += 2
		if (jCur >= length):
			return (xb,ub,ib,xf1,xf2)
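
A hypothetical call to KalBaseline with synthetic data; the numbers and uncertainties here are illustrative only, and the klm and numpy imports of the original file are assumed to be available.

import numpy as np

# synthetic arrival times: ~1000 ms apart, with a 30 ms jump in the baseline half-way through
rng = np.random.default_rng(0)
xm = list(np.cumsum(rng.normal(1000.0, 2.0, 200)))
xm[100:] = [t + 30.0 for t in xm[100:]]

(xb, ub, ib, xf1, xf2) = KalBaseline(xm, up=1.0, um=2.0)
for (i0, i1) in ib:
    print("baseline region:", i0, "->", i1)   # index ranges of the flat baselines found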
Example #5
]
ppsk1e_dT = [
    dataCol[oset_est][i] - dataCol[oset_pps][i] for i in range(len(dataRow))
]
ppsser_dT = [
    dataCol[oset_ser][i] - dataCol[oset_pps][i] for i in range(len(dataRow))
]

klm_T = [dataCol[oset_ser][0]] * len(dataCol[oset_ser])
klm_U = 1
est_dT = 999.983
est_U = 0.001
meas_U = 40
for i in range(len(klm_T) - 1):
    klm_T[1 + i], klm_U = klm.KalFilIter(klm_T[i], est_dT,
                                         dataCol[oset_ser][1 + i], klm_U,
                                         est_U, meas_U)

ppsklm_dT = [klm_T[i] - dataCol[oset_pps][i] for i in range(len(klm_T))]

print(len(ppsklm_dT), 12 * (len(ppsklm_dT) / 100)**(1 / 4))
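# the second printed value is a heuristic histogram bin count: 12 bins per 100 points, scaling as n**(1/4)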

pltDat = [serser_dT, ppspps_dT, k1ek1e_dT, ppsk1e_dT, ppsser_dT, ppsklm_dT]
savDat = [
    "serser_dT", "ppspps_dT", "k1ek1e_dT", "ppsk1e_dT", "ppsser_dT",
    "ppsklm_dT"
]
titDat = [
    "Consecutive serial", "Consecutive PPS",
    "Consecutive single Kalman estimate", "PPS to single Kalman estimate",
    "PPS to serial", "PPS to new Klm"
Example #6
ser_T = ser_T[start:end]
pps_T = pps_T[start:end]

serE_T = [0] * len(ser_T)  # expected time of serial arrival
covU_T = [0] * len(ser_T)  # expected uncertainty
ardU_t = 0.0005  # uncertainty in Arduino times
ardD_t = (pps_T[-1] - pps_T[0]) / (
    len(pps_T) - 1
) - 1000  # Arduino drift per second (from post-analysis): average Arduino ms in one true second, minus 1000
serU_t = 150  # uncertainty in GPS serial arrival times
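# Illustration with made-up numbers: if pps_T spans 3600 nominal one-second intervals totalling
# 3,600,061 ms of Arduino time, the mean interval is ~1000.017 ms, so ardD_t ~ +0.017 ms per second.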

covU_T[0] = 100
serE_T[0] = ser_T[0]
for i in range(len(serE_T) - 1):
    serE_T[1 + i], covU_T[1 + i] = klm.KalFilIter(serE_T[i], 1000 + ardD_t,
                                                  ser_T[1 + i], covU_T[i],
                                                  ardU_t, serU_t)

ppsserE_dT = [0] * len(serE_T)
for i in range(len(serE_T)):
    ppsserE_dT[i] = serE_T[i] - pps_T[i]

ppsser_dT = [0] * len(ser_T)
for i in range(len(ppsser_dT)):
    ppsser_dT[i] = ser_T[i] - pps_T[i]

serser_dT = [0] * (len(ser_T) - 1)
for i in range(len(serser_dT)):
    serser_dT[i] = ser_T[1 + i] - ser_T[i]

ppspps_dT = [0] * (len(ser_T) - 1)
Example #7
for iSeg in range(segNum):
    # get segment data and find second lengths
    ppsData = pps_T[iSeg * segLen:(iSeg + 1) * segLen]
    serData = ser_T[iSeg * segLen:(iSeg + 1) * segLen]
    serser_dData = [
        serData[1 + i] - serData[i] for i in range(len(serData) - 1)
    ]
    if (timing_PPS == True):
        secLenx_ = (ppsData[-1] - ppsData[0]) / (len(ppsData) - 1)
        secLenU_ = SecLenU_p / np.sqrt(len(ppsData))
    else:
        secLenx_ = (serData[-1] - serData[0]) / (len(serData) - 1)
        secLenU_ = np.std(serser_dData) * 2 / segLen
    if (iSeg == 0): secLenFu = secLenU_

    (secLenFx, secLenFu) = klm.KalFilIter(secLenFx, 0, secLenx_, secLenFu, 1,
                                          secLenU_, 1, 1, 1)

    # find secser and then find offset
    secser_dT = [
        serData[i] - serData[0] - i * secLenFx for i in range(len(serData))
    ]
    binVals = np.histogram(secser_dT, bins=binEdges)[0]
    binVals = [
        binVals[i] / (sum(binVals) * binWidth) for i in range(len(binVals))
    ]
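    # binVals now integrates to one over the bin range (sum(binVals) * binWidth == 1),
    # i.e. it approximates a probability density of the second-vs-serial offsets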

    #	plt.plot(binMids, binVals)
    #	plt.plot(binMids, binValsC)
    #	plt.show()

    #	if (iSeg>0):
Example #8
for i in range(sampleNum):
    sample_pps_T = pps_T[i * sampleSize:(i + 1) * sampleSize]
    sample_ser_T = ser_T[i * sampleSize:(i + 1) * sampleSize]
    sample_serser_dT = [
        sample_ser_T[1 + i] - sample_ser_T[i]
        for i in range(len(sample_ser_T) - 1)
    ]
    if (timing_PPS == True):
        avgTx_ = (sample_pps_T[-1] - sample_pps_T[0]) / (len(sample_pps_T) - 1)
        avgTu_ = avgTu_p / np.sqrt(len(sample_pps_T))
    else:
        avgTx_ = (sample_ser_T[-1] - sample_ser_T[0]) / (len(sample_ser_T) - 1)
        avgTu_ = np.std(sample_serser_dT) * 2 / sampleSize
    if (i == 0): avgTu = avgTu_

    (avgTx, avgTu) = klm.KalFilIter(avgTx, 0, avgTx_, avgTu, 1, avgTu_, 1, 1,
                                    1)
    for j in range(sampleSize):
        secser_dT[i * sampleSize + j] = sample_ser_T[j] - (ser_T[0] + tOff)
        tOff += avgTx
    print("avgtx,avgTu", avgTx, avgTu, avgTu_)

# get template distribution

binValsC = np.histogram(ppsser_dTC, bins=binEdges)[0]
binVals = np.histogram(secser_dT, bins=binEdges)[0]
binValsC2 = [
    float(binValsC[i] / (sum(binValsC) * binWidth))
    for i in range(len(binValsC))
]
binVals2 = [
    float(binVals[i] / (sum(binVals) * binWidth)) for i in range(len(binVals))
Example #9
        avg = 0
        for i in range(len(x)):
            if (x[i] == 0): avg += 1000
            else: avg += y[i] / x[i]
        avg /= len(x)
        avgEnd = (y[-1] - y[0]) / (len(y) - 1)
        #actT = (z[-1]-z[0])/(len(z)-1)
        actT = CrossCor(x, z) / CrossCor(x, x)

        kalIter = 20  # number of datapoints to filter
        kalIter = min(kalIter, int(len(y) / 2) - 1)
        ky = [y[kalIter], y[-kalIter - 1]]  # start and end filtered values
        ku = [150, 150]
        for i in range(0, kalIter):
            (ky[1], ku[1]) = kal.KalFilIter(ky[1], 1000, y[-kalIter + i],
                                            ku[1], ue, um, 1, 1, 1)
            (ky[0], ku[0]) = kal.KalFilIter(ky[0], -1000, y[kalIter - 1 - i],
                                            ku[0], ue, um, 1, 1, 1)
        avgEndKal1 = (ky[1] - ky[0]) / (len(y) - 1)
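        # avgEndKal1: mean spacing estimated from Kalman-smoothed endpoints; smoothing kalIter
        # points at each end reduces the sensitivity to noise in the raw first/last samples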

        #avgDist = MinimiseWidth(y)[0]

        #		ky = [0]*len(y)
        #		ku = [um]*len(y)
        #		for i in range(1,len(x)):
        #			(ky[i],ku[i]) = kal.KalFilIter(ky[i-1],avgEndKal1,y[i],ku[i-1],ue,um,1,1,1)
        #		avgEndKal1 = (ky[-1]-ky[0])/(len(y)-1)

        estDifEnd.append(avgEnd)
        estAvg.append(avg)
        estCCor.append(ccor)