#Imports needed by this script; project-local classes (VideoStream, FlowStream,
#DistMesh, IteratedKalmanFilter, IteratedMSKalmanFilter) are assumed to come from
#this project's own modules and are not imported here
import os
import sys
import argparse

import numpy as np
import cv2
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

#The top-level block below assumes m_in, flow_in, distmesh, frame, cuda and sparse
#have already been defined (e.g. in an interactive session).
#Load the true mesh states: each non-header line holds one frame's state
#(2*nX positions followed by 2*nX velocities) after a leading index column
f_mesh = open(m_in, 'r')
lines = f_mesh.readlines()
nF = len(lines)-1
nX = int(lines[0].split(',')[1])
truestates = np.zeros((nF, nX*4), dtype = np.float32)
predstates = np.zeros((nF, nX*4), dtype = np.float32)
for i in range(1,nF+1):
	line = lines[i]
	truestates[i-1,:] = [float(x) for x in line.split(',')[1:]]

predstates[0,:] = truestates[0,:]

rms_vel = np.zeros(nF)
rms_pos = np.zeros(nF)

flowstream = FlowStream(flow_in)
ret_flow, flowframe = flowstream.read()

kf = IteratedKalmanFilter(distmesh, frame, flowframe, cuda = cuda, sparse = sparse)
self = kf.state

#Test labels
#with self._fbo4:
#	m = gloo.read_pixels()
#np.max(m[:,:,2])
#np.sum(m[:,:,2] == 255)
#np.unique(m[:,:,2])
#kf.state.E[0]
#kf.state.labels
#np.unique(m[:,:,1])
#Run the iterated EKF over the synthetic sequence for one set of filter parameters.
#Relies on module-level names set up elsewhere (name, ff, v_in, dm_in, m_in, flow_in,
#threshold, gridsize, mask_flow, cuda, sparse, nI, max_frames).
def runIEKF(eps_Z, eps_J, eps_M, eps_F):
	print 'Running with:'
	print '- eps_Z:', eps_Z
	print '- eps_J:', eps_J
	print '- eps_M:', eps_M
	print '- eps_F:', eps_F

	if mask_flow:
		notes = 'masked_iekf'
	else:
		notes = 'iekf'
	notes += '_eps_Z_%f'%eps_Z
	notes += '_eps_J_%f'%eps_J
	notes += '_eps_M_%f'%eps_M
	notes += '_eps_F_%f'%eps_F

	img_out = './synthetictests/' + name + '/' + ff + '_' + notes + '_pred/'
	if not os.path.isdir(img_out):
		os.makedirs(img_out)

	print 'Loading synthetic data streams'
	capture = VideoStream(v_in, threshold)
	frame = capture.current_frame()
	mask, ctrs, fd = capture.backsub()
	distmesh = DistMesh(frame, h0 = gridsize)
	distmesh.load(dm_in)
	
	#Load true data
	f_mesh = open(m_in, 'r')
	lines = f_mesh.readlines()
	nF = len(lines)-1
	nX = int(lines[0].split(',')[1])
	truestates = np.zeros((nF, nX*4), dtype = np.float32)
	predstates = np.zeros((nF, nX*4), dtype = np.float32)
	for i in range(1,nF+1):
		line = lines[i]
		truestates[i-1,:] = [float(x) for x in line.split(',')[1:]]
	
	predstates[0,:] = truestates[0,:]
	
	rms_vel = np.zeros(nF)
	rms_pos = np.zeros(nF)
	
	flowstream = FlowStream(flow_in)
	ret_flow, flowframe = flowstream.read()
	kf = IteratedKalmanFilter(distmesh, frame, flowframe, cuda = cuda, sparse = sparse,\
	 eps_F = eps_F, eps_Z = eps_Z, eps_J = eps_J, eps_M = eps_M, nI = nI)
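	#The eps_* values are scale parameters forwarded to the filter; their exact roles
	#are defined by IteratedKalmanFilter's implementation and are not documented here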
	
	count = 0
	print 'Tracking with Kalman filter'
	while(capture.isOpened() and count < max_frames):
	#for idx in range(1):
		count += 1
		ret, frame, grayframe, mask = capture.read()
		ret_flow, flowframe = flowstream.read()
		if ret is False or ret_flow is False:
			break
	
		print 'Frame %d' % count 
		kf.compute(grayframe, flowframe, mask, maskflow = mask_flow, imageoutput = img_out+'solution_frame_%03d'%count)
		#kf.compute(grayframe, flowframe, mask, maskflow = mask_flow)
	
		predstates[count,:] = np.squeeze(kf.state.X)
		r_pos = truestates[count,0:(2*nX)]-predstates[count,0:(2*nX)]
		r_vel = truestates[count,(2*nX):]-predstates[count,(2*nX):]
		rms_pos[count] = np.sqrt(np.mean(np.multiply(r_pos, r_pos)))
		rms_vel[count] = np.sqrt(np.mean(np.multiply(r_vel, r_vel)))
		print 'RMS_pos:', rms_pos[count], 'RMS_vel:', rms_vel[count]
	
	print 'Saving'
	np.savez('./synthetictests/' + name + '/' + ff + '_' + notes + '_pred.npz', predstates, truestates, rms_pos, rms_vel)
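	#Note: np.savez with positional arguments stores the arrays under the generic keys
	#arr_0..arr_3 (predstates, truestates, rms_pos, rms_vel in that order)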
	
	print 'Done... how\'d we do?'
	
	#Make plot of tracking error (clear the figure first so repeated calls to
	#runIEKF don't accumulate curves)
	plt.clf()
	plt.plot(range(nF), rms_pos, label='RMS position')
	plt.plot(range(nF), rms_vel, label='RMS velocity')
	plt.legend(loc='upper left')
	plt.ylabel('RMS')
	plt.xlabel('Frame')
	plt.savefig('./synthetictests/' + name + '/' + ff + '_' + notes + '_pred_rms.eps')
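
#Example parameter sweep (a sketch with hypothetical values; adjust to the noise
#scales of the data):
#for eps_Z in [1e-3, 1e-2, 1e-1]:
#	for eps_M in [1e-3, 1e-2]:
#		runIEKF(eps_Z, 1e-3, eps_M, 1)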
def main():
	usage = """run_kalmanfilter.py [input_avi_file] [optic_flow_path] [output_avi_file] [threshold]

HydraGL. State space model using an extended Kalman filter to track Hydra in video

Dependencies:
-Vispy*
-Numpy
-PyCuda**
-DistMesh 
-HDF
-OpenCV2
-matplotlib

Notes:
*  Uses OpenGL rendering. If using remotely, you'll need to set up a VirtualGL server
** Only needed if you have a CUDA-compatible graphics card

Example: 
./run_kalmanfilter.py ./video/johntest_brightcontrast_short.avi ... 
 ./video/johntest_brightcontrast_short/flow ./video/output.avi -s 15

For help:
./run_kalmanfilter.py -h 

Ben Lansdell
02/16/2016
"""

	parser = argparse.ArgumentParser(usage = usage)
	parser.add_argument('fn_in', default='./video/johntest_brightcontrast_short.avi', 
		help='input video file, any format readable by OpenCV', nargs = '?')
	parser.add_argument('flow_in', default='./video/johntest_brightcontrast_short/flow', 
		help='input optic flow path', nargs = '?')
	parser.add_argument('fn_out', default='./video/johntest_brightcontrast_short_output.avi', 
		help='avi output video file', nargs='?')
	parser.add_argument('-n', '--name', default='johntest_brightcontrast_short', 
		help='name for saving run images', nargs='?')
	parser.add_argument('-t', '--threshold', default=9,
		help='threshold intensity below which is background', type = int)
	parser.add_argument('-s', '--gridsize', default=22,
		help='edge length for mesh (smaller is finer; unstable much further below 18)', type = int)
	#Note: type = bool treats any non-empty string (including 'False') as True,
	#so parse the flag value explicitly
	parser.add_argument('-c', '--cuda', default=True,
		help='whether or not to do analysis on CUDA',
		type = lambda s: s.lower() not in ('false', '0', 'no'))
	args = parser.parse_args()

	if len(sys.argv) == 1:
		print("No command line arguments provided, using defaults")
	
	capture = VideoStream(args.fn_in, args.threshold)

	frame = capture.current_frame()
	mask, ctrs, fd = capture.backsub()
	distmesh = DistMesh(frame, h0 = args.gridsize)
	distmesh.createMesh(ctrs, fd, frame, plot = True)
	
	#Load flow data from directory
	flowstream = FlowStream(args.flow_in)
	ret_flow, flowframe = flowstream.peek()

	if ret_flow:
		kf = IteratedMSKalmanFilter(distmesh, frame, flowframe, cuda = args.cuda, sparse = True, multi = True)
	else:
		print 'Cannot read flow stream'
		return 

	#kf.compute(capture.gray_frame(), flowframe)
	nI = 3
	count = 0
	while(capture.isOpened()):
		count += 1
		print 'Frame %d' % count 
		ret, frame, grayframe, mask = capture.read()
		ret_flow, flowframe = flowstream.read()
		if ret is False or ret_flow is False:
			break
		#for i in range(nI):
		#	print 'Iteration %d' % i 
		#	raw_input("Finished. Press Enter to continue")
		#	kf.compute(grayframe, flowframe)
		kf.compute(grayframe, flowframe, mask, imageoutput = 'screenshots/' + args.name + '_frame_' + str(count))
	capture.release()
	#Note: fn_out is parsed but no output VideoWriter is ever created, so there is
	#nothing further to release here
	cv2.destroyAllWindows()
	raw_input("Finished. Press ENTER to exit")
def main(argv):
	if len(argv) < 3:
		print 'Not enough arguments'
		return 
	ff = argv[1]
	name = argv[2]

	if len(argv) == 4:
		flownotes = argv[3]
	elif len(argv) == 3:
		flownotes = ''
	else:
		print 'Too many arguments'
		return

	#name = 'square5_gradient_texture_rot2'
	#name = 'square4_gradient_texture_rot1'
	#name = 'square3_gradient_texture'
	#name = 'square2_gradient'
	#name = 'square1'
	#name = 'hydra1'
	#ff = 'translate_leftup_stretch'
	#ff = 'translate_leftup'
	cuda = True
	sparse = True

	#Brox flow:
	#flownotes = ''
	#flownotes = '_solver_it_20' 
	#flownotes = '_solver_it_5'
	#flownotes = '_inner_it_20'
	#flownotes = '_inner_it_5'
	#flownotes = '_alpha_0.4'
	#flownotes = '_alpha_0.1'
	#flownotes = '_gamma_100'
	#flownotes = '_gamma_25'

	#DeepFlow
	#flownotes = '_deep'

	#SimpleFlow
	#flownotes = '_simple'

	m_in = './synthetictests/' + name + '/' + ff + '_mesh.txt'
	#Video and Brox flow in
	v_in = './synthetictests/' + name + '/' + ff + '/' + ff + '.avi'
	flow_in = './synthetictests/' + name + '/' + ff + '/' + ff + '_flow' + flownotes

	#Output
	img_out = './synthetictests/' + name + '/' + ff + '_testflow' + flownotes + '/'
	if not os.path.isdir(img_out):
		os.makedirs(img_out)

	threshold = 8

	print 'Loading synthetic data streams'
	capture = VideoStream(v_in, threshold)
	frame = capture.current_frame()
	mask, ctrs, fd = capture.backsub()

	f_mesh = open(m_in, 'r')
	lines = f_mesh.readlines()
	nF = len(lines)-1
	nX = frame.shape[0]	#frame size in pixels; frames are assumed square below

	flowstream = FlowStream(flow_in)
	ret_flow, flowframe = flowstream.read()

	rms_flow = np.zeros(nF)
	true_flow = np.zeros((nX, nX, 2))
	rel_abs_res = np.zeros((nF, nX, nX))
	abs_res = np.zeros((nF, nX, nX))
	video = np.zeros((nF, nX, nX))

	samp_frac = 0.1

	count = 0
	print 'Comparing true flow to computed flow'

	abs_flow_sample = []
	abs_res_sample = []
	video_sample = []

	#Stop before indexing past the preallocated per-frame arrays (nF rows)
	while(capture.isOpened() and count < nF-1):
		count += 1
		ret, frame, grayframe, mask = capture.read()
		ret_flow, flowframe = flowstream.read()
		if ret is False or ret_flow is False:
			break
		print 'Frame %d'%count

		video[count,:,:] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		#Compute the true flow at every pixel inside the mask from the analytic flow
		#field (flowfields is assumed to be a module-level dict mapping each test name
		#to a callable field). Note: this nested Python loop is very slow; a commented
		#vectorized sketch follows the loop below.

		for i in range(nX):
			for j in range(nX):
				m = mask[i,j]
				(fx, fy) = flowfields[ff]((i,j),0)
				true_flow[i,j,:] = [fx*m, fy*m]
				res = m*(true_flow[i,j,:] - flowframe[i,j,:])
				abs_res[count,i,j] = np.sqrt(np.sum(res*res))
				if m > 0:
					rel_abs_res[count,i,j] = np.sqrt(np.sum(res*res))/np.sqrt(fx*fx+fy*fy)
					u = np.random.uniform()
					if u < samp_frac:
						abs_flow_sample += [np.sqrt(fx*fx+fy*fy)]
						abs_res_sample += [abs_res[count,i,j]]
						video_sample += [video[count,i,j]]
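
		#A possible vectorized alternative for filling true_flow (sketch; assumes
		#flowfields[ff] only accepts scalar points, so np.vectorize is used for
		#convenience rather than speed, and the residual bookkeeping above would
		#still need its own vectorization):
		#fxy = np.vectorize(lambda i, j: flowfields[ff]((i, j), 0))
		#ii, jj = np.meshgrid(np.arange(nX), np.arange(nX), indexing = 'ij')
		#fx_grid, fy_grid = fxy(ii, jj)
		#true_flow[:,:,0] = fx_grid*mask
		#true_flow[:,:,1] = fy_grid*mask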

		#Load computed flow and compare
		r = true_flow - flowframe
		s = np.sum(mask)
		rms_flow[count] = np.sqrt(np.sum(np.multiply(r, r))/s)
		plt.clf()
		plt.imshow(abs_res[count,:,:])
		plt.colorbar()
		plt.savefig(img_out + 'flow_abs_res_%03d.png'%count)
		plt.clf()
		plt.imshow(rel_abs_res[count,:,:])
		plt.colorbar()
		plt.savefig(img_out + 'flow_rel_abs_res_%03d.png'%count)
		#cv2.imwrite(img_out + 'flow_abs_res_%03d.png'%count, abs_res[count,:,:])
		print 'RMS_flow:', rms_flow[count]

	print 'Done... how\'d we do?'

	df = pd.DataFrame({'abs_flow_sample':pd.Series(abs_flow_sample),\
		'abs_res_sample':pd.Series(abs_res_sample),\
		'video_sample':pd.Series(video_sample)})

	#Save output
	df.to_pickle(img_out + "flow_errors.pkl")
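	#(the samples can be reloaded later with pd.read_pickle(img_out + "flow_errors.pkl"))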
	np.save(img_out + "rms_flow.npy", rms_flow)

	g1 = sns.jointplot("video_sample", "abs_res_sample", data = df, kind="kde", color="b")
	g1.savefig(img_out + 'intensity_abs_res.eps')
	g2 = sns.jointplot("abs_flow_sample", "abs_res_sample", data = df, kind="kde", color="b")
	g2.savefig(img_out + 'flow_abs_res.eps')

	#Plot abs error in flow vs abs magnitude of flow
	plt.clf()
	plt.plot(range(nF), rms_flow, label='RMS flow')
	plt.legend(loc='upper left')
	plt.ylabel('RMS')
	plt.xlabel('Frame')
	plt.savefig(img_out + 'flow_rms.eps')
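
#Entry point (sketch): assumes this example is run standalone and that the
#flowfields dict used above is defined elsewhere in the module
#if __name__ == '__main__':
#	main(sys.argv)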