# --- Mesh initialization, tracking loop, and stats extraction (script fragment) ---
# NOTE(review): relies on globals defined elsewhere in this file and not visible
# here: frame, gridsize, threshold, flowframe, cuda, video, flow, stats, np,
# time, DistMesh, findObjectThreshold, IteratedMSKalmanFilter -- confirm against
# the surrounding chunks before reuse.

# Build an initial mesh over the thresholded object in the first frame.
distmesh = DistMesh(frame, h0 = gridsize)
mask, ctrs, h = findObjectThreshold(frame, threshold = threshold)
distmesh.createMesh(ctrs, h, frame, plot=False)

# Multi-perturbation iterated Kalman filter seeded with the initial mesh.
kf = IteratedMSKalmanFilter(distmesh, frame, flowframe, cuda, multi = True)
kf.state.render()

# Track the first three frames of the pre-loaded video/flow volumes.
count = 0
for i in range(3):
	count += 1
	print 'Frame %d' % count
	frame = video[:,:,i]
	# Foreground mask: any nonzero pixel counts as object.
	mask = (frame > 0).astype(np.uint8)
	flowframe = flow[:,:,:,i]
	time.sleep(1)
	kf.compute(frame, flowframe, mask)

#Extract stats
# Per-update averages: stats.stateupdatetc appears to be (total time, n updates),
# so [1] is used as the common denominator below.
meshpts = stats.meshpts
ave_update_time = stats.stateupdatetc[0]/stats.stateupdatetc[1]
# NOTE(review): divides statepredtime[0] by stateupdatetc[1], not
# statepredtime[1] -- looks intentional (per-update average) but confirm.
ave_pred_time = stats.statepredtime[0]/stats.stateupdatetc[1]
jacpartitions = stats.jacobianpartitions
hesspartitions = stats.hessianpartitions
ave_nrenders = stats.renders[0]/stats.stateupdatetc[1]
# Average number of renders spent on jacobian / hessian evaluation per update.
ave_jacrenders = stats.jacobianrenderstc[1]/stats.stateupdatetc[1]
ave_hessrenders = stats.hessianrenderstc[1]/stats.stateupdatetc[1]
ave_theoryrenders = ave_jacrenders + ave_hessrenders
ave_jac_time = stats.jacobianrenderstc[0]/stats.stateupdatetc[1]
ave_hess_time = stats.hessianrenderstc[0]/stats.stateupdatetc[1]
def main(): usage = """run_kalmanfilter.py [input_avi_file] [optic_flow_path] [output_avi_file] [threshold] HydraGL. State space model using an extended Kalman filter to track Hydra in video Dependencies: -Vispy* -Numpy -PyCuda** -DistMesh -HDF -OpenCV2 -matplotlib Notes: * Uses OpenGL rendering. If using remotely, you'll need to set up a VirtualGL server ** If have a CUDA compatible graphics card Example: ./run_kalmanfilter.py ./video/johntest_brightcontrast_short.avi ... ./video/johntest_brightcontrast_short/flow ./video/output.avi -s 15 For help: ./run_kalmanfilter.py -h Ben Lansdell 02/16/2016 """ parser = argparse.ArgumentParser() parser.add_argument('fn_in', default='./video/johntest_brightcontrast_short.avi', help='input video file, any format readable by OpenCV', nargs = '?') parser.add_argument('flow_in', default='./video/johntest_brightcontrast_short/flow', help='input optic flow path', nargs = '?') parser.add_argument('fn_out', default='./video/johntest_brightcontrast_short_output.avi', help='avi output video file', nargs='?') parser.add_argument('-n', '--name', default='johntest_brightcontrast_short', help='name for saving run images', nargs='?') parser.add_argument('-t', '--threshold', default=9, help='threshold intensity below which is background', type = int) parser.add_argument('-s', '--gridsize', default=22, help='edge length for mesh (smaller is finer; unstable much further below 18)', type = int) parser.add_argument('-c', '--cuda', default=True, help='whether or not to do analysis on CUDA', type = bool) args = parser.parse_args() if len(sys.argv) == 1: print("No command line arguments provided, using defaults") capture = VideoStream(args.fn_in, args.threshold) frame = capture.current_frame() mask, ctrs, fd = capture.backsub() distmesh = DistMesh(frame, h0 = args.gridsize) distmesh.createMesh(ctrs, fd, frame, plot = True) #Load flow data from directory flowstream = FlowStream(args.flow_in) ret_flow, flowframe = flowstream.peek() if ret_flow: kf = 
IteratedMSKalmanFilter(distmesh, frame, flowframe, cuda = args.cuda, sparse = True, multi = True) else: print 'Cannot read flow stream' return #kf.compute(capture.gray_frame(), flowframe) nI = 3 count = 0 while(capture.isOpened()): count += 1 print 'Frame %d' % count ret, frame, grayframe, mask = capture.read() ret_flow, flowframe = flowstream.read() if ret is False or ret_flow is False: break #for i in range(nI): # print 'Iteration %d' % i # raw_input("Finished. Press Enter to continue") # kf.compute(grayframe, flowframe) kf.compute(grayframe, flowframe, mask, imageoutput = 'screenshots/' + args.name + '_frame_' + str(count)) capture.release() output.release() cv2.destroyAllWindows() raw_input("Finished. Press ENTER to exit")
#print 'Test initjacobian' #print 'CPU:', z_cpu #print 'GPU:', z_gpu # ##Perturb vertices a bit and rerender #idx = 10 #eps = 1e-9 #print 'Test jz' #for idx in range(36): # kf.state.X[idx,0] += deltaX # kf.state.refresh() # kf.state.render() # kf.state.X[idx,0] -= deltaX # # jz_gpu = cuda.jz(state) # jz_cpu = cuda.jz_CPU(state) # print idx, 'CPU:', jz_cpu, 'GPU:', jz_gpu, '% diff:', 100*abs(jz_gpu-jz_cpu)/(jz_cpu+eps) # #print 'Test j' #for i in range(20): # for j in range(20): # j_gpu = cuda.j(state, deltaX, i, j) # j_cpu = cuda.j_CPU(state, deltaX, i, j) # print i, j, 'CPU:', j_cpu, 'GPU:', j_gpu, '% diff:', 100*abs(j_gpu-j_cpu)/(j_cpu+eps) (e_im, e_fx, e_fy, e_m, fx, fy) = kf.compute(frame, flowframe, mask, imageoutput = './test_forces') print 'Error image:', e_im print 'Error flow x:', e_fx print 'Error flow y:', e_fy print 'Error mask:', e_m