def main(filename=None): if filename is None: if (len(sys.argv) < 2): print "python radar_tracker.py <.pal file>" sys.exit() filename = sys.argv[1] use_turked = filename.find('-track') != -1 \ and os.path.exists(filename.replace('-track','')) print "Loading annotation file . . ." annotations_tracked = parse(filename) absolute_path_to_relative_path(annotations_tracked) if use_turked: annotations_turked = parse(filename.replace('-track', '')) copy_from_turked(annotations_turked, annotations_tracked) print len(annotations_tracked) # args = parse_args(sys.argv[2], sys.argv[3]) data_folder = os.path.dirname( image_name_to_map_file(annotations_tracked[0].imageName)) args = { 'cam_num': 2, 'params': LoadParameters( open(data_folder + '/params.ini', 'r').readline().rstrip()) } args['video'] = image_name_to_map_file( annotations_tracked[0].imageName).replace('.map', '2.avi') params = args['params'] video_reader = VideoReader(args['video']) #print "Loading file %s" % args['map'] #rdr_map = loadRDRCamMap(args['map']) rdr_map = load_all_radar_info(annotations_tracked) set_annotations_ids_using_radar(annotations_tracked, rdr_map, args) fill_gaps(annotations_tracked, rdr_map) for annotation in annotations_tracked: for rect in annotation.rects: rect.classID = int(rect.classID) if use_turked: copy_from_tracked(annotations_turked, annotations_tracked) save_filename = filename.split('.')[0] + "-with-depth.pal" save(save_filename, annotations_tracked) if use_turked: save_filename = filename.split('.')[0].replace('-track', '') + "-with-depth.pal" save(save_filename, annotations_turked) # compute_statistics(annotations_turked, rdr_map); show_3D(annotations_tracked, rdr_map, args, True, False, True)
def trackbarOnchange(t, prev_t):
    # Seek only when the trackbar jumped by more than one frame; single-step
    # advances are handled by the normal playback loop. Relies on the
    # module-level `video_reader` created in the __main__ block below.
    if abs(t - prev_t) > 1:
        video_reader.setFrame(t)


if __name__ == '__main__':
    # argv: [1] data folder, [2] video filename, [3] .npz map file
    args = parse_args(sys.argv[1], sys.argv[2])
    # Camera number is the digit just before '.avi' in the video filename,
    # e.g. 'run_2.avi' -> 2.
    cam_num = int(sys.argv[2][-5])
    video_file = args['video']
    params = args['params']
    # params['cam'] is 0-indexed; camera numbering starts at 1.
    cam = params['cam'][cam_num - 1]
    video_reader = VideoReader(video_file)
    gps_reader = GPSReader(args['gps'])
    GPSData = gps_reader.getNumericData()
    imu_transforms = IMUTransforms(GPSData)
    # Inverse of the lidar->IMU extrinsic: transforms IMU frame -> lidar frame.
    T_from_i_to_l = np.linalg.inv(params['lidar']['T_from_l_to_i'])
    all_data = np.load(sys.argv[3])
    map_data = all_data['data']
    #map_data = map_data[map_data[:,3] > 60, :]
    # map points are defined w.r.t the IMU position at time 0
    # each entry in map_data is (x,y,z,intensity,framenum).
    print "Hit 'q' to quit"
    trackbarInit = False
print(ratio_d) if __name__ == "__main__": if (len(sys.argv) < 4): print "python radar_tracker.py <.al file> <directory of .avi> <video file>" sys.exit() filename = (sys.argv[1]) print "Loading annotation file . . ." annotations = parseXML(filename) print len(annotations) args = parse_args(sys.argv[2], sys.argv[3]) params = args['params'] video_reader = VideoReader(args['video']) rdr_map = loadRDRCamMap(args['map']) set_annotations_ids_using_radar(annotations, rdr_map, args) fill_gaps(annotations, rdr_map) for annotation in annotations: for rect in annotation.rects: rect.classID = int(rect.classID) save_filename = filename.split('.')[0] + "_new.al" saveXML(save_filename, annotations) # compute_statistics(annotations, rdr_map); # show_3D(annotations, rdr_map,args, True, False, True); # print "Writing new annotations into a file..." # saveXML(filename.split('.')[1] + "_with_distance.al", annotations);
t = time.time() count += 1 if count > 1: iren.GetRenderWindow().GetRenderers().GetFirstRenderer().RemoveActor(self.actor) self.actor = actor iren.GetRenderWindow().GetRenderers().GetFirstRenderer().AddActor(self.actor) if count == 1: iren.GetRenderWindow().GetRenderers().GetFirstRenderer().ResetCamera() iren.GetRenderWindow().GetRenderers().GetFirstRenderer().GetActiveCamera().Zoom(1.6) iren.GetRenderWindow().Render() print 'render:', time.time() - t if __name__ == '__main__': reader = VideoReader(sys.argv[1]) renderWindow = vtk.vtkRenderWindow() renderWindow.SetSize(1280/2, 960/2) interactor = vtk.vtkRenderWindowInteractor() interactor.SetRenderWindow(renderWindow) renderer = vtk.vtkRenderer() renderWindow.AddRenderer(renderer) renderWindow.Render() cb = ImageGrabberCallback(reader) interactor.AddObserver('TimerEvent', cb.execute) timerId = interactor.CreateRepeatingTimer(1) interactor.Start()
T_to_w_from_i[0:3, 0:3] = R_to_i_from_w(roll_0, pitch_0, yaw_0).transpose() enu_wrt_imu_0 = np.dot(T_to_w_from_i, pos_wrt_imu_0).transpose() # ENU w.r.t imu at time 0 -> WGS84 wgs84_pts = ENUtoWGS84(gps_dat[0, 1:4], enu_wrt_imu_0[:, 0:3]).transpose() return wgs84_pts if __name__ == '__main__': video_filename = sys.argv[1] path, vfname = os.path.split(video_filename) vidname = vfname.split('.')[0] cam_num = int(vidname[-1]) gps_filename = path + '/' + vidname[0:-1] + '_gps.out' video_reader = VideoReader(video_filename, num_splits=1) gps_reader = GPSReader(gps_filename) gps_dat = gps_reader.getNumericData() cam = getCameraParams()[cam_num - 1] labels = loadmat(sys.argv[2]) lp = labels['left'] rp = labels['right'] l_gps_coord = pixelToGPS(lp, gps_dat, cam) r_gps_coord = pixelToGPS(rp, gps_dat, cam) print "gps_dat: " print gps_dat[:, 1:4] print "l_gps_dat: " print l_gps_coord
from VideoReader import * from ArgParser import * import sys, cv2 if __name__ == '__main__': args = parse_args(sys.argv[1], sys.argv[2]) write = True fourcc = cv2.cv.CV_FOURCC(*'MPG1') vid_name = '/deep/u/jkiske/test.mpg' writer = None fnum = 0 with VideoReader(args['video']) as reader: while True: (success, I) = reader.getNextFrame() if not success: break if write == True: if writer == None: writer = cv2.VideoWriter(vid_name, fourcc, 50, (I.shape[1], I.shape[0])) writer.write(I) fnum += 1 print fnum if fnum >= 50: break else: cv2.imshow('video', I) cv2.waitKey(1)
#possible to do other filters such as removing objects beyond certain distance, height filters etc #mask = disp > disp.min() return points if __name__ == '__main__': args = parse_args(sys.argv[1], sys.argv[2]) cam_num = int(sys.argv[2][-5]) params = args['params'] assert (cam_num == 1) video_file_left = args['video'] video_file_right = args['opposite_video'] video_reader_left = VideoReader(video_file_left) video_reader_right = VideoReader(video_file_right) fnum = 0 while True: for t in range(10): (successL, imgL) = video_reader_left.getNextFrame() (successR, imgR) = video_reader_right.getNextFrame() """ WRITEDIR = 'data/' imgL = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY) imgR = cv2.cvtColor(imgR, cv2.COLOR_RGB2GRAY) cv2.imwrite(WRITEDIR+ 'left_%06d.png' % fnum, imgL) cv2.imwrite(WRITEDIR+ 'right_%06d.png' % fnum, imgR) fnum+=1;
#print (rx,ry,rz) if __name__ == '__main__': vfname = sys.argv[2] vidname = vfname.split('.')[0] vidname2 = vidname[:-1] + '2' video_filename2 = sys.argv[1] + '/' + vidname2 + '.avi' args = parse_args(sys.argv[1], sys.argv[2]) gps_reader = GPSReader(args['gps']) params = args['params'] cam1 = params['cam'][0] cam2 = params['cam'][1] video_reader1 = VideoReader(args['video']) video_reader2 = VideoReader(video_filename2) gps_reader = GPSReader(args['gps']) GPSData = gps_reader.getNumericData() imu_transforms = IMUTransforms(GPSData) ldr_map = loadLDRCamMap(args['map']) if '--full' in sys.argv: total_num_frames = GPSData.shape[0] start_fn = 0 step = 10 num_fn = int(total_num_frames / step) # this has been flipped for the q50 cloud_r.SetBackground(0., 0., 0.)
from VideoReader import *

# Inner-corner grid of the calibration checkerboard (rows, cols).
patternShape = (7,10)
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

if __name__ == '__main__':
    # argv: [1] camera-1 video path, [2] output directory, [3] output name.
    video_filename = sys.argv[1]
    path, vfname = os.path.split(video_filename)
    # Camera-2 video sits next to camera 1 with '1' -> '2' in the filename.
    # NOTE(review): replace('1','2') swaps EVERY '1' in the name, not just the
    # camera digit — verify filenames contain a single '1'.
    vfname_2 = vfname.replace('1','2')
    outdir = sys.argv[2]
    outname = sys.argv[3]
    cv2.namedWindow('video')
    video_reader_cam_1 = VideoReader(video_filename)
    video_reader_cam_2 = VideoReader(path + '/' + vfname_2)
    framenum = 0
    # Detected checkerboard corners and per-camera collections used for
    # stereo calibration.
    right_corners = None
    left_corners = None
    right_imgpoints = [ ]
    left_imgpoints = [ ]
    right_frames = [ ]
    left_frames = [ ]
    firstTime = True
    ct = 1;
    fileList = open(outdir+'/filelist.txt', 'w');
    while True:
from VideoReader import *
from ArgParser import *
import sys, cv2

"""
usage:
python PlayStereoVideo.py <path_to_data> <basename><camera_num>.avi

example:
python PlayStereoVideo.py /scail/group/deeplearning/driving_data/q50_data/4-2-14-monterey/ 17N_b1.avi

With this command reader1 plays 17N_b1.avi and reader2 plays 17N_b2.avi.
Passing 17N_b2.avi instead makes reader1 play camera 2 and reader2 camera 1 —
reader1 is simply whichever video was named on the command line, not
necessarily camera 1.
"""

if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    # Pair the named video with its opposite-camera counterpart.
    reader1 = VideoReader(args['video'])
    reader2 = VideoReader(args['opposite_video'])
    while True:
        ok1, frame1 = reader1.getNextFrame()
        ok2, frame2 = reader2.getNextFrame()
        # Stop as soon as either stream runs out of frames.
        if not (ok1 and ok2):
            break
        # Half-resolution display of both cameras side by side.
        cv2.imshow('video1', cv2.pyrDown(frame1))
        cv2.imshow('video2', cv2.pyrDown(frame2))
        cv2.waitKey(5)
def __init__(self, video_file, cam_params):
    """Open `video_file` and remember its camera parameters.

    The camera number is taken from the filename: the digit immediately
    before the 4-character extension (e.g. '..._2.avi' -> 2).
    """
    _, basename = os.path.split(video_file)
    self.cam_num = int(basename[-5])
    self.video_reader = VideoReader(video_file)
    self.cam = cam_params