def __init__(self):
    args = parse_args(sys.argv[1], sys.argv[2])

    planar = glob.glob(sys.argv[1] + '/*_planar.npz')[0]
    npz = np.load(planar)
    self.planes = npz['planes']

    (self.imu_transforms_mk1,
     self.gps_data_mk1,
     self.gps_times_mk1) = get_transforms(args, 'mark1')
    (self.imu_transforms_mk2,
     self.gps_data_mk2,
     self.gps_times_mk2) = get_transforms(args, 'mark2')

    self.back_projector = BackProjector(args)
    self.vr = VideoReader(args['video'])
    self.t = 1

    cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback('image', self.mouseHandler)

    self.lanes = None
    self.markings = None
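# Illustrative sketch only (assumption, not from the original file): the real
# mouseHandler is defined elsewhere in this class. OpenCV invokes the callback
# registered via cv2.setMouseCallback as callback(event, x, y, flags, param),
# so a minimal stand-in could look like this:
def mouseHandler(self, event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        # (x, y) is the clicked pixel in the 'image' window
        print 'clicked at', x, y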
        # plt.show()

    def exportData(self, file_name):
        lanes = {}
        lanes['num_lanes'] = self.num_lanes
        lanes['planes'] = self.planes
        lanes['filt_ground'] = self.filt_ground[:self.filt_ground_idx]

        for num in xrange(self.num_lanes):
            lane = self.lanes[num]
            lanes['lane' + str(num)] = lane

        np.savez(file_name, **lanes)


if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    plane_file = args['fullname'] + '_ground.npz'
    lanes_file = sys.argv[1] + '/multilane_points.npz'

    pf = PlaneFitter(args, plane_file, lanes_file)

    for i in xrange(0, pf.pos.shape[0], 25):
    # for i in xrange(1000, 2000, 100):
        if i % 1000 == 0:
            print '%d/%d' % (i, pf.pos.shape[0])
        n, p0, err = pf.findGroundPlane(i, radius=25)

    pf.correctLanes()
    pf.exportData(sys.argv[1] + '/multilane_points_planar.npz')
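# Assumed usage sketch, not part of the original script: the archive written by
# exportData above can be read back with np.load. np.savez(file_name, **lanes)
# stores every dict key as a named array inside the .npz file.
import numpy as np

npz = np.load('multilane_points_planar.npz')
num_lanes = int(npz['num_lanes'])
planes = npz['planes']            # fitted ground planes
ground = npz['filt_ground']       # filtered ground points
lanes = [npz['lane' + str(n)] for n in xrange(num_lanes)]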
                if closest_pt_wrt_car[1] < 0:
                    if pt_right is None or norm < pt_right[0]:
                        pt_right = (norm, idx, lane_num)

            nearest_pts = np.array(
                (self.lanes[pt_left[-1]].pts[pt_left[1]],
                 self.lanes[pt_right[-1]].pts[pt_right[1]]))
            self.nearest_pts[pos_idx / step] = nearest_pts
            # print pos_idx
            # print nearest_pts

    def exportData(self, file_name):
        lanes = {}
        lanes['num_lanes'] = 2

        for num in xrange(2):
            lane = self.nearest_pts[:, num, :]
            lanes['lane' + str(num)] = lane

        np.savez(file_name, **lanes)
        print 'Saved', file_name


if __name__ == '__main__':
    args = parse_args(sys.argv[1], sys.argv[2])
    lanes_file = glob.glob(sys.argv[1] + '/*done.npz')[0]

    lc = LaneClassifier(args, lanes_file)
    lc.classifyNearbyLanes(1)
    lc.exportData(sys.argv[1] + '/test.npz')
            subprocess.call(untar_cmd.split())
            os.remove(new_file)

            params = local_folder + '/params.ini'
            folder_params = local_folder + '/' + folder_name + '/params.ini'
            shutil.copy(params, folder_params)

    configurator.set('organized', True)

if configurator.config['map'] == False:
    for run in sorted(glob.glob(local_folder + '/*/')):
        if os.path.isdir(run):
            print run
            base = run.split('/')[-2]
            temp_vid = base + '2.avi'
            args = parse_args(run, temp_vid)

            mb = MapBuilder(args, 1, 600, 0.5, 0.1)

            output = run + '/' + base + '_bg.npz'
            if not os.path.isfile(output):
                mb.buildMap(['no-trees'])
                mb.exportData(output)

            output = run + '/' + base + '_ground.npz'
            if not os.path.isfile(output):
                mb.buildMap(['no-trees', 'ground'])
                mb.exportData(output)

    configurator.set('map', True)

if configurator.config['multilane'] == False:
def __init__(self):
    if len(sys.argv) <= 2 or '--help' in sys.argv:
        print """Usage:
        {name} folder/ video.avi
        """.format(name=sys.argv[0])
        sys.exit(-1)

    args = parse_args(sys.argv[1], sys.argv[2])
    self.args = args

    bg_file = glob.glob(args['fullname'] + '*bg.npz')[0]
    print sys.argv

    self.small_step = 5
    self.large_step = 10
    self.startup_complete = False

    ##### Grab all the transforms ######
    self.absolute = False
    (self.imu_transforms_mk1,
     self.gps_data_mk1,
     self.gps_times_mk1) = get_transforms(args, 'mark1', self.absolute)
    (self.imu_transforms_mk2,
     self.gps_data_mk2,
     self.gps_times_mk2) = get_transforms(args, 'mark2', self.absolute)

    self.mk2_t = 0
    self.t = self.mk2_to_mk1()
    self.cur_imu_transform = self.imu_transforms_mk1[self.t, :, :]

    self.imu_kdtree = cKDTree(self.imu_transforms_mk1[:, :3, 3])

    self.params = args['params']
    self.lidar_params = self.params['lidar']
    self.T_from_i_to_l = np.linalg.inv(self.lidar_params['T_from_l_to_i'])

    cam_num = args['cam_num']
    self.cam_params = self.params['cam'][cam_num]

    # Load the MobilEye file
    self.mbly_loader = MblyLoader(args)
    self.mbly_rot = [0.0, -0.005, -0.006]
    self.mbly_T = [5.4, 0.0, -1.9]

    # Is the flyover running
    self.running = False
    # Has the user changed the time
    self.manual_change = 0

    ###### Set up the renderers ######
    self.cloud_ren = vtk.vtkRenderer()
    self.cloud_ren.SetViewport(0, 0, 1.0, 1.0)
    self.cloud_ren.SetBackground(0, 0, 0)

    self.img_ren = vtk.vtkRenderer()
    # self.img_ren.SetViewport(0.7, 0.0, 1.0, 0.37)
    self.img_ren.SetViewport(0.6, 0.6, 1.0, 1.0)
    self.img_ren.SetInteractive(False)
    self.img_ren.SetBackground(0.1, 0.1, 0.1)

    self.win = vtk.vtkRenderWindow()
    self.win.StereoCapableWindowOff()
    self.win.AddRenderer(self.cloud_ren)
    self.win.AddRenderer(self.img_ren)
    self.win.SetSize(800, 400)

    self.iren = vtk.vtkRenderWindowInteractor()
    self.iren.SetRenderWindow(self.win)

    ###### Cloud Actors ######
    print 'Adding raw points'
    raw_npz = np.load(bg_file)
    pts = raw_npz['data']

    raw_cloud = VtkPointCloud(pts[:, :3], np.ones(pts[:, :3].shape) * 255)
    raw_actor = raw_cloud.get_vtk_color_cloud()
    self.raw_lidar = VTKCloudTree(raw_cloud, raw_actor, build_tree=False)
    self.raw_lidar_2d = DataTree(self.raw_lidar.pts[:, :-1], build_tree=False)
    self.raw_lidar.actor.GetProperty().SetPointSize(2)
    self.raw_lidar.actor.GetProperty().SetOpacity(0.1)
    self.raw_lidar.actor.SetPickable(0)
    self.cloud_ren.AddActor(self.raw_lidar.actor)

    print 'Adding car'
    self.car = load_ply('../mapping/viz/gtr.ply')
    self.car.SetPickable(0)
    self.car.GetProperty().LightingOff()
    self.cloud_ren.AddActor(self.car)

    self.mbly_vtk_boxes = []
    # Car: 0, Truck: 1, Bike: 2, Other: 3-7
    red = np.array((1, 0, 0))
    green = np.array((0, 1, 0))
    blue = np.array((0, 0, 1))
    white = red + green + blue
    self.mbly_obj_colors = [red, green, blue, white]

    self.mbly_vtk_lanes = []
    # Dashed: 0, Solid: 1, undecided: 2, Edge: 3, Double: 4, Botts_Dots: 5
    self.mbly_lane_color = [green, green, red, blue, white, green]
    self.mbly_lane_size = [2, 3, 1, 1, 2, 1]
    self.mbly_lane_subsamp = [20, 1, 1, 1, 1, 40]

    # Use our custom mouse interactor
    self.interactor = LaneInteractorStyle(self.iren, self.cloud_ren, self)
    self.iren.SetInteractorStyle(self.interactor)

    ###### 2D Projection Actors ######
    self.video_reader = VideoReader(args['video'])
    self.img_actor = None
    self.I = None

    ###### Add Callbacks ######
    print 'Rendering'
    self.iren.Initialize()

    # Set up time
    self.iren.AddObserver('TimerEvent', self.update)
    self.timer = self.iren.CreateRepeatingTimer(10)

    self.iren.Start()
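# Illustrative sketch (assumption, not from the original file): the real update
# method registered above lives elsewhere in this class. VTK calls timer
# observers as callback(caller, event), so the bound method receives the
# interactor and the event name string.
def update(self, iren, event):
    if self.running:
        self.mk2_t += self.small_step   # advance the playback time
    iren.GetRenderWindow().Render()     # redraw both renderers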
            subprocess.call(untar_cmd.split())
            os.remove(new_file)

            params = local_folder + '/params.ini'
            folder_params = local_folder + '/' + folder_name + '/params.ini'
            shutil.copy(params, folder_params)

    configurator.set('organized', True)

if configurator.config['map'] == False:
    for run in sorted(glob.glob(local_folder + '/*/')):
        if os.path.isdir(run):
            print run
            base = run.split('/')[-2]
            temp_vid = base + '604.zip'
            args = parse_args(run, temp_vid)

            mb = MapBuilder(args, 1, 600, 0.5, 0.1)

            output = run + '/' + base + '_bg.npz'
            if not os.path.isfile(output):
                mb.buildMap(['no-trees'])
                mb.exportData(output)

            output = run + '/' + base + '_ground.npz'
            if not os.path.isfile(output):
                mb.buildMap(['no-trees', 'ground'])
                mb.exportData(output)

    configurator.set('map', True)

if configurator.config['multilane'] == False:
def __init__(self):
    self.start = 0
    self.step = 5
    self.end = self.step * 500
    self.count = 0

    self.ren = vtk.vtkRenderer()

    args = parse_args(sys.argv[1], sys.argv[2])

    # Transforms
    self.imu_transforms = get_transforms(args)
    self.trans_wrt_imu = self.imu_transforms[
        self.start:self.end:self.step, 0:3, 3]
    self.params = args['params']
    self.lidar_params = self.params['lidar']

    ml = MultiLane(sys.argv[3], sys.argv[4], 2, 2)
    ml.extendLanes()
    saveInterp(ml.interp, ml.rightLanes + ml.leftLanes)
    ml.filterLaneMarkings()

    print 'Adding filtered points'
    pts = ml.lanes.copy()
    raw_cloud = VtkPointCloud(pts[:, :3], pts[:, 4])
    raw_actor = raw_cloud.get_vtk_cloud(zMin=0, zMax=100)
    self.ren.AddActor(raw_actor)

    try:
        npz = np.load('cluster.npz')
        print 'Loading clusters from file'
        ml.lanes = npz['data']
        ml.times = npz['t']
    except IOError:
        print 'Clustering points'
        ml.clusterLanes()
        ml.saveLanes('cluster.npz')
        ml.sampleLanes()

    print 'Adding clustered points'
    clusters = ml.lanes.copy()
    cluster_cloud = VtkPointCloud(clusters[:, :3], clusters[:, -2])
    cluster_actor = cluster_cloud.get_vtk_cloud(zMin=0, zMax=4)
    cluster_actor.GetProperty().SetPointSize(10)
    self.ren.AddActor(cluster_actor)

    print 'Interpolating lanes'
    ml.interpolateLanes()
    interp_lanes = ml.interp_lanes.copy()
    interp_lanes_cloud = VtkPointCloud(interp_lanes[:, :3], interp_lanes[:, 3])
    interp_lanes_actor = interp_lanes_cloud.get_vtk_cloud(zMin=0, zMax=4)
    self.ren.AddActor(interp_lanes_actor)

    # ml.fixMissingPoints()
    # saveClusters(ml.lanes, ml.times, -1, 5)

    print 'Adding car'
    self.car = load_ply('../mapping/viz/gtr.ply')
    self.ren.AddActor(self.car)
    self.car.GetProperty().LightingOff()

    print 'Rendering'
    self.ren.ResetCamera()

    self.win = vtk.vtkRenderWindow()
    self.ren.SetBackground(0, 0, 0)
    self.win.AddRenderer(self.ren)
    self.win.SetSize(800, 400)

    self.iren = vtk.vtkRenderWindowInteractor()
    self.iren.SetRenderWindow(self.win)
    mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
    self.iren.SetInteractorStyle(mouseInteractor)
    self.iren.Initialize()

    # Whether to write video
    self.record = False

    # Set up time
    self.iren.AddObserver('TimerEvent', self.update)
    self.timer = self.iren.CreateRepeatingTimer(100)

    # Add keypress event
    self.iren.AddObserver('KeyPressEvent', self.keyhandler)
    self.mode = 'ahead'

    self.iren.Start()
import cv2
import glob
import sys

from ArgParser import parse_args
from LaneClassLabeler import ImageLoader

from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def hello():
    return render_template('main.html', test='Hello',
                           video_src="static/test.jpg",
                           generated_src='static/test2.jpg')


if __name__ == '__main__':
    args = parse_args('../process/data/4-20-14-280/280N_a', '280N_a2.avi')
    il = ImageLoader(args)
    I = il.nextFrame(10)
    cv2.imwrite('static/test.jpg', I)

    app.run(debug=True)
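# Assumed usage note (not in the original file): app.run(debug=True) serves
# Flask's development server on http://127.0.0.1:5000/ by default, so once the
# frame has been written to static/test.jpg, opening http://127.0.0.1:5000/
# renders main.html with that image via the '/' route above.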