            # Branch taken when the video info carried an explicit pose_id.
            # NOTE(review): the matching `if` is outside this chunk -- presumably
            # a lookup in self.info; confirm against the full file.
            logging.info('- found pose_id: %d' % self.pose_id)
        else:
            # No pose_id given for this video -- fall back to the default pose 0.
            self.pose_id = 0
            logging.info('- take default pose_id = 0')

    def __getitem__(self, key):
        """Dict-style read access to the underlying video info record."""
        return self.info[key]

    def __contains__(self, key):
        """Return True iff *key* is present in the video info record."""
        return True if key in self.info else False

    def build_camera(self):
        """Construct the Camera paired with this video (same dir and pose_id)."""
        return Camera(camera_dir=self.camera_dir, pose_id=self.pose_id)


if __name__ == "__main__":
    # Smoke test: load a known scene directory and build its camera.
    setupLogging('log/augmentation/Video.log', logging.DEBUG, 'w')
    video = Video(video_dir='augmentation/scenes/cam578/Mar15-10h')
    camera = video.build_camera()
    # Older manual-test code kept for reference:
    #video_file = 'augmentation/scenes/cam578/Mar15-10h/Mar15-10h.json'
    #video_info = json.load(open(atcity(video_file)))
    #video_info['video_dir'] = 'augmentation/scenes/cam578/Mar15-10h'
    #video.load(video_info=video_info)
    #assert video.example_background is not None
    #cv2.imshow('test', video.example_background)
    #cv2.waitKey(-1)
            # Per-car depth pass: hide the current car, un-hide car_name0, and
            # render only that car into the 'Depth' layer, then rename Blender's
            # fixed output name to a per-car file.
            # NOTE(review): loop header (iterating car_name / car_i0) is outside
            # this chunk -- confirm the pairing of car_name vs car_name0.
            bpy.data.objects[car_name].hide_render = True
            bpy.data.objects[car_name0].hide_render = False
            # render scene
            bpy.ops.render.render(write_still=True, layer='Depth')
            _rename(render_dir, 'depth0001', 'depth-%03d.png' % car_i0)

    # Optionally keep the .blend file for debugging the composed scene.
    if params['save_blender_files']:
        bpy.ops.wm.save_as_mainfile(
            filepath=atcity(op.join(render_dir, 'render.blend')))
    # logging.info ('objects in the end of frame: %d' % len(bpy.data.objects))
    logging.info('make_snapshot: successfully finished a frame')


# --- module-level script: executed inside Blender to render one frame ---
setupLogging('log/augmentation/renderScene.log', logging.INFO, 'a')

# Read the traffic description (vehicle poses etc.) produced upstream.
traffic_path = op.join(WORK_DIR, TRAFFIC_FILENAME)
logging.info('traffic_path: %s' % traffic_path)
frame_info = json.load(open(traffic_path))
setParamUnlessThere(frame_info, 'render_cars_as_cubes', False)

# place all cars
car_names = []
for i, vehicle in enumerate(frame_info['vehicles']):
    if frame_info['render_cars_as_cubes']:
        # Cheap stand-in geometry: a small cube at the vehicle's (x, y).
        location = (vehicle['x'], vehicle['y'], 0.1)
        bpy.ops.mesh.primitive_cube_add(location=location, radius=0.3)
    else:
        # Full CAD model path: look up the model in its collection.
        # NOTE(review): the body of this else continues beyond this chunk.
        collection_id = vehicle['collection_id']
        model_id = vehicle['model_id']
    # Tail of make_job: copy CLI arguments into the job dict.
    # NOTE(review): `job` is created earlier in make_job, outside this chunk.
    job['no_annotations'] = args.no_annotations
    job['traffic_file'] = args.traffic_file
    job['video_dir'] = args.video_info_dir
    # Default the output dir to wherever the traffic file lives, unless set.
    setParamUnlessThere(job, 'out_video_dir', op.dirname(args.traffic_file))
    return job


if __name__ == "__main__":
    # CLI driver: parse arguments, build the job dict, and process the video.
    parser = argparse.ArgumentParser()
    parser.add_argument('--save_blender_files', action='store_true')
    parser.add_argument('--logging_level', default=20, type=int)
    parser.add_argument('--timeout', type=int,
                        help='maximum running time, in munutes')
    parser.add_argument(
        '--no_annotations', action='store_true',
        help='will speed up rendering since individual cars wont be rendered')
    parser.add_argument('--frame_range', default='[::]',
                        help='python style ranges, e.g. "[5::2]"')
    parser.add_argument('--video_info_dir', required=True)
    parser.add_argument('--traffic_file', required=True)
    args = parser.parse_args()

    setupLogging('log/augmentation/ProcessVideo.log', args.logging_level, 'w')

    job = make_job(args)
    process_video(job)
# CLI setup for the traffic-generation script.
# NOTE(review): `parser` is constructed before this chunk begins.
parser.add_argument('--logging_level', default=20, type=int)
parser.add_argument('--frame_range', default='[::]',
                    help='python style ranges, e.g. "[5::2]"')
parser.add_argument('--in_db_file', required=True)
parser.add_argument('--video_dir', required=True)
parser.add_argument('--traffic_file', required=True,
                    help='output .json file where to write traffic info. '
                    'Can be "traffic.json" in video output dir.')
# Exactly one of --speed_kph / --num_cars must be given: the traffic model is
# parameterized either by vehicle speed or by vehicle count.
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--speed_kph', type=int)
group.add_argument('--num_cars', type=int)
args = parser.parse_args()

setupLogging('log/augmentation/GenerateTraffic.log', args.logging_level, 'w')

# Make sure the output directory for the traffic file exists.
if not op.exists(atcity(op.dirname(args.traffic_file))):
    os.makedirs(atcity(op.dirname(args.traffic_file)))

job = {
    'frame_range': args.frame_range,
    'in_db_file': args.in_db_file,
    'video_dir': args.video_dir,
    'out_video_dir': op.dirname(args.in_db_file)
}
# Forward whichever traffic parameter was supplied on the command line.
if args.speed_kph is not None:
    setParamUnlessThere(job, 'speed_kph', args.speed_kph)
elif args.num_cars is not None:
    setParamUnlessThere(job, 'num_cars', args.num_cars)
else:
    # NOTE(review): this else-body lies beyond this chunk; argparse's required
    # mutually-exclusive group should make it unreachable in practice.
''' generate lanes map with cars as icons for visualization ''' # make azimuth_map a 2D array img = self.azimuth_map.copy() #alpha, azimuth_map = azimuth_map[:,:,-1], azimuth_map[:,:,0] # put cars on top for v in self.vehicles: cv2.circle(img, (v['x'], v['y']), 5, (128, 128, 128), -1) return img if __name__ == "__main__": setupLogging('log/augmentation/traffic.log', logging.DEBUG, 'w') video_dir = 'augmentation/scenes/cam166/Feb23-09h' collection_names = [ '7c7c2b02ad5108fe5f9082491d52810', 'uecadcbca-a400-428d-9240-a331ac5014f6' ] timestamp = datetime.now() video = Video(video_dir) camera = video.build_camera() cad = Cad(collection_names) #model = TrafficModel (camera, video, cad=cad, speed_kph=10, burn_in=True) model = TrafficModelRandom(camera, video, cad, num_cars_mean=10)
import os, sys
sys.path.insert(0, os.path.join(os.getenv('CITY_PATH'), 'src'))
import logging

from learning.video2images import video2images
from learning.helperSetup import setupLogging, atcity

# Runner script: extract frames from one source video into a labelme dir.
setupLogging('log/learning/Video2Images.log', logging.INFO, 'a')

in_video_file = 'data/camdata/cam166/Feb14-08h/src.avi'
out_image_dir = 'data/labelme/try'
video2images(in_video_file, out_image_dir)