def main():
    # Create a ZED Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters.
    # Positional tracking needs to be enabled before using spatial mapping
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable spatial mapping
    mapping_parameters = sl.SpatialMappingParameters()
    err = zed.enable_spatial_mapping(mapping_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Grab data during 500 frames
    i = 0
    py_mesh = sl.Mesh()  # Create a Mesh object
    runtime_parameters = sl.RuntimeParameters()

    while i < 500:
        # For each new grab, mesh data is updated
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # In the background, spatial mapping will use newly retrieved images, depth and pose to update the mesh
            mapping_state = zed.get_spatial_mapping_state()
            print("\rImages captured: {0} / 500 || {1}".format(i, mapping_state))
            i = i + 1
    print("\n")

    # Extract, filter and save the mesh in an obj file
    print("Extracting Mesh...\n")
    zed.extract_whole_mesh(py_mesh)
    print("Filtering Mesh...\n")
    py_mesh.filter(sl.MeshFilterParameters())  # Filter the mesh (remove unnecessary vertices and faces)
    print("Saving Mesh...\n")
    py_mesh.save("mesh.obj")

    # Disable tracking and mapping and close the camera
    zed.disable_spatial_mapping()
    zed.disable_tracking()
    zed.close()

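# --- Sketch: imports assumed by the snippets in this file -------------------
# The functions above and below rely on the ZED Python bindings being
# available as `sl`, plus `sys` and `time`. A minimal scaffold follows; the
# package name `pyzed.sl` and the __main__ guard are assumptions, not part of
# the original snippets.
import sys
import time

import pyzed.sl as sl  # ZED SDK Python API

if __name__ == "__main__":
    main()
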
def main():
    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()
    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()  #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)
            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            print("\nFINISH")
            raise

def main():
    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters()
    init.input = input_type

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)

    cam.enable_positional_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    for i in range(200):
        cam.grab(runtime)
        cam.request_spatial_map_async()

    cam.extract_whole_spatial_map(pymesh)
    cam.disable_positional_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
    apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)
    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")

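# --- Sketch: hypothetical implementations of the helpers used above ---------
# print_mesh_information, save_filter and save_mesh are called by several
# snippets in this file but are not defined here. The versions below are
# minimal sketches, not the original helpers: Mesh.vertices, Mesh.triangles
# and Mesh.save() follow the ZED Python API, MeshFilterParameters.save() is
# assumed to exist as in the C++ API, and the output file names are arbitrary.
def print_mesh_information(pymesh, apply_texture):
    # Report basic mesh statistics and whether texturing succeeded
    print("Mesh vertices  : {0}".format(len(pymesh.vertices)))
    print("Mesh triangles : {0}".format(len(pymesh.triangles)))
    print("Texture applied: {0}".format(apply_texture))


def save_filter(filter_params, path="mesh_filter.txt"):
    # Keep a record of the filtering preset next to the mesh
    saved = filter_params.save(path)
    print("Filter parameters saved: {0}".format(saved))


def save_mesh(pymesh, path="mesh.obj"):
    # Write the mesh to disk as an .obj file
    if pymesh.save(path):
        print("Mesh saved under {0}".format(path))
    else:
        print("Failed to save the mesh under {0}".format(path))
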
def test():
    zed = sl.Camera()
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters
    zed.open(init_params)

    # Configure spatial mapping parameters
    mapping_parameters = sl.SpatialMappingParameters(
        sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        sl.MAPPING_RANGE.MAPPING_RANGE_FAR)
    mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_MESH
    mapping_parameters.save_texture = True
    filter_params = sl.MeshFilterParameters()  # not available for fused point cloud
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_LOW)  # not available for fused point cloud

    # Enable tracking and mapping
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    zed.enable_tracking(tracking_parameters)
    zed.enable_spatial_mapping(mapping_parameters)

    mesh = sl.Mesh()  # Create a mesh object
    timer = 0
    runtime_parameters = sl.RuntimeParameters()
    print("Getting Frames...")
    # Grab 500 frames and stop
    while timer < 500:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # When grab() = SUCCESS, a new image, depth and pose is available.
            # Spatial mapping automatically ingests the new data to build the mesh.
            timer += 1

    print("Saving...")
    # Retrieve the spatial map
    zed.extract_whole_spatial_map(mesh)
    # Filter the mesh
    mesh.filter(filter_params)  # not available for fused point cloud
    # Apply the texture
    mesh.apply_texture()  # not available for fused point cloud
    # Save the mesh in .obj format
    mesh.save("mesh.obj")
    print("Done")

def __init__(self):
    self.maps = list()
    #self.map = []
    self.mesh = sl.Mesh()
    print("Finding ZED...")
    self.zed = sl.Camera()
    self.translation = [0, 0, 0]
    self.orientation = [0, 0, 0, 0]
    self.init_params = sl.InitParameters()
    py_transform = sl.Transform()
    trackparms = sl.TrackingParameters(init_pos=py_transform)
    trackparms.enable_pose_smoothing = True
    self.tracking_params = trackparms
    self.mapping_params = sl.SpatialMappingParameters(
        resolution=sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        mapping_range=sl.MAPPING_RANGE.MAPPING_RANGE_FAR,
        save_texture=True)
    self.filter_params = sl.MeshFilterParameters()
    self.runtime_params = sl.RuntimeParameters()
    print("Starting ZED...")
    self.start_camera()
    self.update_pose()
    self.has_requested_map = False
    self.last_update_time = time.time()

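# --- Sketch: start_camera() and update_pose() are referenced above but the
# enclosing class is not shown. The methods below are minimal sketches based
# on the ZED API calls used elsewhere in this file, not the original code.
def start_camera(self):
    # Open the camera, then enable positional tracking and spatial mapping
    status = self.zed.open(self.init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)
    self.zed.enable_tracking(self.tracking_params)
    self.zed.enable_spatial_mapping(self.mapping_params)

def update_pose(self):
    # Grab a frame and cache the current camera translation and orientation
    if self.zed.grab(self.runtime_params) == sl.ERROR_CODE.SUCCESS:
        pose = sl.Pose()
        self.zed.get_position(pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        translation = pose.get_translation(sl.Translation())
        orientation = pose.get_orientation(sl.Orientation())
        self.translation = [translation.get()[i] for i in range(3)]
        self.orientation = [orientation.get()[i] for i in range(4)]
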
# Grab data during 3000 frames
i = 0
py_mesh = sl.Mesh()  # Create a Mesh object
runtime_parameters = sl.RuntimeParameters()
while i < 3000:
    if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
        # In background, spatial mapping will use new images, depth and pose to create and update the mesh. No specific functions are required here.
        mapping_state = zed.get_spatial_mapping_state()
        # Print spatial mapping state
        print("\rImages captured: {0} / 3000 || {1}".format(i, mapping_state))
        i = i + 1

err = zed.extract_whole_spatial_map(py_mesh)  # Extract the whole mesh
filter_params = sl.MeshFilterParameters()
filter_params.set(sl.MESH_FILTER.LOW)
py_mesh.filter(filter_params)  # Filter the mesh (remove unnecessary vertices and faces)
py_mesh.save("mesh.obj")  # Save the mesh in an obj file

# Disable tracking and mapping and close the camera
zed.disable_spatial_mapping()
zed.disable_positional_tracking()
zed.close()

def main():
    if not sys.argv or len(sys.argv) != 2:
        print("Only the path of the output SVO file should be passed as argument.")
        exit(1)

    cam = sl.Camera()
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters
    # new for SVO
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    ##

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # from Positional Tracking:
    # Track the camera position until Keyboard Interrupt (ctrl-C)
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    # END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")

    # new for SVO
    path_output = sys.argv[1]
    err = cam.enable_recording(path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0
    ##

    while True:
        try:
            cam.grab(runtime)
            # new for SVO
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")
            ##
            cam.request_mesh_async()

            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz, zed_pose.timestamp))
        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            # new for .svo
            cam.disable_recording()
            ##
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)
            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            position_file.close()
            save_position(path)
            print("\nFINISH")
            raise

def main():
    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()
    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()  #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # from Positional Tracking:
    # Track the camera position until Keyboard Interrupt (ctrl-C)
    #i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    # END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()

            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz, zed_pose.timestamp))
        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            #print_mesh_information(pymesh, apply_texture)
            #save_filter(filter_params)
            #save_mesh(pymesh)
            cam.close()
            position_file.close()
            #save_position(path)
            save_all(filter_params, pymesh, path)
            print("\nFINISH")
            raise

def main():
    print("Running Spatial Mapping sample ... Press 'q' to quit")

    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.coordinate_units = sl.UNIT.METER  # Set coordinate units
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinates

    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Using SVO file: {0}".format(filepath))
        init_params.set_from_svo_file(filepath)

    # Open the camera
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    # Get camera parameters
    camera_parameters = zed.get_camera_information().camera_configuration.calibration_parameters.left_cam

    pymesh = sl.Mesh()  # Current incremental mesh
    image = sl.Mat()    # Left image from camera
    pose = sl.Pose()    # Camera pose tracking data

    viewer = gl.GLViewer()
    viewer.init(camera_parameters, pymesh)

    spatial_mapping_parameters = sl.SpatialMappingParameters()
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF
    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
    mapping_activated = False
    last_call = time.time()  # Timestamp of last mesh request

    # Enable positional tracking
    err = zed.enable_positional_tracking()
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit()

    # Set runtime parameters
    runtime = sl.RuntimeParameters()

    while viewer.is_available():
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)

            if mapping_activated:
                mapping_state = zed.get_spatial_mapping_state()
                # Compute elapsed time since the last call of Camera.request_spatial_map_async()
                duration = time.time() - last_call
                # Ask for a mesh update if 500ms elapsed since last request
                if duration > .5 and viewer.chunks_updated():
                    zed.request_spatial_map_async()
                    last_call = time.time()

                if zed.get_spatial_map_request_status_async() == sl.ERROR_CODE.SUCCESS:
                    zed.retrieve_spatial_map_async(pymesh)
                    viewer.update_chunks()

            change_state = viewer.update_view(image, pose.pose_data(), tracking_state, mapping_state)

            if change_state:
                if not mapping_activated:
                    init_pose = sl.Transform()
                    zed.reset_positional_tracking(init_pose)

                    # Configure spatial mapping parameters
                    spatial_mapping_parameters.resolution_meter = sl.SpatialMappingParameters().get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
                    spatial_mapping_parameters.use_chunk_only = True
                    spatial_mapping_parameters.save_texture = False  # Set to True to apply texture over the created mesh
                    spatial_mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.MESH

                    # Enable spatial mapping
                    zed.enable_spatial_mapping()

                    # Clear previous mesh data
                    pymesh.clear()
                    viewer.clear_current_mesh()

                    # Start timer
                    last_call = time.time()

                    mapping_activated = True
                else:
                    # Extract whole mesh
                    zed.extract_whole_spatial_map(pymesh)

                    filter_params = sl.MeshFilterParameters()
                    filter_params.set(sl.MESH_FILTER.MEDIUM)
                    # Filter the extracted mesh
                    pymesh.filter(filter_params, True)
                    viewer.clear_current_mesh()

                    # If textures have been saved during spatial mapping, apply them to the mesh
                    if spatial_mapping_parameters.save_texture:
                        print("Save texture set to : {}".format(spatial_mapping_parameters.save_texture))
                        pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)

                    # Save mesh as an obj file
                    filepath = "mesh_gen.obj"
                    status = pymesh.save(filepath)
                    if status:
                        print("Mesh saved under " + filepath)
                    else:
                        print("Failed to save the mesh under " + filepath)

                    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
                    mapping_activated = False

    image.free(memory_type=sl.MEM.CPU)
    pymesh.clear()

    # Disable modules and close camera
    zed.disable_spatial_mapping()
    zed.disable_positional_tracking()
    zed.close()

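# --- Note: the GLViewer used in the sample above comes from the OpenGL helper
# module shipped with the ZED SDK Python samples; the import path below is an
# assumption based on those samples, not part of this snippet.
# import ogl_viewer.viewer as gl
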
def main():
    if len(sys.argv) != 3:
        print("Please specify collection time (seconds), and path to save files")
        exit()

    max_time = sys.argv[1]
    print(max_time)
    path = sys.argv[2]
    print(path)

    # delay program 60 sec, so that user can get to start location
    print("\nYou have 60 seconds to get to start location before program will begin")
    time.sleep(60)

    print("\nInitializing camera")
    cam = sl.Camera()
    init = sl.InitParameters()
    # Use HD720 video mode (default fps: 60) and a right-handed Y-up coordinate system
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    # Set units in meters
    init.coordinate_units = sl.UNIT.UNIT_METER

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # for Positional Tracking:
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    # temporary file to save translation data to, until you know the filename (given from the user)
    #path = '/media/nvidia/SD1/translation.csv'
    position_file = open((path + ".csv"), 'w+')

    pymesh = sl.Mesh()
    print("Camera setup")

    # get start time
    start_time = time.time()
    print("Starting to collect data")
    while (time.time() - start_time) < float(max_time):
        cam.grab(runtime)
        cam.request_mesh_async()

        # Get the pose of the left eye of the camera with reference to the world frame
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

        # Display the translation and timestamp
        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
        position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz, zed_pose.timestamp))

    print("Finished collecting data, extracting mesh, saving and shutting down camera")
    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
    apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    #print_mesh_information(pymesh, apply_texture)
    #save_filter(filter_params)
    #save_mesh(pymesh)

    cam.close()
    position_file.close()
    #save_position(path)
    save_all_path_arg(filter_params, pymesh, path)
    print("\nFINISH")

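# --- Sketch: save_all_path_arg() (and the similar save_all() used earlier) is
# not defined in these snippets. A minimal sketch follows, assuming it simply
# writes the mesh next to the translation .csv; the .obj extension and the
# decision not to persist filter_params separately are assumptions.
def save_all_path_arg(filter_params, pymesh, path):
    # Save the mesh under the caller-supplied base path
    if pymesh.save(path + ".obj"):
        print("Mesh saved under {0}.obj".format(path))
    else:
        print("Failed to save the mesh under {0}.obj".format(path))
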