def update_map_blocking(self):
    """Synchronously refresh ``self.mesh`` from the camera's spatial map.

    Blocks until the whole spatial map has been extracted, then filters it
    with ``self.filter_params`` (filtering is not available for fused point
    clouds) and stores the result on the instance.
    """
    extracted = sl.Mesh()
    # This call blocks until the SDK has assembled the full map.
    self.zed.extract_whole_spatial_map(extracted)
    extracted.filter(self.filter_params)
    self.mesh = extracted
def main():
    """Run live spatial mapping until Ctrl-C, then extract, filter, texture and save the mesh.

    The capture loop spins forever; a KeyboardInterrupt triggers mesh
    extraction and cleanup, then re-raises so the interrupt still terminates
    the process. Relies on module-level helpers ``print_mesh_information``,
    ``save_filter`` and ``save_mesh`` defined elsewhere in this file.
    """
    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()
    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))
    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()
    #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)
    # Tracking must be enabled before spatial mapping.
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)
    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            # Grab return codes are intentionally ignored; mapping ingests
            # frames in the background.
            cam.grab(runtime)
            cam.request_mesh_async()
        except KeyboardInterrupt:
            # Ctrl-C: finalize the mesh, clean up, then re-raise below.
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)
            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            print("\nFINISH")
            # Re-raise KeyboardInterrupt so the process actually exits.
            raise
def main():
    """Capture 500 frames with spatial mapping enabled, then extract,
    filter and save the resulting mesh to ``mesh.obj``."""
    camera = sl.Camera()

    # HD720 video mode, right-handed Y-up coordinates, units in meters.
    config = sl.InitParameters()
    config.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    config.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    config.coordinate_units = sl.UNIT.UNIT_METER

    if camera.open(config) != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Positional tracking must be running before spatial mapping can start.
    tracking_config = sl.TrackingParameters(init_pos=sl.Transform())
    if camera.enable_tracking(tracking_config) != sl.ERROR_CODE.SUCCESS:
        exit(1)
    if camera.enable_spatial_mapping(sl.SpatialMappingParameters()) != sl.ERROR_CODE.SUCCESS:
        exit(1)

    mesh = sl.Mesh()
    runtime = sl.RuntimeParameters()
    captured = 0
    while captured < 500:
        # Each successful grab feeds new image/depth/pose data to the
        # background mapping process automatically.
        if camera.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            state = camera.get_spatial_mapping_state()
            print("\rImages captured: {0} / 500 || {1}".format(captured, state))
            captured += 1
    print("\n")

    print("Extracting Mesh...\n")
    camera.extract_whole_mesh(mesh)
    print("Filtering Mesh...\n")
    mesh.filter(sl.MeshFilterParameters())  # remove unnecessary vertices and faces
    print("Saving Mesh...\n")
    mesh.save("mesh.obj")

    # Shut everything down in reverse order of activation.
    camera.disable_spatial_mapping()
    camera.disable_tracking()
    camera.close()
def main():
    """Find the floor plane in a live stream (or SVO passed as argv[1])
    and save its mesh via the module-level ``save_mesh`` helper."""
    camera = sl.Camera()

    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA
    init_params.coordinate_units = sl.UNIT.METER
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    # A single optional argument selects an SVO file instead of live input.
    if len(sys.argv) == 2:
        svo_path = sys.argv[1]
        print("Reading SVO file: {0}".format(svo_path))
        init_params.set_from_svo_file(svo_path)

    status = camera.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD

    mapping_params = sl.SpatialMappingParameters()  # created but not used below
    camera.enable_positional_tracking(
        sl.PositionalTrackingParameters(sl.Transform()))

    floor_mesh = sl.Mesh()
    floor_plane = sl.Plane()
    floor_frame = sl.Transform()
    floor_found = False
    print("Processing...")

    frame = 0
    while frame < 1000:
        if camera.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            result = camera.find_floor_plane(floor_plane, floor_frame)
            # The first 200 frames are ignored so tracking can settle
            # before a detected plane is trusted.
            if frame > 200 and result == sl.ERROR_CODE.SUCCESS:
                floor_found = True
                print('Floor found!')
                floor_mesh = floor_plane.extract_mesh()
                break
            frame += 1

    if not floor_found:
        print('Floor was not found, please try with another SVO.')
        camera.close()
        exit(0)

    camera.disable_positional_tracking()
    save_mesh(floor_mesh)
    camera.close()
    print("\nFINISH")
def main():
    """Build a spatial map from an SVO file (argv[1]), filter and texture it,
    then persist it via the module-level ``save_filter``/``save_mesh`` helpers."""
    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()
    svo_path = sys.argv[1]
    print("Reading SVO file: {0}".format(svo_path))

    camera = sl.Camera()
    source = sl.InputType()
    source.set_from_svo_file(svo_path)
    init_params = sl.InitParameters()
    init_params.input = source

    status = camera.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mapping_params = sl.SpatialMappingParameters()
    tracking_params = sl.PositionalTrackingParameters(sl.Transform())
    # Tracking has to be live before mapping can be enabled.
    camera.enable_positional_tracking(tracking_params)
    camera.enable_spatial_mapping(mapping_params)

    mesh = sl.Mesh()
    print("Processing...")
    # Feed 200 frames into the background mapper.
    for _ in range(200):
        camera.grab(runtime)
        camera.request_spatial_map_async()
    camera.extract_whole_spatial_map(mesh)

    camera.disable_positional_tracking()
    camera.disable_spatial_mapping()

    mesh_filter = sl.MeshFilterParameters()
    mesh_filter.set(sl.MESH_FILTER.HIGH)
    print("Filtering params : {0}.".format(mesh.filter(mesh_filter)))

    textured = mesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
    print("Applying texture : {0}.".format(textured))
    print_mesh_information(mesh, textured)

    save_filter(mesh_filter)
    save_mesh(mesh)
    camera.close()
    print("\nFINISH")
def test():
    """Grab 500 frames with textured spatial mapping enabled, then extract,
    filter, texture and save the mesh as ``mesh.obj``."""
    camera = sl.Camera()

    # HD720, right-handed Y-up coordinates, meters.
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER
    camera.open(init_params)

    # Low-resolution, far-range mesh map with texture capture enabled.
    mapping_params = sl.SpatialMappingParameters(
        sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        sl.MAPPING_RANGE.MAPPING_RANGE_FAR)
    mapping_params.map_type = sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_MESH
    mapping_params.save_texture = True

    # Mesh filtering is not available for fused point clouds.
    mesh_filter = sl.MeshFilterParameters()
    mesh_filter.set(sl.MESH_FILTER.MESH_FILTER_LOW)

    # Tracking must be enabled before mapping.
    camera.enable_tracking(sl.TrackingParameters(init_pos=sl.Transform()))
    camera.enable_spatial_mapping(mapping_params)

    mesh = sl.Mesh()
    runtime = sl.RuntimeParameters()
    grabbed = 0
    print("Getting Frames...")
    while grabbed < 500:
        # Each successful grab hands a new image/depth/pose to the mapper.
        if camera.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            grabbed += 1

    print("Saving...")
    camera.extract_whole_spatial_map(mesh)
    mesh.filter(mesh_filter)
    mesh.apply_texture()
    mesh.save("mesh.obj")
    print("Done")
def simple_test():
    """Sanity-check MeshAnalyzer.is_point_in_triangle_simple on a 3x3 grid.

    Grid points mirror the original p0..p8 layout: pt[n] == [n // 3, n % 3].
    """
    analyzer = MeshAnalyzer(sl.Mesh())
    pt = [[n // 3, n % 3] for n in range(9)]
    just_outside = [0, 0.001]  # barely off the pt[0] corner

    # Interior points, edge midpoints and the three corners of the triangle
    # (pt[0], pt[6], pt[8]) must all be classified as inside.
    for query in (pt[4], pt[3], pt[7], pt[0], pt[6], pt[8]):
        assert analyzer.is_point_in_triangle_simple(
            query, pt[0], pt[6], pt[8]) is True

    # Points clearly outside their triangle must be rejected.
    for query, a, b, c in (
            (pt[1], pt[0], pt[6], pt[8]),
            (pt[3], pt[0], pt[2], pt[8]),
            (pt[5], pt[0], pt[2], pt[6]),
            (pt[3], pt[2], pt[6], pt[8]),
    ):
        assert analyzer.is_point_in_triangle_simple(query, a, b, c) is False

    # A point a hair outside a corner must also be rejected.
    assert analyzer.is_point_in_triangle_simple(
        just_outside, pt[0], pt[6], pt[8]) is False
def update_map_async(self, apply_texture=False):
    """Non-blocking spatial-map refresh; call repeatedly from the main loop.

    Issues one async map request at a time (tracked by
    ``self.has_requested_map``). When a request completes and at least
    ``1 / MESH_REFRESH_RATE`` seconds have passed since the last accepted
    update, the new mesh is filtered and stored on ``self.mesh``.

    :param apply_texture: if True, also texture the fresh mesh and save it
        to disk under the name "MYMAP".
    :return: the SDK status of the async map request.
    """
    if not self.has_requested_map:
        self.zed.request_spatial_map_async()
        self.has_requested_map = True
    # if mesh is ready, update stored mesh
    status = self.zed.get_spatial_map_request_status_async()
    time_from_last_update = time.time() - self.last_update_time
    # Rate-limit updates: only accept a finished mesh if enough time passed.
    if status == sl.ERROR_CODE.SUCCESS and time_from_last_update > 1 / MESH_REFRESH_RATE:
        mesh = sl.Mesh()
        # retrieve generated mesh
        self.zed.retrieve_spatial_map_async(mesh)
        # Filter the mesh
        mesh.filter(
            self.filter_params)  # not available for fused point cloud
        self.mesh = mesh
        # Allow the next request cycle and restart the refresh timer.
        self.has_requested_map = False
        self.last_update_time = time.time()
        if apply_texture:
            if DEBUG:
                print("applying texture...")
            # NOTE(review): saving on every refresh may be expensive;
            # "MYMAP" has no file extension — presumably the SDK picks a
            # default format. TODO confirm.
            self.mesh.apply_texture()
            self.mesh.save("MYMAP")
    return status
def __init__(self):
    """Open the ZED camera and prepare tracking/mapping parameter objects.

    Side effects: prints progress, then calls ``self.start_camera()`` and
    ``self.update_pose()`` (defined elsewhere on this class).
    """
    self.maps = []            # history of extracted maps
    #self.map = []
    self.mesh = sl.Mesh()     # most recent spatial map
    print("Finding ZED...")
    self.zed = sl.Camera()
    self.translation = [0, 0, 0]
    self.orientation = [0, 0, 0, 0]
    self.init_params = sl.InitParameters()

    # Tracking starts from the identity pose, with pose smoothing on.
    tracking = sl.TrackingParameters(init_pos=sl.Transform())
    tracking.enable_pose_smoothing = True
    self.tracking_params = tracking

    # Low-resolution, far-range map with texture capture enabled.
    self.mapping_params = sl.SpatialMappingParameters(
        resolution=sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        mapping_range=sl.MAPPING_RANGE.MAPPING_RANGE_FAR,
        save_texture=True)
    self.filter_params = sl.MeshFilterParameters()
    self.runtime_params = sl.RuntimeParameters()

    print("Starting ZED...")
    self.start_camera()
    self.update_pose()

    # Async-update bookkeeping used by update_map_async().
    self.has_requested_map = False
    self.last_update_time = time.time()
def main():
    """Interactive plane-detection demo: click to find a plane at a pixel,
    press space to (re)detect the floor plane; detected plane meshes are
    drawn over the live image in an OpenGL viewer (``gl`` module).

    Quits when the viewer window reports it is no longer available.
    """
    print("Running Plane Detection sample ... Press 'q' to quit")
    # Create a camera object
    zed = sl.Camera()
    # Set configuration parameters
    init = sl.InitParameters()
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinate system
    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)
    # Open the camera
    status = zed.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)
    # Get camera info and check if IMU data is available
    camera_infos = zed.get_camera_information()
    has_imu = camera_infos.sensors_configuration.gyroscope_parameters.is_available
    # Initialize OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(
        camera_infos.camera_configuration.calibration_parameters.left_cam,
        has_imu)
    image = sl.Mat()  # current left image
    pose = sl.Pose()  # positional tracking data
    plane = sl.Plane()  # detected plane
    mesh = sl.Mesh()  # plane mesh
    find_plane_status = sl.ERROR_CODE.SUCCESS
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF
    # Timestamp of the last mesh request
    last_call = time.time()
    user_action = gl.UserAction()
    user_action.clear()
    # Enable positional tracking before starting spatial mapping
    zed.enable_positional_tracking()
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    while viewer.is_available():
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)
            if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                # Compute elapse time since the last call of plane detection
                duration = time.time() - last_call
                # Ask for a mesh update on mouse click
                if user_action.hit:
                    # hit_coord is normalized [0,1]; scale to pixel coordinates.
                    image_click = [
                        user_action.hit_coord[0] * camera_infos.
                        camera_configuration.camera_resolution.width,
                        user_action.hit_coord[1] * camera_infos.
                        camera_configuration.camera_resolution.height
                    ]
                    find_plane_status = zed.find_plane_at_hit(
                        image_click, plane)
                # Check if 500 ms have elapsed since last mesh request
                if duration > .5 and user_action.press_space:
                    # Update pose data (used for projection of the mesh over the current image)
                    reset_tracking_floor_frame = sl.Transform()
                    find_plane_status = zed.find_floor_plane(
                        plane, reset_tracking_floor_frame)
                    last_call = time.time()
                if find_plane_status == sl.ERROR_CODE.SUCCESS:
                    mesh = plane.extract_mesh()
                    viewer.update_mesh(mesh, plane.type)
            # update_view returns the user interaction for the next iteration.
            user_action = viewer.update_view(image, pose.pose_data(),
                                             tracking_state)
    viewer.exit()
    image.free(sl.MEM.CPU)
    mesh.clear()
    # Disable modules and close camera
    zed.disable_positional_tracking()
    zed.close()
# NOTE(review): this is a script fragment — `zed` must already be an opened
# sl.Camera in the enclosing scope. It enables tracking + mapping, grabs
# 3000 frames while the SDK builds the map in the background, then extracts
# the whole spatial map into `py_mesh`.
tracking_parameters = sl.PositionalTrackingParameters()
err = zed.enable_positional_tracking(tracking_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)
mapping_parameters = sl.SpatialMappingParameters()
err = zed.enable_spatial_mapping(mapping_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)
# Grab data during 3000 frames
i = 0
py_mesh = sl.Mesh()  # Create a Mesh object
runtime_parameters = sl.RuntimeParameters()
while i < 3000:
    if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
        # In background, spatial mapping will use new images, depth and pose to create and update the mesh. No specific functions are required here.
        mapping_state = zed.get_spatial_mapping_state()
        # Print spatial mapping state
        print("\rImages captured: {0} / 3000 || {1}".format(i, mapping_state))
        i = i + 1
err = zed.extract_whole_spatial_map(py_mesh)  # Extract the whole mesh
def main():
    """Record an SVO file (path from argv[1]) and camera poses until Ctrl-C.

    Runs spatial mapping + positional tracking while recording; each frame's
    translation is appended to a CSV. On KeyboardInterrupt the mesh is
    extracted, filtered, textured and saved via module-level helpers
    (``print_mesh_information``, ``save_filter``, ``save_mesh``,
    ``save_position``), then the interrupt is re-raised to exit.

    Fix: the enable_recording failure branch previously printed the stale
    ``status`` from ``cam.open`` instead of the actual recording error.
    """
    if not sys.argv or len(sys.argv) != 2:
        print(
            "Only the path of the output SVO file should be passed as argument."
        )
        exit(1)
    cam = sl.Camera()
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Right-handed Y-up coordinates
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters
    # Depth is not needed while recording raw SVO data.
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)
    # Tracking must be enabled before spatial mapping.
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)
    # Track the camera position until Keyboard Interrupt (ctrl-C).
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    pymesh = sl.Mesh()
    print("Processing...")
    path_output = sys.argv[1]
    err = cam.enable_recording(
        path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        # BUGFIX: report the recording error, not the stale open() status.
        print(repr(err))
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0
    while True:
        try:
            cam.grab(runtime)
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")
            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)
            # Log the translation and timestamp as CSV: tx,ty,tz,timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))
        except KeyboardInterrupt:
            # Ctrl-C: finalize the mesh, stop recording, persist everything,
            # then re-raise so the process terminates.
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            cam.disable_recording()
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)
            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            position_file.close()
            save_position(path)
            print("\nFINISH")
            raise
def main():
    """Run spatial mapping while logging camera translations to a CSV,
    until Ctrl-C; then extract/filter/texture the mesh and persist it via
    the module-level ``save_all`` helper (defined elsewhere in this file).

    The KeyboardInterrupt is re-raised after cleanup so the process exits.
    """
    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()
    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))
    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()
    #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)
    # Tracking must be enabled before spatial mapping.
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)
    #from Positional Tracking:
    # Track the camera position until Keyboard Interupt (ctrl-C)
    #i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    # NOTE(review): hard-coded output path — assumes an SD card mount on a
    # Jetson; confirm before deploying elsewhere.
    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking
    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)
            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))
        except KeyboardInterrupt:
            # Ctrl-C: finalize and persist, then re-raise to exit.
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))
            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            #print_mesh_information(pymesh, apply_texture)
            #save_filter(filter_params)
            #save_mesh(pymesh)
            cam.close()
            position_file.close()
            #save_position(path)
            save_all(filter_params, pymesh, path)
            print("\nFINISH")
            raise
def main():
    """Interactive spatial-mapping demo with an OpenGL viewer (``gl`` module).

    A viewer 'change state' event toggles mapping: the first toggle resets
    tracking and starts mapping; the second extracts, filters, optionally
    textures, and saves the mesh to ``mesh_gen.obj``. Quits when the viewer
    reports it is no longer available.
    """
    print("Running Spatial Mapping sample ... Press 'q' to quit")
    # Create a Camera object
    zed = sl.Camera()
    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.coordinate_units = sl.UNIT.METER  # Set coordinate units
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinates
    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Using SVO file: {0}".format(filepath))
        init_params.set_from_svo_file(filepath)
    # Open the camera
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    # Get camera parameters
    camera_parameters = zed.get_camera_information(
    ).camera_configuration.calibration_parameters.left_cam
    pymesh = sl.Mesh()  # Current incremental mesh
    image = sl.Mat()  # Left image from camera
    pose = sl.Pose()  # Camera pose tracking data
    viewer = gl.GLViewer()
    viewer.init(camera_parameters, pymesh)
    spatial_mapping_parameters = sl.SpatialMappingParameters()
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF
    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
    mapping_activated = False
    last_call = time.time()  # Timestamp of last mesh request
    # Enable positional tracking
    err = zed.enable_positional_tracking()
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit()
    # Set runtime parameters
    runtime = sl.RuntimeParameters()
    while viewer.is_available():
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)
            if mapping_activated:
                mapping_state = zed.get_spatial_mapping_state()
                # Compute elapsed time since the last call of Camera.request_spatial_map_async()
                duration = time.time() - last_call
                # Ask for a mesh update if 500ms elapsed since last request
                if (duration > .5 and viewer.chunks_updated()):
                    zed.request_spatial_map_async()
                    last_call = time.time()
                if zed.get_spatial_map_request_status_async(
                ) == sl.ERROR_CODE.SUCCESS:
                    zed.retrieve_spatial_map_async(pymesh)
                    viewer.update_chunks()
            change_state = viewer.update_view(image, pose.pose_data(),
                                              tracking_state, mapping_state)
            if change_state:
                if not mapping_activated:
                    # First toggle: restart tracking from identity and begin mapping.
                    init_pose = sl.Transform()
                    zed.reset_positional_tracking(init_pose)
                    # Configure spatial mapping parameters
                    spatial_mapping_parameters.resolution_meter = sl.SpatialMappingParameters(
                    ).get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
                    spatial_mapping_parameters.use_chunk_only = True
                    spatial_mapping_parameters.save_texture = False  # Set to True to apply texture over the created mesh
                    spatial_mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.MESH
                    # Enable spatial mapping
                    zed.enable_spatial_mapping()
                    # Clear previous mesh data
                    pymesh.clear()
                    viewer.clear_current_mesh()
                    # Start timer
                    last_call = time.time()
                    mapping_activated = True
                else:
                    # Second toggle: stop mapping and persist the mesh.
                    # Extract whole mesh
                    zed.extract_whole_spatial_map(pymesh)
                    filter_params = sl.MeshFilterParameters()
                    filter_params.set(sl.MESH_FILTER.MEDIUM)
                    # Filter the extracted mesh
                    pymesh.filter(filter_params, True)
                    viewer.clear_current_mesh()
                    # If textures have been saved during spatial mapping, apply them to the mesh
                    if (spatial_mapping_parameters.save_texture):
                        print("Save texture set to : {}".format(
                            spatial_mapping_parameters.save_texture))
                        pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
                    # Save mesh as an obj file
                    filepath = "mesh_gen.obj"
                    status = pymesh.save(filepath)
                    if status:
                        print("Mesh saved under " + filepath)
                    else:
                        print("Failed to save the mesh under " + filepath)
                    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
                    mapping_activated = False
    image.free(memory_type=sl.MEM.CPU)
    pymesh.clear()
    # Disable modules and close camera
    zed.disable_spatial_mapping()
    zed.disable_positional_tracking()
    zed.close()
def main():
    """Collect spatial mapping + pose data for argv[1] seconds, then save.

    argv[1] is the collection time in seconds, argv[2] the output base path
    (used for the pose CSV and passed to the module-level
    ``save_all_path_arg`` helper). Sleeps 60 s first so the operator can
    walk to the start location.
    """
    if len(sys.argv) != 3:
        print(
            "Please specify collection time (seconds), and path to save files")
        exit()
    max_time = sys.argv[1]
    print(max_time)
    path = sys.argv[2]
    print(path)
    #delay program 60 sec, so that user can get to start location
    print(
        "\nYou have 60 seconds to get to start location before program will begin"
    )
    time.sleep(60)
    print("\nInitializing camera")
    cam = sl.Camera()
    init = sl.InitParameters()
    # Use HD720 video mode (default Use a right-handed Y-up coordinate system)
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    # Set units in meters
    init.coordinate_units = sl.UNIT.UNIT_METER
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()
    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)
    # Tracking must be enabled before spatial mapping.
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)
    #for Positional Tracking:
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    #temporary file to save translation data to, until you know the filename (given from the user)
    #path = '/media/nvidia/SD1/translation.csv'
    position_file = open((path + ".csv"), 'w+')
    pymesh = sl.Mesh()
    print("Camera setup")
    #get start time
    start_time = time.time()
    print("Starting to collect data")
    # Grab frames until the requested wall-clock duration has elapsed.
    while (time.time() - start_time) < float(max_time):
        cam.grab(runtime)
        cam.request_mesh_async()
        # Get the pose of the left eye of the camera with reference to the world frame
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)
        # Display the translation and timestamp
        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
        position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz,
                                                       zed_pose.timestamp))
    print(
        "Finished collecting data, extracting mesh, saving and shutting down camera"
    )
    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()
    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))
    apply_texture = pymesh.apply_texture(
        sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    #print_mesh_information(pymesh, apply_texture)
    #save_filter(filter_params)
    #save_mesh(pymesh)
    cam.close()
    position_file.close()
    #save_position(path)
    save_all_path_arg(filter_params, pymesh, path)
    print("\nFINISH")
def main():
    """Experimental: detect the floor plane, print camera calibration and
    plane info, and (disabled) warp a point cloud by a roll rotation.

    The image-warping section is behind ``if False:`` and reads a
    hard-coded PNG path — it is exploratory code, not production logic.
    Requires ``cv2``, ``np`` (numpy) and ``sl`` imported at module level.
    """
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking)
    pymesh = sl.Mesh()
    pyplane = sl.Plane()
    reset_tracking_floor_frame = sl.Transform()
    found = 0
    print("Processing...")
    i = 0
    calibration_params = cam.get_camera_information().calibration_parameters
    # Focal length of the left eye in pixels
    focal_left_x = calibration_params.left_cam.fx
    focal_left_y = calibration_params.left_cam.fy
    # First radial distortion coefficient
    # k1 = calibration_params.left_cam.disto[0]
    # Translation between left and right eye on z-axis
    # tz = calibration_params.T.z
    # Horizontal field of view of the left eye in degrees
    # h_fov = calibration_params.left_cam.h_fov
    print("fx", focal_left_x)
    print("fy", focal_left_y)
    print("cx", calibration_params.left_cam.cx)
    print("cy", calibration_params.left_cam.cy)
    print("distortion", calibration_params.left_cam.disto)
    cloud = sl.Mat()
    while i < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS :
            err = cam.find_floor_plane(pyplane, reset_tracking_floor_frame)
            # Unlike the warm-up variant, the floor is accepted immediately.
            if err == sl.ERROR_CODE.SUCCESS:
                # if i > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                #get gound plane info
                print("floor normal", pyplane.get_normal())  #print normal of plane
                print("floor equation", pyplane.get_plane_equation())
                print("plane center", pyplane.get_center())
                print("plane pose", pyplane.get_pose())
                # camera_pose = sl.Pose()
                # py_translation = sl.Translation()
                # tracking_state = cam.get_position(camera_pose)
                #
                # if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                #     rotation = camera_pose.get_rotation_vector()
                #     rx = round(rotation[0], 2)
                #     ry = round(rotation[1], 2)
                #     rz = round(rotation[2], 2)
                #     translation = camera_pose.get_translation(py_translation)
                #     tx = round(translation.get()[0], 2)
                #     ty = round(translation.get()[1], 2)
                #     tz = round(translation.get()[2], 2)
                #     text_translation = str((tx, ty, tz))
                #     text_rotation = str((rx, ry, rz))
                #     pose_data = camera_pose.pose_data(sl.Transform())
                #     print("translation", text_translation)
                #     print("rotation", text_rotation)
                cam.retrieve_measure(cloud, sl.MEASURE.XYZRGBA)
                break
            i+=1
    if found == 0:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)
    #load image
    # NOTE(review): hard-coded absolute path to a test image on the author's
    # machine — this will fail elsewhere.
    path = "/home/hcl/Documents/ZED/pics/Explorer_HD1080_SN14932_16-24-06.png"
    im = cv2.imread(path)
    h, w = im.shape[0], im.shape[1]
    print("orignal image shape", h, w)
    im = im[:, :int(w/2), :]  #get left image (file holds a side-by-side stereo pair)
    # im = cv2.resize(im, (int(im.shape[1]/2), int(im.shape[0]/2)))
    #warped img
    warped = np.zeros((h, w, 3))
    # Rotation about the x-axis by -80 degrees (converted to radians).
    roll = -80
    roll *= np.pi/180
    Rx = np.matrix([
        [1, 0, 0],
        [0, np.cos(roll), -np.sin(roll)],
        [0, np.sin(roll), np.cos(roll)]
    ])
    #point cloud
    # i = 500
    # j = 355
    # Disabled experimental warp of the point cloud into the rotated frame.
    if False:
        point_cloud = sl.Mat()
        cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
        for i in range(100):
            for j in range(1000):
                point3D = point_cloud.get_value(i, j)
                # x = point3D[0]
                # y = point3D[1]
                # z = point3D[2]
                color = point3D[1][3]
                if ~np.isnan(color) and ~np.isinf(color):
                    xyz = point3D[1][:3].reshape((-1, 1))
                    # xyz_hom = np.vstack((xyz, 1))
                    print('point3D', point3D)
                    warped_index = Rx @ xyz  # TODO: use homogeneous coordinates and divide by the last component?
                    print('warped_index', warped_index)
                    RGB = im[j, i, :]
                    RGB = np.dstack(([255], [255], [255]))
                    print('RGB', RGB)
                    warped[int(np.round(warped_index[0]+10)), int(np.round(warped_index[1])), :] = RGB
    cam.disable_positional_tracking()
    # save_mesh(pymesh)
    cam.close()
    print("\nFINISH")