def save_depth_frame(frame, serial_number, file_name):
    """Colorize *frame* and export it to *file_name* as a binary PLY.

    Relies on the module-level ``colorizer``, ``args`` (``args.normals``)
    and ``print_verbose``; ``serial_number`` is accepted but not used here.
    """
    print_verbose("Saving to " + file_name)
    exporter = rs.save_to_ply(file_name)
    # Binary PLY; normals are controlled by the --normals CLI flag
    # (mesh generation is on by default in save_to_ply).
    exporter.set_option(rs.save_to_ply.option_ply_binary, True)
    exporter.set_option(rs.save_to_ply.option_ply_normals, args.normals)
    # Colorizing supplies a texture for the depth data before export.
    textured = colorizer.process(frame)
    exporter.process(textured)
def take_snapshot():
    """Capture one depth frame and save it as a textual PLY point cloud.

    Skips the first 5 framesets (sensor warm-up), then writes
    ``point_cloud/snapshot_<timestamp>.ply`` (text PLY, no color, no
    normals, no mesh).

    Returns:
        The snapshot file name on success, ``None`` if an error occurred.

    Fixes vs. the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``except Exception``, the
    unreachable ``break`` after ``return`` is removed, and the pipeline is
    stopped exactly once via ``finally``.
    """
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(stream_type=rs.stream.depth, width=480, height=270, format=rs.format.z16, framerate=15)
    pipeline.start(config)
    try:
        cnt = 0
        while True:
            frames = pipeline.wait_for_frames()
            cnt += 1
            if cnt <= 5:
                # Discard warm-up frames while auto-exposure settles.
                continue
            depth_frame = frames.get_depth_frame()
            if not depth_frame:
                continue
            print("Get depth frame")
            filename = 'snapshot_' + str(time.time()) + '.ply'
            ply = rs.save_to_ply('point_cloud/' + filename)
            # Textual PLY, depth only: no color, no normals, no mesh.
            ply.set_option(rs.save_to_ply.option_ply_binary, False)
            ply.set_option(rs.save_to_ply.option_ignore_color, True)
            ply.set_option(rs.save_to_ply.option_ply_normals, False)
            ply.set_option(rs.save_to_ply.option_ply_mesh, False)
            print("Saving point cloud")
            ply.process(depth_frame)
            print("Point cloud is created as {}".format(filename))
            return filename
    except Exception:
        print("error occurred")
        return None
    finally:
        # Always release the device, on success, error, or interrupt.
        pipeline.stop()
def export_to_ply():
    """Grab one RGB-D frameset from a RealSense camera and save it to 1.ply.

    Streams 640x480 depth (z16) and color (rgb8) at 30 FPS, colorizes the
    frameset to provide a texture, and writes a *binary* PLY without
    normals (mesh generation is on by default). The pipeline is always
    stopped, even on error.

    Fixes vs. the original: removed the unused ``pc``/``points`` locals
    and the dead commented-out stream line, and corrected the comments
    that claimed a textual PLY with normals while the code sets
    ``option_ply_binary=True`` and ``option_ply_normals=False``.
    """
    # Declare RealSense pipeline, encapsulating the actual device and sensors.
    pipe = rs2.pipeline()
    config = rs2.config()
    config.enable_stream(rs2.stream.depth, 640, 480, rs2.format.z16, 30)
    config.enable_stream(rs2.stream.color, 640, 480, rs2.format.rgb8, 30)
    pipe.start(config)

    # We'll use the colorizer to generate texture for our PLY
    # (alternatively, texture can be obtained from color or infrared stream).
    colorizer = rs2.colorizer()
    colorizer.set_option(rs2.option.color_scheme, 0)

    try:
        # Wait for the next set of frames from the camera.
        frames = pipe.wait_for_frames()
        colorized = colorizer.process(frames)

        # Binary PLY, no normals.
        ply = rs2.save_to_ply("1.ply")
        ply.set_option(rs2.save_to_ply.option_ply_binary, True)
        ply.set_option(rs2.save_to_ply.option_ply_normals, False)

        print("Saving to 1.ply...")
        # Apply the processing block to the frameset which contains the
        # depth frame and the texture.
        ply.process(colorized)
        print("Done")
    finally:
        pipe.stop()
# Fragment: grab a frameset, export it as a textual PLY with normals, then
# convert the depth frame to a numpy array.
# NOTE(review): the try: below has no except/finally in view — presumably
# closed later in the file; the commented-out `continue` suggests this once
# lived inside a loop.
colorizer = rs.colorizer()
try:
    # Wait for the next set of frames from the camera
    frames = pipe.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    #if not depth_frame or not color_frame:
    #    continue
    colorized = colorizer.process(frames)
    # Create save_to_ply object
    ply = rs.save_to_ply("ply.ply")
    # Textual PLY (binary off) with per-vertex normals; mesh is on by default.
    ply.set_option(rs.save_to_ply.option_ply_binary, False)
    ply.set_option(rs.save_to_ply.option_ply_normals, True)
    print("Saving to ply.ply...")
    # Apply the processing block to the frameset which contains the depth
    # frame and the texture.
    ply.process(colorized)
    print("Done")
    #time.sleep(5)
    # Convert images to numpy arrays
    depth_image = np.asanyarray(depth_frame.get_data())
# Stack RGB and depth horizontally list_stack = [] list_stack.append(color_image) list_stack.append(depth_colormap) images = np.hstack(list_stack) # Show RGB and depth cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL) cv2.imshow('RealSense', images) cv2.waitKey(1) # Save to press 's' ch = cv2.waitKey(25) if ch == ord('s'): print('Save') save_images(cam, idx, color_image, depth_image) ply = rs.save_to_ply(file_ply.format(cam, idx)) ply.set_option(rs.save_to_ply.option_ply_binary, False) ply.set_option(rs.save_to_ply.option_ply_normals, False) print('Saving to ply...') ply.process(colorized) print('Done') break elif ch == 27: break finally: # Stop streaming pipeline.stop()
# Set up a depth-only RealSense pipeline.
pipe = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth)
pipe.start(config)

# The colorizer supplies the texture for our PLY (a color or infrared
# stream would work as well).
colorizer = rs.colorizer()

try:
    # Grab the next frameset and colorize it.
    frames = pipe.wait_for_frames()
    colorized = colorizer.process(frames)

    # Configure a textual (non-binary) PLY export with per-vertex normals;
    # mesh generation is on by default.
    ply = rs.save_to_ply("1.ply")
    for _opt, _val in ((rs.save_to_ply.option_ply_binary, False),
                       (rs.save_to_ply.option_ply_normals, True)):
        ply.set_option(_opt, _val)

    print("Saving to 1.ply...")
    # Run the export block on the frameset (depth frame + texture).
    ply.process(colorized)
    print("Done")
finally:
    pipe.stop()
# Pointcloud helpers (appear unused in this chunk; kept because they are
# module-level state that later code may rely on).
pc = rs.pointcloud()
points = rs.points()

# Depth-only pipeline.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth)
pipeline.start(config)
colorizer = rs.colorizer()

try:
    # Capture one frameset and colorize it for texture.
    frames = pipeline.wait_for_frames()
    colorized = colorizer.process(frames)

    # Binary PLY with per-vertex normals.
    filepath = 'meshes/1.ply'
    ply = rs.save_to_ply(filepath)
    ply.set_option(rs.save_to_ply.option_ply_normals, True)
    ply.set_option(rs.save_to_ply.option_ply_binary, True)

    print('Saving to {} ...'.format(filepath))
    # Run the export block on the frameset (depth frame + texture).
    ply.process(colorized)
    print('Done')
finally:
    pipeline.stop()
# Fragment: export one frameset to a per-pot PLY file, then convert the
# depth frame to numpy.
# NOTE(review): the try: below has no except/finally in view — presumably
# closed later; the commented-out `continue` suggests this once lived in a
# loop. `path_to_file`, `file_name_str`, `pot_number` come from earlier code.
colorizer = rs.colorizer()
try:
    # Wait for the next set of frames from the camera
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    #if not depth_frame or not color_frame:
    #    continue
    colorized = colorizer.process(frames)
    # Create save_to_ply object
    ply = rs.save_to_ply(f'{path_to_file}{file_name_str}{pot_number}_ply.ply')
    # Textual PLY (binary off) with per-vertex normals; mesh on by default.
    ply.set_option(rs.save_to_ply.option_ply_binary, False)
    ply.set_option(rs.save_to_ply.option_ply_normals, True)
    print(f'Saving to {path_to_file}{file_name_str}{pot_number}_ply.ply')
    # Apply the processing block to the frameset which contains the depth
    # frame and the texture.
    ply.process(colorized)
    print("Done")
    #time.sleep(5)
    # Convert images to numpy arrays
    depth_image = np.asanyarray(depth_frame.get_data())
# Enable the depth stream on the externally created config, then start.
config.enable_stream(rs.stream.depth)
pipe.start(config)

# The colorizer generates the texture for our PLY (color or infrared
# streams could be used instead).
colorizer = rs.colorizer()

# %% 4 capture image
try:
    # Grab one frameset and colorize it.
    frames = pipe.wait_for_frames()
    colorized = colorizer.process(frames)

    # Textual PLY with per-vertex normals, written to `ply_file`
    # (defined earlier in the script).
    ply = rs.save_to_ply(ply_file)
    ply.set_option(rs.save_to_ply.option_ply_normals, True)
    ply.set_option(rs.save_to_ply.option_ply_binary, False)

    print("Saving...")
    # Run the export block on the frameset (depth frame + texture).
    ply.process(colorized)
    print("Done")
finally:
    pipe.stop()
# Let the camera settle before grabbing the frame we keep.
for _ in range(30):
    pipe.wait_for_frames()

# Wait for the next set of frames from the camera.
frames: rs2.composite_frame = pipe.wait_for_frames()

print("Applying thresh filter")
# Apply the threshold filter to the frameset we just got, then colorize.
filtered_frames: rs2.depth_frame = thresh_filter.process(frames)
colorized = colorizer.process(filtered_frames)

print("Creating ply file")
# Binary PLY, no normals (mesh generation is on by default).
ply: rs2.save_to_ply = rs2.save_to_ply("temp.ply")
ply.set_option(rs2.save_to_ply.option_ply_normals, False)
ply.set_option(rs2.save_to_ply.option_ply_binary, True)

print("Saving as ply file")
# Run the export block on the frameset (depth frame + texture).
ply.process(colorized)
print("Finished saving as ply file")

# -- convert from ply to stl
print("Converting to stl file")
# Read the mesh back from the temporary PLY file.
mesh = om.read_trimesh("temp.ply")
# Fragment: export one binary PLY per connected camera into a per-serial
# folder, recording the created files.
if not os.path.exists(root_folder):
    os.makedirs(root_folder)
current_idx = None
created_files = []
try:
    for idx in range(len(devices_)):
        current_idx = idx
        frames = pipelines[idx].wait_for_frames()
        colorized = colorizer.process(frames)
        # Create save_to_ply object
        ply_folder = os.path.join(root_folder, serial_numbers[idx], 'pointcloud')
        if not os.path.exists(ply_folder):
            os.makedirs(ply_folder)
        filename = os.path.join(ply_folder, 'pointcloud_camera__{}__{}__{}.ply'.format(serial_numbers[idx], today, time_execution))
        ply = rs.save_to_ply(filename)
        # Binary PLY with per-vertex normals.
        ply.set_option(rs.save_to_ply.option_ply_binary, True)
        ply.set_option(rs.save_to_ply.option_ply_normals, True)
        # Apply the processing block to the frameset which contains the
        # depth frame and the texture
        ply.process(colorized)
        created_files.append(filename)
except:
    # NOTE(review): bare except hides the real error (and catches
    # KeyboardInterrupt/SystemExit); prefer `except Exception as e` and log e.
    print('Error with camera: {} S/N: {}'.format(current_idx + 1, serial_numbers[current_idx]))
finally:
    for idx in range(len(devices_)):
        pipelines[idx].stop()
# NOTE(review): the body of this `if` continues beyond this view.
if len(created_files) > 0:
# Configure a depth-only stream and start the (externally created) pipe.
config = rs.config()
config.enable_stream(rs.stream.depth)
pipe.start(config)

# The colorizer provides the texture for the exported PLY (color or
# infrared streams could be used instead).
colorizer = rs.colorizer()

try:
    # Grab one frameset and colorize it.
    frames = pipe.wait_for_frames()
    colorized = colorizer.process(frames)

    # Textual (non-binary) PLY with per-vertex normals; mesh on by default.
    ply = rs.save_to_ply("cloud.ply")
    ply.set_option(rs.save_to_ply.option_ply_normals, True)
    ply.set_option(rs.save_to_ply.option_ply_binary, False)

    print("Saving to cloud.ply...")
    # Run the export block on the frameset (depth frame + texture).
    ply.process(colorized)
    print("Done")
finally:
    pipe.stop()
def take_snapshot_rotation(queue):
    """Capture one RGB-D frameset, rotate the point cloud 90° about x,
    and hand results to the caller via *queue*.

    Two modes, selected by the module-level ``save_file`` flag:
    file mode writes a textual PLY, re-reads it, samples 20000 points and
    writes a rotated copy; in-memory mode decimates the depth frame,
    samples 20000 vertices, rotates them and queues .npy paths plus the
    (Rtilt, K) intrinsics.
    """
    pipeline = rs.pipeline()
    pc = rs.pointcloud()
    config = rs.config()
    config.enable_stream(stream_type=rs.stream.depth, width=640, height=480, format=rs.format.z16, framerate=30)
    config.enable_stream(stream_type=rs.stream.color, width=640, height=480, format=rs.format.bgr8, framerate=30)
    pipeline.start(config)
    cnt = 0
    #try:
    while True:
        frames = pipeline.wait_for_frames()
        cnt += 1
        # Skip the first 5 framesets (warm-up), then process exactly one.
        if cnt > 5:
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            print("Get depth frame")
            color_image = np.asanyarray(color_frame.get_data())
            # BGR -> RGB channel flip.
            color_image = color_image[..., ::-1]
            #Get intrinsic
            depth_intrinsics = rs.video_stream_profile(
                depth_frame.profile).get_intrinsics()
            w, h = depth_intrinsics.width, depth_intrinsics.height
            # Rtilt: flattened 3x3 identity; K: column-major intrinsics.
            Rtilt = np.reshape(np.eye(3), -1)
            K = np.array([
                depth_intrinsics.fx, 0, 0, 0, depth_intrinsics.fy, 0,
                depth_intrinsics.ppx, depth_intrinsics.ppy, 1
            ])
            print(Rtilt, K)
            if save_file:
                # NOTE(review): `currentTime` is never assigned in this
                # function — this raises NameError unless it is a module
                # global set elsewhere; confirm.
                filename = 'snapshot_' + str(currentTime) + '.ply'
                if not os.path.exists('point_cloud'):
                    os.mkdir('point_cloud')
                # Textual PLY, depth only: no color, normals or mesh.
                ply = rs.save_to_ply('point_cloud/' + filename)
                ply.set_option(rs.save_to_ply.option_ply_binary, False)
                ply.set_option(rs.save_to_ply.option_ignore_color, True)
                ply.set_option(rs.save_to_ply.option_ply_normals, False)
                ply.set_option(rs.save_to_ply.option_ply_mesh, False)
                print("Saving point cloud...")
                ply.process(depth_frame)
                print("Point cloud is created as {}".format(filename))
                rgb_filename = filename[:-3] + 'jpg'
                if not os.path.exists('rgb_image'):
                    os.mkdir('rgb_image')
                imageio.imwrite('rgb_image/' + rgb_filename, color_image)
                print("RGB Image is created as {}".format(rgb_filename))
            else:
                # Halve resolution, then compute vertices in memory.
                decimate = rs.decimation_filter()
                decimate.set_option(rs.option.filter_magnitude, 2**1)
                depth_frame = decimate.process(depth_frame)
                points = pc.calculate(depth_frame)
                pc.map_to(depth_frame)
            pipeline.stop()
            break
    if save_file:
        # Re-read the PLY just written and post-process it as text.
        with open('point_cloud/' + filename, 'r') as input_file:
            header_cnt = 8
            cnt = 0
            all_lines = input_file.readlines()
            #Get header and data
            header = all_lines[:header_cnt]
            # header[3] is the "element vertex <count>" line.
            split_line = header[3].strip().split(' ')
            vertex_cnt = int(split_line[2])
            data = all_lines[header_cnt:(vertex_cnt + header_cnt)]
            point_cloud = []
            # Random sampling (with replacement when fewer than 20000 points).
            replace = True if vertex_cnt < 20000 else False
            sampled_int = np.random.choice(vertex_cnt, 20000, replace=replace)
            # NOTE(review): the first branch is a no-op; this is just
            # vertex_cnt = min(vertex_cnt, 20000).
            if vertex_cnt < 20000:
                vertex_cnt = vertex_cnt
            else:
                vertex_cnt = 20000
            header[3] = ' '.join(
                split_line[:2] + [str(vertex_cnt) + '\n']
            )  #We will randomly choose 20000 points, so change the point cloud info
            #Get point cloud
            if vertex_cnt >= 20000:
                # Keep only the sampled line indices.
                idx = -1
                for line in data:
                    idx += 1
                    if idx not in sampled_int:
                        continue
                    line_split = line.strip().split(' ')
                    new_line = []
                    for item in line_split:
                        new_line.append(float(item))
                    point_cloud.append(new_line)
            else:
                # Fewer than 20000 points: keep all lines.
                for line in data:
                    line_split = line.strip().split(' ')
                    new_line = []
                    for item in line_split:
                        new_line.append(float(item))
                    point_cloud.append(new_line)
            #Apply rotation by 90 degree along x-axis
            axis = [1, 0, 0]
            axis = axis / norm(axis)
            theta = math.pi / 2
            rot = Rotation.from_rotvec(theta * axis)
            new_point_cloud = rot.apply(point_cloud)
            # Saving point cloud
            out_filename = filename[:-4] + '_r.ply'
            with open('point_cloud/' + out_filename, 'w') as output_file:
                output_file.writelines(header)
                for line in new_point_cloud.tolist():
                    print_line = ''
                    for item in line:
                        print_line += "{:.5f}".format(item) + ' '
                    print_line += '\n'
                    output_file.write(print_line)
            print('Rotation completed')
            #queue.put(out_filename)
    else:
        # In-memory path: vertices from the pointcloud computed above.
        v = points.get_vertices()
        verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
        verts = verts[verts[:, 2] < 5]  # clip where z is farther than 10
        verts = verts[verts[:, 2] > 0.1]  # clip where z is closer than 0.1
        data = verts
        vertex_cnt = data.shape[0]
        # Random sampling
        replace = True if vertex_cnt < 20000 else False
        sampled_int = np.random.choice(vertex_cnt, 20000, replace=replace)
        point_cloud = verts[sampled_int]
        # NOTE(review): no-op first branch; equivalent to min(vertex_cnt, 20000).
        if vertex_cnt < 20000:
            vertex_cnt = vertex_cnt
        else:
            vertex_cnt = 20000
        #Apply rotation by 90 degree along x-axis (note: -pi/2 here,
        # +pi/2 in the file-based branch — confirm both are intended).
        axis = [1, 0, 0]
        axis = axis / norm(axis)
        theta = -math.pi / 2
        rot = Rotation.from_rotvec(theta * axis)
        new_point_cloud = rot.apply(point_cloud)
        print('Rotation completed')
        #print("Point cloud shape", new_point_cloud.shape)  # (20000, 3)
        pc_path = os.path.join(BASE_DIR, 'point_cloud', 'pc.npy')
        np.save(pc_path, new_point_cloud)
        img_path = os.path.join(BASE_DIR, 'rgb_image', 'rgb.npy')
        #print("Image shape", color_image.shape)  #(480, 640, 3)
        np.save(img_path, color_image)
        queue.put(pc_path)
        queue.put(img_path)
        queue.put((Rtilt, K))
        print("Sending queue finished")
    # NOTE(review): unterminated string below continues beyond this view.
    """ Using shared array for multiprocessing... replaced
# Move the arm to the central pose and give it time to settle.
movepose(centralPose, 0.5, 0.5)
print("Moving to central pose.\n")
time.sleep(6)

# Camera field of view in radians (vertical / horizontal).
vfov, hfov = np.deg2rad(42.5), np.deg2rad(69.4)

# Offset from the tool pose to the camera frame centre.
offset = np.array([-35e-3, 70e-3, 0, 0, 0, 0])
frameCentre = centralPose - offset

# Pointcloud helpers (appear unused in this chunk) and the colorizer.
pc = rs.pointcloud()
points = rs.points()
colorizer = rs.colorizer()

# Capture one frameset, colorize it, and export a textual PLY with normals.
frames = pipeline.wait_for_frames()
colorized = colorizer.process(frames)
ply = rs.save_to_ply("pointCloud.ply")
for _opt, _val in ((rs.save_to_ply.option_ply_binary, False),
                   (rs.save_to_ply.option_ply_normals, True)):
    ply.set_option(_opt, _val)
ply.process(colorized)

# Keep the raw depth as a numpy array and persist it.
depth_frame = frames.get_depth_frame()
depth_image = np.asanyarray(depth_frame.get_data())
images = depth_image
np.save('depth_image', depth_image)

# Show the depth image briefly and write it out as a PNG.
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
cv2.waitKey(5)
filename = "scan_depth.png"
cv2.imwrite(filename, depth_image)
# Start one depth pipeline per connected device, bound by serial number.
for idx in range(len(devices_)):
    pipelines.append(rs.pipeline())
    configs.append(rs.config())
    configs[idx].enable_device(serial_numbers[idx])
    configs[idx].enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    pipelines[idx].start(configs[idx])

colorizer = rs.colorizer()
save_image = False
current_idx = None
try:
    # Export one binary PLY (with normals) per camera.
    for idx in range(len(devices_)):
        current_idx = idx
        frames = pipelines[idx].wait_for_frames()
        colorized = colorizer.process(frames)
        # Create save_to_ply object
        ply = rs.save_to_ply('meshes/pointcloud_camera_{}.ply'.format(idx + 1))
        # Set options to the desired values
        ply.set_option(rs.save_to_ply.option_ply_binary, True)
        ply.set_option(rs.save_to_ply.option_ply_normals, True)
        # Apply the processing block to the frameset which contains the
        # depth frame and the texture.
        ply.process(colorized)
        print('Done')
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being reported as a camera error.
    print('Error with camera: {} S/N: {}'.format(current_idx + 1, serial_numbers[current_idx]))
finally:
    # Always release every started pipeline.
    for idx in range(len(devices_)):
        pipelines[idx].stop()