def __init__(self, resolution=(640, 480), depth_framerate=6, color_framerate=30,
             include_depth=False, decimation_magnitude=1,
             spatial_smooth=True, temporal_smooth=True):
    self.include_depth = include_depth
    self.pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams.
    self.config = rs.config()
    w, h = resolution
    self.config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, color_framerate)
    if include_depth:
        self.config.enable_stream(rs.stream.depth, w, h, rs.format.z16, depth_framerate)

    # Start streaming.
    self.profile = self.pipeline.start(self.config)
    self.color_sensor = self.profile.get_device().first_color_sensor()

    # The rest of the setup only applies when a depth stream is enabled.
    if include_depth:
        # Getting the depth sensor's depth scale (see rs-align example for explanation).
        self.depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = self.depth_sensor.get_depth_scale()

        # Create an align object.
        # rs.align allows us to perform alignment of depth frames to other frames.
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.depth
        self.align = rs.align(align_to)

        # We'll want to store the intrinsics once we start getting images.
        self.aligned_depth_K = None

        self.filters = []
        # Decimation filter: downsample the depth frame by `decimation_magnitude`.
        if decimation_magnitude > 1:
            decimate_filter = rs.decimation_filter()
            decimate_filter.set_option(rs.option.filter_magnitude, decimation_magnitude)
            self.filters.append(decimate_filter)

        # Spatial/temporal filters work best in disparity space: convert to
        # disparity, smooth, then convert back to depth.
        if spatial_smooth or temporal_smooth:
            self.filters.append(rs.disparity_transform())
        if spatial_smooth:
            self.filters.append(rs.spatial_filter())
        if temporal_smooth:
            self.filters.append(rs.temporal_filter())
        if spatial_smooth or temporal_smooth:
            self.filters.append(rs.disparity_transform(False))

        # Pointcloud conversion utility.
        self.pc = rs.pointcloud()

        self.depth_profile = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth))
        self.depth_intrinsics = self.depth_profile.get_intrinsics()
        self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
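# A minimal companion-method sketch, assuming the attributes built in __init__
# above (self.pipeline, self.align, self.filters, self.depth_scale); the name
# get_aligned_images is hypothetical, not part of the original class.
def get_aligned_images(self):
    frames = self.pipeline.wait_for_frames()
    if not self.include_depth:
        return np.asanyarray(frames.get_color_frame().get_data()), None
    # Align color to depth, then run the post-processing chain from __init__.
    aligned = self.align.process(frames)
    color_frame = aligned.get_color_frame()
    depth_frame = aligned.get_depth_frame()
    for f in self.filters:
        depth_frame = f.process(depth_frame)
    color_image = np.asanyarray(color_frame.get_data())
    # z16 depth ticks times depth_scale gives meters.
    depth_m = np.asanyarray(depth_frame.get_data()) * self.depth_scale
    return color_image, depth_m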
def spatial_filtering(self, depth_frame, magnitude=2, alpha=0.5, delta=20, holes_fill=0):
    # Edge-preserving smoothing of a single depth frame.
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, magnitude)  # number of filter iterations
    spatial.set_option(rs.option.filter_smooth_alpha, alpha)   # smoothing strength, range [0.25, 1]
    spatial.set_option(rs.option.filter_smooth_delta, delta)   # edge threshold in depth ticks
    spatial.set_option(rs.option.holes_fill, holes_fill)       # 0 = off, 1-5 = growing fill radius
    depth_frame = spatial.process(depth_frame)
    return depth_frame
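# Usage sketch for spatial_filtering above, assuming `cam` is an instance of
# this class with a running pipeline; the parameter values are illustrative.
frames = cam.pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
if depth_frame:
    # holes_fill=1 fills holes up to a 2-pixel radius from valid neighbors.
    smoothed = cam.spatial_filtering(depth_frame, magnitude=2, alpha=0.5,
                                     delta=20, holes_fill=1)
    depth_image = np.asanyarray(smoothed.get_data())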
# Loop-body fragment: `frames`, `accurate`, `prepare_pc`, and `pc` (an
# rs.pointcloud) are defined in the enclosing capture loop.
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
    continue
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())

if not accurate:
    # Fast path: colorize the raw depth directly.
    dd = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),
                           cv2.COLORMAP_SUMMER)
    dd = cv2.normalize(dd, None, 0, 255, cv2.NORM_MINMAX)
else:
    # Accurate path: hole-fill with a spatial filter before colorizing.
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.holes_fill, 3)  # fill holes up to an 8-pixel radius
    filtered_depth = spatial.process(depth_frame)
    dd = np.asanyarray(filtered_depth.get_data())
    dd = cv2.applyColorMap(cv2.convertScaleAbs(dd, alpha=0.03),
                           cv2.COLORMAP_SUMMER)
    dd = cv2.normalize(dd, None, 0, 255, cv2.NORM_MINMAX)
    depth_frame = filtered_depth

if prepare_pc > 0:
    prepare_pc -= 1
    points = pc.calculate(depth_frame)
    pc.map_to(color_frame)
    # depth_frame = decimate.process(depth_frame)

images = np.vstack((color_image, dd))
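# A hedged sketch of unpacking the rs.points object produced by pc.calculate
# above into numpy arrays, following librealsense's pointcloud examples.
verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)                  # xyz (m)
texcoords = np.asanyarray(points.get_texture_coordinates()).view(np.float32).reshape(-1, 2)  # uv
# Drop invalid returns (zero depth) from both arrays.
valid = verts[:, 2] > 0
verts, texcoords = verts[valid], texcoords[valid]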
assert inp_dim % 32 == 0
assert inp_dim > 32
if CUDA:
    model.cuda()
model.eval()

# Setup Realsense pipeline
pipe = rs.pipeline()
configure = rs.config()
width, height = 640, 480
configure.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
configure.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)

dec_filter = rs.decimation_filter()   # Decimation - reduces depth frame density
spat_filter = rs.spatial_filter()     # Spatial - edge-preserving spatial smoothing
temp_filter = rs.temporal_filter()    # Temporal - reduces temporal noise

pipe.start(configure)
align_to = rs.stream.color
align = rs.align(align_to)

while True:
    temp = pipe.wait_for_frames()
    aligned_frames = align.process(temp)
    depth_frame = aligned_frames.get_depth_frame()
    filtered = dec_filter.process(depth_frame)
    filtered = spat_filter.process(filtered)
    filtered = temp_filter.process(filtered)
    # z16 depth is 16-bit, so keep uint16 to avoid truncating values.
    aligned_depth_frame = np.asanyarray(filtered.get_data(), dtype=np.uint16)
    # aligned_depth_frame is the filtered depth image; note that decimation
    # downsamples it below 640x480 (by 2x at the default magnitude).
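# A self-contained display sketch (stream settings assumed) showing rs.colorizer,
# which maps z16 depth to an 8-bit color image without manual convertScaleAbs.
import pyrealsense2 as rs
import numpy as np
import cv2

pipe = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
pipe.start(cfg)
colorizer = rs.colorizer()
try:
    while True:
        depth = pipe.wait_for_frames().get_depth_frame()
        if not depth:
            continue
        depth_color = np.asanyarray(colorizer.colorize(depth).get_data())
        cv2.imshow('depth', depth_color)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    pipe.stop()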
def main():
    if not os.path.exists(args.out_rdir):
        os.mkdir(args.out_rdir)
        print('Created out_rgb_dir')
    if not os.path.exists(args.out_ddir):
        os.mkdir(args.out_ddir)
        print('Created out_depth_dir')
    if not os.path.exists(args.annotations_dir):
        os.mkdir(args.annotations_dir)
        print('Created annotations_dir')

    # Create pipeline for realsense2.
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.rgb8, 30)
    profile = pipe.start(config)

    # Declare filters.
    dc_ftr = rs.decimation_filter()   # Decimation - reduces depth frame density
    st_ftr = rs.spatial_filter()      # Spatial - edge-preserving spatial smoothing
    tp_ftr = rs.temporal_filter()     # Temporal - reduces temporal noise
    th_ftr = rs.threshold_filter()    # Threshold - clips depth outside a min/max range

    align_to = rs.stream.color
    align = rs.align(align_to)

    rgbd = np.zeros((1, 4, 360, 640))
    i = 0
    print('################| Initialization sequence completed. |################')
    try:
        while True:
            # Get frameset of color and depth.
            frames = pipe.wait_for_frames()

            # Align the depth frame to the color frame.
            aligned_frames = align.process(frames)

            # Get aligned frames.
            aligned_depth_frame = aligned_frames.get_depth_frame()  # 640x360 depth
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid.
            if not aligned_depth_frame or not color_frame:
                continue

            # Post-processing. Note that the filtered frame is computed here,
            # but the raw aligned depth is what gets saved below.
            filtered = tp_ftr.process(st_ftr.process(dc_ftr.process(aligned_depth_frame)))
            filtered = th_ftr.process(filtered)  # can be removed if person is outside frame

            rgb = np.asanyarray(color_frame.get_data())
            rgb = np.transpose(rgb, (2, 0, 1))
            depth = np.asanyarray(aligned_depth_frame.get_data(), dtype=np.uint16)

            im.show(rgb, args.bb)
            im.rgbsave(i, rgb, str('./' + args.out_rdir))
            im.dsave(i, depth, str('./' + args.out_ddir))
            np.savetxt('./' + args.annotations_dir + '/' + str(i) + '.csv',
                       args.bb, delimiter=',')
            i = i + 1
            print('saved ' + str(i) + ' images')
            # time.sleep(1)
    finally:
        pipe.stop()
        print('################| Pipeline stopped |################')
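# The im.rgbsave/im.dsave helpers belong to the author's own module; a
# hypothetical dsave could write the uint16 depth losslessly as a 16-bit PNG:
def dsave(i, depth, out_dir):
    # OpenCV preserves uint16 arrays when writing .png files.
    cv2.imwrite(os.path.join(out_dir, str(i) + '.png'), depth)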