def __init__(self, resolution=(640, 480), depth_framerate=6, color_framerate=30,
             include_depth=False, decimation_magnitude=1,
             spatial_smooth=True, temporal_smooth=True):
    self.include_depth = include_depth
    self.pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams.
    self.config = rs.config()
    w, h = resolution
    self.config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, color_framerate)
    if include_depth:
        self.config.enable_stream(rs.stream.depth, w, h, rs.format.z16, depth_framerate)

    # Start streaming.
    self.profile = self.pipeline.start(self.config)

    # Get the sensors; the depth scale converts raw z16 values to meters
    # (see the rs-align example for an explanation).
    self.color_sensor = self.profile.get_device().first_color_sensor()
    if include_depth:
        self.depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = self.depth_sensor.get_depth_scale()

    # Create an align object. rs.align maps other streams onto the
    # "align_to" stream; aligning to depth means color frames are
    # reprojected into the depth frame.
    align_to = rs.stream.depth
    self.align = rs.align(align_to)

    # We'll want to store the intrinsics once we start getting images.
    self.aligned_depth_K = None

    # Post-processing filter chain, applied to depth frames in order.
    self.filters = []

    # Decimation filter: downsamples depth frames by `decimation_magnitude`.
    if decimation_magnitude > 1:
        decimate_filter = rs.decimation_filter()
        decimate_filter.set_option(rs.option.filter_magnitude, decimation_magnitude)
        self.filters.append(decimate_filter)

    # The spatial and temporal filters operate in disparity space, so wrap
    # them in depth->disparity and disparity->depth transforms.
    if spatial_smooth or temporal_smooth:
        self.filters.append(rs.disparity_transform())
    if spatial_smooth:
        self.filters.append(rs.spatial_filter())
    if temporal_smooth:
        self.filters.append(rs.temporal_filter())
    if spatial_smooth or temporal_smooth:
        self.filters.append(rs.disparity_transform(False))

    # Pointcloud conversion utility.
    self.pc = rs.pointcloud()

    # Depth intrinsics are only available when the depth stream is enabled;
    # querying them unconditionally would raise, since the stream may not exist.
    if include_depth:
        self.depth_profile = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth))
        self.depth_intrinsics = self.depth_profile.get_intrinsics()
        self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
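# --- Hedged usage sketch (not in the original): one way the objects created
# in __init__ might be consumed per frame. The method name `read` is
# illustrative only; assumes `import numpy as np` at module level.
def read(self):
    frames = self.pipeline.wait_for_frames()
    if self.include_depth:
        # Reproject color into the depth frame (align_to is rs.stream.depth).
        frames = self.align.process(frames)
        depth_frame = frames.get_depth_frame()
        for f in self.filters:  # decimation / smoothing chain, in order
            depth_frame = f.process(depth_frame)
        depth = np.asanyarray(depth_frame.get_data())
    else:
        depth = None
    color = np.asanyarray(frames.get_color_frame().get_data())
    return color, depth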
def capture():
    global img_q, save
    w, h = 640, 480

    # Configure depth and color streams.
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    pc = rs.pointcloud()
    decimate = rs.decimation_filter()  # created but never applied in this function
    decimate.set_option(rs.option.filter_magnitude, 2)

    prepare_pc = 10  # compute the point cloud only for the first 10 frames
    thr = 150        # upper threshold on the colorized-depth gray value
    band = 10        # width of the accepted gray-value band (thr - band, thr]

    while True:
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Colorize depth for display, then normalize to the full 8-bit range.
        dd = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),
                               cv2.COLORMAP_SUMMER)
        dd = cv2.normalize(dd, None, 0, 255, cv2.NORM_MINMAX)

        if prepare_pc > 0:
            prepare_pc -= 1
            points = pc.calculate(depth_frame)
            pc.map_to(color_frame)

        # Build an RGBA image whose alpha masks a depth band: keep pixels
        # whose colorized-depth gray value lies in (thr - band, thr].
        out = np.zeros((h, w, 4), np.uint8)
        depth = cv2.cvtColor(dd, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(depth, thr, 255, cv2.THRESH_BINARY_INV)
        ret, mask2 = cv2.threshold(depth, thr - band, 255, cv2.THRESH_BINARY)
        mask = cv2.bitwise_and(mask, mask2)
        out[:, :, :3] = cv2.bitwise_and(color_image, cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))
        out[:, :, 3] = mask

        # 2x2 preview: color | colorized depth / mask | masked color.
        images = np.vstack(
            (np.hstack((color_image, dd)),
             np.hstack((cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR), out[:, :, :3]))))
        if not img_q.full():
            img_q.put_nowait(images)

        if save.value == 1:
            save_img(color_image, dd, depth_image)
            save.value = 0
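# --- Hedged driver sketch (not in the original): capture() relies on
# module-level `img_q`, `save`, and a `save_img` helper. One plausible setup
# uses multiprocessing primitives (names and file layout are assumptions);
# it relies on fork-style process start inheriting the module globals.
import multiprocessing as mp
import time

img_q = mp.Queue(maxsize=2)  # capture() drops frames when the queue is full
save = mp.Value('i', 0)      # set to 1 from the consumer to request a snapshot

def save_img(color_image, dd, depth_image):
    # Illustrative stand-in for the real helper: persist all three arrays.
    stamp = int(time.time() * 1000)
    cv2.imwrite('color_%d.png' % stamp, color_image)
    cv2.imwrite('depthmap_%d.png' % stamp, dd)
    np.save('depth_%d.npy' % stamp, depth_image)

if __name__ == '__main__':
    mp.Process(target=capture).start()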
path = 'out/'
# w, h = 1280, 720  # alternative resolution (immediately overridden below)
w, h = 640, 480

# Configure depth and color streams.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)

# Start streaming.
pipeline.start(config)

pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2)

accurate = False
prepare_pc = 10

tt = time.time()
try:
    while True:
        print(1 / (time.time() - tt))  # rough FPS of the previous iteration
        tt = time.time()
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
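        # --- Hedged continuation sketch (not in the original, which cuts off
        # here): a typical next step validates the frames, decimates the depth
        # frame, and builds a textured point cloud with the `pc` created above.
        if not depth_frame or not color_frame:
            continue
        depth_frame = decimate.process(depth_frame)
        points = pc.calculate(depth_frame)  # rs.points
        pc.map_to(color_frame)              # attach color texture coordinates
        verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)
finally:
    # Assumed cleanup; the original fragment ends before its try block closes.
    pipeline.stop()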
assert inp_dim % 32 == 0
assert inp_dim > 32
if CUDA:
    model.cuda()
model.eval()

# Set up the RealSense pipeline.
pipe = rs.pipeline()
configure = rs.config()
width = 640
height = 480
configure.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
configure.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)

dec_filter = rs.decimation_filter()  # Decimation - reduces depth frame density
spat_filter = rs.spatial_filter()    # Spatial - edge-preserving spatial smoothing
temp_filter = rs.temporal_filter()   # Temporal - reduces temporal noise

pipe.start(configure)

align_to = rs.stream.color
align = rs.align(align_to)

while True:
    temp = pipe.wait_for_frames()
    aligned_frames = align.process(temp)
    depth_frame = aligned_frames.get_depth_frame()
    filtered = dec_filter.process(depth_frame)
    filtered = spat_filter.process(filtered)
    filtered = temp_filter.process(filtered)
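    # --- Hedged sketch (not in the original): convert the aligned frames to
    # numpy arrays and run the detector. `model`, `inp_dim`, and `CUDA` come
    # from the surrounding script; the preprocessing below is an assumption
    # and assumes `import torch` and `import numpy as np` at module level.
    color_frame = aligned_frames.get_color_frame()
    if not color_frame or not filtered:
        continue
    color_np = np.asanyarray(color_frame.get_data())  # HxWx3 RGB uint8
    depth_np = np.asanyarray(filtered.get_data())     # filtered depth (decimated, so smaller)
    img = torch.from_numpy(color_np).permute(2, 0, 1).float().div(255.0).unsqueeze(0)
    img = torch.nn.functional.interpolate(img, size=(inp_dim, inp_dim),
                                          mode='bilinear', align_corners=False)
    if CUDA:
        img = img.cuda()
    with torch.no_grad():
        prediction = model(img)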
def main():
    if not os.path.exists(args.out_rdir):
        os.mkdir(args.out_rdir)
        print('Created out_rgb_dir')
    if not os.path.exists(args.out_ddir):
        os.mkdir(args.out_ddir)
        print('Created out_depth_dir')
    if not os.path.exists(args.annotations_dir):
        os.mkdir(args.annotations_dir)
        print('Created annotations_dir')

    # Create the realsense2 pipeline.
    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.rgb8, 30)
    profile = pipe.start(config)

    # Declare filters.
    dc_ftr = rs.decimation_filter()  # Decimation - reduces depth frame density
    st_ftr = rs.spatial_filter()     # Spatial - edge-preserving spatial smoothing
    tp_ftr = rs.temporal_filter()    # Temporal - reduces temporal noise
    th_ftr = rs.threshold_filter()

    align_to = rs.stream.color
    align = rs.align(align_to)

    rgbd = np.zeros((1, 4, 360, 640))  # preallocated RGB-D buffer (unused below)
    i = 0
    print('################| Initialization sequence completed. |################')
    try:
        while True:
            # Get frameset of color and depth.
            frames = pipe.wait_for_frames()
            # Align the depth frame to the color frame.
            aligned_frames = align.process(frames)

            # Get aligned frames (640x360, matching the enabled streams).
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid.
            if not aligned_depth_frame or not color_frame:
                continue

            # Post-processing. Note: `filtered` is computed but the raw aligned
            # depth is what gets saved below, so the filter chain has no effect
            # on the saved output as written.
            filtered = tp_ftr.process(st_ftr.process(dc_ftr.process(aligned_depth_frame)))
            filtered = th_ftr.process(filtered)  # can be removed if person is outside frame

            rgb = np.asanyarray(color_frame.get_data())
            rgb = np.transpose(rgb, (2, 0, 1))
            depth = np.asanyarray(aligned_depth_frame.get_data(), dtype=np.uint16)

            im.show(rgb, args.bb)
            im.rgbsave(i, rgb, str('./' + args.out_rdir))
            im.dsave(i, depth, str('./' + args.out_ddir))
            np.savetxt('./' + args.annotations_dir + '/' + str(i) + '.csv',
                       args.bb, delimiter=',')
            i += 1
            print('Saved image ' + str(i))
            # time.sleep(1)
    finally:
        # Runs on any exit, normal or not, so don't report it as an error.
        pipe.stop()
        print('################| Pipeline stopped |################')
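# --- Hedged sketch (not in the original): main() reads args.out_rdir,
# args.out_ddir, args.annotations_dir, and args.bb, so a module-level
# argparse setup along these lines is assumed. Defaults are illustrative.
import argparse

parser = argparse.ArgumentParser(
    description='Record aligned RGB-D frames plus a fixed bounding-box annotation.')
parser.add_argument('--out_rdir', default='rgb', help='output directory for RGB frames')
parser.add_argument('--out_ddir', default='depth', help='output directory for depth frames')
parser.add_argument('--annotations_dir', default='annotations',
                    help='output directory for per-frame CSV files')
parser.add_argument('--bb', type=float, nargs=4, default=[0.0, 0.0, 0.0, 0.0],
                    help='bounding box (x1 y1 x2 y2) written to every annotation CSV')
args = parser.parse_args()

if __name__ == '__main__':
    main()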