def post_process_depth_frame(depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0,
                             spatial_smooth_alpha=0.5, spatial_smooth_delta=20,
                             temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    """
    Pre-process (filter) a depth frame acquired from the RealSense device.

    Return:
        filtered_frame : rs.frame()
    """
    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
def __init__(self):
    ctx = rs.context()
    self.devices = ctx.query_devices()
    self.configs = list()
    self.filters = list()
    for device in self.devices:
        config = rs.config()
        config.enable_device(device.get_info(rs.camera_info.serial_number))
        config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT, rs.format.bgr8, 30)
        self.configs.append(config)

        align = rs.align(rs.stream.color)

        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)

        temporal = rs.temporal_filter()
        hole_filling = rs.hole_filling_filter()
        depth_to_disparity = rs.disparity_transform(True)
        disparity_to_depth = rs.disparity_transform(False)
        decimate = rs.decimation_filter()

        self.filters.append({
            'align': align,
            'spatial': spatial,
            'temporal': temporal,
            'hole': hole_filling,
            'disparity': depth_to_disparity,
            'depth': disparity_to_depth,
            'decimate': decimate
        })
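# Usage sketch (not from the original source): a hypothetical helper method showing
# one way the per-camera filter dict built above could be applied to a frameset.
# Assumes `frames` comes from a pipeline started with self.configs[i] for camera i.
def _apply_filters_example(self, frames, i):
    f = self.filters[i]
    frames = f['align'].process(frames)
    depth = frames.get_depth_frame()
    depth = f['decimate'].process(depth)
    depth = f['disparity'].process(depth)   # depth -> disparity
    depth = f['spatial'].process(depth)
    depth = f['temporal'].process(depth)
    depth = f['depth'].process(depth)       # disparity -> depth
    depth = f['hole'].process(depth)
    return depth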
def post_process_depth_frame(depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0,
                             spatial_smooth_alpha=0.5, spatial_smooth_delta=20,
                             temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
def __init__(self, filters=[]):
    """
    Connect to RealSense and initialize filters

    :param filters: [String, ...], default=[]: '' TODO list filters
    """
    self.pipe = rs.pipeline()
    cfg = rs.config()
    profile = self.pipe.start(cfg)

    # camera parameters
    self.depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()

    # filters to apply to depth images
    self.filters = filters
    if 'align' in self.filters:
        self.align = rs.align(rs.stream.color)
    if 'decimation' in self.filters:
        self.decimation = rs.decimation_filter()
        self.decimation.set_option(rs.option.filter_magnitude, 4)
    if 'spatial' in self.filters:
        self.spatial = rs.spatial_filter()
        # self.spatial.set_option(rs.option.holes_fill, 3)
        self.spatial.set_option(rs.option.filter_magnitude, 5)
        self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
        self.spatial.set_option(rs.option.filter_smooth_delta, 50)
    if 'temporal' in self.filters:
        # TODO
        self.temporal = rs.temporal_filter()
        self.temporal_iters = 3
    if 'hole_filling' in self.filters:
        self.hole_filling = rs.hole_filling_filter()
    if 'colorize' in self.filters:
        self.colorizer = rs.colorizer()
def __init__(self, parameters, neural):
    self.parameters = parameters
    self.neural = neural
    self.stream_frame = None

    config = rs.config()
    self.pipeline = rs.pipeline()
    config.enable_stream(rs.stream.depth, self.parameters.depth_width,
                         self.parameters.depth_height, rs.format.z16,
                         self.parameters.depth_fps)
    config.enable_stream(rs.stream.color, self.parameters.color_width,
                         self.parameters.color_height, rs.format.rgb8,
                         self.parameters.color_fps)
    profile = self.pipeline.start(config)

    # Note: despite the name, this stores the depth scale (meters per depth unit),
    # not the sensor object itself.
    self.depth_sensor = profile.get_device().first_depth_sensor().get_depth_scale()

    self.align_stream = rs.align(rs.stream.color)
    self.decimation = rs.decimation_filter()
    self.hole_filling = rs.hole_filling_filter()
    self.spatial = rs.spatial_filter()
    self.neural = neural
    print("camera loop has started")
    threading.Thread.__init__(self)
def get_depth_filter_list(decimate=True, d2d=True, spatial=True, temporal=True):
    filters = []
    if decimate:
        dec_filt = rs.decimation_filter()
        dec_filt.set_option(rs.option.filter_magnitude, 2)
        filters.append(dec_filt)

    if d2d:
        depth2disparity = rs.disparity_transform()
        filters.append(depth2disparity)

    if spatial:
        spat = rs.spatial_filter()
        spat.set_option(rs.option.holes_fill, FILL_ALL_ZERO_PIXELS)
        filters.append(spat)

    if temporal:
        temp = rs.temporal_filter()
        filters.append(temp)

    if d2d:
        disparity2depth = rs.disparity_transform(False)
        filters.append(disparity2depth)

    return filters
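# Usage sketch (not from the original source): apply the filter list returned above
# to a single depth frame, in order. Assumes `pipeline` is a started rs.pipeline and
# numpy is imported as np.
filters = get_depth_filter_list()
depth_frame = pipeline.wait_for_frames().get_depth_frame()
for f in filters:
    depth_frame = f.process(depth_frame)
depth_image = np.asanyarray(depth_frame.get_data())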
def __init__(self, w=640, h=480, depth=True, frame_rate=30):
    '''
    Initializing the Python RealSense Control Flow:
    w: Int (default = 640, can also be 1280)
    h: Int (default = 480, can also be 720)
    depth: Bool (default = True)
    frame_rate: Int (default = 30)

    RGB and Depth formats are: bgr8, z16

    Note: In this class, variables should not be directly changed.
    '''
    self.width = w
    self.height = h
    self.depth_on = depth
    self._pipeline = rs.pipeline()
    self._config = rs.config()
    self._config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, frame_rate)
    self._intrinsic = None

    if depth:
        self.align = rs.align(rs.stream.color)
        self._preset = 0
        # Presets (for D415):
        # 0: Custom
        # 1: Default
        # 2: Hand
        # 3: High Accuracy
        # 4: High Density
        # 5: Medium Density

        # depth interpolation
        self.interpolation = cv2.INTER_NEAREST  # use nearest neighbor
        # self.interpolation = cv2.INTER_LINEAR  # linear
        # self.interpolation = cv2.INTER_CUBIC   # cubic

        # beautify depth image for viewing
        self._config.enable_stream(rs.stream.depth, w, h, rs.format.z16, frame_rate)
        self.colorizer = rs.colorizer()

        # initialize filters
        self.decimation = rs.decimation_filter()
        self.decimation.set_option(rs.option.filter_magnitude, 4)
        self.depth_to_disparity = rs.disparity_transform(True)
        self.spatial = rs.spatial_filter()
        self.spatial.set_option(rs.option.filter_magnitude, 5)
        self.spatial.set_option(rs.option.filter_smooth_alpha, 0.5)
        self.spatial.set_option(rs.option.filter_smooth_delta, 20)
        self.temporal = rs.temporal_filter()
        self.disparity_to_depth = rs.disparity_transform(False)

    print("Initialized RealSense Camera\nw: {}, h: {}, depth: {}, frame_rate: {}"
          .format(w, h, depth, frame_rate))
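# Usage sketch (not from the original source): a hypothetical method of the class
# above, applying its configured filters in the usual librealsense ordering
# (decimate -> to-disparity -> spatial -> temporal -> back-to-depth).
def _filter_depth_example(self, depth_frame):
    frame = self.decimation.process(depth_frame)
    frame = self.depth_to_disparity.process(frame)   # work in the disparity domain
    frame = self.spatial.process(frame)
    frame = self.temporal.process(frame)
    frame = self.disparity_to_depth.process(frame)   # convert back to depth
    return frame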
def __init__(self, cam_id, filter_depth=True, frame=None,
             registration_mode=RealSenseRegistrationMode.DEPTH_TO_COLOR):
    self._running = None

    self.id = cam_id
    self._registration_mode = registration_mode
    self._filter_depth = filter_depth

    self._frame = frame
    if self._frame is None:
        self._frame = 'realsense'
    self._color_frame = '%s_color' % (self._frame)

    # realsense objects
    self._pipe = rs.pipeline()
    self._cfg = rs.config()
    self._align = rs.align(rs.stream.color)

    # camera parameters
    self._depth_scale = None
    self._intrinsics = np.eye(3)

    # post-processing filters
    self._colorizer = rs.colorizer()
    self._spatial_filter = rs.spatial_filter()
    self._hole_filling = rs.hole_filling_filter()
def filtering(self, frame):
    '''Filter setting'''
    # Decimation #
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 1)
    # Spatial #
    spatial = rs.spatial_filter()
    # spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 0.6)
    spatial.set_option(rs.option.filter_smooth_delta, 8)
    # spatial.set_option(rs.option.holes_fill, 3)
    # Temporal #
    temporal = rs.temporal_filter()
    temporal.set_option(rs.option.filter_smooth_alpha, 0.5)
    temporal.set_option(rs.option.filter_smooth_delta, 20)
    # Hole #
    hole_filling = rs.hole_filling_filter()
    ##
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    '''Applying filter'''
    frame = decimation.process(frame)
    frame = depth_to_disparity.process(frame)
    frame = spatial.process(frame)
    frame = temporal.process(frame)
    frame = disparity_to_depth.process(frame)
    frame = hole_filling.process(frame)

    return frame
def __init__(self):
    # Create a pipeline
    self.pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 60)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.bgr8, 60)

    # Start streaming
    self.profile = self.pipeline.start(config)

    s = self.profile.get_device().query_sensors()[1]
    s.set_option(rs.option.exposure, 80)

    depth_sensor = self.profile.get_device().first_depth_sensor()
    self.depth_scale = depth_sensor.get_depth_scale()

    self.spat_filter = rs.spatial_filter()   # Spatial - edge-preserving spatial smoothing
    self.temp_filter = rs.temporal_filter()  # Temporal - reduces temporal noise

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    self.align = rs.align(align_to)
def get_frame_stream(self):
    # Wait for a coherent pair of frames: depth and color
    frames = self.pipeline.wait_for_frames()
    aligned_frames = self.align.process(frames)
    depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()

    if not depth_frame or not color_frame:
        # If there is no frame, probably camera not connected, return False
        print("Error, impossible to get the frame, make sure that the Intel Realsense camera is correctly connected")
        print("Depth frame: " + str(bool(depth_frame)) + " Color: " + str(bool(color_frame)))
        return False, None, None

    # Apply filter to fill the holes in the depth image
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.holes_fill, 3)
    filtered_depth = spatial.process(depth_frame)

    hole_filling = rs.hole_filling_filter()
    filled_depth = hole_filling.process(filtered_depth)

    # Create colormap to show the depth of the objects
    colorizer = rs.colorizer()
    depth_colormap = np.asanyarray(colorizer.colorize(filled_depth).get_data())

    # Convert images to numpy arrays
    depth_image = np.asanyarray(filled_depth.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    return True, color_image, depth_frame
def __init__(self, w=640, h=480, clipping_dist_meters=1):
    self.w, self.h = w, h

    # Create a pipeline
    self.pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    config = rs.config()
    config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)

    # Start streaming
    profile = self.pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    self.depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", self.depth_scale)

    # We will be removing the background of objects more than
    # clipping_distance_in_meters meters away
    self.clipping_distance = clipping_dist_meters / self.depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    self.align = rs.align(align_to)

    # set up depth post-processing filters
    self.decimation_filter = rs.decimation_filter()  # default magnitude is 2
    self.spatial_filter = rs.spatial_filter(smooth_alpha=.6, smooth_delta=20,
                                            magnitude=2, hole_fill=0)
    self.hole_fill_filter = rs.hole_filling_filter()  # default is fill according to neighboring pixel farthest from sensor
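# Sketch (not from the original source) of the background-clipping step that
# clipping_distance above is intended for, following the rs-align example pattern.
# A hypothetical method of the class above; assumes `aligned_frames` came from
# self.align.process(frames) and numpy is imported as np.
def _clip_background_example(self, aligned_frames, grey=153):
    depth_image = np.asanyarray(aligned_frames.get_depth_frame().get_data())
    color_image = np.asanyarray(aligned_frames.get_color_frame().get_data())
    # pixels with no depth (0) or farther than the clipping distance are greyed out
    mask = (depth_image <= 0) | (depth_image > self.clipping_distance)
    return np.where(mask[..., None], grey, color_image)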
def _filter_depth_frame(depth_frame):
    """
    Filter chain applied to the depth image before extracting coordinates.
    :param depth_frame: depth frame
    :return: filtered depth frame
    """
    dec = rs.decimation_filter()
    dec.set_option(rs.option.filter_magnitude, 1)
    depth_frame_pro = dec.process(depth_frame)

    depth2disparity = rs.disparity_transform()
    depth_frame_pro = depth2disparity.process(depth_frame_pro)

    spat = rs.spatial_filter()
    # enable hole filling; 5 means fill all zero pixels
    spat.set_option(rs.option.holes_fill, 5)
    depth_frame_pro = spat.process(depth_frame_pro)

    temp = rs.temporal_filter()
    depth_frame_pro = temp.process(depth_frame_pro)

    disparity2depth = rs.disparity_transform(False)
    depth_frame_pro = disparity2depth.process(depth_frame_pro)

    # depth_image_pro = np.asanyarray(depth_frame_pro.get_data())
    # depth_colormap_pro = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_pro, alpha=0.03), cv2.COLORMAP_JET)

    return depth_frame_pro
def post_process_depth_frame(depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0,
                             spatial_smooth_alpha=0.5, spatial_smooth_delta=20,
                             temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    """
    Filter the depth frame acquired using the Intel RealSense device

    Parameters
    ----------
    depth_frame : rs.frame()
        The depth frame to be post-processed
    decimation_magnitude : double
        The magnitude of the decimation filter
    spatial_magnitude : double
        The magnitude of the spatial filter
    spatial_smooth_alpha : double
        The alpha value for spatial filter based smoothening
    spatial_smooth_delta : double
        The delta value for spatial filter based smoothening
    temporal_smooth_alpha : double
        The alpha value for temporal filter based smoothening
    temporal_smooth_delta : double
        The delta value for temporal filter based smoothening

    Return
    ------
    filtered_frame : rs.frame()
        The post-processed depth frame
    """
    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
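# Usage sketch (not from the original source): feed one live depth frame from a
# started rs.pipeline through the helper above. Assumes `pipeline` is already
# streaming and numpy is imported as np.
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
if depth_frame:
    filtered = post_process_depth_frame(depth_frame,
                                        spatial_smooth_alpha=0.5,
                                        temporal_smooth_alpha=0.4)
    filtered_image = np.asanyarray(filtered.get_data())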
def prepare_filters(self):
    # prepare post-processing filters
    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2 ** 3)
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)
    spatial.set_option(rs.option.holes_fill, 3)
    colorizer = rs.colorizer()

    self.filters = [rs.disparity_transform(),
                    rs.decimation_filter(),
                    rs.spatial_filter(),
                    rs.temporal_filter(),
                    rs.disparity_transform(False)]
def spatial_filtering(depth_frame, magnitude=2, alpha=0.5, delta=20, holes_fill=0):
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, magnitude)
    spatial.set_option(rs.option.filter_smooth_alpha, alpha)
    spatial.set_option(rs.option.filter_smooth_delta, delta)
    spatial.set_option(rs.option.holes_fill, holes_fill)
    depth_frame = spatial.process(depth_frame)
    return depth_frame
def __init__(self):
    self.filters = [
        rs.decimation_filter(RESCALE),
        rs.disparity_transform(True),
        rs.hole_filling_filter(1),
        rs.spatial_filter(0.5, 8, 2, 2),
        rs.temporal_filter(0.5, 20, 1),
        rs.disparity_transform(False)
    ]
def spatial(self, frame):
    # apply spatial filtering to the frame sent in
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, .25)
    spatial.set_option(rs.option.filter_smooth_delta, 50)
    spatial.set_option(rs.option.holes_fill, 1)
    filtered_depth = spatial.process(frame)
    return filtered_depth
def preprocess_depth_frame(self, depth_frame):
    decimation = rs.decimation_filter()
    depth_image = decimation.process(depth_frame)

    spatial = rs.spatial_filter()
    # filtered_depth = spatial.process(depth_frame)
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)
    spatial.set_option(rs.option.holes_fill, 3)
    depth_frame = spatial.process(depth_frame)
    return depth_frame
def GetStandardDeviationsFromBag(bag_file_path, frame_index_difference=10,
                                 do_analysis_every_n_frames=1, bag_timeout_ms=500,
                                 filter=False):
    try:
        pipeline = rs.pipeline()
        config = rs.config()
        rs.config.enable_device_from_file(config, bag_file_path, repeat_playback=False)
        profile = pipeline.start(config).get_device().as_playback().set_real_time(False)

        depth_frames_deque = deque()
        SDs = []
        FNs = []
        all_frame_numbers = []
        frames_since_last_analysis = 0

        if filter:
            spatial = rs.spatial_filter()
            decimation = rs.decimation_filter()
            hole_filling = rs.hole_filling_filter()
            hole_filling.set_option(rs.option.holes_fill, 2)

        while True:
            frames = pipeline.wait_for_frames(timeout_ms=bag_timeout_ms)
            fn = frames.frame_number
            all_frame_numbers += [fn]
            frames_since_last_analysis += 1

            cur_depth_frame = frames.get_depth_frame()
            if filter:
                cur_depth_frame = decimation.process(cur_depth_frame)
                cur_depth_frame = spatial.process(cur_depth_frame)
                cur_depth_frame = hole_filling.process(cur_depth_frame)
            depth_frames_deque.append(cur_depth_frame)

            if len(depth_frames_deque) > frame_index_difference:
                cur_depth_image = np.asanyarray(cur_depth_frame.get_data())
                past_depth_image = np.asanyarray(depth_frames_deque.popleft().get_data())
                if frames_since_last_analysis >= do_analysis_every_n_frames:
                    SDs += [calculateSD(cur_depth_image, past_depth_image)]
                    FNs += [fn]
                    frames_since_last_analysis = 0
    except Exception as e:
        print(e)
        if "arrive" in str(e):
            pipeline.stop()
            del (pipeline)
            del (profile)
            return np.array(all_frame_numbers), np.array(FNs), np.array(SDs)
        else:
            raise (e)
        pass
    finally:
        return np.array(all_frame_numbers), np.array(FNs), np.array(SDs)
def start_pipe(self, align=True, usb3=True):
    if not self.pipelineStarted:
        if align:
            print('Establishing the camera connection')
            # Create a config and configure the pipeline to stream
            # different resolutions of color and depth streams
            self.pipeline = rs.pipeline()

            config = rs.config()
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

            # Start streaming
            self.profile = self.pipeline.start(config)
            align_to = rs.stream.color
            self.align = rs.align(align_to)
            time.sleep(1)

        # self.pipeline = rs.pipeline()
        # config = rs.config()
        #
        # if usb3:
        #     config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
        #     config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
        #
        # else:
        #     self.profile = config.resolve(self.pipeline)  # does not start streaming
        #
        # self.profile = self.pipeline.start(config)
        # self.pipelineStarted = True
        # # Align the two streams
        # align_to = rs.stream.color
        # self.align = rs.align(align_to)

        self.pipelineStarted = True

        # Get depth scale
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()

        # Create the filters
        self.hole_filling = rs.hole_filling_filter()
        self.temporal_filter = rs.temporal_filter()
        self.spatial_filter = rs.spatial_filter()
        self.depth_to_disparity = rs.disparity_transform(False)

        # Get intrinsic parameters
        self.get_intrinsic()
        print('Camera opened')
def init_depth_cam(self):
    self.pipeline_profile = self.pipeline.start(self.config)
    self.depth_sensor = self.pipeline_profile.get_device().first_depth_sensor()
    self.depth_sensor.set_option(rs.option.emitter_enabled, 1)
    self.depth_sensor.set_option(rs.option.laser_power, 250)
    self.depth_sensor.set_option(rs.option.depth_units, 0.0001)  # changed 0.0001

    self.temp_filter = rs.temporal_filter()
    self.temp_filter.set_option(rs.option.filter_smooth_alpha, 0.8)
    self.temp_filter.set_option(rs.option.filter_smooth_delta, 10)
    self.temp_filter.set_option(rs.option.holes_fill, 1.0)

    self.spatial_filter = rs.spatial_filter()
    self.spatial_filter.set_option(rs.option.holes_fill, 3)

    device = self.pipeline_profile.get_device()
def start(self):
    self.pipeline = rs.pipeline()
    self.pipeline.start(self.config)
    # needed to align the fields of view
    self.align = rs.align(rs.stream.color)

    # configure the spatial filter applied to the depth map
    self.spatial = rs.spatial_filter()
    self.spatial.set_option(rs.option.filter_magnitude, 5)
    self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
    self.spatial.set_option(rs.option.filter_smooth_delta, 50)
    self.spatial.set_option(rs.option.holes_fill, 3)
def depth_filter(depth_frame):
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
    hole_filling = rs.hole_filling_filter(2)
    spatial = rs.spatial_filter()

    depth_frame = depth_to_disparity.process(depth_frame)
    depth_frame = spatial.process(depth_frame)
    # frame = temporal.process(frame)
    depth_frame = disparity_to_depth.process(depth_frame)
    depth_frame = hole_filling.process(depth_frame)
    return depth_frame
def run(self):
    self.pipeline, config = capture_data.init_capture_data()
    profile = self.pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    clipping_distance_in_meters = 1
    clipping_distance = clipping_distance_in_meters / depth_scale
    align_to = rs.stream.color
    align = rs.align(align_to)

    while True:
        frames = self.pipeline.wait_for_frames()
        aligned_frames = align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.holes_fill, 3)
        depth_frame = spatial.process(depth_frame)

        Pixel_Coord, segmented_cloud = capture_data.get_object_points(color_frame, depth_frame)
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        color_image_copy = copy.deepcopy(color_image)

        if len(Pixel_Coord) > 0:
            color_image_copy, object_mask, bbox_coordinates = capture_data.get_mask(Pixel_Coord, color_image_copy)
        else:
            object_mask = np.zeros((480, 640, 3), np.uint8)
            bbox_coordinates = [0, 0, 640, 480]

        if not depth_frame or not color_frame:
            continue

        # for i in Pixel_Coord:
        #     cv2.circle(color_image_copy, (int(i[0]), int(i[1])), 2, (0, 255, 0), -1)

        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((color_image_copy, object_mask))
        images_raw = np.hstack((color_image, object_mask))
        full_data = [images_raw, bbox_coordinates, segmented_cloud, depth_frame]
        full_data = np.array(full_data)

        rgbImage = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)
        h, w, ch = rgbImage.shape
        bytesPerLine = ch * w
        convertToQtFormat = QtGui.QImage(rgbImage.data, w, h, bytesPerLine, QtGui.QImage.Format_RGB888)
        p = convertToQtFormat.scaled(640, 240, Qt.KeepAspectRatio)
        self.changePixmap.emit(p)
        self.imagesPixmap.emit(full_data)
def getDepthFrame(self):
    # frames = self.pipeline.wait_for_frames()
    if (len(self.depthFramesBuffer) > 0):
        print(',')
        depthFrame = self.depthFramesBuffer[0]

        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 2)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 2)

        # hole_filling = rs.hole_filling_filter()
        # hole_filling.set_option(rs.option.filter_magnitude, 2)

        thresh = rs.threshold_filter(0.1, 2)
        # depthImage = cv2.bilateralFilter(depthImage, 9, 75, 75)

        depthFrame = thresh.process(depthFrame)
        depthFrame = spatial.process(depthFrame)
        # depthFrame = hole_filling.process(depthFrame)

        depthImage = np.asanyarray(depthFrame.get_data())
        # depthImage = depthImage * 255
        # depthImage = cv2.medianBlur(depthImage, 1)
        # depthImage = cv2.GaussianBlur(depthImage, (3, 3), 3)
        del self.depthFramesBuffer[0]
    else:
        frames = self.pipeline.wait_for_frames()
        self.colorFramesBuffer.append(frames.get_color_frame())
        depthFrame = frames.get_depth_frame()
        # instantiate the filter before calling process (the original called the class directly)
        depthFrame = rs.hole_filling_filter().process(depthFrame)
        depthImage = np.asanyarray(depthFrame.get_data())
        depthImage = cv2.GaussianBlur(depthImage, (11, 11), 5)
        depthImage = cv2.medianBlur(depthImage, 51)
        depthImage = cv2.bilateralFilter(depthImage, 9, 75, 75)
        print(len(self.depthFramesBuffer))
        # del self.framesBuffer[-1]

    depthColorMap = cv2.applyColorMap(cv2.convertScaleAbs(depthImage, alpha=0.003), cv2.COLORMAP_JET)
    return depthColorMap
def __init__(self, debugFlag=False, debugPath=''):
    self.debugFlag = debugFlag

    # Decimation - reduces depth frame density
    self.decimateFilter = rs.decimation_filter()
    self.thresholdFilter = rs.threshold_filter(min_dist=1.8, max_dist=3)
    # Converts from depth representation to disparity representation and vice-versa in depth frames
    self.depth_to_disparity = rs.disparity_transform(True)
    # Spatial - edge-preserving spatial smoothing
    self.spatial_filter = rs.spatial_filter()
    # Temporal - reduces temporal noise
    self.temporalFilter = rs.temporal_filter()
    self.disparity_to_depth = rs.disparity_transform(False)
def filtering(self):
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
    spatial = rs.spatial_filter()
    temporal = rs.temporal_filter()
    hole_filling = rs.hole_filling_filter()

    for frame in self.depth_frams:
        frame = depth_to_disparity.process(frame)
        frame = spatial.process(frame)
        frame = temporal.process(frame)
        frame = disparity_to_depth.process(frame)
        frame = hole_filling.process(frame)

    self.aligned_depth_frame = frame.get_data()
    self.colorized_depth = np.asanyarray(self.colorizer.colorize(frame).get_data())
def start(self):
    # Start streaming
    self.pipeline = rs.pipeline()
    self.pipeline.start(self.config)
    self.align = rs.align(rs.stream.color)

    # apply a spatial filter to the depth stream
    self.spatial = rs.spatial_filter()
    self.spatial.set_option(rs.option.filter_magnitude, 5)
    self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
    self.spatial.set_option(rs.option.filter_smooth_delta, 50)
    self.spatial.set_option(rs.option.holes_fill, 3)
    # self.hole = rs.hole_filling_filter()

    print('pipeline start')
def post_processing(frame, enable_spatial=True, enable_temporal=True, enable_hole=True,
                    spatial_params=[(rs.option.filter_magnitude, 5),
                                    (rs.option.filter_smooth_alpha, 1),
                                    (rs.option.filter_smooth_delta, 50),
                                    (rs.option.holes_fill, 3)],
                    temporal_params=[],
                    hole_params=[]):
    """Filters to cleanup depth maps.
    """
    # Filters and settings
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Depth to disparity before spatial and temporal filters
    frame = depth_to_disparity.process(frame)

    # Spatial filter
    if enable_spatial:
        # Settings
        spatial = rs.spatial_filter()
        for spatial_param in spatial_params:
            spatial.set_option(spatial_param[0], spatial_param[1])
        # Apply on frame
        frame = spatial.process(frame)

    # Temporal filter
    if enable_temporal:
        temporal = rs.temporal_filter()
        for temporal_param in temporal_params:
            temporal.set_option(temporal_param[0], temporal_param[1])
        frame = temporal.process(frame)

    # Back to depth
    frame = disparity_to_depth.process(frame)

    # Hole filling
    if enable_hole:
        hole_filling = rs.hole_filling_filter()
        for hole_param in hole_params:
            hole_filling.set_option(hole_param[0], hole_param[1])
        frame = hole_filling.process(frame)

    return frame
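# Usage sketch (not from the original source): run the cleanup chain above on one
# depth frame, overriding the temporal filter settings. Assumes `frames` comes from
# a started rs.pipeline.
depth_frame = frames.get_depth_frame()
cleaned = post_processing(depth_frame,
                          enable_hole=False,
                          temporal_params=[(rs.option.filter_smooth_alpha, 0.4),
                                           (rs.option.filter_smooth_delta, 20)])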
profile = pipeline.get_active_profile()
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()
filters = [rs.disparity_transform(),
           rs.spatial_filter(),
           rs.temporal_filter(),
           rs.disparity_transform(False)]

# pyglet
window = pyglet.window.Window(
    config=gl.Config(
        double_buffer=True,
        samples=8  # MSAA
    ),
    resizable=True, vsync=True)
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)