Exemplo n.º 1
0
    def __init__(self, filters=None):
        """
        Connect to a RealSense camera and set up depth post-processing.

        :param filters: iterable of filter names to enable; recognised
            values (from the branches below) are 'align', 'decimation',
            'spatial', 'temporal', 'hole_filling' and 'colorize'.
            Defaults to no filters.
        """
        # BUG FIX: use a None sentinel instead of a mutable default
        # argument (a shared [] persists across calls).
        self.filters = [] if filters is None else filters

        self.pipe = rs.pipeline()
        cfg = rs.config()
        profile = self.pipe.start(cfg)
        # camera parameters: metres represented by one depth unit
        self.depth_scale = profile.get_device().first_depth_sensor(
        ).get_depth_scale()

        # filters to apply to depth images
        if 'align' in self.filters:
            # align depth frames to the color stream
            self.align = rs.align(rs.stream.color)
        if 'decimation' in self.filters:
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)
        if 'spatial' in self.filters:
            self.spatial = rs.spatial_filter()
            # self.spatial.set_option(rs.option.holes_fill, 3)
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
            self.spatial.set_option(rs.option.filter_smooth_delta, 50)
        if 'temporal' in self.filters:
            # TODO: expose the iteration count as a parameter
            self.temporal = rs.temporal_filter()
            self.temporal_iters = 3
        if 'hole_filling' in self.filters:
            self.hole_filling = rs.hole_filling_filter()
        if 'colorize' in self.filters:
            self.colorizer = rs.colorizer()
Exemplo n.º 2
0
    def __init__(self, parameters, neural):
        """Configure and start the RealSense pipeline for a camera thread.

        :param parameters: settings object providing depth_width/height/fps
            and color_width/height/fps attributes
        :param neural: neural-network handle used by the camera loop
        """
        self.parameters = parameters
        # BUG FIX: `self.neural = neural` was assigned twice; keep one.
        self.neural = neural

        self.stream_frame = None
        config = rs.config()
        self.pipeline = rs.pipeline()
        config.enable_stream(rs.stream.depth, self.parameters.depth_width,
                             self.parameters.depth_height, rs.format.z16,
                             self.parameters.depth_fps)
        config.enable_stream(rs.stream.color, self.parameters.color_width,
                             self.parameters.color_height, rs.format.rgb8,
                             self.parameters.color_fps)

        profile = self.pipeline.start(config)
        # NOTE: despite its name this attribute holds the depth *scale*
        # (metres per depth unit), not the sensor object; the name is
        # kept so existing callers keep working.
        self.depth_sensor = profile.get_device().first_depth_sensor(
        ).get_depth_scale()
        self.align_stream = rs.align(rs.stream.color)
        self.decimation = rs.decimation_filter()
        self.hole_filling = rs.hole_filling_filter()
        self.spatial = rs.spatial_filter()

        # BUG FIX: corrected the "stared" typo in the log message.
        print("camera loop has started")

        threading.Thread.__init__(self)
Exemplo n.º 3
0
    def get_frame_stream(self):
        """Grab one aligned (color, depth) frame pair from the pipeline.

        :return: ``(ok, color_image, depth_frame)`` — ``ok`` is False (with
            None for both images) when a coherent pair could not be
            retrieved; ``color_image`` is a numpy array and ``depth_frame``
            the aligned RealSense depth frame object.
        """
        # Wait for a coherent pair of frames: depth and color
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            # If there is no frame, probably camera not connected, return False
            print(
                "Error, impossible to get the frame, make sure that the Intel Realsense camera is correctly connected"
            )
            print("Depth frame: " + str(bool(depth_frame)) + " Color: " +
                  str(bool(color_frame)))
            return False, None, None

        # NOTE(review): the original also ran a spatial filter, hole filling
        # and a colorizer here, but none of those results were returned or
        # stored, so that dead work has been removed. If the *filled* depth
        # was meant to be returned instead of the raw aligned frame, that
        # would be a deliberate behaviour change to make separately.
        color_image = np.asanyarray(color_frame.get_data())

        return True, color_image, depth_frame
Exemplo n.º 4
0
 def filtering(self, frame):
     """Run the standard depth post-processing chain over *frame*.

     The spatial and temporal smoothing steps operate in disparity space;
     the frame is converted back to depth before hole filling.
     """
     # Configure each stage of the chain.
     dec = rs.decimation_filter()
     dec.set_option(rs.option.filter_magnitude, 1)

     smooth = rs.spatial_filter()
     smooth.set_option(rs.option.filter_smooth_alpha, 0.6)
     smooth.set_option(rs.option.filter_smooth_delta, 8)

     steady = rs.temporal_filter()
     steady.set_option(rs.option.filter_smooth_alpha, 0.5)
     steady.set_option(rs.option.filter_smooth_delta, 20)

     holes = rs.hole_filling_filter()
     to_disparity = rs.disparity_transform(True)
     to_depth = rs.disparity_transform(False)

     # Apply in order: decimate, smooth in disparity space, back to
     # depth, then fill remaining holes.
     for stage in (dec, to_disparity, smooth, steady, to_depth, holes):
         frame = stage.process(frame)
     return frame
    def __init__(self):
        """Open the RealSense camera, configure streams/filters and grab a
        background snapshot for later subtraction.

        NOTE(review): relies on class-level attributes ``self.W`` / ``self.H``
        and on ``__acquire_raw_aligned`` / ``__depth_raw_aligned`` defined
        elsewhere in this class — confirm against the full class body.
        """
        # Initialize RealSense intrinsics
        self.diagonal = np.linalg.norm((self.W, self.H))
        self.pipeline = rs.pipeline()
        self.aligner = rs.align(rs.stream.color)
        self.config = rs.config()
        # Depth streams at a fixed 640x360; color uses the class W x H.
        self.config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, self.W, self.H,
                                  rs.format.bgr8, 30)
        self.temporal_filter = rs.temporal_filter()
        self.hole_filling_filter = rs.hole_filling_filter()

        # Start up camera
        profile = self.pipeline.start(self.config)

        # Set camera options
        sensor = profile.get_device().first_depth_sensor()
        sensor.set_option(rs.option.enable_auto_exposure, 1)
        # sensor.set_option(rs.option.exposure, 5000)

        # Acquire an initial set of frames used for calibration.
        # Flush 30 frames to get the Intel temporal filter warmed up
        # (the original comment said 10, but the loop runs 30 times).
        for i in range(30):
            self.__acquire_raw_aligned()

        # Save a snapshot of the background for later subtraction, blur it for denoising purposes
        self.__depth_background = ndimage.gaussian_filter(
            self.__depth_raw_aligned, 20)
Exemplo n.º 6
0
    def __init__(self, w=640, h=480, clipping_dist_meters=1):
        """Start a ``w`` x ``h`` depth+color stream with alignment/filters.

        :param w: stream width in pixels
        :param h: stream height in pixels
        :param clipping_dist_meters: background-clipping distance in metres
        """
        self.w, self.h = w, h

        # Pipeline + stream configuration (depth z16 / color bgr8 @ 30 fps).
        self.pipeline = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)
        prof = self.pipeline.start(cfg)

        # Depth scale: metres per depth unit (see the rs-align example).
        self.depth_scale = prof.get_device().first_depth_sensor(
        ).get_depth_scale()
        print("Depth Scale is: ", self.depth_scale)

        # Objects farther than clipping_dist_meters are treated as
        # background; convert the distance from metres to depth units.
        self.clipping_distance = clipping_dist_meters / self.depth_scale

        # rs.align maps depth frames onto the color stream's viewpoint.
        self.align = rs.align(rs.stream.color)

        # Depth post-processing filters.
        self.decimation_filter = rs.decimation_filter()  # default magnitude 2
        self.spatial_filter = rs.spatial_filter(smooth_alpha=.6,
                                                smooth_delta=20,
                                                magnitude=2,
                                                hole_fill=0)
        # Default mode: fill from the neighbouring pixel farthest from sensor.
        self.hole_fill_filter = rs.hole_filling_filter(
        )
Exemplo n.º 7
0
    def __init__(self,
                 cam_id,
                 filter_depth=True,
                 frame=None,
                 registration_mode=RealSenseRegistrationMode.DEPTH_TO_COLOR):
        """Store camera settings and build the RealSense helper objects.

        :param cam_id: identifier of the camera to open
        :param filter_depth: whether depth frames should be filtered
        :param frame: reference frame name; defaults to 'realsense'
        :param registration_mode: how depth is registered to color
        """
        self._running = None

        self.id = cam_id
        self._registration_mode = registration_mode
        self._filter_depth = filter_depth

        # Default the frame name when the caller did not supply one.
        self._frame = 'realsense' if frame is None else frame
        self._color_frame = f'{self._frame}_color'

        # RealSense pipeline, configuration and depth-to-color alignment.
        self._pipe = rs.pipeline()
        self._cfg = rs.config()
        self._align = rs.align(rs.stream.color)

        # Camera parameters, filled in once streaming starts.
        self._depth_scale = None
        self._intrinsics = np.eye(3)

        # Post-processing helpers.
        self._colorizer = rs.colorizer()
        self._spatial_filter = rs.spatial_filter()
        self._hole_filling = rs.hole_filling_filter()
Exemplo n.º 8
0
    def __init__(self):
        """Discover every connected RealSense device and build, per device,
        a stream config plus a dictionary of post-processing filters."""
        ctx = rs.context()
        self.devices = ctx.query_devices()
        self.configs = list()
        self.filters = list()
        for device in self.devices:
            config = rs.config()
            config.enable_device(device.get_info(rs.camera_info.serial_number))
            config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.bgr8, 30)
            self.configs.append(config)
            align = rs.align(rs.stream.color)
            spatial = rs.spatial_filter()
            spatial.set_option(rs.option.filter_magnitude, 5)
            spatial.set_option(rs.option.filter_smooth_alpha, 1)
            spatial.set_option(rs.option.filter_smooth_delta, 50)
            # BUG FIX: was `ts.option.holes_fill` — `ts` is undefined and
            # raised NameError at runtime; the module alias is `rs`.
            spatial.set_option(rs.option.holes_fill, 3)
            temporal = rs.temporal_filter()
            hole_filling = rs.hole_filling_filter()
            depth_to_disparity = rs.disparity_transform(True)
            disparity_to_depth = rs.disparity_transform(False)
            decimate = rs.decimation_filter()
            # One filter bundle per device, keyed by role.
            self.filters.append({
                'align': align,
                'spatial': spatial,
                'temporal': temporal,
                'hole': hole_filling,
                'disparity': depth_to_disparity,
                'depth': disparity_to_depth,
                'decimate': decimate
            })
Exemplo n.º 9
0
 def __init__(self):
     """Build the ordered depth post-processing chain.

     Smoothing runs in disparity space: the chain converts to disparity,
     fills holes and smooths, then converts back to depth.
     """
     decimate = rs.decimation_filter(RESCALE)
     to_disparity = rs.disparity_transform(True)
     holes = rs.hole_filling_filter(1)
     smooth = rs.spatial_filter(0.5, 8, 2, 2)
     steady = rs.temporal_filter(0.5, 20, 1)
     to_depth = rs.disparity_transform(False)
     self.filters = [decimate, to_disparity, holes, smooth, steady, to_depth]
Exemplo n.º 10
0
    def __init__(self):
        """Start a RealSense pipeline streaming depth and color frames."""
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, REALSENSE_WIDTH, REALSENSE_HEIGHT,
                          rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, REALSENSE_WIDTH, REALSENSE_HEIGHT,
                          rs.format.bgr8, 30)
        self.pipeline = rs.pipeline()
        self.colorizer = rs.colorizer()
        self.hole_filler = rs.hole_filling_filter()
        self.profile = self.pipeline.start(cfg)

        # Drop the first few frames so auto-exposure can settle.
        for _ in range(5):
            self.pipeline.wait_for_frames()
 def refresh_mat(self):
     """Fetch the next frame pair and cache hole-filled depth/color arrays.

     Updates ``self.frames``, ``self.depth`` (hole-filled), ``self.color``,
     ``self.depthmat`` and ``self.colormat``.
     """
     # BUG FIX: `pipeline` was referenced as a bare name (NameError unless
     # a module-level pipeline exists); all other state here lives on
     # `self`, so use the instance's pipeline. Confirm no module-level
     # `pipeline` was intended.
     self.frames = self.pipeline.wait_for_frames()
     self.depth = self.frames.get_depth_frame()
     self.color = self.frames.get_color_frame()
     hole_filling = rs.hole_filling_filter()
     self.depth = hole_filling.process(self.depth)
     self.depthmat = np.asanyarray(self.depth.get_data())
     self.colormat = np.asanyarray(self.color.get_data())
Exemplo n.º 12
0
def GetStandardDeviationsFromBag(bag_file_path, frame_index_difference = 10, do_analysis_every_n_frames = 1, bag_timeout_ms = 500, filter=False):
    """Replay a recorded .bag file and measure temporal depth noise.

    Each analysed depth frame is compared (via ``calculateSD``) against the
    frame ``frame_index_difference`` frames earlier.

    :param bag_file_path: path to the recorded .bag file
    :param frame_index_difference: frame distance between compared pairs
    :param do_analysis_every_n_frames: analyse only every n-th frame
    :param bag_timeout_ms: wait_for_frames timeout; the end of the bag
        surfaces as a "frame didn't arrive" RuntimeError
    :param filter: apply decimation/spatial/hole-filling before analysis
        (parameter name kept for compatibility, although it shadows the
        ``filter`` builtin)
    :return: (all_frame_numbers, analysed_frame_numbers, std_devs) as arrays
    """
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config, bag_file_path, repeat_playback=False)
    # Disable real-time playback so no frames are dropped during analysis.
    profile = pipeline.start(config).get_device().as_playback().set_real_time(False)

    depth_frames_deque = deque()
    SDs = []
    FNs = []
    all_frame_numbers = []
    frames_since_last_analysis = 0

    if filter:
        spatial = rs.spatial_filter()
        decimation = rs.decimation_filter()
        hole_filling = rs.hole_filling_filter()
        hole_filling.set_option(rs.option.holes_fill, 2)

    try:
        while True:
            frames = pipeline.wait_for_frames(timeout_ms=bag_timeout_ms)
            fn = frames.frame_number
            all_frame_numbers += [fn]
            frames_since_last_analysis += 1
            cur_depth_frame = frames.get_depth_frame()

            if filter:
                cur_depth_frame = decimation.process(cur_depth_frame)
                cur_depth_frame = spatial.process(cur_depth_frame)
                cur_depth_frame = hole_filling.process(cur_depth_frame)

            depth_frames_deque.append(cur_depth_frame)

            if len(depth_frames_deque) > frame_index_difference:
                cur_depth_image = np.asanyarray(cur_depth_frame.get_data())
                past_depth_image = np.asanyarray(depth_frames_deque.popleft().get_data())
                if frames_since_last_analysis >= do_analysis_every_n_frames:
                    SDs += [calculateSD(cur_depth_image, past_depth_image)]
                    FNs += [fn]
                    frames_since_last_analysis = 0
    except Exception as e:
        print(e)
        # End-of-bag shows up as a "frame didn't arrive" timeout error;
        # anything else is a genuine failure.
        # BUG FIX: the original `finally: return ...` swallowed the
        # re-raised exception, silently converting real errors into
        # (possibly partial) results — and the `pass` after `raise`
        # was unreachable.
        if "arrive" not in str(e):
            raise
        pipeline.stop()
    return np.array(all_frame_numbers), np.array(FNs), np.array(SDs)
Exemplo n.º 13
0
    def start_pipe(self, align=True, usb3=True):
        """Open the RealSense pipeline at 640x480 (depth z16 + color bgr8).

        Idempotent: does nothing when the pipeline is already started.
        NOTE(review): it also does nothing when ``align`` is False, and
        ``usb3`` is only referenced in the commented-out variant below —
        confirm whether those paths were meant to be implemented.
        """
        if not self.pipelineStarted:
            if align:
                print('Etablissement de la connection caméra')
                # Create a config and configure the pipeline to stream
                #  different resolutions of color and depth streams
                self.pipeline = rs.pipeline()

                # Create a config and configure the pipeline to stream
                #  different resolutions of color and depth streams
                config = rs.config()
                config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16,
                                     30)
                config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                     30)

                # Start streaming
                self.profile = self.pipeline.start(config)

                # Align depth frames to the color stream.
                align_to = rs.stream.color
                self.align = rs.align(align_to)

                # Give the camera a moment to settle after starting.
                time.sleep(1)

                # self.pipeline = rs.pipeline()
                # config = rs.config()
                #
                # if usb3:
                #     config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
                #     config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
                #
                # else:
                #     self.profile = config.resolve(self.pipeline)  # does not start streaming
                #
                # self.profile = self.pipeline.start(config)
                # self.pipelineStarted = True
                # # Align the two streams
                # align_to = rs.stream.color
                # self.align = rs.align(align_to)
                self.pipelineStarted = True
                # Get depth scale
                depth_sensor = self.profile.get_device().first_depth_sensor()
                self.depth_scale = depth_sensor.get_depth_scale()

                # Create the post-processing filters.
                self.hole_filling = rs.hole_filling_filter()
                self.temporal_filter = rs.temporal_filter()
                self.spatial_filter = rs.spatial_filter()
                # NOTE(review): rs.disparity_transform(False) converts
                # disparity -> depth, so the attribute name
                # `depth_to_disparity` looks misleading — confirm before
                # renaming (callers may depend on it).
                self.depth_to_disparity = rs.disparity_transform(False)
                # Get Intrinsic parameters
                self.get_intrinsic()
                print('Caméra Ouverte')
Exemplo n.º 14
0
def depth_filter(depth_frame):
    """Smooth and hole-fill a depth frame.

    Spatial smoothing is done in disparity space; the frame is converted
    back to depth before holes are filled. Temporal filtering is
    intentionally left out.
    """
    to_disparity = rs.disparity_transform(True)
    to_depth = rs.disparity_transform(False)
    holes = rs.hole_filling_filter(2)
    smooth = rs.spatial_filter()

    # Apply the stages in order on the incoming frame.
    for stage in (to_disparity, smooth, to_depth, holes):
        depth_frame = stage.process(depth_frame)
    return depth_frame
Exemplo n.º 15
0
    def init_video_capture(self):
        """Open the RealSense D435, start streaming and build the filters.

        Terminates the whole process via sys.exit if the camera cannot be
        opened. Side effects: sets pipeline, depth_scale, align, the three
        filter attributes, started and read_lock on the instance.
        """
        try:
            # Create a pipeline
            self.pipeline = rs.pipeline()

            # Create a config and configure the pipeline to stream
            #  different resolutions of color and depth streams
            config = rs.config()
            config.enable_stream(rs.stream.depth, DEPTH_RES_X, DEPTH_RES_Y,
                                 rs.format.z16, DEPTH_FPS)
            config.enable_stream(rs.stream.color, RGB_RES_X, RGB_RES_Y,
                                 rs.format.bgr8, RGB_FPS)
            # config.enable_stream(rs.stream.infrared, 1, DEPTH_RES_X, DEPTH_RES_Y, rs.format.y8, DEPTH_FPS)
            # config.enable_stream(rs.stream.infrared, 2, DEPTH_RES_X, DEPTH_RES_Y, rs.format.y8, DEPTH_FPS)

            # Start streaming
            profile = self.pipeline.start(config)

            # Getting the depth sensor's depth scale (see rs-align example for explanation)
            depth_sensor = profile.get_device().first_depth_sensor()
            self.depth_scale = depth_sensor.get_depth_scale()
            # print("[RealSense D435]: Depth scale", self.depth_scale)

            # TODO: Allow settings to be changed on initializing the function
            depth_sensor.set_option(rs.option.laser_power, 360)  # 0 - 360
            depth_sensor.set_option(
                rs.option.depth_units,
                0.001)  # Number of meters represented by a single depth unit

            # Create an align object
            # rs.align allows us to perform alignment of depth frames to others frames
            # The "align_to" is the stream type to which we plan to align depth frames.
            align_to = rs.stream.color
            self.align = rs.align(align_to)

            # Depth post-processing filters (applied elsewhere).
            self.hole_filling_filter = rs.hole_filling_filter()
            self.decimation_filter = rs.decimation_filter()
            self.temporal_filter = rs.temporal_filter()
        except Exception as e:
            print('[RealsenseD435Camera]: ERROR:', e, file=sys.stderr)
            print(
                '[RealsenseD435Camera]: Could not initialize camera. If the resource is busy, check if any other script '
                'is currently accessing the camera. If this is not the case, replug the camera and try again.',
                file=sys.stderr)
            # NOTE(review): exits with status 0 on an error path — a
            # nonzero exit code would be more conventional.
            sys.exit(0)

        self.init_colorizer()

        self.started = False
        self.read_lock = threading.Lock()
Exemplo n.º 16
0
 def filtering(self):
     """Run the full filter chain over every frame in ``self.depth_frams``
     and cache the last filtered frame's data plus a colorized version.

     NOTE(review): ``depth_frams`` looks like a typo for ``depth_frames`` —
     confirm the attribute name used elsewhere in this class before fixing.
     Also: if the list is empty, the post-loop use of ``frame`` raises
     NameError, and only the *last* frame's result is kept (earlier frames
     only serve to warm up the temporal filter).
     """
     depth_to_disparity = rs.disparity_transform(True)
     disparity_to_depth = rs.disparity_transform(False)
     spatial = rs.spatial_filter()
     temporal = rs.temporal_filter()
     hole_filling = rs.hole_filling_filter()
     # Smooth in disparity space, convert back to depth, then fill holes.
     for frame in self.depth_frams:
         frame = depth_to_disparity.process(frame)
         frame = spatial.process(frame)
         frame = temporal.process(frame)
         frame = disparity_to_depth.process(frame)
         frame = hole_filling.process(frame)
     self.aligned_depth_frame = frame.get_data()
     self.colorized_depth = np.asanyarray(
         self.colorizer.colorize(frame).get_data())
Exemplo n.º 17
0
def post_processing(frame,
                    enable_spatial=True,
                    enable_temporal=True,
                    enable_hole=True,
                    spatial_params=((rs.option.filter_magnitude, 5),
                                    (rs.option.filter_smooth_alpha, 1),
                                    (rs.option.filter_smooth_delta, 50),
                                    (rs.option.holes_fill, 3)),
                    temporal_params=(),
                    hole_params=()):
    """Filters to cleanup depth maps.

    Spatial and temporal filtering run in disparity space; the frame is
    converted back to depth before hole filling.

    :param frame: depth frame to clean up
    :param enable_spatial: apply the spatial (edge-preserving) filter
    :param enable_temporal: apply the temporal filter
    :param enable_hole: apply the hole-filling filter
    :param spatial_params: (option, value) pairs for the spatial filter
    :param temporal_params: (option, value) pairs for the temporal filter
    :param hole_params: (option, value) pairs for the hole-filling filter
    :return: the filtered frame

    NOTE: the ``*_params`` defaults are tuples rather than lists so the
    defaults are immutable (avoids the shared-mutable-default pitfall);
    callers may still pass lists.
    """
    # Filters and settings
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Depth to disparity before spatial and temporal filters
    frame = depth_to_disparity.process(frame)

    # Spatial filter
    if enable_spatial:
        spatial = rs.spatial_filter()
        for opt, val in spatial_params:
            spatial.set_option(opt, val)
        frame = spatial.process(frame)

    # Temporal filter
    if enable_temporal:
        temporal = rs.temporal_filter()
        for opt, val in temporal_params:
            temporal.set_option(opt, val)
        frame = temporal.process(frame)

    # Back to depth
    frame = disparity_to_depth.process(frame)

    # Hole filling
    if enable_hole:
        hole_filling = rs.hole_filling_filter()
        for opt, val in hole_params:
            hole_filling.set_option(opt, val)
        frame = hole_filling.process(frame)

    return frame
Exemplo n.º 18
0
 def __init__(self, configFile):
     """Load camera configuration and set up RealSense helper objects.

     :param configFile: path to a JSON config containing at least the keys
         "advanced" (path to an advanced-mode JSON) and "id" (camera id)
     """
     # Use context managers so the config file handles are closed promptly
     # (the original left two open() handles to the garbage collector).
     with open(configFile) as f:
         self.config = json.load(f)
     with open(self.config["advanced"]) as f:
         self.advanced_cfg = json.load(f)
     # Advanced-mode JSON wants double quotes, not Python repr quotes.
     self.advanced_cfg = str(self.advanced_cfg).replace("'", '\"')
     # BUG FIX: a stray trailing comma previously made this the *truthy*
     # tuple (None,) instead of None.
     self._running = None
     self.id = self.config["id"]
     self._pipe = rs.pipeline()
     self._rs_cfg = rs.config()
     self._align = rs.align(rs.stream.color)
     self._intrinsics = {}
     ctx = rs.context()
     self._spatial_filter = rs.spatial_filter()
     self._hole_filter = rs.hole_filling_filter()
     self.depth_cam = True
     # prints out cams and S/N, which should be defined as id in config
     for d in ctx.devices:
         logger.info("Cam {d} connected.", d=d)
Exemplo n.º 19
0
    def __init__(self):
        """Start the RealSense depth camera and prepare alignment/filters."""
        # ---------------------------------------------------------
        # real sense
        # ---------------------------------------------------------
        # Configure the Depth / Color streams.
        print("start Depth camera")
        width: int = 640
        height: int = 480
        config = rs.config()
        config.enable_stream(rs.stream.color, width, height, rs.format.bgr8,
                             30)
        config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)

        # Colorizer for depth visualisation.
        self.colorizer = rs.colorizer()

        # temporal filter
        self.temporal = rs.temporal_filter()

        # hole filling filter
        self.hole_filling = rs.hole_filling_filter()

        # Start streaming.
        self.pipeline = rs.pipeline()
        profile = self.pipeline.start(config)

        # Skip 5 first frames to give the Auto-Exposure time to adjust
        for x in range(5):
            self.pipeline.wait_for_frames()

        # Get device information.
        depth_sensor = profile.get_device().first_depth_sensor()
        #print("depth sensor:",depth_sensor)
        self.depth_scale = depth_sensor.get_depth_scale()
        #print("depth scale:",depth_scale)
        clipping_distance_in_meters = 1.0  # meter
        # NOTE(review): clipping_distance is only printed, never stored on
        # self — confirm whether callers were meant to use it.
        clipping_distance = clipping_distance_in_meters / self.depth_scale
        print("clipping_distance:", clipping_distance)

        # Create the align object (aligns depth frames to the color stream).
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        self.frameNo = 0
        self.timenow = ""
Exemplo n.º 20
0
 def create_filters(self):
     """Build the post-processing filters and return them keyed by a
     short code: S=spatial, T=temporal, H=hole filling,
     DZ=disparity-to-depth, ZD=depth-to-disparity.
     """
     smoothing = rs.spatial_filter()
     # Configure the spatial filter from (option, value) pairs.
     for opt, val in ((rs.option.filter_magnitude, 5),
                      (rs.option.filter_smooth_alpha, 1),
                      (rs.option.filter_smooth_delta, 50),
                      (rs.option.holes_fill, 3)):
         smoothing.set_option(opt, val)
     return {
         "S": smoothing,
         "T": rs.temporal_filter(),
         "H": rs.hole_filling_filter(),
         "DZ": rs.disparity_transform(False),
         "ZD": rs.disparity_transform(True),
     }
Exemplo n.º 21
0
    def getFrames(self):
        """Fetch an aligned frame pair and cache derived images.

        Side effects: sets ``color_image``, ``depth_image`` (colorized),
        ``filled_depth_values`` and ``filled_depth_image`` on the instance.
        """
        # Wait for a coherent Color & Depth frameset.
        frames = self.pipeline.wait_for_frames()
        #frameNo = frames.get_frame_number()
        aligned_frames = self.align.process(frames)
        color_frame = aligned_frames.get_color_frame()
        depth_frame = aligned_frames.get_depth_frame()

        self.color_image = np.asanyarray(color_frame.get_data())
        self.depth_image = np.asanyarray(
            self.colorizer.colorize(depth_frame).get_data())
        # Hole-filling filter (constructed on every call; could be cached).
        hole_filling = rs.hole_filling_filter()
        filled_depth_frame = hole_filling.process(depth_frame)
        self.filled_depth_values = np.asanyarray(
            filled_depth_frame.get_data())  # raw z16 depth units — the
        # original comment claimed [cm]; actual units depend on depth_scale.
        # print(filled_depth_values[100,100])
        self.filled_depth_image = np.asanyarray(
            self.colorizer.colorize(filled_depth_frame).get_data())
Exemplo n.º 22
0
    def __init__(self, serial=None, resolution="high", calib_file=None):
        """Prepare a D415 camera wrapper: resolution preset, pipeline,
        post-processing filters and depth-to-color alignment.

        :param serial: camera serial number; auto-discovered when None
        :param resolution: 'low' or 'high' preset from ``constants.D415``
        :param calib_file: optional path to a calibration file
        """
        self._is_start = False
        self._frame_id = 0
        self._auto_exp_skip = 30
        self.calib_file = calib_file

        assert resolution in ['low', 'high'], \
            "`resolution` can only be [`low` or `high`]"

        if serial is not None:
            self._serial = serial
        else:
            # Fall back to the first connected RealSense.
            serials = utils.rs_discover_cams()
            if not serials:
                raise ValueError(
                    "Could not find connected camera. Ensure USB 3.0 is used.")
            self._serial = serials[0]

        # Pick the stream geometry for the requested preset.
        preset = constants.D415
        if resolution == "low":
            self._height = preset.HEIGHT_L.value
            self._width = preset.WIDTH_L.value
            self._fps = preset.FPS_L.value
        else:
            self._height = preset.HEIGHT_H.value
            self._width = preset.WIDTH_H.value
            self._fps = preset.FPS_H.value
        self._resolution = (self._width, self._height)

        # Pipeline and stream configuration.
        self._pipe = rs.pipeline()
        self._cfg = rs.config()

        # Depth post-processing filters.
        self._spatial_filter = rs.spatial_filter()
        self._hole_filling = rs.hole_filling_filter()
        self._temporal_filter = rs.temporal_filter()

        # Colorizer for visualisation, plus depth-to-color alignment.
        self._colorizer = rs.colorizer()
        self._align = rs.align(rs.stream.color)
Exemplo n.º 23
0
def get_filter(filter_name: str, *args) -> rs2.filter_interface:
    """
    Factory for RealSense post-processing filters.

    :param filter_name: The filter name
    :param args: The arguments forwarded to the filter constructor
    :return: A filter which corresponds to the filter name with it's arguments
    :raises ValueError: if the filter name is unknown (ValueError is a
        subclass of Exception, so existing ``except Exception`` callers
        still work)
    """
    # Dispatch table instead of an if/elif chain.
    factories = {
        'decimation_filter': rs2.decimation_filter,
        'threshold_filter': rs2.threshold_filter,
        'disparity_transform': rs2.disparity_transform,
        'spatial_filter': rs2.spatial_filter,
        'temporal_filter': rs2.temporal_filter,
        'hole_filling_filter': rs2.hole_filling_filter,
    }
    try:
        factory = factories[filter_name]
    except KeyError:
        raise ValueError(f'The filter \'{filter_name}\' does not exist!') from None
    return factory(*args)
Exemplo n.º 24
0
def post_process(image, *, fill_holes=False):
    """Spatially and temporally smooth a depth frame.

    :param image: depth frame to filter
    :param fill_holes: also apply hole filling when True. Keyword-only,
        default False — the default matches the original behaviour, where
        the hole-filling step was commented out (and its filter object was
        created but never used; it is now only built on demand).
    :return: the filtered frame
    """
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    spatial_filter.set_option(filter_magnitude, 2)
    spatial_filter.set_option(filter_smooth_alpha, 0.5)
    spatial_filter.set_option(filter_smooth_delta, 20)
    temporal_filter.set_option(filter_smooth_alpha, 0.4)
    temporal_filter.set_option(filter_smooth_delta, 20)

    # Apply the filters
    filtered_frame = spatial_filter.process(image)
    filtered_frame = temporal_filter.process(filtered_frame)
    if fill_holes:
        filtered_frame = rs.hole_filling_filter().process(filtered_frame)

    return filtered_frame
Exemplo n.º 25
0
def filter_depth(frame):
    """Fill holes in a depth frame.

    Only hole filling is applied: the heavier stages (decimation, spatial,
    temporal, disparity round-trip) cost too much FPS, and decimation also
    shrinks the frame.

    reference: https://github.com/IntelRealSense/librealsense/blob/jupyter/notebooks/depth_filters.ipynb
    """
    return rs.hole_filling_filter().process(frame)
    def __init__(self,
                 image_w=640,
                 image_h=480,
                 image_d=3,
                 compress_depth=True):
        """Start the RealSense pipeline at 848x480 @ 30 fps with filters.

        NOTE(review): uses bare names (``colorizer``, ``pipeline``,
        ``config``, ``stream``, ``option``) — presumably star-imported from
        pyrealsense2 at module level; confirm. ``image_d`` is unused in this
        constructor, and the hard-coded 848x480 streams do not match the
        ``image_w``/``image_h`` defaults — verify intent.
        """
        self.combined_array = None
        self.compress_depth = compress_depth
        self.running = True
        self.image_w = image_w
        self.image_h = image_h
        #self.depth, self.color = None, None

        #Set stream speed and resolution
        self.fps = 30
        #start post processing
        self.colorizer = colorizer()
        self.holefiller = hole_filling_filter(1)
        self.spatial = spatial_filter()
        self.temporal = temporal_filter()
        #start the Realsense pipeline
        self.pipe = pipeline()
        #load the configuration
        cfg = config()
        cfg.enable_stream(stream.color,
                          stream_index=-1,
                          width=848,
                          height=480,
                          framerate=30)
        cfg.enable_stream(stream.depth,
                          stream_index=-1,
                          width=848,
                          height=480,
                          framerate=30)
        #start the pipeline
        profile = self.pipe.start(cfg)
        # Set depth units to 100um (0.0001 m per depth unit).
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(option.depth_units, 0.0001)
Exemplo n.º 27
0
    # NOTE(review): fragment — the enclosing function definition begins
    # above this excerpt; `addr` and `req_port` are defined outside it.
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, DEPTH_CAMERA_RES[0],
                         DEPTH_CAMERA_RES[1], rs.format.z16, 30)
    config.enable_stream(rs.stream.color, COLOR_CAMERA_RES[0],
                         COLOR_CAMERA_RES[1], rs.format.bgr8, 30)

    # Spatial smoothing plus hole filling for the depth stream.
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)
    spatial.set_option(rs.option.holes_fill, 3)
    hole_filling = rs.hole_filling_filter()

    # Align process for realsense frames (depth aligned to color).
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Start streaming for Realsense
    pipeline.start(config)

    # Start connecting pupil tracker over ZeroMQ.
    context = zmq.Context()
    req = context.socket(zmq.REQ)  # open a REQ socket to talk to pupil
    req.connect("tcp://%s:%s" % (addr, req_port))
    req.send(b'SUB_PORT')  # ask for the sub port
    sub_port = req.recv()
Exemplo n.º 28
0
# NOTE(review): fragment — `depth_sensor` is created above this excerpt.
depth_scale = depth_sensor.get_depth_scale()  # metres per depth unit
align_to = rs.stream.color
align = rs.align(align_to)

# Setting up filters
dec_filter = rs.decimation_filter()
dec_filter.set_option(rs.option.filter_magnitude, 4)

spa_filter = rs.spatial_filter()
spa_filter.set_option(rs.option.filter_magnitude, 5)
spa_filter.set_option(rs.option.filter_smooth_alpha, 1)
spa_filter.set_option(rs.option.filter_smooth_delta, 50)
spa_filter.set_option(rs.option.holes_fill, 3)

tmp_filter = rs.temporal_filter()
hol_fillin = rs.hole_filling_filter()

dep_to_dis = rs.disparity_transform(True)
dis_to_dep = rs.disparity_transform(False)

# Pixel-index grids for a frame decimated by 4 (480/4 rows x 640/4 cols),
# plus clamped neighbour indices one pixel away in each direction.
d1, d2 = int(480 / 4), int(640 / 4)
idx, jdx = np.indices((d1, d2))
idx_back = np.clip(idx - 1, 0, idx.max()).flatten()
idx_front = np.clip(idx + 1, 0, idx.max()).flatten()
jdx_back = np.clip(jdx - 1, 0, jdx.max()).flatten()
jdx_front = np.clip(jdx + 1, 0, jdx.max()).flatten()
idx = idx.flatten()
jdx = jdx.flatten()

# rand_idx = np.random.choice(np.arange(idx.shape[0]), size=d1*d2, replace=False)
Exemplo n.º 29
0
def capture():
    """Stream RealSense depth+color, detect faces, and save cropped frames.

    Renders each background-removed frame as ASCII art on the terminal.
    Once any detected face comes within ``clipping_distance_in_meters``,
    starts saving every 3rd frame's background-removed crop to
    ``maked_dump/<n>.jpg`` until 7 images are written, then exits.

    Side effects: starts/stops the RealSense pipeline, prints to stdout,
    writes JPEG files. Relies on module-level ``faceCascade`` (Haar
    cascade) and ``AsciiImage`` defined elsewhere in the file.
    Returns None.
    """
    framecap = 0            # number of crops written so far (stop at 7)
    framecap_count = 0      # frames since last save; write every 3rd frame
    start_capture = False   # latched True once a face is in range

    # Create a pipeline with matching 1280x720 depth and color streams.
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 6)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 6)

    # Start streaming
    profile = pipeline.start(config)

    # Depth scale converts raw z16 units to meters (see rs-align example).
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # Pixels further away than this are treated as background.
    clipping_distance_in_meters = 1.2  # meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Align depth frames to the color stream's viewpoint.
    align = rs.align(rs.stream.color)

    # Create the hole-filling filter once, outside the streaming loop
    # (the original re-allocated it on every frame).
    hole_filling = rs.hole_filling_filter()

    # Streaming loop
    try:
        while True:
            frames = pipeline.wait_for_frames()

            # Align the depth frame to the color frame.
            aligned_frames = align.process(frames)
            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            filled_depth = hole_filling.process(aligned_depth_frame)

            # Validate that both frames are valid before using them.
            if not filled_depth or not color_frame:
                continue

            depth_image = np.asanyarray(filled_depth.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Replicate the 1-channel depth to 3 channels so it can be
            # compared elementwise against the 3-channel color image.
            depth_image_3d = np.dstack(
                (depth_image, depth_image, depth_image))
            out_of_range = ((depth_image_3d > clipping_distance)
                            | (depth_image_3d <= 0))
            bg_removed = np.where(out_of_range, 255, color_image)   # white bg
            bg_removed2 = np.where(out_of_range, 0, color_image)    # black bg

            # Crop the central region via NumPy slicing.
            cropped = bg_removed[0:720, 280:1000]
            cropped2 = bg_removed2[0:720, 280:1000]

            # Face detection on the full color image.
            gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
            faces = faceCascade.detectMultiScale(gray,
                                                 scaleFactor=1.1,
                                                 minNeighbors=5,
                                                 minSize=(30, 30),
                                                 flags=cv2.CASCADE_SCALE_IMAGE)

            # Render the black-background crop as ASCII art on the terminal.
            img = AsciiImage(cropped2, scalefactor=0.19)
            print(img)

            # Arm capture once any detected face is within range.
            # NOTE(review): distance is sampled at the rectangle's top-left
            # corner, not its center — confirm this is intended.
            for (x, y, w, h) in faces:
                dist = aligned_depth_frame.get_distance(x, y)
                if dist < clipping_distance_in_meters:
                    start_capture = True

            # After capture starts, save every 3rd frame, up to 7 images.
            if framecap < 7 and start_capture:
                framecap_count += 1
                if framecap_count > 2:
                    cv2.imwrite("maked_dump/" + str(framecap) + ".jpg",
                                cropped)
                    framecap += 1
                    framecap_count = 0

            # Stop after 7 saved images, or on 'q' / Esc.
            key = cv2.waitKey(1)
            if framecap == 7 or key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()
Exemplo n.º 30
0
def main():
    """Stream aligned RealSense depth+color and render a live point cloud.

    Filters each depth frame (disparity-space spatial + temporal
    smoothing), builds an Open3D RGBD image truncated at 3 m, and updates
    an Open3D visualizer window, printing the achieved FPS per frame.
    Runs until interrupted (Ctrl-C); the pipeline and window are always
    torn down. Returns None.
    """
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

    # Start streaming and align depth frames to the color viewpoint.
    cfg = pipeline.start(config)
    align = rs.align(rs.stream.color)
    profile = cfg.get_stream(rs.stream.depth)

    vis = o3d.visualization.Visualizer()
    vis.create_window('PCD', width=1280, height=720)
    pointcloud = o3d.geometry.PointCloud()
    geom_added = False

    intrinsics = profile.as_video_stream_profile().get_intrinsics()
    print(intrinsics)

    depth_sensor = cfg.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # Points further away than this are truncated from the RGBD image.
    clipping_distance_in_meters = 3  # meters

    # Filters: spatial edge-preserving smoothing and temporal smoothing,
    # applied in disparity space (True = depth->disparity and back).
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)

    temporal = rs.temporal_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Camera intrinsics are constant for the session: build the Open3D
    # intrinsic once instead of re-creating it every frame.
    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
        intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
        intrinsics.ppx, intrinsics.ppy)

    # The loop runs until Ctrl-C; try/finally guarantees the cleanup runs
    # (in the original it sat unreachable after `while True`).
    try:
        while True:
            dt0 = datetime.now()
            frames = pipeline.wait_for_frames()
            frames = align.process(frames)

            depth_frame = frames.get_depth_frame()
            depth_frame = depth_to_disparity.process(depth_frame)
            depth_frame = spatial.process(depth_frame)
            depth_frame = temporal.process(depth_frame)
            depth_frame = disparity_to_depth.process(depth_frame)

            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert frames to numpy arrays, then wrap as Open3D images.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            img_depth = o3d.geometry.Image(depth_image)
            img_color = o3d.geometry.Image(color_image)
            rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
                img_color,
                img_depth,
                depth_trunc=clipping_distance_in_meters,
                convert_rgb_to_intensity=False)

            pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
                rgbd, pinhole_camera_intrinsic)
            # Flip about the x-axis so the cloud appears upright in the
            # visualizer's viewing convention.
            pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                           [0, 0, 0, 1]])
            pointcloud.points = pcd.points
            pointcloud.colors = pcd.colors

            # Add the geometry once, then only update it on later frames.
            if not geom_added:
                vis.add_geometry(pointcloud)
                geom_added = True

            vis.update_geometry(pointcloud)
            vis.poll_events()
            vis.update_renderer()

            process_time = datetime.now() - dt0
            print("FPS: " + str(1 / process_time.total_seconds()))
    except KeyboardInterrupt:
        pass
    finally:
        pipeline.stop()
        cv2.destroyAllWindows()
        vis.destroy_window()
        del vis