Example #1
File: pyrs.py  Project: yiskw713/pyrs
    def __init__(self, w=640, h=480, depth=True, frame_rate=30):
        '''
        Initializing the Python RealSense Control Flow:
        w: Int (default = 640, can also be 1280) 
        h: Int (default = 480, can also be 720)
        depth: Bool (default = True)
        frame_rate: Int (default = 30)

        RGB and Depth formats are: bgr8, z16

        Note: In this class, variables should not be directly changed.
        '''
        self.width = w
        self.height = h
        self.depth_on = depth
        self._pipeline = rs.pipeline()
        self._config = rs.config()
        self._config.enable_stream(rs.stream.color, w, h, rs.format.bgr8,
                                   frame_rate)
        self._intrinsic = None

        if depth:
            self.align = rs.align(rs.stream.color)
            self._preset = 0
            # Presets (for D415):
            # 0: Custom
            # 1: Default
            # 2: Hand
            # 3: High Accuracy
            # 4: High Density
            # 5: Medium Density

            # depth interpolation
            self.interpolation = cv2.INTER_NEAREST  # use nearest neighbor
            # self.interpolation = cv2.INTER_LINEAR  # linear
            # self.interpolation = cv2.INTER_CUBIC  # cubic

            # beautify depth image for viewing
            self._config.enable_stream(rs.stream.depth, w, h, rs.format.z16,
                                       frame_rate)
            self.colorizer = rs.colorizer()

            # initialize filters
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)

            self.depth_to_disparity = rs.disparity_transform(True)

            self.spatial = rs.spatial_filter()
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 0.5)
            self.spatial.set_option(rs.option.filter_smooth_delta, 20)

            self.temporal = rs.temporal_filter()

            self.disparity_to_depth = rs.disparity_transform(False)

        print(
            "Initialized RealSense Camera\nw: {}, h: {}, depth: {}, frame_rate: {}"
            .format(w, h, depth, frame_rate))
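The `_preset` index stored above maps to the D415 visual presets listed in the comments. A hedged sketch of how it could be applied once the pipeline is started (hypothetical continuation, not part of the original class; `rs.option.visual_preset` is the same option used in Example #19):

        # Hypothetical sketch: start the pipeline and apply the stored preset.
        profile = self._pipeline.start(self._config)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.visual_preset, self._preset)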
Example #2
def get_depth_filter_list(decimate=True,
                          d2d=True,
                          spatial=True,
                          temporal=True):
    filters = []
    if decimate:
        dec_filt = rs.decimation_filter()
        dec_filt.set_option(rs.option.filter_magnitude, 2)
        filters.append(dec_filt)

    if d2d:
        depth2disparity = rs.disparity_transform()
        filters.append(depth2disparity)

    if spatial:
        spat = rs.spatial_filter()
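        # FILL_ALL_ZERO_PIXELS is a module-level constant defined elsewhere,
        # presumably 5: the holes_fill value that fills all zero pixels (cf. Examples #5 and #19).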
        spat.set_option(rs.option.holes_fill, FILL_ALL_ZERO_PIXELS)
        filters.append(spat)

    if temporal:
        temp = rs.temporal_filter()
        filters.append(temp)

    if d2d:
        disparity2depth = rs.disparity_transform(False)
        filters.append(disparity2depth)

    return filters
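A short usage sketch (the frameset variable is hypothetical): the returned filters are meant to be applied in list order to each depth frame.

# Usage sketch (hypothetical frame source):
filters = get_depth_filter_list()
depth_frame = frames.get_depth_frame()
for f in filters:
    depth_frame = f.process(depth_frame)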
Example #3
 def filtering(self, frame):
     '''Filter setting'''
     # Decimation #
     decimation = rs.decimation_filter()
     decimation.set_option(rs.option.filter_magnitude, 1)
     # Spatial #
     spatial = rs.spatial_filter()
     # spatial.set_option(rs.option.filter_magnitude, 5)
     spatial.set_option(rs.option.filter_smooth_alpha, 0.6)
     spatial.set_option(rs.option.filter_smooth_delta, 8)
     # spatial.set_option(rs.option.holes_fill, 3)
     # Temporal #
     temporal = rs.temporal_filter()
     temporal.set_option(rs.option.filter_smooth_alpha, 0.5)
     temporal.set_option(rs.option.filter_smooth_delta, 20)
     # Hole #
     hole_filling = rs.hole_filling_filter()
     ##
     depth_to_disparity = rs.disparity_transform(True)
     disparity_to_depth = rs.disparity_transform(False)
     '''Applying filters'''
     frame = decimation.process(frame)
     frame = depth_to_disparity.process(frame)
     frame = spatial.process(frame)
     frame = temporal.process(frame)
     frame = disparity_to_depth.process(frame)
     frame = hole_filling.process(frame)
     return frame
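Note that decimation changes the frame resolution, so any cached intrinsics become stale; they can be re-queried from the filtered frame, as Example #22 does:

# Re-query intrinsics from the filtered frame (its profile reflects the decimated size):
intrinsics = rs.video_stream_profile(frame.profile).get_intrinsics()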
Example #4
    def __init__(self):

        ctx = rs.context()
        self.devices = ctx.query_devices()
        self.configs = list()
        self.filters = list()
        for device in self.devices:
            config = rs.config()
            config.enable_device(device.get_info(rs.camera_info.serial_number))
            config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.bgr8, 30)
            self.configs.append(config)
            align = rs.align(rs.stream.color)
            spatial = rs.spatial_filter()
            spatial.set_option(rs.option.filter_magnitude, 5)
            spatial.set_option(rs.option.filter_smooth_alpha, 1)
            spatial.set_option(rs.option.filter_smooth_delta, 50)
            spatial.set_option(rs.option.holes_fill, 3)
            temporal = rs.temporal_filter()
            hole_filling = rs.hole_filling_filter()
            depth_to_disparity = rs.disparity_transform(True)
            disparity_to_depth = rs.disparity_transform(False)
            decimate = rs.decimation_filter()
            self.filters.append({
                'align': align,
                'spatial': spatial,
                'temporal': temporal,
                'hole': hole_filling,
                'disparity': depth_to_disparity,
                'depth': disparity_to_depth,
                'decimate': decimate
            })
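A hedged usage sketch for one device's filter dict, applying the disparity-domain smoothing between the two transforms (hypothetical method on the same class; `frames` comes from the matching pipeline):

        f = self.filters[0]                    # filters for the first device
        frames = f['align'].process(frames)
        depth = frames.get_depth_frame()
        depth = f['decimate'].process(depth)
        depth = f['disparity'].process(depth)  # depth -> disparity
        depth = f['spatial'].process(depth)
        depth = f['temporal'].process(depth)
        depth = f['depth'].process(depth)      # disparity -> depth
        depth = f['hole'].process(depth)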
Example #5
    def _filter_depth_frame(depth_frame):
        """
        Filter chain applied to the depth frame before extracting coordinates.
        :param depth_frame: depth frame
        :return: filtered depth frame
        """
        dec = rs.decimation_filter()
        dec.set_option(rs.option.filter_magnitude, 1)
        depth_frame_pro = dec.process(depth_frame)

        depth2disparity = rs.disparity_transform()
        depth_frame_pro = depth2disparity.process(depth_frame_pro)

        spat = rs.spatial_filter()
        # Enable hole filling; 5 fills all zero pixels
        spat.set_option(rs.option.holes_fill, 5)
        depth_frame_pro = spat.process(depth_frame_pro)

        temp = rs.temporal_filter()
        depth_frame_pro = temp.process(depth_frame_pro)

        disparity2depth = rs.disparity_transform(False)
        depth_frame_pro = disparity2depth.process(depth_frame_pro)

        # depth_image_pro = np.asanyarray(depth_frame_pro.get_data())
        # depth_colormap_pro = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_pro, alpha=0.03), cv2.COLORMAP_JET)

        return depth_frame_pro
Example #6
 def __init__(self):
     self.filters = [
         rs.decimation_filter(RESCALE),
         rs.disparity_transform(True),
         rs.hole_filling_filter(1),
         rs.spatial_filter(0.5, 8, 2, 2),
         rs.temporal_filter(0.5, 20, 1),
         rs.disparity_transform(False)
     ]
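Here each filter is configured through positional constructor arguments rather than `set_option`; `RESCALE` is a constant defined elsewhere in the source module. A minimal application sketch (hypothetical `depth_frame` variable):

     # Usage sketch (hypothetical): run a depth frame through the chain in order.
     for f in self.filters:
         depth_frame = f.process(depth_frame)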
Example #7
def depth_filter(depth_frame):
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
    hole_filling = rs.hole_filling_filter(2)
    spatial = rs.spatial_filter()

    depth_frame = depth_to_disparity.process(depth_frame)
    depth_frame = spatial.process(depth_frame)
    # depth_frame = temporal.process(depth_frame)
    depth_frame = disparity_to_depth.process(depth_frame)
    depth_frame = hole_filling.process(depth_frame)

    return depth_frame
Example #8
    def __init__(self, debugFlag=False, debugPath=''):
        self.debugFlag = debugFlag

        # Decimation - reduces depth frame density
        self.decimateFilter = rs.decimation_filter()

        self.thresholdFilter = rs.threshold_filter(min_dist=1.8, max_dist=3)

        # Converts from depth representation to disparity representation and vice versa in depth frames
        self.depth_to_disparity = rs.disparity_transform(True)
        # Spatial    - edge-preserving spatial smoothing
        self.spatial_filter = rs.spatial_filter()
        # Temporal   - reduces temporal noise
        self.temporalFilter = rs.temporal_filter()
        self.disparity_to_depth = rs.disparity_transform(False)
Example #9
 def filtering(self):
     depth_to_disparity = rs.disparity_transform(True)
     disparity_to_depth = rs.disparity_transform(False)
     spatial = rs.spatial_filter()
     temporal = rs.temporal_filter()
     hole_filling = rs.hole_filling_filter()
     for frame in self.depth_frams:
         frame = depth_to_disparity.process(frame)
         frame = spatial.process(frame)
         frame = temporal.process(frame)
         frame = disparity_to_depth.process(frame)
         frame = hole_filling.process(frame)
     self.aligned_depth_frame = frame.get_data()
     self.colorized_depth = np.asanyarray(
         self.colorizer.colorize(frame).get_data())
Example #10
 def disparity(self, frame):
     """
     converts the depth frame to disparity
     """
     disparity = rs.disparity_transform(True)
     depthFrame = disparity.process(frame)
     return depthFrame
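A hypothetical counterpart for the reverse direction, mirroring the method above:

 def to_depth(self, frame):
     """
     converts a disparity frame back to depth (hypothetical counterpart sketch)
     """
     disparity_to_depth = rs.disparity_transform(False)
     return disparity_to_depth.process(frame)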
Example #11
    def prepare_filters(self):
        # prepare post-processing filters
        decimate = rs.decimation_filter()
        decimate.set_option(rs.option.filter_magnitude, 2 ** 3)
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)

        colorizer = rs.colorizer()
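        # Note: the list below instantiates fresh, default-configured filters;
        # the decimate and spatial instances configured above are not reused.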
        self.filters = [rs.disparity_transform(),
                        rs.decimation_filter(),
                        rs.spatial_filter(),
                        rs.temporal_filter(),
                        rs.disparity_transform(False)]
Example #12
def post_processing(frame,
                    enable_spatial=True,
                    enable_temporal=True,
                    enable_hole=True,
                    spatial_params=[(rs.option.filter_magnitude, 5),
                                    (rs.option.filter_smooth_alpha, 1),
                                    (rs.option.filter_smooth_delta, 50),
                                    (rs.option.holes_fill, 3)],
                    temporal_params=[],
                    hole_params=[]):
    """Filters to cleanup depth maps.
    """
    # Filters and settings
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Depth to disparity before spatial and temporal filters
    frame = depth_to_disparity.process(frame)

    # Spatial filter
    if enable_spatial:
        # Settings
        spatial = rs.spatial_filter()
        for spatial_param in spatial_params:
            spatial.set_option(spatial_param[0], spatial_param[1])

        # Apply on frame
        frame = spatial.process(frame)

    # Temporal filter
    if enable_temporal:
        temporal = rs.temporal_filter()
        for temporal_param in temporal_params:
            temporal.set_option(temporal_param[0], temporal_param[1])
        frame = temporal.process(frame)

    # Back to depth
    frame = disparity_to_depth.process(frame)

    # Hole filling
    if enable_hole:
        hole_filling = rs.hole_filling_filter()
        for hole_param in hole_params:
            hole_filling.set_option(hole_param[0], hole_param[1])
        frame = hole_filling.process(frame)

    return frame
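A usage sketch (hypothetical values): the defaults already configure the spatial filter, so only non-default temporal settings need to be passed.

# Usage sketch (hypothetical depth_frame variable and option values):
filtered = post_processing(depth_frame,
                           temporal_params=[(rs.option.filter_smooth_alpha, 0.4),
                                            (rs.option.filter_smooth_delta, 20)])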
Example #13
 def create_filters(self):
     spatial = rs.spatial_filter()
     spatial.set_option(rs.option.filter_magnitude, 5)
     spatial.set_option(rs.option.filter_smooth_alpha, 1)
     spatial.set_option(rs.option.filter_smooth_delta, 50)
     spatial.set_option(rs.option.holes_fill, 3)
     temporal = rs.temporal_filter()
     hole_filling = rs.hole_filling_filter()
     disparity_to_depth = rs.disparity_transform(False)
     depth_to_disparity = rs.disparity_transform(True)
     filters = {
         "S": spatial,
         "T": temporal,
         "H": hole_filling,
         "DZ": disparity_to_depth,
         "ZD": depth_to_disparity
     }
     return filters
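A hedged usage sketch applying the keyed filters in the conventional order: to disparity ("ZD"), smooth ("S", "T"), back to depth ("DZ"), then fill holes ("H"). The call site and `depth_frame` variable are hypothetical.

     # Usage sketch (hypothetical method body on the same class):
     filters = self.create_filters()
     for key in ("ZD", "S", "T", "DZ", "H"):
         depth_frame = filters[key].process(depth_frame)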
Example #14
    def start_pipe(self, align=True, usb3=True):
        if not self.pipelineStarted:
            if align:
                print('Establishing the camera connection')
                # Create a config and configure the pipeline to stream
                #  different resolutions of color and depth streams
                self.pipeline = rs.pipeline()

                config = rs.config()
                config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16,
                                     30)
                config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                     30)

                # Start streaming
                self.profile = self.pipeline.start(config)

                align_to = rs.stream.color
                self.align = rs.align(align_to)

                time.sleep(1)

                # self.pipeline = rs.pipeline()
                # config = rs.config()
                #
                # if usb3:
                #     config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
                #     config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
                #
                # else:
                #     self.profile = config.resolve(self.pipeline)  # does not start streaming
                #
                # self.profile = self.pipeline.start(config)
                # self.pipelineStarted = True
                # # Align the two streams
                # align_to = rs.stream.color
                # self.align = rs.align(align_to)
                self.pipelineStarted = True
                # Get depth scale
                depth_sensor = self.profile.get_device().first_depth_sensor()
                self.depth_scale = depth_sensor.get_depth_scale()

                # Create the filters
                self.hole_filling = rs.hole_filling_filter()
                self.temporal_filter = rs.temporal_filter()
                self.spatial_filter = rs.spatial_filter()
                self.depth_to_disparity = rs.disparity_transform(True)
                # Get Intrinsic parameters
                self.get_intrinsic()
                print('Camera opened')
Example #15
def get_filter(filter_name: str, *args) -> rs2.filter_interface:
    """
    Basically a factory for filters (could be replaced by a dict lookup)
    :param filter_name: The filter name
    :param args: The arguments for the filter
    :return: A filter which corresponds to the filter name with its arguments
    """
    if filter_name == 'decimation_filter':
        return rs2.decimation_filter(*args)
    elif filter_name == 'threshold_filter':
        return rs2.threshold_filter(*args)
    elif filter_name == 'disparity_transform':
        return rs2.disparity_transform(*args)
    elif filter_name == 'spatial_filter':
        return rs2.spatial_filter(*args)
    elif filter_name == 'temporal_filter':
        return rs2.temporal_filter(*args)
    elif filter_name == 'hole_filling_filter':
        return rs2.hole_filling_filter(*args)
    else:
        raise Exception(f'The filter \'{filter_name}\' does not exist!')
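Usage sketch (hypothetical arguments; the positional constructor arguments mirror the pyrealsense2 signatures seen elsewhere on this page, e.g. Example #6):

# Usage sketch (hypothetical arguments):
spatial = get_filter('spatial_filter')
decimation = get_filter('decimation_filter', 2.0)       # magnitude passed positionally
to_disparity = get_filter('disparity_transform', True)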
Example #16
def encode_by_colorization(depth_frame,
                           min_depth,
                           max_depth,
                           use_disparity=False):
    """Encoded given realsense depth_frame as colorized image.
    Depth limit are in meters.

    Returns color image as numpy array.

    """
    filtered = threshold(depth_frame, min_depth, max_depth)
    color_filter = rs.colorizer()

    color_filter.set_option(rs.option.histogram_equalization_enabled, 0)
    color_filter.set_option(rs.option.color_scheme, 9.0)
    color_filter.set_option(rs.option.min_distance, min_depth)
    color_filter.set_option(rs.option.max_distance, max_depth)

    if use_disparity:
        filtered = rs.disparity_transform(True).process(filtered)

    colorized = color_filter.process(filtered)
    arr = np.asanyarray(colorized.get_data()).copy()
    return arr
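The `threshold` helper used above is not shown in this example; a plausible sketch, consistent with the `thr_filter` setup in Example #17 below, might be:

def threshold(depth_frame, min_depth, max_depth):
    # Hypothetical helper: keep depth within [min_depth, max_depth] meters.
    thr = rs.threshold_filter(min_dist=min_depth, max_dist=max_depth)
    return thr.process(depth_frame)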
Example #17
min_depth = 0.3
max_depth = 4.0

# Create colorizer object
colorizer = rs.colorizer()
colorizer.set_option(rs.option.color_scheme, 9)
colorizer.set_option(rs.option.histogram_equalization_enabled, 0)
colorizer.set_option(rs.option.min_distance, min_depth)
colorizer.set_option(rs.option.max_distance, max_depth)
# Filter
thr_filter = rs.threshold_filter()
thr_filter.set_option(rs.option.min_distance, min_depth)
thr_filter.set_option(rs.option.max_distance, max_depth)

# Disparity
dp_filter = rs.disparity_transform()

#https://dev.intelrealsense.com/docs/depth-image-compression-by-colorization-for-intel-realsense-depth-cameras#section-6references
#https://github.com/TetsuriSonoda/rs-colorize/blob/master/rs-colorize/rs-colorize.cpp

start = timer()
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.moveWindow("RealSense", 0, 0)

depth_sensor = profile.get_device().first_depth_sensor()

intrinsics = True
try:
    while True:

        # Wait for a coherent pair of frames: depth and color
Example #18
def main():

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

    # Start streaming
    #queue =rs.frame_queue(50)
    cfg = pipeline.start(config)  #, queue)
    align = rs.align(rs.stream.color)
    profile = cfg.get_stream(rs.stream.depth)
    vis = o3d.visualization.Visualizer()
    vis.create_window('PCD', width=1280, height=720)
    pointcloud = o3d.geometry.PointCloud()
    geom_added = False

    intrinsics = profile.as_video_stream_profile().get_intrinsics()
    print(intrinsics)

    depth_sensor = cfg.get_device().first_depth_sensor()

    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # Remove the background of objects more than clipping_distance_in_meters away
    clipping_distance_in_meters = 3  # clip beyond 3 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # filters
    # decimation filter
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)

    # spatial filter
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)

    temporal = rs.temporal_filter()

    hole_filling = rs.hole_filling_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
    while True:
        dt0 = datetime.now()
        frames = pipeline.wait_for_frames()
        #frames = queue.wait_for_frame().as_frameset()
        frames = align.process(frames)
        profile = frames.get_profile()

        depth_frame = frames.get_depth_frame()
        # depth_frame = decimation.process(depth_frame)
        depth_frame = depth_to_disparity.process(depth_frame)
        depth_frame = spatial.process(depth_frame)
        depth_frame = temporal.process(depth_frame)
        depth_frame = disparity_to_depth.process(depth_frame)
        #depth_frame = hole_filling.process(depth_frame)

        # We will be removing the background of objects more than
        #  clipping_distance_in_meters meters away

        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        img_depth = o3d.geometry.Image(depth_image)
        img_color = o3d.geometry.Image(color_image)
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            img_color,
            img_depth,
            depth_trunc=clipping_distance_in_meters,
            convert_rgb_to_intensity=False)

        pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
            intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
            intrinsics.ppx, intrinsics.ppy)
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd, pinhole_camera_intrinsic)
        pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                       [0, 0, 0, 1]])
        pointcloud.points = pcd.points
        pointcloud.colors = pcd.colors

        #clusteringDepthImage(pointcloud)

        if geom_added == False:
            vis.add_geometry(pointcloud)
            geom_added = True

        vis.update_geometry(pointcloud)
        vis.poll_events()
        vis.update_renderer()

        # cv2.imshow('bgr', color_image)
        # key = cv2.waitKey(1)
        # if key == ord('q'):
        #    break

        process_time = datetime.now() - dt0
        print("FPS: " + str(1 / process_time.total_seconds()))

    pipeline.stop()
    cv2.destroyAllWindows()
    vis.destroy_window()
    del vis
Example #19
def image_process():
    pipeline = rs.pipeline()

    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_sensor.set_option(rs.option.visual_preset, 4)

    # Create the align object
    align_to = rs.stream.color
    align = rs.align(align_to)

    try:
        while True:
            frames = pipeline.wait_for_frames()

            # Align the frames
            align_frames = align.process(frames)

            depth_frame = align_frames.get_depth_frame()
            color_frame = align_frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            # color_image = np.asanyarray(color_frame.get_data())
            # cv2.imshow('rgb', color_image)

            depth_image = np.asanyarray(depth_frame.get_data())
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            dec = rs.decimation_filter()
            dec.set_option(rs.option.filter_magnitude, 1)
            depth_frame_pro = dec.process(depth_frame)

            depth2disparity = rs.disparity_transform()
            depth_frame_pro = depth2disparity.process(depth_frame_pro)

            spat = rs.spatial_filter()
            # Enable hole filling; 5 fills all zero pixels
            spat.set_option(rs.option.holes_fill, 5)
            depth_frame_pro = spat.process(depth_frame_pro)

            temp = rs.temporal_filter()
            depth_frame_pro = temp.process(depth_frame_pro)

            disparity2depth = rs.disparity_transform(False)
            depth_frame_pro = disparity2depth.process(depth_frame_pro)

            depth_image_pro = np.asanyarray(depth_frame_pro.get_data())
            depth_colormap_pro = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image_pro, alpha=0.03),
                cv2.COLORMAP_JET)

            images = np.hstack((depth_colormap, depth_colormap_pro))
            cv2.imshow('depth_images', images)

            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

    finally:
        pipeline.stop()

    return depth_image_pro
Example #20
def capture_filter_thread(color_np_queue, mask_np_queue):
    global depth_scale, isRunning
    print('> Capture Thread Started')
    # Create a context object. This object owns the handles to all connected realsense devices
    pipeline = rs.pipeline()

    try:
        # Config realsense
        config = rs.config()
        config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT)
        config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT)

        # Start streaming
        profile = pipeline.start(config)

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames
        align_to = rs.stream.color
        align = rs.align(align_to)

        # Get the depth sensor's depth scale
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        # print('Depth Scale is: ', depth_scale)
        # ----------------
        #  Create Filters
        # ----------------
        # Decimation Filter - change resolution size!
        # decimation = rs.decimation_filter()
        # decimation.set_option(rs.option.filter_magnitude, 0.5)

        # Spatial Filter
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)

        # Temporal Filter
        temporal = rs.temporal_filter()

        # Hole Filling Filter
        hole_filling = rs.hole_filling_filter()

        # Disparity Transform
        depth_to_disparity = rs.disparity_transform(True)
        disparity_to_depth = rs.disparity_transform(False)

        frameset = None
        filtered_depth_frame = None

        # Skip 5 first frames to give the Auto-Exposure time to adjust
        for x in range(5):
            pipeline.wait_for_frames()

        while isRunning:
            print('---> Capture One Frame')
            frameset = pipeline.wait_for_frames()

            # Validate that both frames are valid
            if not frameset:
                continue

            # Align the frameset (depth frames to color)
            alignment = align.process(frameset)

            for x in range(SLIDING_WINDOW):
                # get frame
                frameset = alignment
                filtered_depth_frame = frameset.get_depth_frame()
                # Filtering
                filtered_depth_frame = depth_to_disparity.process(
                    filtered_depth_frame)
                filtered_depth_frame = spatial.process(filtered_depth_frame)
                filtered_depth_frame = temporal.process(filtered_depth_frame)
                filtered_depth_frame = disparity_to_depth.process(
                    filtered_depth_frame)
                filtered_depth_frame = hole_filling.process(
                    filtered_depth_frame)

            color_np_queue.put(
                np.asanyarray(frameset.get_color_frame().get_data()))
            mask_np_queue.put(np.asanyarray(filtered_depth_frame.get_data()))
            del frameset
            gc.collect()

            time.sleep(CAPTURE_CYCLE)
    except:
        traceback.print_exc()
        rs.log_to_console(rs.log_severity.debug)
    finally:
        pipeline.stop()
        print('> Capture Thread Stopped')
Example #21
def save_thread(color_np_queue, mask_np_queue):
    global frameset_queue, isRunning

    print('> Filter & Save Thread Started')
    try:
        # visualising the data
        #colorizer = rs.colorizer()

        sequence = 0
        # ----------------
        #  Create Filters
        # ----------------
        # Decimation Filter - change resolution size!
        # decimation = rs.decimation_filter()
        # decimation.set_option(rs.option.filter_magnitude, 0.5)

        # Spatial Filter
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)

        # Temporal Filter
        temporal = rs.temporal_filter()

        # Hole Filling Filter
        hole_filling = rs.hole_filling_filter()

        # Disparity Transform
        depth_to_disparity = rs.disparity_transform(True)
        disparity_to_depth = rs.disparity_transform(False)

        frameset = None
        filtered_depth_frame = None
        while isRunning:
            time.sleep(0.2)
            while not frameset_queue.empty():
                time.sleep(0.2)
                print('---> Wait for Frames')
                # filter frames
                for x in range(SLIDING_WINDOW):
                    # print('----> Get Sliding Window')
                    if frameset_queue.empty():
                        break
                    # get depth frame
                    frameset = frameset_queue.get(False)
                    filtered_depth_frame = frameset.get_depth_frame()
                    # Filtering
                    # filtered_depth_frame = decimation.process(filtered_depth_frame)
                    filtered_depth_frame = depth_to_disparity.process(
                        filtered_depth_frame)
                    filtered_depth_frame = spatial.process(
                        filtered_depth_frame)
                    filtered_depth_frame = temporal.process(
                        filtered_depth_frame)
                    filtered_depth_frame = disparity_to_depth.process(
                        filtered_depth_frame)
                    filtered_depth_frame = hole_filling.process(
                        filtered_depth_frame)
                print('---> Got One')

                # Save Images
                color_np = np.asanyarray(frameset.get_color_frame().get_data())
                saveImage(color_np, 'IMAGE_', sequence)

                # save depth image
                #saveImage(np.asanyarray(frameset.get_depth_frame().get_data()), 'DEPTH_', sequence)

                filtered_depth_np = np.asanyarray(
                    filtered_depth_frame.get_data())
                # save filtered depth frame
                #saveImage(filtered_depth_np, 'FILTERED_DEPTH_', sequence)

                # save colorized filtered depth frame
                #saveImage(np.asanyarray(colorizer.colorize(filtered_depth_frame).get_data()), 'COLORIZED_FILTERED_DEPTH_', sequence)

                # save mask from clipped filtered depth frame
                saveImage(createMask(filtered_depth_np, CLIPPING_DISTANCE),
                          'IMAGE_', sequence, True)

                # save clipped filtered color frame
                #replace_color = 0
                #filtered_depth_image_3d = np.dstack((filtered_depth_np, filtered_depth_np, filtered_depth_np))
                #clipped_filtered_color_np = np.where((filtered_depth_image_3d > (CLIPPING_DISTANCE / depth_scale)) | (filtered_depth_image_3d <= 0), replace_color, color_np)
                #saveImage(clipped_filtered_color_np, 'CLIPPED_FILTERED_COLOR_', sequence)

                sequence += 1
    except:
        traceback.print_exc()
    finally:
        print('> Filter & Save Thread Stopped')
Example #22
def print_hi():
    # License: Apache 2.0. See LICENSE file in root directory.
    # Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
    """
    OpenGL Pointcloud viewer with http://pyglet.org

    Usage:
    ------
    Mouse:
        Drag with left button to rotate around pivot (thick small axes),
        with right button to translate and the wheel to zoom.

    Keyboard:
        [p]     Pause
        [r]     Reset View
        [d]     Cycle through decimation values
        [z]     Toggle point scaling
        [x]     Toggle point distance attenuation
        [c]     Toggle color source
        [l]     Toggle lighting
        [f]     Toggle depth post-processing
        [s]     Save PNG (./out.png)
        [e]     Export points to ply (./out.ply)
        [q/ESC] Quit

    Notes:
    ------
    This uses deprecated OpenGL (FFP lighting, matrix stack...); however, draw calls
    are kept low with pyglet.graphics.*, which uses glDrawArrays internally.

    Normals calculation is done with numpy on the CPU, which is rather slow. It should
    really be done with shaders, but that was omitted for several reasons: brevity,
    lowering dependencies (pyglet doesn't ship with shader support and recommends
    pyshaders), and keeping the code readable as a reference.
    """

    # https://stackoverflow.com/a/6802723
    def rotation_matrix(axis, theta):
        """
        Return the rotation matrix associated with counterclockwise rotation about
        the given axis by theta radians.
        """
        axis = np.asarray(axis)
        axis = axis / math.sqrt(np.dot(axis, axis))
        a = math.cos(theta / 2.0)
        b, c, d = -axis * math.sin(theta / 2.0)
        aa, bb, cc, dd = a * a, b * b, c * c, d * d
        bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
        return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                         [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                         [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])

    class AppState:
        def __init__(self, *args, **kwargs):
            self.pitch, self.yaw = math.radians(-10), math.radians(-15)
            self.translation = np.array([0, 0, 1], np.float32)
            self.distance = 2
            self.mouse_btns = [False, False, False]
            self.paused = False
            self.decimate = 0
            self.scale = True
            self.attenuation = False
            self.color = True
            self.lighting = False
            self.postprocessing = False

        def reset(self):
            self.pitch, self.yaw, self.distance = 0, 0, 2
            self.translation[:] = 0, 0, 1

        @property
        def rotation(self):
            Rx = rotation_matrix((1, 0, 0), math.radians(-self.pitch))
            Ry = rotation_matrix((0, 1, 0), math.radians(-self.yaw))
            return np.dot(Ry, Rx).astype(np.float32)

    state = AppState()

    # Configure streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    # other_stream, other_format = rs.stream.infrared, rs.format.y8
    other_stream, other_format = rs.stream.color, rs.format.rgb8
    config.enable_stream(other_stream, 640, 480, other_format, 30)

    # Start streaming
    pipeline.start(config)
    profile = pipeline.get_active_profile()

    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    depth_intrinsics = depth_profile.get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    # Processing blocks
    pc = rs.pointcloud()
    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
    colorizer = rs.colorizer()
    filters = [
        rs.disparity_transform(),
        rs.spatial_filter(),
        rs.temporal_filter(),
        rs.disparity_transform(False)
    ]

    # pyglet
    window = pyglet.window.Window(
        config=gl.Config(
            double_buffer=True,
            samples=8  # MSAA
        ),
        resizable=True,
        vsync=True)
    keys = pyglet.window.key.KeyStateHandler()
    window.push_handlers(keys)

    def convert_fmt(fmt):
        """rs.format to pyglet format string"""
        return {
            rs.format.rgb8: 'RGB',
            rs.format.bgr8: 'BGR',
            rs.format.rgba8: 'RGBA',
            rs.format.bgra8: 'BGRA',
            rs.format.y8: 'L',
        }[fmt]

    # Create a VertexList to hold pointcloud data
    # This pre-allocates memory according to the attributes below
    vertex_list = pyglet.graphics.vertex_list(w * h, 'v3f/stream',
                                              't2f/stream', 'n3f/stream')
    # Create and allocate memory for our color data
    other_profile = rs.video_stream_profile(profile.get_stream(other_stream))
    image_data = pyglet.image.ImageData(w, h,
                                        convert_fmt(other_profile.format()),
                                        (gl.GLubyte * (w * h * 3))())

    if (pyglet.version.startswith('1.')
            and not pyglet.version.startswith('1.4')):
        # pyglet.clock.ClockDisplay has been removed in 1.4
        fps_display = pyglet.clock.ClockDisplay()
    else:
        fps_display = pyglet.window.FPSDisplay(window)

    @window.event
    def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
        w, h = map(float, window.get_size())

        if buttons & pyglet.window.mouse.LEFT:
            state.yaw -= dx * 0.5
            state.pitch -= dy * 0.5

        if buttons & pyglet.window.mouse.RIGHT:
            dp = np.array((dx / w, -dy / h, 0), np.float32)
            state.translation += np.dot(state.rotation, dp)

        if buttons & pyglet.window.mouse.MIDDLE:
            dz = dy * 0.01
            state.translation -= (0, 0, dz)
            state.distance -= dz

    def handle_mouse_btns(x, y, button, modifiers):
        state.mouse_btns[0] ^= (button & pyglet.window.mouse.LEFT)
        state.mouse_btns[1] ^= (button & pyglet.window.mouse.RIGHT)
        state.mouse_btns[2] ^= (button & pyglet.window.mouse.MIDDLE)

    window.on_mouse_press = window.on_mouse_release = handle_mouse_btns

    @window.event
    def on_mouse_scroll(x, y, scroll_x, scroll_y):
        dz = scroll_y * 0.1
        state.translation -= (0, 0, dz)
        state.distance -= dz

    def on_key_press(symbol, modifiers):
        if symbol == pyglet.window.key.R:
            state.reset()

        if symbol == pyglet.window.key.P:
            state.paused ^= True

        if symbol == pyglet.window.key.D:
            state.decimate = (state.decimate + 1) % 3
            decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)

        if symbol == pyglet.window.key.C:
            state.color ^= True

        if symbol == pyglet.window.key.Z:
            state.scale ^= True

        if symbol == pyglet.window.key.X:
            state.attenuation ^= True

        if symbol == pyglet.window.key.L:
            state.lighting ^= True

        if symbol == pyglet.window.key.F:
            state.postprocessing ^= True

        if symbol == pyglet.window.key.S:
            pyglet.image.get_buffer_manager().get_color_buffer().save(
                'out.png')

        if symbol == pyglet.window.key.Q:
            window.close()

    window.push_handlers(on_key_press)

    def axes(size=1, width=1):
        """draw 3d axes"""
        gl.glLineWidth(width)
        pyglet.graphics.draw(
            6, gl.GL_LINES,
            ('v3f',
             (0, 0, 0, size, 0, 0, 0, 0, 0, 0, size, 0, 0, 0, 0, 0, 0, size)),
            ('c3f', (
                1,
                0,
                0,
                1,
                0,
                0,
                0,
                1,
                0,
                0,
                1,
                0,
                0,
                0,
                1,
                0,
                0,
                1,
            )))

    def frustum(intrinsics):
        """draw camera's frustum"""
        w, h = intrinsics.width, intrinsics.height
        batch = pyglet.graphics.Batch()

        for d in range(1, 6, 2):

            def get_point(x, y):
                p = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], d)
                batch.add(2, gl.GL_LINES, None, ('v3f', [0, 0, 0] + p))
                return p

            top_left = get_point(0, 0)
            top_right = get_point(w, 0)
            bottom_right = get_point(w, h)
            bottom_left = get_point(0, h)

            batch.add(2, gl.GL_LINES, None, ('v3f', top_left + top_right))
            batch.add(2, gl.GL_LINES, None, ('v3f', top_right + bottom_right))
            batch.add(2, gl.GL_LINES, None,
                      ('v3f', bottom_right + bottom_left))
            batch.add(2, gl.GL_LINES, None, ('v3f', bottom_left + top_left))

        batch.draw()

    def grid(size=1, n=10, width=1):
        """draw a grid on xz plane"""
        gl.glLineWidth(width)
        s = size / float(n)
        s2 = 0.5 * size
        batch = pyglet.graphics.Batch()

        for i in range(0, n + 1):
            x = -s2 + i * s
            batch.add(2, gl.GL_LINES, None, ('v3f', (x, 0, -s2, x, 0, s2)))
        for i in range(0, n + 1):
            z = -s2 + i * s
            batch.add(2, gl.GL_LINES, None, ('v3f', (-s2, 0, z, s2, 0, z)))

        batch.draw()

    @window.event
    def on_draw():
        window.clear()

        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_LINE_SMOOTH)

        width, height = window.get_size()
        gl.glViewport(0, 0, width, height)

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.gluPerspective(60, width / float(height), 0.01, 20)

        gl.glMatrixMode(gl.GL_TEXTURE)
        gl.glLoadIdentity()
        # texcoords are [0..1] and relative to top-left pixel corner, add 0.5 to center
        gl.glTranslatef(0.5 / image_data.width, 0.5 / image_data.height, 0)
        image_texture = image_data.get_texture()
        # texture size may be increased by pyglet to a power of 2
        tw, th = image_texture.owner.width, image_texture.owner.height
        gl.glScalef(image_data.width / float(tw),
                    image_data.height / float(th), 1)

        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        gl.gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)

        gl.glTranslatef(0, 0, state.distance)
        gl.glRotated(state.pitch, 1, 0, 0)
        gl.glRotated(state.yaw, 0, 1, 0)

        if any(state.mouse_btns):
            axes(0.1, 4)

        gl.glTranslatef(0, 0, -state.distance)
        gl.glTranslatef(*state.translation)

        gl.glColor3f(0.5, 0.5, 0.5)
        gl.glPushMatrix()
        gl.glTranslatef(0, 0.5, 0.5)
        grid()
        gl.glPopMatrix()

        psz = max(window.get_size()) / float(max(w, h)) if state.scale else 1
        gl.glPointSize(psz)
        distance = (0, 0, 1) if state.attenuation else (1, 0, 0)
        gl.glPointParameterfv(gl.GL_POINT_DISTANCE_ATTENUATION,
                              (gl.GLfloat * 3)(*distance))

        if state.lighting:
            ldir = [0.5, 0.5, 0.5]  # world-space lighting
            ldir = np.dot(state.rotation, (0, 0, 1))  # MeshLab style lighting
            ldir = list(ldir) + [0]  # w=0, directional light
            gl.glLightfv(gl.GL_LIGHT0, gl.GL_POSITION, (gl.GLfloat * 4)(*ldir))
            gl.glLightfv(gl.GL_LIGHT0, gl.GL_DIFFUSE,
                         (gl.GLfloat * 3)(1.0, 1.0, 1.0))
            gl.glLightfv(gl.GL_LIGHT0, gl.GL_AMBIENT,
                         (gl.GLfloat * 3)(0.75, 0.75, 0.75))
            gl.glEnable(gl.GL_LIGHT0)
            gl.glEnable(gl.GL_NORMALIZE)
            gl.glEnable(gl.GL_LIGHTING)

        gl.glColor3f(1, 1, 1)
        texture = image_data.get_texture()
        gl.glEnable(texture.target)
        gl.glBindTexture(texture.target, texture.id)
        gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER,
                           gl.GL_NEAREST)

        # comment this to get round points with MSAA on
        gl.glEnable(gl.GL_POINT_SPRITE)

        if not state.scale and not state.attenuation:
            gl.glDisable(gl.GL_MULTISAMPLE)  # for true 1px points with MSAA on
        vertex_list.draw(gl.GL_POINTS)
        gl.glDisable(texture.target)
        if not state.scale and not state.attenuation:
            gl.glEnable(gl.GL_MULTISAMPLE)

        gl.glDisable(gl.GL_LIGHTING)

        gl.glColor3f(0.25, 0.25, 0.25)
        frustum(depth_intrinsics)
        axes()

        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        gl.glOrtho(0, width, 0, height, -1, 1)
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        gl.glMatrixMode(gl.GL_TEXTURE)
        gl.glLoadIdentity()
        gl.glDisable(gl.GL_DEPTH_TEST)

        fps_display.draw()

    def run(dt):
        global w, h
        window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                           (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                            "PAUSED" if state.paused else ""))

        if state.paused:
            return

        success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
        if not success:
            return

        depth_frame = frames.get_depth_frame().as_video_frame()
        other_frame = frames.first(other_stream).as_video_frame()

        depth_frame = decimate.process(depth_frame)

        if state.postprocessing:
            for f in filters:
                depth_frame = f.process(depth_frame)

        # Grab new intrinsics (may be changed by decimation)
        depth_intrinsics = rs.video_stream_profile(
            depth_frame.profile).get_intrinsics()
        w, h = depth_intrinsics.width, depth_intrinsics.height

        color_image = np.asanyarray(other_frame.get_data())

        colorized_depth = colorizer.colorize(depth_frame)
        depth_colormap = np.asanyarray(colorized_depth.get_data())

        if state.color:
            mapped_frame, color_source = other_frame, color_image
        else:
            mapped_frame, color_source = colorized_depth, depth_colormap

        points = pc.calculate(depth_frame)
        pc.map_to(mapped_frame)

        # handle color source or size change
        fmt = convert_fmt(mapped_frame.profile.format())
        global image_data
        if (image_data.format, image_data.pitch) != (fmt,
                                                     color_source.strides[0]):
            empty = (gl.GLubyte * (w * h * 3))()
            image_data = pyglet.image.ImageData(w, h, fmt, empty)
        # copy image data to pyglet
        image_data.set_data(fmt, color_source.strides[0],
                            color_source.ctypes.data)

        verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
        texcoords = np.asarray(points.get_texture_coordinates(2))

        if len(vertex_list.vertices) != verts.size:
            vertex_list.resize(verts.size // 3)
            # need to reassign after resizing
            vertex_list.vertices = verts.ravel()
            vertex_list.tex_coords = texcoords.ravel()

        # copy our data to pre-allocated buffers, this is faster than assigning...
        # pyglet will take care of uploading to GPU
        def copy(dst, src):
            """copy numpy array to pyglet array"""
            # timeit was mostly inconclusive, favoring slice assignment for safety
            np.array(dst, copy=False)[:] = src.ravel()
            # ctypes.memmove(dst, src.ctypes.data, src.nbytes)

        copy(vertex_list.vertices, verts)
        copy(vertex_list.tex_coords, texcoords)

        if state.lighting:
            # compute normals
            dy, dx = np.gradient(verts, axis=(0, 1))
            n = np.cross(dx, dy)

            # can use this, np.linalg.norm or similar to normalize, but OpenGL can do this for us, see GL_NORMALIZE above
            # norm = np.sqrt((n*n).sum(axis=2, keepdims=True))
            # np.divide(n, norm, out=n, where=norm != 0)

            # import cv2
            # n = cv2.bilateralFilter(n, 5, 1, 1)

            copy(vertex_list.normals, n)

        if keys[pyglet.window.key.E]:
            points.export_to_ply('./out.ply', mapped_frame)

    pyglet.clock.schedule(run)

    try:
        pyglet.app.run()
    finally:
        pipeline.stop()
Example #23
    pointcloud = PointCloud()
    i = 0

    while True:
        dt0 = datetime.now()
        pointcloud.clear()

        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        depth_frame = aligned_frames.get_depth_frame()

        depth_frame = rs.decimation_filter(1).process(depth_frame)
        depth_frame = rs.disparity_transform(True).process(depth_frame)
        depth_frame = rs.spatial_filter().process(depth_frame)
        depth_frame = rs.temporal_filter().process(depth_frame)
        depth_frame = rs.disparity_transform(False).process(depth_frame)
        # depth_frame = rs.hole_filling_filter().process(depth_frame)

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image1 = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

        cv2.namedWindow('color image', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('color image', cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR))
        cv2.namedWindow('depth image', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('depth image', depth_image)

        depth = Image(depth_image)
        color = Image(color_image)
Example #24
profile = pipeline.get_active_profile()

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
colorizer = rs.colorizer()
filters = [
    rs.disparity_transform(),
    rs.spatial_filter(),
    rs.temporal_filter(),
    rs.disparity_transform(False)
]

# pyglet
window = pyglet.window.Window(
    config=gl.Config(
        double_buffer=True,
        samples=8  # MSAA
    ),
    resizable=True,
    vsync=True)
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)
Example #25
pipeline.start(config)
profile = pipeline.get_active_profile()

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()

depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()
filters = [rs.disparity_transform(),
           rs.spatial_filter(),
           rs.temporal_filter(),
           rs.disparity_transform(False)]


# pyglet
window = pyglet.window.Window(
    config=gl.Config(
        double_buffer=True,
        samples=8  # MSAA
    ),
    resizable=True, vsync=True)
keys = pyglet.window.key.KeyStateHandler()
window.push_handlers(keys)
Example #26
def main():
    global colour_image
    global depth_image

    # open3d visualiser
    visualiser = open3d.visualization.Visualizer()
    point_cloud = open3d.geometry.PointCloud()
    point_cloud.points = open3d.utility.Vector3dVector(
        np.array([[i, i, i] for i in range(-5, 5)]))
    visualiser.create_window()
    visualiser.add_geometry(point_cloud)

    # Create windows
    cv2.namedWindow("Colour", cv2.WINDOW_NORMAL)
    cv2.namedWindow("Depth", cv2.WINDOW_NORMAL)

    # Camera config
    config = rs.config()
    config.enable_stream(rs.stream.depth, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, rs.format.bgr8, 30)

    # Start camera pipeline
    pipeline = rs.pipeline()
    pipeline.start(config)

    # Depth and colour alignment
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Filters
    spatial = rs.spatial_filter()
    temporal = rs.temporal_filter(0.4, 20, 5)
    disparity = rs.disparity_transform(True)
    coloriser = rs.colorizer()

    # Get stream profile and camera intrinsics
    profile = pipeline.get_active_profile()
    depth_profile = rs.video_stream_profile(profile.get_stream(
        rs.stream.depth))
    intrinsics = depth_profile.get_intrinsics()

    # Create camera intrinsics for open3d
    intrinsic = open3d.camera.PinholeCameraIntrinsic(
        intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
        intrinsics.width // 2, intrinsics.height // 2)

    while True:
        # Obtain and align frames
        current_frame = pipeline.wait_for_frames()
        current_frame = align.process(current_frame)

        depth_frame = temporal.process(current_frame.get_depth_frame())
        # Get colour and depth frames
        #depth_image = np.asanyarray(spatial.process(current_frame.get_depth_frame()).get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        colour_image = np.asanyarray(
            current_frame.get_color_frame().get_data())

        # Create rgbd image
        #rgbd = open3d.geometry.create_rgbd_image_from_color_and_depth(open3d.geometry.Image(cv2.cvtColor(colour_image, cv2.COLOR_BGR2RGB)), open3d.geometry.Image(depth_image), convert_rgb_to_intensity=False)

        # Create point cloud
        #pcd = open3d.geometry.create_point_cloud_from_rgbd_image(rgbd, intrinsic)

        # Update point cloud for visualiser
        #point_cloud.points = pcd.points
        #point_cloud.colors = pcd.colors

        # Update visualiser
        #visualiser.update_geometry()
        #visualiser.poll_events()
        #visualiser.update_renderer()

        depth_image = np.asanyarray(coloriser.colorize(depth_frame).get_data())

        cv2.imshow("Depth", depth_image)
        cv2.imshow("Colour", colour_image)
        cv2.waitKey(1)
Example #27
USE_PRESET_FILE = True
PRESET_FILE = "../cfg/d4xx-default.json"

RTSP_STREAMING_ENABLE = True
RTSP_PORT = "8554"
RTSP_MOUNT_POINT = "/d4xx"

# List of filters to be applied, in this order.
# https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md

filters = [[True, "Decimation Filter",
            rs.decimation_filter()],
           [True, "Threshold Filter",
            rs.threshold_filter()],
           [True, "Depth to Disparity",
            rs.disparity_transform(True)],
           [True, "Spatial Filter",
            rs.spatial_filter()],
           [True, "Temporal Filter",
            rs.temporal_filter()],
           [False, "Hole Filling Filter",
            rs.hole_filling_filter()],
           [True, "Disparity to Depth",
            rs.disparity_transform(False)]]

#
# The filters can be tuned with the opencv_depth_filtering.py script, and the resulting values saved here as defaults.
# Individual filters have different options, so one has to apply the values accordingly.
#

# decimation_magnitude = 8
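A minimal application sketch (hypothetical `depth_frame` variable), honoring the enable flags in list order:

# Usage sketch (hypothetical): apply the enabled filters in list order.
for enabled, name, f in filters:
    if enabled:
        depth_frame = f.process(depth_frame)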
Example #28
align = rs.align(align_to)

# Setting up filters
dec_filter = rs.decimation_filter()
dec_filter.set_option(rs.option.filter_magnitude, 4)

spa_filter = rs.spatial_filter()
spa_filter.set_option(rs.option.filter_magnitude, 5)
spa_filter.set_option(rs.option.filter_smooth_alpha, 1)
spa_filter.set_option(rs.option.filter_smooth_delta, 50)
spa_filter.set_option(rs.option.holes_fill, 3)

tmp_filter = rs.temporal_filter()
hol_fillin = rs.hole_filling_filter()

dep_to_dis = rs.disparity_transform(True)
dis_to_dep = rs.disparity_transform(False)

# Setting up indices
d1, d2 = int(480 / 4), int(640 / 4)
idx, jdx = np.indices((d1, d2))
idx_back = np.clip(idx - 1, 0, idx.max()).flatten()
idx_front = np.clip(idx + 1, 0, idx.max()).flatten()
jdx_back = np.clip(jdx - 1, 0, jdx.max()).flatten()
jdx_front = np.clip(jdx + 1, 0, jdx.max()).flatten()
idx = idx.flatten()
jdx = jdx.flatten()

# rand_idx = np.random.choice(np.arange(idx.shape[0]), size=d1*d2, replace=False)
f1 = (idx_front * d2 + jdx)  # [rand_idx]
f2 = (idx_back * d2 + jdx)  # [rand_idx]
print("Depth Scale is: " , depth_scale)

# We will be removing the background of objects more than
#  clipping_distance_in_meters meters away
clipping_distance_in_meters = 4  # clip beyond 4 meters
clipping_distance = clipping_distance_in_meters / depth_scale

# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
decimate = rs.decimation_filter(magnitude=2)
spatial = rs.spatial_filter(smooth_alpha=0.5, smooth_delta=28, magnitude=2, hole_fill=1)
temporal = rs.temporal_filter(smooth_alpha=0.05, smooth_delta=28, persistence_control=2)
depth_to_disparity = rs.disparity_transform(True)
disparity_to_depth = rs.disparity_transform(False)

# Streaming loop
try:
    while True:
        # Get frameset of color and depth
        success, frames = pipeline.try_wait_for_frames(timeout_ms=20)
        if not success:
            continue
        # frames.get_depth_frame() is a 640x360 depth image

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
def create_pipeline(config: dict):
    """Sets up the pipeline to extract depth and rgb frames

    Arguments:
        config {dict} -- A dictionary mapping for configuration. See default.yaml

    Returns:
        tuple -- pipeline, process modules, filters, t265 device (optional)
    """
    # Create pipeline and config for D4XX, L5XX
    pipeline = rs.pipeline()
    rs_config = rs.config()

    # If T265 is enabled, it needs to be handled separately
    t265_dev = None
    t265_sensor = None
    t265_pipeline = rs.pipeline()
    t265_config = rs.config()

    if config['playback']['enabled']:
        # Load recorded bag file
        rs.config.enable_device_from_file(
            rs_config, config['playback']['file'],
            config['playback'].get('repeat', False))

        # This code is only activated if the user points to a T265 Recorded Bag File
        if config['tracking']['enabled']:
            rs.config.enable_device_from_file(
                t265_config, config['tracking']['playback']['file'],
                config['playback'].get('repeat', False))

            t265_config.enable_stream(rs.stream.pose)
            t265_pipeline.start(t265_config)
            profile_temp = t265_pipeline.get_active_profile()
            t265_dev = profile_temp.get_device()
            t265_playback = t265_dev.as_playback()
            t265_playback.set_real_time(False)

    else:
        # Ensure device is connected
        ctx = rs.context()
        devices = ctx.query_devices()
        if len(devices) == 0:
            logging.error("No connected Intel Realsense Device!")
            sys.exit(1)

        if config['advanced']:
            logging.info(
                "Attempting to enter advanced mode and upload JSON settings file"
            )
            load_setting_file(ctx, devices, config['advanced'])

        if config['tracking']['enabled']:
            # Cycle through connected devices and print them
            for dev in devices:
                dev_name = dev.get_info(rs.camera_info.name)
                print("Found {}".format(dev_name))
                if "Intel RealSense D4" in dev_name:
                    pass
                elif "Intel RealSense T265" in dev_name:
                    t265_dev = dev
                elif "Intel RealSense L515" in dev_name:
                    pass

            if len(devices) != 2:
                logging.error("Need 2 connected Intel Realsense Devices!")
                sys.exit(1)
            if t265_dev is None:
                logging.error("Need Intel Realsense T265 Device!")
                sys.exit(1)

            # Unable to open as a pipeline, must use sensors
            t265_sensor = t265_dev.query_sensors()[0]
            profiles = t265_sensor.get_stream_profiles()
            pose_profile = [
                profile for profile in profiles
                if profile.stream_name() == 'Pose'
            ][0]
            t265_sensor.open(pose_profile)
            t265_sensor.start(callback_pose)
            logging.info("Started streaming Pose")

    rs_config.enable_stream(rs.stream.depth, config['depth']['width'],
                            config['depth']['height'], rs.format.z16,
                            config['depth']['framerate'])
    # other_stream, other_format = rs.stream.infrared, rs.format.y8
    rs_config.enable_stream(rs.stream.color, config['color']['width'],
                            config['color']['height'], rs.format.rgb8,
                            config['color']['framerate'])

    # Start streaming
    pipeline.start(rs_config)
    profile = pipeline.get_active_profile()

    depth_sensor = profile.get_device().first_depth_sensor()
    color_sensor = profile.get_device().first_color_sensor()

    depth_scale = depth_sensor.get_depth_scale()
    # depth_sensor.set_option(rs.option.global_time_enabled, 1.0)
    # color_sensor.set_option(rs.option.global_time_enabled, 1.0)

    if config['playback']['enabled']:
        dev = profile.get_device()
        playback = dev.as_playback()
        playback.set_real_time(False)

    # Processing blocks
    filters = []
    decimate = None
    align = rs.align(rs.stream.color)
    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
    # Decimation (kept out of `filters`: it changes frame resolution, so it is
    # applied first, via process_modules)
    if config.get("filters").get("decimation"):
        filt = config.get("filters").get("decimation")
        if filt.get('active', True):
            filt.pop('active', None)  # Remove active key before passing params
            decimate = rs.decimation_filter(**filt)

    # Spatial
    if config.get("filters").get("spatial"):
        filt = config.get("filters").get("spatial")
        if filt.get('active', True):
            filt.pop('active', None)  # Remove active key before passing params
            my_filter = rs.spatial_filter(**filt)
            filters.append(my_filter)

    # Temporal
    if config.get("filters").get("temporal"):
        filt = config.get("filters").get("temporal")
        if filt.get('active', True):
            filt.pop('active', None)  # Remove active key before passing params
            my_filter = rs.temporal_filter(**filt)
            filters.append(my_filter)

    process_modules = (align, depth_to_disparity, disparity_to_depth, decimate)

    intrinsics = get_intrinsics(pipeline, rs.stream.color)
    proj_mat = create_projection_matrix(intrinsics)

    sensor_meta = dict(depth_scale=depth_scale)
    config['sensor_meta'] = sensor_meta

    # Note that sensor must be saved so that it is not garbage collected
    t265_device = dict(pipeline=t265_pipeline, sensor=t265_sensor)

    return pipeline, process_modules, filters, proj_mat, t265_device
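# (Not part of the original example: a hedged usage sketch. The real schema
#  lives in default.yaml; the dict below only illustrates the keys that
#  create_pipeline() reads, and the option values are placeholders.)
config = {
    'playback': {'enabled': False},
    'tracking': {'enabled': False},
    'advanced': None,
    'depth': {'width': 640, 'height': 480, 'framerate': 30},
    'color': {'width': 640, 'height': 480, 'framerate': 30},
    'filters': {
        'decimation': {'active': True, 'magnitude': 2.0},
        'spatial': {'active': True, 'smooth_alpha': 0.5, 'smooth_delta': 20.0,
                    'magnitude': 2.0, 'hole_fill': 0.0},
        'temporal': {'active': True, 'smooth_alpha': 0.4, 'smooth_delta': 20.0,
                     'persistence_control': 3.0},
    },
}

pipeline, (align, d2d, dis2dep, decimate), filters, proj_mat, t265 = create_pipeline(config)
try:
    frames = pipeline.wait_for_frames()
    aligned = align.process(frames)
    depth = aligned.get_depth_frame()
    if decimate is not None:
        depth = decimate.process(depth)
    # Filters that operate in disparity space are bracketed by the two transforms
    depth = d2d.process(depth)
    for f in filters:
        depth = f.process(depth)
    depth = dis2dep.process(depth)
finally:
    pipeline.stop()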
Example #31
0
def get_pointcloud_frame(bagfile_dir, filter=False):

    # -------------------------------------------- #
    # Read a bag file and yield, frame by frame,
    # the depth image and the point-cloud vertices
    # -------------------------------------------- #

    try:
        # Declare pointcloud object, for calculating pointclouds and texture mappings
        pc = rs.pointcloud()

        # We want the points object to be persistent so we can display the last cloud when a frame drops
        points = rs.points()
        
        # Create pipeline
        pipeline = rs.pipeline()

        # Create a config object
        config = rs.config()
        
        # Tell config that we will use a recorded device from file, played back through the pipeline.
        rs.config.enable_device_from_file(config, bagfile_dir)
    
        # Configure the pipeline to stream the depth and color stream
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)

        # Declare filters
        dec_filter = rs.decimation_filter()     # Decimation - reduces depth frame density
        spat_filter = rs.spatial_filter()       # Spatial    - edge-preserving spatial smoothing
        temp_filter = rs.temporal_filter()      # Temporal   - reduces temporal noise
        fill_filter = rs.hole_filling_filter()  # Hole Filling filter
        disp_filter = rs.disparity_transform()  # Disparity Transform filter (declared but not used below)

        # Start streaming from file
        profile = pipeline.start(config)

        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: " , depth_scale)

        while True:

            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if filter:
                # Perform filtering: decimation -> spatial -> hole filling -> temporal
                filtered = dec_filter.process(depth_frame)
                filtered = spat_filter.process(filtered)
                filtered = fill_filter.process(filtered)
                depth_frame = temp_filter.process(filtered)

                # Decimate the color frame too, so it matches the reduced depth resolution
                color_frame = dec_filter.process(color_frame)
            
            # Tell pointcloud object to map to this color frame
            pc.map_to(color_frame)

            # Generate the pointcloud and texture mappings
            points = pc.calculate(depth_frame)
            
            # Yield the raw depth image and the point-cloud vertices
            points_array = np.asanyarray(points.get_vertices())
            yield np.asanyarray(depth_frame.get_data()), points_array
                
    finally:
        # Stop streaming when the generator is closed or an error occurs
        pipeline.stop()
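# (Not part of the original example: a hedged usage sketch for the generator
#  above; "recording.bag" is a placeholder path.)
if __name__ == "__main__":
    frame_gen = get_pointcloud_frame("recording.bag", filter=True)
    for i, (depth_image, vertices) in enumerate(frame_gen):
        # vertices is a structured array of (x, y, z) float32 triples
        xyz = np.asanyarray(vertices).view(np.float32).reshape(-1, 3)
        print("frame {}: depth {}, {} points".format(i, depth_image.shape, xyz.shape[0]))
        if i >= 10:
            frame_gen.close()  # runs the generator's finally block, stopping the pipeline
            break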