Example #1
def set_pointcloud_variable():
    pc = rs.pointcloud()
    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2**1)
    colorizer = rs.colorizer()

    return pc, decimate, colorizer
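Usage sketch (not part of the example above), assuming a started rs.pipeline() named pipeline: the decimated depth frame feeds both the point-cloud calculation and the colorizer.

pc, decimate, colorizer = set_pointcloud_variable()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
decimated = decimate.process(depth_frame)        # resolution reduced by the magnitude-2 decimation
points = pc.calculate(decimated)                 # point cloud from the filtered depth
depth_colormap = colorizer.colorize(decimated)   # colorized depth frame for display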
Example #2
 def filtering(self, frame):
     '''Filter setting'''
     # Decimation #
     decimation = rs.decimation_filter()
     decimation.set_option(rs.option.filter_magnitude, 1)
     # Spatial #
     spatial = rs.spatial_filter()
     # spatial.set_option(rs.option.filter_magnitude, 5)
     spatial.set_option(rs.option.filter_smooth_alpha, 0.6)
     spatial.set_option(rs.option.filter_smooth_delta, 8)
     # spatial.set_option(rs.option.holes_fill, 3)
     # Temporal #
     temporal = rs.temporal_filter()
     temporal.set_option(rs.option.filter_smooth_alpha, 0.5)
     temporal.set_option(rs.option.filter_smooth_delta, 20)
     # Hole #
     hole_filling = rs.hole_filling_filter()
     ##
     depth_to_disparity = rs.disparity_transform(True)
     disparity_to_depth = rs.disparity_transform(False)
     '''Applying filter'''
     frame = decimation.process(frame)
     frame = depth_to_disparity.process(frame)
     frame = spatial.process(frame)
     frame = temporal.process(frame)
     frame = disparity_to_depth.process(frame)
     frame = hole_filling.process(frame)
     return frame
Example #3
    def prepare_filters(self):
        # prepare post-processing filters
        decimate = rs.decimation_filter()
        decimate.set_option(rs.option.filter_magnitude, 2 ** 3)
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.filter_magnitude, 5)
        spatial.set_option(rs.option.filter_smooth_alpha, 1)
        spatial.set_option(rs.option.filter_smooth_delta, 50)
        spatial.set_option(rs.option.holes_fill, 3)

        colorizer = rs.colorizer()
        self.filters = [rs.disparity_transform(),
                        rs.decimation_filter(),
                        rs.spatial_filter(),
                        rs.temporal_filter(),
                        rs.disparity_transform(False)]
Example #4
    def __init__(self, filters=[]):
        """
        Connect to RealSense and initialize filters
        :param filters: [String, ...], default=[]: '' TODO list filters
        """
        self.pipe = rs.pipeline()
        cfg = rs.config()
        profile = self.pipe.start(cfg)
        # camera parameters
        self.depth_scale = profile.get_device().first_depth_sensor(
        ).get_depth_scale()

        # filters to apply to depth images
        self.filters = filters
        if 'align' in self.filters:
            self.align = rs.align(rs.stream.color)
        if 'decimation' in self.filters:
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)
        if 'spatial' in self.filters:
            self.spatial = rs.spatial_filter()
            # self.spatial.set_option(rs.option.holes_fill, 3)
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
            self.spatial.set_option(rs.option.filter_smooth_delta, 50)
        if 'temporal' in self.filters:
            # TODO
            self.temporal = rs.temporal_filter()
            self.temporal_iters = 3
        if 'hole_filling' in self.filters:
            self.hole_filling = rs.hole_filling_filter()
        if 'colorize' in self.filters:
            self.colorizer = rs.colorizer()
Example #5
    def __init__(self, parameters, neural):
        self.parameters = parameters
        self.neural = neural

        self.stream_frame = None
        config = rs.config()
        self.pipeline = rs.pipeline()
        config.enable_stream(rs.stream.depth, self.parameters.depth_width,
                             self.parameters.depth_height, rs.format.z16,
                             self.parameters.depth_fps)
        config.enable_stream(rs.stream.color, self.parameters.color_width,
                             self.parameters.color_height, rs.format.rgb8,
                             self.parameters.color_fps)

        profile = self.pipeline.start(config)
        self.depth_sensor = profile.get_device().first_depth_sensor(
        ).get_depth_scale()
        self.align_stream = rs.align(rs.stream.color)
        self.decimation = rs.decimation_filter()
        self.hole_filling = rs.hole_filling_filter()
        self.spatial = rs.spatial_filter()

        self.neural = neural
        print("camera loop has stared")

        threading.Thread.__init__(self)
Example #6
    def __init__(self, color, ip, port, topic, interval):
        Publisher.__init__(self, ip, port, topic)
        self.interval = interval
        self.lastUpdate = self.millis(self)

        pipe = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        profile = pipe.start(cfg)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

        self.running = True
        self.color = color

        zero_vec = (0.0, 0.0, 0.0)
        self.depth_frame = zero_vec
        self.color_frame = zero_vec
        self.depth_colormap = zero_vec

        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        #self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.decimate)
        self.colorizer = rs.colorizer()
Example #7
    def __init__(self, address):
        asyncore.dispatcher.__init__(self)
        print("Launching Realsense Camera Server")
        self.pipeline, self.sensor = open_pipeline()
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        print('sending acknowledgement to', address)

        # reduce the resolution of the depth image using post processing
        self.decimate_filter = rs.decimation_filter()
        self.decimate_filter.set_option(rs.option.filter_magnitude, 2)
        self.frame_data = ''
        self.frame_length = 0
        self.connect((MC_IP_ADDRESS, PORT))
        self.packet_id = 0

        self.remaining_bytes = 0
        self.buffer = bytearray()

        self.color_image = np.array([])
        self.depth_image = np.array([])
        self.depth_scale = 0
        self.aligned_frames = []
        self.intrinsics = rs.intrinsics()

        # yolov3 detection result
        boxes = dict()
        boxes['detection_classes'] = []
        boxes['detection_boxes'] = []
        boxes['detection_scores'] = []
        boxes['bgr_img'] = []
        self.boxes = boxes
Example #8
    def __init__(self, w=640, h=480, clipping_dist_meters=1):
        self.w, self.h = w, h
        # Create a pipeline
        self.pipeline = rs.pipeline()

        # Create a config and configure the pipeline to stream
        config = rs.config()
        config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)
        # Start streaming
        profile = self.pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", self.depth_scale)

        # We will be removing the background of objects more than
        #  clipping_distance_in_meters meters away
        self.clipping_distance = clipping_dist_meters / self.depth_scale

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        #set up depth post-processing filters
        self.decimation_filter = rs.decimation_filter()  #default is 2
        self.spatial_filter = rs.spatial_filter(smooth_alpha=.6,
                                                smooth_delta=20,
                                                magnitude=2,
                                                hole_fill=0)
        self.hole_fill_filter = rs.hole_filling_filter(
        )  #default is fill according to neighboring pixel farthest from sensor
Example #9
def map_func():
    pipe_list, DEPTH_SCALE = get_pipes()
    print("{} pipes created!".format(len(pipe_list)))
    map_point_cloud = np.zeros((1, 3), np.float32)

    decimate = rs.decimation_filter()

    while True:
        frameset = get_frames(
            pipe_list,
            decimate)  #get the depth and pose frames from the camera stream
        points, current_pos = translate_frame_to_points(frameset, DEPTH_SCALE)

        print("points_shape: ", points.shape)
        if map_point_cloud.shape[0] >= MAX_POINT_COUNT:
            print("They don't wanna see me in the ends")
            map_point_cloud = map_point_cloud[(points.shape[0] - 1):-1]

        map_point_cloud = np.append(
            map_point_cloud, points,
            axis=0)  #append the points to the overall map

        portrait = scan_portrait(map_point_cloud, current_pos)

        cv2.imshow("circumstances", portrait)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
Example #10
def computeBackground(options, pipeline, align):
    count = 0
    minVal = np.full(
        (constants.FRAME_DECIMATED_HEIGHT, constants.FRAME_DECIMATED_WIDTH),
        constants.MAX_FLOAT32,
        dtype=np.float32)
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)

    temporal = rs.temporal_filter()
    while count < 60:
        frames = pipeline.wait_for_frames()
        alignedFrames = align.process(frames)
        depth = alignedFrames.get_depth_frame()
        #        depth = frames.get_depth_frame()

        filtered_depth = decimation.process(depth)
        #        filtered_depth = temporal.process(filtered_depth)
        #        filtered_depth = depth

        d = np.asanyarray(filtered_depth.get_data()).astype(np.float32)
        print(d.shape, d.dtype)
        zeros = d.size - np.count_nonzero(d)
        print('Input:zeros:' + str(zeros) + ' total:' + str(d.size))
        d[d == 0.] = constants.MAX_FLOAT32
        minVal = np.minimum(minVal, d)
        print('Minval: zeros:' +
              str(minVal[minVal == constants.MAX_FLOAT32].size) + ' total:' +
              str(minVal.size))
        count = count + 1

    return inpaint(options, minVal)
Example #11
    def on(self):
        # Create a pipeline
        self.pipeline = rs.pipeline()

        #Create a config and configure the pipeline to stream
        config = rs.config()
        config.enable_stream(rs.stream.depth, self.arg["width"],
                             self.arg["height"], rs.format.z16,
                             self.arg["fps"])
        config.enable_stream(rs.stream.infrared, 1, self.arg["width"],
                             self.arg["height"], rs.format.y8, self.arg["fps"])
        config.enable_stream(rs.stream.color, self.arg["width"],
                             self.arg["height"], rs.format.bgr8,
                             self.arg["fps"])
        profile = self.pipeline.start(config)

        # apply advanced mode
        dev = profile.get_device()
        advnc_mode = rs.rs400_advanced_mode(dev)
        json_obj = json.load(open(self.arg["preset_path"]))
        json_string = str(json_obj).replace("'", '\"')
        advnc_mode.load_json(json_string)

        # decimate
        decimate = rs.decimation_filter()
        decimate.set_option(rs.option.filter_magnitude,
                            2**self.arg["decimate_scale"])
Example #12
    def __init__(self):

        ctx = rs.context()
        self.devices = ctx.query_devices()
        self.configs = list()
        self.filters = list()
        for device in self.devices:
            config = rs.config()
            config.enable_device(device.get_info(rs.camera_info.serial_number))
            config.enable_stream(rs.stream.depth, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, IMG_WIDTH, IMG_HEIGHT,
                                 rs.format.bgr8, 30)
            self.configs.append(config)
            align = rs.align(rs.stream.color)
            spatial = rs.spatial_filter()
            spatial.set_option(rs.option.filter_magnitude, 5)
            spatial.set_option(rs.option.filter_smooth_alpha, 1)
            spatial.set_option(rs.option.filter_smooth_delta, 50)
            spatial.set_option(rs.option.holes_fill, 3)
            temporal = rs.temporal_filter()
            hole_filling = rs.hole_filling_filter()
            depth_to_disparity = rs.disparity_transform(True)
            disparity_to_depth = rs.disparity_transform(False)
            decimate = rs.decimation_filter()
            self.filters.append({
                'align': align,
                'spatial': spatial,
                'temporal': temporal,
                'hole': hole_filling,
                'disparity': depth_to_disparity,
                'depth': disparity_to_depth,
                'decimate': decimate
            })
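A minimal sketch of how one of the per-device filter dicts built above could be applied, assuming frames obtained from the pipeline started with the matching config; the chain follows librealsense's recommended order (decimation, then spatial/temporal smoothing in the disparity domain, then hole filling).

def apply_filter_set(filters, frames):
    # Sketch only: `filters` is one entry of self.filters from the example above
    depth = filters['align'].process(frames).get_depth_frame()
    depth = filters['decimate'].process(depth)
    depth = filters['disparity'].process(depth)   # depth -> disparity
    depth = filters['spatial'].process(depth)
    depth = filters['temporal'].process(depth)
    depth = filters['depth'].process(depth)       # disparity -> depth
    depth = filters['hole'].process(depth)
    return depth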
Example #13
def main():
    cam_pipes, depth_scale = c_man.get_pipes()
    #print("{} pipes created!".format(len(cam_pipes)))

    dec_filter = rs.decimation_filter()
    dep_scan = im_scan.scan_portait()

    traj_recv = Trajectory_Reciever()
    stay_go_sender = Publisher(3500)

    while True:
        frameset = c_man.get_frames(cam_pipes, dec_filter)
        scan_mat = dep_scan.get_portrait_from_frames(frameset, depth_scale)

        #dep_scan.conv_reduce()
        _, trans_position = dep_scan.get_pose()
        xz_pos = np.delete(trans_position, 1, axis=0)

        x_vel, y_vel = traj_recv.get_trajectory()
        control_vec = dep_scan.vector_from_vel(x_vel, y_vel)

        print("!dir_vec: ", control_vec)
        soft_danger, rgb_mat = real_oord_test(dep_scan,
                                              dir_vec=control_vec,
                                              position=xz_pos)

        stay_go_sender.send({"soft_danger": soft_danger})
        print("-------------------------pass_fail: ", soft_danger)
        cv2.imshow("circumstances", rgb_mat)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
Example #14
    def _filter_depth_frame(depth_frame):
        """
        Depth-image filter applied before extracting coordinates.
        :param depth_frame: depth frame
        :return: filtered depth frame
        """
        dec = rs.decimation_filter()
        dec.set_option(rs.option.filter_magnitude, 1)
        depth_frame_pro = dec.process(depth_frame)

        depth2disparity = rs.disparity_transform()
        depth_frame_pro = depth2disparity.process(depth_frame_pro)

        spat = rs.spatial_filter()
        # Enable hole filling; the value 5 fills all zero pixels
        spat.set_option(rs.option.holes_fill, 5)
        depth_frame_pro = spat.process(depth_frame_pro)

        temp = rs.temporal_filter()
        depth_frame_pro = temp.process(depth_frame_pro)

        disparity2depth = rs.disparity_transform(False)
        depth_frame_pro = disparity2depth.process(depth_frame_pro)

        # depth_image_pro = np.asanyarray(depth_frame_pro.get_data())
        # depth_colormap_pro = cv2.applyColorMap(cv2.convertScaleAbs(depth_image_pro, alpha=0.03), cv2.COLORMAP_JET)

        return depth_frame_pro
Example #15
    def __init__(self, pipeline):
        self.pipe = pipeline
        self.scale = 1
        self.w = 640
        self.h = 480
        self.pc = None
        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        self.decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
        self.colorizer = rs.colorizer()
        self.state = state  # General ImageHandler model state
        self.out = out  # General Image handler output if not changed

        # Data from camera
        self.image = None
        self.verts = None

        # Setup camera
        try:
            # Get stream profile and camera intrinsics
            profile = pipeline.get_active_profile()
            depth_profile = rs.video_stream_profile(
                profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
            depth_sensor = profile.get_device().first_depth_sensor()
            self.scale = depth_sensor.get_depth_scale()
        except RuntimeError as err:
            print(err)
Example #16
    def __init__(self, w=640, h=480, depth=True, frame_rate=30):
        '''
        Initializing the Python RealSense Control Flow:
        w: Int (default = 640, can also be 1280) 
        h: Int (default = 480, can also be 720)
        depth: Bool (default = True)
        frame_rate: Int (default = 30)

        RGB and Depth formats are: bgr8, z16

        Note: In this class, variables should not be directly changed.
        '''
        self.width = w
        self.height = h
        self.depth_on = depth
        self._pipeline = rs.pipeline()
        self._config = rs.config()
        self._config.enable_stream(rs.stream.color, w, h, rs.format.bgr8,
                                   frame_rate)
        self._intrinsic = None

        if depth:
            self.align = rs.align(rs.stream.color)
            self._preset = 0
            # Presets (for D415):
            # 0: Custom
            # 1: Default
            # 2: Hand
            # 3: High Accuracy
            # 4: High Density
            # 5: Medium Density

            # depth interpolation
            self.interpolation = cv2.INTER_NEAREST  # use nearest neighbor
            # self.interpolation = cv2.INTER_LINEAR  # linear
            # self.interpolation = cv2.INTER_CUBIC  # cubic

            # beautify depth image for viewing
            self._config.enable_stream(rs.stream.depth, w, h, rs.format.z16,
                                       frame_rate)
            self.colorizer = rs.colorizer()

            # initialize filters
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)

            self.depth_to_disparity = rs.disparity_transform(True)

            self.spatial = rs.spatial_filter()
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 0.5)
            self.spatial.set_option(rs.option.filter_smooth_delta, 20)

            self.temporal = rs.temporal_filter()

            self.disparity_to_depth = rs.disparity_transform(False)

        print(
            "Initialized RealSense Camera\nw: {}, h: {}, depth: {}, frame_rate: {}"
            .format(w, h, depth, frame_rate))
Example #17
def post_process_depth_frame(depth_frame,
                             decimation_magnitude=1.0,
                             spatial_magnitude=2.0,
                             spatial_smooth_alpha=0.5,
                             spatial_smooth_delta=20,
                             temporal_smooth_alpha=0.4,
                             temporal_smooth_delta=20):

    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
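A minimal capture-loop sketch for the helper above, assuming the usual import pyrealsense2 as rs and import numpy as np; the filtered frame converts to a NumPy array the same way as a raw depth frame.

pipeline = rs.pipeline()
pipeline.start()
try:
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    filtered = post_process_depth_frame(depth_frame, decimation_magnitude=2.0)
    depth_image = np.asanyarray(filtered.get_data())
finally:
    pipeline.stop()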
Example #18
def post_process_depth_frame(depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0, spatial_smooth_alpha=0.5,
                             spatial_smooth_delta=20, temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    """ 
    RS에서 획득한 깊이 프레임 전처리 필터링 하는 함수
        Return:
            filtered_frame : rs.frame()
    """

    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
Example #19
def get_depth_filter_list(decimate=True,
                          d2d=True,
                          spatial=True,
                          temporal=True):
    filters = []
    if decimate:
        dec_filt = rs.decimation_filter()
        dec_filt.set_option(rs.option.filter_magnitude, 2)
        filters.append(dec_filt)

    if d2d:
        depth2disparity = rs.disparity_transform()
        filters.append(depth2disparity)

    if spatial:
        spat = rs.spatial_filter()
        spat.set_option(rs.option.holes_fill, FILL_ALL_ZERO_PIXELS)
        filters.append(spat)

    if temporal:
        temp = rs.temporal_filter()
        filters.append(temp)

    if d2d:
        disparity2depth = rs.disparity_transform(False)
        filters.append(disparity2depth)

    return filters
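The returned list is ordered so it can be applied front to back; a minimal sketch, assuming depth_frame came from wait_for_frames():

filters = get_depth_filter_list()
filtered = depth_frame
for f in filters:
    filtered = f.process(filtered)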
Example #20
def post_process_depth_frame(depth_frame,
                             decimation_magnitude=1.0,
                             spatial_magnitude=2.0,
                             spatial_smooth_alpha=0.5,
                             spatial_smooth_delta=20,
                             temporal_smooth_alpha=0.4,
                             temporal_smooth_delta=20):
    """
    Filter the depth frame acquired using the Intel RealSense device

    Parameters:
    -----------
    depth_frame 	 	 	 : rs.frame()
                               The depth frame to be post-processed
    decimation_magnitude : double
                              The magnitude of the decimation filter
    spatial_magnitude 	 : double
                            The magnitude of the spatial filter
    spatial_smooth_alpha	 : double
                            The alpha value for spatial filter based smoothening
    spatial_smooth_delta	 : double
                            The delta value for spatial filter based smoothening
    temporal_smooth_alpha : double
                            The alpha value for temporal filter based smoothening
    temporal_smooth_delta : double
                            The delta value for temporal filter based smoothening


    Return:
    ----------
    filtered_frame : rs.frame()
                       The post-processed depth frame
    """

    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
Example #21
    def decimation(self, frame):
        #apply decimation filter to the frame that is sent in. Downscaling
        #must call depthImageCV2 to display

        decimation = rs.decimation_filter()
        decimation.set_option(rs.option.filter_magnitude, 2)
        decimated_depth = decimation.process(frame)
        return decimated_depth
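The depthImageCV2 helper mentioned in the comment is not shown; a rough equivalent for displaying the decimated frame with OpenCV (assuming numpy and cv2 are imported) might look like this.

depth_image = np.asanyarray(decimated_depth.get_data())
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),
                                   cv2.COLORMAP_JET)
cv2.imshow("decimated depth", depth_colormap)
cv2.waitKey(1)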
Example #22
 def __init__(self):
     self.filters = [
         rs.decimation_filter(RESCALE),
         rs.disparity_transform(True),
         rs.hole_filling_filter(1),
         rs.spatial_filter(0.5, 8, 2, 2),
         rs.temporal_filter(0.5, 20, 1),
         rs.disparity_transform(False)
     ]
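A companion method (not part of the original snippet) might run a depth frame through this chain in list order, which already brackets the spatial and temporal filters with the two disparity transforms.

    def process(self, depth_frame):
        # Sketch only: apply the filters defined in __init__ front to back
        for f in self.filters:
            depth_frame = f.process(depth_frame)
        return depth_frame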
Example #23
def mainSegment(options = None):
    pipeline, depthScale, align = initCamera()
    print('--------------------------------------------')
    print(options)
    counter = 0
    if constants.PROFILE_ON:
        pr  = cProfile.Profile() #profile
        pr.enable() #profile
    try:
        warmUp(pipeline)
        minVal = computeBackground(options, pipeline, align)
        net = bp.newNet()
        t0 =  time.time()
        counterOld = counter
        decimation = rs.decimation_filter()
        decimation.set_option(rs.option.filter_magnitude,
                              constants.DECIMATION_FACTOR)
        temporal = rs.temporal_filter(0.5, 20, 5)#valid 1 of last 2
        while True:
            counter = counter + 1
            frames = pipeline.wait_for_frames()
#            alignedFrames = align.process(frames)
#            depth = alignedFrames.get_depth_frame()
            depth = frames.get_depth_frame()
            filtered_depth = decimation.process(depth)
            filtered_depth = temporal.process(filtered_depth)
            # intrinsics already reflects decimation
            intrinsics = filtered_depth.profile.as_video_stream_profile().intrinsics

#            filtered_depth = depth
#            if counter % 2 == 1:
                #help the temporal filter since we cannot process full rate
#                continue
            d = np.asanyarray(filtered_depth.get_data()).astype(np.float32)
            alpha = subtraction(options, minVal, d)
            if (options and options.get('display')):
                cv2.imshow('Contour', alpha)
            if constants.PROFILE_ON:
                result = pr.runcall(bp.process, options, net, d, alpha,
                                    depthScale, intrinsics) #profile
            else:
                result = bp.process(options, net, d, alpha, depthScale,
                                    intrinsics)
            if (options and options.get('display')):
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            if counter % 10 == 0:
                t1 = time.time()
                print('{:.3f} images/sec'.format((counter - counterOld) / (t1 - t0)))
                t0 = t1
                counterOld = counter
            yield json.dumps(result)
    finally:
        if constants.PROFILE_ON:
            pr.disable()
            pr.print_stats()#profile
        pipeline.stop()
Example #24
def post_process_depth_frame(depth_frame, decimation_magnitude=1.0, spatial_magnitude=2.0, spatial_smooth_alpha=0.5,
                             spatial_smooth_delta=20, temporal_smooth_alpha=0.4, temporal_smooth_delta=20):
    """
    Filter the depth frame acquired using the Intel RealSense device

    Parameters:
    -----------
    depth_frame 	 	 	 : rs.frame()
                               The depth frame to be post-processed
    decimation_magnitude : double
                              The magnitude of the decimation filter
    spatial_magnitude 	 : double
                            The magnitude of the spatial filter
    spatial_smooth_alpha	 : double
                            The alpha value for spatial filter based smoothening
    spatial_smooth_delta	 : double
                            The delta value for spatial filter based smoothening
    temporal_smooth_alpha : double
                            The alpha value for temporal filter based smoothening
    temporal_smooth_delta : double
                            The delta value for temporal filter based smoothening


    Return:
    ----------
    filtered_frame : rs.frame()
                       The post-processed depth frame
    """

    # Post processing possible only on the depth_frame
    assert (depth_frame.is_depth_frame())

    # Available filters and control options for the filters
    decimation_filter = rs.decimation_filter()
    spatial_filter = rs.spatial_filter()
    temporal_filter = rs.temporal_filter()

    filter_magnitude = rs.option.filter_magnitude
    filter_smooth_alpha = rs.option.filter_smooth_alpha
    filter_smooth_delta = rs.option.filter_smooth_delta

    # Apply the control parameters for the filter
    decimation_filter.set_option(filter_magnitude, decimation_magnitude)
    spatial_filter.set_option(filter_magnitude, spatial_magnitude)
    spatial_filter.set_option(filter_smooth_alpha, spatial_smooth_alpha)
    spatial_filter.set_option(filter_smooth_delta, spatial_smooth_delta)
    temporal_filter.set_option(filter_smooth_alpha, temporal_smooth_alpha)
    temporal_filter.set_option(filter_smooth_delta, temporal_smooth_delta)

    # Apply the filters
    filtered_frame = decimation_filter.process(depth_frame)
    filtered_frame = spatial_filter.process(filtered_frame)
    filtered_frame = temporal_filter.process(filtered_frame)

    return filtered_frame
Example #25
 def preprocess_depth_frame(self, depth_frame):
     decimation = rs.decimation_filter()
     depth_image = decimation.process(depth_frame)
     spatial = rs.spatial_filter()
     #filtered_depth = spatial.process(depth_frame)
     spatial.set_option(rs.option.filter_magnitude, 5)
     spatial.set_option(rs.option.filter_smooth_alpha, 1)
     spatial.set_option(rs.option.filter_smooth_delta, 50)
     spatial.set_option(rs.option.holes_fill, 3)
     depth_frame = spatial.process(depth_frame)
     return depth_frame
Example #26
    def __init__(self, profile):
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        self.depth_intrinsics = depth_profile.get_intrinsics()
        self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height

        self.state = PointCloudState()
        self.out = np.empty((self.h, self.w, 3), dtype=np.uint8)

        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.state.decimate)
Example #27
 def __init__(self):
     # Configure depth and color streams
     self.__pipeline__ = rs.pipeline()
     self.__config__ = rs.config()
     self.__config__.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
     self.__config__.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
     self.__profile__ = None
     self.__depth_profile__ = None
     self.__depth_intrinsics__ = None
     self.__decimate__ = rs.decimation_filter()
     self.__colorizer__ = rs.colorizer()
Example #28
def GetStandardDeviationsFromBag(bag_file_path, frame_index_difference = 10, do_analysis_every_n_frames = 1, bag_timeout_ms = 500, filter=False):

    try:
        pipeline = rs.pipeline()
        config = rs.config()
        rs.config.enable_device_from_file(config, bag_file_path, repeat_playback=False)
        profile = pipeline.start(config).get_device().as_playback().set_real_time(False)
        
        depth_frames_deque = deque()
        SDs = []
        FNs = []
        all_frame_numbers = []
        frames_since_last_analysis = 0

        if filter:
            spatial = rs.spatial_filter()
            decimation = rs.decimation_filter()
            hole_filling = rs.hole_filling_filter()
            hole_filling.set_option(rs.option.holes_fill, 2)

        while True: 
            frames = pipeline.wait_for_frames(timeout_ms=bag_timeout_ms)
            fn = frames.frame_number
            all_frame_numbers += [fn]
            frames_since_last_analysis += 1
            cur_depth_frame = frames.get_depth_frame()

            if filter:
                cur_depth_frame = decimation.process(cur_depth_frame)
                cur_depth_frame = spatial.process(cur_depth_frame)
                cur_depth_frame = hole_filling.process(cur_depth_frame)

            depth_frames_deque.append(cur_depth_frame)

            if len(depth_frames_deque) > frame_index_difference:
                cur_depth_image = np.asanyarray(cur_depth_frame.get_data())
                past_depth_image = np.asanyarray(depth_frames_deque.popleft().get_data())
                if frames_since_last_analysis >= do_analysis_every_n_frames:
                    SDs += [calculateSD(cur_depth_image,past_depth_image)]
                    FNs += [fn]
                    frames_since_last_analysis = 0
    except Exception as e:
        print(e)
        if "arrive" in str(e):
            pipeline.stop()
            del(pipeline)
            del(profile)
            return np.array(all_frame_numbers),np.array(FNs),np.array(SDs)
        else:
            raise(e)
        pass
    finally:
        return np.array(all_frame_numbers),np.array(FNs),np.array(SDs)
Example #29
    def takeDepth(self, showMillis=0) -> bool:

        if not self.streaming:
            if self.startStream():
                for i in range(self.numReads
                               ):  # let cam adapt to current light situation
                    frames = self.handle.wait_for_frames()
                    _ = frames.get_depth_frame()
                    time.sleep(0.1)
            else:
                config.log(f"D415 startStream failed")
                return False

        decimate = rs.decimation_filter()
        decimate.set_option(rs.option.filter_magnitude, 3)

        frames = self.handle.wait_for_frames()

        depthFrame = frames.get_depth_frame()
        if depthFrame is None:
            config.log(f"could not acquire depth from D415")
            return False

        else:
            decimated = decimate.process(
                depthFrame)  # make 428x240x3 from 1280x720x3

            # Grab new intrinsics (may be changed by decimation)
            # depth_intrinsics = rs.video_stream_profile(depth_frame.profile).get_intrinsics()
            # w, h = depth_intrinsics.width, depth_intrinsics.height
            pointCloud = self.pc.calculate(decimated)

            # Pointcloud data to arrays
            verts = pointCloud.get_vertices()
            self.depth = np.asanyarray(verts).view(np.float32).reshape(
                -1, 3)  # xzy

            # Colorize depth frame to jet colormap
            depth_color_frame = self.colorizer.colorize(depthFrame)

            # Convert depth_frame to numpy array to render image in opencv
            self.depthColored = np.asanyarray(depth_color_frame.get_data())

            #depthImage = np.asanyarray(depthFrame.get_data())
            #depthColored = cv2.applyColorMap(cv2.convertScaleAbs(depthImage), cv2.COLORMAP_MAGMA) #.COLORMAP_RAINBOW)
            if showMillis > 0:
                cv2.imshow(f"D415 depth", self.depthColored)
                cv2.waitKey(showMillis)
                cv2.destroyAllWindows()

            config.log(f"D415 depth points available")
            return True
Example #30
 def __init__(self,
              pipeline,
              streamertype=STREAM_RGB,
              pc=None,
              colorizer=None):
     self.pipeline = pipeline
     self.streamtype = streamertype
     self.decimate_filter = rs.decimation_filter()
     decimate = 1
     self.decimate_filter.set_option(rs.option.filter_magnitude,
                                     2**decimate)
     self.colorizer = colorizer
     self.pc = pc
     self.frame_data = ''
Example #31
    def init_video_capture(self):
        try:
            # Create a pipeline
            self.pipeline = rs.pipeline()

            # Create a config and configure the pipeline to stream
            #  different resolutions of color and depth streams
            config = rs.config()
            config.enable_stream(rs.stream.depth, DEPTH_RES_X, DEPTH_RES_Y,
                                 rs.format.z16, DEPTH_FPS)
            config.enable_stream(rs.stream.color, RGB_RES_X, RGB_RES_Y,
                                 rs.format.bgr8, RGB_FPS)
            # config.enable_stream(rs.stream.infrared, 1, DEPTH_RES_X, DEPTH_RES_Y, rs.format.y8, DEPTH_FPS)
            # config.enable_stream(rs.stream.infrared, 2, DEPTH_RES_X, DEPTH_RES_Y, rs.format.y8, DEPTH_FPS)

            # Start streaming
            profile = self.pipeline.start(config)

            # Getting the depth sensor's depth scale (see rs-align example for explanation)
            depth_sensor = profile.get_device().first_depth_sensor()
            self.depth_scale = depth_sensor.get_depth_scale()
            # print("[RealSense D435]: Depth scale", self.depth_scale)

            # TODO: Allow settings to be changed on initializing the function
            depth_sensor.set_option(rs.option.laser_power, 360)  # 0 - 360
            depth_sensor.set_option(
                rs.option.depth_units,
                0.001)  # Number of meters represented by a single depth unit

            # Create an align object
            # rs.align allows us to perform alignment of depth frames to others frames
            # The "align_to" is the stream type to which we plan to align depth frames.
            align_to = rs.stream.color
            self.align = rs.align(align_to)

            self.hole_filling_filter = rs.hole_filling_filter()
            self.decimation_filter = rs.decimation_filter()
            self.temporal_filter = rs.temporal_filter()
        except Exception as e:
            print('[RealsenseD435Camera]: ERROR:', e, file=sys.stderr)
            print(
                '[RealsenseD435Camera]: Could not initialize camera. If the resource is busy, check if any other script '
                'is currently accessing the camera. If this is not the case, replug the camera and try again.',
                file=sys.stderr)
            sys.exit(0)

        self.init_colorizer()

        self.started = False
        self.read_lock = threading.Lock()
Example #32
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()


def mouse_cb(event, x, y, flags, param):

    if event == cv2.EVENT_LBUTTONDOWN:
        state.mouse_btns[0] = True

    if event == cv2.EVENT_LBUTTONUP:
        state.mouse_btns[0] = False

    if event == cv2.EVENT_RBUTTONDOWN:
        state.mouse_btns[1] = True