コード例 #1
0
    def __init__(self, resolution=(640, 480), depth_framerate=6, 
                 color_framerate=30, include_depth=False, decimation_magnitude=1,
                 spatial_smooth=True, temporal_smooth=True):
        """Configure and start a RealSense pipeline for color (and optionally
        depth) streaming at the requested resolution and framerates."""
        self.include_depth = include_depth

        # Describe the streams we want before starting the pipeline.
        width, height = resolution
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, width, height, rs.format.bgr8, color_framerate)
        if include_depth:
            self.config.enable_stream(rs.stream.depth, width, height, rs.format.z16, depth_framerate)

        # Begin streaming with the configuration above.
        self.profile = self.pipeline.start(self.config)
        self.color_sensor = self.profile.get_device().first_color_sensor()

        if include_depth:
            self.depth_sensor = self.profile.get_device().first_depth_sensor()
            # Scale factor from raw z16 depth units to meters.
            self.depth_scale = self.depth_sensor.get_depth_scale()

            # rs.align maps frames from the other streams onto the depth stream.
            self.align = rs.align(rs.stream.depth)

            # Intrinsics are captured lazily once frames start arriving.
            self.aligned_depth_K = None

            # Post-processing chain; filters run in list order on each depth frame.
            self.filters = []
            if decimation_magnitude > 1:
                decimator = rs.decimation_filter()
                decimator.set_option(rs.option.filter_magnitude, decimation_magnitude)
                self.filters.append(decimator)
            smoothing = spatial_smooth or temporal_smooth
            if smoothing:
                # The smoothing filters operate in disparity space.
                self.filters.append(rs.disparity_transform())
            if spatial_smooth:
                self.filters.append(rs.spatial_filter())
            if temporal_smooth:
                self.filters.append(rs.temporal_filter())
            if smoothing:
                # Convert back from disparity to depth afterwards.
                self.filters.append(rs.disparity_transform(False))

            # Utility for projecting depth frames into 3D point clouds.
            self.pc = rs.pointcloud()

            self.depth_profile = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = self.depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
コード例 #2
0
def start_camera_stream():
    """Start a module-global RealSense pipeline streaming 640x480 BGR color at 30 FPS."""
    # NOTE(review): comment this out when not testing in sim.
    global pipeline, config
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(config)

    # Query the active color stream's intrinsics for the actual frame size.
    # NOTE(review): frame_w/frame_h are locals and are discarded on return —
    # confirm whether they were meant to be globals like pipeline/config.
    active_profile = pipeline.get_active_profile()
    color_profile = rs.video_stream_profile(active_profile.get_stream(rs.stream.color))
    intrinsics = color_profile.get_intrinsics()
    frame_w, frame_h = intrinsics.width, intrinsics.height
コード例 #3
0
    def get_frame(self, include_pointcloud=False, do_alignment=True):
        """Wait for one frameset and return (color_image, depth_image, points).

        depth_image and points are None when depth streaming is disabled;
        points is None unless include_pointcloud is True. On an invalid
        frame, logs an error and returns (None, None, None).
        """
        frames = self.pipeline.wait_for_frames()

        # Color-only mode: no depth or pointcloud available.
        if not self.include_depth:
            return np.asanyarray(frames.get_color_frame().get_data()), None, None

        # Align so depth and color pixels correspond.
        if do_alignment:
            frames = self.align.process(frames)

        aligned_depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        # Run the configured post-processing chain, in order, on the depth frame.
        # (renamed loop variable: `filter` shadowed the builtin)
        for depth_filter in self.filters:
            aligned_depth_frame = depth_filter.process(aligned_depth_frame)

        # Validate both frames. Fixed: previously a bare `return` yielded None
        # here while every other path returns a 3-tuple, so callers that
        # unpack the result would crash with a TypeError.
        if not aligned_depth_frame or not color_frame:
            logging.error("Invalid aligned or color frame.")
            return None, None, None

        # Cache intrinsics of the (possibly filtered) depth stream on first use.
        if self.aligned_depth_K is None:
            depth_intrinsics = rs.video_stream_profile(aligned_depth_frame.profile).get_intrinsics()
            self.aligned_depth_K = to_camera_matrix(depth_intrinsics)
            self.aligned_depth_inv = np.linalg.inv(self.aligned_depth_K)
            np.savetxt("d415_intrinsics.csv", self.aligned_depth_K)

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        if include_pointcloud:
            # NOTE(review): map_to is conventionally called with the COLOR
            # frame, and before calculate(); mapping the depth frame to itself
            # after calculate() looks suspect — confirm intended texture mapping.
            points = self.pc.calculate(aligned_depth_frame)
            self.pc.map_to(aligned_depth_frame)
        else:
            points = None

        return color_image, depth_image, points
コード例 #4
0
ファイル: main.py プロジェクト: ETHANAK17/CS496finalProject
        # Run DNN inference on CPU and collect the names of the unconnected
        # (output) layers of the network.
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        layers = net.getLayerNames()
        # NOTE(review): the `i[0]` indexing assumes getUnconnectedOutLayers()
        # returns nested 1-element arrays (older OpenCV builds); newer versions
        # return a flat array of ints — confirm against the installed cv2.
        output_layers = [
            layers[i[0] - 1] for i in net.getUnconnectedOutLayers()
        ]
        """
        iminitalize video from realsense
        """

        # Start a RealSense pipeline streaming 640x480 BGR color at 30 FPS,
        # plus the accelerometer stream.
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        config.enable_stream(rs.stream.accel)
        pipeline.start(config)
        # Query the active color stream's intrinsics for the actual frame size.
        profile = pipeline.get_active_profile()
        image_profile = rs.video_stream_profile(
            profile.get_stream(rs.stream.color))
        image_intrinsics = image_profile.get_intrinsics()
        frame_w, frame_h = image_intrinsics.width, image_intrinsics.height

        print('image: {} w  x {} h pixels'.format(frame_w, frame_h))

        # One random BGR color per class for drawing, plus a fixed highlight color.
        colors = np.random.uniform(0, 255, size=(len(classes), 3))
        myColor = (20, 20, 230)

        # Now, look for target...
        ####################################################################################
        # Conduct Mission
        ####################################################################################

        logging.info("Searching for target...")
コード例 #5
0
# Viewer/application state (mouse buttons, decimation level, etc. — defined
# by AppState elsewhere in this file).
state = AppState()

# Start a RealSense pipeline with default stream configuration.
pipeline = rs.pipeline()
config = rs.config()

# Start streaming
pipeline.start(config)

# # xx = rs.stream(0).infrared
#
# yy = rs.composite_frame(profile.get_stream(rs.stream.depth))
#     # .get_infrared_frame(rs.stream(0).infrared)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks: pointcloud projector, decimation filter (magnitude is a
# power of two driven by the app state), and a colorizer for depth display.
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
colorizer = rs.colorizer()


def mouse_cb(event, x, y, flags, param):

    if event == cv2.EVENT_LBUTTONDOWN:
        state.mouse_btns[0] = True