    def poll_frames(self):
        """
        Poll for frames from the enabled Intel RealSense devices.
        If temporal post-processing is enabled, the depth stream is averaged over a certain number of frames.

        Returns:
        -----------
        frames : dict
            Keyed by device serial number; each value maps a stream type (or a
            (stream type, stream index) tuple for infrared) to its latest frame.
        """
        frames = {}
        for (serial, device) in self._enabled_devices.items():
            streams = device.pipeline_profile.get_streams()
            frameset = rs.composite_frame(rs.frame())
            device.pipeline.poll_for_frames(frameset)
            if frameset.size() == len(streams):
                frames[serial] = {}
                for stream in streams:
                    if (rs.stream.infrared == stream.stream_type()):
                        # Fetch infrared frames by stream index so the two
                        # infrared imagers do not overwrite each other.
                        frame = frameset.get_infrared_frame(stream.stream_index())
                        key_ = (stream.stream_type(), stream.stream_index())
                    else:
                        frame = frameset.first_or_default(stream.stream_type())
                        key_ = stream.stream_type()
                    frames[serial][key_] = frame

        return frames

Example #2

    def poll_frames(self):
        """
        Poll for frames from the enabled Intel RealSense devices.
        If temporal post-processing is enabled, the depth stream is averaged over a certain number of frames.

        Returns:
        -----------
        frames : dict
            Keyed by device serial number; each value maps a stream type (or a
            (stream type, stream index) tuple for infrared) to its latest frame.
        """
        frames = {}
        for (serial, device) in self._enabled_devices.items():
            streams = device.pipeline_profile.get_streams()
            frameset = rs.composite_frame(rs.frame())
            device.pipeline.poll_for_frames(frameset)
            if frameset.size() == len(streams):
                frames[serial] = {}
                for stream in streams:
                    if (rs.stream.infrared == stream.stream_type()):
                        frame = frameset.get_infrared_frame(stream.stream_index())
                        key_ = (stream.stream_type(), stream.stream_index())
                    else:
                        frame = frameset.first_or_default(stream.stream_type())
                        key_ = stream.stream_type()
                    frames[serial][key_] = frame

        return frames
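
The dictionary returned by poll_frames is keyed by device serial number, with one frame per enabled stream inside. A minimal consumption sketch follows; `manager` stands in for an instance of whichever class defines poll_frames (the class itself is not shown in the snippet), and numpy is assumed to be available:

import numpy as np
import pyrealsense2 as rs

frames = manager.poll_frames()
for serial, stream_frames in frames.items():
    depth = stream_frames.get(rs.stream.depth)
    if depth:
        # np.asanyarray over frame.get_data() is the usual way to get a
        # 2-D array of 16-bit depth units from a pyrealsense2 depth frame.
        depth_image = np.asanyarray(depth.get_data())
        print(serial, depth_image.shape)
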
Example #3
    def getFrames(self):
        # Give the playback devices a moment to produce new frames.
        time.sleep(0.04)
        frame_left = rs.composite_frame(rs.frame())
        frame_right = rs.composite_frame(rs.frame())

        # Resume each recording only while polling it, so the two playbacks
        # advance roughly in step.
        self.playback_left.resume()
        s_left = self.pipeline_left.poll_for_frames(frame_left)
        self.playback_left.pause()

        self.playback_right.resume()
        s_right = self.pipeline_right.poll_for_frames(frame_right)
        self.playback_right.pause()

        # If either poll returned nothing, retry until both sides have a frameset.
        if not s_right or not s_left:
            [frame_left, frame_right] = self.getFrames()

        return [frame_left, frame_right]
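
The snippet above assumes the two playback pipelines and their playback handles already exist. One way they could be created from recorded files is sketched below; the file names "left.bag" and "right.bag" and the helper name open_playback are hypothetical, while the pyrealsense2 calls themselves are standard:

import pyrealsense2 as rs

def open_playback(bag_path):
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device_from_file(bag_path, repeat_playback=False)
    profile = pipeline.start(config)
    playback = profile.get_device().as_playback()
    playback.set_real_time(False)  # deliver frames on demand instead of at recording speed
    playback.pause()               # getFrames() resumes and pauses around each poll
    return pipeline, playback

# e.g. in __init__:
# self.pipeline_left, self.playback_left = open_playback("left.bag")
# self.pipeline_right, self.playback_right = open_playback("right.bag")
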
Example #4
def expect(depth_frame=None, color_frame=None, nothing_else=False):
    """
    Looks at the syncer queue and gets the next frame from it if available, checking its contents
    against the expected frame numbers.
    """
    global syncer, playback_status
    f = syncer.poll_for_frame()
    if playback_status is not None:
        countdown = 50  # 5 seconds
        while not f and playback_status != rs.playback_status.stopped:
            countdown -= 1
            if countdown == 0:
                break
            time.sleep(0.1)
            f = syncer.poll_for_frame()
    # NOTE: f will never be None
    if not f:
        test.check(depth_frame is None, "expected a depth frame")
        test.check(color_frame is None, "expected a color frame")
        return False

    log.d("Got", f)

    fs = rs.composite_frame(f)

    if fs:
        depth = fs.get_depth_frame()
    else:
        depth = rs.depth_frame(f)
    test.info("actual depth", depth)
    test.check_equal(depth_frame is None, not depth)
    if depth_frame is not None and depth:
        test.check_equal(depth.get_frame_number(), depth_frame)

    if fs:
        color = fs.get_color_frame()
    elif not depth:
        color = rs.video_frame(f)
    else:
        color = None
    test.info("actual color", color)
    test.check_equal(color_frame is None, not color)
    if color_frame is not None and color:
        test.check_equal(color.get_frame_number(), color_frame)

    if nothing_else:
        f = syncer.poll_for_frame()
        test.info("Expected nothing else; actual", f)
        test.check(not f)

    return True
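
A few hypothetical calls from a test script, to show how the parameters are meant to be used; syncer, playback_status, test and log are assumed to be initialized elsewhere in the harness, as in the original test file:

expect(depth_frame=0)                     # only a depth frame, with frame number 0
expect(depth_frame=1, color_frame=1)      # a matched depth/color pair, both numbered 1
expect(depth_frame=2, nothing_else=True)  # depth frame 2 arrives and the queue is then empty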

Example #5

    def poll_frames(self):
        """
        Poll for frames from the enabled Intel RealSense devices.
        If temporal post-processing is enabled, the depth stream is averaged over a certain number of frames.

        Returns:
        -----------
        frames : dict
            Keyed by device serial number; each value maps a stream type (or a
            (stream type, stream index) tuple for infrared) to its latest frame.
        """
        frames = {}
        for (serial, device) in self._enabled_devices.items():
            streams = device.pipeline_profile.get_streams()
            # Try a non-blocking poll first; fall back to a blocking wait
            # (1 s timeout) so a complete frameset is available.
            frameset = rs.composite_frame(rs.frame())
            if not device.pipeline.poll_for_frames(frameset):
                frameset = device.pipeline.wait_for_frames(1000)
            time.sleep(0.02)  # two cameras can use 0.02; a single camera needs 0.08
            if frameset.size() == len(streams):
                frames[serial] = {}
                for stream in streams:
                    if (rs.stream.infrared == stream.stream_type()):
                        frame = frameset.get_infrared_frame(
                            stream.stream_index())
                        key_ = (stream.stream_type(), stream.stream_index())
                    else:
                        frame = frameset.first_or_default(stream.stream_type())
                        key_ = stream.stream_type()
                    frames[serial][key_] = frame

        return frames
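
The docstring mentions temporal post-processing, which this snippet does not actually perform. As a sketch (the wiring into the class above is an assumption; the filter itself is a standard pyrealsense2 post-processing block), each polled depth frame could be passed through a temporal filter before use:

import pyrealsense2 as rs

temporal = rs.temporal_filter()  # keeps history across the frames it processes

def smooth_depth(depth_frame):
    # Returns a depth frame averaged over the recent frames seen by the filter.
    return temporal.process(depth_frame)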

Example #6

import pyrealsense2 as rs2
import time

p = rs2.pipeline()

p.start()

while True:
    frames = rs2.composite_frame(p.wait_for_frames())
    depth = rs2.depth_frame(frames.get_depth_frame())

    width = depth.get_width()
    height = depth.get_height()

    dist_to_center = depth.get_distance(int(width / 2), int(height / 2))

    print("The camera is facing an object " + str(dist_to_center) +
          " meters away")
    # time.sleep(.5)
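
The loop above never stops the pipeline, so the camera stays claimed if the script is interrupted. A small variant with cleanup, assuming the same pyrealsense2 API, wraps the work in try/finally:

p = rs2.pipeline()
p.start()
try:
    frames = p.wait_for_frames()
    depth = frames.get_depth_frame()
    if depth:
        dist = depth.get_distance(depth.get_width() // 2, depth.get_height() // 2)
        print("Distance to the center pixel: " + str(dist) + " meters")
finally:
    p.stop()  # release the device even if an exception is raised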