import cv2
import numpy as np
import pyrealsense2 as rs


def convert_bag_to_mp4(file_name,
                       fps=30,
                       width=1280,
                       height=720,
                       output_file_name='output.mp4'):
    '''
    Generate an mp4 file from a recorded RealSense bag file.
    '''
    pipeline = rs.pipeline()
    config = rs.config()
    rs.config.enable_device_from_file(config,
                                      file_name,
                                      repeat_playback=False)

    # Streams must be enabled on the config before the pipeline is started.
    config.enable_stream(rs.stream.depth, width, height, rs.format.z16, fps)
    config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, fps)

    # VideoWriter instance for the color stream
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    writer = cv2.VideoWriter(output_file_name, fourcc, fps, (width, height))

    print("Opening bag file: {}".format(file_name))
    profile = pipeline.start(config)

    # Read the bag frame by frame instead of in real time.
    playback = profile.get_device().as_playback()
    playback.set_real_time(False)

    try:
        while True:
            # try_wait_for_frames() returns False once the whole file has been read.
            success, frames = pipeline.try_wait_for_frames()
            if not success:
                print('Reached end of file')
                break
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue
            # The color stream is rgb8, so convert to BGR for OpenCV.
            color_image = np.asanyarray(color_frame.get_data())
            color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            writer.write(color_image)
    finally:
        writer.release()
        pipeline.stop()
        print("mp4 file generated: {}".format(output_file_name))
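A hypothetical call to the function above; the paths are placeholders, and the bag is assumed to contain 1280x720 depth and color streams at 30 fps:

convert_bag_to_mp4('recording.bag', output_file_name='recording.mp4')  # placeholder paths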
Example #2
File: sw.py  Project: krbeverx/librealsense
def playback(filename, use_syncer=True):
    """
    One of the two initialization functions:

    Use playback() to initialize a software device for reading from a rosbag file. This device will
    NOT generate frames! Only the expect() functions will be available.

    If you use this function, it replaces init().

    This should be followed by start() to actually start "streaming".

    :param filename: The path to the file to open for playback
    :param use_syncer: If True, a syncer will be used to attempt frame synchronization -- otherwise
                       a regular queue will be used
    """
    ctx = rs.context()
    #
    global device
    device = rs.playback(ctx.load_device(filename))
    device.set_real_time(False)
    device.set_status_changed_callback(playback_callback)
    #
    global depth_sensor, color_sensor
    sensors = device.query_sensors()
    depth_sensor = next(s for s in sensors if s.name == "Depth")
    color_sensor = next(s for s in sensors if s.name == "Color")
    #
    global depth_profile, color_profile
    depth_profile = next(p for p in depth_sensor.profiles
                         if p.stream_type() == rs.stream.depth)
    color_profile = next(p for p in color_sensor.profiles
                         if p.stream_type() == rs.stream.color)
    #
    global syncer
    if use_syncer:
        # We don't want to lose any frames, so use a big queue size (the default is 1).
        syncer = rs.syncer(100)
    else:
        syncer = rs.frame_queue(100)
    #
    global playback_status
    playback_status = rs.playback_status.unknown
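A brief usage sketch for playback() above, assuming the rest of sw.py (its start() and playback_callback helpers) is loaded; the .bag path is only a placeholder:

playback('recording.bag', use_syncer=True)  # hypothetical path
print(depth_profile.stream_type(), color_profile.stream_type())
# Per the docstring, start() would follow to actually begin "streaming".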
Example #3
# class_names = ['person']

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# rs.config.enable_device_from_file(config, args.input)
rs.config.enable_device_from_file(
    config, '/home/xipeng/Documents/20190304_011637.bag')
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
# Start streaming
profile = pipeline.start(config)

# Read the bag frame by frame, not in real time
device = profile.get_device()
playback = rs.playback(device)
playback.set_real_time(False)

# videoWriter = cv2.VideoWriter('test.mp4', cv2.VideoWriter_fourcc(*'MJPG'), 1, (1280, 720))
ii = 0

try:

    while True:
        ii += 1

        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # ... (per-frame processing omitted in this snippet)
finally:
    pipeline.stop()
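As a purely hypothetical extension (not shown in the original snippet), rs.align and rs.colorizer from pyrealsense2 are the usual way to turn such a depth/color pair into displayable images:

import numpy as np
import cv2
import pyrealsense2 as rs

# Hypothetical helper; rs.align and rs.colorizer are standard pyrealsense2 utilities.
align_to_color = rs.align(rs.stream.color)
colorizer = rs.colorizer()


def depth_color_images(frames):
    """Return (BGR color image, colorized depth image), aligned to the color stream."""
    aligned = align_to_color.process(frames)
    color_frame = aligned.get_color_frame()
    depth_frame = aligned.get_depth_frame()
    color_image = cv2.cvtColor(np.asanyarray(color_frame.get_data()),
                               cv2.COLOR_RGB2BGR)
    depth_image = np.asanyarray(colorizer.colorize(depth_frame).get_data())
    return color_image, depth_image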
Example #4
import pyrealsense2 as rs
import numpy as np

# Quick reference of pyrealsense2 playback-related names:
#
# rs.pipeline()
#     rs.pipeline.try_wait_for_frames()
#     rs.pipeline.wait_for_frames()
# rs.pipeline_profile()
#     rs.pipeline_profile.get_device()
#     rs.pipeline_profile.get_stream()
#     rs.pipeline_profile.get_streams()
# rs.pipeline_wrapper()
# rs.playback()
# rs.playback_status()
# rs.points()
# rs.pose()
# rs.pose_frame()
# rs.pose_stream_profile()
#
# rs.pipeline
# rs.pipeline.get_active_profile()
# rs.pipeline.poll_for_frames()
# rs.pipeline.start()
# rs.pipeline.stop()
# rs.pipeline.wait_for_frames()
# rs.pipeline.try_wait_for_frames()

pipe = rs.pipeline()

config = rs.config()
# config.enable_stream
# config.enable_all_streams
# config.enable_device
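A minimal sketch tying the names above together, assuming a connected camera (or a bag file loaded via rs.config.enable_device_from_file) that supports these profiles; the stream settings mirror the earlier examples:

config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)

profile = pipe.start(config)
try:
    # try_wait_for_frames() returns (success, frameset) instead of raising on timeout.
    success, frames = pipe.try_wait_for_frames()
    if success:
        print(profile.get_device(),
              frames.get_depth_frame(),
              frames.get_color_frame())
finally:
    pipe.stop()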