def run(config):
    """Split the RGBD sequence into fragments.

    Reads color/depth (and optionally pose) file lists from the dataset,
    wipes the fragment output folder, and processes each fragment either
    sequentially or in parallel depending on ``config["python_multi_threading"]``.

    Args:
        config: dict with at least ``path_dataset``, ``folder_fragment``,
            ``has_tracking``, ``n_frames_per_fragment`` and
            ``python_multi_threading`` keys.
    """
    print("making fragments from RGBD sequence.")
    make_clean_folder(join(config["path_dataset"], config["folder_fragment"]))

    [color_files, depth_files,
     pose_files] = get_rgbd_file_lists(config["path_dataset"],
                                       config["has_tracking"])
    n_files = len(color_files)
    # Ceil division: the last fragment may hold fewer than
    # n_frames_per_fragment frames.
    n_fragments = int(
        math.ceil(float(n_files) / config['n_frames_per_fragment']))

    if config["python_multi_threading"]:
        from joblib import Parallel, delayed
        import multiprocessing

        # One worker per fragment, capped at the machine's CPU count.
        MAX_THREAD = min(multiprocessing.cpu_count(), n_fragments)
        Parallel(n_jobs=MAX_THREAD)(
            delayed(process_single_fragment)(fragment_id, color_files,
                                             depth_files, pose_files,
                                             n_files, n_fragments, config)
            for fragment_id in range(n_fragments))
    else:
        for fragment_id in range(n_fragments):
            process_single_fragment(fragment_id, color_files, depth_files,
                                    pose_files, n_files, n_fragments, config)
# ---- Example 2 ----
def run(config):
    """Register the fragment point clouds into a single scene pose graph.

    Collects every ``.ply`` fragment, rebuilds a clean scene folder, then
    builds and optimizes the scene-level pose graph.
    """
    print("register fragments.")
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)

    fragment_folder = join(config["path_dataset"], config["folder_fragment"])
    ply_file_names = get_file_list(fragment_folder, ".ply")

    scene_folder = join(config["path_dataset"], config["folder_scene"])
    make_clean_folder(scene_folder)

    make_posegraph_for_scene(ply_file_names, config)
    optimize_posegraph_for_scene(config["path_dataset"], config)
# ---- Example 3 ----
def startRecordingCallback(req):
  """ROS service callback: reset recorder state and begin capturing frames.

  Wipes and recreates the output folder layout, optionally enables pose
  tracking, resets the frame counters, and flips the global record flag.
  """
  global record, enable_tracking, relative_frame, tracking_frame, tacking_frame_count, path_output, path_depth, path_color, path_pose, frame_count, sensor_data

  rospy.loginfo(rospy.get_caller_id() + ": Start Recording")

  enable_tracking = req.enable_tracking

  # Lay out the output directory structure, wiping any previous contents.
  path_output = req.directory
  path_depth = join(path_output, "depth")
  path_color = join(path_output, "color")
  for folder in (path_output, path_depth, path_color):
    make_clean_folder(folder)

  if enable_tracking:
    # Tracking mode additionally buffers sensor data and records poses.
    tracking_frame = req.tracking_frame
    relative_frame = req.relative_frame
    sensor_data = deque()
    path_pose = join(path_output, "pose")
    make_clean_folder(path_pose)

  # Reset counters before enabling the capture loop.
  frame_count = 0
  # NOTE(review): "tacking" looks like a typo for "tracking", but the name is
  # a module-level global shared with other code — kept unchanged.
  tacking_frame_count = 0
  record = True

  return StartRecordingResponse(True)
# ---- Example 4 ----
def startRecordingCallback(req):
    """ROS service callback: prepare output folders, configure and start the
    RealSense pipeline, and enable recording.

    Returns:
        StartRecordingResponse(True) once the pipeline is streaming.
    """
    global record, record_rosbag, pipeline, frame_count
    global path_output, path_depth, path_color, path_bag

    rospy.loginfo(rospy.get_caller_id() + "Start Recording")

    # Output layout: depth/ and color/ image folders plus an optional rosbag.
    path_output = req.directory
    path_depth = join(path_output, "depth")
    path_color = join(path_output, "color")
    path_bag = join(path_output, "realsense.bag")

    make_clean_folder(path_output)
    make_clean_folder(path_depth)
    make_clean_folder(path_color)

    frame_count = 0

    # Create a pipeline (lazily; reused across repeated start calls).
    if pipeline is None:
        pipeline = rs.pipeline()

    #Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()

    # NOTE(review): record_imgs is read here but is not listed in the global
    # statements above — presumably a module-level flag; confirm it exists.
    if record_imgs or record_rosbag:
        # Depth stream at 1280x720 (z16); color at 640x480 in rs.format.bgr8
        # for OpenCV-based image visualization.
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        if record_rosbag:
            config.enable_record_to_file(path_bag)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Using preset HighAccuracy for recording
    if record_rosbag or record_imgs:
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meter
    # NOTE(review): clipping_distance and align below are locals and are
    # discarded when this function returns — if the frame-processing code
    # needs them, they should probably be globals; confirm against callers.
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    record = True
    return StartRecordingResponse(True)
# ---- Example 5 ----
def archiveData(path_output):
    """Write buffered depth/color images, poses, and camera intrinsics to disk.

    Creates (and wipes) ``path_output`` plus its depth/, color/ and pose/
    subfolders, then writes one depth PNG, one color JPEG and one pose file
    per buffered frame, using zero-padded six-digit frame names.

    Args:
        path_output: destination directory for the archived sequence.
    """
    global depth_images, color_images, rgb_poses, intrinsics
    path_depth = join(path_output, "depth")
    path_color = join(path_output, "color")
    path_pose = join(path_output, "pose")

    make_clean_folder(path_output)
    make_clean_folder(path_depth)
    make_clean_folder(path_color)
    make_clean_folder(path_pose)

    # Intrinsics are frame-independent: write the JSON once, not per frame
    # (the original rewrote the same file on every loop iteration).
    save_intrinsic_as_json(join(path_output, "camera_intrinsic.json"),
                           intrinsics)

    for s in range(len(color_images)):
        # One depth map, one color image and one pose file per frame.
        o3d.io.write_image("%s/%06d.png" % (path_depth, s), depth_images[s])
        o3d.io.write_image("%s/%06d.jpg" % (path_color, s), color_images[s])
        write_pose("%s/%06d.pose" % (path_pose, s), rgb_poses[s])