def run_focal_length_calibration(target_size, adjust_side):
    number_of_images = 25
    timeout_s = 30

    cfg = rs.config()
    cfg.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)
    cfg.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8, 30)

    lq = rs.frame_queue(capacity=number_of_images, keep_frames=True)
    rq = rs.frame_queue(capacity=number_of_images, keep_frames=True)

    counter = 0
    flags = [False, False]

    def cb(frame):
        nonlocal counter, flags
        if counter > number_of_images:
            return
        for f in frame.as_frameset():
            p = f.get_profile()
            if p.stream_index() == 1:
                lq.enqueue(f)
                flags[0] = True
            if p.stream_index() == 2:
                rq.enqueue(f)
                flags[1] = True
            if all(flags):
                counter += 1
        flags = [False, False]

    pipe = rs.pipeline(ctx)
    pp = pipe.start(cfg, cb)
    dev = pp.get_device()

    try:
        print('Starting Focal-Length calibration...')
        print(f'\tTarget Size:\t{target_size}')
        print(f'\tSide Adjustment:\t{adjust_side}')
        stime = time.time()
        while counter < number_of_images:
            time.sleep(0.5)
            if timeout_s < (time.time() - stime):
                raise RuntimeError(
                    f"Failed to capture {number_of_images} frames in {timeout_s} seconds, got only {counter} frames"
                )

        adev = dev.as_auto_calibrated_device()
        table, ratio, angle = adev.run_focal_length_calibration(
            lq, rq, target_size[0], target_size[1], fl_adjust_map[adjust_side],
            progress_callback)
        print('Focal-Length calibration finished')
        print(f'\tRatio:\t{ratio}')
        print(f'\tAngle:\t{angle}')
        adev.set_calibration_table(table)
        adev.write_calibration()
    finally:
        pipe.stop()
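# The SDK example above assumes a few module-level names that are not shown in
# this snippet (ctx, fl_adjust_map, progress_callback). A minimal, hedged sketch
# of how they might be declared and how the function could be invoked; the map
# keys/values and the callback signature are illustrative, not taken from the
# original snippet.
import pyrealsense2 as rs

ctx = rs.context()

# Hypothetical mapping from a user-facing side name to the adjust_both_sides
# flag expected by run_focal_length_calibration (0 = adjust right side only,
# 1 = adjust both sides) -- verify the semantics against your SDK version.
fl_adjust_map = {'right_only': 0, 'both_sides': 1}

def progress_callback(progress):
    # Simple console progress indicator for the calibration routine
    print(f'\rCalibration progress: {progress}%', end='')

if __name__ == '__main__':
    # Width and height of the printed calibration target; check the SDK
    # documentation for the expected units of target_size.
    run_focal_length_calibration(target_size=(175.0, 100.0), adjust_side='right_only')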
    def __init__(self,
                 callback=None,
                 max_queue_size=100000,
                 statistics=False,
                 serial_number=None):
        self._id = serial_number
        self._max_queue_size = max_queue_size

        self._logger = ModifiedLogger(
            logging.getLogger('test'),
            {'serial': self._id},
        )
        self._process = callback

        self.lrs_queue = rs.frame_queue(capacity=self._max_queue_size,
                                        keep_frames=True)

        self._post_process_queue = Queue(maxsize=self._max_queue_size)
        self._producer_thread = None
        self._consumer_thread = None
        self._block_queue_event = threading.Event()
        self.block()  # ignoring frames until self.start is called
        self._terminate_producer_event = threading.Event()

        self.statistics = statistics

        self._producing_times = collections.deque()
        self._producer_queue_sizes = collections.deque()
        self._consuming_times = collections.deque()
        self._consumer_queue_sizes = collections.deque()
        self._memory_samples = collections.deque()
    def start(self):
        """Initialize the queue structure and starting the producer-consumer threads

        first, unblocking the threading queue and clear all thread's events and then
        initialize the producer-consumer threads.
        """

        self.unblock()
        self._terminate_producer_event.clear()

        if self.lrs_queue is None:
            self.lrs_queue = rs.frame_queue(capacity=self._max_queue_size,
                                            keep_frames=True)
        if self._post_process_queue is None:
            self._post_process_queue = Queue(maxsize=self._max_queue_size)

        # Guard against self.start being called multiple times in a row without self.stop in between.
        if self._producer_thread is None:
            self._logger.info("initializing a producer thread")
            self._producer_thread = threading.Thread(
                target=self._produce_frames,
                name="producer_thread(LRSFrameQueueManager)")
            self._producer_thread.daemon = True
            self._logger.info("starting the producer thread")
            self._producer_thread.start()

        # Guard against self.start being called multiple times in a row without self.stop in between.
        if self._consumer_thread is None:
            self._logger.info("initializing a consumer thread")
            self._consumer_thread = threading.Thread(
                target=self._consume_frames,
                name="consumer_thread(LRSFrameQueueManager)")
            self._consumer_thread.daemon = True
            self._logger.info("starting the consumer thread")
            self._consumer_thread.start()
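    # Hedged sketch: start() above launches _produce_frames and _consume_frames,
    # which are not shown in this snippet. Assumed implementations -- the producer
    # drains the librealsense queue into the post-processing queue, the consumer
    # feeds each frame to the user-supplied callback. The real class may differ.
    def _produce_frames(self):
        while not self._terminate_producer_event.is_set():
            try:
                # wait_for_frame raises RuntimeError if no frame arrives in time
                frame = self.lrs_queue.wait_for_frame(1000)
            except RuntimeError:
                continue
            if self._block_queue_event.is_set():
                continue  # frames are ignored until start() unblocks the queue
            self._post_process_queue.put(frame)

    def _consume_frames(self):
        while True:
            frame = self._post_process_queue.get()
            if frame is None:  # sentinel a hypothetical stop() could push
                break
            if self._process is not None:
                self._process(frame)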
    def __init__(self, res_w=1280, res_h=720, fps=30, use_color=True, use_depth=True, show_depth=True, preset=1, color_scheme=0):
        assert use_color or use_depth, "At least 1 of the 2 flags 'use_color' and 'use_depth' must be set to True"

        self.use_color = use_color
        self.use_depth = use_depth
        self.use_color_depth = use_color and use_depth
        self.show_depth = use_depth and show_depth
        # Resolution in pixels
        self.res_w = res_w
        self.res_h = res_h
        # Preset: 0=custom, 1=default, 2=hand, 3=high accuracy, 4=high density, 5=medium density

        # Setup of the sensor
        self.pipe = rs.pipeline()
        self.cfg = rs.config()
        if self.use_color_depth: self.align = rs.align(rs.stream.color)

        # Color stream
        if self.use_color: self.cfg.enable_stream(rs.stream.color, res_w, res_h, rs.format.bgr8, fps)
        # Depth stream to configuration
        # Format z16: 0<=z<=65535
        if self.use_depth: self.cfg.enable_stream(rs.stream.depth, res_w, res_h, rs.format.z16, fps)
        self.queue = rs.frame_queue(2 if self.use_color_depth else 1) # With a buffer capacity of 2, we reduce latency
        self.pipe_profile = self.pipe.start(self.cfg, self.queue)
        
        # Color scheme: 0: Jet, 1: Classic, 2: WhiteToBlack, 3: BlackToWhite, 4: Bio, 5: Cold, 6: Warm, 7: Quantized, 8: Pattern, 9: Hue
        if self.show_depth:
            self.colorizer = rs.colorizer()
            self.colorizer.set_option(rs.option.color_scheme, color_scheme)

        if self.use_depth:
            self.dev = self.pipe_profile.get_device()
            # self.advnc_mode = rs.rs400_advanced_mode(self.dev)
            # print("Advanced mode is", "enabled" if self.advnc_mode.is_enabled() else "disabled")
            self.depth_sensor = self.dev.first_depth_sensor()
            # Set preset
            self.preset = preset
            self.depth_sensor.set_option(rs.option.visual_preset, self.preset)
            print(f"Current preset: {self.depth_sensor.get_option_value_description(rs.option.visual_preset, self.depth_sensor.get_option(rs.option.visual_preset))}")

            # Set the disparity shift
            # if disparity_shift != -1:
            #     self.depth_table_control_group = self.advnc_mode.get_depth_table()
            #     self.depth_table_control_group.disparityShift = disparity_shift
            #     self.advnc_mode.set_depth_table(self.depth_table_control_group)
            # self.depth_table_control_group = self.advnc_mode.get_depth_table()
            # print("Disparity shift:", self.depth_table_control_group.disparityShift)

            # Set depth units
            # self.set_depth_units(depth_units)

            # Get intrinsics
            self.stream_profile = self.pipe_profile.get_stream(rs.stream.depth) # Fetch stream profile for depth stream
            self.intrinsics = self.stream_profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile and fetch intrinsics

            # Get depth scale
            self.depth_scale = self.depth_sensor.get_depth_scale()

            self.get_depth_units()
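    # Hedged sketch: get_depth_units() is called in __init__ above but not shown
    # in this snippet. A plausible implementation simply reads the sensor option.
    def get_depth_units(self):
        depth_units = self.depth_sensor.get_option(rs.option.depth_units)
        print(f"Depth units: {depth_units} m per LSB")
        return depth_units

    # Hypothetical helper (not part of the original class) showing how the short
    # frame queue created in __init__ could be drained and aligned.
    def next_frames(self):
        frameset = self.queue.wait_for_frame().as_frameset()
        if self.use_color_depth:
            frameset = self.align.process(frameset)
        color = frameset.get_color_frame() if self.use_color else None
        depth = frameset.get_depth_frame() if self.use_depth else None
        return color, depth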
def calculate_target_z(target_size):
    number_of_images = 50  # The required number of frames is 10+
    timeout_s = 30

    cfg = rs.config()
    cfg.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8, 30)

    q = rs.frame_queue(capacity=number_of_images, keep_frames=True)
    # Frame queues q2, q3 should be left empty. Provision for future enhancements.
    q2 = rs.frame_queue(capacity=number_of_images, keep_frames=True)
    q3 = rs.frame_queue(capacity=number_of_images, keep_frames=True)

    counter = 0

    def cb(frame):
        nonlocal counter
        if counter > number_of_images:
            return
        for f in frame.as_frameset():
            q.enqueue(f)
            counter += 1

    pipe = rs.pipeline(ctx)
    pp = pipe.start(cfg, cb)
    dev = pp.get_device()

    try:
        stime = time.time()
        while counter < number_of_images:
            time.sleep(0.5)
            if timeout_s < (time.time() - stime):
                raise RuntimeError(
                    f"Failed to capture {number_of_images} frames in {timeout_s} seconds, got only {counter} frames"
                )

        adev = dev.as_auto_calibrated_device()
        print('Calculating distance to target...')
        print(f'\tTarget Size:\t{target_size}')
        target_z = adev.calculate_target_z(q, q2, q3, target_size[0],
                                           target_size[1])
        print(f'Calculated distance to target is {target_z}')
    finally:
        pipe.stop()

    return target_z
Example n. 6
    def __init__(self, rgb_sensor):
        self._stop = False
        self.frames = []
        self.count_drops = 0
        self.frame_drops_info = {}
        self.prev_hw_timestamp = 0.0
        self.prev_fnum = 0
        self.first_frame = True
        self.lrs_queue = rs.frame_queue(capacity=100000, keep_frames=True)
        self.post_process_queue = Queue(maxsize=1000000)
        self.rgb_sensor = rgb_sensor
Example n. 7
def playback(filename, use_syncer=True):
    """
    One of the two initialization functions:

    Use playback() to initialize a software device for reading from a rosbag file. This device will
    NOT generate frames! Only the expect() functions will be available.

    If you use this function, it replaces init().

    This should be followed by start() to actually start "streaming".

    :param filename: The path to the file to open for playback
    :param use_syncer: If True, a syncer will be used to attempt frame synchronization -- otherwise
                       a regular queue will be used
    """
    ctx = rs.context()
    #
    global device
    device = rs.playback(ctx.load_device(filename))
    device.set_real_time(False)
    device.set_status_changed_callback(playback_callback)
    #
    global depth_sensor, color_sensor
    sensors = device.query_sensors()
    depth_sensor = next(s for s in sensors if s.name == "Depth")
    color_sensor = next(s for s in sensors if s.name == "Color")
    #
    global depth_profile, color_profile
    depth_profile = next(p for p in depth_sensor.profiles
                         if p.stream_type() == rs.stream.depth)
    color_profile = next(p for p in color_sensor.profiles
                         if p.stream_type() == rs.stream.color)
    #
    global syncer
    if use_syncer:
        syncer = rs.syncer(100)  # We don't want to lose any frames, so use a big queue size (default is 1)
    else:
        syncer = rs.frame_queue(100)
    #
    global playback_status
    playback_status = rs.playback_status.unknown
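# Hedged sketch of the start() step the docstring above refers to: open the
# selected playback profiles and stream both sensors into the shared syncer
# (or plain frame queue). The real helper in the test framework may differ.
def start():
    depth_sensor.open(depth_profile)
    color_sensor.open(color_profile)
    depth_sensor.start(syncer)
    color_sensor.start(syncer)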
Example n. 8
    def init(self, device, width, height):
        self.un_init()
        self.frame_queue = rs.frame_queue(10)
        self.device = device
        self.sensor = self.device.first_depth_sensor()

        stream_profiles = self.sensor.get_stream_profiles()
        video_stream_profiles = []

        for item in stream_profiles:
            video_stream_profiles.append(item.as_video_stream_profile())

        depth_video_stream_profiles = filter(
            lambda x: x.stream_type() == rs.stream.depth,
            video_stream_profiles)

        depth_video_stream_profiles = list(depth_video_stream_profiles)

        sorted_depth_video_stream_profiles = sorted(
            depth_video_stream_profiles, key=lambda x: x.fps(), reverse=True)

        width_height_sorted_depth_video_stream_profiles = filter(
            lambda x: x.width() == width and x.height() == height,
            sorted_depth_video_stream_profiles)

        width_height_sorted_depth_video_stream_profiles = list(
            width_height_sorted_depth_video_stream_profiles)

        self.sensor_profile = width_height_sorted_depth_video_stream_profiles[
            0]
        self.width = self.sensor_profile.width()
        self.height = self.sensor_profile.height()
        self.depth = np.empty(self.height * self.width, dtype=np.uint16)
        self.distance = np.empty(self.width, dtype=np.float64)
        self.sensor.open(self.sensor_profile)
        self.sensor.start(self.frame_queue)
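    # Hedged sketch: how the buffers preallocated in init() might be filled from
    # the frame queue. The column-wise "distance" reduction (minimum valid depth
    # per image column, scaled to metres) is an assumption for illustration only.
    def poll(self):
        depth_frame = self.frame_queue.wait_for_frame().as_depth_frame()
        # Copy the 16-bit depth image into the flat buffer allocated in init()
        self.depth[:] = np.asanyarray(depth_frame.get_data()).ravel()
        depth_m = self.depth.reshape(self.height, self.width) * \
            self.sensor.get_depth_scale()
        # One value per image column, ignoring zero (invalid) pixels
        masked = np.where(depth_m > 0, depth_m, np.inf)
        self.distance[:] = masked.min(axis=0)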
Example n. 9
    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)

    # Start streaming to the slow processing function.
    # This stream will lag, causing the occasional frame drop.
    print("Slow callback")
    pipeline.start(config)
    start = time.time()
    while time.time() - start < 5:
        frames = pipeline.wait_for_frames()
        slow_processing(frames)
    pipeline.stop()

    # Start streaming to the slow processing function by way of a frame queue.
    # This stream will occasionally hiccup, but the frame_queue will prevent frame loss.
    print("Slow callback + queue")
    queue = rs.frame_queue(50)
    pipeline.start(config, queue)
    start = time.time()
    while time.time() - start < 5:
        frames = queue.wait_for_frame()
        slow_processing(frames)
    pipeline.stop()

    # Start streaming to the slower processing function by way of a frame queue.
    # This stream will drop frames because the delays are long enough that the backed-up
    # frames use the entire internal frame pool, preventing the SDK from creating more frames.
    print("Slower callback + queue")
    queue = rs.frame_queue(50)
    pipeline.start(config, queue)
    start = time.time()
    while time.time() - start < 5:
Example n. 10
def init(syncer_matcher=rs.matchers.default):
    """
    One of the two initialization functions:

    Use init() to initialize a software device that will generate frames (as opposed to playback()
    which will initialize a software device for reading from a rosbag and will NOT generate frames).

    This should be followed by start() to actually start "streaming".

    This sets multiple module variables that are used to generate software frames, and initializes
    a syncer automatically if needed.

    :param syncer_matcher: The matcher to use in the syncer (if None, no syncer will be used) --
                           the default will compare all streams according to timestamp
    """
    global gap_c, gap_d
    gap_d = 1000 / fps_d
    gap_c = 1000 / fps_c
    #
    global pixels, w, h
    pixels = bytearray(b'\x00' * (w * h * 2))  # Dummy data
    #
    global device
    device = rs.software_device()
    if syncer_matcher is not None:
        device.create_matcher(syncer_matcher)
    #
    global depth_sensor, color_sensor
    depth_sensor = device.add_sensor("Depth")
    color_sensor = device.add_sensor("Color")
    #
    depth_stream = rs.video_stream()
    depth_stream.type = rs.stream.depth
    depth_stream.uid = 0
    depth_stream.width = 640
    depth_stream.height = 480
    depth_stream.bpp = 2
    depth_stream.fmt = rs.format.z16
    depth_stream.fps = fps_d
    #
    global depth_profile
    depth_profile = rs.video_stream_profile(
        depth_sensor.add_video_stream(depth_stream))
    #
    color_stream = rs.video_stream()
    color_stream.type = rs.stream.color
    color_stream.uid = 1
    color_stream.width = 640
    color_stream.height = 480
    color_stream.bpp = 2
    color_stream.fmt = rs.format.rgba8
    color_stream.fps = fps_c
    #
    global color_profile
    color_profile = rs.video_stream_profile(
        color_sensor.add_video_stream(color_stream))
    #
    # We don't want to lose any frames so use a big queue size (default is 1)
    global syncer
    if syncer_matcher is not None:
        syncer = rs.syncer(100)
    else:
        syncer = rs.frame_queue(100)
    #
    global playback_status
    playback_status = None
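# Hedged sketch of frame injection through the software device declared above,
# roughly mirroring what a generate-frames helper could look like. Field values
# (stride, domain) follow the stream declared in init(); verify against your
# SDK version before relying on them.
def publish_depth_frame(frame_number, timestamp_ms):
    f = rs.software_video_frame()
    f.pixels = pixels                      # dummy payload created in init()
    f.stride = 640 * 2                     # depth_stream.width * bpp (z16)
    f.bpp = 2
    f.frame_number = frame_number
    f.timestamp = timestamp_ms
    f.domain = rs.timestamp_domain.hardware_clock
    f.profile = depth_profile
    depth_sensor.on_video_frame(f)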
Example n. 11
ctx = rs.context()
sd.add_to(ctx)

dev = ctx.query_devices()[0]
for d in ctx.query_devices():
    if d.get_info(rs.camera_info.name) == name:
        dev = d

images_path = "."
if (len(sys.argv) > 1):
    images_path = str(sys.argv[1])

rec = rs.recorder(images_path + "/1.bag", dev)
sensor = rec.query_sensors()[0]

q = rs.frame_queue()
sensor.open(sensor.get_stream_profiles())
sensor.start(q)

files = glob.glob1(images_path, "gt*")
index = []
for f in files:
    idx = (f.split('-')[1]).split('.')[0]
    index.append(int(idx))

for i in index:
    left_name = images_path + "/left-" + str(i) + ".png"
    depth_name = images_path + "/gt-" + str(i) + ".png"
    result_name = images_path + "/res-" + str(i) + ".png"
    denoised_name = images_path + "/res_denoised-" + str(i) + ".png"
Example n. 12
def main():
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)

    # Start streaming
    queue = rs.frame_queue(50)
    cfg = pipeline.start(config, queue)
    align = rs.align(rs.stream.color)
    profile = cfg.get_stream(rs.stream.depth)

    geom_added = False

    intrinsics = profile.as_video_stream_profile().get_intrinsics()
    print(intrinsics)

    depth_sensor = cfg.get_device().first_depth_sensor()

    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 3  # 3 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # filters
    # decimation filter
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)

    # spatial filter
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)

    temporal = rs.temporal_filter()

    hole_filling = rs.hole_filling_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    dt0 = datetime.now()
    frames = queue.wait_for_frame().as_frameset()
    frames = align.process(frames)
    profile = frames.get_profile()

    depth_frame = frames.get_depth_frame()
    # depth_frame = decimation.process(depth_frame)
    depth_frame = depth_to_disparity.process(depth_frame)
    depth_frame = spatial.process(depth_frame)
    depth_frame = temporal.process(depth_frame)
    depth_frame = disparity_to_depth.process(depth_frame)
    depth_frame = hole_filling.process(depth_frame)

    # We will be removing the background of objects more than clipping_distance_in_meters meters away

    color_frame = frames.get_color_frame()

    # Convert images to numpy arrays
    depth_image = np.asanyarray(depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    img_depth = o3d.geometry.Image(depth_image)
    img_color = o3d.geometry.Image(color_image)
    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        img_color,
        img_depth,
        depth_trunc=clipping_distance_in_meters,
        convert_rgb_to_intensity=False)

    pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
        intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
        intrinsics.ppx, intrinsics.ppy)
    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
        rgbd, pinhole_camera_intrinsic)
    pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])

    #print("Testing IO for point cloud ...")
    #pcd = o3d.io.read_point_cloud("../../test_data/fragment.pcd")
    print(pcd)
    #o3d.io.write_point_cloud("data/realsense.pcd", pcd)
    clusteringDepthImage(pcd)

    if geom_added == False:
        pass
    #     vis.add_geometry(pointcloud)
    #     geom_added = True
    #
    # vis.update_geometry(pointcloud)
    # vis.poll_events()
    # vis.update_renderer()

    # cv2.imshow('bgr', color_image)
    # key = cv2.waitKey(1)
    # if key == ord('q'):
    #    break

    process_time = datetime.now() - dt0
    print("FPS: " + str(1 / process_time.total_seconds()))

    pipeline.stop()
    cv2.destroyAllWindows()
    # vis.destroy_window()  # 'vis' (an Open3D visualizer) is never created in this snippet
    # del vis
Example n. 13
def main():

    app_config = low_res_config
    # objects = np.empty(NUMBER_OF_OBJECTS, dtype=object)
    objects = []
    old_time = time.time()
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('./data/output.avi', fourcc, 30.0, (CAMERA_WIDTH, CAMERA_HEIGHT))
    pipeline = rs.pipeline()
    queue = rs.frame_queue(5000, keep_frames=True)
    config = rs.config()

    #Visualizer(CAMERA_WIDTH,CAMERA_HEIGHT, "Visualizer", resizable=True)

    samplegrid = SampleGrid(app_config)
    samplegrid.initialize()



    # note: using 640 x 480 depth resolution produces smooth depth boundaries
    #       using rs.format.bgr8 for color image format for OpenCV based image visualization
    config.enable_stream(rs.stream.depth, app_config["IMAGE_WIDTH"], app_config["IMAGE_HEIGHT"], rs.format.z16, 30)
    config.enable_stream(rs.stream.color, app_config["IMAGE_WIDTH"], app_config["IMAGE_HEIGHT"], rs.format.rgb8, 30)

    config.enable_record_to_file("./data/record.bag")
    #config.enable_device_from_file("./data/realsense.bag", repeat_playback=True)
    #config.enable_device_from_file("./data/record_homeroom_low.bag", repeat_playback=True)
    # Start streaming
    profile = pipeline.start(config, queue)
    depth_sensor = profile.get_device().first_depth_sensor()
    #depth_sensor.set_option(rs.option.max_distance, 4)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 4  # 4 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # rs400::advanced_mode advanced_device(camera.getDevice());
    # auto depth_table = advanced_device.get_depth_table();
    # depth_table.depthClampMax = 1300;  // 1m30 if depth unit at 0.001
    # advanced_device.set_depth_table(depth_table);



    # filters
    # decimation filter
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)

    # spatial filter
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)

    temporal = rs.temporal_filter()

    hole_filling = rs.hole_filling_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    transformation_matrix_1 = [[0,0, -1], [0,1, 0], [1, 0,0]]#, [0, 0, 0, 1]]
    transformation_matrix_2 = [[1,0, 0], [0,0, -1], [0, 1,0]]#, [0, 0, 0, 1]]

    #transformation_matrix = [[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]

    #inv_transform = np.linalg.inv(transformation_matrix)

    # Streaming loop
    frame_count = 0
    intrinsics = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()

    speedestimation = SpeedEstimation(intrinsics, config)
    while frame_count < 1000:

        cycle_time_start = time.time()

        # Get frameset of color and depth
        # frames = pipeline.wait_for_frames()
        start = time.time()
        frames = queue.wait_for_frame().as_frameset()
        current_time = frames.get_frame_metadata(rs.frame_metadata_value.time_of_arrival)


        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if frame_count < 60:
            #print(frame_count)
            frame_count += 1
            continue

        #depth_frame = decimation.process(depth_frame)
        # depth_frame = depth_to_disparity.process(depth_frame)
        # depth_frame = spatial.process(depth_frame)
        # depth_frame = temporal.process(depth_frame)
        # depth_frame = disparity_to_depth.process(depth_frame)
        # depth_frame = hole_filling.process(depth_frame)

        # Validate that both frames are valid
        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        end = time.time()
        print("Frame Post Processing took: " + str((end - start) * 1000) + "ms")

        #############
        ####
        ## Create Point Cloud
        ####
        #############

        start = time.time()
        pc = rs.pointcloud()
        points = pc.calculate(depth_frame)
        point_array = np.asarray(points.get_vertices(2))


        end = time.time()
        print("Pointcloud generation took: " + str((end - start) * 1000) + "ms")
        print(point_array.size)

        # Transformation code
        # newrow = np.ones((point_array.shape[0],1))
        # point_array = np.append(point_array,newrow,axis=1)
        point_array = np.dot(point_array,transformation_matrix_1)
        point_array = np.dot(point_array,transformation_matrix_2)

        # point_array = np.delete(point_array, np.s_[3:4], axis=1)

        #pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(point_array))
        #res = pcd.estimate_normals()
        #print(res)
        #o3d.visualization.draw_geometries([pcd])

        ## FLood fill
        start = time.time()
        #samplegrid.update(np.asarray(pcd.points))
        samplegrid.update(point_array)
        end = time.time()
        print("update took: " + str((end - start) * 1000) + "ms")

        start = time.time()
        floorplane = samplegrid.findFloor()
        end = time.time()
        print("Floor detection: " + str((end - start) * 1000) + "ms")


        # normal = np.array(floorplane.n)
        # point = np.array(floorplane.p)
        # d = -point.dot(normal)
        # xx, yy = np.meshgrid(range(10), range(10))
        #
        # # calculate corresponding z
        # z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
        #
        # # plot the surface
        # plt3d = plt.figure().gca(projection='3d')
        # plt3d.plot_surface(xx, yy, z)
        # plt.show()
        # #plt.savefig("planeimg.png")

        trans = transformFromGroundPlane(floorplane.n,floorplane.p)
        indices = []
        newrow = np.ones((point_array.shape[0], 1))
        point_array = np.append(point_array, newrow, axis=1)
        point_array = np.dot(point_array, np.array(trans))
        point_array = np.delete(point_array, np.s_[3:4], axis=1)

        trans = [j for sub in trans for j in sub]
        # for i, arr in enumerate(point_array):
        #     arr[0] = trans[0] * arr[0] + trans[4] * arr[1]+ trans[8] * arr[2] + trans[12]
        #     arr[1] = trans[1] * arr[0] + trans[5] * arr[1] + trans[9] * arr[2] + trans[13]
        #     arr[2] = trans[2] * arr[0] + trans[6] * arr[1] + trans[10] * arr[2] + trans[14]
        #point_array = np.delete(point_array, indices, axis=0)
        end = time.time()
        # print("Removal of clipping: " + str((end - start) * 1000) + "ms")
        # print(point_array.size)



        start = time.time()
        #speedestimation.step(point_array)
        #speedestimation.step(pcd.points)

        end = time.time()
        print("Speed estimation took: " + str((end - start) * 1000) + "ms")

        # for obj in objects:
        #     if obj.status != "active":
        #         continue
        #
        #     corners = np.asarray(obj.bounding_box.get_box_points())
        #     extent = obj.bounding_box.get_extent()
        #     pixels = []
        #     for point in corners:
        #         pixel = rs.rs2_project_point_to_pixel(intrinsics, point.tolist())
        #         pixel = [int(coord) for coord in pixel]
        #         pixels.append(pixel)
        #
        #     cv2.rectangle(color_image, tuple(pixels[1]), tuple(pixels[2]), (0, 255, 0), 2)
        #
        #
        #     label = "Obj-ID: "+str(obj.id) +"\n" \
        #             + "vx: " + str(round(obj.speed_vector[0],2))  + " m/s \n"  \
        #             + "vy: " + str(round(obj.speed_vector[1], 2)) + " m/s \n" \
        #             + "vz: " + str(round(obj.speed_vector[2], 2))+ " m/s \n" \
        #             + "speed  " + str(round(obj.speed_vector[3],2)) + "m/s \n"
        #     position = tuple(pixels[5])
        #     y0,dy = position[1], 12
        #     for i, line in enumerate(label.split('\n')):
        #         y = y0 + i * dy
        #         #cv2.putText(img, line, (50, y), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        #         cv2.putText(color_image, line,(position[0]+10,y) , cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 0), 1)
        #
        #
        # end = time.time()
        # print("2D projection time: " + str((end - start) * 1000) + "ms")
        #
        # cv2.putText(color_image, str(frame_count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 0), 1)
        # out_img = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
        # cv2.imshow('img', out_img)
        # k = cv2.waitKey(1)
        # mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
        # pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(point_array))
        # pcd.transform(trans)
        # indices = []
        # for i, arr in enumerate(np.asarray(pcd.points)):
        #     if arr[2] < 0.05:
        #         indices.append(i)

        # point_array = np.delete(pcd.points, indices, axis=0)
        # pcd.points = o3d.utility.Vector3dVector(point_array)
        # o3d.visualization.draw_geometries([pcd, mesh])
        cycle_time_end = time.time()
        print("Cycle time: " + str((cycle_time_end - cycle_time_start) * 1000) + "ms")
    out.release()
    pipeline.stop()
Example n. 14
# test scenario:
# for each option, set value, wait 10 frames, check value with get_option and get_frame_metadata
# values set for each option are min, max, and default values
ctx = rs.context()
dev = ctx.query_devices()[0]

try:
    color_sensor = dev.first_color_sensor()
    # Using a profile common to both L500 and D400
    color_profile = next(p for p in color_sensor.profiles if p.fps() == 30
                         and p.stream_type() == rs.stream.color
                         and p.format() == rs.format.yuyv
                         and p.as_video_stream_profile().width() == 640
                         and p.as_video_stream_profile().height() == 480)
    color_sensor.open(color_profile)
    lrs_queue = rs.frame_queue(capacity=10, keep_frames=False)
    color_sensor.start(lrs_queue)

    iteration = 0
    option_index = -1  # running over options
    value_index = -1  # 0, 1, 2 for min, max, default
    # number of frames to wait between set_option and checking metadata
    # is set to 15 - the expected delay is ~120ms for Win and ~80-90ms for Linux
    num_of_frames_to_wait = 15
    while True:
        try:
            lrs_frame = lrs_queue.wait_for_frame(5000)
            if iteration == 0:
                option_index = option_index + 1
                if option_index == len(color_options):
                    break
Example n. 15
def main():
    # objects = np.empty(NUMBER_OF_OBJECTS, dtype=object)
    objects = []
    old_time = time.time()
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('./data/output.avi', fourcc, 30.0, (1280, 720))
    pipeline = rs.pipeline()
    queue = rs.frame_queue(5000, keep_frames=True)
    config = rs.config()

    vis = o3d.visualization.Visualizer()
    vis.create_window('PCD', width=1280, height=720)
    pointcloud = o3d.geometry.PointCloud()
    vis.add_geometry(pointcloud)
    geom_added = False

    # note: using 640 x 480 depth resolution produces smooth depth boundaries
    #       using rs.format.bgr8 for color image format for OpenCV based image visualization
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)

    config.enable_device_from_file("./data/realsense.bag",
                                   repeat_playback=True)

    # Start streaming
    profile = pipeline.start(config, queue)
    depth_sensor = profile.get_device().first_depth_sensor()

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_scale = depth_sensor.get_depth_scale()

    # We will not display the background of objects more than
    #  clipping_distance_in_meters meters away
    clipping_distance_in_meters = 4  # 4 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    # filters
    # decimation filter
    decimation = rs.decimation_filter()
    decimation.set_option(rs.option.filter_magnitude, 4)

    # spatial filter
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, 5)
    spatial.set_option(rs.option.filter_smooth_alpha, 1)
    spatial.set_option(rs.option.filter_smooth_delta, 50)

    temporal = rs.temporal_filter()

    hole_filling = rs.hole_filling_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    transformation_matrix = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0],
                             [0, 0, 0, 1]]
    inv_transform = np.linalg.inv(transformation_matrix)
    #grid = Grid(config_perception)
    #grid.initialize()
    # Streaming loop
    frame_count = 0
    intrinsics = profile.get_stream(
        rs.stream.depth).as_video_stream_profile().get_intrinsics()
    pcd_old = o3d.geometry.PointCloud()

    while frame_count < 1000:

        cycle_time_start = time.time()

        # Get frameset of color and depth
        # frames = pipeline.wait_for_frames()
        frames = queue.wait_for_frame().as_frameset()
        current_time = frames.get_frame_metadata(
            rs.frame_metadata_value.time_of_arrival)

        start = time.time()
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        end = time.time()
        #print("Frame Post Processing took: " + str((end - start) * 1000) + "ms")

        #############
        ####
        ## Create Point Cloud
        ####
        #############

        start = time.time()
        img_depth = o3d.geometry.Image(depth_image)
        img_color = o3d.geometry.Image(color_image)
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            img_color,
            img_depth,
            depth_trunc=clipping_distance_in_meters,
            convert_rgb_to_intensity=False)

        pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
            intrinsics.width, intrinsics.height, intrinsics.fx, intrinsics.fy,
            intrinsics.ppx, intrinsics.ppy)
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd, pinhole_camera_intrinsic)
        pcd = pcd.remove_non_finite_points(remove_nan=True,
                                           remove_infinite=True)
        pcd = pcd.voxel_down_sample(voxel_size=0.05)
        points = np.asarray(pcd.points)
        pcd_tree = o3d.geometry.KDTreeFlann(pcd)
        start = time.time()
        [k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 4)
        end = time.time()
        print("Neighbour search: " + str((end - start) * 1000) + "ms")
        print(idx)
Example n. 16
    if len(devices) == 0: exit(1)

    # This tutorial will access only a single device, but it is trivial to extend to multiple devices
    dev = devices[0]
    print("\nUsing device 0, an %s" %
          dev.get_camera_info(rs.camera_info.device_name))
    print("    Serial number: %s" %
          dev.get_camera_info(rs.camera_info.device_serial_number))
    print("    Firmware version: %s" %
          dev.get_camera_info(rs.camera_info.camera_firmware_version))

    # Configure depth to run at VGA resolution at 30 frames per second
    dev.open(rs.stream_profile(rs.stream.depth, 640, 480, 30, rs.format.z16))

    # Configure frame queue of size one and start streaming into it
    queue = rs.frame_queue(1)
    dev.start(queue)

    # Determine depth value corresponding to one meter
    one_meter = int(1.0 / dev.get_depth_scale())

    while True:
        # This call waits until a new coherent set of frames is available on a device
        # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called
        frame = queue.wait_for_frame()

        # Retrieve depth data, which was previously configured 640 x 480 image of 16-bit depth values
        frame_data = frame.get_data()
        depth_frame = [0] * (len(frame_data) // 2)
        for i in range(len(frame_data) // 2):
            depth_frame[i] = (frame_data[i * 2] + frame_data[i * 2 + 1] * 256)
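        # Hedged alternative to the byte-pair loop above: with NumPy available,
        # the buffer can be reinterpreted in one call as little-endian 16-bit
        # depth values (assuming frame_data is a contiguous bytes-like z16 buffer):
        # import numpy as np
        # depth_frame = np.frombuffer(bytes(frame_data), dtype='<u2')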
Example n. 17
# Create pipeline
pipeline = rs.pipeline()

# Create a config object
config = rs.config()

# Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, args.input)

# Configure the pipeline to stream the depth stream
# Change this parameters according to the recorded bag file resolution
config.enable_stream(rs.stream.depth, rs.format.z16, 30)
config.enable_stream(rs.stream.color, rs.format.rgb8, 30)

queue = rs.frame_queue(100, keep_frames=True)

# Start streaming from file
profile = pipeline.start(config)

depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)

clipping_distance_in_meters = 0.7
clipping_distance = clipping_distance_in_meters / depth_scale

# Create opencv window to render image in
# cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
align_to = rs.stream.color
align = rs.align(align_to)
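# Hedged sketch of a typical continuation for the playback setup above (in the
# spirit of the SDK's rs-align example): read framesets, align depth to color,
# and mask out pixels beyond the clipping distance. Note that the frame queue
# created above is not passed to pipeline.start() in this snippet, so frames
# are read with wait_for_frames() here.
import numpy as np

try:
    while True:
        frames = pipeline.wait_for_frames()
        aligned = align.process(frames)
        depth_frame = aligned.get_depth_frame()
        color_frame = aligned.get_color_frame()
        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Grey out everything farther than clipping_distance (in depth units)
        background = (depth_image > clipping_distance) | (depth_image <= 0)
        masked_color = np.where(background[..., None], 153, color_image)
        # masked_color can now be rendered or written to disk as needed
except KeyboardInterrupt:
    pipeline.stop()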