Example #1
    def get_frame(self):
        # Poll the pipeline; try_wait_for_frames() returns a success flag
        # instead of raising on timeout
        success, frames = self.pipe.try_wait_for_frames()

        if not success:
            return None

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        # Frame dimensions, needed to reshape the flat vertex buffer below
        w = rs.video_frame(depth_frame).width
        h = rs.video_frame(depth_frame).height

        #         if self.ICP:
        #             self.save_frame_to_img(depth_frame, color_frame)

        # Deproject the depth frame into a point cloud
        pc = rs.pointcloud()
        points = pc.calculate(depth_frame)

        # get_vertices() returns a structured array; view it as float32 and
        # reshape into an (h, w, 3) grid of XYZ coordinates
        vertices = np.asanyarray(points.get_vertices()).view(
            np.float32).reshape(h, w, 3)
        vertices = self.preprocess_point_cloud(vertices)

        self.frame_counter += 1

        if self.ICP:
            return points, color_frame

        return vertices
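
A minimal sketch of the class context this method assumes; the snippet itself does not show it, so the constructor, the use_icp flag, and the preprocess_point_cloud body below are assumptions:

import numpy as np
import pyrealsense2 as rs

class Camera:
    def __init__(self, use_icp=False):
        # Start the pipeline with the device's default depth + color profile
        self.pipe = rs.pipeline()
        self.pipe.start()
        self.ICP = use_icp
        self.frame_counter = 0

    def preprocess_point_cloud(self, vertices):
        # Placeholder: drop zero-depth (invalid) points
        return vertices[vertices[..., 2] > 0]

    # get_frame() from the example above would be defined here
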
Example #2
def expect(depth_frame=None, color_frame=None, nothing_else=False):
    """
    Looks at the syncer queue and gets the next frame from it if available, checking its contents
    against the expected frame numbers.
    """
    global syncer, playback_status
    f = syncer.poll_for_frame()
    if playback_status is not None:
        countdown = 50  # 5 seconds
        while not f and playback_status != rs.playback_status.stopped:
            countdown -= 1
            if countdown == 0:
                break
            time.sleep(0.1)
            f = syncer.poll_for_frame()
    # NOTE: poll_for_frame() never returns None; on timeout it returns an
    # empty frame object that evaluates False
    if not f:
        test.check(depth_frame is None, "expected a depth frame")
        test.check(color_frame is None, "expected a color frame")
        return False

    log.d("Got", f)

    fs = rs.composite_frame(f)

    if fs:
        depth = fs.get_depth_frame()
    else:
        depth = rs.depth_frame(f)
    test.info("actual depth", depth)
    test.check_equal(depth_frame is None, not depth)
    if depth_frame is not None and depth:
        test.check_equal(depth.get_frame_number(), depth_frame)

    if fs:
        color = fs.get_color_frame()
    elif not depth:
        color = rs.video_frame(f)
    else:
        color = None
    test.info("actual color", color)
    test.check_equal(color_frame is None, not color)
    if color_frame is not None and color:
        test.check_equal(color.get_frame_number(), color_frame)

    if nothing_else:
        f = syncer.poll_for_frame()
        test.info("Expected nothing else; actual", f)
        test.check(not f)

    return True
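
Hypothetical calls, assuming the test has already injected frames into the syncer (the frame numbers below are invented for illustration):

# Only depth frame #5 was injected: it should come out alone
expect(depth_frame=5, nothing_else=True)
# Depth #6 and color #6 were injected: the syncer should pair them
expect(depth_frame=6, color_frame=6, nothing_else=True)
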
Example #3
s.set_option(rs2.option.gamma, 100)
s.set_option(rs2.option.hue, 0)
s.set_option(rs2.option.saturation, 50)
s.set_option(rs2.option.sharpness, 0)
s.set_option(rs2.option.white_balance, 2800)

VALS = []

POINTER = 0

while True:
    start_time = time.time()

    # Wait for a frameset, then pull out the color and depth frames.
    frames = rs2.composite_frame(pipe.wait_for_frames())
    frame = rs2.video_frame(frames.get_color_frame())
    depth = rs2.depth_frame(frames.get_depth_frame())

    if not frame:
        continue

    IMG = np.asanyarray(frame.get_data())

    # Convert from BGR to HSV, which makes color filtering easier
    HSV = cv2.cvtColor(IMG, cv2.COLOR_BGR2HSV)

    # Define upper and lower bounds for HSV variables
    LOWER_COLOR = np.array([70, 80, 255])
    UPPER_COLOR = np.array([95, 180, 255])

    # Create mask within the HSV range
    MASK = cv2.inRange(HSV, LOWER_COLOR, UPPER_COLOR)
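
The example is truncated at this point. A common continuation, assumed rather than taken from the original, would locate the largest blob in the mask and read its depth:

    # Hypothetical continuation: find the largest contour in the mask
    contours, _ = cv2.findContours(MASK, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        c = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        # Depth (in meters) at the center of the bounding box
        dist = depth.get_distance(x + w // 2, y + h // 2)
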
Example #4
    while True:
        t1 = time.time()
        # Wait for frames
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        color_aligned_to_depth = aligned_frames.first(rs.stream.color)
        color_image = np.asanyarray(color_aligned_to_depth.get_data())
        #        color_image = decrease_brightness(color_image, 70)
        depth_frame = frames.first(rs.stream.depth)
        points = pc.calculate(depth_frame)
        # Feed into YOLO
        image = Image.fromarray(color_image)
        frame = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
        #        frameBU = np.copy(frame)
        w = rs.video_frame(depth_frame).width
        h = rs.video_frame(depth_frame).height
        verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(
            h, w, 3)  # Uncropped PC

        frame_size = frame.shape[:2]
        image_data = utils.image_preporcess(np.copy(frame),
                                            [input_size, input_size])
        image_data = image_data[np.newaxis, ...]
        prev_time = time.time()

        pred_sbbox, pred_mbbox, pred_lbbox = sess.run(
            [return_tensors[1], return_tensors[2], return_tensors[3]],
            feed_dict={return_tensors[0]: image_data})

        # Flatten the three YOLO output scales into one (N, 5 + num_classes)
        # box list; num_classes is defined earlier in the original script
        pred_bbox = np.concatenate([
            np.reshape(pred_sbbox, (-1, 5 + num_classes)),
            np.reshape(pred_mbbox, (-1, 5 + num_classes)),
            np.reshape(pred_lbbox, (-1, 5 + num_classes))
        ], axis=0)
Example #5
config.enable_stream(rs2.stream.infrared, 640, 480, rs2.format.y8,
                     30)  # Start Infrared Stream
config.enable_stream(rs2.stream.depth, 640, 480, rs2.format.z16,
                     30)  # Start Depth Stream
config.enable_stream(rs2.stream.color, 640, 480, rs2.format.bgr8,
                     30)  # Start Color Stream

# Start Streaming
pipe.start(config)

try:
    while True:

        # Wait for coherent pair of frames: Depth and Color
        frames = rs2.composite_frame(pipe.wait_for_frames())
        ir_frame = rs2.video_frame(frames.get_infrared_frame())
        depth_frame = rs2.depth_frame(frames.get_depth_frame())
        color_frame = rs2.video_frame(frames.get_color_frame())
        if not ir_frame or not depth_frame or not color_frame:  # TODO: check if this is actually useful
            continue

        # Convert images to numpy arrays
        ir_image = np.asanyarray(ir_frame.get_data())
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Apply a colormap to the depth image (it must first be converted to
        # 8-bit per pixel; the raw 16-bit image can be kept for analysis)
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # Threshold and greyscale operations
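
The snippet ends at the comment above; a plausible version of that step, assumed rather than taken from the original, thresholds the IR image and greyscales the color image:

        # Hypothetical continuation: binarize the 8-bit IR image and
        # convert the color image to greyscale
        _, ir_mask = cv2.threshold(ir_image, 128, 255, cv2.THRESH_BINARY)
        grey = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
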