Example #1
def getDepthPipeline(FRAME_WIDTH, FRAME_HEIGHT, id=None):
    pipeline = rs.pipeline()
    config = rs.config()
    try:
        if id is not None:
            config.enable_device(id)
    except Exception as e:
        print(e)

    config.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.z16, 30)
    config.enable_stream(rs.stream.infrared, 1, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)
    config.enable_stream(rs.stream.infrared, 2, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)

    print("[INFO] Starting streaming...")
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    align_to = rs.stream.color
    align = rs.align(align_to)

    return pipeline, config, profile, align, depth_scale
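A minimal usage sketch for the tuple this helper returns (the loop body below is an assumption, not part of the original example, and presumes the usual pyrealsense2/numpy imports):

import numpy as np

pipeline, config, profile, align, depth_scale = getDepthPipeline(640, 480)
try:
    frames = pipeline.wait_for_frames()
    aligned = align.process(frames)
    depth_frame = aligned.get_depth_frame()
    color_frame = aligned.get_color_frame()
    if depth_frame and color_frame:
        # Raw z16 values times depth_scale give meters.
        depth_m = np.asanyarray(depth_frame.get_data()) * depth_scale
        print("Distance at image centre: %.2f m" % depth_m[240, 320])
finally:
    pipeline.stop()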
Example #2
def pub_img():
    '''Grab aligned RealSense color and depth frames and publish them
    as CompressedImage topics.'''
    global stamp

    # cap = cv2.VideoCapture(1)

    pipe = rs.pipeline()
    config = rs.config()
    width, height = 640, 480
    config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)
    profile = pipe.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)
    pub_rgb = rospy.Publisher("/output/image_raw/compressed_rgb", CompressedImage, queue_size=1)
    pub_depth = rospy.Publisher("/output/image_raw/compressed_depth", CompressedImage, queue_size=1)


    print('Hey! I started publishing images.')

    while True:

        temp = pipe.wait_for_frames()
        aligned_frames = align.process(temp)
        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        if not aligned_depth_frame or not color_frame:
            continue
        buffer = np.zeros((height, width, 4), dtype=np.uint8)  # uint8 so cv2.imencode gets an 8-bit image

        buffer[:, :, 0:3] = np.asanyarray(color_frame.get_data(), dtype=np.uint8)
        # Note: only the low 8 bits of the 16-bit depth values survive this conversion.
        buffer[:, :, 3] = np.asanyarray(aligned_depth_frame.get_data(), dtype=np.uint8)

        #### Create CompressedImage ####
        msg_rgb = CompressedImage()
        msg_depth = CompressedImage()
        msg_rgb.header.stamp = rospy.Time.now()
        msg_depth.header.stamp = msg_rgb.header.stamp#rospy.Time.now()
        msg_rgb.format = "rgb"#f'{stamp}'
        msg_depth.format = "depth"
        msg_rgb.data = np.array(cv2.imencode('.jpg', buffer[:, :, 0:3])[1]).tobytes()
        msg_depth.data = np.array(cv2.imencode('.jpg', buffer[:, :, 3])[1]).tobytes()
        # Publish depth frame
        # pub = rospy.Publisher("/output/image_raw/compressed", CompressedImage, queue_size=1)
        # r = rospy.Rate(10)
        # try:
        pub_rgb.publish(msg_rgb)
        pub_depth.publish(msg_depth)
        # r.sleep()
        # except:
        #     print('Could not publish')

        stamp += 1
        if stamp >= 100000:
            stamp = 0
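On the receiving side the CompressedImage payloads can be decoded back into OpenCV arrays. A minimal subscriber sketch, assuming the same topic name as above (the node name and callback are illustrative only):

import numpy as np
import cv2
import rospy
from sensor_msgs.msg import CompressedImage

def rgb_callback(msg):
    # JPEG bytes -> numpy buffer -> image
    arr = np.frombuffer(msg.data, dtype=np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    cv2.imshow("rgb", img)
    cv2.waitKey(1)

rospy.init_node("compressed_image_listener", anonymous=True)
rospy.Subscriber("/output/image_raw/compressed_rgb", CompressedImage, rgb_callback, queue_size=1)
rospy.spin()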
Example #3
def initialize_camera(width, height):
    """
    Set up camera pipeline for given resolution
    """

    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)
    profile = pipe.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)

    return pipe, profile, align
Example #4
    def __init__(self, resolution=(640, 480), depth_framerate=6, 
                 color_framerate=30, include_depth=False, decimation_magnitude=1,
                 spatial_smooth=True, temporal_smooth=True):
        self.include_depth = include_depth
        self.pipeline = rs.pipeline()
        #Create a config and configure the pipeline to stream
        #  different resolutions of color and depth streams
        self.config = rs.config()
        w, h = resolution
        self.config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, color_framerate)
        if include_depth:
            self.config.enable_stream(rs.stream.depth, w, h, rs.format.z16, depth_framerate)

        # Start streaming
        self.profile = self.pipeline.start(self.config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        self.color_sensor = self.profile.get_device().first_color_sensor()
        if include_depth:
            self.depth_sensor = self.profile.get_device().first_depth_sensor()
            self.depth_scale = self.depth_sensor.get_depth_scale()

            # Create an align object
            # rs.align allows us to perform alignment of depth frames to others frames
            # The "align_to" is the stream type to which we plan to align depth frames.
            align_to = rs.stream.depth
            self.align = rs.align(align_to)

            # We'll want to store the intrinsics once we start getting images.
            self.aligned_depth_K = None

            self.filters = []
            # Decimation filter
            if decimation_magnitude > 1:
                decimate_filter = rs.decimation_filter()
                decimate_filter.set_option(rs.option.filter_magnitude, decimation_magnitude)
                self.filters.append(decimate_filter)
            
            if spatial_smooth or temporal_smooth:
                self.filters.append(rs.disparity_transform())
            if spatial_smooth:
                self.filters.append(rs.spatial_filter())
            if temporal_smooth:
                self.filters.append(rs.temporal_filter())
            if spatial_smooth or temporal_smooth:
                self.filters.append(rs.disparity_transform(False))

            # Pointcloud conversion utility.
            self.pc = rs.pointcloud()
            
            self.depth_profile = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = self.depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
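__init__ builds the filter list and the point-cloud helper but never applies them; a sketch of how the chain might be run per frame when include_depth=True (the method name get_pointcloud is an assumption, and numpy is assumed to be imported as np):

    def get_pointcloud(self):
        # Illustrative consumer of the filter chain built in __init__.
        frames = self.pipeline.wait_for_frames()
        aligned = self.align.process(frames)
        depth_frame = aligned.get_depth_frame()
        if not depth_frame:
            return None
        # Apply the decimation / disparity / spatial / temporal filters in order.
        for f in self.filters:
            depth_frame = f.process(depth_frame)
        # rs.pointcloud computes XYZ vertices from the filtered depth frame.
        points = self.pc.calculate(depth_frame)
        return np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)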
Example #5
    def __init__(self, config, board, usestream=None):

        # initialize realsense specific settings
        self.pipeline = rs.pipeline()
        self.realsense_config = rs.config()

        # FIXME: missing frames when using videostream or too slow processing
        # https://github.com/IntelRealSense/librealsense/issues/2216

        # initialize the base class
        super().__init__(config, board, usestream)

        # Use recorded depth and color streams and its configuration
        # If problems with colors occur, check bgr/rgb channels configurations
        if usestream is not None:
            rs.config.enable_device_from_file(self.realsense_config, usestream)
            self.realsense_config.enable_all_streams()

        # Configure depth and color streams
        else:
            self.realsense_config.enable_stream(rs.stream.depth, self.width,
                                                self.height, rs.format.z16, 30)
            self.realsense_config.enable_stream(rs.stream.color, self.width,
                                                self.height, rs.format.bgr8,
                                                30)

        # Create alignment primitive with color as its target stream:
        self.alignment_stream = rs.align(rs.stream.color)

        # Start streaming
        # TODO: optionally rename variable to a more speaking one
        # FIXME: program ends here without further message
        try:
            self.profile = self.pipeline.start(self.realsense_config)
            # Getting the depth sensor's depth scale
            depth_sensor = self.profile.get_device().first_depth_sensor()
            self.depth_scale = depth_sensor.get_depth_scale()

            self.initialized = True

        except RuntimeError as e:
            logger.fatal("camera could not be initialized: {}".format(e))
            self.initialized = False
            # FIXME: follow up?

        if self.initialized:
            logger.debug("Depth Scale is: {}".format(self.depth_scale))
Example #6
    def __init__(self, config = None):
        Camera_Base.__init__(self, config)

        if self.data['camera_name'] != "d400":
            raise TypeError("Camera name is not d400")
        
        #Set pipeline and config
        self.pipeline = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.color, self.data['color']['width'], self.data['color']['height'], rs.format.bgr8, self.data['color']['fps'])
        cfg.enable_stream(rs.stream.depth, self.data['depth']['width'], self.data['depth']['height'], rs.format.z16, self.data['depth']['fps'])

        # Start streaming
        profile = self.pipeline.start(cfg)

        depth_sensor = profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        self.align = rs.align(align_to)
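A read helper is not shown in the snippet; a sketch of how the align object and depth_scale set up above are typically combined (the method name and return convention are assumptions, numpy assumed imported as np):

    def read_aligned(self):
        # Illustrative helper, not part of the original class.
        frames = self.pipeline.wait_for_frames()
        aligned = self.align.process(frames)
        depth_frame = aligned.get_depth_frame()
        color_frame = aligned.get_color_frame()
        if not depth_frame or not color_frame:
            return None, None
        color = np.asanyarray(color_frame.get_data())
        # Multiply the raw z16 values by depth_scale to obtain meters.
        depth_m = np.asanyarray(depth_frame.get_data()) * self.depth_scale
        return color, depth_m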
Example #7
def start():
    pipeline = rs.pipeline()
    config = rs.config()

    print("Start load conf")
    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    profile = pipeline.start(config)

    depth_sensor = profile.get_device().first_depth_sensor()
    set_short_range(depth_sensor)
    colorizer = rs.colorizer()
    print("Conf loaded")
    align_to = rs.stream.color
    align = rs.align(align_to)

    try:
        while True:
            color_image, depth_color_image, depth_image = get_color_depth(
                pipeline, align, colorizer)
            if color_image is None or depth_color_image is None or depth_image is None:
                continue

            color_image, depth_color_image, xy0, xy1, idx_to_coordinates = get_right_hand_coords(
                color_image, depth_color_image)
            if xy0 is not None or xy1 is not None:
                z_val_f, z_val_s, m_xy, c_xy, xy0_f, xy1_f, x, y, z = get_filtered_values(
                    depth_image, xy0, xy1)
                pyautogui.moveTo(int(x), int(3500 - z))  # , duration=0.05
                if draw_cam_out(color_image, depth_color_image, xy0_f, xy1_f,
                                c_xy, m_xy):
                    break
    finally:
        hands.close()
        pipeline.stop()
        model.cuda()

    model.eval()

    # Setup Realsense pipeline
    pipe = rs.pipeline()
    configure = rs.config()
    width, height = 640, 480
    configure.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
    configure.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)
    dec_filter = rs.decimation_filter ()   # Decimation - reduces depth frame density
    spat_filter = rs.spatial_filter()      # Spatial    - edge-preserving spatial smoothing
    temp_filter = rs.temporal_filter()    # Temporal   - reduces temporal noise
    pipe.start(configure)
    align_to = rs.stream.color
    align = rs.align(align_to)

    while True:

        # Grab an aligned frame pair and run the depth post-processing chain.
        temp = pipe.wait_for_frames()
        aligned_frames = align.process(temp)
        depth_frame = aligned_frames.get_depth_frame()
        filtered = dec_filter.process(depth_frame)
        filtered = spat_filter.process(filtered)
        filtered = temp_filter.process(filtered)

        aligned_depth_frame = np.asanyarray(filtered.get_data(), dtype=np.uint8)  # filtered depth packed into uint8 (decimation reduces its resolution)
        color_frame = np.asanyarray(aligned_frames.get_color_frame().get_data(), dtype=np.uint8)

        img, box = yolo_output(color_frame, model, ['cell phone', 'person'], confidence, nms_thesh, CUDA, inp_dim)
        print('BOX:', box)
def main():

    if not os.path.exists(args.out_rdir):
        os.mkdir(args.out_rdir)
        print('Created out_rgb_dir')
    if not os.path.exists(args.out_ddir):
        os.mkdir(args.out_ddir)
        print('Created out_depth_dir')
    if not os.path.exists(args.annotations_dir):
        os.mkdir(args.annotations_dir)
        print('Created args_out_dir')

    #Create pipeline for realsense2
    pipe = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.rgb8, 30)
    profile = pipe.start(config)

    # Declare filters
    dc_ftr = rs.decimation_filter ()   # Decimation - reduces depth frame density
    st_ftr = rs.spatial_filter()          # Spatial    - edge-preserving spatial smoothing
    tp_ftr = rs.temporal_filter()    # Temporal   - reduces temporal noise
    th_ftr = rs.threshold_filter()

    align_to = rs.stream.color
    align = rs.align(align_to)
    rgbd = np.zeros((1,4,360,640))
    i=0
    print('################|Initialization sequence completed.|################')


    try:
        while True:

            # Get frameset of color and depth
            frames = pipe.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame() #  640x480
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            # Post processing
            filtered = tp_ftr.process(st_ftr.process(dc_ftr.process(aligned_depth_frame)))
            filtered = th_ftr.process(filtered)#can be removed if person is outside frame

            rgb = np.asanyarray(color_frame.get_data())
            rgb = np.transpose(rgb,(2,0,1))
            # Note: the filtered frame computed above is not what gets saved; the raw aligned depth is.
            depth = np.asanyarray(aligned_depth_frame.get_data(), dtype=np.uint16)
            im.show(rgb,args.bb)
            im.rgbsave(i, rgb, str('./' + args.out_rdir))
            im.dsave(i, depth, str('./' + args.out_ddir))
            np.savetxt('./' + args.annotations_dir + '/' + str(i) + '.csv', args.bb, delimiter=',')
            i = i + 1
            print('Saved image ' + str(i))
            # time.sleep(1)
    finally:
        pipe.stop()
        print('################| Pipeline stopped |################')
    def start(self):
        try:
            # Setup:
            pipe = rs.pipeline()

            #configure profiles
            config = rs.config()
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 6)
            config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 6)

            profile = pipe.start(config)

            # Skip 5 first frames to give the Auto-Exposure time to adjust
            for x in range(5):
                pipe.wait_for_frames()

            # Load the detection network and class list once, before the frame loop.
            net = cv2.dnn.readNetFromCaffe(
                "MobileNetSSD_deploy.prototxt",
                "MobileNetSSD_deploy.caffemodel")
            in_scale_factor = 0.007843
            mean_val = 127.53
            class_names = ("background", "aeroplane", "bicycle", "bird",
                           "boat", "bottle", "bus", "car", "cat", "chair",
                           "cow", "diningtable", "dog", "horse",
                           "motorbike", "person", "pottedplant", "sheep",
                           "sofa", "train", "tvmonitor")

            while True:
                # Store next frameset for later processing:
                frameset = pipe.wait_for_frames()
                color_frame = frameset.get_color_frame()
                color = np.asanyarray(color_frame.get_data())

                # Standard OpenCV boilerplate for running the net:# Stand
                height, width = color.shape[:2]
                expected = 300
                aspect = width / height
                resized_image = cv2.resize(
                    color, (int(round(expected * aspect)), expected))
                crop_start = int(round(expected * (aspect - 1) / 2))
                crop_img = resized_image[0:expected,
                                         crop_start:crop_start + expected]

                blob = cv2.dnn.blobFromImage(crop_img, in_scale_factor,
                                             (expected, expected), mean_val,
                                             False)
                net.setInput(blob, "data")
                detections = net.forward("detection_out")

                label = detections[0, 0, 0, 1]
                xmin = detections[0, 0, 0, 3]
                ymin = detections[0, 0, 0, 4]
                xmax = detections[0, 0, 0, 5]
                ymax = detections[0, 0, 0, 6]

                class_name = class_names[int(label)]

                align = rs.align(rs.stream.color)
                frameset = align.process(frameset)
                aligned_depth_frame = frameset.get_depth_frame()
                depth = np.asanyarray(aligned_depth_frame.get_data())

                # Crop depth data:
                scale = height / expected
                xmin_depth = int((xmin * expected + crop_start) * scale)
                ymin_depth = int((ymin * expected) * scale)
                xmax_depth = int((xmax * expected + crop_start) * scale)
                ymax_depth = int((ymax * expected) * scale)
                # numpy indexes rows (y) first, then columns (x)
                depth = depth[ymin_depth:ymax_depth,
                              xmin_depth:xmax_depth].astype(float)

                # Get data scale from the device and convert to meters
                depth_scale = profile.get_device().first_depth_sensor(
                ).get_depth_scale()
                depth = depth * depth_scale
                dist, _, _, _ = cv2.mean(depth)
                print("Detected a {0} {1:.3} meters away.".format(
                    class_name, dist))

                json_object = json.dumps(DataObject(class_name, dist),
                                         default=jsonDefault)
                self.send_to_hub_callback(json_object)

        except Exception as e:
            print(e)
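The snippet relies on a DataObject class and a jsonDefault serializer defined elsewhere; a minimal sketch of what they might look like (the field names are assumptions taken from the call site):

class DataObject:
    def __init__(self, class_name, distance):
        self.class_name = class_name
        self.distance = distance

def jsonDefault(obj):
    # Fallback for json.dumps(..., default=jsonDefault): serialize unknown
    # objects through their attribute dictionary.
    return obj.__dict__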
Example #11
    def run(self):
        # ========================
        # 1. Configure all streams
        # ========================
        self.rspipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, self.width, self.height,
                             rs.format.z16, 30)
        config.enable_stream(rs.stream.color, self.width, self.height,
                             rs.format.bgr8, 30)

        # ======================
        # 2. Start the streaming
        # ======================
        print("Starting up the Intel Realsense...")
        print("")
        profile = self.rspipeline.start(config)

        # Load the configuration here
        self.loadConfiguration(profile, self.json_file)

        # =================================
        # 3. The depth sensor's depth scale
        # =================================
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)
        print("")

        # ==========================================
        # 4. Create an align object.
        #    Align the color image to the depth image.
        # ==========================================
        align_to = rs.stream.depth
        align = rs.align(align_to)

        try:
            # ===========================================
            # 5. Skip the first 30 frames.
            # This gives the Auto-Exposure time to adjust
            # ===========================================
            for x in range(30):
                frames = self.rspipeline.wait_for_frames()
                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

            print("Intel Realsense started successfully.")
            print("")

            # ===========================================
            # 6. Setup gstreamer
            # Useful gst resources / cheatsheets
            # https://github.com/matthew1000/gstreamer-cheat-sheet/blob/master/rtmp.md
            # http://wiki.oz9aec.net/index.php/Gstreamer_cheat_sheet
            # https://github.com/matthew1000/gstreamer-cheat-sheet/blob/master/mixing.md
            # ===========================================
            CLI = ''
            caps = 'caps="video/x-raw,format=BGR,width=' + str(
                self.width
            ) + ',height=' + str(
                self.height * 2
            ) + ',framerate=(fraction)30/1,pixel-aspect-ratio=(fraction)1/1"'

            if platform.system() == "Linux":
                #assuming Linux means RPI

                CLI = 'flvmux name=mux streamable=true latency=3000000000 ! rtmpsink location="' + self.rtmp_url + ' live=1 flashver=FME/3.0%20(compatible;%20FMSc%201.0)" \
                    appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE ' + str(
                    caps) + ' ! \
                    videoconvert !  omxh264enc ! h264parse ! video/x-h264 ! \
                    queue max-size-buffers=0 max-size-bytes=0 max-size-time=180000000 min-threshold-buffers=1 leaky=upstream ! mux. \
                    alsasrc ! audio/x-raw, format=S16LE, rate=44100, channels=1 ! voaacenc bitrate=44100 ! aacparse ! audio/mpeg, mpegversion=4 ! \
                    queue max-size-buffers=0 max-size-bytes=0 max-size-time=4000000000 min-threshold-buffers=1 ! mux.'

            elif platform.system() == "Darwin":
                #macos
                #CLI='flvmux name=mux streamable=true ! rtmpsink location="'+  self.rtmp_url +' live=1 flashver=FME/3.0%20(compatible;%20FMSc%201.0)" \
                #    appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE '+ str(caps) +' ! \
                #    videoconvert ! vtenc_h264 ! video/x-h264 ! h264parse ! video/x-h264 ! \
                #    queue max-size-buffers=4 ! flvmux name=mux. \
                #    osxaudiosrc do-timestamp=true ! audioconvert ! audioresample ! audio/x-raw,rate=48000 ! faac bitrate=48000 ! audio/mpeg ! aacparse ! audio/mpeg, mpegversion=4 ! \
                #    queue max-size-buffers=4 ! mux.'

                CLI = 'appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE caps="video/x-raw,format=BGR,width=' + str(
                    self.width
                ) + ',height=' + str(
                    self.height * 2
                ) + ',framerate=(fraction)30/1,pixel-aspect-ratio=(fraction)1/1" ! videoconvert ! vtenc_h264 ! video/x-h264 ! h264parse ! video/x-h264 ! queue max-size-buffers=4 ! flvmux name=mux ! rtmpsink location="' + self.rtmp_url + '" sync=true   osxaudiosrc do-timestamp=true ! audioconvert ! audioresample ! audio/x-raw,rate=48000 ! faac bitrate=48000 ! audio/mpeg ! aacparse ! audio/mpeg, mpegversion=4 ! queue max-size-buffers=4 ! mux.'

            #TODO: windows

            print(CLI)
            self.gstpipe = Gst.parse_launch(CLI)

            self.appsrc = self.gstpipe.get_by_name("mysource")
            self.appsrc.set_property('emit-signals',
                                     True)  #tell sink to emit signals

            # Set up a pipeline bus watch to catch errors.
            self.bus = self.gstpipe.get_bus()
            self.bus.connect("message", self.on_bus_message)

            self.gstpipe.set_state(Gst.State.PLAYING)
            intrinsics = True

            buff = Gst.Buffer.new_allocate(None,
                                           self.width * self.height * 3 * 2,
                                           None)
            # The depth plane starts right after the colour plane (w*h*3 bytes).
            buff_depth_index = self.width * self.height * 3

            hsv = np.zeros((self.height, self.width, 3), dtype=np.float32)
            hsv8 = np.zeros((self.height, self.width, 3), dtype=np.uint8)
            depth_hsv = np.zeros((self.height, self.width, 3),
                                 dtype=np.float32)
            depth_image = np.zeros((self.height, self.width, 3),
                                   dtype=np.float32)

            start = timer()
            while not self.exit.is_set():
                # ======================================
                # 7. Wait for a coherent pair of frames:
                # ======================================
                frames = self.rspipeline.wait_for_frames(1000)
                framestart = timer()

                # =======================================
                # 8. Align the depth frame to color frame
                # =======================================
                aligned_frames = align.process(frames)

                # ================================================
                # 9. Fetch the depth and colour frames from stream
                # ================================================
                depth_frame = aligned_frames.get_depth_frame()
                color_frame = aligned_frames.get_color_frame()
                if not depth_frame or not color_frame:
                    continue

                # print the camera intrinsics just once. it is always the same
                if intrinsics:
                    print("Intel Realsense Camera Intrinsics: ")
                    print("========================================")
                    print(depth_frame.profile.as_video_stream_profile().
                          intrinsics)
                    print(color_frame.profile.as_video_stream_profile().
                          intrinsics)
                    print("")
                    intrinsics = False

                # =====================================
                # 10. Apply filtering to the depth image
                # =====================================
                # Apply a spatial filter without hole_filling (i.e. holes_fill=0)
                # depth_frame = self.spatial_filtering(depth_frame, magnitude=2, alpha=0.5, delta=10, holes_fill=1)
                # Apply hole filling filter
                # depth_frame = self.hole_filling(depth_frame)

                # ==================================
                # 11. Convert images to numpy arrays
                # ==================================
                depth_image = np.asanyarray(depth_frame.get_data()).astype(
                    np.float32)
                color_image = np.asanyarray(color_frame.get_data())

                # ======================================================================
                # 12. Convert depth to HSV
                # ======================================================================
                # We need to encode/pack the 16bit depth value to RGB
                # we do this by treating it as the Hue in HSV.
                # we then encode HSV to RGB and stream that
                # on the other end we reverse RGB to HSV, H will give us the depth value back.
                # HSV elements are in the 0-1 range so we need to normalize the depth array to 0-1
                # First set a far plane and set everything beyond that to 0

                clipped = depth_image > 4000
                depth_image[clipped] = 0

                # Now normalize using that far plane
                # cv expects the H in degrees, not 0-1 :(
                depth_image *= (360 / 4000)
                depth_hsv[:, :, 0] = depth_image
                depth_hsv[:, :, 1] = 1
                depth_hsv[:, :, 2] = 1
                discard = depth_image == 0
                s = depth_hsv[:, :, 1]
                v = depth_hsv[:, :, 2]
                s[discard] = 0
                v[discard] = 0

                # cv2.cvtColor to convert HSV to RGB
                hsv = cv2.cvtColor(depth_hsv, cv2.COLOR_HSV2BGR_FULL)

                # cv2 needs hsv to 8bit (0-255) to stack with the color image
                hsv8 = (hsv * 255).astype(np.uint8)

                buff.fill(0, color_image.tobytes())
                buff.fill(buff_depth_index, hsv8.tobytes())
                self.appsrc.emit("push-buffer", buff)

                #process any messages from gstreamer
                msg = self.bus.pop_filtered(Gst.MessageType.ERROR
                                            | Gst.MessageType.WARNING
                                            | Gst.MessageType.EOS
                                            | Gst.MessageType.INFO
                                            | Gst.MessageType.STATE_CHANGED)

                while (msg):
                    self.on_bus_message(msg)
                    msg = self.bus.pop_filtered(
                        Gst.MessageType.ERROR | Gst.MessageType.WARNING
                        | Gst.MessageType.EOS | Gst.MessageType.INFO
                        | Gst.MessageType.STATE_CHANGED)

                if (not self.exit.is_set()):
                    try:
                        if (not self.previewQueue.full()):
                            self.previewQueue.put_nowait((color_image, hsv8))
                    except:
                        pass

                msgprocesstime = timer()
                print("gstreamer fps:%s d:%s" %
                      (str(1 /
                           (msgprocesstime - start)), str(framestart - start)))
                start = timer()

        except Exception as e:
            print("Unexpected Error: %s" % e)
            self.statusQueue.put_nowait("ERROR: Unexpected Error: %s" % e)

        finally:
            # Stop streaming
            print("Stop realsense pipeline")
            self.rspipeline.stop()
            print("Pause gstreamer pipe")
            try:
                if (self.gstpipe.get_state()[1] is not Gst.State.PAUSED):
                    self.gstpipe.set_state(Gst.State.PAUSED)
            except:
                self.statusQueue.put_nowait("ERROR: Error pausing gstreamer")
                print("Error pausing gstreamer")

        self.statusQueue.put_nowait("INFO: Exiting Realsense Capture process")
        print("Exiting capture loop")
Example #12
def capture():
  global img_q, save
  w, h = 640, 480

  # Configure depth and color streams
  pipeline = rs.pipeline()
  config = rs.config()
  config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
  config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)
  
  # Start streaming
  profile = pipeline.start(config)

  # Getting the depth sensor's depth scale (see rs-align example for explanation)
  depth_sensor = profile.get_device().first_depth_sensor()
  depth_scale = depth_sensor.get_depth_scale()
  print("Depth Scale is: " , depth_scale)

  # We will be removing the background of objects more than
  #  clipping_distance_in_meters meters away
  clipping_distance_in_meters = 1 #1 meter
  clipping_distance = clipping_distance_in_meters / depth_scale

  # Create an align object
  # rs.align allows us to perform alignment of depth frames to others frames
  # The "align_to" is the stream type to which we plan to align depth frames.
  align_to = rs.stream.color
  align = rs.align(align_to)

  thr = 150
  l = 10
  while True:
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)

    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
        
    if not aligned_depth_frame or not color_frame:
      continue

    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    grey_color = 153

    out = np.zeros((h, w, 4),np.uint8)
    
    depth_image_3d = np.dstack((depth_image,depth_image,depth_image))
    out[:,:,:3] = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), 0, color_image)
    out[:,:,3] = np.where((depth_image > clipping_distance) | (depth_image <= 0), 0, 255)
    
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    
    images = np.hstack((color_image, depth_colormap, out[:,:,:3]))
    
    if not img_q.full():
      img_q.put_nowait(images)

    if save.value == 1:
      save_img(out)
      save.value = 0
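capture() communicates through the module-level img_q queue and the save flag; a display-loop sketch for the consuming process, assuming img_q is a multiprocessing.Queue and save a multiprocessing.Value, as the names suggest:

def preview_loop():
  # Illustrative consumer for the img_q / save pair used by capture().
  while True:
    if not img_q.empty():
      images = img_q.get_nowait()   # color | depth colormap | masked color
      cv2.imshow("preview", images)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('s'):
      save.value = 1                # ask capture() to save the next frame
    elif key == ord('q'):
      break
  cv2.destroyAllWindows()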
Example #13
def camera_process(_running, _collision, _buffer):
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.z16, 30)
    config.enable_stream(rs.stream.infrared, 1, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)
    config.enable_stream(rs.stream.infrared, 2, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)

    print("[INFO] Starting streaming...")
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    align_to = rs.stream.color
    align = rs.align(align_to)

    clipping_distance_in_meters = 1.5
    collision_percent = 0.15

    clipping_distance = clipping_distance_in_meters / depth_scale

    while True:
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        frame = frames.get_color_frame()

        depth_frame = aligned_depth_frame  # reuse the aligned depth frame retrieved above

        ir_frame_1 = frames.get_infrared_frame(1)
        ir_frame_2 = frames.get_infrared_frame(2)
        ir_frame_both = frames.get_infrared_frame()

        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())

        depth_image = np.asanyarray(aligned_depth_frame.get_data())

        grey_color = 0
        depth_image_3d = np.dstack(
            (depth_image, depth_image,
             depth_image))  #depth image is 1 channel, color is 3 channels

        ir_image_1 = np.asanyarray(ir_frame_1.get_data())
        ir_image_1 = cv2.applyColorMap(
            cv2.convertScaleAbs(ir_image_1, alpha=1), cv2.COLOR_BGRA2GRAY)

        ir_image_2 = np.asanyarray(ir_frame_2.get_data())
        ir_image_2 = cv2.applyColorMap(
            cv2.convertScaleAbs(ir_image_2, alpha=1), cv2.COLOR_BGRA2GRAY)

        depth_image = np.asanyarray(depth_frame.get_data())
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.02), cv2.COLORMAP_JET)

        scaled_size = (frame.width, frame.height)
        # expand image dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        image_expanded = np.expand_dims(color_image, axis=0)

        frame = cv2.cvtColor(np.asanyarray(frame.get_data()),
                             cv2.COLOR_BGR2RGB)
        bg_removed = np.where(
            (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
            grey_color, ir_image_1)
        bg_removed[bg_removed != [0, 0, 0]] = 255

        white_count = np.count_nonzero(bg_removed == 255)
        black_count = np.count_nonzero(bg_removed == 0)
        pixels = bg_removed.shape[0] * bg_removed.shape[1] * bg_removed.shape[2]
        if (white_count > pixels * collision_percent):
            _collision.value = True
        else:
            _collision.value = False

        cv2.imshow("bg_removed", bg_removed)
        t = cv2.waitKey(1)
        if t == ord('q'):
            _running.value = False
            break
        # _, _buffer.value = cv2.imencode('.jpg', bg_removed)
        print(type(base64.b64encode(cv2.imencode('.jpg', bg_removed)[1])))
        try:
            _buffer.value = str(
                base64.b64encode(cv2.imencode('.jpg', bg_removed)[1]))
        except Exception as e:
            print(e)
Example #14
    def init(self):
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                  30)

        # Start pipeline
        attempt = 1  # attempt counter; reset once startup succeeds
        while (attempt > 0):
            try:
                profile = self.pipeline.start(self.config)
            except (RuntimeError):
                print("\033[0;31m未检测到相机!重试%s/5\033[0m" % (attempt))
                attempt += 1
                if attempt > 5:
                    os._exit(0)
            else:
                attempt = 0
        while (attempt >= 0):
            try:
                self.frames = self.pipeline.wait_for_frames()
            except (RuntimeError):
                print("\033[0;31m相机卡死!重试%s/5\033[0m" % attempt)
                attempt += 1
                if attempt > 5:
                    os._exit(0)
            else:
                attempt = -1
        color_frame = self.frames.get_color_frame()
        depth_frame = self.frames.get_depth_frame()
        # Set camera parameters
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.motion_range, 140)  # maximum depth range (max 220)
        depth_sensor.set_option(rs.option.accuracy, 3)  # max=3
        depth_sensor.set_option(rs.option.filter_option, 7)  # 7
        depth_sensor.set_option(rs.option.confidence_threshold, 15)  # 15
        self.depth_scale = depth_sensor.get_depth_scale()  # used to convert depth values to meters

        # Color Intrinsics
        intr = color_frame.profile.as_video_stream_profile().intrinsics
        camera_parameters = {
            'fx': intr.fx,
            'fy': intr.fy,
            'ppx': intr.ppx,
            'ppy': intr.ppy,
            'height': intr.height,
            'width': intr.width
        }
        with open('intrinsics.json', 'w') as fp:
            json.dump(camera_parameters, fp)

        depth_intr = depth_frame.profile.as_video_stream_profile().intrinsics
        depth_parameters = {
            'fx': depth_intr.fx,
            'fy': depth_intr.fy,
            'ppx': depth_intr.ppx,
            'ppy': depth_intr.ppy,
            'height': depth_intr.height,
            'width': depth_intr.width
        }
        with open('depth_intrinsics.json', 'w') as fp:
            json.dump(depth_parameters, fp)

        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(
            color_frame.profile)
        extrin_parameters = {
            'rot': depth_to_color_extrin.rotation,
            'tran': depth_to_color_extrin.translation
        }
        with open('extrinsics.json', 'w') as fp:
            json.dump(extrin_parameters, fp)

        align_to = rs.stream.color
        self.align = rs.align(align_to)

        for i in range(30):
            self.frames = self.pipeline.wait_for_frames()
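The intrinsics written to intrinsics.json are enough to back-project a pixel plus its depth into a 3D point in the camera frame with the pinhole model; a small sketch (the helper name and its use of the JSON file are assumptions):

import json

def deproject(u, v, depth_m, intrinsics_path='intrinsics.json'):
    # Pinhole back-projection of pixel (u, v) at depth_m meters.
    with open(intrinsics_path) as fp:
        intr = json.load(fp)
    x = (u - intr['ppx']) / intr['fx'] * depth_m
    y = (v - intr['ppy']) / intr['fy'] * depth_m
    return x, y, depth_m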