Code Example #1
File: pyrs.py Project: yiskw713/pyrs
    def __init__(self, w=640, h=480, depth=True, frame_rate=30):
        '''
        Initializing the Python RealSense Control Flow:
        w: Int (default = 640, can also be 1280) 
        h: Int (default = 480, can also be 720)
        depth: Bool (default = True)
        frame_rate: Int (default = 30)

        RGB and Depth formats are: bgr8, z16

        Note: In this class, variables should not be directly changed.
        '''
        self.width = w
        self.height = h
        self.depth_on = depth
        self._pipeline = rs.pipeline()
        self._config = rs.config()
        self._config.enable_stream(rs.stream.color, w, h, rs.format.bgr8,
                                   frame_rate)
        self._intrinsic = None

        if depth:
            self.align = rs.align(rs.stream.color)
            self._preset = 0
            # Presets (for D415):
            # 0: Custom
            # 1: Default
            # 2: Hand
            # 3: High Accuracy
            # 4: High Density
            # 5: Medium Density

            # depth interpolation
            self.interpolation = cv2.INTER_NEAREST  # use nearest neighbor
            # self.interpolation = cv2.INTER_LINEAR  # linear
            # self.interpolation = cv2.INTER_CUBIC  # cubic

            # beautify depth image for viewing
            self._config.enable_stream(rs.stream.depth, w, h, rs.format.z16,
                                       frame_rate)
            self.colorizer = rs.colorizer()

            # initialize filters
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)

            self.depth_to_disparity = rs.disparity_transform(True)

            self.spatial = rs.spatial_filter()
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 0.5)
            self.spatial.set_option(rs.option.filter_smooth_delta, 20)

            self.temporal = rs.temporal_filter()

            self.disparity_to_depth = rs.disparity_transform(False)

        print(
            "Initialized RealSense Camera\nw: {}, h: {}, depth: {}, frame_rate: {}"
            .format(w, h, depth, frame_rate))
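The constructor above only configures the filters; nothing here applies them. Below is a minimal per-frame sketch, written as a hypothetical read_filtered() method of the same class, applying the chain in librealsense's recommended order (decimation, depth-to-disparity, spatial, temporal, disparity-to-depth); it assumes self._pipeline.start(self._config) has already been called and that numpy is imported as np, as in the other examples here.

    def read_filtered(self):
        # Hypothetical helper, not part of the original class.
        frames = self._pipeline.wait_for_frames()
        frames = self.align.process(frames)  # align depth to the color stream
        depth = frames.get_depth_frame()
        # Recommended post-processing order:
        depth = self.decimation.process(depth)          # downsample
        depth = self.depth_to_disparity.process(depth)  # depth -> disparity
        depth = self.spatial.process(depth)             # edge-preserving smoothing
        depth = self.temporal.process(depth)            # smooth across frames
        depth = self.disparity_to_depth.process(depth)  # disparity -> depth
        color = np.asanyarray(frames.get_color_frame().get_data())
        depth_vis = np.asanyarray(self.colorizer.colorize(depth).get_data())
        return color, depth_vis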
Code Example #2
File: Camera.py Project: GeranS/volkerrail-dempers
    def __init__(self):
        self.conversion_service = ConversionService.get_instance()

        self.pipeline = rs.pipeline()
        config = rs.config()

        self.threshold_filter = rs.threshold_filter()
        self.threshold_filter.set_option(rs.option.max_distance, 4)

        config.enable_stream(rs.stream.depth, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, rs.format.bgr8, 30)

        # Start streaming
        self.pipeline.start(config)
        self.profile = self.pipeline.get_active_profile()

        self.sensor = self.profile.get_device().first_depth_sensor()
        self.sensor.set_option(rs.option.visual_preset, value=1)
        self.sensor.set_option(rs.option.motion_range, value=200)

        # Set colour palette to black to white (colour scheme 3)
        self.colorizer = rs.colorizer(3)

        self.colorizer.set_option(rs.option.min_distance, 0)
        self.colorizer.set_option(rs.option.max_distance, 0.1)
        self.colorizer.set_option(rs.option.histogram_equalization_enabled,
                                  False)
Code Example #3
File: realsensecv.py Project: yumion/onodera-lab
    def read(self, is_filtered=False, raw_frame=False):
        # Flag capture available
        ret = True
        # get frames
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(
            frames)  # Align the depth frame to color frame
        # separate RGB and Depth image
        self.color_frame = aligned_frames.get_color_frame()  # RGB
        self.depth_frame = aligned_frames.get_depth_frame()  # Depth

        if not self.color_frame or not self.depth_frame:
            ret = False
            return ret, (None, None)

        if raw_frame:
            return ret, (self.color_frame, self.depth_frame)

        else:
            # Get filtered depth frame
            depth_frame = self.filtering(
                self.depth_frame) if is_filtered else self.depth_frame
            # Express depth frame by heatmap
            depth_colorized_frame = rs.colorizer().colorize(depth_frame)
            # Convert images to numpy arrays
            color_image = np.array(self.color_frame.get_data())
            depth_image = np.array(depth_colorized_frame.get_data())
            # depth_colorized_image = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.08), cv2.COLORMAP_JET)
            return ret, (color_image, depth_image)
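One detail worth noting in read(): rs.colorizer() is constructed anew on every call. Hoisting it into the constructor avoids the per-frame allocation; a minimal sketch of the change, shown as two fragments and assuming an __init__ similar to the other examples here:

    # In __init__ (sketch):
    self.colorizer = rs.colorizer()

    # In read(), replacing rs.colorizer().colorize(depth_frame):
    depth_colorized_frame = self.colorizer.colorize(depth_frame)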
Code Example #4
    def playback(self):
        playback_window_title_suffix = 'Streaming Loop (esc to cancel)'
        depth_window = 'Depth Frames ' + playback_window_title_suffix
        color_window = 'Color Frames ' + playback_window_title_suffix
        cv2.namedWindow(depth_window, cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow(color_window, cv2.WINDOW_AUTOSIZE)
        self.pipeline = start_pipeline(advanced_mode=self.video_grabber_config.advanced_mode,
                                       width=self.video_grabber_config.width,
                                       height=self.video_grabber_config.height,
                                       fps=self.video_grabber_config.fps,
                                       preset_file=self.video_grabber_config.preset_file,
                                       from_file=self.video_grabber_config.record_to_file)
        while True:
            if self.pipeline is None:
                break
            frames = self.pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            depth_frame = rs.colorizer().colorize(depth_frame)
            if not depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Show images
            cv2.imshow(color_window, color_image)
            cv2.imshow(depth_window, depth_image)
            key = cv2.waitKey(1)
            # Exit on esc key
            if key == 27:
                cv2.destroyAllWindows()
                break
Code Example #5
    def __init__(self):
        """ Initializes the class. """

        super(RealsenseRead, self).__init__()

        self.Helpers = Helpers("Realsense D415 Reader")

        self.colorizer = rs.colorizer()

        # OpenCV fonts
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.black = (0, 0, 0)
        self.green = (0, 255, 0)
        self.white = (255, 255, 255)

        # Starts the Realsense module
        self.Realsense = Realsense()
        # Connects to the Realsense camera
        self.profile = self.Realsense.connect()

        # Starts the socket module
        self.Socket = Socket("Realsense D415 Reader")

        # Sets up the object detection model
        self.Model = Model()

        self.Helpers.logger.info(
            "Realsense D415 Reader Class initialization complete.")
Code Example #6
File: depth_camera.py Project: zhangfaquan/Sparkie
    def __init__(self, color, ip, port, topic, interval):
        Publisher.__init__(self, ip, port, topic)
        self.interval = interval
        self.lastUpdate = self.millis(self)

        pipe = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        profile = pipe.start(cfg)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

        self.running = True
        self.color = color

        zero_vec = (0.0, 0.0, 0.0)
        self.depth_frame = zero_vec
        self.color_frame = zero_vec
        self.depth_colormap = zero_vec

        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        #self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.decimate)
        self.colorizer = rs.colorizer()
Code Example #7
    def initializeStreamFromBag(self, path_to_bag):

        if os.path.splitext(path_to_bag)[1] != '.bag':
            print("The given file is not of correct file format.")
            print("Only .bag files are accepted")
            exit()

        # Create pipeline
        self.pipeline_bag = rs.pipeline()

        # Create a config object
        config_bag = rs.config()

        # Tell config that we will use a recorded device from file to be used by the pipeline through playback.
        rs.config.enable_device_from_file(config_bag, path_to_bag)

        # Configure the pipeline to stream the depth and color streams
        # Change these parameters according to the recorded bag file resolution
        # TODO: configurable parameters
        config_bag.enable_stream(rs.stream.depth)
        config_bag.enable_stream(rs.stream.color)
        #config_bag.enable_all_streams()

        # Start streaming from file
        self.pipeline_bag.start(config_bag)

        # Create colorizer object
        self.colorizer_bag = rs.colorizer()

        # Create an align object
        align_to = rs.stream.color
        self.align = rs.align(align_to)
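By default, playback from a bag file is paced in real time, so frames can be dropped if processing falls behind. When every recorded frame matters, the playback device can be switched to non-real-time mode right after the pipeline starts (Code Example #22 below does the same); a minimal sketch:

        # Sketch: process every recorded frame instead of dropping late ones
        # (would go at the end of initializeStreamFromBag)
        profile = self.pipeline_bag.get_active_profile()
        playback = profile.get_device().as_playback()
        playback.set_real_time(False)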
Code Example #8
    def initRealSense(self, resolution=(640, 480), fps=60):

        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        self.device_product_line = str(
            device.get_info(rs.camera_info.product_line))

        # Enable stream for RealSense
        res_x = resolution[0]
        res_y = resolution[1]
        self.config.enable_stream(rs.stream.depth, res_x, res_y, rs.format.z16,
                                  fps)
        self.config.enable_stream(rs.stream.color, res_x, res_y,
                                  rs.format.bgr8, fps)

        self.pipeline.start(self.config)

        # Depth jetmap init
        self.colorizer = rs.colorizer()
        # Create an align object
        align_to = rs.stream.color
        self.align = rs.align(align_to)
Code Example #9
    def get_frame_stream(self):
        # Wait for a coherent pair of frames: depth and color
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            # If there is no frame, probably camera not connected, return False
            print(
                "Error, impossible to get the frame, make sure that the Intel Realsense camera is correctly connected"
            )
            print("Depth frame: " + str(bool(depth_frame)) + " Color: " +
                  str(bool(color_frame)))
            return False, None, None

        # Apply filter to fill the Holes in the depth image
        spatial = rs.spatial_filter()
        spatial.set_option(rs.option.holes_fill, 3)
        filtered_depth = spatial.process(depth_frame)

        hole_filling = rs.hole_filling_filter()
        filled_depth = hole_filling.process(filtered_depth)

        # Create colormap to show the depth of the Objects
        colorizer = rs.colorizer()
        depth_colormap = np.asanyarray(
            colorizer.colorize(filled_depth).get_data())

        # Convert images to numpy arrays
        depth_image = np.asanyarray(filled_depth.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        return True, color_image, depth_image
Code Example #10
    def video(self):
        align_to = rs.stream.color
        align = rs.align(align_to)
        for i in range(10):
            self.pipeline.wait_for_frames()
        while True:
            frames = self.pipeline.wait_for_frames()
            aligned_frames = align.process(frames)
            color_frame = aligned_frames.get_color_frame()
            depth_frame = aligned_frames.get_depth_frame()

            self.depth_frame = depth_frame

            color_image = np.asanyarray(color_frame.get_data())
            self.color_intrin = color_frame.profile.as_video_stream_profile().intrinsics

            depth_color_frame = rs.colorizer().colorize(depth_frame)

            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            self.show(color_cvt)

            break
Code Example #11
def set_pointcloud_variable():
    pc = rs.pointcloud()
    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2**1)
    colorizer = rs.colorizer()

    return pc, decimate, colorizer
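A short sketch of how the three blocks returned above are commonly combined per frame; pipeline here is a hypothetical, already-started rs.pipeline, and np is numpy:

    pc, decimate, colorizer = set_pointcloud_variable()
    frames = pipeline.wait_for_frames()
    depth = decimate.process(frames.get_depth_frame())  # downsampled depth
    color = frames.get_color_frame()
    pc.map_to(color)               # attach color texture coordinates
    points = pc.calculate(depth)   # point cloud from the decimated depth
    verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)
    depth_vis = np.asanyarray(colorizer.colorize(depth).get_data())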
Code Example #12
File: publish.py Project: tloula/autonav
    def __init__(self):
        AutoNavNode.__init__(self, "rs_publisher")

        # Read ROS Params
        self.read_param("/DisplayCameraImages", "DISPLAY", False)
        self.read_param("/ObjectCropBottom", "CROP_BOTTOM", 0.40)
        self.read_param("/ObjectCropTop", "CROP_TOP", 0.10)
        self.read_param("/ObjectCropSide", "CROP_SIDE", 0.30)
        self.read_param("/DanielDebug", "DANIEL_DEBUG", False)

        # Connect to RealSense Camera
        self.pipeline = rs.pipeline()
        profile = self.pipeline.start()

        # Initialize Camera
        self.SCALE = 0.0010000000475
        depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
        if abs(self.SCALE - depth_scale) >= 1e-13:
            rospy.logerr("Depth scale changed to %.13f" % depth_scale)

        # Initialize Colorizer
        self.colorizer = rs.colorizer()

        # Publish camera data
        self.color_pub = rospy.Publisher("camera/color/image_raw",
                                         Image,
                                         queue_size=1)
        self.depth_pub = rospy.Publisher("camera/depth/image_rect_raw",
                                         Float64MultiArray,
                                         queue_size=1)
Code Example #13
def get_depth(y,x,depth,frame,frames):
    colorizer = rs.colorizer()
    # Create alignment primitive with color as its target stream:
    align = rs.align(rs.stream.color)
    frames = align.process(frames)

    # Update color and depth frames:
    aligned_depth_frame = frames.get_depth_frame()
    colorized_depth = np.asanyarray(colorizer.colorize(aligned_depth_frame).get_data())
    height, width = frame.shape[:2]
    expected = 300
    aspect = width / height
    resized_image = cv2.resize(frame, (int(round(expected * aspect)), expected))
    crop_start = int(round(expected * (aspect - 1) / 2))
    crop_img = resized_image[0:expected, crop_start:crop_start + expected]
    scale = height / expected

    depth = np.asanyarray(aligned_depth_frame.get_data())

    cv2.circle(colorized_depth, (x, y), 2, (255, 255, 255), thickness=2)
    cv2.imshow("depth", colorized_depth)

    # Back-project the pixel to camera space from the depth value,
    # assuming roughly a 58 x 45 degree depth FOV at 640x480
    z = aligned_depth_frame.get_distance(x, y)
    w = 640
    h = 480
    x = (2 * math.tan(math.radians(29)) * z) * ((x - w / 2) / 640)
    y = (2 * math.tan(math.radians(22.5)) * z) * ((y - h / 2) / 480)
    real_values = [z * 100, -x * 100, -y * 100]  # cm
    return real_values
Code Example #14
File: cap.py Project: hobbitsyfeet/3DMeasure
    def capture_colour(self):
        pipeline = rs2.pipeline()
        pipeline.start()

        # Create opencv window to render image in
        cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)

        # Streaming loop
        while True:
            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            # Colorize depth frame to jet colormap
            depth_color_frame = rs2.colorizer().colorize(depth_frame)

            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())

            # Render image in opencv window
            cv2.imshow("Depth Stream", depth_color_image)
            key = cv2.waitKey(1)
            # if pressed escape exit program
            if key == 27:
                cv2.destroyAllWindows()
                break
Code Example #15
    def depthImageCV2(self, depthFrame):
        # inputs the depth frame, then applies colour mapping and converts
        # the result to a numpy array to be displayed in cv2
        colorized = rs.colorizer(3)  # can change value for colour map
        colorized_depth = np.asanyarray(
            colorized.colorize(depthFrame).get_data())
        return colorized_depth
Code Example #16
def cameraConfig(threshold_distance):
    # Configure depth and color streams
    pipeline = rs.pipeline()
    colorizer = rs.colorizer()
    config = rs.config()
    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    depth_sensor = pipeline_profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Convert the clipping threshold from meters to raw depth units
    clipping_distance_in_meters = threshold_distance
    clipping_distance = clipping_distance_in_meters / depth_scale
    align_to = rs.stream.color

    return pipeline, align_to, config, clipping_distance
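cameraConfig() computes clipping_distance in raw depth units but never uses it; the typical background-removal pattern with the returned values (compare Code Example #24) might look like this sketch:

    pipeline, align_to, config, clipping_distance = cameraConfig(1.0)
    pipeline.start(config)
    align = rs.align(align_to)

    frames = align.process(pipeline.wait_for_frames())
    depth_image = np.asanyarray(frames.get_depth_frame().get_data())
    color_image = np.asanyarray(frames.get_color_frame().get_data())

    # Grey out pixels beyond the clipping distance (or with no depth)
    grey_color = 153
    depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
    bg_removed = np.where(
        (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
        grey_color, color_image)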
Code Example #17
    def __init__(self,
                 cam_id,
                 filter_depth=True,
                 frame=None,
                 registration_mode=RealSenseRegistrationMode.DEPTH_TO_COLOR):
        self._running = None

        self.id = cam_id
        self._registration_mode = registration_mode
        self._filter_depth = filter_depth

        self._frame = frame

        if self._frame is None:
            self._frame = 'realsense'
        self._color_frame = '%s_color' % (self._frame)

        # realsense objects
        self._pipe = rs.pipeline()
        self._cfg = rs.config()
        self._align = rs.align(rs.stream.color)

        # camera parameters
        self._depth_scale = None
        self._intrinsics = np.eye(3)

        # post-processing filters
        self._colorizer = rs.colorizer()
        self._spatial_filter = rs.spatial_filter()
        self._hole_filling = rs.hole_filling_filter()
Code Example #18
def aligned(config, frames):
    color_frame = frames.get_color_frame()
    align = rs.align(rs.stream.color)
    frameset = align.process(frames)
    aligned_depth_frame = frameset.get_depth_frame()

    colorizer = rs.colorizer()
    push_depth = np.asanyarray(aligned_depth_frame.get_data())

    colorized_depth = np.asanyarray(colorizer.colorize(aligned_depth_frame).get_data())
    color = np.asanyarray(color_frame.get_data())
    alpha = config['alpha']
    beta = 1 - alpha
    images = cv2.addWeighted(color, alpha, colorized_depth, beta, 0.0)

    low_threshold = 50
    ratio = 2  # recommend ratio 2:1 - 3:1
    kernal = 3
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    depth_to_jet = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    edges = cv2.Canny(depth_to_jet, low_threshold, low_threshold * ratio, kernal)
    # images = depth_image
    edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
    images = cv2.addWeighted(images, alpha, edges, beta, 0.0)

    return images, push_depth, color
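The config argument is only read for its 'alpha' blending weight; a minimal call sketch, assuming frames come from an rs.pipeline started elsewhere:

    frames = pipeline.wait_for_frames()  # 'pipeline' assumed started elsewhere
    images, push_depth, color = aligned({'alpha': 0.6}, frames)
    cv2.imshow('Aligned', images)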
Code Example #19
File: rsRecorder.py Project: rofreli/DepthCam
    def recording(self, file, aviMovie):
        if aviMovie:
            fourcc = cv2.VideoWriter_fourcc(*'DIVX')
            outRGB = cv2.VideoWriter(file + '.avi', fourcc, 30,
                                     (self.frame_width, self.frame_height))
            outDEPTH = cv2.VideoWriter(file + '-D.avi', fourcc, 30,
                                       (self.frame_width, self.frame_height))

        while True:

            if aviMovie:
                frames = self.newPipe.wait_for_frames()

                color_frame = frames.get_color_frame()
                depth_frame = frames.get_depth_frame()
                depth_image = np.asanyarray(depth_frame.get_data())
                color_image = np.asanyarray(color_frame.get_data())

                colorizer = rs.colorizer()
                colorizer.set_option(rs.option.color_scheme, 2)
                # white to black
                colorized_depth = np.asanyarray(
                    colorizer.colorize(depth_frame).get_data())

                cv2.imshow('Color', color_image)
                cv2.imshow('Depth', colorized_depth)

                outRGB.write(color_image)
                outDEPTH.write(colorized_depth)

            ch = cv2.waitKey(1)
            if ch == ord('s'):
                self.newPipe.stop()
                self.pipe.start(self.config)
                break
Code Example #20
    def show_frames(self, end_time=None):
        """Gets and shows frames obtained from the stream."""
        # align depth image with color image
        if self._align_frames:
            align_to = rs.stream.color
            align = rs.align(align_to)

        if end_time is not None:
            start = time.time()
        try:
            while True:
                # waiting for a frame (Color & Depth)
                try:
                    frames = self._pipeline.wait_for_frames(5000)
                    if self._align_frames:
                        frames = align.process(frames)
                except RuntimeError:
                    break
                color_frame = frames.get_color_frame()

                if self._save_type in ['D', 'RGBD', 'RGBDIR']:
                    depth_frame = frames.get_depth_frame()
                if self._save_type in ['IR', 'RGBDIR']:
                    infrared_frame = frames.get_infrared_frame()

                if not color_frame:
                    continue
                if (self._save_type in ['D', 'RGBD', 'RGBDIR']) \
                   and not depth_frame:
                    continue
                if (self._save_type in ['IR', 'RGBDIR']) \
                   and not infrared_frame:
                    continue
                images = np.asanyarray(color_frame.get_data())
                if self._save_type in ['D', 'RGBD', 'RGBDIR']:
                    depth_color_frame = rs.colorizer().colorize(depth_frame)
                    depth_color_image = np.asanyarray(
                        depth_color_frame.get_data())
                    images = np.hstack((images, depth_color_image))
                if self._save_type in ['IR', 'RGBDIR']:
                    infrared_image = np.asanyarray(infrared_frame.get_data())
                    infrared_3c_image = cv2.cvtColor(infrared_image,
                                                     cv2.COLOR_GRAY2BGR)
                    images = np.hstack((images, infrared_3c_image))

                # displaying
                dst_images = self.scale_to_width(images, 800)
                cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
                cv2.moveWindow('RealSense', 100, 200)
                cv2.imshow('RealSense', dst_images)
                if cv2.waitKey(1) & 0xff == 27:
                    break
                if end_time is not None:
                    # stop once end_time seconds have elapsed
                    elapsed_time = time.time() - start
                    if elapsed_time > end_time:
                        break
        finally:
            # stop streaming
            self.close()
Code Example #21
    def __init__(self, filters=[]):
        """
        Connect to RealSense and initialize filters
        :param filters: [String, ...], default=[]: filters to enable, from
            'align', 'decimation', 'spatial', 'temporal', 'hole_filling'
            and 'colorize'
        """
        self.pipe = rs.pipeline()
        cfg = rs.config()
        profile = self.pipe.start(cfg)
        # camera parameters
        self.depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()

        # filters to apply to depth images
        self.filters = filters
        if 'align' in self.filters:
            self.align = rs.align(rs.stream.color)
        if 'decimation' in self.filters:
            self.decimation = rs.decimation_filter()
            self.decimation.set_option(rs.option.filter_magnitude, 4)
        if 'spatial' in self.filters:
            self.spatial = rs.spatial_filter()
            # self.spatial.set_option(rs.option.holes_fill, 3)
            self.spatial.set_option(rs.option.filter_magnitude, 5)
            self.spatial.set_option(rs.option.filter_smooth_alpha, 1)
            self.spatial.set_option(rs.option.filter_smooth_delta, 50)
        if 'temporal' in self.filters:
            # TODO
            self.temporal = rs.temporal_filter()
            self.temporal_iters = 3
        if 'hole_filling' in self.filters:
            self.hole_filling = rs.hole_filling_filter()
        if 'colorize' in self.filters:
            self.colorizer = rs.colorizer()
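The class stores the enabled filters but this snippet never applies them; a hypothetical process() helper (not in the original class) walking them in the order they were initialized might look like:

    def process(self, depth_frame):
        # Hypothetical sketch; note the temporal filter only has a real
        # effect when fed a stream of successive frames.
        if 'decimation' in self.filters:
            depth_frame = self.decimation.process(depth_frame)
        if 'spatial' in self.filters:
            depth_frame = self.spatial.process(depth_frame)
        if 'temporal' in self.filters:
            depth_frame = self.temporal.process(depth_frame)
        if 'hole_filling' in self.filters:
            depth_frame = self.hole_filling.process(depth_frame)
        return depth_frame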
Code Example #22
def test_bag(input_file):
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_device_from_file(input_file, False)
    config.enable_all_streams()
    profile = pipeline.start(config)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(False)
    try:
        while True:
            frames = pipeline.wait_for_frames()
            print('Xx')
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            depth_color_frame = rs.colorizer().colorize(depth_frame)
            depth_image = np.asanyarray(depth_color_frame.get_data())
            depth_colormap_resize = cv2.resize(depth_image, (424, 240))
            color_image = np.asanyarray(color_frame.get_data())
            color_cvt = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
            color_cvt_2 = cv2.resize(color_cvt, (320, 240))
            images = np.hstack((color_cvt_2, depth_colormap_resize))
            cv2.namedWindow('Color', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Color', images)
            if cv2.waitKey(1) & 0xFF == 27:  # esc ends playback
                break
        time.sleep(1)
        pipeline.stop()
    except RuntimeError:
        print('{} unindexed'.format(input_file))
    finally:
        print('tested')
Code Example #23
    def __init__(self):
        # ---------------------------------------------------------
        # real sense
        # ---------------------------------------------------------
        # Configure the streams (Depth/Color)
        print("start Depth camera")
        width: int = 640
        height: int = 480
        config = rs.config()
        config.enable_stream(rs.stream.color, width, height, rs.format.bgr8,
                             30)
        config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)

        # Colorizer for depth visualization
        self.colorizer = rs.colorizer()

        # Start streaming
        self.pipeline = rs.pipeline()
        profile = self.pipeline.start(config)

        # get device information
        depth_sensor = profile.get_device().first_depth_sensor()
        #print("depth sensor:",depth_sensor)
        self.depth_scale = depth_sensor.get_depth_scale()
        #print("depth scale:",depth_scale)
        clipping_distance_in_meters = 1.0  # meter
        clipping_distance = clipping_distance_in_meters / self.depth_scale
        print("clipping_distance:", clipping_distance)

        # Create an align object
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        self.frameNo = 0
        self.timenow = ""
Code Example #24
    def get_imgs_from_frames(self, color_frame, depth_frame):
        # get_data * depth_scale to get actual distance?
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # TODO: the profile returned by self.pipeline.start(config) is currently not actually used
        # Remove background - Set pixels further than clipping_distance to grey
        grey_color = 153
        # depth image is 1 channel, color is 3 channels
        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
        bg_removed = np.where(
            (depth_image_3d > self.clipping_distance) | (depth_image_3d <= 0),
            grey_color, color_image)
        # Render images
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        # make dgr image
        b, g, r = cv2.split(color_image)
        print("colorizer mode: ", 2)
        colorizer = rs.colorizer(2)
        depth_colorized = np.asanyarray(
            colorizer.colorize(depth_frame).get_data())
        dgr = cv2.merge((depth_colorized[:, :, 0], g, r))

        return color_image, depth_colorized, dgr
Code Example #25
    def __init__(self, pipeline):
        self.pipe = pipeline
        self.scale = 1
        self.w = 640
        self.h = 480
        self.pc = None
        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        self.decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
        self.colorizer = rs.colorizer()
        self.state = state  # general ImageHandler model state (defined elsewhere)
        self.out = out  # general image handler output, used unless overridden

        # Data from camera
        self.image = None
        self.verts = None

        # Setup camera
        try:
            # Get stream profile and camera intrinsics
            profile = pipeline.get_active_profile()
            depth_profile = rs.video_stream_profile(
                profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
            depth_sensor = profile.get_device().first_depth_sensor()
            self.scale = depth_sensor.get_depth_scale()
        except RuntimeError as err:
            print(err)
Code Example #26
    def __init__(self, path=None, dim=None, hz=None):
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.colorizer = rs.colorizer()
        # https://intelrealsense.github.io/librealsense/python_docs/_generated/pyrealsense2.colorizer.html?highlight=color#pyrealsense2.colorizer.colorize
        # 0 - Jet
        # 1 - Classic
        # 2 - WhiteToBlack
        # 3 - BlackToWhite
        # 4 - Bio
        # 5 - Cold
        # 6 - Warm
        # 7 - Quantized
        # 8 - Pattern
        self.colorizer.set_option(rs.option.color_scheme, 0)
        if path is not None:
            print("Playback from bag:", path,
                  "(ignoring dimension & frequency params)")
            self.config.enable_device_from_file(path)
        else:
            print("Depth stream:", dim, "@", hz, "hz")
            self.config.enable_stream(rs.stream.depth, dim[0], dim[1],
                                      rs.format.z16, hz)

        self.sinks = None
        self.color_sinks = None
Code Example #27
def main():
    try:
        check_license_and_variables_exist()
        sdk_path = os.environ["CUBEMOS_SKEL_SDK"]
        #initialize the api with a valid license key in default_license_dir()
        api = Api(default_license_dir())
        model_path = os.path.join(sdk_path, "models", "skeleton-tracking",
                                  "fp32", "skeleton-tracking.cubemos")
        api.load_model(CM_TargetComputeDevice.CM_CPU, model_path)
    except Exception as ex:
        print("Exception occurred: \"{}\"".format(ex))
        return

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    pipeline.start(config)

    c = rs.colorizer()
    align_to = rs.stream.color
    align = rs.align(align_to)
    while True:
        frames = pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        color = aligned_frames.get_color_frame()
        depth = aligned_frames.get_depth_frame()
        if not depth: continue

        color_data = color.as_frame().get_data()
        color_image = np.asanyarray(color_data)

        skeletons = api.estimate_keypoints(color_image, 192)

        # perform inference again to demonstrate tracking functionality.
        # usually you would estimate the keypoints on another image and then
        # update the tracking id
        #new_skeletons = api.estimate_keypoints(img, 192)
        #new_skeletons = api.update_tracking_id(skeletons, new_skeletons)

        render_result(skeletons, color_image, confidence_threshold=0.5)

        depth_colormap = c.colorize(depth)
        depth_data = depth_colormap.as_frame().get_data()
        depth_image = np.asanyarray(depth_data)
        depth_image = cv2.cvtColor(depth_image, cv2.COLOR_RGB2BGR)

        added_image = cv2.addWeighted(color_image, 0.5, depth_image, 0.5, 0)

        cv2.imshow('color', color_image)
        cv2.imshow('depth', depth_image)
        cv2.imshow('overlay', added_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    return
Code Example #28
def inference(model):
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    # Start streaming
    pipeline.start(config)

    # Create the colorizer and align objects once, outside the frame loop
    colorizer = rs.colorizer()
    colorizer.set_option(rs.option.color_scheme, 2)  # 2 = WhiteToBlack
    align = rs.align(rs.stream.color)

    while True:
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        if not color_frame or not depth_frame:
            continue

        color_image = np.asanyarray(color_frame.get_data())

        frameset = align.process(frames)

        aligned_depth_frame = frameset.get_depth_frame()
        colorized_depth = np.asanyarray(
            colorizer.colorize(aligned_depth_frame).get_data())

        # depth_gray = cv2.cvtColor(colorized_depth, cv2.COLOR_BGR2GRAY)
        #
        # x, mask = cv2.threshold(depth_gray, 15, 255, cv2.THRESH_BINARY_INV)
        #
        # dst = cv2.inpaint(depth_gray, mask, 3, cv2.INPAINT_NS)
        #
        # dst = cv2.cvtColor(dst, cv2.COLOR_GRAY2RGB)

        inference_results = model.detect([color_image], verbose=1)

        # Display results
        r = inference_results[0]
        print(r['class_ids'])
        result_image = visualize_mask(color_image,
                                      r['rois'],
                                      r['masks'],
                                      r['class_ids'],
                                      class_names,
                                      r['scores'],
                                      title="Predictions")

        # Show images
        cv2.imshow('RealSense', result_image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    pipeline.stop()
    cv2.destroyAllWindows()
Code Example #29
    def __init__(self, res_w=1280, res_h=720, fps=30, use_color=True, use_depth=True, show_depth=True, preset=1, color_scheme=0):
        assert use_color or use_depth, "At least 1 of the 2 flags 'use_color' and 'use_depth' must be set to True"

        self.use_color = use_color
        self.use_depth = use_depth
        self.use_color_depth = use_color and use_depth
        self.show_depth = use_depth and show_depth
        # Resolution in pixels
        self.res_w = res_w
        self.res_h = res_h
        # Preset: 1=default, 2=hand, 3=high accuracy, 4=high density, 5=medium density

        # Setup of the sensor
        self.pipe = rs.pipeline()
        self.cfg = rs.config()
        if self.use_color_depth: self.align = rs.align(rs.stream.color)

        # Color stream
        if self.use_color: self.cfg.enable_stream(rs.stream.color, res_w, res_h, rs.format.bgr8, fps)
        # Depth stream to configuration
        # Format z16: 0<=z<=65535
        if self.use_depth: self.cfg.enable_stream(rs.stream.depth, res_w, res_h, rs.format.z16, fps)
        self.queue = rs.frame_queue(2 if self.use_color_depth else 1) # With a buffer capacity of 2, we reduce latency
        self.pipe_profile = self.pipe.start(self.cfg, self.queue)
        
        # Color scheme: 0: Jet, 1: Classic, 2: WhiteToBlack, 3: BlackToWhite, 4: Bio, 5: Cold, 6: Warm, 7: Quantized, 8: Pattern, 9: Hue
        if self.show_depth:
            self.colorizer = rs.colorizer()
            self.colorizer.set_option(rs.option.color_scheme, color_scheme)

        if self.use_depth:
            self.dev = self.pipe_profile.get_device()
            # self.advnc_mode = rs.rs400_advanced_mode(self.dev)
            # print("Advanced mode is", "enabled" if self.advnc_mode.is_enabled() else "disabled")
            self.depth_sensor = self.dev.first_depth_sensor()
            # Set preset
            self.preset = preset
            self.depth_sensor.set_option(rs.option.visual_preset, self.preset)
            print(f"Current preset: {self.depth_sensor.get_option_value_description(rs.option.visual_preset, self.depth_sensor.get_option(rs.option.visual_preset))}")

            # Set the disparity shift
            # if disparity_shift != -1:
            #     self.depth_table_control_group = self.advnc_mode.get_depth_table()
            #     self.depth_table_control_group.disparityShift = disparity_shift
            #     self.advnc_mode.set_depth_table(self.depth_table_control_group)
            # self.depth_table_control_group = self.advnc_mode.get_depth_table()
            # print("Disparity shift:", self.depth_table_control_group.disparityShift)

            # Set depth units
            # self.set_depth_units(depth_units)

            # Get intrinsics
            self.stream_profile = self.pipe_profile.get_stream(rs.stream.depth) # Fetch stream profile for depth stream
            self.intrinsics = self.stream_profile.as_video_stream_profile().get_intrinsics() # Downcast to video_stream_profile and fetch intrinsics

            # Get depth scale
            self.depth_scale = self.depth_sensor.get_depth_scale()

            self.get_depth_units()
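The intrinsics and depth scale fetched above are exactly what deprojection needs; a hypothetical helper (not in the original class) turning a pixel into a camera-space 3-D point could look like:

    def pixel_to_point(self, depth_frame, u, v):
        # Sketch: depth in metres at pixel (u, v), then deproject
        # using the stored intrinsics.
        z = depth_frame.get_distance(u, v)
        return rs.rs2_deproject_pixel_to_point(self.intrinsics, [u, v], z)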
Code Example #30
    def __init__(self):
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.pipeline.start(config)

        self.colorizer = rs.colorizer(0)
Code Example #31
    # Start streaming from file
    pipeline.start(config)

    # Create opencv window to render image in
    cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)

    # Streaming loop
    while True:
        # Get frameset of depth
        frames = pipeline.wait_for_frames()

        # Get depth frame
        depth_frame = frames.get_depth_frame()

        # Colorize depth frame to jet colormap
        depth_color_frame = rs.colorizer().colorize(depth_frame)

        # Convert depth_frame to numpy array to render image in opencv
        depth_color_image = np.asanyarray(depth_color_frame.get_data())

        # Render image in opencv window
        cv2.imshow("Depth Stream", depth_color_image)
        key = cv2.waitKey(1)
        # if pressed escape exit program
        if key == 27:
            cv2.destroyAllWindows()
            break

finally:
    pass
Code Example #32
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()


def mouse_cb(event, x, y, flags, param):

    if event == cv2.EVENT_LBUTTONDOWN:
        state.mouse_btns[0] = True

    if event == cv2.EVENT_LBUTTONUP:
        state.mouse_btns[0] = False

    if event == cv2.EVENT_RBUTTONDOWN:
        state.mouse_btns[1] = True

    if event == cv2.EVENT_RBUTTONUP:
        state.mouse_btns[1] = False
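For the callback above to fire, it has to be registered on a named window; a minimal sketch, assuming the viewer window is called 'RealSense':

    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback('RealSense', mouse_cb)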