Example #1
    def __init__(self):
        self.conversion_service = ConversionService.get_instance()

        self.pipeline = rs.pipeline()
        config = rs.config()

        self.threshold_filter = rs.threshold_filter()
        self.threshold_filter.set_option(rs.option.max_distance, 4)

        config.enable_stream(rs.stream.depth, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, rs.format.bgr8, 30)

        # Start streaming
        self.pipeline.start(config)
        self.profile = self.pipeline.get_active_profile()

        self.sensor = self.profile.get_device().first_depth_sensor()
        self.sensor.set_option(rs.option.visual_preset, value=1)
        self.sensor.set_option(rs.option.motion_range, value=200)

        # Set colour palette to white to black
        self.colorizer = rs.colorizer(3)

        self.colorizer.set_option(rs.option.min_distance, 0)
        self.colorizer.set_option(rs.option.max_distance, 0.1)
        self.colorizer.set_option(rs.option.histogram_equalization_enabled,
                                  False)
Example #2
    def getDepthFrame(self):
        # frames = self.pipeline.wait_for_frames()

        if (len(self.depthFramesBuffer) > 0):
            print(',')
            depthFrame = self.depthFramesBuffer[0]

            spatial = rs.spatial_filter()
            spatial.set_option(rs.option.filter_magnitude, 2)
            spatial.set_option(rs.option.filter_smooth_alpha, 1)
            spatial.set_option(rs.option.filter_smooth_delta, 50)
            spatial.set_option(rs.option.holes_fill, 2)

            # hole_filling = rs.hole_filling_filter()
            # hole_filling.set_option(rs.option.filter_magnitude, 2)

            thresh = rs.threshold_filter(0.1, 2)

            #
            # depthImage = cv2.bilateralFilter(depthImage,9,75,75)
            depthFrame = thresh.process(depthFrame)
            depthFrame = spatial.process(depthFrame)
            # depthFrame = hole_filling.process(depthFrame)
            depthImage = np.asanyarray(depthFrame.get_data())
            # depthImage = depthImage * 255

            # depthImage = cv2.medianBlur(depthImage,1)
            # depthImage = cv2.GaussianBlur(depthImage,(3,3),3)

            del self.depthFramesBuffer[0]
        else:
            frames = self.pipeline.wait_for_frames()
            self.colorFramesBuffer.append(frames.get_color_frame())

            depthFrame = frames.get_depth_frame()
            depthFrame = rs.hole_filling_filter().process(depthFrame)
            depthImage = np.asanyarray(depthFrame.get_data())

            depthImage = cv2.GaussianBlur(depthImage, (11, 11), 5)
            depthImage = cv2.medianBlur(depthImage, 51)
            depthImage = cv2.bilateralFilter(depthImage, 9, 75, 75)

            print(len(self.depthFramesBuffer))
            # del self.framesBuffer[-1]

        depthColorMap = cv2.applyColorMap(
            cv2.convertScaleAbs(depthImage, alpha=0.003), cv2.COLORMAP_JET)

        return depthColorMap
Example #3
    def __init__(self, debugFlag=False, debugPath=''):
        self.debugFlag = debugFlag

        # Decimation - reduces depth frame density
        self.decimateFilter = rs.decimation_filter()

        self.thresholdFilter = rs.threshold_filter(min_dist=1.8, max_dist=3)

        # Converts from depth representation to disparity representation and vice versa in depth frames
        self.depth_to_disparity = rs.disparity_transform(True)
        # Spatial    - edge-preserving spatial smoothing
        self.spatial_filter = rs.spatial_filter()
        # Temporal   - reduces temporal noise
        self.temporalFilter = rs.temporal_filter()
        self.disparity_to_depth = rs.disparity_transform(False)
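
The comments above describe each filter individually; the sketch below shows the order in which such a chain is usually applied to a depth frame. The applyFilters helper is an assumption added for illustration, not part of the original class.

    def applyFilters(self, depthFrame):
        # Assumed helper (illustrative only): decimate, clamp by distance,
        # convert to disparity, smooth spatially and temporally, then convert
        # back to depth (a commonly used order).
        depthFrame = self.decimateFilter.process(depthFrame)
        depthFrame = self.thresholdFilter.process(depthFrame)
        depthFrame = self.depth_to_disparity.process(depthFrame)
        depthFrame = self.spatial_filter.process(depthFrame)
        depthFrame = self.temporalFilter.process(depthFrame)
        depthFrame = self.disparity_to_depth.process(depthFrame)
        return depthFrame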
Example #4
def get_filter(filter_name: str, *args) -> rs2.filter_interface:
    """
    A factory for filters (could be replaced with a dict lookup).
    :param filter_name: The filter name
    :param args: The arguments for the filter
    :return: A filter that corresponds to the filter name, constructed with its arguments
    """
    if filter_name == 'decimation_filter':
        return rs2.decimation_filter(*args)
    elif filter_name == 'threshold_filter':
        return rs2.threshold_filter(*args)
    elif filter_name == 'disparity_transform':
        return rs2.disparity_transform(*args)
    elif filter_name == 'spatial_filter':
        return rs2.spatial_filter(*args)
    elif filter_name == 'temporal_filter':
        return rs2.temporal_filter(*args)
    elif filter_name == 'hole_filling_filter':
        return rs2.hole_filling_filter(*args)
    else:
        raise Exception(f'The filter \'{filter_name}\' does not exist!')
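
A brief usage sketch for the factory above; it assumes rs2 is the pyrealsense2 module and that depth_frame comes from an already started pipeline (both are assumptions, not shown in the example):

post_processing = [
    get_filter('decimation_filter', 2.0),
    get_filter('threshold_filter', 0.15, 4.0),
    get_filter('spatial_filter'),
    get_filter('temporal_filter'),
]
for f in post_processing:
    # each filter exposes process(); apply them in the listed order
    depth_frame = f.process(depth_frame)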
Example #5
def capture_point_cloud(number_of_frames: int = 1, camera_id: str = "", max_dist: float = 0.5, min_dist: float = 0.1) \
        -> np.ndarray:
    """
    Start a pipeline with a depth stream configuration (and enable a given camera id, if given. else - takes depth
    frames from all connected cameras). Then, wait for frameset, apply a threshold filter, convert to depth frame,
    and pass it to a point cloud realsense2 object to process and get the points, which it returns as a numpy array.
    :param number_of_frames: The number of frames to sample.
    :param camera_id: The camera id to get the depth frame from.
    :param max_dist: the maximum distance between the camera and the object.
    :param min_dist: the minimum distance between the camera and the object.
    :return: point cloud's points as a numpy array.
    """
    # Initialize the numpy array that we will return
    vertices: np.ndarray = np.asanyarray([])
    # Declare RealSense pipeline, encapsulating the actual device and sensor (depth sensor)
    pipe: rs2.pipeline = rs2.pipeline()
    config: rs2.config = rs2.config()
    # Enable depth stream
    config.enable_stream(rs2.stream.depth)
    # If there was given a camera_id, enable it in the configs
    if len(camera_id) == 12:
        config.enable_device(camera_id)
    # Start streaming with chosen configuration
    pipe.start(config)

    # Create a threshold filter with the given min/max distance
    thresh_filter: rs2.filter = rs2.threshold_filter(max_dist=max_dist, min_dist=min_dist)

    try:
        if DEBUG:
            print("Taking Picture")
        # Wait for 30 frames to make it more accurate
        for _ in range(30):
            pipe.wait_for_frames()

        # Wait for the next set of frames from the camera
        frames: rs2.composite_frame = pipe.wait_for_frames()

        if DEBUG:
            print("Processing Picture")

        # Apply the thresh filter on the frames we just got
        filtered_frames: rs2.composite_frame = thresh_filter.process(frames).as_frameset()

        # Convert the filtered frames to depth frame
        filtered_depth_frame: rs2.depth_frame = filtered_frames.get_depth_frame()

        # Declare realsense's pointcloud object, for calculating pointclouds
        rs_pc: rs2.pointcloud = rs2.pointcloud()

        # Get the point cloud's points of the filtered depth frame
        points: rs2.points = rs_pc.calculate(filtered_depth_frame)

        # Convert the points to a numpy array
        vertices: np.ndarray = np.asanyarray(points.get_vertices())
        # Eliminate the origin points (there are a lot of (0,0,0) that just make the calculation time longer)
        vertices: np.ndarray = np.asanyarray([[x, y, z] for (x, y, z) in vertices if not x == y == z == 0])
    finally:
        # stop the pipeline
        pipe.stop()
    # return the point cloud's points
    return vertices
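
A short usage sketch for the function above (the distance values are illustrative); it returns the surviving vertices as an array of [x, y, z] rows:

if __name__ == '__main__':
    # Capture one filtered point cloud from any connected camera,
    # keeping points between 0.1 m and 0.5 m from the sensor.
    cloud_points = capture_point_cloud(max_dist=0.5, min_dist=0.1)
    print(cloud_points.shape)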
Example #6
# declare RealSense pipeline, encapsulating the actual device and sensors
pipe: rs2.pipeline = rs2.pipeline()
config: rs2.config = rs2.config()
# enable depth stream
config.enable_stream(rs2.stream.depth)

# start streaming with chosen configuration
pipe.start(config)

# we'll use the colorizer to generate texture for our PLY
# (alternatively, texture can be obtained from color or infrared stream)
colorizer: rs2.colorizer = rs2.colorizer()

# create a threshold filter that keeps depth values between 0.0 and 2.0 meters
thresh_filter: rs2.filter = rs2.threshold_filter(max_dist=2.0, min_dist=0.0)

try:
    # wait for 30 frames to make it more accurate
    for _ in range(30):
        pipe.wait_for_frames()

    # wait for the next set of frames from the camera
    frames: rs2.composite_frame = pipe.wait_for_frames()

    print("Applying thresh filter")
    # apply the thresh filter on the frame we just got
    filtered_frames: rs2.depth_frame = thresh_filter.process(frames)

    # colorize the filtered frames
    colorized = colorizer.process(filtered_frames)
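
    # A hedged continuation sketch (not part of the original snippet): the
    # colorized frames are typically handed to rs2.save_to_ply to write a
    # textured point cloud; the output filename below is only illustrative.
    ply = rs2.save_to_ply("filtered.ply")
    ply.set_option(rs2.save_to_ply.option_ply_binary, False)
    ply.process(colorized)
finally:
    pipe.stop()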
Example #7
def start_node(task_queue, result_queue):

    ##################################################################
    # Vibration Control Util
    ##################################################################

    try:
        board = Arduino('COM5')
    except:
        board = Arduino('COM3')

    pins = [
        board.get_pin('d:10:p'),
        board.get_pin('d:9:p'),
        board.get_pin('d:6:p'),
        board.get_pin('d:5:p'),
        board.get_pin('d:3:p')
    ]

    def set_vib(l: list):
        val, idx = min((val, idx) for (idx, val) in enumerate(l))
        pins[idx].write(max(1 - l[idx] / 60, 0))
        for i in range(len(l)):
            if i != idx:
                pins[i].write(0)

    ##################################################################
    # Vision Setup
    ##################################################################

    pipeline = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)

    pipeline.start(config)
    print("Start camera pipelining ...")

    align_to = rs.align(rs.stream.color)

    profile_data = pipeline.wait_for_frames().get_color_frame()

    # Calculate crop Size
    cropSize = (0, 0)
    if profile_data.width / float(profile_data.height) > WHRatio:
        cropSize = (int(profile_data.height * WHRatio), profile_data.height)
    else:
        cropSize = (profile_data.width, int(profile_data.width / WHRatio))
    crop = [(int((profile_data.width - cropSize[1]) / 2),
             int((profile_data.height - cropSize[0]) / 2)), cropSize]

    last_frame_number = 0

    colorizer = rs.colorizer()

    decimation = rs.decimation_filter()
    threshold = rs.threshold_filter()
    spatial = rs.spatial_filter()

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)

    try:
        while True:
            frame = pipeline.wait_for_frames()

            frame = align_to.process(frame)

            color_frame = frame.get_color_frame()
            depth_frame = frame.get_depth_frame()

            depth_frame = decimation.process(depth_frame)
            depth_frame = threshold.process(depth_frame)
            depth_frame = depth_to_disparity.process(depth_frame)
            depth_frame = spatial.process(depth_frame)
            depth_frame = disparity_to_depth.process(depth_frame)

            color_mat = np.asanyarray(color_frame.get_data())
            depth_mat = np.asanyarray(depth_frame.get_data())

            if depthmap_visualization:
                depth_colormap = np.asanyarray(
                    colorizer.colorize(depth_frame).get_data())
                cv2.imshow("Depth Map", depth_colormap)
            if colormap_visualization:
                cv2.imshow("Color Map", color_mat)

            intensity = obstacleEstimate(depth_mat)
            if depthintensity_verbose:
                print("Intensity:", intensity)

            set_vib(intensity)

            task = None
            objDetectEnabled = False
            try:
                task = task_queue.get_nowait()
                if task is None:
                    break
                else:
                    objDetectEnabled = True
            except queue.Empty:
                pass

            # Detection by keyboard for testing
            key = cv2.waitKey(1)
            if key == 32: objDetectEnabled = True

            if objDetectEnabled:
                if color_frame.get_frame_number() != last_frame_number:
                    last_frame_number = color_frame.get_frame_number()

                    # Record start time
                    if objdetection_verbose:
                        start_time = time.perf_counter()

                    objects = objectDetect(color_mat, depth_mat, crop)

                    # Send result to the message queue
                    if task is not None:
                        res = list(task)
                        res.append(objects)
                        result_queue.put(tuple(res))

                    for obj in objects:
                        className, confidence, rect, m = obj["classname"], obj[
                            "confidence"], obj["rect"], obj["distance"]
                        conf = "%s(%f), %fm" % (className, confidence,
                                                m / 1000)

                        if objdetection_verbose:
                            print("Detected: %s" % conf)

                        if objdetect_visualization:
                            cv2.rectangle(color_mat, (rect[0], rect[1]),
                                          (rect[2], rect[3]), (0, 255, 0))
                            cv2.putText(color_mat, conf, (rect[0], rect[1]),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (0, 0, 0))
                    if objdetection_verbose:
                        print("Detection time: %fs" %
                              (time.clock() - start_time))

                    if objdetect_visualization:
                        cv2.imshow("Object Detection", color_mat)

    except KeyboardInterrupt:
        print("Shutdown realsense worker ...")
    finally:
        # Stop streaming
        pipeline.stop()
        set_vib([60, 60, 60, 60, 60])
Example #8
def main():

    if not os.path.exists(args.out_rdir):
        os.mkdir(args.out_rdir)
        print('Created out_rgb_dir')
    if not os.path.exists(args.out_ddir):
        os.mkdir(args.out_ddir)
        print('Created out_depth_dir')
    if not os.path.exists(args.annotations_dir):
        os.mkdir(args.annotations_dir)
        print('Created args_out_dir')

    #Create pipeline for realsense2
    pipe = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.rgb8, 30)
    profile = pipe.start(config)

    # Declare filters
    dc_ftr = rs.decimation_filter()  # Decimation - reduces depth frame density
    st_ftr = rs.spatial_filter()  # Spatial    - edge-preserving spatial smoothing
    tp_ftr = rs.temporal_filter()  # Temporal   - reduces temporal noise
    th_ftr = rs.threshold_filter()

    align_to = rs.stream.color
    align = rs.align(align_to)
    rgbd = np.zeros((1, 4, 360, 640))
    i = 0
    print(
        '################|Initialization sequence completed.|################')

    try:
        while True:

            # Get frameset of color and depth
            frames = pipe.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()  # 640x360, aligned to the colour stream
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            # Post processing
            filtered = tp_ftr.process(
                st_ftr.process(dc_ftr.process(aligned_depth_frame)))
            filtered = th_ftr.process(
                filtered)  #can be removed if person is outside frame

            rgb = np.asanyarray(color_frame.get_data())
            rgb = np.transpose(rgb, (2, 0, 1))
            depth = np.asanyarray(aligned_depth_frame.get_data(),
                                  dtype=np.uint16)
            im.show(rgb, args.bb)
            im.rgbsave(i, rgb, str('./' + args.out_rdir))
            im.dsave(i, depth, str('./' + args.out_ddir))
            np.savetxt('./' + args.annotations_dir + '/' + str(i) + '.csv',
                       args.bb,
                       delimiter=',')
            i = i + 1
            print('saved ' + str(i) + ' images')
            # time.sleep(1)
    finally:
        pipe.stop()
        print('################| Pipeline stopped |################')
Example #9
profile = pipeline.start(config)

align_to = rs.stream.depth
align = rs.align(align_to)

min_depth = 0.3
max_depth = 4.0

# Create colorizer object
colorizer = rs.colorizer()
colorizer.set_option(rs.option.color_scheme, 9)
colorizer.set_option(rs.option.histogram_equalization_enabled, 0)
colorizer.set_option(rs.option.min_distance, min_depth)
colorizer.set_option(rs.option.max_distance, max_depth)
# Filter
thr_filter = rs.threshold_filter()
thr_filter.set_option(rs.option.min_distance, min_depth)
thr_filter.set_option(rs.option.max_distance, max_depth)

# Disparity
dp_filter = rs.disparity_transform()

#https://dev.intelrealsense.com/docs/depth-image-compression-by-colorization-for-intel-realsense-depth-cameras#section-6references
#https://github.com/TetsuriSonoda/rs-colorize/blob/master/rs-colorize/rs-colorize.cpp

start = timer()
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.moveWindow("RealSense", 0, 0)

depth_sensor = profile.get_device().first_depth_sensor()
Example #10
obstacle_line_thickness_pixel = 10  # [1-DEPTH_HEIGHT]: Number of pixel rows to use to generate the obstacle distance message. For each column, the scan will return the minimum value for those pixels centered vertically in the image.

USE_PRESET_FILE = True
PRESET_FILE = "../cfg/d4xx-default.json"

RTSP_STREAMING_ENABLE = True
RTSP_PORT = "8554"
RTSP_MOUNT_POINT = "/d4xx"

# List of filters to be applied, in this order.
# https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md

filters = [
    [True, "Decimation Filter", rs.decimation_filter()],
    [True, "Threshold Filter", rs.threshold_filter()],
    [True, "Depth to Disparity", rs.disparity_transform(True)],
    [True, "Spatial Filter", rs.spatial_filter()],
    [True, "Temporal Filter", rs.temporal_filter()],
    [False, "Hole Filling Filter", rs.hole_filling_filter()],
    [True, "Disparity to Depth", rs.disparity_transform(False)]
]

#
# The filters can be tuned with the opencv_depth_filtering.py script and the resulting values saved here.
# Individual filters have different options, so the values have to be applied to each filter accordingly.
#
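
A minimal sketch of how a list in this shape is typically consumed on every frame; depth_frame is an assumption here, taken from a frameset returned by the (not shown) pipeline:

for enabled, label, post_filter in filters:
    if enabled:
        depth_frame = post_filter.process(depth_frame)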
Example #11
def threshold(depth_frame, min_depth, max_depth):
    threshold_filter = rs.threshold_filter(min_dist=min_depth,
                                           max_dist=max_depth)
    return threshold_filter.process(depth_frame)
Example #12
class CameraD435(Camera):

    CFG_MODE_1 = 1  # Enable RGB and depth streams and don't save frames
    CFG_MODE_2 = 2  # Enable RGB and infrared stream and don't save frames
    CFG_MODE_3 = 3  # Enable only infrared stream and don't save frames
    CFG_MODE_4 = 4  # Custom

    # rs2_stream are types of data provided by RealSense device
    stream_type = {
        'depth': rs.stream.depth,
        'colour': rs.stream.color,
        'infrared': rs.stream.infrared
    }

    # rs2_format identifies how binary data is encoded within a frame
    stream_format = {
        'depth': rs.format.z16,
        'colour': rs.format.bgr8,
        'infrared': rs.format.y8
    }

    filters = {
        'decimation': rs.decimation_filter(),
        'threshold': rs.threshold_filter(),
        'depth_to_disparity': rs.disparity_transform(True),
        'spatial': rs.spatial_filter(),
        'temporal': rs.temporal_filter(),
        'hole_filling': rs.hole_filling_filter(),
        'disparity_to_depth': rs.disparity_transform(False)
    }

    DS5_product_ids = [
        "0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00",
        "0B01", "0B03", "0B07", "0B3A", "0B5C"
    ]

    def __init__(self,
                 configuration_mode=1,
                 enable_rgb_stream=True,
                 enable_depth_stream=True,
                 enable_infrared_stream=True,
                 save_rgb_frames=False,
                 save_depth_frames=False,
                 save_infrared_frames=False):

        self._init_mode(configuration_mode, enable_rgb_stream,
                        enable_depth_stream, enable_infrared_stream,
                        save_rgb_frames, save_depth_frames,
                        save_infrared_frames)

        super().__init__(Camera.TYPE_D435)
        self.logger.log_info("Starting camera in configuration mode %d" %
                             configuration_mode)
        self._setup_save_dir()
        self.colour_frame_process = None
        self.depth_frame_process = None
        self.ir1_frame_process = None
        self.ir2_frame_process = None

    def _init_mode(self, configuration_mode, enable_rgb_stream,
                   enable_depth_stream, enable_infrared_stream,
                   save_rgb_frames, save_depth_frames, save_infrared_frames):
        if configuration_mode == self.CFG_MODE_1:
            cfg = [True, True, False, False, False, False]
        elif configuration_mode == self.CFG_MODE_2:
            cfg = [True, False, True, False, False, False]
        elif configuration_mode == self.CFG_MODE_3:
            cfg = [False, False, True, False, False, False]
        else:
            cfg = [
                enable_rgb_stream, enable_depth_stream, enable_infrared_stream,
                save_rgb_frames, save_depth_frames, save_infrared_frames
            ]

        self.enable_rgb_stream = cfg[0]
        self.enable_depth_stream = cfg[1]
        self.enable_infrared_stream = cfg[2]
        self.save_rgb_frames = cfg[3]
        self.save_depth_frames = cfg[4]
        self.save_infrared_frames = cfg[5]

    def _setup_parameters(self):
        self.depth_range_m = [
            self.cfg.d435['depth_min_m'], self.cfg.d435['depth_max_m']
        ]

        if self.cfg.d435['filters']['threshold']:
            self.filters['threshold'].set_option(rs.option.min_distance,
                                                 self.depth_range_m[0])
            self.filters['threshold'].set_option(rs.option.max_distance,
                                                 self.depth_range_m[1])

        self.camera_facing_angle_degree = self.cfg.d435[
            'camera_facing_angle_degree']
        self.device_id = None
        self.obstacle_line_height_ratio = self.cfg.d435[
            'obstacle_line_height_ratio']
        self.obstacle_line_thickness_pixel = self.cfg.d435[
            'obstacle_line_thickness_pixel']

        self.depth_scale = 0
        self.colorizer = rs.colorizer()
        self.depth_hfov_deg = None
        self.depth_vfov_deg = None

        self.stream_cfg = {
            'd_w': self.cfg.d435['depth_width'],
            'd_h': self.cfg.d435['depth_height'],
            'd_fps': self.cfg.d435['depth_fps'],
            'c_w': self.cfg.d435['color_width'],
            'c_h': self.cfg.d435['color_height'],
            'c_fps': self.cfg.d435['color_fps'],
            'i_w': self.cfg.d435['infrared_width'],
            'i_h': self.cfg.d435['infrared_height'],
            'i_fps': self.cfg.d435['infrared_fps']
        }

        self._initialize_compute_vars()
        # body offset - see initial script
        self.metadata = [
            'enable_rgb_stream', 'enable_infrared_stream',
            'enable_depth_stream'
        ]

    def _initialize_compute_vars(self):
        self.vehicle_pitch_rad = None
        self.current_time_us = 0
        self.last_obstacle_distance_sent_ms = 0  # value of current_time_us when obstacle_distance last sent

        # Obstacle distances in front of the sensor, starting from the left in increment degrees to the right
        # See here: https://mavlink.io/en/messages/common.html#OBSTACLE_DISTANCE
        self.min_depth_cm = int(self.depth_range_m[0] * 100)  # In cm
        self.max_depth_cm = int(self.depth_range_m[1] *
                                100)  # In cm, should be a little conservative
        self.distances_array_length = 72
        self.angle_offset = None
        self.increment_f = None
        self.distances = np.ones((self.distances_array_length, ),
                                 dtype=np.uint16) * (self.max_depth_cm + 1)
        self.obstacle_distance_data = None

    def _find_device_that_supports_advanced_mode(self):
        ctx = rs.context()
        devices = ctx.query_devices()
        for dev in devices:
            if dev.supports(rs.camera_info.product_id) and str(
                    dev.get_info(
                        rs.camera_info.product_id)) in self.DS5_product_ids:
                name = rs.camera_info.name
                if dev.supports(name):
                    self.logger.log_info(
                        "Found device that supports advanced mode: %s" %
                        dev.get_info(name))
                    self.device_id = dev.get_info(rs.camera_info.serial_number)
                    return dev

        raise Exception("No device that supports advanced mode was found")

    # Loop until we successfully enable advanced mode
    def _realsense_enable_advanced_mode(self, advnc_mode):
        while not advnc_mode.is_enabled():
            self.logger.log_info("Trying to enable advanced mode...")
            advnc_mode.toggle_advanced_mode(True)
            # At this point the device will disconnect and re-connect.
            self.logger.log_info("Sleeping for 5 seconds...")
            time.sleep(5)
            # The 'dev' object will become invalid and we need to initialize it again
            dev = self._find_device_that_supports_advanced_mode()
            advnc_mode = rs.rs400_advanced_mode(dev)
            self.logger.log_info(
                "Advanced mode is %s" %
                ("enabled" if advnc_mode.is_enabled() else "disabled"))

    # Load the settings stored in the JSON file
    def _realsense_load_settings_file(self, advnc_mode, setting_file):
        # Sanity checks
        if os.path.isfile(setting_file):
            self.logger.log_info("Setting file found: %s" % setting_file)
        else:
            self.logger.log_info("Cannot find setting file: %s" % setting_file)
            sys.exit()

        if advnc_mode.is_enabled():
            self.logger.log_info("Advanced mode is enabled")
        else:
            self.logger.log_info("Device does not support advanced mode")
            sys.exit()

        # Input for load_json() is the content of the json file, not the file path
        with open(setting_file, 'r') as file:
            json_text = file.read().strip()

        advnc_mode.load_json(json_text)

    def _realsense_configure_setting(self, setting_file):
        device = self._find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(device)
        self._realsense_enable_advanced_mode(advnc_mode)
        self._realsense_load_settings_file(advnc_mode, setting_file)

    def _enable_stream(self, stype, config):
        if stype not in ['depth', 'colour', 'infrared']:
            raise Exception("Stream type invalid")

        ix = stype[0]
        if stype == 'infrared':
            config.enable_stream(self.stream_type[stype], 1,
                                 self.stream_cfg['%s_w' % ix],
                                 self.stream_cfg['%s_h' % ix],
                                 self.stream_format[stype],
                                 self.stream_cfg['%s_fps' % ix])
            config.enable_stream(self.stream_type[stype], 2,
                                 self.stream_cfg['%s_w' % ix],
                                 self.stream_cfg['%s_h' % ix],
                                 self.stream_format[stype],
                                 self.stream_cfg['%s_fps' % ix])
        else:
            config.enable_stream(self.stream_type[stype],
                                 self.stream_cfg['%s_w' % ix],
                                 self.stream_cfg['%s_h' % ix],
                                 self.stream_format[stype],
                                 self.stream_cfg['%s_fps' % ix])

        return config

    def _open_pipe(self):
        self.pipe = rs.pipeline()
        config = rs.config()
        # Configure image stream(s)
        if self.device_id:
            # connect to a specific device ID
            config.enable_device(self.device_id)

        if self.enable_rgb_stream:
            config = self._enable_stream('colour', config)
        if self.enable_depth_stream:
            config = self._enable_stream('depth', config)
        if self.enable_infrared_stream:
            config = self._enable_stream('infrared', config)

        # Start streaming with requested config
        profile = self.pipe.start(config)

        if self.enable_depth_stream:
            # Getting the depth sensor's depth scale (see rs-align example for explanation)
            depth_sensor = profile.get_device().first_depth_sensor()
            self.depth_scale = depth_sensor.get_depth_scale()
            self.logger.log_info("Depth scale is: %s" % self.depth_scale)

            self._set_obstacle_distance_params()

    # Setting parameters for the OBSTACLE_DISTANCE message based on actual camera's intrinsics and user-defined params
    def _set_obstacle_distance_params(self):
        # Obtain the intrinsics from the camera itself
        profiles = self.pipe.get_active_profile()
        depth_intrinsics = profiles.get_stream(
            self.stream_type['depth']).as_video_stream_profile().intrinsics
        self.logger.log_info("Depth camera intrinsics: %s" % depth_intrinsics)

        # For forward facing camera with a horizontal wide view:
        #   HFOV=2*atan[w/(2.fx)],
        #   VFOV=2*atan[h/(2.fy)],
        #   DFOV=2*atan(Diag/2*f),
        #   Diag=sqrt(w^2 + h^2)
        self.depth_hfov_deg = m.degrees(2 * m.atan(self.stream_cfg['d_w'] /
                                                   (2 * depth_intrinsics.fx)))
        self.depth_vfov_deg = m.degrees(2 * m.atan(self.stream_cfg['d_h'] /
                                                   (2 * depth_intrinsics.fy)))
        self.logger.log_info("Depth camera HFOV: %0.2f degrees" %
                             self.depth_hfov_deg)
        self.logger.log_info("Depth camera VFOV: %0.2f degrees" %
                             self.depth_vfov_deg)

        self.angle_offset = self.camera_facing_angle_degree - (
            self.depth_hfov_deg / 2)
        self.increment_f = self.depth_hfov_deg / self.distances_array_length
        self.logger.log_info("OBSTACLE_DISTANCE angle_offset: %0.3f" %
                             self.angle_offset)
        self.logger.log_info("OBSTACLE_DISTANCE increment_f: %0.3f" %
                             self.increment_f)
        self.logger.log_info(
            "OBSTACLE_DISTANCE coverage: from %0.3f to %0.3f degrees" %
            (self.angle_offset, self.angle_offset +
             self.increment_f * self.distances_array_length))

        # Sanity check for depth configuration
        if self.obstacle_line_height_ratio < 0 or self.obstacle_line_height_ratio > 1:
            self.logger.log_info(
                "Please make sure the horizontal position is within [0-1]: %s"
                % self.obstacle_line_height_ratio)
            sys.exit()

        if self.obstacle_line_thickness_pixel < 1 or self.obstacle_line_thickness_pixel > self.stream_cfg[
                'd_h']:
            self.logger.log_info(
                "Please make sure the thickness is within [0-depth_height]: %s"
                % self.obstacle_line_thickness_pixel)
            sys.exit()

    # Find the height of the horizontal line to calculate the obstacle distances
    #   - Basis: depth camera's vertical FOV, user's input
    #   - Compensation: vehicle's current pitch angle
    def _find_obstacle_line_height(self):
        # Basic position
        obstacle_line_height = self.stream_cfg[
            'd_h'] * self.obstacle_line_height_ratio

        attitude = self.rb_r.get_key('ATTITUDE')
        if attitude is not None:
            self.vehicle_pitch_rad = attitude['pitch']

        # Compensate for the vehicle's pitch angle if data is available
        if self.vehicle_pitch_rad is not None and self.depth_vfov_deg is not None:
            delta_height = m.sin(self.vehicle_pitch_rad / 2) / m.sin(
                m.radians(self.depth_vfov_deg) / 2) * self.stream_cfg['d_h']
            obstacle_line_height += delta_height

        # Sanity check
        if obstacle_line_height < 0:
            obstacle_line_height = 0
        elif obstacle_line_height > self.stream_cfg['d_h']:
            obstacle_line_height = self.stream_cfg['d_h']

        return obstacle_line_height

    # Calculate the distances array by dividing the FOV (horizontal) into $distances_array_length rays,
    # then pick out the depth value at the pixel corresponding to each ray. Based on the definition of
    # the MAVLink messages, the invalid distance value (below MIN/above MAX) will be replaced with MAX+1.
    #
    # [0]    [35]   [71]    <- Output: distances[72]
    #  |      |      |      <- step = width / 72
    #  ---------------      <- horizontal line, or height/2
    #  \      |      /
    #   \     |     /
    #    \    |    /
    #     \   |   /
    #      \  |  /
    #       \ | /
    #       Camera          <- Input: depth_mat, obtained from depth image
    #
    # Note that we assume the input depth_mat is already processed by at least hole-filling filter.
    # Otherwise, the output array might not be stable from frame to frame.
    # @njit   # Uncomment to optimize for performance. This uses numba which requires llmvlite (see instruction at the top)
    def _distances_from_depth_image(self, obstacle_line_height, depth_mat):
        min_depth_m = self.depth_range_m[0]
        max_depth_m = self.depth_range_m[1]

        # Parameters for depth image
        depth_img_width = depth_mat.shape[1]
        depth_img_height = depth_mat.shape[0]

        # Parameters for obstacle distance message
        step = depth_img_width / self.distances_array_length

        for i in range(self.distances_array_length):
            # Each range (left to right) is found from a set of rows within a column
            #  [ ] -> ignored
            #  [x] -> center + obstacle_line_thickness_pixel / 2
            #  [x] -> center = obstacle_line_height (moving up and down according to the vehicle's pitch angle)
            #  [x] -> center - obstacle_line_thickness_pixel / 2
            #  [ ] -> ignored
            #   ^ One of [distances_array_length] number of columns, from left to right in the image
            center_pixel = obstacle_line_height
            upper_pixel = center_pixel + self.obstacle_line_thickness_pixel / 2
            lower_pixel = center_pixel - self.obstacle_line_thickness_pixel / 2

            # Sanity checks
            if upper_pixel > depth_img_height:
                upper_pixel = depth_img_height
            elif upper_pixel < 1:
                upper_pixel = 1
            if lower_pixel > depth_img_height:
                lower_pixel = depth_img_height - 1
            elif lower_pixel < 0:
                lower_pixel = 0

            # Converting depth from uint16_t unit to metric unit. depth_scale is usually 1mm following ROS convention.
            # dist_m = depth_mat[int(obstacle_line_height), int(i * step)] * depth_scale
            min_point_in_scan = np.min(
                depth_mat[int(lower_pixel):int(upper_pixel),
                          int(i * step)])
            dist_m = min_point_in_scan * self.depth_scale

            # Default value, unless overwritten:
            #   A value of max_distance + 1 (cm) means no obstacle is present.
            #   A value of UINT16_MAX (65535) for unknown/not used.
            self.distances[i] = 65535

            # Note that dist_m is in meter, while distances[] is in cm.
            if dist_m > min_depth_m and dist_m < max_depth_m:
                self.distances[i] = dist_m * 100

        if self.current_time_us == self.last_obstacle_distance_sent_ms:
            # no new frame
            return
        self.last_obstacle_distance_sent_ms = self.current_time_us
        if self.angle_offset is None or self.increment_f is None:
            self.logger.log_warn(
                "Please call set_obstacle_distance_params before continuing")
        else:
            self.obstacle_distance_data = [
                self.current_time_us,  # us Timestamp (UNIX time or time since system boot)
                0,  # sensor_type, defined here: https://mavlink.io/en/messages/common.html#MAV_DISTANCE_SENSOR
                self.distances.tolist(),  # distances,    uint16_t[72],   cm
                0,  # increment,    uint8_t,        deg
                self.min_depth_cm,  # min_distance, uint16_t,       cm
                self.max_depth_cm,  # max_distance, uint16_t,       cm
                self.increment_f,  # increment_f,  float,          deg
                self.angle_offset,  # angle_offset, float,          deg
                12  # MAV_FRAME, vehicle-front aligned: https://mavlink.io/en/messages/common.html#MAV_FRAME_BODY_FRD    
            ]
            self.logger.log_debug("Captured obstacle distance data: %s" %
                                  str(self.obstacle_distance_data))

    def _setup_threads(self):
        super()._setup_threads()
        if self.enable_depth_stream:
            self.threads.append(Thread(target=self._save_obstacle_distance))

        self.threads.append(Thread(target=self._check_save_data_flags))
        self.threads.append(Thread(target=self._save_rgb_frames))
        self.threads.append(Thread(target=self._save_depth_frames))
        self.threads.append(Thread(target=self._save_infrared_frames))
        # self.threads.append(Thread(target=self._save_rgb_metadata))
        # self.threads.append(Thread(target=self._save_depth_metadata))
        # self.threads.append(Thread(target=self._save_ir_metadata))

    def _save_obstacle_distance(self):
        while not self.exit_threads:
            self.rb_i.add_key(self.obstacle_distance_data,
                              self.camera_type,
                              'obstacle_distance',
                              expiry=self.cfg.d435['save_redis_expiry'])

    def _save_distance_sensor(self):  # (average or singles ?)
        return

    def _check_save_data_flags(self):
        # Initialise keys
        self.rb_i.add_key(int(self.save_rgb_frames), self.camera_type,
                          'save_rgb_frames')
        self.rb_i.add_key(int(self.save_depth_frames), self.camera_type,
                          'save_depth_frames')
        self.rb_i.add_key(int(self.save_infrared_frames), self.camera_type,
                          'save_infrared_frames')
        while not self.exit_threads:
            self.save_rgb_frames = bool(
                self.rb_i.get_key(self.camera_type, 'save_rgb_frames'))
            self.save_depth_frames = bool(
                self.rb_i.get_key(self.camera_type, 'save_depth_frames'))
            self.save_infrared_frames = bool(
                self.rb_i.get_key(self.camera_type, 'save_infrared_frames'))
            time.sleep(1)

    def _save_frame(self,
                    csvwriter,
                    frame,
                    last_frame_time,
                    *prepend,
                    ref=None):
        # ref is keyword-only so that extra positional values (e.g. the IR
        # stream index passed by _save_infrared_frames) are collected into
        # *prepend and handed on to the metadata writer.
        try:
            profile = frame.get_profile()
            stype = profile.stream_name()
        except:
            return None

        if ref is None:
            ref = stype.lower()

        if frame:
            timestamp = frame.get_timestamp()
            if timestamp == last_frame_time:
                return timestamp
            try:
                if stype == 'stream.depth':
                    img = np.asanyarray(frame.as_frame().get_data())
                    img = cv2.convertScaleAbs(img, alpha=0.03)
                else:
                    img = np.asanyarray(frame.get_data())

                filename = "%s%s.%s.png" % (self.save_data_dir, str(timestamp),
                                            ref)
                cv2.imwrite(filename, img)
                self.logger.log_debug("Saved %s frame: %s" % (ref, filename))
                self._save_metadata(csvwriter, frame, ref, *prepend)
            except csv.Error:
                self.logger.log_warn("Could not write %s metadata" % ref)
                self.logger.log_debug(traceback.format_exc())
            except:
                self.logger.log_warn("Could not save %s frame" % ref)
                self.logger.log_debug(traceback.format_exc())

            return timestamp

        return None

    def _generate_csv_header(self, ir_stream=False):
        gps_header = [
            'gps_1_lat', 'gps_1_lon', 'gps_1_fix_type', 'gps_2_lat',
            'gps_2_lon', 'gps_2_fix_type'
        ]
        ir_header = ['ir_stream']

        if ir_stream:
            return ir_header + self.frame_metadata_values + gps_header

        return self.frame_metadata_values + gps_header

    def _save_metadata(self, csvwriter, frame, ref=None, *prepend):
        metadata = []
        if prepend:
            metadata = list(prepend) + metadata

        for value in self.frame_metadata_values:
            if frame.supports_frame_metadata(value):
                data = frame.get_frame_metadata(value)
            else:
                data = ""
            metadata.append(data)

        csvwriter.writerow(metadata)

    def _save_rgb_frames(self):
        last_frame_time = None
        filename = self.save_data_dir + 'rgb_metadata.csv'
        file_exists = os.path.exists(filename)
        with open(filename, 'a+', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            if not file_exists:
                header = self._generate_csv_header()
                csvwriter.writerow(header)

            while not self.exit_threads:
                if self.colour_frame_process and self.save_rgb_frames:
                    last_frame_time = self._save_frame(
                        csvwriter, self.colour_frame_process, last_frame_time)

    def _save_depth_frames(self):
        last_frame_time = None
        filename = self.save_data_dir + 'depth_metadata.csv'
        file_exists = os.path.exists(filename)
        with open(filename, 'a+', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            if not file_exists:
                header = self._generate_csv_header()
                csvwriter.writerow(header)

            while not self.exit_threads:
                if self.depth_frame_process and self.save_depth_frames:
                    last_frame_time = self._save_frame(
                        csvwriter, self.depth_frame_process, last_frame_time)

    def _save_infrared_frames(self):
        last_frame_time_1 = None
        last_frame_time_2 = None
        filename = self.save_data_dir + 'infrared_metadata.csv'
        file_exists = os.path.exists(filename)
        with open(filename, 'a+', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            if not file_exists:
                header = self._generate_csv_header(ir_stream=True)
                csvwriter.writerow(header)

            while not self.exit_threads:
                if self.ir1_frame_process and self.save_infrared_frames:
                    last_frame_time_1 = self._save_frame(
                        csvwriter,
                        self.ir1_frame_process,
                        last_frame_time_1,
                        1,
                        ref="infrared1")
                if self.ir2_frame_process and self.save_infrared_frames:
                    last_frame_time_2 = self._save_frame(
                        csvwriter,
                        self.ir2_frame_process,
                        last_frame_time_2,
                        2,
                        ref="infrared2")

    def start(self):
        if self.cfg.d435['use_preset_file']:
            dir_path = os.environ['MYCELIUM_CFG_ROOT']
            preset_file = os.path.join(dir_path, self.cfg.d435['preset_file'])
            self._realsense_configure_setting(preset_file)

        for t in self.threads:
            t.start()

        self._open_pipe()
        while not self.exit_threads:
            self._process_frames()

    def _process_frames(self):
        frames = self.pipe.wait_for_frames()
        color_frame = None
        depth_frame = None
        ir1_frame = None
        ir2_frame = None

        if self.enable_depth_stream:
            depth_frame = frames.get_depth_frame()
        if self.enable_rgb_stream:
            color_frame = frames.get_color_frame()
        if self.enable_infrared_stream:
            ir1_frame = frames.get_infrared_frame(1)
            ir2_frame = frames.get_infrared_frame(2)

        if color_frame:
            self.colour_frame_process = color_frame

        if ir1_frame:
            self.ir1_frame_process = ir1_frame

        if ir2_frame:
            self.ir2_frame_process = ir2_frame

        if depth_frame:
            # Store the timestamp for MAVLink messages
            self.current_time_us = int(round(time.time() * 1000000))

            # Apply the filters
            filtered_frame = depth_frame
            cfg_filters = self.cfg.d435['filters']
            for k, v in cfg_filters.items():
                if v:
                    filtered_frame = self.filters[k].process(filtered_frame)

            self.depth_frame_process = filtered_frame

            # Extract depth in matrix form
            depth_data = filtered_frame.as_frame().get_data()
            depth_mat = np.asanyarray(depth_data)

            # Create obstacle distance data from depth image
            obstacle_line_height = self._find_obstacle_line_height()
            self._distances_from_depth_image(obstacle_line_height, depth_mat)
Example #13
def main():
    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()
    pipe = rs.pipeline()
    cfg = rs.config()
    rs.config.enable_device_from_file(cfg,
                                      './20191211_160154.bag',
                                      repeat_playback=False)
    cfg.enable_stream(rs.stream.color, 424, 240, rs.format.rgb8,
                      6)  # color camera
    cfg.enable_stream(rs.stream.depth, 424, 240, rs.format.z16,
                      6)  # depth camera
    # pipe.start(cfg)
    profile = pipe.start(cfg)
    playback = profile.get_device().as_playback()
    playback.set_real_time(False)
    # define Filters
    thres_filter = rs.threshold_filter()
    depth_to_disparity = rs.disparity_transform(True)
    spat_filter = rs.spatial_filter()
    temp_filter = rs.temporal_filter()
    # hole_fill_filter = rs.hole_filling_filter()
    disparity_to_depth = rs.disparity_transform(False)

    i = 0
    while True:
        try:
            frames = pipe.wait_for_frames()
        except:
            break
        print(i)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        # process depth image
        if True:
            depth_frame = thres_filter.process(depth_frame)
            depth_frame = depth_to_disparity.process(depth_frame)
            depth_frame = spat_filter.process(depth_frame)
            depth_frame = temp_filter.process(depth_frame)
            # depth_frame = hole_fill_filter.process(depth_frame)
            depth_frame = disparity_to_depth.process(depth_frame)

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        points = pc.calculate(depth_frame)
        cloud = points_to_pcl(points)
        cloud_normals, cluster_indices = region_growing_segmentation(cloud)

        idx = choose_flat_plane(cluster_indices, cloud_normals)
        chosen_plane_indices = cluster_indices[idx]
        arr = cloud.to_array()
        plane_points = arr[chosen_plane_indices]
        plane_coef = fit_plane(plane_points)
        plane_filtered_indices = find_close_points(cloud, plane_coef)
        processed = mark_pixels(depth_colormap, 424, plane_filtered_indices)

        # Dilation
        img_processed = image_post_process(processed)
        cv2.imwrite("./images/proc_" + str(i) + ".png", img_processed)
        i += 1
Example #14
obstacle_line_height_ratio = 0.18  # [0-1]: 0-Top, 1-Bottom. The height of the horizontal line to find distance to obstacle.
obstacle_line_thickness_pixel = 10 # [1-DEPTH_HEIGHT]: Number of pixel rows to use to generate the obstacle distance message. For each column, the scan will return the minimum value for those pixels centered vertically in the image.

USE_PRESET_FILE = True
PRESET_FILE  = "../cfg/d4xx-default.json"

RTSP_STREAMING_ENABLE = True
RTSP_PORT = "8554"
RTSP_MOUNT_POINT = "/d4xx"

# List of filters to be applied, in this order.
# https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md

filters = [
    [True,  "Decimation Filter",   rs.decimation_filter()],
    [True,  "Threshold Filter",    rs.threshold_filter()],
    [True,  "Depth to Disparity",  rs.disparity_transform(True)],
    [True,  "Spatial Filter",      rs.spatial_filter()],
    [True,  "Temporal Filter",     rs.temporal_filter()],
    [False, "Hole Filling Filter", rs.hole_filling_filter()],
    [True,  "Disparity to Depth",  rs.disparity_transform(False)]
]

#
# The filters can be tuned with the opencv_depth_filtering.py script and the resulting values saved here.
# Individual filters have different options, so the values have to be applied to each filter accordingly.
#

# decimation_magnitude = 8
# filters[0][2].set_option(rs.option.filter_magnitude, decimation_magnitude)
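
Other entries in the list can be tuned the same way; the lines below are a hedged sketch with illustrative values only (min_distance, max_distance and filter_smooth_alpha are standard librealsense options, indices 1 and 3 are the threshold and spatial filters in the list above):

# threshold_min_m, threshold_max_m = 0.3, 4.0
# filters[1][2].set_option(rs.option.min_distance, threshold_min_m)
# filters[1][2].set_option(rs.option.max_distance, threshold_max_m)
# filters[3][2].set_option(rs.option.filter_smooth_alpha, 0.5)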
Example #15
    def threshold(self, frame, minDistance, maxDistance):
        # apply a threshold filter so only depth values between minDistance and maxDistance are kept
        threshold_filter = rs.threshold_filter(minDistance, maxDistance)
        return threshold_filter.process(frame)
Example #16
def print_hi(name):
    # Configure depth and color streams
    pipeline = rs.pipeline()
    colorizer = rs.colorizer()
    threshold_distance = 0.4
    tr1 = rs.threshold_filter(min_dist=0.15, max_dist=threshold_distance)
    config = rs.config()
    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    depth_sensor = pipeline_profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    clipping_distance_in_meters = 0.9
    clipping_distance = clipping_distance_in_meters / depth_scale
    align_to = rs.stream.color
    align = rs.align(align_to)
    pipeline.start(config)
    # HSV thresholds for skin detection
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    try:
        while True:
            frames = pipeline.wait_for_frames()
            # Align the depth frame to color frame
            aligned_frames = align.process(frames)
            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()
            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Remove background - Set pixels further than clipping_distance to grey
            grey_color = 153
            depth_image_3d = np.dstack(
                (depth_image, depth_image,
                 depth_image))  # depth image is 1 channel, color is 3 channels
            bg_removed = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                grey_color, color_image)

            # Render images
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))
            # cv2.namedWindow('Background Removed', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Color+depth', images)

            # images[270:, :] = [0, 0, 0]
            # images[0:240,:] = [0,0,0]
            # images[:, 0:50] = [0, 0, 0]
            # images[:, 630:] = [0, 0, 0]

            img_hsv = cv2.cvtColor(bg_removed, cv2.COLOR_BGR2HSV)
            ## HSV mask range used to isolate the blue pen
            lower_red = np.array([94, 80, 2], dtype="uint8")
            upper_red = np.array([126, 255, 255], dtype="uint8")
            mask = cv2.inRange(img_hsv, lower_red, upper_red)
            bluepen = cv2.bitwise_and(bg_removed, bg_removed, mask=mask)

            cv2.imshow('BluePen', bluepen)
            bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)
            fgmask = bgModel.apply(bluepen)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            img = cv2.bitwise_and(bluepen, bluepen, mask=fgmask)

            # Skin detect and thresholding
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            lower = np.array([94, 80, 2], dtype="uint8")
            upper = np.array([126, 255, 255], dtype="uint8")
            skinMask = cv2.inRange(hsv, lower, upper)
            kernel = np.ones((3, 3), np.uint8)
            skinMask = cv2.erode(skinMask, kernel, iterations=2)
            skinMask = cv2.dilate(skinMask, kernel, iterations=1)

            cv2.imshow('Threshold Hands', skinMask)
            contours, hierarchy = cv2.findContours(skinMask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            length = len(contours)
            maxArea = -1
            drawing = np.zeros(img.shape, np.uint8)
            if length > 0:
                for i in range(length):
                    temp = contours[i]
                    area = cv2.contourArea(temp)
                    if area > maxArea:
                        maxArea = area
                        ci = i
                        res = contours[ci]
                hull = cv2.convexHull(res)
                # drawing = np.zeros(img.shape, np.uint8)
                cx, cy = centroid(res)

                print(aligned_depth_frame.get_distance(cx, cy))
                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
            else:
                drawing = np.zeros(img.shape, np.uint8)
            cv2.imshow('DRAWING', drawing)

            # bluepen_gray = cv2.cvtColor(bluepen, cv2.COLOR_BGR2HSV)
            # contours, hierarchy = cv2.findContours(bluepen_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # length = len(contours)
            # drawing = np.zeros(bluepen_gray.shape, np.uint8)
            # maxArea = -1
            # if length > 0:
            #     for i in xrange(length):
            #         temp = contours[i]
            #         area = cv2.contourArea(temp)
            #         if area > maxArea:
            #             maxArea = area
            #             ci = i
            #             res1 = contours[ci]
            #
            #     hull = cv2.convexHull(res1)
            #
            #     cv2.drawContours(drawing, [res1], 0, (0, 255, 0), 2)
            #     cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
            # cv2.imshow('',drawing)
            # length = len(contours)
            # print(length)
            # drawing = np.zeros(res.shape, np.uint8)
            # maxArea = -1
            # if length > 0:
            #     for i in xrange(length):
            #         temp = contours[i]
            #         area = cv2.contourArea(temp)
            #         if area > maxArea:
            #             maxArea = area
            #             ci = i
            #             res = contours[ci]
            #
            #     hull = cv2.convexHull(res)
            #
            #
            #     cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
            #     cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
            # cv2.imshow('Only contour for calibration', drawing)

            # cv2.imshow('BluePen', res)

            # converted = cv2.cvtColor(images, cv2.COLOR_BGR2HSV)
            # skinMask = cv2.inRange(converted, lower, upper)
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
            # skinMask = cv2.erode(skinMask, kernel, iterations=2)
            # skinMask = cv2.dilate(skinMask, kernel, iterations=1)
            #
            # skinMask = cv2.GaussianBlur(skinMask, (5, 5), 0)
            # skin = cv2.bitwise_and(images, images, mask=skinMask)

            cv2.waitKey(1)

            # img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # ## Gen lower mask (0-5) and upper mask (175-180) of RED
            # mask1 = cv2.inRange(img_hsv, (0, 50, 20), (5, 255, 255))
            # mask2 = cv2.inRange(img_hsv, (175, 50, 20), (180, 255, 255))
            #
            # ## Merge the mask and crop the red regions
            # mask = cv2.bitwise_or(mask1, mask2)
            # croped = cv2.bitwise_and(img, img, mask=mask)

    finally:

        # Stop streaming
        pipeline.stop()

    # threshold_filter = rs.threshold_filter()
    # threshold_filter.set_option(rs.option.max_distance, 1)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                         'haarcascade_frontalface_default.xml')
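
# NOTE (not part of the original example): the contour loop above calls a
# centroid() helper that is not shown. A minimal sketch using image moments:
def centroid(contour):
    # Return the (cx, cy) pixel coordinates of the contour's centre of mass.
    m = cv2.moments(contour)
    if m['m00'] == 0:
        return 0, 0
    return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])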
Example #17
colorizer = rs.colorizer(3)
align_to = rs.stream.color
align = rs.align(align_to)
spatialFilter = rs.spatial_filter(.25, 50, 5, 1)
disparity = rs.disparity_transform(True)
kernel = np.ones((3,3),np.uint8)


try:
    x = 0
    while x < 10:

        start = timer()
        #x = (.5 * start) - .49
        #print(x)
        thresholdFilter = rs.threshold_filter(2 + x, 5 + x)

        try:
            frames = pipeline.wait_for_frames()
            frames = align.process(frames)
            timeStamp = frames.get_timestamp() / 1000
        except:
            break
        #get data
        motion.get_data(frames, timeStamp)
        depthFrame = frames.get_depth_frame()

        #apply rs filters
        depthFrame1 = thresholdFilter.process(depthFrame)
        depthFrame1 = disparity.process(depthFrame1)
        depthFrame1 = spatialFilter.process(depthFrame1)
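        # NOTE: the original example is truncated here. A hedged continuation
        # could convert the disparity result back to depth and colorize it for
        # display (the original presumably also updates x, as the commented-out
        # timer-based assignment above suggests):
        depthFrame1 = rs.disparity_transform(False).process(depthFrame1)
        depthImage = np.asanyarray(colorizer.colorize(depthFrame1).get_data())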
Example #18
#####################################################
##                  Export to PLY                  ##
#####################################################

# First import the library
import pyrealsense2 as rs
import open3d as o3d
from open3d.open3d.geometry import PointCloud
from open3d.open3d.geometry import OrientedBoundingBox
import numpy as np
import time

pc = rs.pointcloud()
pipe = rs.pipeline()
cfg = rs.config()
thre = rs.threshold_filter(0.1, 2)
align = rs.align(rs.stream.color)

profile = pipe.start(cfg)
intr = profile.get_stream(
    rs.stream.color).as_video_stream_profile().get_intrinsics()
pinhole_camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
    intr.width, intr.height, intr.fx, intr.fy, intr.ppx, intr.ppy)

for i in range(10):
    pipe.wait_for_frames()

frames = pipe.wait_for_frames()
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame()
filtered = thre.process(depth_frame)
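
# NOTE: the original snippet is cut off before the actual export. A hedged
# continuation (assuming a recent Open3D API and an output name of "out.ply")
# could build a point cloud from the aligned color and filtered depth frames
# and write it to disk:
color_frame = aligned_frames.get_color_frame()
depth_o3d = o3d.geometry.Image(np.asanyarray(filtered.get_data()))
color_o3d = o3d.geometry.Image(np.asanyarray(color_frame.get_data()))
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
    color_o3d, depth_o3d, convert_rgb_to_intensity=False)
cloud = o3d.geometry.PointCloud.create_from_rgbd_image(
    rgbd, pinhole_camera_intrinsic)
o3d.io.write_point_cloud("out.ply", cloud)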
Example #19
    def create_filter(self):
        self.th_filter = rs.threshold_filter(max_dist=self.max_dist)
        self.sp_filter = rs.spatial_filter()
        self.sp_filter.set_option(rs.option.filter_magnitude, 3.0)
        self.sp_filter.set_option(rs.option.holes_fill, 2.0)
        self.tmp_filter = rs.temporal_filter()
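
    # NOTE (assumed companion method, not part of the original snippet):
    # apply the filters built above to a depth frame, in order.
    def apply_filter(self, depth_frame):
        frame = self.th_filter.process(depth_frame)
        frame = self.sp_filter.process(frame)
        frame = self.tmp_filter.process(frame)
        return frame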
Example #20
import multiprocessing
from statistics import mean

import numpy as np
import pyrealsense2 as rs
import simpleaudio as sa
import wave

# Declare RealSense pipeline, encapsulating the actual device and sensors.
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

pipe = rs.pipeline()

pipe.start(config)
align_to = rs.stream.color
align = rs.align(align_to)
threshold = rs.threshold_filter()
threshold.set_option(rs.option.max_distance, 10)
#_________________________________________________________________________

# Stereo Audio:
# get timesteps for each sample, T is note duration in seconds
sample_rate = 44100
#T = 0.2272753623188406
T = 0.227275362
valor = sample_rate * T
t = np.linspace(0, T, int(valor), False)


def stereo_beep(left, right):

    global t
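    # NOTE: the original body is cut off here. The lines below are an assumed
    # sketch: a 440 Hz tone whose per-ear volume is set by the left/right
    # gains (0..1), played through simpleaudio.
    tone = np.sin(2 * np.pi * 440 * t)
    audio = np.zeros((t.size, 2))
    audio[:, 0] = tone * left
    audio[:, 1] = tone * right
    audio = (audio * 32767).astype(np.int16)
    sa.play_buffer(audio, 2, 2, sample_rate).wait_done()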
Example #21
def ai():
    # Configure depth and color streams
    test_img = cv2.imread('testImag.jpg')
    pipeline = rs.pipeline()
    config = rs.config()
    colorizer = rs.colorizer()
    config.enable_stream(rs.stream.depth, 480, 270, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 424, 240, rs.format.bgr8, 15)
    model_file = 'mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite'
    labels = read_label_file('coco_labels.txt')
    interpreter = make_interpreter(model_file)
    interpreter.allocate_tensors()
    ##    align = rs.align(rs.stream.color)
    dist_thres_cm = 180  # cm
    max_depth_m = 8
    min_depth_m = 0.1

    confThreshold = 0.65
    confidence = 0

    off_ul = 15
    off_vl = 10
    off_ur = 25
    off_vr = 15

    corn_1 = (72, 44)
    corn_2 = (168, 92)
    cornColor_1 = (corn_1[0] - off_ul,
                   corn_1[1] - off_vl)  # compensate for the depth/color FOV offset
    cornColor_2 = (corn_2[0] + off_ur, corn_2[1] + off_vr)

    filters = [[True, "Decimation Filter",
                rs.decimation_filter()],
               [True, "Threshold Filter",
                rs.threshold_filter()],
               [True, "Depth to Disparity",
                rs.disparity_transform(True)],
               [True, "Spatial Filter",
                rs.spatial_filter()],
               [True, "Temporal Filter",
                rs.temporal_filter()],
               [True, "Hole Filling Filter",
                rs.hole_filling_filter(True)],
               [True, "Disparity to Depth",
                rs.disparity_transform(False)]]

    if filters[1][0] is True:
        filters[1][2].set_option(rs.option.min_distance, min_depth_m)
        filters[1][2].set_option(rs.option.max_distance, max_depth_m)

    # Start streaming
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    progress("INFO: Depth scale is: %.4f" % depth_scale)

    progress("INFO: video recording")
    out = cv2.VideoWriter('avcnet.avi', cv2.VideoWriter_fourcc(*'XVID'), 15,
                          (240, 136))

    # default parameters
    orient = 0
    tim_old = 0.1
    state = 'fly'
    global velocity_y
    velocity_y = 0
    fly_1 = 400
    k = 0.66  # k=(tau/T)/(1+tau/T) tau time constant LPF, T period
    # k=0 # no filtering
    fps = FPS().start()
    # Start streaming

    try:
        while True:

            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            ##            frames = align.process(frames)
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Apply the filters
            filtered_frame = depth_frame
            for i in range(len(filters)):
                if filters[i][0] is True:
                    filtered_frame = filters[i][2].process(filtered_frame)

            # Extract depth in matrix form
            depth_data = filtered_frame.as_frame().get_data()
            depth_mat = np.asanyarray(depth_data)  # shape 136,240

            # Convert images to numpy arrays
            # output_image = np.asanyarray(colorizer.colorize(filtered_frame).get_data()) #shape: 136,240,3
            color_image = np.asanyarray(color_frame.get_data())

            # calculate distance
            distances = distances_depth_image(depth_mat, min_depth_m,
                                              max_depth_m, depth_scale, corn_1,
                                              corn_2)

            # Stack both images horizontally
            # output_color = cv2.resize(color_image, (240, 136))
            if color_frame:
                imag = cv2.resize(color_image, (300, 300))
                common.set_input(interpreter, imag)
                interpreter.invoke()
                scale = (1, 1)
                objects = detect.get_objects(interpreter, confThreshold, scale)
                data_out = []
                if objects:
                    for obj in objects:
                        inference = []  # clear inference
                        box = obj.bbox
                        inference.extend((obj.id, obj.score, box))
                        # print('inference:',inference)
                        data_out.append(
                            inference)  # list of all detected objects
                    # print('data_out:',data_out)
                    objID = data_out[0][
                        0]  # object with largest confidence selected
                    confidence = data_out[0][1]
                    labeltxt = labels[objID]
                    box = data_out[0][2]
                    if confidence > confThreshold:
                        draw_rect(imag,
                                  box,
                                  confidence,
                                  labeltxt,
                                  use_normalized_coordinates=False)

            output_color = cv2.resize(imag, (240, 136))
            # cv2.rectangle(output_image, corn_1, corn_2, (0, 255, 0), thickness=2)
            cv2.rectangle(output_color,
                          cornColor_1,
                          cornColor_2, (0, 255, 0),
                          thickness=2)

            #===========================================================================
            # classify the input image
            fly = np.min(distances)  # distance in cm
            left = (distances[0:24] <
                    dist_thres_cm).sum()  # object is on the left side
            right = (distances[24:48] <
                     dist_thres_cm).sum()  # object is on the right side
            stop = ((distances[0:48] < dist_thres_cm).sum() == 48) * 48
            # build the label
            my_dict = {'left': left, 'right': right, 'stop': stop}
            maxPair = max(my_dict.items(), key=itemgetter(1))
            fly_f = k * fly_1 + (1 - k) * fly
            fly_1 = fly_f
            proba = int(fly_f)

            if state == 'avoid':
                if fly_f >= dist_thres_cm + 10:
                    state = 'fly'
                    print(state, int(fly_f), file=f)

            else:
                label = 'forward'
                # proba=fly_f
                if fly_f <= dist_thres_cm:
                    label = maxPair[0]
                    # proba=int(fly_f)
                    print(my_dict, int(fly_f), file=f)
                    state = 'avoid'

            label_1 = "{} {} {}".format(label, proba, state)
            # draw the label on the image
            cv2.putText(output_color, label_1, (10, 25),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            # Write the frame into the file 'output.avi'
            out.write(output_color)

            if vehicle.channels['8'] > 1400:
                if state == "fly":
                    event.clear()

                if state == "avoid":
                    event.set()

                    if label == 'left':
                        velocity_y = 0.8
                    if label == 'right':
                        velocity_y = -0.8
                    if label == 'stop':
                        velocity_y = 0

            if vehicle.channels['8'] < 1400:
                event.clear()

            # show the output frame
            cv2.imshow("Frame", output_color)
            key = cv2.waitKey(10) & 0xFF

            # update the FPS counter
            fps.update()
            # if the `Esc` key was pressed, break from the loop
            if key == 27:
                break

    finally:

        # Stop streaming
        pipeline.stop()
        # do a bit of cleanup
        # stop the timer and save FPS information
        fps.stop()

        progress("INFO: elapsed time: {:.2f}".format(fps.elapsed()))
        progress("INFO: approx. FPS: {:.2f}".format(fps.fps()))
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()), file=f)
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()), file=f)
        f.close()

        progress('INFO:end')
        time.sleep(3)

        cv2.destroyAllWindows()
        out.release()
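
# NOTE (not part of the original example): distances_depth_image() is called
# above but not shown. A hedged sketch, with the sector count of 48 inferred
# from how the caller slices the result:
def distances_depth_image(depth_mat, min_depth_m, max_depth_m, depth_scale,
                          corn_1, corn_2, sectors=48):
    # Crop the region of interest and convert raw depth units to metres
    roi = depth_mat[corn_1[1]:corn_2[1],
                    corn_1[0]:corn_2[0]].astype(np.float32) * depth_scale
    # Treat out-of-range or invalid readings as "far away"
    roi[(roi < min_depth_m) | (roi > max_depth_m)] = max_depth_m
    # Closest point of each vertical sector, returned in centimetres
    cols = np.array_split(roi, sectors, axis=1)
    return np.array([c.min() for c in cols]) * 100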