Code example #1
def getDepthPipeline(FRAME_WIDTH, FRAME_HEIGHT, id=None):
    pipeline = rs.pipeline()
    config = rs.config()
    try:
        if id is not None:
            config.enable_device(id)
    except Exception as e:
        print(e)

    config.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.z16, 30)
    config.enable_stream(rs.stream.infrared, 1, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)
    config.enable_stream(rs.stream.infrared, 2, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)

    print("[INFO] Starting streaming...")
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    align_to = rs.stream.color
    align = rs.align(align_to)

    return pipeline, config, profile, align, depth_scale
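A minimal usage sketch for the helper above (an addition, not part of the original snippet), assuming pyrealsense2 is imported as rs and NumPy as np: grab one frame set, align depth to color, scale depth to meters, then stop the pipeline.

import numpy as np
import pyrealsense2 as rs

pipeline, config, profile, align, depth_scale = getDepthPipeline(640, 480)
try:
    frames = pipeline.wait_for_frames()
    aligned = align.process(frames)           # align depth to the color stream
    depth_frame = aligned.get_depth_frame()
    color_frame = aligned.get_color_frame()
    if depth_frame and color_frame:
        depth_m = np.asanyarray(depth_frame.get_data()) * depth_scale  # depth in meters
        color = np.asanyarray(color_frame.get_data())                  # BGR image
finally:
    pipeline.stop()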
Code example #2
def main():
    if not os.path.exists(args.directory):
        os.mkdir(args.directory)
        print('Created out_dir')
    try:
        config = rs.config()
        rs.config.enable_device_from_file(config, args.input)
        pipeline = rs.pipeline()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
        pipeline.start(config)
        i = 0
        while True:
            print("Saving frame:", i)
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            depth_image = np.asanyarray(depth_frame.get_data())
            cv.imwrite(args.directory + "/" + str(i).zfill(6) + "depth.png", depth_image)
            rgb_frame = frames.get_color_frame()
            rgb_image = np.asanyarray(rgb_frame.get_data())
            # Swap R and B channels (RGB -> BGR) so cv.imwrite stores correct colors
            r = rgb_image[:,:,0].copy()
            b = rgb_image[:,:,2].copy()
            rgb_image[:,:,0] = b
            rgb_image[:,:,2] = r
            cv.imwrite(args.directory + "/" + str(i).zfill(6) + "rgb.png", rgb_image)
            i += 1
    finally:
        # Stop streaming when the loop is interrupted
        pipeline.stop()
Code example #3
def angle():
    """Return the current tilt angle of the RealSense, read from its accelerometer."""

    anglepip = rs.pipeline()
    angleconfig = rs.config()
    angleconfig.enable_stream(rs.stream.accel)
    anglepip.start(angleconfig)
    
    camera_angle = 0
    
    try:
        while True:
            f1 = anglepip.wait_for_frames()
            accel = f1[0].as_motion_frame().get_motion_data()
    
            if not accel:
                print("no frame at cam ")
                continue
    
            # The gravity vector's y-z components give the camera tilt about the x axis
            accel_angle_x = math.degrees(math.atan2(accel.y, accel.z))
            accel_angle_x = int(accel_angle_x)
            camera_angle = accel_angle_x
            break
    
    finally:
        anglepip.stop()
    
    print("camera angle = " + str(camera_angle))
    return camera_angle        
Code example #4
def pub_img():
    '''Grabs aligned RealSense color and depth frames and publishes them
    as CompressedImage messages.'''
    global stamp

    # cap = cv2.VideoCapture(1)

    pipe = rs.pipeline()
    config = rs.config()
    width, height = 640, 480
    config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)
    profile = pipe.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)
    pub_rgb = rospy.Publisher("/output/image_raw/compressed_rgb", CompressedImage, queue_size=1)
    pub_depth = rospy.Publisher("/output/image_raw/compressed_depth", CompressedImage, queue_size=1)


    print('Hey! I started publishing images.')

    while(True):

        temp = pipe.wait_for_frames()
        aligned_frames = align.process(temp)
        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        if not aligned_depth_frame or not color_frame:
            continue
        buffer = np.zeros((height,width,4))

        buffer[:,:,0:3] = (np.asanyarray(color_frame.get_data(),dtype=np.uint8))
        # NOTE: casting the 16-bit depth frame to uint8 truncates values above 255
        buffer[:,:,3] = np.asanyarray(aligned_depth_frame.get_data(),dtype=np.uint8)

        #### Create CompressedImage ####
        msg_rgb = CompressedImage()
        msg_depth = CompressedImage()
        msg_rgb.header.stamp = rospy.Time.now()
        msg_depth.header.stamp = msg_rgb.header.stamp#rospy.Time.now()
        msg_rgb.format = "rgb"#f'{stamp}'
        msg_depth.format = "depth"
        msg_rgb.data = np.array(cv2.imencode('.jpg', buffer[:,:,0:3])[1]).tobytes()
        msg_depth.data = np.array(cv2.imencode('.jpg', buffer[:,:,3])[1]).tobytes()
        # Publish depth frame
        # pub = rospy.Publisher("/output/image_raw/compressed", CompressedImage, queue_size=1)
        # r = rospy.Rate(10)
        # try:
        pub_rgb.publish(msg_rgb)
        pub_depth.publish(msg_depth)
        # r.sleep()
        # except:
        #     print('Could not publish')

        stamp += 1
        if stamp >= 100000:
            stamp = 0
Code example #5
def initialize_camera(width, height):
    """
    Set up camera pipeline for given resolution
    """

    pipe = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, width, height, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, width, height, rs.format.rgb8, 30)
    profile = pipe.start(config)
    align_to = rs.stream.color
    align = rs.align(align_to)

    return pipe, profile, align
Code example #6
    def __init__(self, resolution=(640, 480), depth_framerate=6, 
                 color_framerate=30, include_depth=False, decimation_magnitude=1,
                 spatial_smooth=True, temporal_smooth=True):
        self.include_depth = include_depth
        self.pipeline = rs.pipeline()
        #Create a config and configure the pipeline to stream
        #  different resolutions of color and depth streams
        self.config = rs.config()
        w, h = resolution
        self.config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, color_framerate)
        if include_depth:
            self.config.enable_stream(rs.stream.depth, w, h, rs.format.z16, depth_framerate)

        # Start streaming
        self.profile = self.pipeline.start(self.config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        self.color_sensor = self.profile.get_device().first_color_sensor()
        if include_depth:
            self.depth_sensor = self.profile.get_device().first_depth_sensor()
            self.depth_scale = self.depth_sensor.get_depth_scale()

            # Create an align object
            # rs.align allows us to perform alignment of depth frames to other frames
            # The "align_to" is the stream type to which we plan to align depth frames.
            align_to = rs.stream.depth
            self.align = rs.align(align_to)

            # We'll want to store the intrinsics once we start getting images.
            self.aligned_depth_K = None

            self.filters = []
            # Decimation filter
            if decimation_magnitude > 1:
                decimate_filter = rs.decimation_filter()
                decimate_filter.set_option(rs.option.filter_magnitude, decimation_magnitude)
                self.filters.append(decimate_filter)
            
            if spatial_smooth or temporal_smooth:
                self.filters.append(rs.disparity_transform())
            if spatial_smooth:
                self.filters.append(rs.spatial_filter())
            if temporal_smooth:
                self.filters.append(rs.temporal_filter())
            if spatial_smooth or temporal_smooth:
                self.filters.append(rs.disparity_transform(False))

            # Pointcloud conversion utility.
            self.pc = rs.pointcloud()
            
            self.depth_profile = rs.video_stream_profile(self.profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = self.depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
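The constructor above only builds self.filters; a hypothetical helper such as the sketch below (not shown in the original class) would apply the configured post-processing filters to a depth frame in the order they were added (to disparity, spatial, temporal, back from disparity):

    def filter_depth(self, depth_frame):
        # Hypothetical helper: run the configured rs post-processing filters in order.
        for f in self.filters:
            depth_frame = f.process(depth_frame)
        return depth_frame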
Code example #7
def start_camera_stream():
    # comment out when not testing in sim...

    global pipeline, config
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(config)

    profile = pipeline.get_active_profile()
    image_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
    image_intrinsics = image_profile.get_intrinsics()
    frame_w, frame_h = image_intrinsics.width, image_intrinsics.height
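The intrinsics retrieved above are typically used to map a pixel to a 3D point. A minimal sketch of that step (an assumption; the original function enables only the color stream, so the 1.0 m depth value here is purely illustrative):

import pyrealsense2 as rs

# Deproject the image center, assuming a depth of 1.0 m at that pixel (illustrative only)
u, v = image_intrinsics.width // 2, image_intrinsics.height // 2
point_3d = rs.rs2_deproject_pixel_to_point(image_intrinsics, [u, v], 1.0)
print(point_3d)  # [X, Y, Z] in meters, in the camera coordinate frame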
Code example #8
File: rs_cams.py  Project: cconstantine/pixsense
    def enable(self):
        logger.info("Enabling camera {0}".format(self.camera_id))
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device(self.camera_id)
        config.enable_stream(rs.stream.infrared, 848, 480, rs.format.y8, 60)
        config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 60)
        pipeline_profile = self.pipeline.start(config)
        pipeline_profile.get_device().sensors[0].set_option(
            rs.option.emitter_enabled, 0)
        if self.model == 'resnet':
            self.resn = poser.DetectorResnet((848, 480), self.optimize)
        elif self.model == 'densenet':
            self.resn = poser.DetectorDensenet((848, 480), self.optimize)
        else:
            raise RuntimeError(f"Unknown model: {self.model}")
Code example #9
    def __init__(self, config, board, usestream=None):

        # initialize realsense specific settings
        self.pipeline = rs.pipeline()
        self.realsense_config = rs.config()

        # FIXME: missing frames when using videostream or too slow processing
        # https://github.com/IntelRealSense/librealsense/issues/2216

        # initialize the base class
        super().__init__(config, board, usestream)

        # Use recorded depth and color streams and its configuration
        # If problems with colors occur, check bgr/rgb channels configurations
        if usestream is not None:
            rs.config.enable_device_from_file(self.realsense_config, usestream)
            self.realsense_config.enable_all_streams()

        # Configure depth and color streams
        else:
            self.realsense_config.enable_stream(rs.stream.depth, self.width,
                                                self.height, rs.format.z16, 30)
            self.realsense_config.enable_stream(rs.stream.color, self.width,
                                                self.height, rs.format.bgr8,
                                                30)

        # Create alignment primitive with color as its target stream:
        self.alignment_stream = rs.align(rs.stream.color)

        # Start streaming
        # TODO: optionally rename variable to a more speaking one
        # FIXME: program ends here without further message
        try:
            self.profile = self.pipeline.start(self.realsense_config)
            # Getting the depth sensor's depth scale
            depth_sensor = self.profile.get_device().first_depth_sensor()
            self.depth_scale = depth_sensor.get_depth_scale()

            self.initialized = True

        except RuntimeError as e:
            logger.fatal("camera could not be initialized: {}".format(e))
            self.initialized = False
            # FIXME: follow up?

        if self.initialized:
            logger.debug("Depth Scale is: {}".format(self.depth_scale))
Code example #10
File: datasets.py  Project: nire9221/python_grab_n_go
    def __init__(self, width='640', height='480', fps='30'):
        # Variables for setup
        self.width = width
        self.height = height
        self.fps = fps
        self.imgs = [None]
        self.img_size = 640
        self.half = False

        # Setup
        self.pipe = rs.pipeline()
        self.cfg = rs.config()
        self.cfg.enable_device('036322250716')
        # self.cfg.enable_device(source)  #036322250716

        # Start streaming
        self.profile = self.pipe.start(self.cfg)
        self.path = rs.pipeline_profile()
Code example #11
    def __init__(self, config = None):
        Camera_Base.__init__(self, config)

        if(self.data['camera_name'] != "d400"):
            raise TypeExcept("Camera name is not d400")   
        
        #Set pipeline and config
        self.pipeline = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.color, self.data['color']['width'], self.data['color']['height'], rs.format.bgr8, self.data['color']['fps'])
        cfg.enable_stream(rs.stream.depth, self.data['depth']['width'], self.data['depth']['height'], rs.format.z16, self.data['depth']['fps'])

        # Start streaming
        profile = self.pipeline.start(cfg)

        depth_sensor = profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to other frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        self.align = rs.align(align_to)
Code example #12
def start():
    pipeline = rs.pipeline()
    config = rs.config()

    print("Start load conf")
    config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    profile = pipeline.start(config)

    depth_sensor = profile.get_device().first_depth_sensor()
    set_short_range(depth_sensor)
    colorizer = rs.colorizer()
    print("Conf loaded")
    align_to = rs.stream.color
    align = rs.align(align_to)

    try:
        while True:
            color_image, depth_color_image, depth_image = get_color_depth(
                pipeline, align, colorizer)
            if color_image is None or depth_color_image is None or depth_image is None:
                continue

            color_image, depth_color_image, xy0, xy1, idx_to_coordinates = get_right_hand_coords(
                color_image, depth_color_image)
            if xy0 is not None or xy1 is not None:
                z_val_f, z_val_s, m_xy, c_xy, xy0_f, xy1_f, x, y, z = get_filtered_values(
                    depth_image, xy0, xy1)
                pyautogui.moveTo(int(x), int(3500 - z))  # , duration=0.05
                if draw_cam_out(color_image, depth_color_image, xy0_f, xy1_f,
                                c_xy, m_xy):
                    break
    finally:
        hands.close()
        pipeline.stop()
Code example #13
def camera_process(_running, _collision, _buffer):
    FRAME_WIDTH = 640
    FRAME_HEIGHT = 480

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.z16, 30)
    config.enable_stream(rs.stream.infrared, 1, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)
    config.enable_stream(rs.stream.infrared, 2, FRAME_WIDTH, FRAME_HEIGHT,
                         rs.format.y8, 30)

    print("[INFO] Starting streaming...")
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    align_to = rs.stream.color
    align = rs.align(align_to)

    clipping_distance_in_meters = 1.5
    collision_percent = 0.15

    clipping_distance = clipping_distance_in_meters / depth_scale

    while True:
        frames = pipeline.wait_for_frames()
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        frame = frames.get_color_frame()

        depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        ir_frame_1 = frames.get_infrared_frame(1)
        ir_frame_2 = frames.get_infrared_frame(2)
        ir_frame_both = frames.get_infrared_frame()

        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())

        depth_image = np.asanyarray(aligned_depth_frame.get_data())

        grey_color = 0
        depth_image_3d = np.dstack(
            (depth_image, depth_image,
             depth_image))  #depth image is 1 channel, color is 3 channels

        ir_image_1 = np.asanyarray(ir_frame_1.get_data())
        ir_image_1 = cv2.applyColorMap(
            cv2.convertScaleAbs(ir_image_1, alpha=1), cv2.COLOR_BGRA2GRAY)

        ir_image_2 = np.asanyarray(ir_frame_2.get_data())
        ir_image_2 = cv2.applyColorMap(
            cv2.convertScaleAbs(ir_image_2, alpha=1), cv2.COLOR_BGRA2GRAY)

        depth_image = np.asanyarray(depth_frame.get_data())
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.02), cv2.COLORMAP_JET)

        scaled_size = (frame.width, frame.height)
        # expand image dimensions to have shape: [1, None, None, 3]
        # i.e. a single-column array, where each item in the column has the pixel RGB value
        image_expanded = np.expand_dims(color_image, axis=0)

        frame = cv2.cvtColor(np.asanyarray(frame.get_data()),
                             cv2.COLOR_BGR2RGB)
        bg_removed = np.where(
            (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
            grey_color, ir_image_1)
        bg_removed[bg_removed != [0, 0, 0]] = 255

        white_count = np.count_nonzero(bg_removed == 255)
        black_count = np.count_nonzero(bg_removed == 0)
        pixels = bg_removed.shape[0] * bg_removed.shape[1] * bg_removed.shape[2]
        if (white_count > pixels * collision_percent):
            _collision.value = True
        else:
            _collision.value = False

        cv2.imshow("bg_removed", bg_removed)
        t = cv2.waitKey(1)
        if t == ord('q'):
            _running.value = False
            break
        # _, _buffer.value = cv2.imencode('.jpg', bg_removed)
        print(type(base64.b64encode(cv2.imencode('.jpg', bg_removed)[1])))
        try:
            _buffer.value = str(
                base64.b64encode(cv2.imencode('.jpg', bg_removed)[1]))
        except Exception as e:
            print(e)
Code example #14
class Camera():
    '''RealSense camera wrapper'''
    pipeline = rs.pipeline()
    config = rs.config()
    clipping_distance_in_meters = 1.4  # depth clipping range (meters)

    def init(self):
        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                  30)

        # Start pipeline
        attempt = 1  # attempt counter; set to -1 on success
        while (attempt > 0):
            try:
                profile = self.pipeline.start(self.config)
            except (RuntimeError):
                print("\033[0;31m未检测到相机!重试%s/5\033[0m" % (attempt))
                attempt += 1
                if attempt > 5:
                    os._exit(0)
            else:
                attempt = 0
        while (attempt >= 0):
            try:
                self.frames = self.pipeline.wait_for_frames()
            except (RuntimeError):
                print("\033[0;31m相机卡死!重试%s/5\033[0m" % attempt)
                attempt += 1
                if attempt > 5:
                    os._exit(0)
            else:
                attempt = -1
        color_frame = self.frames.get_color_frame()
        depth_frame = self.frames.get_depth_frame()
        # Set camera options
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.motion_range, 140)  # max depth (220)
        depth_sensor.set_option(rs.option.accuracy, 3)  # max=3
        depth_sensor.set_option(rs.option.filter_option, 7)  # 7
        depth_sensor.set_option(rs.option.confidence_threshold, 15)  # 15
        self.depth_scale = depth_sensor.get_depth_scale()  # used to convert depth values to meters

        # Color Intrinsics
        intr = color_frame.profile.as_video_stream_profile().intrinsics
        camera_parameters = {
            'fx': intr.fx,
            'fy': intr.fy,
            'ppx': intr.ppx,
            'ppy': intr.ppy,
            'height': intr.height,
            'width': intr.width
        }
        with open('intrinsics.json', 'w') as fp:
            json.dump(camera_parameters, fp)

        depth_intr = depth_frame.profile.as_video_stream_profile().intrinsics
        depth_parameters = {
            'fx': depth_intr.fx,
            'fy': depth_intr.fy,
            'ppx': depth_intr.ppx,
            'ppy': depth_intr.ppy,
            'height': depth_intr.height,
            'width': depth_intr.width
        }
        with open('depth_intrinsics.json', 'w') as fp:
            json.dump(depth_parameters, fp)

        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(
            color_frame.profile)
        extrin_parameters = {
            'rot': depth_to_color_extrin.rotation,
            'tran': depth_to_color_extrin.translation
        }
        with open('extrinsics.json', 'w') as fp:
            json.dump(extrin_parameters, fp)

        align_to = rs.stream.color
        self.align = rs.align(align_to)

        for i in range(30):
            self.frames = self.pipeline.wait_for_frames()

    def __del__(self):
        self.pipeline.stop()

    def capture(self):
        '''Returns: depth image, color image'''
        attempt = 1
        while (attempt >= 0):
            try:
                self.frames = self.pipeline.wait_for_frames()
            except (RuntimeError):
                print("\033[0;31m相机卡死!重试%s/5\033[0m" % attempt)
                attempt += 1
                if attempt > 5:
                    os._exit(0)
            else:
                attempt = -1

        # Align depth to color
        aligned_frames = self.align.process(self.frames)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            return

        depth_image = np.asanyarray(
            aligned_depth_frame.get_data()) * self.depth_scale
        color_image = np.asanyarray(color_frame.get_data())

        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
        depth_clipped_color_image = np.where(
            (depth_image_3d > self.clipping_distance_in_meters) |
            (depth_image_3d <= 0), 0,
            color_image)  # depth clipping: keep color only within range, black (0) elsewhere

        return depth_image, depth_clipped_color_image
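A minimal usage sketch for the Camera wrapper above (an addition, not part of the original): capture() returns None when the frames are invalid, so the result is checked before unpacking.

cam = Camera()
cam.init()
result = cam.capture()
if result is not None:
    depth_m, clipped_color = result  # depth in meters, background-clipped color image
    print(depth_m.shape, clipped_color.shape)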
Code example #15
def capture():
  global img_q, save
  w, h = 640, 480

  # Configure depth and color streams
  pipeline = rs.pipeline()
  config = rs.config()
  config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
  config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)
  
  # Start streaming
  profile = pipeline.start(config)

  # Getting the depth sensor's depth scale (see rs-align example for explanation)
  depth_sensor = profile.get_device().first_depth_sensor()
  depth_scale = depth_sensor.get_depth_scale()
  print("Depth Scale is: " , depth_scale)

  # We will be removing the background of objects more than
  #  clipping_distance_in_meters meters away
  clipping_distance_in_meters = 1 #1 meter
  clipping_distance = clipping_distance_in_meters / depth_scale

  # Create an align object
  # rs.align allows us to perform alignment of depth frames to other frames
  # The "align_to" is the stream type to which we plan to align depth frames.
  align_to = rs.stream.color
  align = rs.align(align_to)

  thr = 150
  l = 10
  while True:
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)

    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
        
    if not aligned_depth_frame or not color_frame:
      continue

    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())

    grey_color = 153

    out = np.zeros((h, w, 4),np.uint8)
    
    depth_image_3d = np.dstack((depth_image,depth_image,depth_image))
    out[:,:,:3] = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), 0, color_image)
    out[:,:,3] = np.where((depth_image > clipping_distance) | (depth_image <= 0), 0, 255)
    
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    
    images = np.hstack((color_image, depth_colormap, out[:,:,:3]))
    
    if not img_q.full():
      img_q.put_nowait(images)

    if save.value == 1:
      save_img(out)
      save.value = 0
Code example #16
# tracks number of attempts to re-acquire a target (if lost)
target_locate_attempts = 0

# Holds the size of a potential target's radius
target_circle_radius = 0

# info related to last (potential) target sighting
last_obj_lon = None
last_obj_lat = None
last_obj_alt = None
last_obj_heading = None
last_point = None  # center point in pixels

# Uncomment below when using actual realsense camera
# Configure realsense camera stream
pipeline = rs.pipeline()
config = rs.config()

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
                help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
                help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
                help="minimum probability to filter weak detections")

# todo: uncomment when deploying...
# args = vars(ap.parse_args())

# initialize the list of class labels MobileNet SSD was trained to
Code example #17
File: stream_old.py  Project: Creearc/IntelCamera
def capture():
    global img_q, save
    w, h = 640, 480

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)
    pc = rs.pointcloud()

    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2**1)
    prepare_pc = 10

    thr = 150
    l = 10
    while True:
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        if not depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        dd = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),
                               cv2.COLORMAP_SUMMER)
        dd = cv2.normalize(dd, None, 0, 255, cv2.NORM_MINMAX)

        if prepare_pc > 0:
            prepare_pc -= 1
            points = pc.calculate(depth_frame)
            pc.map_to(color_frame)

        out = np.zeros((h, w, 4), np.uint8)

        depth = cv2.cvtColor(dd, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(depth, thr, 255, cv2.THRESH_BINARY_INV)
        ret, mask2 = cv2.threshold(depth, thr - l, 255, cv2.THRESH_BINARY)
        mask = cv2.bitwise_and(mask, mask2)

        out[:, :, :3] = cv2.bitwise_and(color_image,
                                        cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))
        out[:, :, 3] = mask

        images = np.vstack(
            (np.hstack((color_image, dd)),
             np.hstack((cv2.cvtColor(mask,
                                     cv2.COLOR_GRAY2BGR), out[:, :, :3]))))

        if not img_q.full():
            img_q.put_nowait(images)

        if save.value == 1:
            save_img(color_image, dd, depth_image)
            save.value = 0
Code example #18
def capture():
    global img_q, save
    w, h = 640, 480
    #w, h = 1280, 720

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, w, h, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    clipping_distance_in_meters = 2  # 2 meters
    clipping_distance = clipping_distance_in_meters / depth_scale

    frame_count = 0
    t = time.time()

    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(101, sess)
        output_stride = model_cfg['output_stride']
        while True:

            frames = pipeline.wait_for_frames()
            """
      aligned_frames = align.process(frames)

      aligned_depth_frame = aligned_frames.get_depth_frame()
      color_frame = aligned_frames.get_color_frame()
          
      if not aligned_depth_frame or not color_frame:
        continue

      depth_image = np.asanyarray(aligned_depth_frame.get_data())
      color_image = np.asanyarray(color_frame.get_data())"""

            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(
                depth_frame.get_data())  #[:, 300 : 900]
            color_image = np.asanyarray(
                color_frame.get_data())  #[:, 300 : 900]

            out = np.zeros((h, w, 3), np.uint8)

            depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
            out = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                0, color_image)

            mask = np.where(
                (depth_image > clipping_distance) | (depth_image <= 0), 0, 255)
            mask = cv2.applyColorMap(cv2.convertScaleAbs(mask, alpha=1),
                                     cv2.COLORMAP_BONE)
            blur_p = 5
            mask = cv2.GaussianBlur(mask, (blur_p, blur_p), 0)

            #mask = np.dstack((mask,mask,mask))
            boxes = []
            boxes = get_contours(mask.copy())
            #boxes = []
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03),
                cv2.COLORMAP_BONE)
            #boxes = HAAR(color_image)
            keypoints = dict()
            for box in boxes:
                img = out[box[1]:box[3], box[0]:box[2]]
                img2 = color_image[box[1]:box[3], box[0]:box[2]]
                if img.shape[0] == 0 or img.shape[1] == 0:
                    continue
                input_image, display_image, output_scale = process_input(
                    img, scale_factor=0.7125, output_stride=output_stride)

                heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                    model_outputs, feed_dict={'image:0': input_image})

                pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                    heatmaps_result.squeeze(axis=0),
                    offsets_result.squeeze(axis=0),
                    displacement_fwd_result.squeeze(axis=0),
                    displacement_bwd_result.squeeze(axis=0),
                    output_stride=output_stride,
                    max_pose_detections=10,
                    min_pose_score=0.15)

                keypoint_coords *= output_scale

                tmp = get_coords(img2,
                                 depth_image,
                                 depth_scale,
                                 pose_scores,
                                 keypoint_scores,
                                 keypoint_coords,
                                 min_pose_score=0.15,
                                 min_part_score=0.1)
                if len(tmp.keys()) > len(keypoints.keys()):
                    keypoints = tmp.copy()

            if len(boxes) > 0:
                ask(url, keypoints)
Code example #19
    def __init__(self):
        super().__init__('pcd_publisher_node')

        self.declare_parameter('frame_name', 'camera_frame')
        self.declare_parameter('depth_width', 320)
        self.declare_parameter('depth_height', 240)
        self.declare_parameter('depth_fps', 30)

        param_frame_name = self.get_parameter('frame_name')
        param_depth_width = self.get_parameter('depth_width')
        param_depth_height = self.get_parameter('depth_height')
        param_depth_fps = self.get_parameter('depth_fps')

        # I create a publisher that publishes sensor_msgs.PointCloud2 to the
        # topic 'pcd'. The value '5' refers to the history_depth, which I
        # believe is related to the ROS1 concept of queue size.
        # Read more here:
        # http://wiki.ros.org/rospy/Overview/Publishers%20and%20Subscribers
        self.pcd_publisher = self.create_publisher(sensor_msgs.PointCloud2,
                                                   'rs2_pc', 5)

        pipeline = rs.pipeline()
        config = rs.config()
        # Modify the stream resolution here if needed.
        config.enable_stream(rs.stream.depth, param_depth_width.value,
                             param_depth_height.value, rs.format.z16,
                             param_depth_fps.value)
        profile = pipeline.start(config)
        frame_count = 0
        try:
            #            prevTime = 0
            while True:
                frames = pipeline.wait_for_frames()
                depth_frame = frames.get_depth_frame()
                pc = rs.pointcloud()
                points = pc.calculate(depth_frame)
                v = points.get_vertices()
                verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz

                # Filter only valid points (from BaseRealSenseNode::publishPointCloud in librealsense2)
                # TODO : Speed up filtering valid points with CUDA
                #                verts_valid = []
                #                depth_frame_data = np.asarray(depth_frame.get_data())
                #                index_point = 0
                #                for x in range(depth_frame_data.shape[0]):
                #                    for y in range(depth_frame_data.shape[1]):
                #                        if(depth_frame_data[x][y] > 0):
                #                            verts_valid.append(verts[index_point])
                #                        index_point = index_point + 1
                #                verts_valid = np.asanyarray(verts_valid)

                self.pcd = point_cloud(verts, param_frame_name.value)
                self.pcd_publisher.publish(self.pcd)
                #                curTime = time.time()
                #                sec = curTime - prevTime
                #                fps = 1/(sec)
                frame_count += 1
                if (frame_count % 10 == 0):
                    self.get_logger().info('frame_count : %d' % frame_count)


#                print(frame_count)
#                prevTime = curTime
        finally:
            pipeline.stop()
Code example #20
    def __init__(
            self,
            serial: Optional[str] = None,
            color_img_size: Tuple[int, int] = (1920, 1080),
            depth_img_size: Tuple[int, int] = (1280, 720),
            rectification_function: Optional[RectificationFunctionT] = None,
            logger: Optional[logging.Logger] = None):
        """Initialises a single Intel RealSense device

        Args:
            serial (string, optional): Used to connect to a specific device. Defaults to None.
            color_img_size (width, height) of color image, must be supported by the camera
            depth_img_size (width, height) of depth image, must be supported by camera
            rectification_function (Callable, optional): function to use for image rectification or None to skip rectification

        Raises:
            RuntimeError: If no device is found
        """
        self._logger = logger if logger is not None else logging.getLogger(
            f"IntelRealSense_{serial}")
        if rectification_function is None:
            self._logger.warning(
                "Running camera without distortion parameters. " \
                "Images will NOT be undistored by the camera."
            )
            self._rectification_function = lambda x: x
        else:
            self._rectification_function = rectification_function

        if serial is not None:
            raise NotImplementedError

        self._context = rs.context()
        self._device_list = self._context.query_devices()

        # Check amount of cameras connected
        if (self._device_list.size() == 0):
            raise RuntimeError('No devices found. Is it connected?')
        elif (self._device_list.size() > 1):
            self._logger.warning(
                "There is currently {} devices connected." \
                "Only the first device will be used!"
                .format(self._device_list.size())
            )
        self._device = self._device_list.front()

        for sensor in self._device.sensors:
            if sensor.is_depth_sensor():
                self._sensor_depth = sensor
            else:
                self._sensor_color = sensor

        self._sensor_color.set_option(rs.option.enable_auto_exposure, 0)
        self._sensor_color.set_option(rs.option.enable_auto_white_balance, 0)

        self._pipeline = rs.pipeline()
        self._config = rs.config()
        self._config.enable_stream(rs.stream.color, -1, *color_img_size)
        self._config.enable_stream(rs.stream.depth, -1, *depth_img_size)
        #self._config.enable_device(self._device.get_info(rs.camera_info.serial_number))
        self._profile = self._pipeline.start(self._config)

        self._parameters = self._load_parameters_from_camera()
Code example #21
def main():

    # load serialized caffe model from disk
    print("[INFO] loading model...")

    # for now, just directly supply args here...
    net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt.txt",
                                   "MobileNetSSD_deploy.caffemodel")
    # net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

    # initialize the video stream, allow the camera sensor to warm up,
    # and initialize the FPS counter

    print("[INFO] starting video stream...")
    # Configure depth and color streams

    pipeline = rs.pipeline()
    config = rs.config()

    if args.input is not None:
        rs.config.enable_device_from_file(config, args.input)

    #config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    #config.enable_stream(rs.stream.color, 1280, 800, rs.format.bgr8, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)

    # Start streaming
    pipeline.start(config)

    fps = FPS().start()

    # loop over the frames from the video stream
    if live:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            frame = frames.get_color_frame()
            if not frame:
                continue

            frame = np.asanyarray(frame.get_data())
            detect_from_image(frame, net)

            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                cv2.imwrite("savedimage.png", frame)
                break

            # update the FPS counter
            fps.update()

    else:
        frame = cv2.imread("off_02.png")
        detect_from_image(frame, net, True)

        cv2.waitKey(0)

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS:q {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    pipeline.stop()
Code example #22
def main():

    if not os.path.exists(args.out_rdir):
        os.mkdir(args.out_rdir)
        print('Created out_rgb_dir')
    if not os.path.exists(args.out_ddir):
        os.mkdir(args.out_ddir)
        print('Created out_depth_dir')
    if not os.path.exists(args.annotations_dir):
        os.mkdir(args.annotations_dir)
        print('Created args_out_dir')

    #Create pipeline for realsense2
    pipe = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 360, rs.format.rgb8, 30)
    profile = pipe.start(config)

    # Declare filters
    dc_ftr = rs.decimation_filter ()   # Decimation - reduces depth frame density
    st_ftr = rs.spatial_filter()          # Spatial    - edge-preserving spatial smoothing
    tp_ftr = rs.temporal_filter()    # Temporal   - reduces temporal noise
    th_ftr = rs.threshold_filter()

    align_to = rs.stream.color
    align = rs.align(align_to)
    rgbd = np.zeros((1,4,360,640))
    i=0
    print('################|Initialization sequence completed.|################')


    try:
        while(1):

            # Get frameset of color and depth
            frames = pipe.wait_for_frames()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame() #  640x480
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            # Post processing
            filtered = tp_ftr.process(st_ftr.process(dc_ftr.process(aligned_depth_frame)))
            filtered = th_ftr.process(filtered)#can be removed if person is outside frame

            rgb = np.asanyarray(color_frame.get_data())
            rgb = np.transpose(rgb,(2,0,1))
            depth = np.asanyarray(aligned_depth_frame.get_data(),dtype=np.uint16)
            im.show(rgb,args.bb)
            im.rgbsave(i, rgb, str('./' + args.out_rdir))
            im.dsave(i, depth, str('./' + args.out_ddir))
            np.savetxt('./' + args.annotations_dir + '/' + str(i) + '.csv', args.bb, delimiter=',')
            i = i + 1
            print('saved ' + str(i) + ' images')
            # time.sleep(1)
    finally:
        pipe.stop()
        print('################| Pipeline stopped |################')
Code example #23
    def run(self):
        # ========================
        # 1. Configure all streams
        # ========================
        self.rspipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, self.width, self.height,
                             rs.format.z16, 30)
        config.enable_stream(rs.stream.color, self.width, self.height,
                             rs.format.bgr8, 30)

        # ======================
        # 2. Start the streaming
        # ======================
        print("Starting up the Intel Realsense...")
        print("")
        profile = self.rspipeline.start(config)

        # Load the configuration here
        self.loadConfiguration(profile, self.json_file)

        # =================================
        # 3. The depth sensor's depth scale
        # =================================
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)
        print("")

        # ==========================================
        # 4. Create an align object.
        #    Align the depth image to the rgb image.
        # ==========================================
        align_to = rs.stream.depth
        align = rs.align(align_to)

        try:
            # ===========================================
            # 5. Skip the first 30 frames.
            # This gives the Auto-Exposure time to adjust
            # ===========================================
            for x in range(30):
                frames = self.rspipeline.wait_for_frames()
                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

            print("Intel Realsense started successfully.")
            print("")

            # ===========================================
            # 6. Setup gstreamer
            # Useful gst resources / cheatsheets
            # https://github.com/matthew1000/gstreamer-cheat-sheet/blob/master/rtmp.md
            # http://wiki.oz9aec.net/index.php/Gstreamer_cheat_sheet
            # https://github.com/matthew1000/gstreamer-cheat-sheet/blob/master/mixing.md
            # ===========================================
            CLI = ''
            caps = 'caps="video/x-raw,format=BGR,width=' + str(
                self.width
            ) + ',height=' + str(
                self.height * 2
            ) + ',framerate=(fraction)30/1,pixel-aspect-ratio=(fraction)1/1"'

            if platform.system() == "Linux":
                #assuming Linux means RPI

                CLI = 'flvmux name=mux streamable=true latency=3000000000 ! rtmpsink location="' + self.rtmp_url + ' live=1 flashver=FME/3.0%20(compatible;%20FMSc%201.0)" \
                    appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE ' + str(
                    caps) + ' ! \
                    videoconvert !  omxh264enc ! h264parse ! video/x-h264 ! \
                    queue max-size-buffers=0 max-size-bytes=0 max-size-time=180000000 min-threshold-buffers=1 leaky=upstream ! mux. \
                    alsasrc ! audio/x-raw, format=S16LE, rate=44100, channels=1 ! voaacenc bitrate=44100 ! aacparse ! audio/mpeg, mpegversion=4 ! \
                    queue max-size-buffers=0 max-size-bytes=0 max-size-time=4000000000 min-threshold-buffers=1 ! mux.'

            elif platform.system() == "Darwin":
                #macos
                #CLI='flvmux name=mux streamable=true ! rtmpsink location="'+  self.rtmp_url +' live=1 flashver=FME/3.0%20(compatible;%20FMSc%201.0)" \
                #    appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE '+ str(caps) +' ! \
                #    videoconvert ! vtenc_h264 ! video/x-h264 ! h264parse ! video/x-h264 ! \
                #    queue max-size-buffers=4 ! flvmux name=mux. \
                #    osxaudiosrc do-timestamp=true ! audioconvert ! audioresample ! audio/x-raw,rate=48000 ! faac bitrate=48000 ! audio/mpeg ! aacparse ! audio/mpeg, mpegversion=4 ! \
                #    queue max-size-buffers=4 ! mux.'

                CLI = 'appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE caps="video/x-raw,format=BGR,width=' + str(
                    self.width
                ) + ',height=' + str(
                    self.height * 2
                ) + ',framerate=(fraction)30/1,pixel-aspect-ratio=(fraction)1/1" ! videoconvert ! vtenc_h264 ! video/x-h264 ! h264parse ! video/x-h264 ! queue max-size-buffers=4 ! flvmux name=mux ! rtmpsink location="' + self.rtmp_url + '" sync=true   osxaudiosrc do-timestamp=true ! audioconvert ! audioresample ! audio/x-raw,rate=48000 ! faac bitrate=48000 ! audio/mpeg ! aacparse ! audio/mpeg, mpegversion=4 ! queue max-size-buffers=4 ! mux.'

            #TODO: windows

            print(CLI)
            self.gstpipe = Gst.parse_launch(CLI)

            self.appsrc = self.gstpipe.get_by_name("mysource")
            self.appsrc.set_property('emit-signals',
                                     True)  #tell sink to emit signals

            # Set up a pipeline bus watch to catch errors.
            self.bus = self.gstpipe.get_bus()
            self.bus.connect("message", self.on_bus_message)

            self.gstpipe.set_state(Gst.State.PLAYING)
            intrinsics = True

            buff = Gst.Buffer.new_allocate(None,
                                           self.width * self.height * 3 * 2,
                                           None)
            buff_depth_index = (self.width * self.height * 3) - 1

            hsv = np.zeros((self.height, self.width, 3), dtype=np.float32)
            hsv8 = np.zeros((self.height, self.width, 3), dtype=np.int8)
            depth_hsv = np.zeros((self.height, self.width, 3),
                                 dtype=np.float32)
            depth_image = np.zeros((self.height, self.width, 3),
                                   dtype=np.float32)

            start = timer()
            while not self.exit.is_set():
                # ======================================
                # 7. Wait for a coherent pair of frames:
                # ======================================
                frames = self.rspipeline.wait_for_frames(1000)
                framestart = timer()

                # =======================================
                # 8. Align the depth frame to color frame
                # =======================================
                aligned_frames = align.process(frames)

                # ================================================
                # 9. Fetch the depth and colour frames from stream
                # ================================================
                depth_frame = aligned_frames.get_depth_frame()
                color_frame = aligned_frames.get_color_frame()
                if not depth_frame or not color_frame:
                    continue

                # print the camera intrinsics just once. it is always the same
                if intrinsics:
                    print("Intel Realsense Camera Intrinsics: ")
                    print("========================================")
                    print(depth_frame.profile.as_video_stream_profile().
                          intrinsics)
                    print(color_frame.profile.as_video_stream_profile().
                          intrinsics)
                    print("")
                    intrinsics = False

                # =====================================
                # 10. Apply filtering to the depth image
                # =====================================
                # Apply a spatial filter without hole_filling (i.e. holes_fill=0)
                # depth_frame = self.spatial_filtering(depth_frame, magnitude=2, alpha=0.5, delta=10, holes_fill=1)
                # Apply hole filling filter
                # depth_frame = self.hole_filling(depth_frame)

                # ==================================
                # 11. Convert images to numpy arrays
                # ==================================
                depth_image = np.asanyarray(depth_frame.get_data()).astype(
                    np.float32)
                color_image = np.asanyarray(color_frame.get_data())

                # ======================================================================
                # 12. Convert depth to HSV
                # ==================================
                # We need to encode/pack the 16bit depth value to RGB
                # we do this by treating it as the Hue in HSV.
                # we then encode HSV to RGB and stream that
                # on the other end we reverse RGB to HSV, H will give us the depth value back.
                # HSV elements are in the 0-1 range so we need to normalize the depth array to 0-1
                # First set a far plane and set everything beyond that to 0

                clipped = depth_image > 4000
                depth_image[clipped] = 0

                # Now normalize using that far plane
                # cv expects the H in degrees, not 0-1 :(
                depth_image *= (360 / 4000)
                depth_hsv[:, :, 0] = depth_image
                depth_hsv[:, :, 1] = 1
                depth_hsv[:, :, 2] = 1
                discard = depth_image == 0
                s = depth_hsv[:, :, 1]
                v = depth_hsv[:, :, 2]
                s[discard] = 0
                v[discard] = 0

                # cv2.cvtColor to convert HSV to RGB
                hsv = cv2.cvtColor(depth_hsv, cv2.COLOR_HSV2BGR_FULL)

                # cv2 needs hsv to 8bit (0-255) to stack with the color image
                hsv8 = (hsv * 255).astype(np.uint8)

                buff.fill(0, color_image.tobytes())
                buff.fill(buff_depth_index, hsv8.tobytes())
                self.appsrc.emit("push-buffer", buff)

                #process any messages from gstreamer
                msg = self.bus.pop_filtered(Gst.MessageType.ERROR
                                            | Gst.MessageType.WARNING
                                            | Gst.MessageType.EOS
                                            | Gst.MessageType.INFO
                                            | Gst.MessageType.STATE_CHANGED)

                while (msg):
                    self.on_bus_message(msg)
                    msg = self.bus.pop_filtered(
                        Gst.MessageType.ERROR | Gst.MessageType.WARNING
                        | Gst.MessageType.EOS | Gst.MessageType.INFO
                        | Gst.MessageType.STATE_CHANGED)

                if (not self.exit.is_set()):
                    try:
                        if (not self.previewQueue.full()):
                            self.previewQueue.put_nowait((color_image, hsv8))
                    except:
                        pass

                msgprocesstime = timer()
                print("gstreamer fps:%s d:%s" %
                      (str(1 /
                           (msgprocesstime - start)), str(framestart - start)))
                start = timer()

        except:
            e = sys.exc_info()[0]
            print("Unexpected Error: %s" % e)
            self.statusQueue.put_nowait("ERROR: Unexpected Error: %s" % e)

        finally:
            # Stop streaming
            print("Stop realsense pipeline")
            self.rspipeline.stop()
            print("Pause gstreamer pipe")
            try:
                if (self.gstpipe.get_state()[1] is not Gst.State.PAUSED):
                    self.gstpipe.set_state(Gst.State.PAUSED)
            except:
                self.statusQueue.put_nowait("ERROR: Error pausing gstreamer")
                print("Error pausing gstreamer")

        self.statusQueue.put_nowait("INFO: Exiting Realsense Capture process")
        print("Exiting capture loop")
Code example #24
    def start(self):
        try:
            # Setup:
            pipe = rs.pipeline()

            #configure profiles
            config = rs.config()
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 6)
            config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 6)

            profile = pipe.start(config)

            # Skip 5 first frames to give the Auto-Exposure time to adjust
            for x in range(5):
                pipe.wait_for_frames()

            while True:
                # Store next frameset for later processing:
                frameset = pipe.wait_for_frames()
                color_frame = frameset.get_color_frame()
                color = np.asanyarray(color_frame.get_data())

                # Standard OpenCV boilerplate for running the net:
                height, width = color.shape[:2]
                expected = 300
                aspect = width / height
                resized_image = cv2.resize(
                    color, (int(round(expected * aspect)), expected))
                crop_start = int(round(expected * (aspect - 1) / 2))
                crop_img = resized_image[0:expected,
                                         crop_start:crop_start + expected]

                net = cv2.dnn.readNetFromCaffe(
                    "MobileNetSSD_deploy.prototxt",
                    "MobileNetSSD_deploy.caffemodel")
                in_scale_factor = 0.007843
                mean_val = 127.53
                class_names = ("background", "aeroplane", "bicycle", "bird",
                               "boat", "bottle", "bus", "car", "cat", "chair",
                               "cow", "diningtable", "dog", "horse",
                               "motorbike", "person", "pottedplant", "sheep",
                               "sofa", "train", "tvmonitor")

                blob = cv2.dnn.blobFromImage(crop_img, in_scale_factor,
                                             (expected, expected), mean_val,
                                             False)
                net.setInput(blob, "data")
                detections = net.forward("detection_out")

                label = detections[0, 0, 0, 1]
                xmin = detections[0, 0, 0, 3]
                ymin = detections[0, 0, 0, 4]
                xmax = detections[0, 0, 0, 5]
                ymax = detections[0, 0, 0, 6]

                class_name = class_names[int(label)]

                frameset = align.process(frameset)
                aligned_depth_frame = frameset.get_depth_frame()
                depth = np.asanyarray(aligned_depth_frame.get_data())

                # Crop depth data to the detected bounding box
                # (numpy indexes rows first, so slice with y before x):
                scale = height / expected
                xmin_depth = int((xmin * expected + crop_start) * scale)
                ymin_depth = int((ymin * expected) * scale)
                xmax_depth = int((xmax * expected + crop_start) * scale)
                ymax_depth = int((ymax * expected) * scale)
                depth = depth[ymin_depth:ymax_depth,
                              xmin_depth:xmax_depth].astype(float)

                # Convert raw depth units to meters using the device scale
                depth = depth * depth_scale
                dist, _, _, _ = cv2.mean(depth)
                print("Detected a {0} {1:.3} meters away.".format(
                    class_name, dist))

                json_object = json.dumps(DataObject(class_name, dist),
                                         default=jsonDefault)
                self.send_to_hub_callback(json_object)

        except Exception as e:
            print(e)
            pass
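
The example above serializes each detection with json.dumps(DataObject(class_name, dist), default=jsonDefault), but neither DataObject nor jsonDefault is defined in this excerpt. A hypothetical minimal sketch of what they could look like is given below; the field names are assumptions, only the json.dumps(obj, default=...) contract comes from the code.

import json

# Hypothetical sketch of the helpers assumed by the example above.
class DataObject:
    def __init__(self, class_name, distance):
        self.class_name = class_name
        self.distance = distance

def jsonDefault(obj):
    # json.dumps calls this for objects it cannot serialize natively;
    # returning __dict__ exposes the plain attributes
    return obj.__dict__

# Example: json.dumps(DataObject("person", 1.23), default=jsonDefault)
# -> '{"class_name": "person", "distance": 1.23}'
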
コード例 #25
0
def main(queue, testing):
    if testing:
        CMDS = {
            "roll": 1500,
            "pitch": 1500,
            "throttle": 1500,
            "yaw": 1500,
            "aux1": 1800,
            "aux2": 1800
        }

        roll_cmd = 1000

        while True:
            roll_cmd = roll_cmd + 1
            CMDS["roll"] = roll_cmd

            queue.put(CMDS)
            print("commmands sent: ", CMDS)
            time.sleep(0.1)

    else:
        try:
            # Create a context object. This object owns the handles to all connected realsense devices
            print("creating RS pipeline...")
            pipeline = rs.pipeline()

            # Configure streams
            print("configure streams...")
            config = rs.config()
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
            # alternative fast stream with narrow vertical FOV (left disabled)
            # config.enable_stream(rs.stream.depth, 0, 848, 100, rs.format.z16, 300)
            # config.enable_stream(rs.stream.depth)

            # Start streaming
            print("start streaming...")
            pipeline.start(config)

            # y-values to detect distances between
            detection_lower = 430
            detection_upper = 800

            det_left = 200
            det_right = 400

            loop_number = 0

            # 'display_image' is referenced below but not defined in this
            # excerpt; assume a simple debug-display flag here
            display_image = True

            while True:
                # This call waits until a new coherent set of frames is available on a device
                # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) is called
                loop_number = loop_number + 1
                print("realsense loop number: ", loop_number)

                if loop_number >= 1000:
                    break

                print("getting frame data...")
                frames = pipeline.wait_for_frames()
                depth = frames.get_depth_frame()

                if not depth: continue

                depth_image = np.asanyarray(depth.get_data())
                scaled_depth = cv2.convertScaleAbs(depth_image, alpha=0.08)

                truncated_depth = thresholdDepth(scaled_depth)
                truncated_depth, points = detectBlobs(truncated_depth)

                # drive by avoiding objects logic
                # example detections: [(489.6097106933594, 442.31201171875), (364.44451904296875, 405.34393310546875), (376.8372497558594, 278.6385803222656), (132.40438842773438, 347.2413024902344)]
                # (x, y)
                detections = fourSectionAvgColor(truncated_depth)

                if display_image:
                    print("showing image...")
                    depth_colormap = cv2.applyColorMap(scaled_depth,
                                                       cv2.COLORMAP_JET)
                    cv2.imshow('RealSense', depth_colormap)
                    cv2.imshow('Truncated Depth', truncated_depth)

                if (sum(detections[1]) <= 20 and sum(detections[2]) <= 20):
                    # drive_func(drive, "0")
                    mode = "MODE: FORWARD"
                    CMDS = {
                        "roll": 1500,
                        "pitch": 1550,
                        "throttle": 1550,
                        "yaw": 1500,
                        "aux1": 1800,
                        "aux2": 1800
                    }

                elif (sum(detections[1]) >= 20 and sum(detections[2]) >= 20):
                    # drive_func(drive, "4")
                    # both sides blocked: check this before the single-side
                    # cases, otherwise this branch can never be reached
                    mode = "MODE: STOPPED"
                    CMDS = {
                        "roll": 1500,
                        "pitch": 1500,
                        "throttle": 1500,
                        "yaw": 1500,
                        "aux1": 1800,
                        "aux2": 1800
                    }

                elif sum(detections[1]) >= 20:
                    # drive_func(drive, "1")
                    mode = "MODE: FLY RIGHT"
                    CMDS = {
                        "roll": 1500,
                        "pitch": 1500,
                        "throttle": 1550,
                        "yaw": 1550,
                        "aux1": 1800,
                        "aux2": 1800
                    }

                elif sum(detections[2]) >= 20:
                    # drive_func(drive, "2")
                    mode = "MODE: FLY LEFT"
                    CMDS = {
                        "roll": 1500,
                        "pitch": 1500,
                        "throttle": 1550,
                        "yaw": 1450,
                        "aux1": 1800,
                        "aux2": 1800
                    }

                queue.put(CMDS)

                print("commmands sent: ", CMDS)
                print(mode)

                # if detection_lower <= point[1] <= detection_upper:
                #     if det_left <= point[0] <= det_right:
                #         subprocess.run(['python3', 'drive.py', '--direction_index', '0'])
                #     elif det_left <= point[0] <= det_right:
                #         subprocess.run(['python3', 'drive.py', '--direction_index', '0'])

                k = cv2.waitKey(1) & 0xFF
                if k == 27:
                    break

            # done with loop
            print("finished loop, landing!")
            CMDS = "land"
            mode = "MODE: STOPPED"
            print("commmands sent: ", CMDS)
            print(mode)

            queue.put(CMDS)
            pipeline.stop()
            exit(0)

        except KeyboardInterrupt:
            CMDS = "land"

            queue.put(CMDS)
            pipeline.stop()
            exit(0)

        except Exception as e:
            print(e)
            # mirror the KeyboardInterrupt handler: command a landing on error
            queue.put("land")
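
The steering decisions above depend on thresholdDepth, detectBlobs and fourSectionAvgColor, none of which are defined in this excerpt. A hedged sketch of fourSectionAvgColor is given below, consistent only with how its result is consumed (sum(detections[i]) compared against 20): it splits the thresholded depth image into four vertical strips and returns the per-strip mean. The strip layout and the use of cv2.mean are assumptions.

import cv2

# Hypothetical sketch: split the image into four vertical strips and return
# the per-strip mean value, so that sum(result[i]) stays small when strip i
# is mostly empty (no nearby obstacle) after thresholding.
def fourSectionAvgColor(image):
    height, width = image.shape[:2]
    strip_w = width // 4
    sections = []
    for i in range(4):
        strip = image[:, i * strip_w:(i + 1) * strip_w]
        # cv2.mean returns a 4-tuple (one value per channel, zero-padded)
        sections.append(cv2.mean(strip))
    return sections

With a helper like this, a strip whose thresholded pixels are mostly zero yields a small sum, which the loop above treats as a clear direction of travel.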