Example #1
File: cityscopy.py    Project: dunland/cspy
    def realsense_init(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        config = rs.config()
        realsense_ctx = rs.context()
        connected_devices = []

        for i in range(len(realsense_ctx.devices)):
            detected_camera = realsense_ctx.devices[i].get_info(
                rs.camera_info.serial_number)
            connected_devices.append(detected_camera)

        # choose device if more than 1 connected:
        if len(connected_devices) > 1:
            print("choose device by pressing the number:")
            for i in range(len(realsense_ctx.devices)):
                print("[%s]: %s @ %s" % (i, realsense_ctx.devices[i].get_info(
                    rs.camera_info.name), realsense_ctx.devices[i].get_info(
                        rs.camera_info.physical_port)))
            idx = int(input(">>> "))
            device_product_line = connected_devices[idx]
            self.UDP_PORT = 5000 + idx
            print("sending at UDP %s:%s" % (self.UDP_IP, self.UDP_PORT))
        else:
            device_product_line = connected_devices[0]
            self.UDP_PORT = 5000
        config.enable_device(device_product_line)

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)

        # Start streaming
        try:
            # setup for USB 3
            try:
                config.enable_stream(rs.stream.color, 1920, 1080,
                                     rs.format.bgr8, 30)
                print("trying to stream 1920x1080...", end=" ")
                self.pipeline.start(config)
            except Exception:
                print("no success.")
                config.enable_stream(rs.stream.color, 1280, 720,
                                     rs.format.bgr8, 30)
                print("trying to stream 1280x720...", end=" ")
                self.pipeline.start(config)
        except Exception:
            # setup for USB 2
            print("no success.")
            config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
            print("streaming in 640x480...", end=" ")
            self.pipeline.start(config)

        # set sensitivity parameters:
        self.device = pipeline_profile.get_device().first_color_sensor()
        self.device.set_option(rs.option.exposure, self.exposure)
        self.device.set_option(rs.option.gain, self.gain)

        print("success!")
        print("Realsense initialization complete.")
Example #2
def make_realsense():
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 640, 360, rs.format.z16, 60)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 360, rs.format.bgr8, 60)

    # Start streaming
    pipeline.start(config)
    return pipeline
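A minimal usage sketch (an assumption, not part of the original example) showing how frames from the pipeline returned by make_realsense() could be consumed:

import numpy as np
import cv2

pipeline = make_realsense()
try:
    while True:
        # Wait for a coherent pair of depth and color frames
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        if not depth_frame or not color_frame:
            continue
        # Convert the color frame to a numpy array and display it
        color_image = np.asanyarray(color_frame.get_data())
        cv2.imshow('color', color_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    pipeline.stop()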
Example #3
def create_intelrs_pipeline():
    """ Creates the pipeline for the Intel RS camera stream

    Args:
        run_inferance_on_image: function to perform object detection with an image
        cap: stream from pc/laptop webcam camera 
        show_video: conditional to show the video from the live feed

    Returns:
        pipeline: simplifies the user interaction with the device
    """
    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()

    device_product_line = str(device.get_info(rs.camera_info.product_line))

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    return pipeline
Example #4
def cameraConfig(threshold_distance):
    # Configure depth and color streams
    pipeline = rs.pipeline()
    colorizer = rs.colorizer()
    config = rs.config()
    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    depth_sensor = pipeline_profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # The pipeline is configured here but not started; the caller is expected to
    # call pipeline.start(config). Compute the clipping distance (in depth units)
    # and the stream to align depth frames to.
    clipping_distance_in_meters = threshold_distance
    clipping_distance = clipping_distance_in_meters / depth_scale
    align_to = rs.stream.color

    return pipeline, align_to, config, clipping_distance
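A hedged usage sketch for cameraConfig() above (the caller-side code is an assumption, not part of the original example): the function only configures the camera, so the caller starts the pipeline and can use clipping_distance for background removal.

import numpy as np
import pyrealsense2 as rs

pipeline, align_to, config, clipping_distance = cameraConfig(threshold_distance=1.0)
profile = pipeline.start(config)
align = rs.align(align_to)

frames = align.process(pipeline.wait_for_frames())
depth_image = np.asanyarray(frames.get_depth_frame().get_data())
color_image = np.asanyarray(frames.get_color_frame().get_data())

# Grey out everything farther away than clipping_distance (expressed in depth units)
grey_color = 153
depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                      grey_color, color_image)
pipeline.stop()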
Example #5
    def initRealSense(self, resolution=(640, 480), fps=60):

        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        self.device_product_line = str(
            device.get_info(rs.camera_info.product_line))

        # Enable stream for RealSense
        res_x = resolution[0]
        res_y = resolution[1]
        self.config.enable_stream(rs.stream.depth, res_x, res_y, rs.format.z16,
                                  fps)
        self.config.enable_stream(rs.stream.color, res_x, res_y,
                                  rs.format.bgr8, fps)

        self.pipeline.start(self.config)

        # Depth jetmap init
        self.colorizer = rs.colorizer()
        # Create an align object
        align_to = rs.stream.color
        self.align = rs.align(align_to)
Example #6
    def init_reasense(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                             self.fps)

        if device_product_line == 'L500':
            config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8,
                                 self.fps)
        else:
            config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8,
                                 self.fps)

        self.pc = rs.pointcloud()

        # Start streaming
        self.profile = self.pipeline.start(config)
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        align_to = rs.stream.color
        self.align = rs.align(align_to)
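A hedged sketch (not part of the original class) of how the point cloud object created above could be used on aligned frames:

    # Hypothetical helper method (an assumption), using the attributes set up in init_reasense()
    def get_pointcloud_vertices(self):
        frames = self.align.process(self.pipeline.wait_for_frames())
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        # Map texture coordinates to the color frame and triangulate the depth frame
        self.pc.map_to(color_frame)
        points = self.pc.calculate(depth_frame)
        # Interpret the vertex buffer as an N x 3 float32 array of points in meters
        return np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)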
Example #7
    def __init__(self):
        self.isStreaming = False

        # Camera configuration
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

        if device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540,
                                      rs.format.bgr8, 30)
        else:
            self.config.enable_stream(rs.stream.color, 640, 480,
                                      rs.format.bgr8, 30)

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        self.align = rs.align(align_to)
Example #8
    def __init__(self):
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

        if device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540,
                                      rs.format.bgr8, 30)
        else:
            self.config.enable_stream(rs.stream.color, 640, 480,
                                      rs.format.bgr8, 30)

        profile = self.pipeline.start(self.config)

        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)

        clipping_distance_in_meters = 1
        clipping_distance = clipping_distance_in_meters / depth_scale

        align_to = rs.stream.color
        self.align = rs.align(align_to)
Example #9
    def __init__(self):

        bag = r'/home/vdr/Desktop/RealSense/test2.bag'

        self.pipeline = rs.pipeline()
        config = rs.config()

        #From a bag file
        config.enable_device_from_file(bag, False)
        config.enable_all_streams()

        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()

        playback = device.as_playback()
        playback.set_real_time(False)
        '''  #For real time capturing
        device_product_line = str(device.get_info(rs.camera_info.product_line))
        self.depth_sensor = pipeline_profile.get_device().first_depth_sensor()

        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

        if device_product_line == 'L500':
            config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
        else:
            config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        '''
        self.pipeline.start(config)
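A hedged sketch (an assumption, not in the original __init__) of draining frames from the bag opened above; with set_real_time(False) frames are delivered as fast as they are read:

    # Hypothetical method added for illustration, using the pipeline started above
    def read_all_frames(self):
        while True:
            success, frames = self.pipeline.try_wait_for_frames()
            if not success:
                break  # end of the recording reached
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            # ... process depth_frame / color_frame here ...
        self.pipeline.stop()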
Example #10
    def __init__(self, width=640, height=480, fps=30):
        super().__init__()
        self.width = width
        self.height = height
        self.fps = fps

        # Configure depth and color streams
        self.pipeline = realsense.pipeline()
        self.config = realsense.config()

        # Get device product line for setting a supporting resolution
        self.pipeline_wrapper = realsense.pipeline_wrapper(self.pipeline)
        self.pipeline_profile = self.config.resolve(self.pipeline_wrapper)
Example #11
def main():
    rospy.init_node("ImageCapturerIntel")
    # FPS.
    RATE = int(rospy.get_param('~RATE', 15))
    # Default webcam.
    CAMERA = int(rospy.get_param('~CAMERAID', 0))

    rospy.loginfo("INTEL" + str(CAMERA))
    image_publisher = rospy.Publisher("camaras/" + str(CAMERA) + "/",
                                      CompressedImage,
                                      queue_size=RATE)
    rospy.loginfo("*Node " + "ImageCapturerIntel-" + str(CAMERA) + " started*")

    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    pipeline.start(config)

    while (True):
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        if not color_frame:
            continue

        # Convert images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())
        if VERBOSE:
            cv2.imshow('imgIntel', color_image)  # display the captured image
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        msg = CompressedImage()
        msg.header.stamp = rospy.Time.now()
        msg.format = "jpeg"
        msg.data = np.array(cv2.imencode('.jpg', color_image)[1]).tostring()
        image_publisher.publish(msg)

    pipeline.stop()
Example #12
    def __init__(self, model_name):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.model_name = model_name

        # Get device product line for setting a supporting resolution
        self.pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        self.pipeline_profile = self.config.resolve(self.pipeline_wrapper)
        self.device = self.pipeline_profile.get_device()
        self.device_product_line = str(
            self.device.get_info(rs.camera_info.product_line))
        self.final_color_frames = []
        self.final_depth_frames = []
        print(f'[INFO] Device: {self.device}')
Example #13
    def __init__(self, IMG_SIZE):
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        self.pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        self.pipeline_profile = self.config.resolve(self.pipeline_wrapper)
        self.device = self.pipeline_profile.get_device()
        self.device_product_line = str(self.device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.depth, IMG_SIZE[0], IMG_SIZE[1], rs.format.z16, 30)

        if self.device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
        else:
            self.config.enable_stream(rs.stream.color, IMG_SIZE[0], IMG_SIZE[1], rs.format.bgr8, 30)
Example #14
    def __init__(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        # Start streaming
        self.pipeline.start(config)
Example #15
    def __init__(self, model_name):
        self.model = DepthNet()
        self.preview_size = (720, 720)
        self.project_model = "test2"
        self.model.load_state_dict(torch.load(f'{self.project_model}.net'))
        self.model.eval()
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.model_name = model_name

        # Get device product line for setting a supporting resolution
        self.pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        self.pipeline_profile = self.config.resolve(self.pipeline_wrapper)
        self.device = self.pipeline_profile.get_device()
        self.device_product_line = str(
            self.device.get_info(rs.camera_info.product_line))
        print(f'[INFO] Device: {self.device}')

        self.config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                                  30)
        if self.device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540,
                                      rs.format.bgr8, 30)
        else:
            self.config.enable_stream(rs.stream.color, 1280, 720,
                                      rs.format.bgr8, 30)

        # Start streaming
        self.pipeline.start(self.config)
        print("[INFO] Stream Started...")

        # Aligning depth to color image
        self.align = rs.align(rs.stream.color)

        cv2.namedWindow(f'Project: {self.model_name} Preview')

        while True:
            self.deliver_preview_frame(self.preview_size)
            key = cv2.waitKey(1)

            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                print("[INFO] Stream Stopped")
                return
Example #16
    def _init_device(self):
        """
        Open the pipeline for adquiring frames
        Returns:

        """

        # Threading
        self._lock = threading.Lock()
        self._thread = None
        self._thread_status = 'stopped'  # status: 'stopped', 'running'

        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.infrared, 640, 480, rs.format.y8, 30)
        if device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540, rs.format.rgb8, 30)
        else:
            self.config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

        # Start streaming
        self.profile = self.pipeline.start(self.config)

        self.depth_scale = self.profile.get_device().first_depth_sensor().get_depth_scale()

        self._color = np.zeros((self.color_height, self.color_width, 3))
        self._depth = np.zeros((self.depth_height, self.depth_width))
        self._ir = np.zeros((self.depth_height, self.depth_width))
        self._run()
        logger.info("Searching first frame")
        while True:
            if not np.all(self._depth == 0):
                logger.info("First frame found")
                break
Example #17
    def __init__(self):
        self.pipeline = rs.pipeline()
        config = rs.config()
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

        if device_product_line == 'L500':
            config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
        else:
            config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        depth_profile = rs.video_stream_profile(
            pipeline_profile.get_stream(rs.stream.depth))
        self.depth_intrinsics = depth_profile.get_intrinsics()

        self.profile = self.pipeline.start(config)
        pass
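A hedged sketch (not part of the original class) showing how the depth intrinsics captured above could be used to deproject a pixel into a 3D point:

    # Hypothetical method (an assumption), relying on self.pipeline and self.depth_intrinsics
    def center_point_3d(self):
        frames = self.pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        if not depth_frame:
            return None
        cx = self.depth_intrinsics.width // 2
        cy = self.depth_intrinsics.height // 2
        dist = depth_frame.get_distance(cx, cy)
        # Deproject the image-center pixel into camera coordinates (meters)
        return rs.rs2_deproject_pixel_to_point(self.depth_intrinsics, [cx, cy], dist)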
Example #18
    def __init__(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        # Get device product line for setting a supporting resolution
        self.pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        self.pipeline_profile = self.config.resolve(self.pipeline_wrapper)
        self.device = self.pipeline_profile.get_device()
        self.depth_scale = self.device.first_depth_sensor().get_depth_scale()
        # this is used to align the depth and color images since they are captured
        # from slightly different viewports
        self.aligner = rs.align(rs.stream.color)
        self.device_product_line = str(
            self.device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8,
                                  30)

        # Start streaming
        self.pipeline.start(self.config)
Example #19
    def __init__(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        
        # Add RealSense alignment
        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        # Start streaming
        self.pipeline.start(config)
Example #20
    def __init__(self):
        self.pipeline = rs.pipeline()
        self.config = rs.config()

        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()

        #For real time capturing
        device_product_line = str(device.get_info(rs.camera_info.product_line))
        self.depth_sensor = pipeline_profile.get_device().first_depth_sensor()

        self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

        if device_product_line == 'L500':
            self.config.enable_stream(rs.stream.color, 960, 540,
                                      rs.format.bgr8, 30)
        else:
            self.config.enable_stream(rs.stream.color, 640, 480,
                                      rs.format.bgr8, 30)

        self.config.enable_record_to_file('test2.bag')
        self.pipeline.start(self.config)
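A hedged usage sketch (an assumption; the class name below is hypothetical) that records a fixed number of frames into test2.bag and stops the pipeline so the file is finalized:

recorder = BagRecorder()      # hypothetical name for the class defined above
for _ in range(300):          # roughly 10 seconds at 30 fps
    recorder.pipeline.wait_for_frames()
recorder.pipeline.stop()      # flushes and closes test2.bag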
Example #21
    return image


if __name__ == '__main__':
    """
        test everything
    """

    args = sys.argv

    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
Example #22
def main():
    # start node
    rospy.init_node("kinect_find_xyz")
    rospy.loginfo("kinect_find_xyz node initialized")

    # create Point32 publisher
    pointPub = rospy.Publisher(pointPubTopic, PointStamped, queue_size=1)

    # create moving average filters
    WINDOW_SIZE = 4
    moving_avg_x = MovingWindowAverage(WINDOW_SIZE)
    moving_avg_y = MovingWindowAverage(WINDOW_SIZE)
    moving_avg_z = MovingWindowAverage(WINDOW_SIZE)

    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    # enable depth and color streams
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    while (not rospy.is_shutdown()):
        # grab frames from intel RealSense
        frames = pipeline.wait_for_frames()

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        # get depth frame intrinsics
        depth_intrin = aligned_depth_frame.profile.as_video_stream_profile(
        ).intrinsics

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        # convert to numpy arrays
        numpyRGB = np.asanyarray(color_frame.get_data())
        numpyDepth = np.asanyarray(aligned_depth_frame.get_data())

        # find min enclosing circle
        status, circle = findMinEnclosingCircle(numpyRGB)

        if (status):
            # draw circles if they are found
            (x, y), radius = circle
            cv2.circle(numpyRGB, (int(x), int(y)), int(radius), (0, 255, 0), 2)

            # find depth of detected circle
            reading = circle_to_realworld(aligned_depth_frame, circle,
                                          depth_intrin)

            average_x = moving_avg_x.addAndProc(reading[0])
            average_y = moving_avg_y.addAndProc(reading[1])
            average_z = moving_avg_z.addAndProc(reading[2])

            # invert sign of y-point to align with standard cartesian
            # views
            average_y = -1 * average_y

            # create PointStamped message and publish
            pointMessage = PointStamped()
            pointMessage.header.stamp = rospy.Time.now()
            pointMessage.point.x = average_x
            pointMessage.point.y = average_y
            pointMessage.point.z = average_z
            pointPub.publish(pointMessage)

        # publish color image
        imgMsg = imgBridge.cv2_to_imgmsg(numpyRGB, "bgr8")
        imgPub.publish(imgMsg)

        # display images
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(numpyDepth, alpha=0.03), cv2.COLORMAP_JET)
        cv2.imshow('Depth', depth_colormap)
        cv2.imshow('Video', numpyRGB)
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
Example #23
def main(argv):

    size, file_cnt = read_parameters(argv)

    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()

    auto_calibrated_device = rs.auto_calibrated_device(device)

    if not auto_calibrated_device:
        print("The connected device does not support auto calibration")
        return

    config.enable_stream(rs.stream.depth, 256, 144, rs.format.z16, 90)
    conf = pipeline.start(config)
    calib_dev = rs.auto_calibrated_device(conf.get_device())

    def on_chip_calib_cb(progress):
        print(". ")

    while True:
        try:
            operation = input(
                "Please select what the operation you want to do\nc - on chip calibration\nt - tare calibration\ng - get the active calibration\nw - write new calibration\ne - exit\n"
            )

            if operation == 'c':
                print("Starting on chip calibration")
                new_calib, health = calib_dev.run_on_chip_calibration(
                    file_cnt, on_chip_calib_cb, 5000)
                print("Calibration completed")
                print("health factor = ", health)

            if operation == 't':
                print("Starting tare calibration")
                ground_truth = float(
                    input("Please enter ground truth in mm\n"))
                new_calib, health = calib_dev.run_tare_calibration(
                    ground_truth, file_cnt, on_chip_calib_cb, 5000)
                print("Calibration completed")
                print("health factor = ", health)

            if operation == 'g':
                calib = calib_dev.get_calibration_table()
                print("Calibration", calib)

            if operation == 'w':
                print("Writing the new calibration")
                calib_dev.set_calibration_table(new_calib)
                calib_dev.write_calibration()

            if operation == 'e':
                pipeline.stop()
                return
                pipeline.stop()
                return

            print("Done\n")
        except Exception as e:
            print(e)
        except:
            print("A different Error")
Example #24
def main(args):

    if args.t:  # read tracking datas
        Pubs = JointPub()
        tracking_datas = []
        with open("/home/pku-hr6/yyf_ws/data/arm_running.txt", 'r') as rf:
            lines = rf.readlines()
            for line in lines:
                datas = line.split(" ")
                datas = [float(data) for data in datas]
                print(datas)
                tracking_datas.append(datas)
    else:
        rospy.init_node('data_saver')

    data_nums = args.n
    file_names = args.file
    datas = []
    save_files = os.path.join("./data", file_names + '.txt')
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    pc = rs.pointcloud()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()

    device_product_line = str(device.get_info(rs.camera_info.product_line))
    # device_product_line is SR300

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    # get scale of depth sensor
    depth_sensor = pipeline_profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 0.5  # 0.5 meter
    clipping_distance = clipping_distance_in_meters / depth_scale

    # create align map
    align_to = rs.stream.color
    align = rs.align(align_to)

    l_h = 170
    l_s = 42
    l_v = 41
    u_h = 185
    u_s = 255
    u_v = 255
    l_b = np.array([l_h, l_s, l_v])
    u_b = np.array([u_h, u_s, u_v])
    count = 0
    with open(save_files, "w") as wf:
        # while count < data_nums:
        while len(datas) < data_nums:
            # while True:
            # Wait for a coherent pair of frames: depth and color
            if args.t:
                if count >= len(tracking_datas) - 1:
                    break

            frames = pipeline.wait_for_frames()
            aligned_frame = align.process(frames)

            depth_frame = aligned_frame.get_depth_frame()
            color_frame = aligned_frame.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # remove background
            grey_color = 153
            depth_image_3d = np.dstack(
                (depth_image, depth_image,
                 depth_image))  # depth img is 1 channel, color is 3 channels
            bg_rmvd = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                grey_color, color_image)
            # get final img
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            hsv_map = cv2.cvtColor(bg_rmvd, cv2.COLOR_BGR2HSV)

            mask = cv2.inRange(hsv_map, l_b, u_b)
            res = cv2.bitwise_and(bg_rmvd, bg_rmvd, mask=mask)

            img = np.hstack((bg_rmvd, depth_colormap))

            # get object from mask map and calculate position
            mask_index = np.nonzero(mask)

            valid_p = False
            depth_point = np.array([])
            if not mask_index[0].shape[0] == 0:
                valid_p = True
                x_index = int(np.median(mask_index[1]))
                y_index = int(np.median(mask_index[0]))
                x_min = x_index - 20
                x_max = x_index + 20
                y_min = y_index - 20
                y_max = y_index + 20
                # Intrinsics & Extrinsics
                depth_intrin = depth_frame.profile.as_video_stream_profile(
                ).intrinsics
                # print(depth_intrin)
                #  640x480  p[314.696 243.657]  f[615.932 615.932]
                depth_pixel = [x_index, y_index]
                dist2obj = depth_frame.get_distance(x_index, y_index)
                depth_point = rs.rs2_deproject_pixel_to_point(
                    depth_intrin, depth_pixel, dist2obj)
                depth_point = [float(dep) * 100
                               for dep in depth_point]  # using cm
                depth_point = [
                    depth_point[2], -depth_point[0], -depth_point[1]
                ]  #[x,y,z](in base) = [z,-x,-y](in camera)
                depth_point = [
                    depth_point[0] + 1.8, depth_point[1], depth_point[2] + 5.3
                ]
                txt = "({:.2f},{:.2f},{:.2f})".format(depth_point[0],
                                                      depth_point[1],
                                                      depth_point[2])
                # print(txt)
                cv2.rectangle(res, (x_min, y_min), (x_max, y_max), (255, 0, 0),
                              2)
                cv2.putText(res, txt, (x_index, y_index),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)
            cv2.imshow('Align Example', img)
            cv2.imshow("mask", mask)
            cv2.imshow("res", res)
            # print(depth_point)

            # msg = rospy.wait_for_message('/motor_states/pan_tilt_port',MotorStateList,timeout=10)
            # # print(len(msg.motor_states))
            # joints,valid_q = get_joint_angle(msg)
            # if valid_p and valid_q:
            #     depth_point = np.array(depth_point)
            #     # print(joints)
            #     # print(depth_point)
            #     if args.only_q:
            #         data = joints
            #     else:
            #         data = np.append(depth_point, joints)
            #     print(data)
            #     datas.append(data)
            # if args.t: # pubulish joint
            #     data = tracking_datas[count]
            #     Pubs.publish_jointsD(data)
            #     count = count + 1
            if cv2.waitKey(100) & 0xFF == ord('q'):  # quit
                break
        for data in datas:
            for i in range(len(data)):
                wf.write(str(round(data[i], 2)))
                if i != len(data) - 1:
                    wf.write(" ")
                else:
                    wf.write("\n")
Example #25
def print_hi(name):
    # Configure depth and color streams
    pipeline = rs.pipeline()
    colorizer = rs.colorizer()
    threshold_distance = 0.4
    tr1 = rs.threshold_filter(min_dist=0.15, max_dist=threshold_distance)
    config = rs.config()
    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    depth_sensor = pipeline_profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    clipping_distance_in_meters = 0.9
    clipping_distance = clipping_distance_in_meters / depth_scale
    align_to = rs.stream.color
    align = rs.align(align_to)
    pipeline.start(config)
    # thresholds for skin detection
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    try:
        while True:
            frames = pipeline.wait_for_frames()
            # Align the depth frame to color frame
            aligned_frames = align.process(frames)
            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame(
            )  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()
            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue
            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Remove background - Set pixels further than clipping_distance to grey
            grey_color = 153
            depth_image_3d = np.dstack(
                (depth_image, depth_image,
                 depth_image))  # depth image is 1 channel, color is 3 channels
            bg_removed = np.where(
                (depth_image_3d > clipping_distance) | (depth_image_3d <= 0),
                grey_color, color_image)

            # Render images
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))
            # cv2.namedWindow('Background Removed', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Color+depth', images)

            # images[270:, :] = [0, 0, 0]
            # images[0:240,:] = [0,0,0]
            # images[:, 0:50] = [0, 0, 0]
            # images[:, 630:] = [0, 0, 0]

            img_hsv = cv2.cvtColor(bg_removed, cv2.COLOR_BGR2HSV)
            ## Generate an HSV mask for the blue pen colour range
            lower_red = np.array([94, 80, 2], dtype="uint8")
            upper_red = np.array([126, 255, 255], dtype="uint8")
            mask = cv2.inRange(img_hsv, lower_red, upper_red)
            bluepen = cv2.bitwise_and(bg_removed, bg_removed, mask=mask)

            cv2.imshow('BluePen', bluepen)
            bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)
            fgmask = bgModel.apply(bluepen)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            img = cv2.bitwise_and(bluepen, bluepen, mask=fgmask)

            # Skin detect and thresholding
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            lower = np.array([94, 80, 2], dtype="uint8")
            upper = np.array([126, 255, 255], dtype="uint8")
            skinMask = cv2.inRange(hsv, lower, upper)
            kernel = np.ones((3, 3), np.uint8)
            skinMask = cv2.erode(skinMask, kernel, iterations=2)
            skinMask = cv2.dilate(skinMask, kernel, iterations=1)

            cv2.imshow('Threshold Hands', skinMask)
            contours, hierarchy = cv2.findContours(skinMask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            length = len(contours)
            maxArea = -1
            drawing = np.zeros(img.shape, np.uint8)
            if length > 0:
                for i in range(length):
                    temp = contours[i]
                    area = cv2.contourArea(temp)
                    if area > maxArea:
                        maxArea = area
                        ci = i
                        res = contours[ci]
                hull = cv2.convexHull(res)
                # drawing = np.zeros(img.shape, np.uint8)
                cx, cy = centroid(res)

                print(aligned_depth_frame.get_distance(cx, cy))
                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
            else:
                drawing = np.zeros(img.shape, np.uint8)
            cv2.imshow('DRAWING', drawing)

            # bluepen_gray = cv2.cvtColor(bluepen, cv2.COLOR_BGR2HSV)
            # contours, hierarchy = cv2.findContours(bluepen_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # length = len(contours)
            # drawing = np.zeros(bluepen_gray.shape, np.uint8)
            # maxArea = -1
            # if length > 0:
            #     for i in xrange(length):
            #         temp = contours[i]
            #         area = cv2.contourArea(temp)
            #         if area > maxArea:
            #             maxArea = area
            #             ci = i
            #             res1 = contours[ci]
            #
            #     hull = cv2.convexHull(res1)
            #
            #     cv2.drawContours(drawing, [res1], 0, (0, 255, 0), 2)
            #     cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
            # cv2.imshow('',drawing)
            # length = len(contours)
            # print(length)
            # drawing = np.zeros(res.shape, np.uint8)
            # maxArea = -1
            # if length > 0:
            #     for i in xrange(length):
            #         temp = contours[i]
            #         area = cv2.contourArea(temp)
            #         if area > maxArea:
            #             maxArea = area
            #             ci = i
            #             res = contours[ci]
            #
            #     hull = cv2.convexHull(res)
            #
            #
            #     cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
            #     cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
            # cv2.imshow('Only contour for calibration', drawing)

            # cv2.imshow('BluePen', res)

            # converted = cv2.cvtColor(images, cv2.COLOR_BGR2HSV)
            # skinMask = cv2.inRange(converted, lower, upper)
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
            # skinMask = cv2.erode(skinMask, kernel, iterations=2)
            # skinMask = cv2.dilate(skinMask, kernel, iterations=1)
            #
            # skinMask = cv2.GaussianBlur(skinMask, (5, 5), 0)
            # skin = cv2.bitwise_and(images, images, mask=skinMask)

            cv2.waitKey(1)

            # img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            # ## Gen lower mask (0-5) and upper mask (175-180) of RED
            # mask1 = cv2.inRange(img_hsv, (0, 50, 20), (5, 255, 255))
            # mask2 = cv2.inRange(img_hsv, (175, 50, 20), (180, 255, 255))
            #
            # ## Merge the mask and crop the red regions
            # mask = cv2.bitwise_or(mask1, mask2)
            # croped = cv2.bitwise_and(img, img, mask=mask)

    finally:

        # Stop streaming
        pipeline.stop()

    # threshold_filter = rs.threshold_filter()
    # threshold_filter.set_option(rs.option.max_distance, 1)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                         'haarcascade_frontalface_default.xml')
Example #26
    def marker_pose(self):
        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8,
                                  30)
        self.pipeline.start(self.config)

        # self.cap.set(3, 1280)
        # self.cap.set(4, 720)
        # set dictionary size depending on the aruco marker selected
        self.param.adaptiveThreshConstant = 7
        # setup matrix from imu to cam
        self.imu_cam[0][1] = -1.0
        self.imu_cam[0][3] = 0.06
        self.imu_cam[1][0] = -1.0
        self.imu_cam[1][3] = 0.04
        self.imu_cam[2][2] = -1.0
        self.imu_cam[2][3] = -0.08
        self.imu_cam[3][3] = 1.0

        # create vector tvec1, tvec2
        tvec1 = np.zeros((4, 1), dtype=float)  # np.float is removed in recent NumPy
        tvec1[3][0] = 1.0
        tvec2 = np.zeros((4, 1), dtype=float)
        # tvec2[3][0] = 1.0

        # define the ids of marker
        # a = 0 # the index of first marker need to detect
        self.ids_target[0] = 11
        self.ids_target[1] = 15

        # markerLength=0.4

        while not rospy.is_shutdown():
            frames = self.pipeline.wait_for_frames()
            # ret, frame = self.cap.read()
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue
            frame = np.asanyarray(color_frame.get_data())
            # operations on the frame
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # lists of ids and the corners belonging to each id
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, self.dict, parameters=self.param)

            if np.all(ids is not None):
                for i in range(0, ids.size):
                    if ids[i][0] == self.ids_target[0]:
                        self.corners = corners[i]
                        markerLength = 0.4

                        ret1 = aruco.estimatePoseSingleMarkers(
                            corners=self.corners,
                            markerLength=markerLength,
                            cameraMatrix=self.mtx,
                            distCoeffs=self.dist)
                        rvec, tvec = ret1[0][0, 0, :], ret1[1][0, 0, :]

                        (rvec - tvec).any(
                        )  # get rid of that nasty numpy value array error

                        tvec1[0][0] = tvec[0]
                        tvec1[1][0] = tvec[1]
                        tvec1[2][0] = tvec[2]

                        ## marker in the body (UAV frame)
                        fly_pos = PS()
                        tvec2 = np.matmul(self.imu_cam, tvec1)
                        fly_pos.pose.position.x = -tvec2[0][0]
                        fly_pos.pose.position.y = -tvec2[1][0]
                        fly_pos.pose.position.z = -tvec2[2][0]
                        # publish marker in body frame
                        self.fly_pos_pub.publish(fly_pos)

                        # if (self.local_pos[3] > -0.1 and self.local_pos[3] < 0.1):
                        marker_pos = PS()
                        tvec2 = np.matmul(self.imu_cam, tvec1)
                        marker_pos.pose.position.x = tvec2[0][0]
                        marker_pos.pose.position.y = tvec2[1][0]
                        marker_pos.pose.position.z = tvec2[2][0]
                        self.aruco_marker_pos_pub.publish(marker_pos)

                        # -- Draw the detected marker and put a reference frame over it
                        # aruco.drawDetectedMarkers(frame, corners, ids)
                        aruco.drawAxis(frame, self.mtx, self.dist, rvec, tvec,
                                       0.1)
                        str_position0 = "Marker Position in Camera frame: x=%f  y=%f  z=%f" % (
                            tvec2[0][0], tvec2[1][0], tvec2[2][0])
                        cv2.putText(frame, str_position0, (0, 50), self.font,
                                    0.7, (0, 255, 0), 1, cv2.LINE_AA)
                        self.rate.sleep()

            cv2.imshow("frame", frame)
            cv2.waitKey(1)
Example #27
                    type=int,
                    default=720,
                    help='Image height expected by OpenCVB')
parser.add_argument('--pipeName',
                    default='',
                    help='The name of the input pipe for image data.')
args = parser.parse_args()

pipeName = '//./pipe/' + args.pipeName
pipeOut = open(pipeName, 'wb')
pipeIn = open(pipeName + 'in', 'rb')

rsPipeline = rs.pipeline()
rsConfig = rs.config()

rsPipeline_wrapper = rs.pipeline_wrapper(rsPipeline)
rsPipeline_profile = rsConfig.resolve(rsPipeline_wrapper)
device = rsPipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

rsConfig.enable_stream(rs.stream.depth, args.Width, args.Height, rs.format.z16,
                       30)
rsConfig.enable_stream(rs.stream.color, args.Width, args.Height,
                       rs.format.bgr8, 30)
rsConfig.enable_stream(rs.stream.infrared, 1, args.Width, args.Height,
                       rs.format.y8, 30)
rsConfig.enable_stream(rs.stream.infrared, 2, args.Width, args.Height,
                       rs.format.y8, 30)
#rsConfig.enable_stream(rs.stream.gyro)
#rsConfig.enable_stream(rs.stream.accel)
Example #28
import pyrealsense2 as rs
import numpy as np

# Quick reference of commonly used pyrealsense2 classes and methods:
#   rs.pipeline          - start(), stop(), get_active_profile(), poll_for_frames(),
#                          wait_for_frames(), try_wait_for_frames()
#   rs.pipeline_profile  - get_device(), get_stream(), get_streams()
#   rs.pipeline_wrapper
#   rs.playback, rs.playback_status
#   rs.points, rs.pose, rs.pose_frame, rs.pose_stream_profile
#   rs.config            - enable_stream(), enable_all_streams(), enable_device()

pipe = rs.pipeline()
config = rs.config()
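A minimal lifecycle sketch (an assumption, not part of the original listing) tying the names above together:

config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipe.start(config)
for _ in range(30):
    success, frames = pipe.try_wait_for_frames()
    if not success:
        break
    color_image = np.asanyarray(frames.get_color_frame().get_data())
pipe.stop()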
Example #29
def main():

    xcent = 0
    ycent = 0

    xstart = 0
    xend = 0

    ystart = 0

    yend = 0

    StanSmith = 0

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    config.enable_stream(rs.stream.color, args.cam_width, args.cam_height,
                         rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    with tf.Session() as sess:
        model_cfg, model_outputs = posenet.load_model(args.model, sess)
        output_stride = model_cfg['output_stride']

        # ROS extras
        pub = rospy.Publisher('body_pose', Transform, queue_size=10)
        rospy.init_node('talker', anonymous=True)
        rate = rospy.Rate(100)  # 100 Hz

        count_stand = 0
        count_sit = 0
        count_unknown = 0

        pose = "unknown"
        color1 = (255, 255, 255)

        queSize = 1
        q1 = []
        for i in range(queSize):
            q1.append(0)

        start = time.time()
        frame_count = 0
        while not rospy.is_shutdown():

            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()

            # Convert images to numpy arrays
            color_image = np.asanyarray(color_frame.get_data())

            input_image, display_image, output_scale = posenet.read_cap(
                color_image,
                scale_factor=args.scale_factor,
                output_stride=output_stride)

            heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
                model_outputs, feed_dict={'image:0': input_image})

            pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
                heatmaps_result.squeeze(axis=0),
                offsets_result.squeeze(axis=0),
                displacement_fwd_result.squeeze(axis=0),
                displacement_bwd_result.squeeze(axis=0),
                output_stride=output_stride,
                max_pose_detections=10,
                min_pose_score=amin_pose_score)

            keypoint_coords *= output_scale

            if not args.notxt:
                print()
                print("Results for image:")
                for pi in range(len(pose_scores)):
                    if pose_scores[pi] == 0.:
                        break
                    print('Pose #%d, score = %f' % (pi, pose_scores[pi]))

                    poses = [keypoint_scores[pi, :], keypoint_coords[pi, :, :]]
                    left_index = [5, 11, 13]
                    right_index = [6, 12, 14]
                    left_side = []
                    right_side = []

                    for i in left_index:
                        left_side.append([
                            poses[0][i],
                            round(poses[1][i][0]),
                            round(poses[1][i][1])
                        ])

                    for i in right_index:
                        right_side.append([
                            poses[0][i],
                            round(poses[1][i][0]),
                            round(poses[1][i][1])
                        ])

                    if check_side(right_side):
                        if stand_sit(right_side) == 1:
                            q1.pop(0)
                            q1.append(1)
                        else:
                            q1.pop(0)
                            q1.append(-1)
                    elif check_side(left_side):
                        if stand_sit(left_side) == 1:
                            q1.pop(0)
                            q1.append(1)
                        else:
                            q1.pop(0)
                            q1.append(-1)
                    else:
                        q1.pop(0)
                        q1.append(0)
                    print(q1)
                    mean_pose = statistics.mean(q1)

                    if mean_pose > 0:
                        pose = "stand"
                        color1 = (255, 0, 0)
                        count_stand = count_stand + 1
                        StanSmith = 1
                    elif mean_pose < 0:
                        pose = "sit"
                        color1 = (0, 255, 0)
                        count_sit = count_sit + 1
                        StanSmith = 2
                    else:
                        pose = "unknown"
                        color1 = (0, 0, 255)
                        count_unknown = count_unknown + 1
                        StanSmith = 0

                    print("meanpose: %f" % mean_pose)
                    print(pose)

                    lSs = poses[0][5]  # lShoulder score
                    rSs = poses[0][6]  # rShoulder score
                    lHs = poses[0][11]  # lHip score
                    rHs = poses[0][12]  # rHip score
                    lSc = poses[1][5]  # lShoulder coordinates
                    rSc = poses[1][6]  # rShoulder coordinates
                    lHc = poses[1][11]  # lHip coordinates
                    rHc = poses[1][12]  # rHip coordinates

                    # Visibility flags: 1 when the keypoint score clears the
                    # part-score threshold, otherwise 0
                    lS = int(lSs > amin_part_score)  # left shoulder spotted
                    rS = int(rSs > amin_part_score)  # right shoulder spotted
                    lH = int(lHs > amin_part_score)  # left hip spotted
                    rH = int(rHs > amin_part_score)  # right hip spotted

                    if lS != 0 or rS != 0:  # At least one shoulder visible
                        if lS == 1 and rS == 1:  # Both shoulders visible
                            if lH == 1 and rH == 1:  # Both shoulders, both hips
                                print("Both shoulders, both hips")
                                ycent = (lSc[0] + rSc[0] + lHc[0] + rHc[0]) / 4
                                xcent = (lSc[1] + rSc[1] + lHc[1] + rHc[1]) / 4

                                # Box creator
                                buffer = min(abs(lSc[1] - rSc[1]),
                                             abs(lHc[1] - rHc[1])) / 2
                                buffer *= 0.80
                                xstart = xcent - buffer
                                xend = xcent + buffer
                                ystart = ycent - buffer
                                yend = ycent + buffer

                            elif lH == 0 and rH == 0:
                                print("Only shoulders")
                                # Do coordinates from both shoulders only
                                ycent = (lSc[0] + rSc[0]) / 2
                                xcent = (lSc[1] + rSc[1]) / 2

                                buffer = abs(lSc[1] - rSc[1]) / 2
                                buffer *= 0.40
                                xstart = xcent - buffer
                                xend = xcent + buffer
                                ystart = ycent - buffer
                                yend = ycent + buffer

                            elif lH == 1:
                                print("Both shoulders, left hip")
                                # Do triangle, left hip both shoulders
                                ycent = (lSc[0] + rSc[0] + lHc[0]) / 3
                                xcent = (lSc[1] + rSc[1] + lHc[1]) / 3

                                buffer = abs(lSc[1] - rSc[1]) / 2
                                buffer *= 0.80
                                xstart = xcent - buffer
                                xend = xcent + buffer
                                ystart = ycent - buffer
                                yend = ycent + buffer

                            else:
                                print("Both shoulders, right hip")
                                # Do triangle right hip both shoulders
                                ycent = (lSc[0] + rSc[0] + rHc[0]) / 3
                                xcent = (lSc[1] + rSc[1] + rHc[1]) / 3

                                buffer = abs(lSc[1] - rSc[1]) / 2
                                buffer *= 0.80
                                xstart = xcent - buffer
                                xend = xcent + buffer
                                ystart = ycent - buffer
                                yend = ycent + buffer

                        else:  # Only one shoulder visible
                            if lS == 1:  # left Shoulder
                                if lH == 1 and rH == 1:
                                    print("Left shoulder, both hips")
                                    # Do lower triangle, left shoulder
                                    ycent = (lSc[0] + lHc[0] + rHc[0]) / 3
                                    xcent = (lSc[1] + lHc[1] + rHc[1]) / 3

                                    buffer = abs(lHc[1] - rHc[1]) / 2
                                    buffer *= 0.75
                                    xstart = xcent - buffer
                                    xend = xcent + buffer
                                    ystart = ycent - buffer
                                    yend = ycent + buffer

                                elif rH == 1:
                                    print("Left shoulder, right hip")
                                    #  Do center between left shoulder and right hip
                                    ycent = (lSc[0] + rHc[0]) / 2
                                    xcent = (lSc[1] + rHc[1]) / 2

                                    buffer = abs(lSc[1] - rHc[1]) / 2
                                    buffer *= 0.80
                                    xstart = xcent - buffer
                                    xend = xcent + buffer
                                    ystart = ycent - buffer
                                    yend = ycent + buffer

                            else:  # right shoulder
                                if lH == 1 and rH == 1:
                                    print("Right shoulder, both hips")
                                    # Do lower triangle, right shoulder
                                    ycent = (rSc[0] + lHc[0] + rHc[0]) / 3
                                    xcent = (rSc[1] + lHc[1] + rHc[1]) / 3

                                    buffer = abs(lHc[1] - rHc[1]) / 2
                                    buffer *= 0.75
                                    xstart = xcent - buffer
                                    xend = xcent + buffer
                                    ystart = ycent - buffer
                                    yend = ycent + buffer

                                elif lH == 1:
                                    print("Right shoulder, left hip")
                                    #  Do center between right shoulder and left hip
                                    ycent = (rSc[0] + lHc[0]) / 2
                                    xcent = (rSc[1] + lHc[1]) / 2

                                    buffer = abs(lHc[1] - rSc[1]) / 2
                                    buffer *= 0.80
                                    xstart = xcent - buffer
                                    xend = xcent + buffer
                                    ystart = ycent - buffer
                                    yend = ycent + buffer
                    else:
                        print('no torso spotted')
                        # fall back to a zero-size box so the drawing calls
                        # below have valid coordinates when nothing is found
                        xcent = 0
                        ycent = 0
                        xstart = xend = 0
                        ystart = yend = 0

                    cv2.circle(display_image, (round(xcent), round(ycent)), 5,
                               color1, 2)  # Draw a circle
                    cv2.rectangle(display_image, (round(xend), round(yend)),
                                  (round(xstart), round(ystart)), color1, 2)

                    # Publish this person's result as a geometry_msgs Transform:
                    # translation carries the pose index and stand/sit class,
                    # rotation carries the bounding-box corners (x/y start, x/y end)
                    rosmsg = geometry_msgs.msg.Transform()

                    rosmsg.translation.x = pi
                    rosmsg.translation.y = StanSmith
                    rosmsg.translation.z = 0

                    rosmsg.rotation.x = round(xstart)
                    rosmsg.rotation.y = round(ystart)
                    rosmsg.rotation.z = round(xend)
                    rosmsg.rotation.w = round(yend)

                    print(pi)
                    rospy.loginfo(rosmsg)
                    pub.publish(rosmsg)
                    rate.sleep()

            # TODO this isn't particularly fast, use GL for drawing and display someday...
            overlay_image = posenet.draw_skel_and_kp(
                display_image,
                pose_scores,
                keypoint_scores,
                keypoint_coords,
                min_pose_score=amin_pose_score,
                min_part_score=amin_part_score)

            #cv2.putText(overlay_image, "Unknown: %f" % count_unknown, (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            #cv2.putText(overlay_image, "Sitting: %f" % count_sit, (50,100), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            #cv2.putText(overlay_image, "Standing: %f" % count_stand, (50,150), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.imshow('posenet', overlay_image)
            frame_count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            print("Unknown: %f" % count_unknown)
            print("Sitting: %f" % count_sit)
            print("Standing: %f" % count_stand)

        print('Average FPS: ', frame_count / (time.time() - start))
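
The helpers check_side() and stand_sit() used in the loop above are defined outside this excerpt. A minimal hypothetical sketch, assuming check_side() only accepts a body side whose shoulder, hip and knee scores clear a confidence threshold, and stand_sit() returns 1 (standing) when the vertical hip-to-knee span is still comparable to the shoulder-to-hip span; the 0.15 and 0.6 constants are illustrative, not taken from the original code:

def check_side(side, min_score=0.15):
    # side = [[score, y, x], ...] for the shoulder, hip and knee of one body side
    return all(kp[0] > min_score for kp in side)

def stand_sit(side):
    # Hypothetical heuristic: standing when the thigh is still extended
    # vertically relative to the torso, otherwise sitting.
    shoulder, hip, knee = side
    torso = abs(hip[1] - shoulder[1])  # vertical shoulder-to-hip distance
    thigh = abs(knee[1] - hip[1])      # vertical hip-to-knee distance
    return 1 if thigh > 0.6 * torso else -1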
Example #30
0
    def marker_pose(self):
        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = self.config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        self.config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8,
                                  15)
        self.pipeline.start(self.config)

        # self.cap.set(3, 1280)
        # self.cap.set(4, 720)
        # set dictionary size depending on the aruco marker selected
        self.param.adaptiveThreshConstant = 7
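        # self.dict, self.param, self.mtx and self.dist are created outside this
        # excerpt; a typical setup (assumed, using the classic cv2.aruco API) is:
        #   self.dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        #   self.param = aruco.DetectorParameters_create()
        #   self.mtx, self.dist = <camera matrix and distortion from calibration>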
        # set up the homogeneous transform from the camera frame to the IMU/body frame
        self.imu_cam[0][1] = -1.0
        self.imu_cam[0][3] = 0.06
        self.imu_cam[1][0] = -1.0
        self.imu_cam[1][3] = 0.04
        self.imu_cam[2][2] = -1.0
        self.imu_cam[2][3] = -0.08
        self.imu_cam[3][3] = 1.0
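        # Assuming self.imu_cam starts as a 4x4 zero matrix, the assembled
        # camera -> body homogeneous transform is
        #   [[ 0, -1,  0,  0.06],
        #    [-1,  0,  0,  0.04],
        #    [ 0,  0, -1, -0.08],
        #    [ 0,  0,  0,  1.00]]
        # i.e. the camera axes are swapped/negated into the body frame and the
        # camera is mounted at a fixed offset from the IMU.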

        # create homogeneous 4x1 column vectors tvec1, tvec2
        tvec1 = np.zeros((4, 1), dtype=np.float64)
        tvec1[3][0] = 1.0
        tvec2 = np.zeros((4, 1), dtype=np.float64)
        # tvec2[3][0] = 1.0

        # define the ids of marker
        # a = 0 # the index of first marker need to detect
        self.ids_target[0] = 11
        self.ids_target[1] = 15
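        # Marker 11 is the large (0.4 m) marker tracked while the estimated
        # altitude is above 3 m; marker 15 is the small (0.08 m) marker used
        # for the final approach below 3 m (see the two branches in the loop).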

        # markerLength=0.4

        while not rospy.is_shutdown():
            frames = self.pipeline.wait_for_frames()
            # ret, frame = self.cap.read()
            color_frame = frames.get_color_frame()
            if not color_frame:
                continue
            frame = np.asanyarray(color_frame.get_data())
            # operations on the frame: convert the BGR color frame to grayscale
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # lists of ids and the corners belonging to each id
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray, self.dict, parameters=self.param)

            if ids is not None:
                ids_marker = True
                self.check_marker_detection.publish(ids_marker)
                for i in range(0, ids.size):
                    if self.altitude > 3.0:
                        if ids[i][0] == self.ids_target[0]:
                            self.corners = corners[i]

                            markerLength = 0.4

                            ret1 = aruco.estimatePoseSingleMarkers(
                                corners=self.corners,
                                markerLength=markerLength,
                                cameraMatrix=self.mtx,
                                distCoeffs=self.dist)
                            rvec, tvec = ret1[0][0, 0, :], ret1[1][0, 0, :]
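                            # estimatePoseSingleMarkers() returns rvecs/tvecs of
                            # shape (N, 1, 3); [0, 0, :] picks the first detection.
                            # tvec is the marker position in the camera frame, in
                            # the same units as markerLength (metres here).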
                            # -- Draw the detected marker and put a reference frame over it
                            aruco.drawDetectedMarkers(frame, corners, ids)
                            aruco.drawAxis(frame, self.mtx, self.dist, rvec,
                                           tvec, 0.1)
                            str_position0 = "Marker Position in Camera frame: x=%f  y=%f  z=%f" % (
                                tvec[0], tvec[1], tvec[2])
                            cv2.putText(frame, str_position0, (0, 50),
                                        self.font, 0.7, (0, 255, 0), 1,
                                        cv2.LINE_AA)

                            (rvec - tvec).any(
                            )  # get rid of that nasty numpy value array error

                            tvec1[0][0] = tvec[0]
                            tvec1[1][0] = tvec[1]
                            tvec1[2][0] = tvec[2]

                            self.altitude = tvec[2]

                            alpha = 5.0
                            self.beta[0] = abs(
                                np.rad2deg(np.arctan(tvec[0] / tvec[2])))
                            self.beta[1] = abs(
                                np.rad2deg(np.arctan(tvec[1] / tvec[2])))
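                            # beta holds the marker's angular offset from the
                            # camera's optical axis (in degrees) along x and y;
                            # check_angle() presumably compares it against alpha
                            # to choose between lateral correction and descent.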

                            # decide whether to move laterally or descend
                            check_move = self.check_angle(alpha)
                            self.check_move_position.publish(check_move)

                            # marker position in the body (UAV) frame
                            marker_pos = PS()
                            tvec2 = np.matmul(self.imu_cam, tvec1)
                            marker_pos.pose.position.x = tvec2[0][0]
                            marker_pos.pose.position.y = tvec2[1][0]
                            marker_pos.pose.position.z = tvec2[2][0]
                            # publish marker in body frame
                            self.aruco_marker_pos_pub.publish(marker_pos)

                            # if (self.local_pos[3] > -0.1 and self.local_pos[3] < 0.1):
                            ## marker in the global frame
                            fly_pos = PS()
                            fly_pos.pose.position.x = tvec2[0][0] + self.local_pos[0]
                            fly_pos.pose.position.y = tvec2[1][0] + self.local_pos[1]
                            fly_pos.pose.position.z = tvec2[2][0] + self.local_pos[2]
                            # publish marker position in the global frame
                            self.fly_pos_pub.publish(fly_pos)
                            self.rate.sleep()
                            # publish target position in world frame
                            # target_pos = PS()
                            # target_pos.pose.position.x = self.local_pos[0] + tvec2[0][0]
                            # target_pos.pose.position.y = self.local_pos[1] + tvec2[1][0]
                            # self.target_position.publish(target_pos)
                            # if (self.altitude < 4):
                            #     a = 1
                            #     markerLength = 0.08
                            #     break
                            # check_err = np.linalg.norm([tvec2[0][0], tvec2[1][0]])
                            # self.check_error_pos.publish(check_err)
                            # self.rate.sleep()

                            #str_position0 = "Marker Position in Camera frame: x=%f  y=%f " % (tvec2[0][0], tvec2[1][0])
                            # cv2.putText(frame, str_position0, (0, 50), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                            # else:
                            #     # # change form
                            #     rotMat = tr.eulerAnglesToRotationMatrix([0, 0, self.local_pos[3]])
                            #     rotMat = np.matmul(rotMat, self.imu_cam)
                            #     # rotMat = rotMat[0:3, 0:3]
                            #     tvec2 = np.matmul(rotMat, tvec1)
                            #     # publish marker position in uav frame
                            #     marker_pos = PS()
                            #     marker_pos.pose.position.x = tvec2[0][0]
                            #     marker_pos.pose.position.y = tvec2[1][0]
                            #     marker_pos.pose.position.z = tvec2[2][0]
                            #     self.aruco_marker_pos_pub.publish(marker_pos)

                            #     # publish target position in world frame
                            #     target_pos = PS()
                            #     target_pos.pose.position.x = self.local_pos[0] + tvec2[0][0]
                            #     target_pos.pose.position.y = self.local_pos[1] + tvec2[1][0]
                            #     self.target_position.publish(target_pos)

                            #     #str_position0 = "Marker Position in Camera frame: x=%f  y=%f " % (tvec2[0][0], tvec2[1][0])
                            #     #cv2.putText(frame, str_position0, (0, 50), self.font, 1, (0, 255, 0), 2, cv2.LINE_AA)
                            #     check_err = np.linalg.norm([tvec2[0][0], tvec2[1][0]])
                            #     self.check_error_pos.publish(check_err)
                            #     self.rate.sleep()
                    else:
                        if ids[i][0] == self.ids_target[1]:
                            # get the corners at index i corresponding to the second target id
                            self.corners = corners[i]

                            markerLength = 0.08

                            ret1 = aruco.estimatePoseSingleMarkers(
                                corners=self.corners,
                                markerLength=markerLength,
                                cameraMatrix=self.mtx,
                                distCoeffs=self.dist)
                            rvec, tvec = ret1[0][0, 0, :], ret1[1][0, 0, :]
                            # -- Draw the detected marker and put a reference frame over it
                            aruco.drawDetectedMarkers(frame, corners, ids)
                            aruco.drawAxis(frame, self.mtx, self.dist, rvec,
                                           tvec, 0.1)
                            str_position0 = "Marker Position in Camera frame: x=%f  y=%f  z=%f" % (
                                tvec[0], tvec[1], tvec[2])
                            cv2.putText(frame, str_position0, (0, 50),
                                        self.font, 0.7, (0, 255, 0), 1,
                                        cv2.LINE_AA)

                            (rvec - tvec).any(
                            )  # get rid of that nasty numpy value array error

                            tvec1[0][0] = tvec[0]
                            tvec1[1][0] = tvec[1]
                            tvec1[2][0] = tvec[2]

                            self.altitude = tvec[2]

                            alpha = 3.0

                            self.beta[0] = abs(
                                np.rad2deg(np.arctan(tvec[0] / tvec[2])))
                            self.beta[1] = abs(
                                np.rad2deg(np.arctan(tvec[1] / tvec[2])))

                            # decide whether to move laterally or descend
                            check_move = self.check_angle(alpha)
                            self.check_move_position.publish(check_move)

                            # marker position in the body (UAV) frame
                            marker_pos = PS()
                            tvec2 = np.matmul(self.imu_cam, tvec1)
                            marker_pos.pose.position.x = tvec2[0][0]
                            marker_pos.pose.position.y = tvec2[1][0]
                            marker_pos.pose.position.z = tvec2[2][0]
                            # publish marker in body frame
                            self.aruco_marker_pos_pub.publish(marker_pos)

                            # if (self.local_pos[3] > -0.1 and self.local_pos[3] < 0.1):
                            ## marker in the global frame
                            fly_pos = PS()
                            fly_pos.pose.position.x = tvec2[0][0] + self.local_pos[0]
                            fly_pos.pose.position.y = tvec2[1][0] + self.local_pos[1]
                            fly_pos.pose.position.z = tvec2[2][0] + self.local_pos[2]
                            # publish marker position in the global frame
                            self.fly_pos_pub.publish(fly_pos)
                            self.rate.sleep()
            else:
                ids_marker = False
                self.check_marker_detection.publish(ids_marker)

            cv2.imshow("frame", frame)
            cv2.waitKey(1)