Example #1
    def __init__(self):

        # Pull constants from config file
        self.lower = rospy.get_param('~lower_color_threshold', [0, 0, 80])
        self.upper = rospy.get_param(
            '~higher_color_threshold', [200, 200, 250])
        self.min_contour_area = rospy.get_param('~min_contour_area', .001)
        self.max_contour_area = rospy.get_param('~max_contour_area', 400)
        self.min_trans = rospy.get_param('~min_trans', .05)
        self.max_velocity = rospy.get_param('~max_velocity', 1)
        self.timeout = rospy.Duration(
            rospy.get_param('~timeout_seconds'), 250000)
        self.min_observations = rospy.get_param('~min_observations', 8)
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations that move coordinates from the sub's
        perspective to our global map perspective. This information is
        also necessary to perform the least-squares intersection that
        finds the 3D coordinates of the torpedo board from 2D
        observations made by the camera. More info on this can be found
        in sub8_vision_tools.
        '''

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # ROS services so the mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_board3d)
        self.image_pub = Image_Publisher("torp_vision/debug")
        self.point_pub = rospy.Publisher(
            "torp_vision/points", Point, queue_size=1)
        self.mask_image_pub = rospy.Publisher(
            'torp_vision/mask', Image, queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)
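
The docstring in Example #1 refers to the least-squares intersection performed by MultiObservation from sub8_vision_tools: each 2D observation is turned into a bearing ray from a known camera pose, and the single 3D point closest to all of the rays is solved for. A minimal numpy sketch of that idea, assuming ray origins and unit direction vectors already expressed in the map frame (an illustration of the math, not the MultiObservation implementation):

import numpy as np

def least_squares_intersection(origins, directions):
    """Return the 3D point closest (in the least-squares sense) to a set of rays.

    origins: (N, 3) camera positions in the map frame.
    directions: (N, 3) unit bearing vectors, one per 2D observation.
    """
    A = np.zeros((3, 3))
    b = np.zeros(3)
    for o, d in zip(origins, directions):
        # Projector onto the plane perpendicular to this ray's direction.
        M = np.eye(3) - np.outer(d, d)
        A += M
        b += M.dot(o)
    return np.linalg.solve(A, b)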
Example #2
 def __init__(self):
     self.enabled = False
     # Maps ID to running class probabilities
     self.object_map = {}
     # Maps ID to mean volume, used to discriminate buoys / black totem
     self.volume_means = {}
     self.area_means = {}
     self.tf_buffer = tf2_ros.Buffer()
     self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
     self.get_params()
     self.last_panel_points_msg = None
     self.database_client = rospy.ServiceProxy('/database/requests',
                                               ObjectDBQuery)
     self.sub = Image_Subscriber(self.image_topic, self.img_cb)
     self.camera_info = self.sub.wait_for_camera_info()
     self.camera_model = PinholeCameraModel()
     self.camera_model.fromCameraInfo(self.camera_info)
     if self.debug:
         self.image_mux = ImageMux(size=(self.camera_info.height,
                                         self.camera_info.width),
                                   shape=(1, 2),
                                   labels=['Result', 'Mask'])
         self.debug_pub = Image_Publisher('~debug_image')
     self.last_objects = None
     self.last_update_time = rospy.Time.now()
     self.objects_sub = rospy.Subscriber('/pcodar/objects',
                                         PerceptionObjectArray,
                                         self.process_objects,
                                         queue_size=2)
     self.enabled_srv = rospy.Service('~set_enabled', SetBool,
                                      self.set_enable_srv)
     if self.is_training:
         self.enabled = True
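
Example #2 (like most of the examples here) builds a PinholeCameraModel from the CameraInfo returned by wait_for_camera_info(). A short sketch of how that model is typically used afterwards, assuming camera_info is a sensor_msgs/CameraInfo message already in scope and the 3D point is made up for illustration:

from image_geometry import PinholeCameraModel

model = PinholeCameraModel()
model.fromCameraInfo(camera_info)

# Project a 3D point expressed in the camera frame onto the image plane...
u, v = model.project3dToPixel((1.0, 0.0, 5.0))

# ...and back-project a pixel into a unit ray in the camera frame.
ray = model.projectPixelTo3dRay((u, v))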
Example #3
    def __init__(self):
        self.tf_listener = tf.TransformListener()

        self.enabled = False
        self.last_image = None
        self.last_image_time = None
        self.camera_model = None
        # Model radius doesn't matter because it's not used for 3D pose
        self.circle_finder = CircleFinder(1.0)

        # Various constants for tuning, debugging. See buoy_finder.yaml for more info
        self.min_observations = rospy.get_param('~min_observations')
        self.debug_ros = rospy.get_param('~debug/ros', True)
        self.debug_cv = rospy.get_param('~debug/cv', False)
        self.min_contour_area = rospy.get_param('~min_contour_area')
        self.max_circle_error = rospy.get_param('~max_circle_error')
        self.max_velocity = rospy.get_param('~max_velocity')
        self.roi_y = rospy.get_param('~roi_y')
        self.roi_height = rospy.get_param('~roi_height')
        camera = rospy.get_param('~camera_topic',
                                 '/camera/front/right/image_rect_color')

        self.buoys = {}
        for color in ['red', 'yellow', 'green']:
            self.buoys[color] = Buoy(color, debug_cv=self.debug_cv)
        if self.debug_cv:
            cv2.waitKey(1)
            self.debug_images = {}

        # Keep latest odom message for sanity check
        self.last_odom = None
        self.odom_sub = rospy.Subscriber('/odom',
                                         Odometry,
                                         self.odom_cb,
                                         queue_size=3)

        self.image_sub = Image_Subscriber(camera, self.image_cb)
        if self.debug_ros:
            self.rviz = rviz.RvizVisualizer(topic='~markers')
            self.mask_pub = Image_Publisher('~mask_image')
            rospy.Timer(rospy.Duration(1), self.print_status)

        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()
        self.multi_obs = MultiObservation(self.camera_model)

        rospy.Service('~enable', SetBool, self.toggle_search)
        rospy.Service('~2D', VisionRequest2D, self.request_buoy)
        rospy.Service('~pose', VisionRequest, self.request_buoy3d)

        rospy.loginfo("BUOY FINDER: initialized successfully")
Example #4
    def __init__(self):

        # Pull constants from config file
        self.override = False
        self.lower = [0, 0, 0]
        self.upper = [0, 0, 0]
        self.min_trans = 0
        self.max_velocity = 0
        self.timeout = 0
        self.min_observations = 0
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/down/image_rect_color')
        self.goal = None
        self.last_config = None
        self.reconfigure_server = DynamicReconfigureServer(
            VampireIdentifierConfig, self.reconfigure)

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # ROS services so the mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_buoy)
        self.image_pub = Image_Publisher("drac_vision/debug")
        self.point_pub = rospy.Publisher("drac_vision/points",
                                         Point,
                                         queue_size=1)
        self.mask_image_pub = rospy.Publisher('drac_vision/mask',
                                              Image,
                                              queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)
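
Example #4 starts with zeroed thresholds and relies on a DynamicReconfigureServer to fill them in through the reconfigure callback. A hedged sketch of what that callback could look like; the VampireIdentifierConfig field names used here are assumptions:

def reconfigure(self, config, level):
    """dynamic_reconfigure callback: copy the updated values and return the config."""
    self.lower = [config.lower_b, config.lower_g, config.lower_r]  # hypothetical fields
    self.upper = [config.upper_b, config.upper_g, config.upper_r]  # hypothetical fields
    self.min_trans = config.min_trans                              # hypothetical field
    self.max_velocity = config.max_velocity                        # hypothetical field
    self.last_config = config
    return config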
Example #5
 def __init__(self):
     self.enabled = False
     self.pattern_pub = rospy.Publisher("/scan_the_code", ScanTheCode, queue_size=3)
     self.tf_buffer = tf2_ros.Buffer()
     self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
     self.get_params()
     self.last_panel_points_msg = None
     self.sub = Image_Subscriber(self.image_topic, self.img_cb)
     info = self.sub.wait_for_camera_info()
     self.camera_model = PinholeCameraModel()
     self.camera_model.fromCameraInfo(info)
     if self.debug:
         self.image_mux = ImageMux(size=(info.height, info.width), shape=(1, 2),
                                   labels=['Result', 'Mask'])
         self.debug_pub = Image_Publisher('~debug_image')
     self.bbox_sub = rospy.Subscriber("stc_led_pts_marshall", PointCloud2, self.panel_points_cb)
     self.classification_list = deque()
     self.enabled = True
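
Example #5 subscribes to the LED panel points as a sensor_msgs/PointCloud2 handled by panel_points_cb. A short sketch of how such a callback might unpack the cloud; the body and the last_panel_points attribute are assumptions:

import numpy as np
from sensor_msgs import point_cloud2

def panel_points_cb(self, msg):
    """Store the latest panel points message and convert it to an (N, 3) array."""
    self.last_panel_points_msg = msg
    self.last_panel_points = np.array(list(
        point_cloud2.read_points(msg, field_names=('x', 'y', 'z'), skip_nans=True)))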
Example #6
    def __init__(self):
        self.debug_gui = False
        self.last2d = None
        self.last3d = None
        self.enabled = False
        self.cam = None
        self.last_image = None
        self.last_found_time = None

        # Constants from launch config file
        self.debug_ros = rospy.get_param("debug_ros", True)
        self.canny_low = rospy.get_param("canny_low", 100)
        self.canny_ratio = rospy.get_param("canny_ratio", 3.0)
        self.thresh_hue_high = rospy.get_param("thresh_hue_high", 60)
        self.thresh_saturation_low = rospy.get_param("thresh_satuation_low",
                                                     100)
        self.min_contour_area = rospy.get_param("min_contour_area", 100)
        self.length_width_ratio_err = rospy.get_param("length_width_ratio_err",
                                                      0.2)
        self.approx_polygon_thresh = rospy.get_param("approx_polygon_thresh",
                                                     10)
        self.shape_match_thresh = rospy.get_param("shape_match_thresh", 0.4)
        camera = rospy.get_param("marker_camera",
                                 "/camera/down/left/image_rect_color")

        if self.debug_ros:
            self.debug_pub = Image_Publisher("debug_image")
            self.markerPub = rospy.Publisher('path_marker_visualization',
                                             Marker,
                                             queue_size=10)
        self.service2D = rospy.Service('/vision/path_marker/2D',
                                       VisionRequest2D, self.cb_2d)
        self.service3D = rospy.Service('/vision/path_marker/pose',
                                       VisionRequest, self.cb_3d)
        self.toggle = rospy.Service('/vision/path_marker/enable', SetBool,
                                    self.enable_cb)
        self.image_sub = Image_Subscriber(camera, self.img_cb)
        camera_info = self.image_sub.wait_for_camera_info()
        self.cam = PinholeCameraModel()
        self.cam.fromCameraInfo(camera_info)
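
Example #6 pulls hue and saturation thresholds that presumably feed a color mask inside img_cb. A minimal OpenCV sketch of that masking step; how the two parameters combine into bounds is an assumption:

import cv2
import numpy as np

def threshold_hsv(bgr_image, hue_high, saturation_low):
    """Mask pixels with hue below hue_high and saturation above saturation_low."""
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    lower = np.array([0, saturation_low, 0], dtype=np.uint8)
    upper = np.array([hue_high, 255, 255], dtype=np.uint8)
    return cv2.inRange(hsv, lower, upper)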
Example #7
    def __init__(self):

        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations that move coordinates from the sub's
        perspective to our global map perspective. This information is
        also necessary to perform the least-squares intersection that
        finds the 3D coordinates of the torpedo board from 2D
        observations made by the camera. More info on this can be found
        in sub8_vision_tools.
        '''

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        self.image_pub = Image_Publisher("CLAHE/debug")

        # Debug
        self.debug = rospy.get_param('~debug', True)
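
Example #7 publishes a 'CLAHE/debug' image, which suggests this node applies Contrast Limited Adaptive Histogram Equalization to incoming frames. A standalone OpenCV sketch of that operation; the clip limit and tile size are illustrative defaults, not values from this node:

import cv2

def apply_clahe(bgr_image, clip_limit=2.0, tile_grid_size=(8, 8)):
    """Equalize the lightness channel of a BGR image with CLAHE."""
    lab = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    merged = cv2.merge((clahe.apply(l), a, b))
    return cv2.cvtColor(merged, cv2.COLOR_LAB2BGR)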
Example #8
    def __init__(self):
        self.debug_gui = False
        self.enabled = False
        self.cam = None

        # Constants from launch config file
        self.debug_ros = rospy.get_param("~debug_ros", True)
        self.canny_low = rospy.get_param("~canny_low", 100)
        self.canny_ratio = rospy.get_param("~canny_ratio", 3.0)
        self.thresh_hue_high = rospy.get_param("~thresh_hue_high", 60)
        self.thresh_saturation_low = rospy.get_param("~thresh_satuation_low",
                                                     100)
        self.min_contour_area = rospy.get_param("~min_contour_area", 100)
        self.epsilon_range = rospy.get_param("~epsilon_range", (0.01, 0.1))
        self.epsilon_step = rospy.get_param("~epsilon_step", 0.01)
        self.shape_match_thresh = rospy.get_param("~shape_match_thresh", 0.4)
        self.min_found_count = rospy.get_param("~min_found_count", 10)
        self.timeout_seconds = rospy.get_param("~timeout_seconds", 2.0)
        # Default to a scale model of the path marker. Please use the set_geometry
        # service to set the correct model of the object.
        length = rospy.get_param("~length", 1.2192)
        width = rospy.get_param("~width", 0.1524)
        self.rect_model = RectFinder(length, width)
        self.do_3D = rospy.get_param("~do_3D", True)
        camera = rospy.get_param("~image_topic",
                                 "/camera/down/left/image_rect_color")

        self.tf_listener = tf.TransformListener()

        # Create Kalman filter to track 3D position and direction vector for marker in /map frame
        self.state_size = 5  # X, Y, Z, DY, DX
        self.filter = cv2.KalmanFilter(self.state_size, self.state_size)
        self.filter.transitionMatrix = 1. * np.eye(self.state_size,
                                                   dtype=np.float32)
        self.filter.measurementMatrix = 1. * np.eye(self.state_size,
                                                    dtype=np.float32)
        self.filter.processNoiseCov = 1e-5 * np.eye(self.state_size,
                                                    dtype=np.float32)
        self.filter.measurementNoiseCov = 1e-4 * np.eye(self.state_size,
                                                        dtype=np.float32)
        self.filter.errorCovPost = 1. * np.eye(self.state_size,
                                               dtype=np.float32)

        self.reset()
        self.service_set_geometry = rospy.Service('~set_geometry', SetGeometry,
                                                  self._set_geometry_cb)
        if self.debug_ros:
            self.debug_pub = Image_Publisher("~debug_image")
            self.markerPub = rospy.Publisher('~marker', Marker, queue_size=10)
        self.service2D = rospy.Service('~2D', VisionRequest2D,
                                       self._vision_cb_2D)
        if self.do_3D:
            self.service3D = rospy.Service('~pose', VisionRequest,
                                           self._vision_cb_3D)
        self.toggle = rospy.Service('~enable', SetBool, self._enable_cb)

        self.image_sub = Image_Subscriber(camera, self._img_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        assert self.camera_info is not None
        self.cam = PinholeCameraModel()
        self.cam.fromCameraInfo(self.camera_info)
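
Example #8 configures a cv2.KalmanFilter whose five-dimensional state (X, Y, Z, DY, DX) uses identity transition and measurement matrices, so each update essentially smooths noisy pose measurements. A short sketch of the predict/correct cycle such a filter runs on each new measurement; the helper function is an assumption, not code from this node:

import numpy as np

def update_filter(kalman_filter, measurement):
    """Run one predict/correct step; measurement is a length-5 sequence of floats."""
    kalman_filter.predict()
    estimate = kalman_filter.correct(
        np.asarray(measurement, dtype=np.float32).reshape(-1, 1))
    return estimate.flatten()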