Code example #1
File: vrx_classifier.py Project: kledom/mil
    def __init__(self):
        self.enabled = False
        # Maps ID to running class probabilities
        self.object_map = {}
        # Maps ID to mean volume, used to discriminate buoys / black totem
        self.volume_means = {}
        self.area_means = {}
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.get_params()
        self.last_panel_points_msg = None
        self.database_client = rospy.ServiceProxy('/database/requests',
                                                  ObjectDBQuery)
        self.sub = Image_Subscriber(self.image_topic, self.img_cb)
        self.camera_info = self.sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        if self.debug:
            self.image_mux = ImageMux(size=(self.camera_info.height,
                                            self.camera_info.width),
                                      shape=(1, 2),
                                      labels=['Result', 'Mask'])
            self.debug_pub = Image_Publisher('~debug_image')
        self.last_objects = None
        self.last_update_time = rospy.Time.now()
        self.objects_sub = rospy.Subscriber('/pcodar/objects',
                                            PerceptionObjectArray,
                                            self.process_objects,
                                            queue_size=2)
        self.enabled_srv = rospy.Service('~set_enabled', SetBool,
                                         self.set_enable_srv)
        if self.is_training:
            self.enabled = True
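
The classifier above is toggled through its ~set_enabled SetBool service. A minimal sketch of flipping it on from another node, assuming the node is launched under the name 'vrx_classifier' (the actual name depends on the launch file):

import rospy
from std_srvs.srv import SetBool

rospy.init_node('classifier_toggle_example')
# '/vrx_classifier' is an assumed node name; adjust to your launch file
rospy.wait_for_service('/vrx_classifier/set_enabled')
set_enabled = rospy.ServiceProxy('/vrx_classifier/set_enabled', SetBool)
print(set_enabled(True).success)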
Code example #2
File: vampire_identifier.py Project: uf-mil/mil
    def __init__(self):

        # Pull constants from config file
        self.override = False
        self.lower = [0, 0, 0]
        self.upper = [0, 0, 0]
        self.min_trans = 0
        self.max_velocity = 0
        self.timeout = 0
        self.min_observations = 0
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/down/image_rect_color')
        self.goal = None
        self.last_config = None
        self.reconfigure_server = DynamicReconfigureServer(
            VampireIdentifierConfig, self.reconfigure)

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # Ros Services so mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_buoy)
        self.image_pub = Image_Publisher("drac_vision/debug")
        self.point_pub = rospy.Publisher("drac_vision/points",
                                         Point,
                                         queue_size=1)
        self.mask_image_pub = rospy.Publisher('drac_vision/mask',
                                              Image,
                                              queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)
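
The identifier registers a DynamicReconfigureServer, so its color thresholds can be changed at runtime. A hedged sketch of driving it from a client, assuming the node runs as 'vampire_identifier'; the parameter names come from the reconfigure callback shown in code example #9:

import rospy
import dynamic_reconfigure.client

rospy.init_node('vampire_reconfigure_example')
# 'vampire_identifier' is an assumed node name
client = dynamic_reconfigure.client.Client('vampire_identifier', timeout=5)
client.update_configuration({'override': True,
                             'dyn_lower': '0, 0, 80',
                             'dyn_upper': '200, 200, 250'})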
Code example #3
    def __init__(self):

        # Pull constants from config file
        self.lower = rospy.get_param('~lower_color_threshold', [0, 0, 80])
        self.upper = rospy.get_param(
            '~higher_color_threshold', [200, 200, 250])
        self.min_contour_area = rospy.get_param('~min_contour_area', .001)
        self.max_contour_area = rospy.get_param('~max_contour_area', 400)
        self.min_trans = rospy.get_param('~min_trans', .05)
        self.max_velocity = rospy.get_param('~max_velocity', 1)
        self.timeout = rospy.Duration(
            rospy.get_param('~timeout_seconds'), 250000)
        self.min_observations = rospy.get_param('~min_observations', 8)
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations on the coordinates to move from the subs
        perspective to our global map perspective. This information is
        also necessary to perform the least squares intersection which
        will find the 3D coordinates of the torpedo board based on 2D
        observations from the Camera. More info on this can be found in
        sub8_vision_tools.
        '''

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # Ros Services so mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_board3d)
        self.image_pub = Image_Publisher("torp_vision/debug")
        self.point_pub = rospy.Publisher(
            "torp_vision/points", Point, queue_size=1)
        self.mask_image_pub = rospy.Publisher(
            'torp_vision/mask', Image, queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)
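
The docstring above refers to the least-squares intersection in sub8_vision_tools. A minimal numpy sketch of the underlying math (not the actual MultiObservation implementation): each 2D observation is back-projected to a ray in the map frame, and the estimate is the 3D point minimizing the total squared distance to all rays.

import numpy as np

def least_squares_intersection(origins, directions):
    # origins: (N, 3) camera positions; directions: (N, 3) unit rays (map frame)
    A = np.zeros((3, 3))
    b = np.zeros(3)
    for o, d in zip(origins, directions):
        # Projector onto the plane perpendicular to this ray
        P = np.eye(3) - np.outer(d, d)
        A += P
        b += P.dot(o)
    # Normal equations of "minimize the sum of squared point-to-ray distances"
    return np.linalg.solve(A, b)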
Code example #4
File: buoy_finder.py Project: uf-mil/SubjuGator
    def __init__(self):
        self.tf_listener = tf.TransformListener()

        self.enabled = False
        self.last_image = None
        self.last_image_time = None
        self.camera_model = None
        self.circle_finder = CircleFinder(1.0)  # Model radius doesn't matter because it's not being used for 3D pose

        # Various constants for tuning, debugging. See buoy_finder.yaml for more info
        self.min_observations = rospy.get_param('~min_observations')
        self.debug_ros = rospy.get_param('~debug/ros', True)
        self.debug_cv = rospy.get_param('~debug/cv', False)
        self.min_contour_area = rospy.get_param('~min_contour_area')
        self.max_circle_error = rospy.get_param('~max_circle_error')
        self.max_velocity = rospy.get_param('~max_velocity')
        self.roi_y = rospy.get_param('~roi_y')
        self.roi_height = rospy.get_param('~roi_height')
        camera = rospy.get_param('~camera_topic', '/camera/front/right/image_rect_color')

        self.buoys = {}
        for color in ['red', 'yellow', 'green']:
            self.buoys[color] = Buoy(color, debug_cv=self.debug_cv)
        if self.debug_cv:
            cv2.waitKey(1)
            self.debug_images = {}

        # Keep latest odom message for sanity check
        self.last_odom = None
        self.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_cb, queue_size=3)

        self.image_sub = Image_Subscriber(camera, self.image_cb)
        if self.debug_ros:
            self.rviz = rviz.RvizVisualizer(topic='~markers')
            self.mask_pub = Image_Publisher('~mask_image')
            rospy.Timer(rospy.Duration(1), self.print_status)

        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()
        self.multi_obs = MultiObservation(self.camera_model)

        rospy.Service('~enable', SetBool, self.toggle_search)
        rospy.Service('~2D', VisionRequest2D, self.request_buoy)
        rospy.Service('~pose', VisionRequest, self.request_buoy3d)

        rospy.loginfo("BUOY FINDER: initialized successfully")
Code example #5
File: CLAHE_Processing.py Project: uf-mil/SubjuGator
    def __init__(self):

        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations on the coordinates to move from the subs
        perspective to our global map perspective. This information is
        also necessary to perform the least squares intersection which
        will find the 3D coordinates of the torpedo board based on 2D
        observations from the Camera. More info on this can be found in
        sub8_vision_tools.
        '''

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        self.image_pub = Image_Publisher("CLAHE/debug")

        # Debug
        self.debug = rospy.get_param('~debug', True)
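
The PinholeCameraModel built above is what lets these nodes move between pixels and 3D rays. A small sketch of the image_geometry call involved:

import numpy as np
from image_geometry import PinholeCameraModel

def pixel_to_unit_ray(camera_model, u, v):
    # projectPixelTo3dRay returns a ray in the camera's optical frame;
    # rotating it into /map gives the ray used for triangulation.
    ray = np.array(camera_model.projectPixelTo3dRay((u, v)))
    return ray / np.linalg.norm(ray)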
Code example #6
File: buoy_finder.py Project: DSsoto/Sub8
class BuoyFinder(object):
    '''
    Node to find red, green, and yellow buoys in a single camera frame.

    Combines several observations and uses a least-squares approach to get a 3D
    position estimate of a buoy when requested.

    Intended to be modular so other approaches can be tried. Adding more sophistication
    to segmentation would increase reliability.
    '''
    def __init__(self):
        self.tf_listener = tf.TransformListener()

        self.enabled = False
        self.last_image = None
        self.last_image_time = None
        self.camera_model = None

        # Various constants for tuning, debugging. See buoy_finder.yaml for more info
        self.min_observations = rospy.get_param('~min_observations')
        self.debug_ros = rospy.get_param('~debug/ros', True)
        self.debug_cv = rospy.get_param('~debug/cv', False)
        self.min_contour_area = rospy.get_param('~min_contour_area')
        self.max_velocity = rospy.get_param('~max_velocity')
        camera = rospy.get_param('~camera_topic', '/camera/front/right/image_rect_color')

        self.buoys = {}
        for color in ['red', 'yellow', 'green']:
            self.buoys[color] = Buoy(color, debug_cv=self.debug_cv)
            self.buoys[color].load_segmentation()
        if self.debug_cv:
            cv2.waitKey(1)
            self.debug_images = {}

        # Keep latest odom message for sanity check
        self.last_odom = None
        self.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_cb, queue_size=3)

        self.image_sub = Image_Subscriber(camera, self.image_cb)
        if self.debug_ros:
            self.rviz = rviz.RvizVisualizer(topic='~markers')
            self.mask_pub = Image_Publisher('~mask_image')
            rospy.Timer(rospy.Duration(1), self.print_status)

        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()
        self.multi_obs = MultiObservation(self.camera_model)

        rospy.Service('~enable', SetBool, self.toggle_search)
        rospy.Service('~2D', VisionRequest2D, self.request_buoy)
        rospy.Service('~pose', VisionRequest, self.request_buoy3d)

        rospy.loginfo("BUOY FINDER: initialized successfully")

    def odom_cb(self, odom):
        self.last_odom = odom

    def toggle_search(self, srv):
        '''
        Callback for standard ~enable service. If true, start
        looking at frames for buoys.
        '''
        if srv.data:
            rospy.loginfo("BUOY FINDER: enabled")
            self.enabled = True
        else:
            rospy.loginfo("BUOY FINDER: disabled")
            self.enabled = False

        return SetBoolResponse(success=True)

    def request_buoy(self, srv):
        '''
        Callback for 2D vision request. Returns centroid
        of buoy found with color specified in target_name
        if found.
        '''
        if not self.enabled or srv.target_name not in self.buoys:
            return VisionRequest2DResponse(found=False)
        response = self.find_single_buoy(srv.target_name)
        if response is False or response is None:
            return VisionRequest2DResponse(found=False)
        center, radius = response
        return VisionRequest2DResponse(
            header=Header(stamp=self.last_image_time, frame_id=self.frame_id),
            pose=Pose2D(
                x=center[0],
                y=center[1],
            ),
            max_x=self.last_image.shape[0],
            max_y=self.last_image.shape[1],
            camera_info=self.image_sub.camera_info,
            found=True
        )

    def request_buoy3d(self, srv):
        '''
        Callback for 3D vision request. Uses recent observations of buoy
        specified in target_name to attempt a least-squares position estimate.
        As buoys are spheres, orientation is meaningless.
        '''
        if srv.target_name not in self.buoys or not self.enabled:
            return VisionRequestResponse(found=False)
        buoy = self.buoys[srv.target_name]
        if buoy.est is None:
            return VisionRequestResponse(found=False)
        return VisionRequestResponse(
            pose=PoseStamped(
                header=Header(stamp=self.last_image_time, frame_id='/map'),
                pose=Pose(
                    position=Point(*buoy.est)
                )
            ),
            found=True
        )

    def image_cb(self, image):
        '''
        Run each time an image comes in from ROS. If enabled,
        attempt to find each color buoy.
        '''
        if not self.enabled:
            return

        # Crop out some of the top and bottom to exclude the floor and surface reflections
        height = image.shape[0]
        roi_y = int(0.2 * height)
        roi_height = height - int(0.2 * height)
        self.roi = (0, roi_y, roi_height, image.shape[1])
        self.last_image = image[self.roi[1]:self.roi[2], self.roi[0]:self.roi[3]]

        if self.debug_ros:
            # Create a blacked out debug image for putting masks in
            self.mask_image = np.zeros(self.last_image.shape, dtype=image.dtype)
        if self.last_image_time is not None and self.image_sub.last_image_time < self.last_image_time:
            # Clear tf buffer if time went backwards (nice for playing bags in loop)
            self.tf_listener.clear()
        self.last_image_time = self.image_sub.last_image_time
        self.find_buoys()
        if self.debug_ros:
            self.mask_pub.publish(self.mask_image)

    def print_status(self, _):
        '''
        Called at 1 second intervals to display the status (not found, n observations, FOUND)
        for each buoy.
        '''
        if self.enabled:
            rospy.loginfo("STATUS: RED='%s', GREEN='%s', YELLOW='%s'",
                          self.buoys['red'].status,
                          self.buoys['green'].status,
                          self.buoys['yellow'].status)

    def find_buoys(self):
        '''
        Run find_single_buoy for each color of buoy
        '''
        for buoy_name in self.buoys:
            self.find_single_buoy(buoy_name)

    def get_best_contour(self, contours):
        '''
        Attempts to find a good buoy contour among those found within the
        thresholded mask. If a good one is found, returns (contour, centroid, area);
        otherwise returns None. Right now the best contour is just the largest.

        TODO: Use smarter contour filtering methods, like checking whether it is circle-like
        '''
        if len(contours) > 0:
            cnt = max(contours, key=cv2.contourArea)
            area = cv2.contourArea(cnt)
            if area < self.min_contour_area:
                return None
            M = cv2.moments(cnt)
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            tpl_center = (int(cx), int(cy))
            return cnt, tpl_center, area
        else:
            return None

    def find_single_buoy(self, buoy_type):
        '''
        Attempt to find one color buoy in the image.
        1) Create mask for buoy's color in colorspace specified in parameters
        2) Select the largest contour in this mask
        3) Approximate a circle around this contour
        4) Store the center of this circle and the current tf between /map and camera
           as an observation
        5) If the number of observations for this buoy is now >= min_observations,
           approximate the buoy position using the imported least-squares tool
        '''
        assert buoy_type in self.buoys.keys(), "Buoys_2d does not know buoy color: {}".format(buoy_type)
        buoy = self.buoys[buoy_type]
        mask = buoy.get_mask(self.last_image)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.erode(mask, kernel, iterations=2)
        mask = cv2.dilate(mask, kernel, iterations=2)

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE,
                                          offset=(self.roi[0], self.roi[1]))
        ret = self.get_best_contour(contours)
        if ret is None:
            buoy.clear_old_observations()
            buoy.status = 'not seen w/ {} obs'.format(buoy.size())
            return
        contour, tuple_center, area = ret
        true_center, rad = cv2.minEnclosingCircle(contour)

        if self.debug_ros:
            cv2.add(self.mask_image.copy(), buoy.cv_colors, mask=mask, dst=self.mask_image)
            cv2.circle(self.mask_image, (int(true_center[0] - self.roi[0]), int(true_center[1] - self.roi[1])),
                       int(rad), buoy.cv_colors, 2)
        if self.debug_cv:
            self.debug_images[buoy_type] = mask.copy()

        try:
            self.tf_listener.waitForTransform('/map', self.frame_id, self.last_image_time, rospy.Duration(0.2))
        except tf.Exception as e:
            rospy.logwarn("Could not transform camera to map: {}".format(e))
            return False

        if not self.sanity_check(tuple_center, self.last_image_time):
            buoy.status = 'failed sanity check'
            return False

        (t, rot_q) = self.tf_listener.lookupTransform('/map', self.frame_id, self.last_image_time)
        R = mil_ros_tools.geometry_helpers.quaternion_matrix(rot_q)

        buoy.add_observation(true_center, (np.array(t), R), self.last_image_time)

        observations, pose_pairs = buoy.get_observations_and_pose_pairs()
        if len(observations) > self.min_observations:
            buoy.est = self.multi_obs.lst_sqr_intersection(observations, pose_pairs)
            buoy.status = 'Pose found'
            if self.debug_ros:
                self.rviz.draw_sphere(buoy.est, color=buoy.draw_colors,
                                      scaling=(0.2286, 0.2286, 0.2286),
                                      frame='/map', _id=buoy.visual_id)
        else:
            buoy.status = '{} observations'.format(len(observations))

        return tuple_center, rad

    def sanity_check(self, coordinate, timestamp):
        '''
        Check if the observation is unreasonable. More can go here if we want.
        '''
        if self.last_odom is None:
            return False

        linear_velocity = rosmsg_to_numpy(self.last_odom.twist.twist.linear)
        if np.linalg.norm(linear_velocity) > self.max_velocity:
            return False

        return True
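
The TODO in get_best_contour asks for a circle-likeness check. A hedged sketch of one such filter using the standard 4*pi*area/perimeter^2 circularity measure (1.0 for a perfect circle); the 0.7 cutoff is an assumed tuning value:

import math
import cv2

def is_circle_like(contour, min_circularity=0.7):
    area = cv2.contourArea(contour)
    peri = cv2.arcLength(contour, True)
    if peri == 0:
        return False
    # 4*pi*A/P^2 equals 1.0 for a circle and drops toward 0 for elongated shapes
    return 4 * math.pi * area / (peri * peri) >= min_circularity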
Code example #7
class torp_vision:

    def __init__(self):

        # Pull constants from config file
        self.lower = rospy.get_param('~lower_color_threshold', [0, 0, 80])
        self.upper = rospy.get_param(
            '~higher_color_threshold', [200, 200, 250])
        self.min_contour_area = rospy.get_param('~min_contour_area', .001)
        self.max_contour_area = rospy.get_param('~max_contour_area', 400)
        self.min_trans = rospy.get_param('~min_trans', .05)
        self.max_velocity = rospy.get_param('~max_velocity', 1)
        self.timeout = rospy.Duration(
            rospy.get_param('~timeout_seconds'), 250000)
        self.min_observations = rospy.get_param('~min_observations', 8)
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations on the coordinates to move from the subs
        perspective to our global map perspective. This information is
        also necessary to perform the least squares intersection which
        will find the 3D coordinates of the torpedo board based on 2D
        observations from the Camera. More info on this can be found in
        sub8_vision_tools.
        '''

        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # Ros Services so mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_board3d)
        self.image_pub = Image_Publisher("torp_vision/debug")
        self.point_pub = rospy.Publisher(
            "torp_vision/points", Point, queue_size=1)
        self.mask_image_pub = rospy.Publisher(
            'torp_vison/mask', Image, queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)

    def image_cb(self, image):
        '''
        Run each time an image comes in from ROS. If enabled,
        attempt to find the torpedo board.
        '''
        if not self.enabled:
            return

        self.last_image = image

        if self.last_image_time is not None and \
                self.image_sub.last_image_time < self.last_image_time:
            # Clear tf buffer if time went backwards (nice for playing bags in
            # loop)
            self.tf_listener.clear()

        self.last_image_time = self.image_sub.last_image_time
        self.acquire_targets(image)

    def toggle_search(self, srv):
        '''
        Callback for standard ~enable service. If true, start
        looking at frames for buoys.
        '''
        if srv.data:
            rospy.loginfo("TARGET ACQUISITION: enabled")
            self.enabled = True

        else:
            rospy.loginfo("TARGET ACQUISITION: disabled")
            self.enabled = False

        return SetBoolResponse(success=True)

    def request_board3d(self, srv):
        '''
        Callback for 3D vision request. Uses recent observations of target
        board specified in target_name to attempt a least-squares position
        estimate. Ignoring orientation of board.
        '''
        if not self.enabled:
            return VisionRequestResponse(found=False)
        # buoy = self.buoys[srv.target_name]
        if self.est is None:
            return VisionRequestResponse(found=False)
        return VisionRequestResponse(
            pose=PoseStamped(
                header=Header(stamp=self.last_image_time, frame_id='/map'),
                pose=Pose(position=Point(*self.est))),
            found=True)

    def clear_old_observations(self):
        # Observations older than two seconds are discarded.
        time = rospy.Time.now()
        i = 0
        while i < len(self._times):
            if time - self._times[i] > self.timeout:
                self._times.popleft()
                self._observations.popleft()
                self._pose_pairs.popleft()
            else:
                i += 1
        # print('Clearing')

    def size(self):
        return len(self._observations)

    def add_observation(self, obs, pose_pair, time):
        self.clear_old_observations()
        # print('Adding...')
        if self.size() == 0 or np.linalg.norm(
                self._pose_pairs[-1][0] - pose_pair[0]) > self.min_trans:
            self._observations.append(obs)
            self._pose_pairs.append(pose_pair)
            self._times.append(time)

    def get_observations_and_pose_pairs(self):
        self.clear_old_observations()
        return (self._observations, self._pose_pairs)

    def detect(self, c):
        # initialize the shape name and approximate the contour
        target = "unidentified"
        peri = cv2.arcLength(c, True)

        if peri < self.min_contour_area or peri > self.max_contour_area:
            return target
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)

        if len(approx) == 4:
            target = "Target Aquisition Successful"

        elif len(approx) == 3 or len(approx) == 5:
            target = "Partial Target Acquisition"

        return target

    def CLAHE(self, cv_image):
        '''
        CLAHE (Contrast Limited Adaptive Histogram Equalization)
        This increases the contrast between color channels and allows us to
        better differentiate colors under certain lighting conditions.
        '''
        clahe = cv2.createCLAHE(clipLimit=5., tileGridSize=(4, 4))

        # convert from BGR to LAB color space
        lab = cv2.cvtColor(cv_image, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)  # split on 3 different channels

        l2 = clahe.apply(l)  # apply CLAHE to the L-channel

        lab = cv2.merge((l2, a, b))  # merge channels
        cv_image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        return cv_image

    def mask_image(self, cv_image, lower, upper):
        mask = cv2.inRange(cv_image, lower, upper)
        # Remove anything not within the bounds of our mask
        output = cv2.bitwise_and(cv_image, cv_image, mask=mask)

        # Resize to emphasize shapes
        # gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)

        # Blur image so our contours can better find the full shape.
        # blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        if (self.debug):
            try:
                # print(output)
                self.mask_image_pub.publish(
                    self.bridge.cv2_to_imgmsg(np.array(output), 'bgr8'))
            except CvBridgeError as e:
                print(e)

        return output

    def acquire_targets(self, cv_image):
        # Take in the data and get its dimensions.
        height, width, channels = cv_image.shape

        # Run CLAHE.
        cv_image = self.CLAHE(cv_image)

        # Now we generate a color mask to isolate only red in the image. This
        # is achieved through the thresholds which can be changed in the above
        # constants.

        # create NumPy arrays from the boundaries
        lower = np.array(self.lower, dtype="uint8")
        upper = np.array(self.upper, dtype="uint8")

        # Generate a mask based on the constants.
        blurred = self.mask_image(cv_image, lower, upper)
        blurred = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
        # Compute contours
        cnts = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

        cnts = cnts[1]
        '''
        We use OpenCV to compute our contours and then begin processing them
        to ensure we are identifying a proper target.
        '''

        shape = ''
        peri_max = 0
        max_x = 0
        max_y = 0
        m_shape = ''
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            if M["m00"] == 0:
                M["m00"] = .000001
            cX = int((M["m10"] / M["m00"]))
            cY = int((M["m01"] / M["m00"]))
            shape = self.detect(c)

            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image

            c = c.astype("float")
            # c *= ratio
            c = c.astype("int")
            if shape == "Target Aquisition Successful":
                if self.debug:
                    try:
                        cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2)
                        cv2.putText(cv_image, shape, (cX, cY),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 2)
                        self.image_pub.publish(cv_image)
                    except CvBridgeError as e:
                        print(e)

                peri = cv2.arcLength(c, True)
                if peri > peri_max:
                    peri_max = peri
                    max_x = cX
                    max_y = cY
                    m_shape = shape
        '''
        This is Kevin's Code, adapted for this project. We are trying to find
        the 3D coordinates of the torpedo board/target to give us a better idea
        of where we are trying to go and perform more accurate movements
        to align with the target. The first thing we need to do is convert from
        camera coordinates in pixels to 3D coordinates.
        Every time we successfully get a target acquisition we add it to the
        counter. Once we observe it enough times
        we can be confident we are looking at the correct target. We then
        perform a least-squares intersection from multiple angles
        to derive the approximate 3D coordinates.
        '''

        if m_shape == "Target Acquisition Successful":
            try:
                self.tf_listener.waitForTransform('/map',
                                                  self.camera_model.tfFrame(),
                                                  self.last_image_time,
                                                  rospy.Duration(0.2))
            except tf.Exception as e:
                rospy.logwarn(
                    "Could not transform camera to map: {}".format(e))
                return False

            (t, rot_q) = self.tf_listener.lookupTransform(
                '/map', self.camera_model.tfFrame(), self.last_image_time)
            R = mil_ros_tools.geometry_helpers.quaternion_matrix(rot_q)
            center = np.array([max_x, max_y])
            self.add_observation(center, (np.array(t), R),
                                 self.last_image_time)

            observations, pose_pairs = self.get_observations_and_pose_pairs()
            if len(observations) > self.min_observations:
                self.est = self.multi_obs.lst_sqr_intersection(
                    observations, pose_pairs)
                self.status = 'Pose found'

            else:
                self.status = '{} observations'.format(len(observations))
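
For tuning the thresholds above without ROS, here is a hedged offline sketch of the same mask-then-contour pipeline on a single BGR frame ('frame.png' is a hypothetical test image):

import cv2
import numpy as np

img = cv2.imread('frame.png')  # hypothetical test image
lower = np.array([0, 0, 80], dtype='uint8')
upper = np.array([200, 200, 250], dtype='uint8')
masked = cv2.bitwise_and(img, img, mask=cv2.inRange(img, lower, upper))
gray = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
cnts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[1] if len(cnts) == 3 else cnts[0]  # OpenCV 3 vs 2/4 return values
for c in cnts:
    approx = cv2.approxPolyDP(c, 0.04 * cv2.arcLength(c, True), True)
    if len(approx) == 4:  # quadrilateral, as in detect() above
        cv2.drawContours(img, [c], -1, (0, 255, 0), 2)
cv2.imwrite('debug.png', img)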
Code example #8
File: CLAHE_Processing.py Project: uf-mil/SubjuGator
class CLAHE_generator:

    def __init__(self):

        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/front/left/image_rect_color')

        # Instantiate remaining variables and objects
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        '''
        These variables store the camera information required to perform
        the transformations on the coordinates to move from the subs
        perspective to our global map perspective. This information is
        also necessary to perform the least squares intersection which
        will find the 3D coordinates of the torpedo board based on 2D
        observations from the Camera. More info on this can be found in
        sub8_vision_tools.
        '''

        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        self.image_pub = Image_Publisher("CLAHE/debug")

        # Debug
        self.debug = rospy.get_param('~debug', True)

    def image_cb(self, image):
        '''
        Run each time an image comes in from ROS. Applies CLAHE to the
        frame and publishes the result.
        '''

        self.last_image = image

        if self.last_image_time is not None and \
                self.image_sub.last_image_time < self.last_image_time:
            # Clear tf buffer if time went backwards (nice for playing bags in
            # loop)
            self.tf_listener.clear()

        self.last_image_time = self.image_sub.last_image_time
        self.CLAHE(image)
        print('published')

    def CLAHE(self, cv_image):
        '''
        CLAHE (Contrast Limited Adaptive Histogram Equalization)
        This increases the contrast between color channels and allows us to
        better differentiate colors under certain lighting conditions.
        '''
        clahe = cv2.createCLAHE(clipLimit=9.5, tileGridSize=(4, 4))

        # convert from BGR to LAB color space
        lab = cv2.cvtColor(cv_image, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)  # split on 3 different channels

        l2 = clahe.apply(l)  # apply CLAHE to the L-channel

        lab = cv2.merge((l2, a, b))  # merge channels
        cv_image = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        self.image_pub.publish(cv_image)
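
A standalone sketch of the same CLAHE step for offline experimentation; clipLimit is the knob that differs between these nodes (5.0 in torp_vision, 9.5 here):

import cv2

def apply_clahe(bgr, clip_limit=9.5, grid=(4, 4)):
    # Equalize only the L channel so hue and chroma are left untouched
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid)
    return cv2.cvtColor(cv2.merge((clahe.apply(l), a, b)), cv2.COLOR_LAB2BGR)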
Code example #9
File: vampire_identifier.py Project: uf-mil/mil
class VampireIdentifier:
    def __init__(self):

        # Pull constants from config file
        self.override = False
        self.lower = [0, 0, 0]
        self.upper = [0, 0, 0]
        self.min_trans = 0
        self.max_velocity = 0
        self.timeout = 0
        self.min_observations = 0
        self.camera = rospy.get_param('~camera_topic',
                                      '/camera/down/image_rect_color')
        self.goal = None
        self.last_config = None
        self.reconfigure_server = DynamicReconfigureServer(
            VampireIdentifierConfig, self.reconfigure)

        # Instantiate remaining variables and objects
        self._observations = deque()
        self._pose_pairs = deque()
        self._times = deque()
        self.last_image_time = None
        self.last_image = None
        self.tf_listener = tf.TransformListener()
        self.status = ''
        self.est = None
        self.visual_id = 0
        self.enabled = False
        self.bridge = CvBridge()

        # Image Subscriber and Camera Information

        self.image_sub = Image_Subscriber(self.camera, self.image_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        self.frame_id = self.camera_model.tfFrame()

        # Ros Services so mission can be toggled and info requested
        rospy.Service('~enable', SetBool, self.toggle_search)
        self.multi_obs = MultiObservation(self.camera_model)
        rospy.Service('~pose', VisionRequest, self.request_buoy)
        self.image_pub = Image_Publisher("drac_vision/debug")
        self.point_pub = rospy.Publisher("drac_vision/points",
                                         Point,
                                         queue_size=1)
        self.mask_image_pub = rospy.Publisher('drac_vision/mask',
                                              Image,
                                              queue_size=1)

        # Debug
        self.debug = rospy.get_param('~debug', True)

    @staticmethod
    def parse_string(threshes):
        ret = [float(thresh.strip()) for thresh in threshes.split(',')]
        if len(ret) != 3:
            raise ValueError('not 3')
        return ret

    def reconfigure(self, config, level):
        try:
            self.override = config['override']
            self.goal = config['target']
            self.lower = self.parse_string(config['dyn_lower'])
            self.upper = self.parse_string(config['dyn_upper'])
            self.min_trans = config['min_trans']
            self.max_velocity = config['max_velocity']
            self.timeout = config['timeout']
            self.min_observations = config['min_obs']

        except ValueError as e:
            rospy.logwarn('Invalid dynamic reconfigure: {}'.format(e))
            return self.last_config

        if self.override:
            # Dynamic Values for testing
            self.lower = np.array(self.lower)
            self.upper = np.array(self.upper)
        else:
            # Hard Set for use in Competition
            if self.goal == 'drac':
                self.lower = rospy.get_param('~dracula_low_thresh', [0, 0, 80])
                self.upper = rospy.get_param('~dracula_high_thresh',
                                             [0, 0, 80])
            else:
                raise ValueError('Invalid Target Name')
        self.last_config = config
        rospy.loginfo('Params successfully updated via dynamic reconfigure')
        return config

    def image_cb(self, image):
        '''
        Run each time an image comes in from ROS.
        '''
        if not self.enabled:
            return

        self.last_image = image

        if self.last_image_time is not None and \
                self.image_sub.last_image_time < self.last_image_time:
            # Clear tf buffer if time went backwards (nice for playing bags in
            # loop)
            self.tf_listener.clear()

        self.last_image_time = self.image_sub.last_image_time
        self.acquire_targets(image)

    def toggle_search(self, srv):
        '''
        Callback for standard ~enable service. If true, start
        looking at frames for buoys.
        '''
        if srv.data:
            rospy.loginfo("TARGET ACQUISITION: enabled")
            self.enabled = True

        else:
            rospy.loginfo("TARGET ACQUISITION: disabled")
            self.enabled = False

        return SetBoolResponse(success=True)

    def request_buoy(self, srv):
        '''
        Callback for 3D vision request. Uses recent observations of target
        board specified in target_name to attempt a least-squares position
        estimate. Ignoring orientation of board.
        '''
        if not self.enabled:
            return VisionRequestResponse(found=False)
        # buoy = self.buoys[srv.target_name]
        if self.est is None:
            return VisionRequestResponse(found=False)
        return VisionRequestResponse(
            pose=PoseStamped(
                header=Header(stamp=self.last_image_time, frame_id='/map'),
                pose=Pose(position=Point(*self.est))),
            found=True)

    def clear_old_observations(self):
        '''
        Observations older than two seconds are discarded.
        '''
        time = rospy.Time.now()
        i = 0
        while i < len(self._times):
            if time - self._times[i] > self.timeout:
                self._times.popleft()
                self._observations.popleft()
                self._pose_pairs.popleft()
            else:
                i += 1
        # print('Clearing')

    def add_observation(self, obs, pose_pair, time):
        '''
        Add a new observation associated with an object
        '''

        self.clear_old_observations()
        # print('Adding...')
        if len(self._observations) == 0 or np.linalg.norm(
                self._pose_pairs[-1][0] - pose_pair[0]) > self.min_trans:
            self._observations.append(obs)
            self._pose_pairs.append(pose_pair)
            self._times.append(time)

    def get_observations_and_pose_pairs(self):
        '''
        Fetch all recent observations + clear old ones
        '''

        self.clear_old_observations()
        return (self._observations, self._pose_pairs)

    def detect(self, c):
        '''
        Verify the shape in the masked image is large enough to be a valid
        target. This changes depending on the target Vampire, as does the
        number of targets we want.
        '''
        target = "unidentified"
        peri = cv2.arcLength(c, True)

        if peri < self.min_contour_area or peri > self.max_contour_area:
            return target
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)

        target = "Target Aquisition Successful"

        return target

    def mask_image(self, cv_image, lower, upper):
        mask = cv2.inRange(cv_image, lower, upper)
        # Remove anything not within the bounds of our mask
        output = cv2.bitwise_and(cv_image, cv_image, mask=mask)

        if (self.debug):
            try:
                # print(output)
                self.mask_image_pub.publish(
                    self.bridge.cv2_to_imgmsg(np.array(output), 'bgr8'))
            except CvBridgeError as e:
                print(e)

        return output

    def acquire_targets(self, cv_image):
        # Take in the data and get its dimensions.
        height, width, channels = cv_image.shape

        # create NumPy arrays from the boundaries
        lower = np.array(self.lower, dtype="uint8")
        upper = np.array(self.upper, dtype="uint8")

        # Generate a mask based on the constants.
        blurred = self.mask_image(cv_image, lower, upper)
        blurred = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
        # Compute contours
        cnts = cv2.findContours(blurred.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

        cnts = cnts[1]
        '''
        We use OpenCV to compute our contours and then begin processing them
        to ensure we are identifying a proper target.
        '''

        shape = ''
        peri_max = 0
        max_x = 0
        max_y = 0
        m_shape = ''
        for c in cnts:
            # compute the center of the contour, then detect the name of the
            # shape using only the contour
            M = cv2.moments(c)
            if M["m00"] == 0:
                M["m00"] = .000001
            cX = int((M["m10"] / M["m00"]))
            cY = int((M["m01"] / M["m00"]))
            self.point_pub.publish(Point(x=cX, y=cY))
            shape = self.detect(c)

            # multiply the contour (x, y)-coordinates by the resize ratio,
            # then draw the contours and the name of the shape on the image

            c = c.astype("float")
            # c *= ratio
            c = c.astype("int")
            if shape == "Target Acquisition Successful":
                if self.debug:
                    try:
                        cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2)
                        cv2.putText(cv_image, shape, (cX, cY),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (255, 255, 255), 2)
                        self.image_pub.publish(cv_image)
                    except CvBridgeError as e:
                        print(e)

                # Grab the largest contour. Generally this is a safe bet but... We may need to tweak this for the three different vampires.
                peri = cv2.arcLength(c, True)
                if peri > peri_max:
                    peri_max = peri
                    max_x = cX
                    max_y = cY
                    m_shape = shape
        '''
        Approximate 3D coordinates.
        '''

        if m_shape == "Target Acquisition Successful":
            try:
                self.tf_listener.waitForTransform('/map',
                                                  self.camera_model.tfFrame(),
                                                  self.last_image_time,
                                                  rospy.Duration(0.2))
            except tf.Exception as e:
                rospy.logwarn(
                    "Could not transform camera to map: {}".format(e))
                return False

            (t, rot_q) = self.tf_listener.lookupTransform(
                '/map', self.camera_model.tfFrame(), self.last_image_time)
            R = mil_ros_tools.geometry_helpers.quaternion_matrix(rot_q)
            center = np.array([max_x, max_y])
            self.add_observation(center, (np.array(t), R),
                                 self.last_image_time)

            observations, pose_pairs = self.get_observations_and_pose_pairs()
            if len(observations) > self.min_observations:
                self.est = self.multi_obs.lst_sqr_intersection(
                    observations, pose_pairs)
                self.status = 'Pose found'

            else:
                self.status = '{} observations'.format(len(observations))
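
The deque bookkeeping above (add_observation, clear_old_observations, get_observations_and_pose_pairs) implements a gated observation buffer: a new observation is kept only if the camera has translated at least min_trans since the last one (guaranteeing baseline for triangulation), and entries expire after timeout. A hedged distilled version:

from collections import deque

import numpy as np

class ObservationBuffer(object):
    def __init__(self, min_trans, timeout):
        self.min_trans = min_trans  # meters
        self.timeout = timeout      # e.g. a rospy.Duration
        self._obs, self._poses, self._times = deque(), deque(), deque()

    def add(self, obs, pose_pair, time):
        # Only keep observations with enough camera motion between them
        if not self._poses or np.linalg.norm(
                self._poses[-1][0] - pose_pair[0]) > self.min_trans:
            self._obs.append(obs)
            self._poses.append(pose_pair)
            self._times.append(time)

    def prune(self, now):
        # Drop observations older than the timeout
        while self._times and now - self._times[0] > self.timeout:
            self._times.popleft()
            self._obs.popleft()
            self._poses.popleft()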
Code example #10
File: vrx_classifier.py Project: kledom/mil
class VrxClassifier(object):
    # Handle buoys / black totem specially, discriminating on volume as they have the same color
    # The black objects that we have trained the color classifier on
    BLACK_OBJECT_CLASSES = ['buoy', 'black_totem']
    # All the black objects in VRX
    POSSIBLE_BLACK_OBJECTS = ['polyform_a3', 'polyform_a5', 'polyform_a7']
    # The average perceived PCODAR volume of each object above
    BLACK_OBJECT_VOLUMES = [0.3, 0.6, 1.9]
    BLACK_OBJECT_AREA = [0., 0.5, 0., 0.]
    TOTEM_MIN_HEIGHT = 0.9

    def __init__(self):
        self.enabled = False
        # Maps ID to running class probabilities
        self.object_map = {}
        # Maps ID to mean volume, used to discriminate buoys / black totem
        self.volume_means = {}
        self.area_means = {}
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.get_params()
        self.last_panel_points_msg = None
        self.database_client = rospy.ServiceProxy('/database/requests',
                                                  ObjectDBQuery)
        self.sub = Image_Subscriber(self.image_topic, self.img_cb)
        self.camera_info = self.sub.wait_for_camera_info()
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(self.camera_info)
        if self.debug:
            self.image_mux = ImageMux(size=(self.camera_info.height,
                                            self.camera_info.width),
                                      shape=(1, 2),
                                      labels=['Result', 'Mask'])
            self.debug_pub = Image_Publisher('~debug_image')
        self.last_objects = None
        self.last_update_time = rospy.Time.now()
        self.objects_sub = rospy.Subscriber('/pcodar/objects',
                                            PerceptionObjectArray,
                                            self.process_objects,
                                            queue_size=2)
        self.enabled_srv = rospy.Service('~set_enabled', SetBool,
                                         self.set_enable_srv)
        if self.is_training:
            self.enabled = True

    @thread_lock(lock)
    def set_enable_srv(self, req):
        self.enabled = req.data
        return {'success': True}

    def in_frame(self, pixel):
        # TODO: < or <= ???
        return pixel[0] > 0 and pixel[0] < self.camera_info.width and pixel[
            1] > 0 and pixel[1] < self.camera_info.height

    @thread_lock(lock)
    def process_objects(self, msg):
        self.last_objects = msg

    def get_params(self):
        '''
        Set several constants used for image processing and classification
        from ROS params for runtime configurability.
        '''
        self.is_training = rospy.get_param('~train', False)
        self.debug = rospy.get_param('~debug', True)
        self.image_topic = rospy.get_param(
            '~image_topic', '/camera/starboard/image_rect_color')
        self.update_period = rospy.Duration(1.0 /
                                            rospy.get_param('~update_hz', 1))
        self.classifier = VrxColorClassifier()
        self.classifier.train_from_csv()

    def get_box_roi(self, corners):
        roi = roi_enclosing_points(self.camera_model, corners, border=(-10, 0))
        if roi is None:
            rospy.logwarn('No points project into camera.')
            return None
        rect = rect_from_roi(roi)
        bbox_contour = bbox_countour_from_rectangle(rect)
        return bbox_contour

    def get_bbox(self, p, q_mat, obj_msg):
        points = np.zeros((len(obj_msg.points), 3), dtype=np.float)
        for i in range(len(obj_msg.points)):
            points[i, :] = p + q_mat.dot(rosmsg_to_numpy(obj_msg.points[i]))
        return points

    def get_object_roi(self, p, q_mat, obj_msg):
        box_corners = self.get_bbox(p, q_mat, obj_msg)
        return self.get_box_roi(box_corners)

    def update_object(self, object_msg, class_probabilities):
        object_id = object_msg.id

        # Update the total class probabilities
        if object_id in self.object_map:
            self.object_map[object_id] += class_probabilities
        else:
            self.object_map[object_id] = class_probabilities
        total_probabilities = self.object_map[object_id]

        # Guess the type of object based on the accumulated class probabilities
        most_likely_index = np.argmax(total_probabilities)
        most_likely_name = self.classifier.CLASSES[most_likely_index]
        # Unfortunately this doesn't really work
        if most_likely_name in self.BLACK_OBJECT_CLASSES:
            object_scale = rosmsg_to_numpy(object_msg.scale)
            object_volume = object_scale.dot(object_scale)
            object_area = object_scale[:2].dot(object_scale[:2])
            height = object_scale[2]
            if object_id in self.volume_means:
                self.volume_means[object_id].add_value(object_volume)
                self.area_means[object_id].add_value(object_area)
            else:
                self.volume_means[object_id] = RunningMean(object_volume)
                self.area_means[object_id] = RunningMean(object_area)
            running_mean_volume = self.volume_means[object_id].mean
            running_mean_area = self.area_means[object_id].mean

            if height > self.TOTEM_MIN_HEIGHT:
                black_guess = 'black_totem'
            else:
                black_guess_index = np.argmin(
                    np.abs(np.array(self.BLACK_OBJECT_VOLUMES) -
                           running_mean_volume))
                black_guess = self.POSSIBLE_BLACK_OBJECTS[black_guess_index]
            most_likely_name = black_guess
            rospy.loginfo(
                '{} current/running volume={}/{} area={}/{} height={}-> {}'.
                format(object_id, object_volume, running_mean_volume,
                       object_area, running_mean_area, height, black_guess))
        obj_title = object_msg.labeled_classification
        probability = class_probabilities[most_likely_index]
        rospy.loginfo('Object {} {} classified as {} ({}%)'.format(
            object_id, object_msg.labeled_classification, most_likely_name,
            probability * 100.))
        if obj_title != most_likely_name:
            cmd = '{}={}'.format(object_id, most_likely_name)
            rospy.loginfo('Updating object {} to {}'.format(
                object_id, most_likely_name))
            if not self.is_training:
                self.database_client(ObjectDBQueryRequest(cmd=cmd))
        return most_likely_name

    @thread_lock(lock)
    def img_cb(self, img):
        if not self.enabled:
            return
        if self.camera_model is None:
            return
        if self.last_objects is None or len(self.last_objects.objects) == 0:
            return
        now = rospy.Time.now()
        if now - self.last_update_time < self.update_period:
            return
        self.last_update_time = now
        # Get Transform from ENU to optical at the time of this image
        transform = self.tf_buffer.lookup_transform(
            self.sub.last_image_header.frame_id,
            "enu",
            self.sub.last_image_header.stamp,
            timeout=rospy.Duration(1))
        translation = rosmsg_to_numpy(transform.transform.translation)
        rotation = rosmsg_to_numpy(transform.transform.rotation)
        rotation_mat = quaternion_matrix(rotation)[:3, :3]

        # Transform the center of each object into optical frame
        positions_camera = [
            translation + rotation_mat.dot(rosmsg_to_numpy(obj.pose.position))
            for obj in self.last_objects.objects
        ]
        pixel_centers = [
            self.camera_model.project3dToPixel(point)
            for point in positions_camera
        ]
        distances = np.linalg.norm(positions_camera, axis=1)
        CUTOFF_METERS = 15

        # Get a list of indices of objects that are sufficiently close and visible to the camera
        met_criteria = []
        for i in xrange(len(self.last_objects.objects)):
            distance = distances[i]
            if self.in_frame(
                    pixel_centers[i]
            ) and distance < CUTOFF_METERS and positions_camera[i][2] > 0:
                met_criteria.append(i)
        # print 'Keeping {} of {}'.format(len(met_criteria), len(self.last_objects.objects))

        rois = [
            self.get_object_roi(translation, rotation_mat,
                                self.last_objects.objects[i])
            for i in met_criteria
        ]
        debug = np.zeros(img.shape, dtype=img.dtype)

        if self.is_training:
            training = []

        for i in xrange(len(rois)):
            # The index in self.last_objects that this object is
            index = met_criteria[i]
            # The actual message object we are looking at
            object_msg = self.last_objects.objects[index]
            object_id = object_msg.id
            # Exit early if we could not get a valid roi
            if rois[i] is None:
                rospy.logwarn(
                    'Object {} had no points, skipping'.format(object_id))
                continue
            # Form a contour from the ROI
            contour = np.array(rois[i], dtype=int)
            # Create image mask from the contour
            mask = contour_mask(contour, img_shape=img.shape)

            # get the color mean features
            features = np.array(self.classifier.get_features(img,
                                                             mask)).reshape(
                                                                 1, 9)
            # Predict class probabilites based on color means
            class_probabilities = self.classifier.feature_probabilities(
                features)[0]
            # Use this and previous probabilities to guess at which object this is
            guess = self.update_object(object_msg, class_probabilities)
            # If training, save this
            if self.is_training and object_msg.labeled_classification != 'UNKNOWN':
                classification_index = self.classifier.CLASSES.index(
                    object_msg.labeled_classification)
                training.append(np.append(classification_index, features))

            # Draw debug info
            colorful = cv2.bitwise_or(img, img, mask=mask)
            debug = cv2.bitwise_or(debug, colorful)
            scale = 3
            thickness = 2
            center = np.array(pixel_centers[index], dtype=int)
            text = str(object_id)
            putText_ul(debug,
                       text,
                       center,
                       fontScale=scale,
                       thickness=thickness)

        if self.is_training and len(training) != 0:
            training = np.array(training)
            try:
                previous_data = pandas.DataFrame.from_csv(
                    self.classifier.training_file).values
                data = np.vstack((previous_data, training))
            except Exception as e:
                data = training
            self.classifier.save_csv(data[:, 1:], data[:, 0])
            rospy.signal_shutdown('fdfd')
            raise Exception('did something, kev')

        self.image_mux[0] = img
        self.image_mux[1] = debug
        self.debug_pub.publish(self.image_mux())
        return
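
RunningMean is used above but not shown; a minimal sketch consistent with the add_value/mean usage here (the actual mil implementation may differ):

class RunningMean(object):
    def __init__(self, value):
        self.mean = float(value)
        self.count = 1

    def add_value(self, value):
        # Incremental mean: m_n = m_(n-1) + (x - m_(n-1)) / n
        self.count += 1
        self.mean += (value - self.mean) / self.count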
Code example #11
    def __init__(self):
        self.debug_gui = False
        self.enabled = False
        self.cam = None

        # Constants from launch config file
        self.debug_ros = rospy.get_param("~debug_ros", True)
        self.canny_low = rospy.get_param("~canny_low", 100)
        self.canny_ratio = rospy.get_param("~canny_ratio", 3.0)
        self.thresh_hue_high = rospy.get_param("~thresh_hue_high", 60)
        self.thresh_saturation_low = rospy.get_param("~thresh_saturation_low",
                                                     100)
        self.min_contour_area = rospy.get_param("~min_contour_area", 100)
        self.epsilon_range = rospy.get_param("~epsilon_range", (0.01, 0.1))
        self.epsilon_step = rospy.get_param("~epsilon_step", 0.01)
        self.shape_match_thresh = rospy.get_param("~shape_match_thresh", 0.4)
        self.min_found_count = rospy.get_param("~min_found_count", 10)
        self.timeout_seconds = rospy.get_param("~timeout_seconds", 2.0)
        # Default to scale model of path marker. Please use set_geometry service
        # to set to correct model of object.
        length = rospy.get_param("~length", 1.2192)
        width = rospy.get_param("~width", 0.1524)
        self.rect_model = RectFinder(length, width)
        self.do_3D = rospy.get_param("~do_3D", True)
        camera = rospy.get_param("~image_topic",
                                 "/camera/down/left/image_rect_color")

        self.tf_listener = tf.TransformListener()

        # Create kalman filter to track 3d position and direction vector for marker in /map frame
        self.state_size = 5  # X, Y, Z, DY, DX
        self.filter = cv2.KalmanFilter(self.state_size, self.state_size)
        self.filter.transitionMatrix = 1. * np.eye(self.state_size,
                                                   dtype=np.float32)
        self.filter.measurementMatrix = 1. * np.eye(self.state_size,
                                                    dtype=np.float32)
        self.filter.processNoiseCov = 1e-5 * np.eye(self.state_size,
                                                    dtype=np.float32)
        self.filter.measurementNoiseCov = 1e-4 * np.eye(self.state_size,
                                                        dtype=np.float32)
        self.filter.errorCovPost = 1. * np.eye(self.state_size,
                                               dtype=np.float32)

        self.reset()
        self.service_set_geometry = rospy.Service('~set_geometry', SetGeometry,
                                                  self._set_geometry_cb)
        if self.debug_ros:
            self.debug_pub = Image_Publisher("~debug_image")
            self.markerPub = rospy.Publisher('~marker', Marker, queue_size=10)
        self.service2D = rospy.Service('~2D', VisionRequest2D,
                                       self._vision_cb_2D)
        if self.do_3D:
            self.service3D = rospy.Service('~pose', VisionRequest,
                                           self._vision_cb_3D)
        self.toggle = rospy.Service('~enable', SetBool, self._enable_cb)

        self.image_sub = Image_Subscriber(camera, self._img_cb)
        self.camera_info = self.image_sub.wait_for_camera_info()
        assert self.camera_info is not None
        self.cam = PinholeCameraModel()
        self.cam.fromCameraInfo(self.camera_info)
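
A hedged sketch of the predict/correct cycle this node presumably runs against the cv2.KalmanFilter configured above; since transitionMatrix and measurementMatrix are identity, state and measurement are both the 5-vector [X, Y, Z, DY, DX]:

import numpy as np

def kalman_update(kf, measurement):
    # measurement: 5 floats in the /map frame, e.g. [x, y, z, dy, dx]
    kf.predict()
    est = kf.correct(np.array(measurement, dtype=np.float32).reshape(-1, 1))
    return est.flatten()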