Example #1
class ROSStereoListener:
    def __init__(self, topics, rate=30.0, name='stereo_listener'):
        self.listener = ru.GenericListener(name, [Image, Image], topics, rate)
        self.lbridge = CvBridge()
        self.rbridge = CvBridge()

    def next(self):
        lros, rros = self.listener.read(allow_duplication=False, willing_to_wait=True, warn=False, quiet=True)
        lcv = cv.CloneMat(self.lbridge.imgmsg_to_cv(lros, 'bgr8'))
        rcv = cv.CloneMat(self.rbridge.imgmsg_to_cv(rros, 'bgr8'))
        return lcv, rcv
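
A minimal usage sketch for the class above (assumptions: hrl_lib's
GenericListener handles ROS node setup itself, and the stereo topic names
below are placeholders):

import hrl_lib.rutils as ru
from sensor_msgs.msg import Image
from cv_bridge.cv_bridge import CvBridge
import cv

stereo = ROSStereoListener(['/wide_stereo/left/image_rect_color',
                            '/wide_stereo/right/image_rect_color'])
left_cv, right_cv = stereo.next()  # blocks for a fresh pair (willing_to_wait=True)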
Example #2
class ROSStereoListener:
    def __init__(self, topics, rate=30.0, name='stereo_listener'):
        self.listener = ru.GenericListener(name, [Image, Image], topics, rate)
        self.lbridge = CvBridge()
        self.rbridge = CvBridge()

    def next(self):
        lros, rros = self.listener.read(allow_duplication=False,
                                        willing_to_wait=True,
                                        warn=False,
                                        quiet=True)
        lcv = cv.CloneMat(self.lbridge.imgmsg_to_cv(lros, 'bgr8'))
        rcv = cv.CloneMat(self.rbridge.imgmsg_to_cv(rros, 'bgr8'))
        return lcv, rcv
Example #3
class ROSImageClient:
    def __init__(self, topic_name):
        self.bridge = CvBridge()
        def message_extractor(ros_img):
            try:
                cv_image = self.bridge.imgmsg_to_cv(ros_img, 'bgr8')
                return cv_image, ros_img
            except CvBridgeError, e:
                return None
        self.listener = ru.GenericListener('ROSImageClient', Image, topic_name, 
                                           .1, message_extractor)
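
The extractor above uses the legacy CvBridge.imgmsg_to_cv call, which returns
an OpenCV 1.x CvMat. On current cv_bridge releases the equivalent call is
imgmsg_to_cv2, which returns a NumPy array; a sketch of the same extractor on
the newer API:

import cv2
from cv_bridge import CvBridge, CvBridgeError

bridge = CvBridge()

def message_extractor(ros_img):
    # imgmsg_to_cv2 returns a NumPy array usable directly with cv2.
    try:
        return bridge.imgmsg_to_cv2(ros_img, desired_encoding='bgr8'), ros_img
    except CvBridgeError:
        return None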
Example #4
def find_image_features(bagname, topic):
    features_list = []
    bridge = CvBridge()
    i = 0
    for topic, msg, t in ru.bag_iter(bagname, [topic]):
        t = msg.header.stamp.to_time()
        image = bridge.imgmsg_to_cv(msg, 'bgr8')
        image_gray = fea.grayscale(image)
        surf_keypoints, surf_descriptors = fea.surf(image_gray)
        features_list.append((t, (surf_keypoints, surf_descriptors)))
        rospy.loginfo("%.3f frame %d found %d points" % (t, i, len(surf_keypoints)))
        i = i + 1
    return features_list
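
ru.bag_iter is hrl_lib's wrapper around bag playback; the same iteration can
be written against the standard rosbag API (the bag and topic names below are
placeholders):

import rosbag
from cv_bridge.cv_bridge import CvBridge

bridge = CvBridge()
with rosbag.Bag('images.bag') as bag:
    for topic, msg, t in bag.read_messages(topics=['/wide_stereo/left/image_rect_color']):
        image = bridge.imgmsg_to_cv(msg, 'bgr8')
        # ... feature extraction as above ...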
Example #5
class ROSImageClient:
    def __init__(self, topic_name):
        self.bridge = CvBridge()

        def message_extractor(ros_img):
            try:
                cv_image = self.bridge.imgmsg_to_cv(ros_img, 'bgr8')
                return cv_image, ros_img
            except CvBridgeError, e:
                return None

        self.listener = ru.GenericListener('ROSImageClient', Image, topic_name,
                                           .1, message_extractor)
Example #6
File: test03.py  Project: gt-ros-pkg/hrl
def find_contact_images(bag_name, contact_times, all_times, topic_name):
    print 'finding closest images for ', topic_name
    times_tree = sp.KDTree(np.matrix(all_times).T)
    closest_times = [all_times[times_tree.query([a_time])[1]] for a_time in contact_times]
    pdb.set_trace()
    
    print 'getting & saving images, expecting', len(set(closest_times)), 'images'
    bridge = CvBridge()
    cleaned_topic_name = topic_name.replace('/', '')
    i = 0
    for ros_msg in get_closest_msgs(bag_name, [topic_name], closest_times):
        i = i + 1
        msg_time = ros_msg.header.stamp.to_time() - all_times[0]
        cv_image = bridge.imgmsg_to_cv(ros_msg, 'bgr8')
        img_name = "%s_%.3f_touched.png" % (cleaned_topic_name, msg_time)
        print 'writing', img_name
        cv.SaveImage(img_name, cv_image)
    print 'got', i, 'images'
Example #7
File: test03.py  Project: wklharry/hrl
def find_contact_images(bag_name, contact_times, all_times, topic_name):
    print 'finding closest images for ', topic_name
    times_tree = sp.KDTree(np.matrix(all_times).T)
    closest_times = [
        all_times[times_tree.query([a_time])[1]] for a_time in contact_times
    ]
    pdb.set_trace()

    print 'getting & saving images, expecting', len(
        set(closest_times)), 'images'
    bridge = CvBridge()
    cleaned_topic_name = topic_name.replace('/', '')
    i = 0
    for ros_msg in get_closest_msgs(bag_name, [topic_name], closest_times):
        i = i + 1
        msg_time = ros_msg.header.stamp.to_time() - all_times[0]
        cv_image = bridge.imgmsg_to_cv(ros_msg, 'bgr8')
        img_name = "%s_%.3f_touched.png" % (cleaned_topic_name, msg_time)
        print 'writing', img_name
        cv.SaveImage(img_name, cv_image)
    print 'got', i, 'images'
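
A note on the KDTree lookup above: scipy's KDTree.query returns a
(distance, index) pair, and the snippet keeps only the index to recover the
nearest stored timestamp. In isolation (the times are made up):

import numpy as np
import scipy.spatial as sp

all_times = [0.0, 0.5, 1.1, 2.0]
times_tree = sp.KDTree(np.array(all_times).reshape(-1, 1))
dist, idx = times_tree.query([0.6])
print(all_times[idx])   # 0.5 - the nearest stored time to 0.6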
Example #8
class ThreePointTracker_Synchronizer:
    """
    Synchronization node for all three point trackers in a given tracking region.
    """
    def __init__(self, region, max_seq_age=200):
        self.lock = threading.Lock()

        self.region = region
        regions_dict = file_tools.read_tracking_2d_regions()
        self.camera_list = regions_dict[region]
        self.camera_list.sort()
        self.create_camera_to_tracking_dict()
        self.latest_seq = None
        self.max_seq_age = max_seq_age
        self.tracking_pts_pool = {}

        self.tracking_pts_roi_size = rospy.get_param(
            '/three_point_tracker_params/roi_size', (150, 150))

        # Color and font for tracking points image
        self.magenta = (255, 255, 0)
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.6,
                                        0.6,
                                        thickness=0)

        # Font for PIL tracking info image
        self.info_image_size = (180, 100)
        self.font = PILImageFont.truetype(
            "/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf", 16)

        # Get transforms from cameras to tracking and stitched image planes
        self.tf2d = transform_2d.Transform2d()

        self.bridge = CvBridge()
        self.ready = False
        rospy.init_node('three_point_tracker_synchronizer',
                        log_level=rospy.DEBUG)

        # Subscribe to raw tracking pts topics
        self.tracking_pts_sub = {}
        for camera, topic in self.camera_to_tracking.iteritems():
            handler = functools.partial(self.tracking_pts_handler, camera)
            self.tracking_pts_sub[camera] = rospy.Subscriber(
                topic, ThreePointTrackerRaw, handler)

        # Create publishers
        self.tracking_pts_pub = rospy.Publisher('tracking_pts',
                                                ThreePointTracker)
        self.image_tracking_pts = None
        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)
        self.image_tracking_info_pub = rospy.Publisher('image_tracking_info',
                                                       Image)

        # Setup rand sync service
        rospy.wait_for_service('/get_rand_sync_signal')
        self.rand_sync_srv = rospy.ServiceProxy('/get_rand_sync_signal',
                                                GetRandSyncSignal)

        # Setup reset service - needs to be called anytime the camera trigger is
        # stopped - before it is restarted. Empties buffers of images and sequences.
        self.reset_srv = rospy.Service('reset_tracker_synchronizer', Empty,
                                       self.reset_handler)
        self.ready = True

    def create_camera_to_tracking_dict(self):
        """
        Creates a dictionary relating the cameras in the tracking region to their
        corresponding three point tracker nodes.
        """
        self.camera_to_tracking = {}
        for camera in self.camera_list:
            camera_fullpath_topic = mct_introspection.get_camera_fullpath_topic(
                camera)
            tracking_pts_topic = '{0}/tracking_pts'.format(
                camera_fullpath_topic)
            self.camera_to_tracking[camera] = tracking_pts_topic

    def reset_handler(self, req):
        """
        Reset service handler. Empties the tracking_pts_pool.
        """
        with self.lock:
            self.latest_seq = None
            self.tracking_pts_pool = {}
        return EmptyResponse()

    def tracking_pts_handler(self, camera, msg):
        """
        Handler for messages from the individual tracker nodes. Sticks the tracking
        point data into a dictionary by sequence number and camera.
        """
        if not self.ready:
            return
        with self.lock:
            self.latest_seq = msg.data.seq
            try:
                self.tracking_pts_pool[msg.data.seq][camera] = msg
            except KeyError:
                self.tracking_pts_pool[msg.data.seq] = {camera: msg}

    def process_tracking_pts(self, tracking_pts_dict):
        """
        Determines whether the object has been found by any of the three point
        trackers for the region. If it has been found, the best tracking point
        data is selected in a winner-takes-all fashion. The best tracking point
        data is that which is nearest to the center of the image on the camera
        upon which it was captured.
        """

        # Get time stamp (secs, nsecs) - always from the same camera to avoid jumps due to
        # possible system clock differences.
        time_camera = self.camera_list[0]
        time_camera_secs = tracking_pts_dict[time_camera].data.secs
        time_camera_nsecs = tracking_pts_dict[time_camera].data.nsecs

        # Get list of messages in which the object was found
        found_list = [
            msg for msg in tracking_pts_dict.values() if msg.data.found
        ]
        tracking_pts_msg = ThreePointTracker()

        if found_list:

            # Object found - select the object with the largest ROI, or if the
            # ROIs are of equal size select the object whose distance to the
            # center of the image is the smallest
            found_list.sort(cmp=tracking_pts_sort_cmp)
            best = found_list[0]
            camera = best.data.camera

            # Get coordinates of points in tracking and stitching planes
            best_points_array = numpy.array([(p.x, p.y)
                                             for p in best.data.points])
            pts_anchor_plane = self.tf2d.camera_pts_to_anchor_plane(
                camera, best_points_array)
            pts_stitching_plane = self.tf2d.camera_pts_to_stitching_plane(
                camera, best_points_array)
            pts_anchor_plane = [
                Point2d(p[0], p[1]) for p in list(pts_anchor_plane)
            ]
            pts_stitching_plane = [
                Point2d(p[0], p[1]) for p in list(pts_stitching_plane)
            ]

            # Get orientation angle and mid point of object in anchor and stitching planes
            angle = get_angle(pts_anchor_plane)
            midpt_anchor_plane = get_midpoint(pts_anchor_plane)
            midpt_stitching_plane = get_midpoint(pts_stitching_plane)
            #self.camera_fail = max([(err,cam) for cam, err in self.max_error_by_camera.items()])[1]

            # Get size of tracking points image in the anchor (tracking) plane
            roi = best.data.roi
            x0, x1 = roi[0], roi[0] + roi[2]
            y0, y1 = roi[1], roi[1] + roi[3]
            bndry_camera = [(x0, y0), (x1, y0), (x1, y1), (x0, y1)]
            bndry_camera_array = numpy.array(bndry_camera)
            bndry_anchor = self.tf2d.camera_pts_to_anchor_plane(
                camera, bndry_camera_array)
            bndry_stitching = self.tf2d.camera_pts_to_stitching_plane(
                camera, bndry_camera_array)
            bndry_anchor = [tuple(x) for x in list(bndry_anchor)]
            bndry_stitching = [tuple(x) for x in list(bndry_stitching)]
            dx1 = abs(bndry_anchor[1][0] - bndry_anchor[0][0])
            dx2 = abs(bndry_anchor[3][0] - bndry_anchor[2][0])
            dy1 = abs(bndry_anchor[2][1] - bndry_anchor[1][1])
            dy2 = abs(bndry_anchor[3][1] - bndry_anchor[0][1])
            dx_max = max([dx1, dx2])
            dy_max = max([dy1, dy2])
            dim_max = max([dx_max, dy_max])

            # Convert tracking points image to opencv image.
            image_tracking_pts = self.bridge.imgmsg_to_cv(
                best.image, desired_encoding="passthrough")
            image_tracking_pts = cv.GetImage(image_tracking_pts)
            image_size = cv.GetSize(image_tracking_pts)
            image_dim_max = max(image_size)

            # Get matrix for homography from camera to anchor plane
            tf_matrix = self.tf2d.get_camera_to_anchor_plane_tf(camera)

            # Shift for local ROI
            tf_shift = numpy.array([
                [1.0, 0.0, roi[0]],
                [0.0, 1.0, roi[1]],
                [0.0, 0.0, 1.0],
            ])
            tf_matrix = numpy.dot(tf_matrix, tf_shift)

            # Get scaling factor
            shift_x = -min([x for x, y in bndry_anchor])
            shift_y = -min([y for x, y in bndry_anchor])
            scale_factor = float(image_dim_max) / dim_max

            # Scale and shift transform so that homography maps the tracking points
            # sub-image into a image_size image starting at coord. 0,0
            tf_shift_and_scale = numpy.array([
                [scale_factor, 0.0, scale_factor * shift_x],
                [0.0, scale_factor, scale_factor * shift_y],
                [0.0, 0.0, 1.0],
            ])
            tf_matrix = numpy.dot(tf_shift_and_scale, tf_matrix)

            # Warp image using homography
            image_tracking_pts_mod = cv.CreateImage(
                image_size, image_tracking_pts.depth,
                image_tracking_pts.channels)
            cv.WarpPerspective(
                image_tracking_pts,
                image_tracking_pts_mod,
                cv.fromarray(tf_matrix),
                cv.CV_INTER_LINEAR | cv.CV_WARP_FILL_OUTLIERS,
            )

            # Add sequence number to image
            message = '{0}'.format(best.data.seq)
            cv.PutText(image_tracking_pts_mod, message, (2, 15),
                       self.cv_text_font, self.magenta)
            self.image_tracking_pts = self.bridge.cv_to_imgmsg(
                image_tracking_pts_mod, encoding="passthrough")

            # Create tracking points message
            tracking_pts_msg.seq = best.data.seq
            tracking_pts_msg.secs = time_camera_secs
            tracking_pts_msg.nsecs = time_camera_nsecs
            tracking_pts_msg.camera = camera
            tracking_pts_msg.found = True
            tracking_pts_msg.angle = angle
            tracking_pts_msg.angle_deg = (180.0 * angle) / math.pi
            tracking_pts_msg.midpt_anchor_plane = midpt_anchor_plane
            tracking_pts_msg.midpt_stitching_plane = midpt_stitching_plane
            tracking_pts_msg.pts_anchor_plane = pts_anchor_plane
            tracking_pts_msg.pts_stitching_plane = pts_stitching_plane
            tracking_pts_msg.bndry_anchor_plane = [
                Point2d(x, y) for x, y in bndry_anchor
            ]
            tracking_pts_msg.bndry_stitching_plane = [
                Point2d(x, y) for x, y in bndry_stitching
            ]
        else:
            sample = tracking_pts_dict.values()[0]
            tracking_pts_msg.seq = sample.data.seq
            tracking_pts_msg.secs = time_camera_secs
            tracking_pts_msg.nsecs = time_camera_nsecs
            tracking_pts_msg.camera = ''
            tracking_pts_msg.found = False

        # Create tracking info image
        pil_info_image = PILImage.new('RGB', self.info_image_size,
                                      (255, 255, 255))
        draw = PILImageDraw.Draw(pil_info_image)
        text_list = []

        label = 'Found:'
        value = '{0}'.format(tracking_pts_msg.found)
        unit = ''
        text_list.append((label, value, unit))

        label = 'Angle:'
        value = '{0:+3.1f}'.format(180.0 * tracking_pts_msg.angle / math.pi)
        unit = 'deg'
        text_list.append((label, value, unit))

        label = 'Pos X:'
        value = '{0:+1.3f}'.format(tracking_pts_msg.midpt_anchor_plane.x)
        unit = 'm'
        text_list.append((label, value, unit))
        #self.camera_fail = max([(err,cam) for cam, err in self.max_error_by_camera.items()])[1]

        label = 'Pos Y:'
        value = '{0:+1.3f}'.format(tracking_pts_msg.midpt_anchor_plane.y)
        unit = 'm'
        text_list.append((label, value, unit))

        for i, text in enumerate(text_list):
            label, value, unit = text
            draw.text((10, 10 + 20 * i), label, font=self.font, fill=(0, 0, 0))
            draw.text((70, 10 + 20 * i), value, font=self.font, fill=(0, 0, 0))
            draw.text((125, 10 + 20 * i), unit, font=self.font, fill=(0, 0, 0))
        cv_info_image = cv.CreateImageHeader(pil_info_image.size,
                                             cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_info_image, pil_info_image.tostring())

        # Convert to a rosimage and publish
        info_rosimage = self.bridge.cv_to_imgmsg(cv_info_image, 'rgb8')
        self.image_tracking_info_pub.publish(info_rosimage)

        # Get sync signal
        sync_rsp = self.rand_sync_srv(tracking_pts_msg.seq)
        if sync_rsp.status:
            tracking_pts_msg.sync_signal = sync_rsp.signal
        else:
            tracking_pts_msg.sync_signal = [0, 0, 0]

        # Publish messages
        self.tracking_pts_pub.publish(tracking_pts_msg)
        if self.image_tracking_pts is not None:
            self.image_tracking_pts_pub.publish(self.image_tracking_pts)
        else:
            zero_image_cv = cv.CreateImage(self.tracking_pts_roi_size,
                                           cv.IPL_DEPTH_8U, 3)
            cv.Zero(zero_image_cv)
            zero_image_ros = self.bridge.cv_to_imgmsg(zero_image_cv,
                                                      encoding="passthrough")
            self.image_tracking_pts_pub.publish(zero_image_ros)

    def run(self):
        """
        Node main loop - consolidates the tracking points messages by
        acquisition sequence number and processes them by passing them to the
        process_tracking_pts function.
        """
        while not rospy.is_shutdown():

            with self.lock:

                #t0 = time.time()
                #print('len pool:', len(self.tracking_pts_pool))
                for seq, tracking_pts_dict in sorted(
                        self.tracking_pts_pool.items()):
                    #print(' ', seq)

                    # Check if we have all the tracking data for the given sequence number
                    if len(tracking_pts_dict) == len(self.camera_list):
                        self.process_tracking_pts(tracking_pts_dict)
                        del self.tracking_pts_pool[seq]

                    # Throw away tracking data older than the maximum allowed
                    # age (elif prevents deleting the same sequence twice)
                    elif self.latest_seq - seq > self.max_seq_age:
                        #print('Throwing away: ', seq)
                        del self.tracking_pts_pool[seq]
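
The pool-by-sequence-number pattern used by tracking_pts_handler and run can
be shown in isolation; a minimal sketch (the class and names below are ours,
not from the project):

import threading

class SeqPool(object):
    """
    Collects per-camera messages keyed by sequence number and yields complete
    sets once every expected camera has reported.
    """
    def __init__(self, cameras, max_seq_age=200):
        self.lock = threading.Lock()
        self.cameras = set(cameras)
        self.max_seq_age = max_seq_age
        self.latest_seq = None
        self.pool = {}

    def add(self, camera, seq, msg):
        with self.lock:
            self.latest_seq = seq
            self.pool.setdefault(seq, {})[camera] = msg

    def pop_complete(self):
        complete = []
        with self.lock:
            for seq in sorted(self.pool.keys()):
                if set(self.pool[seq]) == self.cameras:
                    complete.append((seq, self.pool.pop(seq)))
                elif self.latest_seq - seq > self.max_seq_age:
                    del self.pool[seq]  # stale sequence, discard
        return complete

pop_complete returns complete camera sets oldest-first and quietly discards
stragglers, mirroring the node's run loop.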
Example #9
class ThreePointTracker(object):
    """
    Three point object tracker. Looks for objects in the image stream with
    three bright unequally spaced points.
    """
    def __init__(self, topic=None, max_stamp_age=1.5):

        self.lock = threading.Lock()
        self.ready = False
        self.topic = topic
        self.tracking_pts_roi = None
        self.tracking_pts_roi_src = None
        self.tracking_pts_roi_dst = None
        self.seq_to_data = {}

        self.topic_type = mct_introspection.get_topic_type(self.topic)
        self.bridge = CvBridge()
        self.camera = get_camera_from_topic(self.topic)
        self.camera_info_topic = get_camera_info_from_image_topic(self.topic)
        rospy.init_node('blob_finder')

        # Get parameters from the parameter server
        params_ns = 'three_point_tracker_params'
        self.blobFinder = BlobFinder()
        self.tracking_pts_roi_size = rospy.get_param(
            '/{0}/roi_size'.format(params_ns),
            (200, 200),
        )
        self.blobFinder.threshold = rospy.get_param(
            '/{0}/threshold'.format(params_ns),
            200,
        )
        self.blobFinder.filter_by_area = rospy.get_param(
            '/{0}/filter_by_area'.format(params_ns), False)
        self.blobFinder.min_area = rospy.get_param(
            '/{0}/min_area'.format(params_ns),
            0,
        )
        self.blobFinder.max_area = rospy.get_param(
            '/{0}/max_area'.format(params_ns),
            200,
        )
        self.tracking_pts_spacing = rospy.get_param(
            '/{0}/pts_spacing'.format(params_ns),
            (0.0, 0.04774, 0.07019),
        )
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]

        # Determine target point spacing
        d0 = self.tracking_pts_spacing[1] - self.tracking_pts_spacing[0]
        d1 = self.tracking_pts_spacing[2] - self.tracking_pts_spacing[1]
        if d0 > d1:
            self.large_step_first = True
        else:
            self.large_step_first = False

        # Set up subscribers based on topic type.
        if self.topic_type == 'sensor_msgs/Image':

            # Data dictionaries for synchronizing tracking data with image seq number
            self.max_stamp_age = max_stamp_age
            self.latest_stamp = None
            self.stamp_to_seq = {}
            self.stamp_to_data = {}

            # Subscribe to image and camera info topics
            self.image_sub = rospy.Subscriber(self.topic, Image,
                                              self.image_callback)
            self.info_sub = rospy.Subscriber(self.camera_info_topic,
                                             CameraInfo,
                                             self.camera_info_callback)

        elif self.topic_type == 'mct_msg_and_srv/SeqAndImage':

            self.seq_and_image_sub = rospy.Subscriber(
                self.topic, SeqAndImage, self.seq_and_image_callback)

        else:
            err_msg = 'unable to handle topic type: {0}'.format(
                self.topic_type)
            raise ValueError, err_msg

        # Create tracking point and tracking points image publications
        self.tracking_pts_pub = rospy.Publisher('tracking_pts',
                                                ThreePointTrackerRaw)
        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)

        # Set up services for getting and setting the tracking parameters.
        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service('{0}/set_param'.format(node_name),
                                           BlobFinderSetParam,
                                           self.handle_set_param_srv)
        self.get_param_srv = rospy.Service('{0}/get_param'.format(node_name),
                                           BlobFinderGetParam,
                                           self.handle_get_param_srv)
        self.ready = True

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters: the threshold
        used for binarizing the image and the blob area filter settings.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True, '')

    def handle_get_param_srv(self, req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def camera_info_callback(self, camera_info):
        """
        Callback for camera info topic subscription - used to get the image seq
        number when subscribing to sensor_msgs/Image topics, as the seq numbers
        in the Image headers are not reliable.

        Note, only used when subscription is of type sensor_msgs/Image.
        """
        stamp_tuple = camera_info.header.stamp.secs, camera_info.header.stamp.nsecs
        with self.lock:
            self.latest_stamp = stamp_tuple
            self.stamp_to_seq[stamp_tuple] = camera_info.header.seq

    def image_callback(self, image):
        """
        Callback for image topic subscription - gets images, finds the
        tracking points, and extracts the tracking data.

        Note, only used when subscription is of type sensor_msgs/Image.
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list = self.blobFinder.findBlobs(image, create_image=False)
        tracking_data = self.get_tracking_data(blobs_list, image)
        with self.lock:
            self.stamp_to_data[tracking_data['stamp']] = tracking_data

    def seq_and_image_callback(self, msg_data):
        """
        Callback for subscriptions of type mct_msg_and_srv/SeqAndImage type.
        Finds blobs in msg_data.image and extracts tracking data.
        """
        if not self.ready:
            return
        with self.lock:
            blobs_list = self.blobFinder.findBlobs(msg_data.image,
                                                   create_image=False)
        tracking_data = self.get_tracking_data(blobs_list, msg_data.image)
        with self.lock:
            self.seq_to_data[msg_data.seq] = tracking_data

    def get_tracking_data(self, blobs_list, rosimage):
        """
        Gets tracking data from list of blobs and raw image.
        """
        # Convert to opencv image
        cv_image = self.bridge.imgmsg_to_cv(rosimage,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)

        # Create tracking points image
        image_tracking_pts = cv.CreateImage(self.tracking_pts_roi_size,
                                            cv.IPL_DEPTH_8U, 3)
        cv.Zero(image_tracking_pts)

        num_blobs = len(blobs_list)
        if num_blobs == 3:
            found = True
            uv_list = self.get_sorted_uv_points(blobs_list)
            self.tracking_pts_roi = self.get_tracking_pts_roi(
                uv_list, cv.GetSize(ipl_image))
            dist_to_image_center = self.get_dist_to_image_center(
                uv_list, cv.GetSize(ipl_image))
        else:
            found = False
            dist_to_image_center = 0.0
            uv_list = [(0, 0), (0, 0), (0, 0)]

        # Create tracking pts image using ROI around tracking points
        if self.tracking_pts_roi is not None:
            src_roi, dst_roi = truncate_roi(self.tracking_pts_roi,
                                            cv.GetSize(ipl_image))
            cv.SetImageROI(ipl_image, src_roi)
            cv.SetImageROI(image_tracking_pts, dst_roi)
            cv.CvtColor(ipl_image, image_tracking_pts, cv.CV_GRAY2BGR)
            cv.ResetImageROI(ipl_image)
            cv.ResetImageROI(image_tracking_pts)
            self.tracking_pts_roi_src = src_roi
            self.tracking_pts_roi_dst = dst_roi
            if found:
                for i, uv in enumerate(uv_list):
                    color = self.tracking_pts_colors[i]
                    u, v = uv
                    u = u - self.tracking_pts_roi[0]
                    v = v - self.tracking_pts_roi[1]
                    cv.Circle(image_tracking_pts, (int(u), int(v)), 3, color)

        # Convert tracking points image to rosimage and publish
        rosimage_tracking_pts = self.bridge.cv_to_imgmsg(
            image_tracking_pts, encoding="passthrough")
        self.image_tracking_pts_pub.publish(rosimage_tracking_pts)

        # If tracking points roi doesn't exist yet just send zeros
        if self.tracking_pts_roi is None:
            tracking_pts_roi = (0, 0, 0, 0)
            tracking_pts_roi_src = (0, 0, 0, 0)
            tracking_pts_roi_dst = (0, 0, 0, 0)
        else:
            tracking_pts_roi = self.tracking_pts_roi
            tracking_pts_roi_src = self.tracking_pts_roi_src
            tracking_pts_roi_dst = self.tracking_pts_roi_dst

        stamp_tuple = rosimage.header.stamp.secs, rosimage.header.stamp.nsecs

        tracking_data = {
            'found': found,
            'stamp': stamp_tuple,
            'tracking_pts': uv_list,
            'image_tracking_pts': rosimage_tracking_pts,
            'dist_to_image_center': dist_to_image_center,
            'tracking_pts_roi': tracking_pts_roi,
            'tracking_pts_roi_src': tracking_pts_roi_src,
            'tracking_pts_roi_dst': tracking_pts_roi_dst,
        }
        return tracking_data

    def get_dist_to_image_center(self, uv_list, img_size):
        """
        Computes the distance from the mid point of the tracking points to the
        middle of the image.
        """
        img_mid = 0.5 * img_size[0], 0.5 * img_size[1]
        pts_mid = get_midpoint_uv_list(uv_list)
        return distance_2d(pts_mid, img_mid)

    def get_tracking_pts_roi(self, uv_list, img_size, trunc=False):
        """
        Get the coordinates of region of interest about the tracking points
        """
        img_width, img_height = img_size
        roi_width, roi_height = self.tracking_pts_roi_size
        u_mid, v_mid = get_midpoint_uv_list(uv_list)
        u_min = int(math.floor(u_mid - roi_width / 2))
        v_min = int(math.floor(v_mid - roi_height / 2))
        u_max = u_min + roi_width
        v_max = v_min + roi_height
        return u_min, v_min, u_max - u_min, v_max - v_min

    def get_sorted_uv_points(self, blobs_list):
        """
        For blob lists with three blobs, finds the identities of the blobs based
        on collinearity and distance between the points.
        """
        assert len(blobs_list) == 3, 'blobs list must contain exactly three blobs'
        # Get x and y point lists
        u_list = [blob['centroid_x'] for blob in blobs_list]
        v_list = [blob['centroid_y'] for blob in blobs_list]

        # Determine x and y span
        u_min, u_max = min(u_list), max(u_list)
        v_min, v_max = min(v_list), max(v_list)
        u_span = u_max - u_min
        v_span = v_max - v_min

        # Use max span to sort points
        if u_span >= v_span:
            uv_list = zip(u_list, v_list)
            uv_list.sort()
            u_list = [u for u, v in uv_list]
            v_list = [v for u, v in uv_list]
        else:
            vu_list = zip(v_list, u_list)
            vu_list.sort()
            u_list = [u for v, u in vu_list]
            v_list = [v for v, u in vu_list]

        # Look at distances and sort so that either the large gap occurs first
        # or last based on the spacing data.
        uv_list = zip(u_list, v_list)
        dist_0_to_1 = distance_2d(uv_list[0], uv_list[1])
        dist_1_to_2 = distance_2d(uv_list[1], uv_list[2])
        if self.large_step_first:
            if dist_0_to_1 <= dist_1_to_2:
                uv_list.reverse()
        else:
            if dist_0_to_1 >= dist_1_to_2:
                uv_list.reverse()
        return uv_list

    def associate_data_w_seq(self):
        """
        Associate tracking data with acquisition sequence number.

        Note, this is only used when the subscription topic type is
        sensor_msgs/Image.
        """
        with self.lock:
            for stamp, data in self.stamp_to_data.items():
                try:
                    seq = self.stamp_to_seq[stamp]
                    seq_found = True
                except KeyError:
                    seq_found = False

                if seq_found:
                    self.seq_to_data[seq] = data
                    del self.stamp_to_data[stamp]
                    del self.stamp_to_seq[stamp]
                else:
                    # Throw away data greater than the maximum allowed age
                    if self.latest_stamp is not None:
                        latest_stamp_secs = stamp_tuple_to_secs(
                            self.latest_stamp)
                        stamp_secs = stamp_tuple_to_secs(stamp)
                        stamp_age = latest_stamp_secs - stamp_secs
                        if stamp_age > self.max_stamp_age:
                            del self.stamp_to_data[stamp]

    def publish_tracking_pts(self):
        """
        Creates tracking_pts message and publishes it. 
        """
        for seq, data in sorted(self.seq_to_data.items()):

            # Create list of tracking points
            tracking_pts = []
            for u, v in data['tracking_pts']:
                tracking_pts.append(Point2d(u, v))

            # Create the tracking points message and publish
            tracking_pts_msg = ThreePointTrackerRaw()
            tracking_pts_msg.data.seq = seq
            tracking_pts_msg.data.secs = data['stamp'][0]
            tracking_pts_msg.data.nsecs = data['stamp'][1]
            tracking_pts_msg.data.camera = self.camera
            tracking_pts_msg.data.found = data['found']
            tracking_pts_msg.data.distance = data['dist_to_image_center']
            tracking_pts_msg.data.roi = data['tracking_pts_roi']
            tracking_pts_msg.data.roi_src = data['tracking_pts_roi_src']
            tracking_pts_msg.data.roi_dst = data['tracking_pts_roi_dst']
            tracking_pts_msg.data.points = tracking_pts
            tracking_pts_msg.image = data['image_tracking_pts']
            self.tracking_pts_pub.publish(tracking_pts_msg)
            #print('publish', seq)

            # Remove data for this sequence number.
            del self.seq_to_data[seq]

    def run(self):
        """
        Main loop - associates tracking data and time stamps w/ image sequence
        numbers and publishes the tracking data.
        """
        while not rospy.is_shutdown():
            if self.topic_type == 'sensor_msgs/Image':
                self.associate_data_w_seq()
            self.publish_tracking_pts()
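
get_sorted_uv_points boils down to: sort the three centroids along their
dominant image axis, then orient the triple so the large physical gap lands
where the pts_spacing data says it should. A standalone sketch of the same
logic (the coordinates are made up):

import math

def distance_2d(p, q):
    return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)

def sort_three_points(uv_list, large_step_first=True):
    # Sort along the dominant image axis.
    u_span = max(u for u, v in uv_list) - min(u for u, v in uv_list)
    v_span = max(v for u, v in uv_list) - min(v for u, v in uv_list)
    if u_span >= v_span:
        pts = sorted(uv_list)
    else:
        pts = sorted(uv_list, key=lambda uv: (uv[1], uv[0]))
    # Orient the triple so the large gap falls on the expected side.
    if large_step_first:
        if distance_2d(pts[0], pts[1]) <= distance_2d(pts[1], pts[2]):
            pts.reverse()
    else:
        if distance_2d(pts[0], pts[1]) >= distance_2d(pts[1], pts[2]):
            pts.reverse()
    return pts

print(sort_three_points([(120, 50), (100, 50), (170, 52)]))
# -> [(170, 52), (120, 50), (100, 50)]: the big gap comes first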
Example #10
class ThreePointTracker(object):
    def __init__(self, topic=None):
        self.topic = topic
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.camera = get_camera_from_topic(self.topic)
        camera_calibration = file_tools.read_camera_calibration(self.camera)
        self.camera_matrix = get_camera_matrix(camera_calibration)

        # Blob finder parameters
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = 150
        self.blobFinder.filter_by_area = False
        self.blobFinder.min_area = 0
        self.blobFinder.max_area = 200

        # Tracking parameters
        self.tracking_pts_dist = (0.0, 0.04774, 0.07019)
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]

        rospy.init_node('blob_finder')
        self.ready = False
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)

        self.image_tracking_pts_pub = rospy.Publisher('image_tracking_pts',
                                                      Image)
        self.image_blobs_pub = rospy.Publisher('image_blobs', Image)
        self.blob_data_pub = rospy.Publisher('blob_data', BlobData)

        self.devel_pub = rospy.Publisher('devel_data', Float32)

        node_name = rospy.get_name()
        self.set_param_srv = rospy.Service('{0}/set_param'.format(node_name),
                                           BlobFinderSetParam,
                                           self.handle_set_param_srv)
        self.get_param_srv = rospy.Service('{0}/get_param'.format(node_name),
                                           BlobFinderGetParam,
                                           self.handle_get_param_srv)
        self.ready = True

    def handle_set_param_srv(self, req):
        """
        Handles requests to set the blob finder's parameters: the threshold
        used for binarizing the image and the blob area filter settings.
        """
        with self.lock:
            self.blobFinder.threshold = req.threshold
            self.blobFinder.filter_by_area = req.filter_by_area
            self.blobFinder.min_area = req.min_area
            self.blobFinder.max_area = req.max_area
        return BlobFinderSetParamResponse(True, '')

    def handle_get_param_srv(self, req):
        """
        Handles requests for the blob finder's parameters.
        """
        with self.lock:
            threshold = self.blobFinder.threshold
            filter_by_area = self.blobFinder.filter_by_area
            min_area = self.blobFinder.min_area
            max_area = self.blobFinder.max_area
        resp_args = (threshold, filter_by_area, min_area, max_area)
        return BlobFinderGetParamResponse(*resp_args)

    def image_callback(self, data):
        """
        Callback for image topic subscription - finds blobs in image.
        """
        if not self.ready:
            return

        with self.lock:
            blobs_list, blobs_rosimage = self.blobFinder.findBlobs(data)

        # ---------------------------------------------------------------------

        # Create tracking points image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        tracking_pts_image = cv.CreateImage(cv.GetSize(ipl_image),
                                            cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, tracking_pts_image, cv.CV_GRAY2BGR)

        #blobs_list = get_synthetic_blobs(self.tracking_pts_dist, self.camera_matrix)
        num_blobs = len(blobs_list)
        if num_blobs == 3:

            #print('3 pts found')
            uv_list = self.get_sorted_uv_points(blobs_list)
            t, w = get_object_pose(1.0, 2.2, uv_list, self.tracking_pts_dist,
                                   self.camera_matrix)
            uv_list_pred = get_uv_list_from_vects(t, w, self.tracking_pts_dist,
                                                  self.camera_matrix)
            elev = math.asin(w[2])
            head = math.atan2(w[1], w[0])
            elev_deg = 180.0 * elev / math.pi
            head_deg = 180.0 * head / math.pi

            #print('w', ['{0:1.3f}'.format(x) for x in w])
            #print('t', ['{0:1.3f}'.format(x) for x in t])
            print('head: {0:1.3f}'.format(head_deg))
            print('elev: {0:1.3f}'.format(elev_deg))
            print('tx:   {0:1.3f}'.format(t[0]))
            print('ty:   {0:1.3f}'.format(t[1]))
            print('tz:   {0:1.3f}'.format(t[2]))
            print()
            self.devel_pub.publish(t[2])

            # Draw points on tracking points image
            for i, uv in enumerate(uv_list):
                color = self.tracking_pts_colors[i]
                u, v = uv
                #cv.Circle(tracking_pts_image, (int(u),int(v)),3, color)
                cv.Circle(tracking_pts_image, (int(u), int(v)), 3, (0, 255, 0))

            for i, uv in enumerate(uv_list_pred):
                color = self.tracking_pts_colors[i]
                u, v = uv
                #cv.Circle(tracking_pts_image, (int(u),int(v)),3, color)
                cv.Circle(tracking_pts_image, (int(u), int(v)), 3, (0, 0, 255))
        else:
            print('3 pts not found ', end='')
            if num_blobs > 3:
                print('too many blobs')
            else:
                print('too few blobs')

        # Publish calibration progress image
        ros_image = self.bridge.cv_to_imgmsg(tracking_pts_image,
                                             encoding="passthrough")
        self.image_tracking_pts_pub.publish(ros_image)

        # ---------------------------------------------------------------------

        ## Publish image of blobs
        #self.image_blobs_pub.publish(blobs_rosimage)

        ## Create the blob data message and publish
        #blob_data_msg = BlobData()
        #blob_data_msg.header = data.header
        #blob_data_msg.number_of_blobs = len(blobs_list)
        #for b in blobs_list:
        #    blob_msg = Blob()
        #    for k, v in b.iteritems():
        #        setattr(blob_msg,k,v)
        #    blob_data_msg.blob.append(blob_msg)
        #self.blob_data_pub.publish(blob_data_msg)

    def get_object_pose(self, uv_list):
        """
        Calculates pose of three point object
        """
        pass

    def get_sorted_uv_points(self, blobs_list):
        """
        For blob lists with three blobs, finds the identities of the blobs based
        on collinearity and distance between the points.
        """
        assert len(blobs_list) == 3, 'blobs list must contain exactly three blobs'
        # Get x and y point lists
        u_list = [blob['centroid_x'] for blob in blobs_list]
        v_list = [blob['centroid_y'] for blob in blobs_list]

        # Determine x and y span
        u_min, u_max = min(u_list), max(u_list)
        v_min, v_max = min(v_list), max(v_list)
        u_span = u_max - u_min
        v_span = v_max - v_min

        # Use max span to sort points
        if u_span >= v_span:
            uv_list = zip(u_list, v_list)
            uv_list.sort()
            u_list = [u for u, v in uv_list]
            v_list = [v for u, v in uv_list]
        else:
            vu_list = zip(v_list, u_list)
            vu_list.sort()
            u_list = [u for v, u in vu_list]
            v_list = [v for v, u in vu_list]

        # #####################################################################
        # Look at distances and sort so that the large gap occurs first
        # Note currently assuming the largest gap occurs first this should
        # really come from the tracking_pts_pos
        # #####################################################################
        uv_list = zip(u_list, v_list)
        dist_0_to_1 = distance_2d(uv_list[0], uv_list[1])
        dist_1_to_2 = distance_2d(uv_list[1], uv_list[2])
        if dist_0_to_1 <= dist_1_to_2:
            uv_list.reverse()
        return uv_list

    def run(self):
        rospy.spin()
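
The heading/elevation computation in image_callback just converts the
orientation unit vector w into spherical angles; in isolation:

import math

def heading_elevation_deg(w):
    # w is assumed to be a unit 3-vector (wx, wy, wz).
    elev = math.asin(w[2])
    head = math.atan2(w[1], w[0])
    return 180.0 * head / math.pi, 180.0 * elev / math.pi

print(heading_elevation_deg((0.0, 1.0, 0.0)))   # (90.0, 0.0)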
Example #11
class AVI_Writer(object):
    def __init__(self, topic, frame_rate=30.0):

        self.topic = topic
        self.frame_rate = frame_rate
        self.start_t = 0.0
        self.current_t = 0.0
        self.progress_t = 0.0
        self.frame_count = 0
        self.recording_message = 'stopped'

        self.writer = None
        self.done = True
        self.cv_image_size = None
        self.filename = os.path.join(os.environ['HOME'], 'default.avi')

        self.lock = threading.Lock()
        self.bridge = CvBridge()
        rospy.init_node('avi_writer')
        self.node_name = rospy.get_name()

        # Set up publications
        self.progress_msg = RecordingProgressMsg()
        self.progress_pub = rospy.Publisher('progress', RecordingProgressMsg)

        # Subscribe to messages
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_handler)

        # Set up services
        self.recording_srv = rospy.Service(
            '{0}/recording_cmd'.format(self.node_name), RecordingCmd,
            self.handle_recording_cmd)

    def run(self):
        rospy.spin()

    def handle_recording_cmd(self, req):
        """
        Handles avi recording commands - starts and stops avi recording.
        """
        with self.lock:

            if self.cv_image_size is None:
                # If we don't have an image yet we can't get started - return failure.
                return RecordingCmdResponse(False)

            self.filename = req.filename
            self.frame_rate = req.frame_rate
            command = req.command.lower()

            if command == 'start':
                # Get start time and create video writer
                self.writer = cv.CreateVideoWriter(
                    self.filename,
                    cv.CV_FOURCC('D', 'I', 'V', 'X'),
                    self.frame_rate,
                    self.cv_image_size,
                )

                if self.writer is None:
                    response = False
                else:
                    response = True
                    self.start_t = rospy.get_time()
                    self.frame_count = 0
                    self.done = False
                    self.recording_message = 'recording'

            elif command == 'stop':
                self.done = True
                del self.writer
                self.writer = None
                self.recording_message = 'stopped'
                response = True

            else:
                # Unknown command - fail explicitly rather than hitting a
                # NameError on the undefined response below.
                response = False

            return RecordingCmdResponse(response)

    def image_handler(self, data):
        """
        Writes frames to avi file.
        """
        self.current_t = rospy.get_time()

        # Convert to opencv image and then to ipl_image
        cv_image = self.bridge.imgmsg_to_cv(data, 'bgr8')
        ipl_image = cv.GetImage(cv_image)

        with self.lock:
            if self.cv_image_size is None:
                self.cv_image_size = cv.GetSize(cv_image)

        if not self.done:

            # Write video frame
            cv.WriteFrame(self.writer, ipl_image)

            # Update times and frame count - these are used elsewhere so we
            # need the lock
            with self.lock:
                self.frame_count += 1
                self.progress_t = self.current_t - self.start_t

        # Publish progress message
        with self.lock:
            self.progress_msg.frame_count = self.frame_count
            self.progress_msg.progress_t = self.progress_t
            self.progress_msg.recording_message = self.recording_message
        self.progress_pub.publish(self.progress_msg)
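
A hypothetical client for the recording_cmd service above (the package
providing RecordingCmd is an assumption; the request fields match what
handle_recording_cmd reads):

import rospy
from mct_msg_and_srv.srv import RecordingCmd  # assumed package

rospy.init_node('avi_writer_client')
rospy.wait_for_service('/avi_writer/recording_cmd')
recording_cmd = rospy.ServiceProxy('/avi_writer/recording_cmd', RecordingCmd)

recording_cmd(filename='/tmp/out.avi', frame_rate=30.0, command='start')
rospy.sleep(5.0)
recording_cmd(filename='/tmp/out.avi', frame_rate=30.0, command='stop')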
Example #12
class StitchedImageLabeler(object):
    def __init__(self,
                 stitched_image_topic,
                 tracking_pts_topic,
                 max_seq_age=150):
        self.lock = threading.Lock()
        self.bridge = CvBridge()
        self.stitching_params = file_tools.read_tracking_2d_stitching_params()
        self.stitched_image_topic = stitched_image_topic
        self.tracking_pts_topic = tracking_pts_topic
        self.max_seq_age = max_seq_age
        self.latest_seq = None
        self.seq_to_stitched_image = {}
        self.seq_to_tracking_pts = {}
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.magenta = (255, 255, 0)
        self.tracking_pts_colors = [(0, 0, 255), (0, 255, 0), (0, 255, 255)]
        self.labeled_image = None

        self.ready = False
        rospy.init_node('stitched_image_labeler')

        # Subscribe to stitched image and tracking pts topics
        self.image_sub = rospy.Subscriber(self.stitched_image_topic,
                                          SeqAndImage,
                                          self.stitched_image_handler)
        self.tracking_pts_sub = rospy.Subscriber(self.tracking_pts_topic,
                                                 ThreePointTracker,
                                                 self.tracking_pts_handler)

        # Create labeled image publication
        self.labeled_image_pub = rospy.Publisher('image_stitched_labeled',
                                                 Image)

        self.ready = True

    def stitched_image_handler(self, data):
        """
        Callback for handling the sequence and stitched image topics.
        """

        if not self.ready:
            return

        with self.lock:
            self.latest_seq = data.seq
            self.seq_to_stitched_image[data.seq] = data.image

    def tracking_pts_handler(self, data):
        """
        Callback for handling the tracking point data from the tracker
        synchronizer.
        """

        if not self.ready:
            return

        with self.lock:
            if data.seq % self.stitching_params['frame_skip'] == 0:
                self.latest_seq = data.seq
                self.seq_to_tracking_pts[data.seq] = data

    def create_labeled_image(self, ros_image, tracking_pts):
        """
        Create labeled version of stitched image using the tracking points data
        """

        # Convert stitched image from rosimage to opencv image.
        cv_image = self.bridge.imgmsg_to_cv(ros_image,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)

        # Create color version of stitched image (the buffer is created once
        # and reused on subsequent calls).
        if self.labeled_image is None:
            self.labeled_image = cv.CreateImage(cv.GetSize(ipl_image),
                                                cv.IPL_DEPTH_8U, 3)
            cv.Zero(self.labeled_image)
        labeled_image = self.labeled_image
        cv.CvtColor(ipl_image, labeled_image, cv.CV_GRAY2BGR)

        # Write sequence number on image
        message = '{0}'.format(tracking_pts.seq)
        cv.PutText(labeled_image, message, (10, 25), self.cv_text_font,
                   self.magenta)
        if tracking_pts.found:
            # Draw boundary box around tracked object
            p_list = [(int(pt.x), int(pt.y))
                      for pt in tracking_pts.bndry_stitching_plane]
            q_list = p_list[1:] + [p_list[0]]
            for p, q in zip(p_list, q_list):
                cv.Line(labeled_image, p, q, self.magenta)
            # Draw circles around tracked object points
            for pt, color in zip(tracking_pts.pts_stitching_plane,
                                 self.tracking_pts_colors):
                cv.Circle(labeled_image, (int(pt.x), int(pt.y)), 3, color)

        # Convert labeled image to ros image and publish
        labeled_rosimage = self.bridge.cv_to_imgmsg(labeled_image,
                                                    encoding="passthrough")
        self.labeled_image_pub.publish(labeled_rosimage)

        # DEBUG ###################################################
        #self.labeled_image_pub.publish(ros_image)
        ###########################################################

    def run(self):
        """
        Node main loop. Pairs up the tracking points data with the stitched
        images by sequence number and passes them to the create labeled image
        function. Also, cleans out any old data from the seq_to_xxx dictionaries.
        """
        while not rospy.is_shutdown():
            with self.lock:

                # Match up tracking points data with stitched images using sequence number
                for seq, tracking_pts in sorted(
                        self.seq_to_tracking_pts.items()):
                    try:
                        image = self.seq_to_stitched_image[seq]
                        found = True
                    except KeyError:
                        found = False

                    if found:
                        # Remove items from storage dictionaries
                        del self.seq_to_stitched_image[seq]
                        del self.seq_to_tracking_pts[seq]
                        self.create_labeled_image(image, tracking_pts)
                    else:
                        # Remove any elements of the seq_to_tracking_pts dict older than the maximum allowed age
                        seq_age = self.latest_seq - seq
                        if seq_age > self.max_seq_age:
                            del self.seq_to_tracking_pts[seq]

                # Remove any elements from the seq_to_stitched_image dict older than the maximum allowed age
                for seq in self.seq_to_stitched_image.keys():
                    seq_age = self.latest_seq - seq
                    if seq_age > self.max_seq_age:
                        del self.seq_to_stitched_image[seq]
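
The p_list/q_list pairing in create_labeled_image is the usual idiom for
walking the edges of a closed polygon; stripped of the drawing calls:

p_list = [(0, 0), (10, 0), (10, 10), (0, 10)]
q_list = p_list[1:] + [p_list[0]]   # same vertices rotated by one
edges = zip(p_list, q_list)
# [((0,0),(10,0)), ((10,0),(10,10)), ((10,10),(0,10)), ((0,10),(0,0))]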
Example #13
import roslib; roslib.load_manifest('hai_sandbox')
from cv_bridge.cv_bridge import CvBridge, CvBridgeError
import cv
import sys
import hrl_lib.rutils as ru
import hai_sandbox.features as fea

forearm_cam_l = '/l_forearm_cam/image_rect_color'
ws_l = '/wide_stereo/left/image_rect_color'
ws_r = '/wide_stereo/right/image_rect_color'

fname = sys.argv[1]
bridge = CvBridge()

cv.NamedWindow('surf', 1)
cv.NamedWindow('harris', 1)
cv.NamedWindow('star', 1)

for topic, msg, t in ru.bag_iter(fname, [ws_l]):
    image = bridge.imgmsg_to_cv(msg, 'bgr8')
    image_gray = fea.grayscale(image)

    surf_keypoints, surf_descriptors = fea.surf(image_gray)
    cv.ShowImage('surf', fea.draw_surf(image, surf_keypoints, (255, 0, 0)))

    harris_keypoints = fea.harris(image_gray)
    cv.ShowImage('harris', fea.draw_harris(image, harris_keypoints, (0, 255, 0)))

    cv.WaitKey(10)
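
As written the loop runs until the bag is exhausted; a small variation
(sketch) stops early when ESC is pressed in one of the display windows:

for topic, msg, t in ru.bag_iter(fname, [ws_l]):
    image = bridge.imgmsg_to_cv(msg, 'bgr8')
    cv.ShowImage('surf', image)
    if cv.WaitKey(10) == 27:   # 27 is the ESC key code
        break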
    
Example #14
class AVI_Writer(object):

    def __init__(self,topic,frame_rate=30.0):

        self.topic = topic
        self.frame_rate = frame_rate
        self.start_t = 0.0
        self.current_t = 0.0
        self.progress_t = 0.0
        self.frame_count = 0
        self.recording_message = 'stopped'

        self.writer = None
        self.done = True
        self.cv_image_size = None
        self.filename = os.path.join(os.environ['HOME'],'default.avi')

        self.lock = threading.Lock()
        self.bridge = CvBridge()
        rospy.init_node('avi_writer')
        self.node_name = rospy.get_name()

        # Set up publications
        self.progress_msg = RecordingProgressMsg()
        self.progress_pub = rospy.Publisher('progress',RecordingProgressMsg)

        # Subscribe to messages
        self.image_sub = rospy.Subscriber(self.topic,Image,self.image_handler)

        # Set up services
        self.recording_srv = rospy.Service(
                '{0}/recording_cmd'.format(self.node_name),
                RecordingCmd,
                self.handle_recording_cmd
                )

    def run(self):
        rospy.spin()

    def handle_recording_cmd(self,req):
        """
        Handles avi recording commands - starts and stops avi recording.
        """
        with self.lock:

            if self.cv_image_size is None:
                # If we don't have an image yet we can't get started - return failure.
                return RecordingCmdResponse(False)

            self.filename = req.filename
            self.frame_rate = req.frame_rate
            command = req.command.lower()

            if command == 'start':
                # Get start time and create video writer
                self.writer = cv.CreateVideoWriter(
                        self.filename,
                        cv.CV_FOURCC('D','I','V','X'),
                        self.frame_rate,
                        self.cv_image_size,
                        )

                if self.writer is None:
                    response = False
                else:
                    response = True
                    self.start_t = rospy.get_time()
                    self.frame_count = 0
                    self.done = False
                    self.recording_message = 'recording'

            elif command == 'stop':
                self.done = True
                del self.writer
                self.writer = None
                self.recording_message = 'stopped'
                response = True

            else:
                # Unknown command - fail explicitly rather than hitting a
                # NameError on the undefined response below.
                response = False

            return RecordingCmdResponse(response)

    def image_handler(self,data):
        """
        Writes frames to avi file.
        """
        self.current_t = rospy.get_time()

        # Convert to opencv image and then to ipl_image
        cv_image = self.bridge.imgmsg_to_cv(data,'bgr8')
        ipl_image = cv.GetImage(cv_image)

        with self.lock:
            if self.cv_image_size is None:
                self.cv_image_size = cv.GetSize(cv_image)

        if not self.done:

            # Write video frame
            cv.WriteFrame(self.writer,ipl_image)

            # Update times and frame count - these are used elsewhere so we
            # need the lock
            with self.lock:
                self.frame_count += 1
                self.progress_t = self.current_t - self.start_t

        # Publish progress message
        with self.lock:
            self.progress_msg.frame_count = self.frame_count
            self.progress_msg.progress_t = self.progress_t
            self.progress_msg.recording_message = self.recording_message
        self.progress_pub.publish(self.progress_msg)
Example #15
class ImageStitcher(object):
    """
    Subscribes to all rectified image topics in a 2d tracking region and uses
    the calibration data to stitch the images together. In turn it publishes
    image_stitched and image_stitched/seq.

    Note, in order to handle dropped frames gracefully I've modified this node
    so that it can subscribe to the mct_msg_and_srv/SeqAndImage topics published
    by the frame_drop_corrector nodes. 
    """

    def __init__(
            self, 
            region, 
            topic_end='image_rect_skip', 
            topic_type='sensor_msgs/Image', 
            max_seq_age=150, 
            max_stamp_age=1.5
            ):

        self.topic_end = topic_end
        self.topic_type = topic_type
        self.max_seq_age = max_seq_age
        self.max_stamp_age = max_stamp_age

        self.warp_flags = cv.CV_INTER_LINEAR 
        self.stitching_params = file_tools.read_tracking_2d_stitching_params()

        rospy.init_node('image_stitcher')
        self.ready = False
        self.is_first_write = True
        self.stitched_image = None
        self.seq_to_images = {}  
        self.stamp_to_seq_pool= {}
        self.image_waiting_pool = {} 

        # Information on the latest sequence received and stitched
        self.seq_newest = None
        self.stamp_newest = None

        self.lock = threading.Lock()
        self.bridge = CvBridge()

        # Get image and camera information 
        regions_dict = file_tools.read_tracking_2d_regions()
        self.region = region
        self.camera_list = regions_dict[region]
        self.create_camera_to_image_dict()
        self.create_camera_to_info_dict()

        # Get transforms from cameras to stitched image and stitched image size
        self.tf2d = transform_2d.Transform2d()
        self.create_tf_data()
        self.get_stitched_image_size()

        # Subscribe to topics based on incoming topic type.
        if self.topic_type == 'sensor_msgs/Image':
            
            # Create pool dictionaries for incoming data.
            for camera in self.camera_list:
                self.stamp_to_seq_pool[camera] = {}
                self.image_waiting_pool[camera] = {}

            # Subscribe to camera info topics
            self.info_sub = {} 
            for camera, topic in self.camera_to_info.iteritems():
                info_handler = functools.partial(self.info_handler, camera)
                self.info_sub[camera] = rospy.Subscriber(topic, CameraInfo, info_handler)

            # Subscribe to rectified image topics
            self.image_sub = {} 
            for camera, topic in self.camera_to_image.iteritems():
                image_handler = functools.partial(self.image_handler, camera)
                self.image_sub[camera] = rospy.Subscriber(topic, Image, image_handler)

        elif self.topic_type == 'mct_msg_and_srv/SeqAndImage':

            # Subscribe to SeqAndImage topics
            self.seq_and_image_sub = {}
            for camera, topic in self.camera_to_image.iteritems():
                seq_and_image_handler = functools.partial(self.seq_and_image_handler, camera)
                self.seq_and_image_sub[camera] = rospy.Subscriber(topic, SeqAndImage, seq_and_image_handler)

        else:
            err_msg = 'unable to handle topic type: {0}'.format(self.topic_type)
            raise ValueError(err_msg)

        # Stitched image publisher and seq publisher
        self.image_pub = rospy.Publisher('image_stitched', Image)
        self.image_and_seq_pub = rospy.Publisher('seq_and_image_stitched', SeqAndImage)
        self.seq_pub = rospy.Publisher('image_stitched/seq', UInt32)
        self.stamp_pub = rospy.Publisher('image_stitched/stamp_info', StampInfo)
        self.processing_dt_pub = rospy.Publisher('image_stitched/processing_dt', ProcessingInfo)

        # Setup reset service - needs to be called anytime the camera trigger is  
        # stopped - before it is restarted. Empties buffers of images and sequences. 
        self.reset_srv = rospy.Service('reset_image_stitcher', Empty, self.handle_reset_srv)
        self.ready = True

    def handle_reset_srv(self, req):
        """
        Handles the nodes reset service - which empties the image and sequence buffers.
        """
        with self.lock:
            self.seq_to_images = {}  
            self.stamp_to_seq_pool= {}
            self.image_waiting_pool = {} 
            self.seq_newest = None
            self.stamp_newest = None
        return EmptyResponse()

    def create_camera_to_image_dict(self):
        """
        Create camera to image topic dictionary
        """
        self.camera_to_image = {}
        image_topics = mct_introspection.find_camera_image_topics(transport=self.topic_end)
        #for topic in rect_topics:
        for topic in image_topics:
            topic_split = topic.split('/')
            for camera in self.camera_list:
                if camera in topic_split:
                    self.camera_to_image[camera] = topic

    def create_camera_to_info_dict(self):
        """
        Create camera to camera info topic dictionary
        """
        self.camera_to_info = {}
        info_topics = mct_introspection.find_camera_info_topics()
        for topic in info_topics:
            topic_split = topic.split('/')
            for camera in self.camera_list:
                if camera in topic_split:
                    self.camera_to_info[camera] = topic

    def create_tf_data(self):
        """
        Pre-computes transform matrices and roi for creating the stitched
        images.
        """
        self.tf_data = {}
        for camera in self.camera_list:

            # Get modified transform which maps to ROI in stitched image 
            bbox = self.tf2d.get_stitching_plane_bounding_box(
                    self.region,
                    camera_list=[camera]
                    )
            tf_matrix = self.tf2d.get_camera_to_stitching_plane_tf(camera)
            roi_x = int(math.floor(bbox['min_x']))
            roi_y = int(math.floor(bbox['min_y']))
            roi_w = int(math.floor(bbox['max_x']) - math.floor(bbox['min_x']+5))
            roi_h = int(math.floor(bbox['max_y']) - math.floor(bbox['min_y']+5))
            shift_matrix = numpy.array([
                [1.0, 0.0, -bbox['min_x']],
                [0.0, 1.0, -bbox['min_y']], 
                [0.0, 0.0, 1.0],
                ])
            tf_matrix = numpy.dot(shift_matrix, tf_matrix)
            self.tf_data[camera] = {
                    'matrix': cv.fromarray(tf_matrix), 
                    'roi': (roi_x, roi_y, roi_w, roi_h)
                    }

    def get_stitched_image_size(self):
        """
        Determines the size of the stitched image.
        """
        # Get stitched image size from bounding box
        bbox = self.tf2d.get_stitching_plane_bounding_box(self.region)
        self.image_width = int(math.ceil(bbox['max_x']))
        self.image_height = int(math.ceil(bbox['max_y']))
        self.image_size = (self.image_width, self.image_height)

    def info_handler(self,camera,data):
        """
        Handler for incoming camera info messages. In this callback we place the
        image sequence number into the stamp_to_seq_pool dictionary by camera name
        and timestamp.

        Note, only used when incoming topics are of type sensor_msgs/Image. In
        this case both the Image and camera_info topics are needed to associate
        the acquisition sequence numbers with the images as the seq numbers in
        the image headers aren't reliable.
        """
        if self.ready:
            with self.lock:
                stamp = data.header.stamp.secs, data.header.stamp.nsecs
                self.stamp_to_seq_pool[camera][stamp] = data.header.seq
                self.update_seq_newest(data.header.seq)
                self.update_stamp_newest(stamp)


    def image_handler(self, camera, data):
        """
        Handler for incoming camera images. In this callback we place the image
        data into the image_waiting_pool by camera name and timestamp. Note, we
        don't want to use the seq information in data.header.seq as it seems to
        be initialized in some random way - probably has to do with python not
        fully supporting ROS's image_transport. The seq's from the camera_info
        topics are correct.

        Note, only used when incoming topics are of type sensor_msgs/Image. In
        this case both the Image and camera_info topics are needed to associate
        the acquisition sequence numbers with the images as the seq numbers in
        the image headers aren't reliable.
        """
        if self.ready:
            with self.lock:
                stamp = data.header.stamp.secs, data.header.stamp.nsecs
                self.image_waiting_pool[camera][stamp] = data

    def seq_and_image_handler(self,camera,data):
        """
        Handler for incoming SeqAndImage messages which contain both the image
        data and the frame drop corrected acquisition sequence numbers.
        
        Note, only used when the incoming topics are of type
        mct_msg_and_srv/SeqAndImage.
        """
        if self.ready:
            with self.lock:
                try:
                    self.seq_to_images[data.seq][camera] = data.image
                except KeyError:
                    self.seq_to_images[data.seq] = {camera: data.image}
                self.update_seq_newest(data.seq)

    def update_seq_newest(self,seq): 
        if self.seq_newest is None:
            self.seq_newest = seq
        else:
            self.seq_newest = max([self.seq_newest, seq])

    def update_stamp_newest(self,stamp): 
        if self.stamp_newest is None:
            self.stamp_newest = stamp
        else:
            self.stamp_newest = max([self.stamp_newest, stamp])

    def process_waiting_images(self):
        """ 
        Processes waiting images. Associates images in the waiting pool with
        their acquisition sequence number and places them in the seq_to_images
        buffer.         
        """
        with self.lock:
            # Associate images in the waiting pool with their seq numbers 
            for camera in self.camera_list:
                for stamp, image_data in self.image_waiting_pool[camera].items():

                    try:
                        seq = self.stamp_to_seq_pool[camera][stamp]
                        del self.stamp_to_seq_pool[camera][stamp]
                        del self.image_waiting_pool[camera][stamp]
                        image_data.header.seq = seq 
                        deleted = True
                        try:
                            self.seq_to_images[seq][camera] = image_data
                        except KeyError:
                            self.seq_to_images[seq] = {camera: image_data}
                    except KeyError:
                        deleted = False

                    # Clear out stale data from image waiting pool
                    if not deleted:
                        stamp_age = self.get_stamp_age(stamp)
                        if stamp_age > self.max_stamp_age:
                            del self.image_waiting_pool[camera][stamp]

                # Clear out any stale data from stamp-to-seq pool
                for stamp, seq in self.stamp_to_seq_pool[camera].items():
                    seq_age = self.get_seq_age(seq)
                    if seq_age > self.max_seq_age: 
                        del self.stamp_to_seq_pool[camera][stamp]


    def get_stamp_age(self, stamp):
        """
        Computes the age of a time stamp.
        """
        stamp_secs = stamp_tuple_to_secs(stamp)
        stamp_newest_secs = stamp_tuple_to_secs(self.stamp_newest)
        return stamp_newest_secs - stamp_secs


    def get_seq_age(self, seq):
        """
        Compute sequence age - handling 32-bit uint rollover of the seq counter.
        Note, at 30Hz this shouldn't roll over for something like 4 yrs, but you
        never know.
        """
        age0 = abs(self.seq_newest - seq)
        age1 = MAX_UINT32 - seq + self.seq_newest + 1
        return min([age0, age1])


    def publish_stitched_image(self):
        """
        Checks to see if the sequences to images buffer contains all required frames and if so 
        produces and publishes a stitched image.
        """
        # Check to see if we have all images for a given sequence number
        for seq, image_dict in sorted(self.seq_to_images.items()): 

            # If we have all images for the sequence - stitch into merged view
            if len(image_dict) == len(self.camera_list):

                t0 = rospy.Time.now() 

                for i, camera in enumerate(self.camera_list):

                    # Convert ros image to opencv image
                    ros_image = image_dict[camera] 
                    cv_image = self.bridge.imgmsg_to_cv(
                            ros_image,
                            desired_encoding="passthrough"
                            )
                    ipl_image = cv.GetImage(cv_image)

                    # Create stitched image if it doesn't exist yet
                    if self.stitched_image is None:
                        self.stitched_image = cv.CreateImage(
                                self.image_size,
                                ipl_image.depth,
                                ipl_image.channels
                                )
                    if i == 0:
                        cv.Zero(self.stitched_image)

                    # Set image warping flags - if first fill rest of image with zeros
                    if self.is_first_write:
                        warp_flags = self.warp_flags | cv.CV_WARP_FILL_OUTLIERS
                        self.is_first_write = False
                    else:
                        warp_flags = self.warp_flags

                    # Warp into stitched image 
                    cv.SetImageROI(self.stitched_image, self.tf_data[camera]['roi'])

                    # Warp stitched image
                    cv.WarpPerspective(
                            ipl_image, 
                            self.stitched_image, 
                            self.tf_data[camera]['matrix'],
                            flags=warp_flags,
                            )
                    cv.ResetImageROI(self.stitched_image)

                    # Get max and min time stamps for stamp_info
                    stamp = ros_image.header.stamp
                    if i == 0:
                        stamp_max = stamp 
                        stamp_min = stamp
                    else:
                        stamp_max = stamp if stamp.to_sec() > stamp_max.to_sec() else stamp_max 
                        stamp_min = stamp if stamp.to_sec() < stamp_min.to_sec() else stamp_min

                # Convert stitched image to ros image
                stitched_ros_image = self.bridge.cv_to_imgmsg(
                        self.stitched_image,
                        encoding="passthrough"
                        )
                stitched_ros_image.header.seq = seq
                self.image_pub.publish(stitched_ros_image)
                self.image_and_seq_pub.publish(seq, stitched_ros_image)
                self.seq_pub.publish(seq)

                # Compute time stamp spread
                stamp_diff = stamp_max.to_sec() - stamp_min.to_sec()
                self.stamp_pub.publish(stamp_max, stamp_min, stamp_diff)
                
                t1 = rospy.Time.now()
                dt = t1.to_sec() - t0.to_sec()
                self.processing_dt_pub.publish(dt,1.0/dt)

                # Remove data from buffer
                del self.seq_to_images[seq]

            # Throw away any stale data in seq to images buffer
            seq_age = self.get_seq_age(seq)
            if seq_age > self.max_seq_age:
                try:
                    del self.seq_to_images[seq]
                except KeyError:
                    pass

    def run(self):

        while not rospy.is_shutdown(): 
            if self.seq_newest is None:
                continue
            if self.topic_type == 'sensor_msgs/Image':
                self.process_waiting_images() 
            self.publish_stitched_image()
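
The stamp_tuple_to_secs helper called in get_stamp_age isn't shown in this
example. A minimal sketch, assuming the (secs, nsecs) tuples built in the
handlers above:

def stamp_tuple_to_secs(stamp):
    # Convert a (secs, nsecs) stamp tuple to a float number of seconds.
    secs, nsecs = stamp
    return secs + 1.0e-9*nsecs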
Example #16
class BlobFinder(object):
    """
    A simple blob finder, originally based on the cvBlob library but now using
    OpenCV contours. Thresholds the image and filters the blobs by area if requested.
    """
    def __init__(self,
                 threshold=200,
                 filter_by_area=False,
                 min_area=0,
                 max_area=200):
        self.threshold = threshold
        self.filter_by_area = filter_by_area
        self.min_area = min_area
        self.max_area = max_area
        self.bridge = CvBridge()
        #self.blob_mask  = cvblob.CV_BLOB_RENDER_CENTROID
        #self.blob_mask |= cvblob.CV_BLOB_RENDER_COLOR
        self.blobs_image = None
        self.label_image = None
        self.thresh_image = None

    def findBlobs(self, data, create_image=True):
        """
        Finds blobs in ros image data. Returns

        blobs - structure containing data for all blobs found in the image which
                aren't filtered out.
        blobs_image - an image with the blobs and their contours.
        """
        # Convert rosimage to opencv image.
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        raw_image = cv.GetImage(cv_image)

        if self.thresh_image is None:
            self.thresh_image = cv.CreateImage(cv.GetSize(raw_image),
                                               raw_image.depth,
                                               raw_image.channels)

        # Threshold image
        cv.Threshold(raw_image, self.thresh_image, self.threshold, 255,
                     cv.CV_THRESH_BINARY)

        ### Erode and dilate - problematic when tracking points are close together
        #cv.Dilate(self.thresh_image, self.thresh_image, None, 1)
        #cv.Erode(self.thresh_image, self.thresh_image, None, 1)

        # Find contours
        storage = cv.CreateMemStorage(0)
        #contours = cv.FindContours(self.thresh_image,  storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
        contours = cv.FindContours(self.thresh_image, storage,
                                   cv.CV_RETR_EXTERNAL,
                                   cv.CV_CHAIN_APPROX_SIMPLE)

        # Find blob data
        blobs_list = []
        blobs_contours = []

        while contours:

            blob_ok = True

            # Get area and apply area filter
            area = cv.ContourArea(contours)
            if self.filter_by_area:
                if area < self.min_area or area > self.max_area or area <= 0.0:
                    blob_ok = False

            # Get centroid
            moments = cv.Moments(contours)
            if moments.m00 > 0 and blob_ok:
                centroid_x = moments.m10 / moments.m00
                centroid_y = moments.m01 / moments.m00
            else:
                blob_ok = False
                centroid_x = 0.0
                centroid_y = 0.0

            # Get bounding rectangle
            if blob_ok:
                bound_rect = cv.BoundingRect(list(contours))
                min_x = bound_rect[0]
                min_y = bound_rect[1]
                max_x = bound_rect[0] + bound_rect[2]
                max_y = bound_rect[1] + bound_rect[3]
            else:
                min_x = 0.0
                min_y = 0.0
                max_x = 0.0
                max_y = 0.0

            # Create blob dictionary
            blob = {
                'centroid_x': centroid_x,
                'centroid_y': centroid_y,
                'min_x': min_x,
                'max_x': max_x,
                'min_y': min_y,
                'max_y': max_y,
                'area': area,
                'angle': 0.0,  # kept this for compatibility with cvblob version
            }

            # If blob is OK add to list of blobs
            if blob_ok:
                blobs_list.append(blob)
                blobs_contours.append([(x, y) for x, y in contours])

            contours = contours.h_next()

        if not create_image:
            return blobs_list
        else:
            if self.blobs_image is None:
                self.blobs_image = cv.CreateImage(cv.GetSize(raw_image),
                                                  cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(raw_image, self.blobs_image, cv.CV_GRAY2BGR)

            # Draw blobs
            for i, cdata in enumerate(blobs_contours):
                p0_list = cdata
                p1_list = cdata[1:] + [cdata[0]]
                for p0, p1 in zip(p0_list, p1_list):
                    cv.Line(self.blobs_image, p0, p1, (0, 0, 255), 2)
            #blobs_rosimage = self.bridge.cv_to_imgmsg(self.blobs_image,encoding="passthrough")
            #return blobs_list, blobs_rosimage
            return blobs_list, self.blobs_image
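
A hypothetical usage sketch for BlobFinder - subscribing to an image topic and
printing blob centroids. The topic name and parameter values are assumptions.

import rospy
from sensor_msgs.msg import Image

blob_finder = BlobFinder(threshold=150, filter_by_area=True, min_area=5, max_area=500)

def image_callback(data):
    # create_image=False returns just the list of blob dictionaries
    blobs_list = blob_finder.findBlobs(data, create_image=False)
    for blob in blobs_list:
        print blob['centroid_x'], blob['centroid_y'], blob['area']

rospy.init_node('blob_finder_demo')
rospy.Subscriber('/camera/image_raw', Image, image_callback)
rospy.spin()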
Example #17
    features_db = np.column_stack([ut.load_pickle(p[0]) for p in csv_bag_names(features_file)])
    features_db_reduced = features_mat_compress(features_db, 500)

    #Generate a random color for each feature
    colors = np.matrix(np.random.randint(0, 255, (3, features_db_reduced.shape[1])))
    features_tree = sp.KDTree(np.array(features_db_reduced.T))
    bridge = CvBridge()

    forearm_cam_l = '/l_forearm_cam/image_rect_color'
    cv.NamedWindow('surf', 1)

    #import pdb
    #while not rospy.is_shutdown():
    i = 0
    for topic, msg, t in ru.bag_iter(images_file, [forearm_cam_l]):
        image = bridge.imgmsg_to_cv(msg, 'bgr8')
        #image = camera.get_frame()
        image_gray = fea.grayscale(image)
        surf_keypoints, surf_descriptors = fea.surf(image_gray)
        #print len(surf_keypoints)
        #pdb.set_trace()

        #match each keypoint with one in our db & look up color
        matching_idx = [features_tree.query(d)[1] for d in surf_descriptors]
        coordinated_colors = colors[:, matching_idx]

        #nimage = fea.draw_surf(image, surf_keypoints, (0,255,0))
        nimage = fea.draw_surf2(image, surf_keypoints, coordinated_colors)
        cv.ShowImage('surf', nimage)
        cv.SaveImage('forearm_cam%d.png' % i, nimage)
        i = i + 1
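
features_mat_compress isn't defined in this fragment; judging from the call
above it reduces the descriptor database to a fixed number of representative
columns. A minimal sketch, assuming a simple random subsample:

import numpy as np

def features_mat_compress(features_db, n):
    # Assumed behavior: keep n randomly chosen descriptor columns.
    n = min(n, features_db.shape[1])
    idx = np.random.permutation(features_db.shape[1])[:n]
    return features_db[:, idx]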
Example #18
class LIA_AVI_Writer(object):
    """
    Avi writer for the light induced arousal application. 
    """
    def __init__(self, topic):

        # Video recording parameters
        self.topic = topic
        self.record_t = 10.0
        self.start_t = 0.0
        self.current_t = 0.0
        self.progress_t = 0.0
        self.frame_count = 0
        self.recording_message = 'stopped'
        self.frame_rate = 24.0  # This is a kludge - get from image topic
        self.writer = None
        self.done = True
        self.cv_image_size = None
        self.filename = os.path.join(os.environ['HOME'], 'default.avi')

        # Current pulse parameters
        #self.pulse_channel = 'a'
        self.pulse_channel = 'c'  # Should probably move this to a configuration file
        self.pulse_controller = None
        self.timing_fid = None

        self.lock = threading.Lock()
        self.redis_db = redis.Redis('localhost', db=lia_config.redis_db)
        self.bridge = CvBridge()
        rospy.init_node('avi_writer')

        # Set up publications
        self.progress_msg = ProgressMsg()
        self.progress_pub = rospy.Publisher('progress', ProgressMsg)

        # Subscribe to messages
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_handler)

        # Set up services
        self.recording_srv = rospy.Service('recording_cmd', RecordingCmd,
                                           self.handle_recording_cmd)

        # Set up proxy for set current service
        rospy.wait_for_service('set_current')
        self.set_current_proxy = rospy.ServiceProxy('set_current',
                                                    SetCurrentCmd)

    def run(self):
        rospy.spin()

    def handle_recording_cmd(self, req):
        """
        Handles avi recording commands - starts and stops avi recording.
        """
        with self.lock:

            if self.cv_image_size is None:
                # If we don't have an image yet we can't get started - return failure.
                return RecordingCmdResponse(False)

            self.filename = req.filename
            self.record_t = req.duration

            command = req.command.lower()
            if command == 'start':
                # Get start time and create video writer
                self.writer = cv.CreateVideoWriter(
                    self.filename,
                    cv.CV_FOURCC('D', 'I', 'V', 'X'),
                    self.frame_rate,
                    self.cv_image_size,
                )

                if self.writer is None:
                    response = False
                else:
                    response = True
                    self.start_t = rospy.get_time()
                    self.frame_count = 0
                    self.done = False
                    self.recording_message = 'recording'

                    # Get trial values and setup pulse controller
                    trial_values = db_tools.get_dict(self.redis_db,
                                                     'trial_values')
                    self.pulse_controller = Pulse_Controller(
                        trial_values,
                        self.pulse_channel,
                        self.set_current_proxy,
                    )

                    # Write settings file and open timing file
                    log_values = db_tools.get_dict(self.redis_db, 'log_values')
                    data_directory = log_values['data_directory']
                    settings_suffix = log_values['settings_file_suffix']
                    timing_suffix = log_values['timing_file_suffix']
                    metadata_filenames = file_tools.get_metadata_filenames(
                        self.filename,
                        data_directory,
                        settings_suffix,
                        timing_suffix,
                    )
                    settings_filename, timing_filename = metadata_filenames
                    settings_fid = open(settings_filename, 'w')
                    for k, v in trial_values.iteritems():
                        settings_fid.write('{0}: {1}\n'.format(k, v))
                    settings_fid.close()
                    self.timing_fid = open(timing_filename, 'w')

            elif command == 'stop':
                self.cleanup_after_recording()
                response = True

            return RecordingCmdResponse(response)

    def image_handler(self, data):
        """
        Writes frames to avi file.
        """
        self.current_t = rospy.get_time()

        # Convert to opencv image and then to ipl_image
        cv_image = self.bridge.imgmsg_to_cv(data, 'bgr8')
        ipl_image = cv.GetImage(cv_image)

        with self.lock:
            if self.cv_image_size is None:
                self.cv_image_size = cv.GetSize(cv_image)

        if not self.done:

            # Write video frame
            cv.WriteFrame(self.writer, ipl_image)

            # Update times and frame count - these are used elsewhere so we
            # need the lock
            with self.lock:
                self.frame_count += 1
                self.progress_t = self.current_t - self.start_t
                self.pulse_controller.update(self.progress_t)
                self.update_timing_file()

                # Check to see if we are done recording - if so stop writing frames
                if self.current_t >= self.start_t + self.record_t:
                    self.cleanup_after_recording()

        # Publish progress message
        with self.lock:
            self.progress_msg.frame_count = self.frame_count
            self.progress_msg.record_t = self.record_t
            self.progress_msg.progress_t = self.progress_t
            self.progress_msg.recording_message = self.recording_message
        self.progress_pub.publish(self.progress_msg)

    def update_timing_file(self):
        """
        Updates information in timing file - frame count, time and pulse
        state.
        """
        if self.timing_fid is not None:
            self.timing_fid.write('{0} '.format(self.frame_count))
            self.timing_fid.write('{0} '.format(self.progress_t))
            if self.pulse_controller.state == 'high':
                self.timing_fid.write('{0}\n'.format(
                    self.pulse_controller.pulse_current))
            else:
                self.timing_fid.write('{0}\n'.format(0))

    def cleanup_after_recording(self):
        """
        Cleans up after recording is stopped - closes open files, turns off current
        controller.

        Should always be called with the lock held.
        """
        self.done = True
        del self.writer
        self.writer = None
        self.pulse_controller.set_pulse_low()
        self.pulse_controller.turn_off()
        self.recording_message = 'finished'
        db_tools.set_bool(self.redis_db, 'recording_flag', False)
        self.timing_fid.close()
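
Each line written by update_timing_file above is 'frame_count progress_t
current'. A minimal sketch for reading a timing file back (the filename is an
assumption):

import numpy

data = numpy.loadtxt('trial_timing.txt')  # assumed timing file name
frame_count = data[:, 0]
progress_t = data[:, 1]
pulse_current = data[:, 2]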
Example #19
class Transform_2D_Calibrator(object):
    def __init__(self, topic_0, topic_1):

        self.ready = False
        self.state = WAITING  # There are 3 allowed states WAITING, WORKING, FINISHED
        #self.state = WORKING
        self.topics = topic_0, topic_1
        self.bridge = CvBridge()
        self.lock = threading.Lock()

        rospy.init_node('transform_2d_calibrator')
        self.node_name = rospy.get_name()

        # Initialize data lists
        self.blobs = {
            self.topics[0]: [],
            self.topics[1]: [],
        }
        self.index_to_image = {
            self.topics[0]: {},
            self.topics[1]: {},
        }
        self.overlap_indices = []

        # Set active target information and turn off leds
        target_info = mct_active_target.active_target_info()
        self.led_n_max = target_info[0]
        self.led_m_max = target_info[1]
        self.number_of_leds = self.led_n_max * self.led_m_max
        self.led_max_power = target_info[2]
        self.led_space_x = target_info[3]
        self.led_space_y = target_info[3]
        mct_active_target.off()

        # Current led indices
        self.led_n = 0
        self.led_m = 0

        # Wait counter for image acquisition

        self.image_wait_cnt = {
            self.topics[0]: 0,
            self.topics[1]: 0,
        }

        # Sleep periods for idle and wait count loops
        self.idle_sleep_dt = 0.25
        self.wait_sleep_dt = 0.005

        # Led power setting
        params_namespace = '/transform_2d_calibrator_params'
        self.led_power = rospy.get_param(
            '{0}/target/led_power'.format(params_namespace), 10)

        # Wait count for image acquisition
        self.image_wait_number = rospy.get_param(
            '{0}/image_wait_number'.format(params_namespace), 4)

        # Initialize blob finder
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = rospy.get_param(
            '{0}/blob_finder/threshold'.format(params_namespace), 150)
        self.blobFinder.filter_by_area = rospy.get_param(
            '{0}/blob_finder/filter_by_area'.format(params_namespace), False)
        self.blobFinder.min_area = rospy.get_param(
            '{0}/blob_finder/min_area'.format(params_namespace), 0)
        self.blobFinder.max_area = rospy.get_param(
            '{0}/blob_finder/max_area'.format(params_namespace), 200)

        # Number of points required
        self.num_points_required = rospy.get_param(
            '{0}/num_points_required'.format(params_namespace), 10)
        self.transform = None
        self.transform_error = None

        # Get homography matrices
        self.homography_dict = {}
        for topic in self.topics:
            try:
                # Get the data from the parameter server
                rows = rospy.get_param(
                    '{0}/homography_matrix/rows'.format(topic))
                cols = rospy.get_param(
                    '{0}/homography_matrix/cols'.format(topic))
                data = rospy.get_param(
                    '{0}/homography_matrix/data'.format(topic))
            except KeyError:
                # Data is not on the parameter server - try getting it by reading the file
                camera = get_camera_from_topic(topic)
                homography = file_tools.read_homography_calibration(camera)
                rows = homography['rows']
                cols = homography['cols']
                data = homography['data']

            # Rearrange into numpy matrix
            homography_matrix = numpy.array(data)
            homography_matrix = homography_matrix.reshape((rows, cols))
            self.homography_dict[topic] = homography_matrix

        # Set font and initial image information
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.image_info = 'no data'

        # Subscription to image topic
        image_callback_0 = functools.partial(self.image_callback,
                                             self.topics[0])
        image_callback_1 = functools.partial(self.image_callback,
                                             self.topics[1])
        self.image_sub = {
            self.topics[0]:
            rospy.Subscriber(self.topics[0], Image, image_callback_0),
            self.topics[1]:
            rospy.Subscriber(self.topics[1], Image, image_callback_1),
        }

        # Publications - calibration progress images for topics 0 and 1
        self.image_pub = {
            self.topics[0]:
            rospy.Publisher('image_transform_calibration_0', Image),
            self.topics[1]:
            rospy.Publisher('image_transform_calibration_1', Image),
        }

        # Services
        self.start_srv = rospy.Service('{0}/start'.format(self.node_name),
                                       GetFlagAndMessage,
                                       self.handle_start_srv)

        self.is_calibrated_srv = rospy.Service(
            '{0}/is_calibrated'.format(self.node_name), GetBool,
            self.handle_is_calibrated_srv)

        self.get_transform_2d_srv = rospy.Service(
            '{0}/get_transform_2d'.format(self.node_name), GetTransform2d,
            self.handle_get_transform_2d_srv)

        self.ready = True

    def handle_get_transform_2d_srv(self, req):
        """
        Handles requests to get the transform found by the calibration.
        """
        if self.transform is None:
            rotation = 0.0
            translation_x = 0.0
            translation_y = 0.0
        else:
            rotation = self.transform['rotation']
            translation_x = self.transform['translation_x']
            translation_y = self.transform['translation_y']
        return GetTransform2dResponse(rotation, translation_x, translation_y)

    def handle_start_srv(self, req):
        """
        Handles requests to start/restart the transform calibration procedure.
        """
        flag = True
        message = ''
        with self.lock:
            flag, message = mct_active_target.lock(self.node_name)
            if flag:
                self.image_info = ''
                self.state = WORKING
                self.led_n = 0
                self.led_m = 0
                self.blobs = {self.topics[0]: [], self.topics[1]: []}
                self.index_to_image = {self.topics[0]: {}, self.topics[1]: {}}
                self.overlap_indices = []
                self.transform = None
                self.transform_error = None
        return GetFlagAndMessageResponse(flag, message)

    def handle_is_calibrated_srv(self, req):
        """
        Handles requests for whether or not the transform calibration has been completed.
        """
        if self.transform is not None:
            value = True
        else:
            value = False
        return GetBoolResponse(value)

    def image_callback(self, topic, data):
        """
        Image topic callback function. Uses the blobFinder to find the blobs
        (leds) in the image. Publishes images showing the calibration progress.
        """
        # Find blobs
        with self.lock:

            if not self.ready:
                return

            if self.state == WORKING:
                self.blobs[topic], blobs_image = self.blobFinder.findBlobs(
                    data)
                blobs_rosimage = self.bridge.cv_to_imgmsg(
                    blobs_image, encoding="passthrough")
            else:
                self.blobs[topic] = []

            if self.image_wait_cnt[topic] < self.image_wait_number:
                self.image_wait_cnt[topic] += 1

        # Create calibration data  image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        calib_image = cv.CreateImage(cv.GetSize(ipl_image), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, calib_image, cv.CV_GRAY2BGR)

        # Add calibration markers to image
        color = STATE_COLOR[self.state]
        if self.state == WORKING:
            for image_point in self.index_to_image[topic].values():
                x, y = image_point
                cv.Circle(calib_image, (int(x), int(y)), 3, color)
        else:
            for index in self.overlap_indices:
                x, y = self.index_to_image[topic][index]
                cv.Circle(calib_image, (int(x), int(y)), 3, color)

        ## Add text to image
        message = [STATE_MESSAGE[self.state]]
        if self.state == WORKING or self.state == FINISHED:
            if self.state == WORKING:
                num_points_found = len(self.index_to_image[topic])
            else:
                num_points_found = len(self.overlap_indices)
            message.append('{0}/{1} pts'.format(num_points_found,
                                                self.get_led_count()))
        if self.image_info:
            message.append('- {0}'.format(self.image_info))
        if (self.state == FINISHED) and (self.transform_error is not None):
            message.append('- error {0:1.2f} mm'.format(1e3 *
                                                        self.transform_error))
        message = ' '.join(message)
        cv.PutText(calib_image, message, (10, 25), self.cv_text_font, color)

        # Publish calibration progress image
        calib_rosimage = self.bridge.cv_to_imgmsg(calib_image,
                                                  encoding="passthrough")
        self.image_pub[topic].publish(calib_rosimage)

    def wait_for_images(self):
        """
        Wait for 'image_wait_number' of images to be acquired.
        """
        with self.lock:
            for topic in self.topics:
                self.image_wait_cnt[topic] = 0
        done = False
        while not done:
            with self.lock:
                image_wait_cnt = min(
                    [self.image_wait_cnt[t] for t in self.topics])
            if image_wait_cnt >= self.image_wait_number:
                done = True
            rospy.sleep(self.wait_sleep_dt)

    def increment_led(self):
        """
        Increments the led array indices. When the last led is reached check to see if
        sufficient points have been captured for the transform calibration. If so, then
        the state is changed to FINISHED.  If insufficient points have been gathered then
        the state is changed back to WAITING.
        """

        if self.led_m < self.led_m_max - 1 or self.led_n < self.led_n_max - 1:
            if self.led_n < self.led_n_max - 1:
                self.led_n += 1
            else:
                self.led_n = 0
                self.led_m += 1
        else:

            # Turn off led and unlock active target
            mct_active_target.off()
            mct_active_target.unlock(self.node_name)

            # Need to check that we got enough done otherwise return to idle.
            self.state = FINISHED
            self.overlap_indices = self.get_overlap_indices()
            if len(self.overlap_indices) >= self.num_points_required:
                self.state = FINISHED
                self.image_info = ''
            else:
                self.state = WAITING
                self.image_info = 'not enough data'
                self.transform = None

    def get_overlap_indices(self):
        """
        Returns the indices of the overlapping points.
        """
        indices_0 = self.index_to_image[self.topics[0]]
        indices_1 = self.index_to_image[self.topics[1]]
        overlap_indices = set(indices_0)
        overlap_indices.intersection_update(indices_1)
        return list(overlap_indices)

    def get_led_count(self):
        return self.led_n + self.led_m * self.led_n_max + 1

    def save_blob_data(self):
        """
        Save blob data found in images
        """
        with self.lock:
            for topic in self.topics:
                if len(self.blobs[topic]) == 1:
                    blob = self.blobs[topic][0]
                    image_x = blob['centroid_x']
                    image_y = blob['centroid_y']
                    self.index_to_image[topic][(self.led_n,
                                                self.led_m)] = (image_x,
                                                                image_y)

    def find_transform(self):
        """
        Find 2d transform (rotation + translation) from the world points found
        in topic[0] and topic[1] and their respective homographies.
        """
        # Get world points for both topics for all points in overlap
        world_points_dict = {}

        for topic in self.topics:

            # Get homography from image coordinates to world coordinates
            homography_matrix = self.homography_dict[topic]
            homography_matrix_t = homography_matrix.transpose()

            # Get image points for indices in overlap
            image_points = []
            for index in self.overlap_indices:
                image_points.append(self.index_to_image[topic][index])
            image_points = numpy.array(image_points)

            # Convert to homogeneous coordinates and compute world coordinates
            image_points_hg = numpy.ones((image_points.shape[0], 3))
            image_points_hg[:, :2] = image_points
            world_points_hg = numpy.dot(image_points_hg, homography_matrix_t)
            world_points = numpy.array(world_points_hg[:, :2])
            denom = numpy.zeros((world_points.shape[0], 2))
            denom[:, 0] = world_points_hg[:, 2]
            denom[:, 1] = world_points_hg[:, 2]
            world_points = world_points / denom
            world_points_dict[topic] = world_points

        # Estimate transform (rotation + translation) from the two point sets
        points_0 = world_points_dict[self.topics[0]]
        points_1 = world_points_dict[self.topics[1]]
        rotation_est, translation_est = estimate_transform_2d(
            points_0, points_1)

        rotation, translation, error = fit_transform_2d(
            points_0, points_1, rotation_est, translation_est)

        self.transform = {
            'rotation': rotation,
            'translation_x': translation[0],
            'translation_y': translation[1],
        }

        self.transform_error = error

    def run(self):
        """
        Node main function. When the state is WORKING this function activates
        the leds on the active calibration target one at a time. When all of
        the leds have been activated and if sufficient number of points have
        been collected the homography matrix is calculated.
        """

        while not rospy.is_shutdown():

            if self.state == WORKING:
                # Turn on current led and wait for images
                mct_active_target.set_led(self.led_n, self.led_m,
                                          self.led_power)
                self.wait_for_images()
                self.save_blob_data()
                self.increment_led()

            elif self.state == FINISHED and self.transform is None:
                # Calculate 2d transforms
                self.find_transform()
            else:
                rospy.sleep(self.idle_sleep_dt)
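
estimate_transform_2d and fit_transform_2d aren't shown in this example. A
minimal sketch, assuming a standard closed-form 2D least-squares fit mapping
points_1 onto points_0, with fit_transform_2d simply evaluating the RMS
residual of the estimate:

import numpy

def estimate_transform_2d(points_0, points_1):
    # Closed-form least-squares rotation + translation taking points_1 to points_0.
    c0 = points_0.mean(axis=0)
    c1 = points_1.mean(axis=0)
    q0 = points_0 - c0
    q1 = points_1 - c1
    num = (q1[:, 0]*q0[:, 1] - q1[:, 1]*q0[:, 0]).sum()
    den = (q1[:, 0]*q0[:, 0] + q1[:, 1]*q0[:, 1]).sum()
    rotation = numpy.arctan2(num, den)
    cs, sn = numpy.cos(rotation), numpy.sin(rotation)
    rot_matrix = numpy.array([[cs, -sn], [sn, cs]])
    translation = c0 - numpy.dot(rot_matrix, c1)
    return rotation, translation

def fit_transform_2d(points_0, points_1, rotation_est, translation_est):
    # The closed-form estimate is already optimal for this error metric, so
    # this sketch just reports the RMS residual of the mapped points.
    cs, sn = numpy.cos(rotation_est), numpy.sin(rotation_est)
    rot_matrix = numpy.array([[cs, -sn], [sn, cs]])
    mapped = numpy.dot(points_1, rot_matrix.T) + translation_est
    error = numpy.sqrt(((mapped - points_0)**2).sum(axis=1).mean())
    return rotation_est, translation_est, error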
Example #20
File: test12.py Project: wklharry/hrl
class ImageProcess:

    def __init__(self, surf_name, contacts_name):
        forearm_cam_l = '/l_forearm_cam/image_rect_color'

        self.bridge = CvBridge()
        rospy.loginfo('loading %s' % surf_name)
        self.surf_data = ut.load_pickle(surf_name)
        rospy.loginfo('loading %s' % contacts_name)
        self.scene, self.contact_points = ut.load_pickle(contacts_name)
        self.surf_idx = None
        cv.NamedWindow('surf', 1)
        self.tflistener = tf.TransformListener()
        self.camera_geo = ROSCameraCalibration('/l_forearm_cam/camera_info')

        self.lmat0 = None
        self.rmat0 = None
        self.contact = False
        self.contact_stopped = False

        #rospy.Subscriber('/pressure/l_gripper_motor', pm.PressureState, self.lpress_cb)
        rospy.Subscriber(forearm_cam_l, sm.Image, self.image_cb)
        print 'ready'

    #def lpress_cb(self, msg):
    #def lpress_cb(self, pmsg):
    #    #conv to mat
    #    lmat = np.matrix((pmsg.l_finger_tip)).T
    #    rmat = np.matrix((pmsg.r_finger_tip)).T
    #    if self.lmat0 == None:
    #        self.lmat0 = lmat
    #        self.rmat0 = rmat
    #        return

    #    #zero
    #    lmat = lmat - self.lmat0
    #    rmat = rmat - self.rmat0
   
    #    #touch detected
    #    if np.any(np.abs(lmat) > 250) or np.any(np.abs(rmat) > 250): #TODO: replace this with something more sound
    #        self.contact = True
    #    else:
    #        if self.contact == True:
    #            self.contact_stopped = True
    #        self.contact = False
    #        #Contact has been made!! look up gripper tip location
    #        #to_frame = 'base_link'
    #        #def frame_loc(from_frame):
    #        #    p_base = tfu.transform('base_footprint', from_frame, self.tflistener) \
    #        #                   * tfu.tf_as_matrix(([0., 0., 0., 1.], tr.quaternion_from_euler(0,0,0)))
    #        #                   #* tfu.tf_as_matrix((p.A1.tolist(), tr.quaternion_from_euler(0,0,0)))
    #        #    return tfu.matrix_as_tf(p_base)

    #        #tip_locs = [frame_loc(n)[0] for n in self.ftip_frames]
    #        #t = pmsg.header.stamp.to_time() 
    #        #rospy.loginfo("contact detected at %.3f" % t)
    #        #self.contact_locs.append([t, tip_locs])

    #    #self.last_msg = time.time()
    #    rospy.loginfo('lpress_cb ' + str(np.max(rmat)) + ' ' + str(np.max(lmat)))

    def image_cb(self, msg):
        image_time = msg.header.stamp.to_time()
        image      = self.bridge.imgmsg_to_cv(msg, 'bgr8')
        if self.surf_idx is None:
            for i, d in enumerate(self.surf_data):
                t, sdata = d
                if image_time == t:
                    self.surf_idx = i
                    break
        else:
            self.surf_idx = self.surf_idx + 1

        stime, sdata = self.surf_data[self.surf_idx]
        if stime != image_time:
            print 'surf time != image_time'

        surf_keypoints, surf_descriptors = sdata 
        nimage = fea.draw_surf(image, surf_keypoints, (255,0,0))
        cv.ShowImage('surf', nimage)
        cv.WaitKey(10)

        # Track and give 3D location to features.
        ## Project 3D points into this frame (need access to tf => must do online or from cache)
        ##              camera_T_3dframe at ti
        scene2d = self.camera_geo.project(self.scene, self.tflistener, 'base_footprint')
        scene2d_tree = sp.KDTree(np.array(scene2d.T))

        ## Find features close to these 2d points
        for loc, lap, size, d, hess in surf_keypoints:
            idx = scene2d_tree.query(np.array(loc))[1]
            orig3d = self.scene[:, idx]

        ## Get features closest to the contact point
        ## stop running if contact has stopped
        if self.contact:
Example #21
class Frame_Drop_Corrector(object):
    """
    Frame drop corrector node. Subscribes to the given image topic and detects
    dropped frames. Dummy frames are inserted for any frames which are dropped.
    """
    def __init__(self,
                 topic,
                 framerate,
                 tol=0.005,
                 max_stamp_age=1.5,
                 publish_image_corr=True):
        self.ready = False
        self.topic = topic
        self.framerate = framerate
        self.tol = tol
        self.publish_image_corr = publish_image_corr
        self.lock = threading.Lock()
        self.camera_info_topic = get_camera_info_from_image_topic(self.topic)
        self.max_stamp_age = max_stamp_age
        self.bridge = CvBridge()
        self.last_pub_stamp = None
        self.latest_stamp = None
        self.dummy_image = None
        self.seq_offset = None
        self.frame_drop_list = []

        # Data dictionaries for synchronizing tracking data with image seq number
        self.stamp_to_image = {}
        self.stamp_to_seq = {}
        self.seq_to_stamp_and_image = {}

        rospy.init_node('frame_drop_corrector')

        # Subscribe to image and camera info topics
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)
        self.info_sub = rospy.Subscriber(self.camera_info_topic, CameraInfo,
                                         self.camera_info_callback)

        # Set up image publisher
        topic_split = self.topic.split('/')
        seq_and_image_topic = topic_split[:-1]
        seq_and_image_topic.append('seq_and_image_corr')
        seq_and_image_topic = '/'.join(seq_and_image_topic)
        self.seq_and_image_repub = rospy.Publisher(seq_and_image_topic,
                                                   SeqAndImage)
        if self.publish_image_corr:
            image_topic = topic_split[:-1]
            image_topic.append('image_corr')
            image_topic = '/'.join(image_topic)
            self.image_repub = rospy.Publisher(image_topic, Image)

        # Set up dropped frame # publisher
        frames_dropped_topic = topic_split[:-1]
        frames_dropped_topic.append('frames_dropped')
        frames_dropped_topic = '/'.join(frames_dropped_topic)
        self.frames_dropped_pub = rospy.Publisher(frames_dropped_topic,
                                                  FramesDropped)

        # Set up frame drop info service - returns list of seq numbers
        # corresponding to the dropped frames for image stream.
        frame_drop_srv_name = topic_split[:-1]
        frame_drop_srv_name.append('frame_drop_info')
        frame_drop_srv_name = '/'.join(frame_drop_srv_name)
        self.frame_drop_srv = rospy.Service(frame_drop_srv_name, FrameDropInfo,
                                            self.handle_frame_drop_srv)

        # Setup reset service - needs to be called anytime the camera trigger is
        # stopped. It resets the last_pub_stamp to none.
        reset_srv_name = topic_split[:-1]
        reset_srv_name.append('frame_drop_reset')
        reset_srv_name = '/'.join(reset_srv_name)
        self.reset_srv = rospy.Service(reset_srv_name, Empty,
                                       self.handle_reset_srv)
        self.ready = True

    def handle_reset_srv(self, req):
        """
        Resets the frame drop detection node. This prevents the addition of a huge
        number of 'false' dropped frames when the system is restarted.
        """
        with self.lock:
            self.last_pub_stamp = None
            self.seq_offset = None
            self.frame_drop_list = []
        return EmptyResponse()

    def handle_frame_drop_srv(self, req):
        """
        Returns list of sequences numbers for all dropped frames.
        """
        with self.lock:
            frame_drop_list = list(self.frame_drop_list)
        return FrameDropInfoResponse(frame_drop_list)

    def camera_info_callback(self, data):
        """
        Callback for camera info topic subscription - used to associate stamp
        with the image seq number.
        """
        if not self.ready:
            return
        stamp_tuple = data.header.stamp.secs, data.header.stamp.nsecs
        with self.lock:
            self.latest_stamp = stamp_tuple
            self.stamp_to_seq[stamp_tuple] = data.header.seq

    def image_callback(self, data):
        """
        Callback for image topic subscription - used to associate the stamp
        tuple with the incoming image.
        """
        if not self.ready:
            return
        stamp_tuple = data.header.stamp.secs, data.header.stamp.nsecs
        with self.lock:
            self.stamp_to_image[stamp_tuple] = data

            if self.dummy_image is None:
                # Create dummy image for use when a dropped frame occurs.
                cv_image = self.bridge.imgmsg_to_cv(
                    data, desired_encoding="passthrough")
                raw_image = cv.GetImage(cv_image)
                dummy_cv_image = cv.CreateImage(cv.GetSize(raw_image),
                                                raw_image.depth,
                                                raw_image.channels)
                cv.Zero(dummy_cv_image)
                self.dummy_image = self.bridge.cv_to_imgmsg(
                    dummy_cv_image, encoding="passthrough")

    def associate_image_w_seq(self):
        """
        Associate images with image seq numbers.
        """
        with self.lock:
            for stamp, image in self.stamp_to_image.items():
                try:
                    seq = self.stamp_to_seq[stamp]
                    seq_found = True
                except KeyError:
                    seq_found = False

                if seq_found:
                    self.seq_to_stamp_and_image[seq] = stamp, image
                    try:
                        del self.stamp_to_image[stamp]
                    except KeyError:
                        pass
                    try:
                        del self.stamp_to_seq[stamp]
                    except KeyError:
                        pass

                else:
                    # Throw away data greater than the maximum allowed age
                    if self.latest_stamp is not None:
                        latest_stamp_secs = stamp_tuple_to_secs(
                            self.latest_stamp)
                        stamp_secs = stamp_tuple_to_secs(stamp)
                        stamp_age = latest_stamp_secs - stamp_secs
                        if stamp_age > self.max_stamp_age:
                            try:
                                del self.stamp_to_image[stamp]
                            except KeyError:
                                pass

    def republish_seq_and_image(self):
        """
        Republishes the sequence numbers and images with black frames inserted for
        any dropped frames which are discovered by looking at the time stamps.
        """
        for seq, stamp_and_image in sorted(
                self.seq_to_stamp_and_image.items()):
            stamp_tuple, image = stamp_and_image

            if self.seq_offset is None:
                # This is the first run or reset has been called. In either case,
                # reset the seq offset so that the seq numbers begin with 1.
                self.seq_offset = 1 - seq

            if self.last_pub_stamp is not None:
                dt = stamp_dt_secs(stamp_tuple, self.last_pub_stamp)
                #framegap = int(round(dt*self.framerate))
                framegap = approx_frame_number(dt, 1.0 / self.framerate,
                                               self.tol)

                for i in range(framegap - 1):
                    # Note, framegap > 1 implies that we have dropped frames.
                    # Publish dummy frames until we are caught up.
                    dummy_seq = seq + self.seq_offset
                    dummy_stamp_tuple = incr_stamp_tuple(
                        self.last_pub_stamp, (i + 1) / self.framerate)
                    dummy_image = self.dummy_image
                    dummy_image.header.seq = dummy_seq
                    dummy_image.header.stamp = rospy.Time(*dummy_stamp_tuple)
                    self.seq_and_image_repub.publish(dummy_seq, dummy_image)
                    self.frames_dropped_pub.publish(dummy_seq,
                                                    len(self.frame_drop_list))
                    if self.publish_image_corr:
                        self.image_repub.publish(dummy_image)
                    self.seq_offset += 1
                    self.frame_drop_list.append(dummy_seq)

            # Publish new frame with corrected sequence number - to account for dropped frames
            corrected_seq = seq + self.seq_offset
            image.header.seq = corrected_seq
            image.header.stamp = rospy.Time(*stamp_tuple)
            self.seq_and_image_repub.publish(corrected_seq, image)
            self.frames_dropped_pub.publish(corrected_seq,
                                            len(self.frame_drop_list))
            if self.publish_image_corr:
                self.image_repub.publish(image)
            self.last_pub_stamp = stamp_tuple
            del self.seq_to_stamp_and_image[seq]

    def run(self):
        """
        Main loop. Associates images and time stamps with image sequence numbers.
        Examines time interval between frames to detect dropped frames.
        Re-publishes sequences and images as combined SeqAndImage topic. When a
        dropped frame is detected a blank "dummy" frame is inserted in its place.
        """
        while not rospy.is_shutdown():
            self.associate_image_w_seq()
            self.republish_seq_and_image()
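
The stamp helpers used above (stamp_tuple_to_secs, stamp_dt_secs,
approx_frame_number, incr_stamp_tuple) are not defined in this example. A
minimal sketch of plausible implementations, assuming stamps are (secs, nsecs)
tuples taken from a ROS header stamp:

def stamp_tuple_to_secs(stamp):
    # Convert a (secs, nsecs) stamp tuple to floating-point seconds.
    secs, nsecs = stamp
    return secs + 1.0e-9 * nsecs

def stamp_dt_secs(stamp1, stamp0):
    # Elapsed seconds between two stamp tuples.
    return stamp_tuple_to_secs(stamp1) - stamp_tuple_to_secs(stamp0)

def approx_frame_number(dt, frame_dt, tol):
    # Nearest whole number of frame intervals spanned by dt; tol would bound
    # the allowed deviation from an exact multiple (an assumption here).
    return int(round(dt / frame_dt))

def incr_stamp_tuple(stamp, dt):
    # Return a new (secs, nsecs) stamp tuple advanced by dt seconds.
    total = stamp_tuple_to_secs(stamp) + dt
    secs = int(total)
    nsecs = int(round(1.0e9 * (total - secs)))
    return secs, nsecs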
Example #22
class HomographyCalibratorNode(object):
    def __init__(self, topic):

        self.ready = False
        self.state = WAITING  # There are 3 allowed states WAITING, WORKING, FINISHED
        self.topic = topic
        self.bridge = CvBridge()
        self.lock = threading.Lock()

        rospy.init_node('homography_calibrator')
        self.node_name = rospy.get_name()

        # Initialize data lists
        self.blobs_list = []
        self.image_points = []
        self.world_points = []

        # Set active target information and turn off leds
        target_info = mct_active_target.active_target_info()
        self.led_n_max = target_info[0]
        self.led_m_max = target_info[1]
        self.number_of_leds = self.led_n_max * self.led_m_max
        self.led_max_power = target_info[2]
        self.led_space_x = target_info[3]
        self.led_space_y = target_info[3]
        mct_active_target.off()

        # Current led indices
        self.led_n = 0
        self.led_m = 0

        # Led power setting
        params_namespace = '/homography_calibrator_params'
        self.led_power = rospy.get_param(
            '{0}/target/led_power'.format(params_namespace), 10)

        # Wait count for image acquisition
        self.image_wait_number = rospy.get_param(
            '{0}/image_wait_number'.format(params_namespace), 4)
        self.image_wait_cnt = 0

        # Sleep periods for idle and wait count loops
        self.idle_sleep_dt = 0.25
        self.wait_sleep_dt = 0.005

        # Initialize blob finder
        self.blobFinder = BlobFinder()
        self.blobFinder.threshold = rospy.get_param(
            '{0}/blob_finder/threshold'.format(params_namespace), 200)
        self.blobFinder.filter_by_area = rospy.get_param(
            '{0}/blob_finder/filter_by_area'.format(params_namespace), False)
        self.blobFinder.min_area = rospy.get_param(
            '{0}/blob_finder/min_area'.format(params_namespace), 0)
        self.blobFinder.max_area = rospy.get_param(
            '{0}/blob_finder/max_area'.format(params_namespace), 200)

        # Initialize homography matrix and number of points required to solve for it
        self.homography_matrix = None
        self.num_points_required = rospy.get_param(
            '{0}/num_points_required'.format(params_namespace), 10)

        # Set font and initial image information
        self.cv_text_font = cv.InitFont(cv.CV_FONT_HERSHEY_TRIPLEX,
                                        0.8,
                                        0.8,
                                        thickness=1)
        self.image_info = 'no data'

        # Subscription to image topic
        self.image_sub = rospy.Subscriber(self.topic, Image,
                                          self.image_callback)

        # Publications - blobs image and calibration progress image
        self.image_blobs_pub = rospy.Publisher('image_homography_blobs', Image)
        self.image_calib_pub = rospy.Publisher('image_homography_calibration',
                                               Image)

        # Services
        self.start_srv = rospy.Service('{0}/start'.format(self.node_name),
                                       GetFlagAndMessage,
                                       self.handle_start_srv)

        self.get_matrix_srv = rospy.Service(
            '{0}/get_matrix'.format(self.node_name), GetMatrix,
            self.handle_get_matrix_srv)

        self.is_calibrated_srv = rospy.Service(
            '{0}/is_calibrated'.format(self.node_name), GetBool,
            self.handle_is_calibrated_srv)
        self.ready = True

    def handle_start_srv(self, req):
        """
        Handles requests to start/restart the homography calibration procedure.
        """
        flag = True
        message = ''
        with self.lock:
            flag, message = mct_active_target.lock(self.node_name)
            if flag:
                self.homography_matrix = None
                self.image_info = ''
                self.image_points = []
                self.world_points = []
                self.blobs_list = []
                self.state = WORKING
                self.led_n = 0
                self.led_m = 0
        return GetFlagAndMessageResponse(flag, message)

    def handle_get_matrix_srv(self, req):
        """
        Returns the homography matrix. If the homography matrix has not yet
        been computed, the identity matrix is returned.
        """
        if self.homography_matrix is None:
            data = [1, 0, 0, 0, 1, 0, 0, 0, 1]
        else:
            data = self.homography_matrix.reshape((9, ))
        return GetMatrixResponse(3, 3, data)

    def handle_is_calibrated_srv(self, req):
        """
        Handles requests for whether or not the homography calibration has been completed.
        """
        if self.homography_matrix is not None:
            value = True
        else:
            value = False
        return GetBoolResponse(value)

    def image_callback(self, data):
        """
        Image topic callback function. Uses the blobFinder to find the blobs
        (leds) in the image. Publishes images showing the blobs and the
        calibration progress.
        """
        # Find blobs
        with self.lock:

            if not self.ready:
                return

            if self.state == WORKING:
                self.blobs_list, blobs_image = self.blobFinder.findBlobs(data)
                blobs_rosimage = self.bridge.cv_to_imgmsg(
                    blobs_image, encoding="passthrough")
            else:
                self.blobs_list = []
                blobs_rosimage = data

            if self.image_wait_cnt < self.image_wait_number:
                self.image_wait_cnt += 1

        self.image_blobs_pub.publish(blobs_rosimage)

        # Create calibration data image
        cv_image = self.bridge.imgmsg_to_cv(data,
                                            desired_encoding="passthrough")
        ipl_image = cv.GetImage(cv_image)
        calib_image = cv.CreateImage(cv.GetSize(ipl_image), cv.IPL_DEPTH_8U, 3)
        cv.CvtColor(ipl_image, calib_image, cv.CV_GRAY2BGR)

        # Add calibration markers to image
        color = STATE_COLOR[self.state]
        for x, y in self.image_points:
            cv.Circle(calib_image, (int(x), int(y)), 3, color)

        # Add text to image
        message = [STATE_MESSAGE[self.state]]

        if self.state == WORKING or self.state == FINISHED:
            #message.append('{0}/{1} pts'.format(len(self.image_points),self.number_of_leds))
            message.append('{0}/{1} pts'.format(len(self.image_points),
                                                self.get_led_count()))
        if self.image_info:
            message.append('- {0}'.format(self.image_info))
        message = ' '.join(message)
        cv.PutText(calib_image, message, (10, 25), self.cv_text_font, color)

        # Publish calibration progress image
        calib_rosimage = self.bridge.cv_to_imgmsg(calib_image,
                                                  encoding="passthrough")
        self.image_calib_pub.publish(calib_rosimage)

    def wait_for_images(self):
        """
        Wait for 'image_wait_number' images to be acquired.
        """
        with self.lock:
            self.image_wait_cnt = 0
        done = False
        while not done:
            with self.lock:
                image_wait_cnt = self.image_wait_cnt
            if image_wait_cnt >= self.image_wait_number:
                done = True
            rospy.sleep(self.wait_sleep_dt)

    def increment_led(self):
        """
        Increments the led array indices. When the last led is reached, checks
        whether sufficient points have been captured for the homography
        calibration. If so, the state is changed to FINISHED; if not, the
        state is changed back to WAITING.
        """

        if self.led_m < self.led_m_max - 1 or self.led_n < self.led_n_max - 1:
            if self.led_n < self.led_n_max - 1:
                self.led_n += 1
            else:
                self.led_n = 0
                self.led_m += 1
        else:

            # Turn off led and unlock active target
            mct_active_target.off()
            mct_active_target.unlock(self.node_name)

            # Check that enough points were captured; otherwise return to idle.
            if len(self.image_points) >= self.num_points_required:
                self.state = FINISHED
                self.image_info = ''
            else:
                self.state = WAITING
                self.image_info = 'not enough data'
                self.homography_matrix = None

    def get_led_count(self):
        return self.led_n + self.led_m * self.led_n_max + 1

    def run(self):
        """
        Node main function. When the state is WORKING this function activates
        the leds on the active calibration target one at a time. When all of the
        leds have been activated and a sufficient number of points have been
        collected, the homography matrix is calculated.
        """

        while not rospy.is_shutdown():

            if self.state == WORKING:

                # Turn on current led and wait for images
                mct_active_target.set_led(self.led_n, self.led_m,
                                          self.led_power)
                self.wait_for_images()

                if len(self.blobs_list) == 1:

                    # Get centroid of blob and add to image point list
                    blob = self.blobs_list[0]
                    image_x = blob['centroid_x']
                    image_y = blob['centroid_y']
                    self.image_points.append((image_x, image_y))

                    # Compute coordinates of the led in the world plane
                    world_x = self.led_n * self.led_space_x
                    world_y = self.led_m * self.led_space_y
                    self.world_points.append((world_x, world_y))

                self.increment_led()

            elif self.state == FINISHED and self.homography_matrix is None:

                # Find the homography transformation
                image_points = numpy.array(self.image_points)
                world_points = numpy.array(self.world_points)
                result = cv2.findHomography(image_points, world_points,
                                            cv.CV_RANSAC)
                self.homography_matrix, mask = result

                # Compute the mean reprojection error
                image_points_hg = numpy.ones((image_points.shape[0], 3))
                image_points_hg[:, :2] = image_points
                world_points_hg = numpy.ones((world_points.shape[0], 3))
                world_points_hg[:, :2] = world_points
                homography_matrix_t = self.homography_matrix.transpose()
                world_points_hg_pred = numpy.dot(image_points_hg,
                                                 homography_matrix_t)
                denom = numpy.zeros((world_points.shape[0], 2))
                denom[:, 0] = world_points_hg_pred[:, 2]
                denom[:, 1] = world_points_hg_pred[:, 2]
                world_points_pred = world_points_hg_pred[:, :2] / denom
                error = (world_points - world_points_pred)**2
                error = error.sum(axis=1)
                error = numpy.sqrt(error)
                error = error.mean()
                self.image_info = 'error {0:1.2f} mm'.format(1e3 * error)
            else:
                rospy.sleep(self.idle_sleep_dt)
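
Once calibrated, a client can fetch the 3x3 matrix through the get_matrix
service and map image points onto the world plane using the same homogeneous
normalization as the reprojection-error check above. A minimal sketch
(apply_homography is a hypothetical helper, not part of the node):

import numpy

def apply_homography(homography_matrix, image_point):
    # Map one (x, y) image point into world-plane coordinates using the
    # 3x3 homography, dividing through by the homogeneous coordinate.
    x, y = image_point
    p = numpy.dot(homography_matrix, numpy.array([x, y, 1.0]))
    return p[0] / p[2], p[1] / p[2]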
Example #23
class AVI_Writer(object):
    def __init__(self, image_topic, filename):

        self.image_topic = image_topic
        self.filename = filename
        self.frame_count = 0
        self.recording = False
        self.writer = None
        self.cv_image_size = None
        self.framerate = None
        self.last_stamp = None

        self.lock = threading.Lock()
        self.bridge = CvBridge()
        rospy.init_node('avi_writer')
        rospy.on_shutdown(self.stop_video_writer)

        # Subscribe to image topic
        self.image_sub = rospy.Subscriber(self.image_topic, Image,
                                          self.image_handler)

    def image_handler(self, data):
        """
        Writes frames to avi file.
        """
        if self.framerate is not None:
            # Convert to opencv image and then to ipl_image
            cv_image = self.bridge.imgmsg_to_cv(data, 'bgr8')
            ipl_image = cv.GetImage(cv_image)

            cv_image_size = cv.GetSize(cv_image)
            if self.frame_count == 0:
                self.cv_image_size = cv_image_size
                self.start_video_writer()

            if cv_image_size != self.cv_image_size:
                # Image size changed mid-stream; stop recording rather than
                # corrupt the avi file.
                self.recording = False

            if self.recording:
                # Write video frame
                cv.WriteFrame(self.writer, ipl_image)
                self.frame_count += 1
        else:
            stamp = data.header.stamp
            if self.last_stamp is not None:
                diff_stamp = stamp - self.last_stamp
                dt = diff_stamp.to_sec()
                self.framerate = 1 / dt
            self.last_stamp = stamp

    def start_video_writer(self):
        self.writer = cv.CreateVideoWriter(
            self.filename,
            cv.CV_FOURCC('D', 'I', 'V', 'X'),
            #cv.CV_FOURCC('U','2','6','3'),
            #cv.CV_FOURCC('H','2','6','4'),
            self.framerate,
            self.cv_image_size,
        )
        if self.writer is None:
            raise IOError, 'unable to create video writer'
        self.recording = True

    def stop_video_writer(self):
        with self.lock:
            self.recording = False
            del self.writer
            self.writer = None

    def run(self):
        rospy.spin()
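
A minimal way to run the writer, assuming a camera publishing sensor_msgs/Image
on /camera/image_raw (the topic name and output filename are placeholders):

if __name__ == '__main__':
    node = AVI_Writer('/camera/image_raw', 'output.avi')
    node.run()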